Example usage for java.sql Statement EXECUTE_FAILED

List of usage examples for java.sql Statement EXECUTE_FAILED

Introduction

This page collects example usages of java.sql Statement EXECUTE_FAILED.

Prototype

int EXECUTE_FAILED = -3

Document

The constant indicating that an error occurred while executing a batch statement.
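
A minimal, self-contained sketch of the two ways EXECUTE_FAILED can surface: in the array returned by executeBatch, or in the update counts carried by a BatchUpdateException. The in-memory H2 URL and the table are assumptions for illustration only.

import java.sql.BatchUpdateException;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class ExecuteFailedDemo {
    public static void main(String[] args) throws SQLException {
        // Hypothetical in-memory database; substitute your own data source.
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo");
                Statement stmt = conn.createStatement()) {
            stmt.addBatch("CREATE TABLE t (id INT PRIMARY KEY)");
            stmt.addBatch("INSERT INTO t VALUES (1)");
            stmt.addBatch("INSERT INTO t VALUES (1)"); // duplicate key: should fail
            try {
                // Some drivers continue past a failure and report it in the result array.
                for (int count : stmt.executeBatch()) {
                    if (count == Statement.EXECUTE_FAILED) {
                        System.out.println("A statement in the batch failed.");
                    }
                }
            } catch (BatchUpdateException e) {
                // Other drivers throw instead; the (possibly partial) counts
                // are still available from the exception.
                for (int count : e.getUpdateCounts()) {
                    if (count == Statement.EXECUTE_FAILED) {
                        System.out.println("A statement in the batch failed.");
                    }
                }
            }
        }
    }
}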

Usage

From source file:Main.java

public static void checkUpdateCounts(int[] updateCounts) {
    for (int i = 0; i < updateCounts.length; i++) {
        if (updateCounts[i] >= 0) {
            System.out.println("OK; updateCount=" + updateCounts[i]);
        } else if (updateCounts[i] == Statement.SUCCESS_NO_INFO) {
            System.out.println("OK; updateCount=Statement.SUCCESS_NO_INFO");
        } else if (updateCounts[i] == Statement.EXECUTE_FAILED) {
            System.out.println("Failure; updateCount=Statement.EXECUTE_FAILED");
        }
    }
}
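
In practice, many drivers report per-statement failures by throwing BatchUpdateException rather than returning normally, so a helper like this is typically called from both paths. A minimal hedged sketch, assuming stmt is a Statement with a pending batch:

try {
    checkUpdateCounts(stmt.executeBatch());
} catch (BatchUpdateException e) {
    checkUpdateCounts(e.getUpdateCounts());
}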

From source file:chh.utils.db.source.common.JdbcClient.java

public void executeInsertQuery(String query, List<List<Column>> columnLists) {
    Connection connection = null;
    try {
        connection = connectionProvider.getConnection();
        boolean autoCommit = connection.getAutoCommit();
        if (autoCommit) {
            connection.setAutoCommit(false);
        }

        LOG.debug("Executing query {}", query);

        PreparedStatement preparedStatement = connection.prepareStatement(query);
        if (queryTimeoutSecs > 0) {
            preparedStatement.setQueryTimeout(queryTimeoutSecs);
        }

        for (List<Column> columnList : columnLists) {
            setPreparedStatementParams(preparedStatement, columnList);
            preparedStatement.addBatch();
        }

        int[] results = preparedStatement.executeBatch();
        // Scan the primitive array directly for EXECUTE_FAILED entries
        // (IntStream is java.util.stream.IntStream).
        if (IntStream.of(results).anyMatch(result -> result == Statement.EXECUTE_FAILED)) {
            connection.rollback();
            throw new RuntimeException(
                    "At least one SQL statement in the batch failed; operation rolled back.");
        } else {
            try {
                connection.commit();
            } catch (SQLException e) {
                throw new RuntimeException("Failed to commit insert query " + query, e);
            }
        }
    } catch (SQLException e) {
        throw new RuntimeException("Failed to execute insert query " + query, e);
    } finally {
        closeConnection(connection);
    }
}
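
A subtlety worth noting: Arrays.asList cannot be used for this check on a primitive int[], because it produces a single-element List<int[]> whose contains(Statement.EXECUTE_FAILED) can never match. Either scan the array directly, as IntStream.of(...).anyMatch(...) does here, or box it first, as the Adaptris example further down does with ArrayUtils.toObject.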

From source file:com.google.enterprise.connector.sharepoint.dao.SimpleSharePointDAOTest.java

public void testHandleBatchUpdateException() {
    Query query = Query.UDS_INSERT;
    SqlParameterSource[] namedParams = new SqlParameterSource[memberships.size()];
    int count = 0;
    for (UserGroupMembership membership : memberships) {
        namedParams[count++] = query.createParameter(membership.getUserId(), membership.getUserName(),
                membership.getGroupId(), membership.getGroupName(), membership.getNamespace());
    }
    String sqlState = "";
    String reason = "";
    int vendorCode = 0;
    int[] updateCounts = null;
    // Scenario 1: When driver executes all the queries
    try {
        updateCounts = new int[namedParams.length];
        for (int i = 0; i < namedParams.length; ++i) {
            if (i % 2 == 0) {
                updateCounts[i] = Statement.EXECUTE_FAILED;
            } else {
                updateCounts[i] = 1;
            }
        }
        BatchUpdateException batchUpdateException = new BatchUpdateException(reason, sqlState, vendorCode,
                updateCounts);
        int[] status = simpleSPDAO.handleBatchUpdateExceptionForMSSQLAndMySQL(batchUpdateException,
                Query.UDS_INSERT, namedParams);
        assertNotNull(status);
        assertEquals(status.length, namedParams.length);
    } catch (Exception e) {
        fail(e.getMessage());
    }

    // Scenario 2: When driver stops execution when a query fails
    try {
        updateCounts = new int[namedParams.length - 1];
        for (int i = 0; i < namedParams.length - 1; ++i) {
            if (i % 2 == 0) {
                updateCounts[i] = Statement.EXECUTE_FAILED;
            } else {
                updateCounts[i] = 1;
            }
        }

        BatchUpdateException batchUpdateException = new BatchUpdateException(reason, sqlState, vendorCode,
                updateCounts);
        int[] status = simpleSPDAO.handleBatchUpdateExceptionForMSSQLAndMySQL(batchUpdateException,
                Query.UDS_INSERT, namedParams);
        assertNotNull(status);
        assertEquals(status.length, namedParams.length - 1);
    } catch (Exception e) {
        fail(e.getMessage());
    }
}

From source file:com.adaptris.core.services.jdbc.JdbcBatchingDataCaptureService.java

protected static long rowsUpdated(int[] rc) throws SQLException {
    List<Integer> result = Arrays.asList(ArrayUtils.toObject(rc));
    if (result.contains(Statement.EXECUTE_FAILED)) {
        throw new SQLException("Batch Execution Failed.");
    }
    return result.stream().filter(e -> !(e == Statement.SUCCESS_NO_INFO)).mapToLong(i -> i).sum();
}
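
Here the primitive array is first boxed with ArrayUtils.toObject (Apache Commons Lang), so Arrays.asList yields a List<Integer> on which contains(Statement.EXECUTE_FAILED) behaves as intended. Entries equal to SUCCESS_NO_INFO (-2) are then excluded from the summed row count, since they signal success without a known update count.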

From source file:ConsumerServer.java

public void processMessages() {
    // for this exercise start from offset 0
    // produce batches of n size for jdbc and insert

    // for this table (five columns, matching the five parameters below):
    // char(10), char(20), long, char(55), char(55)
    String sqlInsert = "INSERT INTO kblog.BLOGDATA VALUES (?,?,?,?,?)";
    String sqlUpsert = "UPSERT INTO kblog.BLOGDATA VALUES (?,?,?,?,?)";
    final String UDFCALL = " select * from udf(kblog.kaf3('nap007:9092'," + " 'gid'," + " 'blogit'," + "  0,"
            + " 'null'," + " 'C10C20IC55C55'," + " '|'," + "  -1," + "  1000 ))";
    final String SQLUPSERT = "upsert using load into kblog.blogdata ";
    final String SQLINSERT = "insert into kblog.blogdata ";
    final String SQLUPSERT2 = "upsert into kblog.blogdata ";

    try {
        if (t2Connect) {
            // T2
            Class.forName("org.apache.trafodion.jdbc.t2.T2Driver");
            conn = DriverManager.getConnection("jdbc:t2jdbc:");
        } else {
            // T4
            Class.forName("org.trafodion.jdbc.t4.T4Driver");
            conn = DriverManager.getConnection("jdbc:t4jdbc://nap007:23400/:", "trafodion", "passw");
        }
        conn.setAutoCommit(autoCommit);
    } catch (SQLException sx) {
        System.out.println("SQL error: " + sx.getMessage());
        System.exit(1);
    } catch (ClassNotFoundException cx) {
        System.out.println("Driver class not found: " + cx.getMessage());
        System.exit(2);

    }

    // message processing loop
    String[] msgFields;
    long numRows = 0;
    long totalRows = 0;
    int[] batchResult;

    if (udfMode == 0 && insMode == 0) {
        // missing cmd line setting
        System.out.println("*** Neither UDF nor INSERT mode specified - aborting ***");
        System.exit(2);
    }

    try {
        if (udfMode > 0) {
            long diff = 0;

            long startTime = System.currentTimeMillis();
            switch (udfMode) {
            case 1: // upsert using load
                pStmt = conn.prepareStatement(SQLUPSERT + UDFCALL);
                totalRows = pStmt.executeUpdate();
                diff = (System.currentTimeMillis() - startTime);
                System.out.println("Upsert loaded row count: " + totalRows + " in " + diff + " ms");
                break;

            case 2: // insert 
                pStmt = conn.prepareStatement(SQLINSERT + UDFCALL);
                totalRows = pStmt.executeUpdate();
                if (!autoCommit) {
                    conn.commit();
                    diff = (System.currentTimeMillis() - startTime);
                    System.out
                            .println("Insert row count (autocommit off): " + totalRows + " in " + diff + " ms");
                } else {
                    diff = (System.currentTimeMillis() - startTime);
                    System.out
                            .println("Insert row count (autocommit on): " + totalRows + " in " + diff + " ms");
                }
                break;

            case 3: // upsert 
                pStmt = conn.prepareStatement(SQLUPSERT2 + UDFCALL);
                totalRows = pStmt.executeUpdate();
                if (!autoCommit) {
                    conn.commit();
                    diff = (System.currentTimeMillis() - startTime);
                    System.out
                            .println("Upsert row count (autocommit off): " + totalRows + " in " + diff + " ms");
                } else {
                    diff = (System.currentTimeMillis() - startTime);
                    System.out
                            .println("Upsert row count (autocommit on): " + totalRows + " in " + diff + " ms");
                }
                break;

            default: // illegal value
                System.out.println("*** Only udf values 1,2,3 allowed; found: " + udfMode);
                System.exit(2);

            } // switch

        } // udfMode
        else { // iterative insert/upsert

            switch (insMode) {
            case 1: // insert
                pStmt = conn.prepareStatement(sqlInsert);
                break;
            case 2: //upsert
                pStmt = conn.prepareStatement(sqlUpsert);
                break;
            default: // illegal
                System.out.println("*** Only insert values 1,2 allowed; found: " + insMode);
                System.exit(2);
            } // switch

            kafka.subscribe(Arrays.asList(topic));
            // priming poll
            kafka.poll(100);
            // always start from beginning
            kafka.seekToBeginning(Arrays.asList(new TopicPartition(topic, 0)));

            // enable autocommit and singleton inserts for comparative timings

            long startTime = System.currentTimeMillis();
            while (true) {
                // note that we don't commitSync to Kafka, though we should
                ConsumerRecords<String, String> records = kafka.poll(streamTO);
                if (records.isEmpty())
                    break; // timed out
                for (ConsumerRecord<String, String> msg : records) {
                    msgFields = msg.value().split("\\" + Character.toString(delimiter));

                    // position info for this message
                    long offset = msg.offset();
                    int partition = msg.partition();
                    String topic = msg.topic();

                    pStmt.setString(1, msgFields[0]);
                    pStmt.setString(2, msgFields[1]);
                    pStmt.setLong(3, Long.parseLong(msgFields[2]));
                    pStmt.setString(4, msgFields[3]);
                    pStmt.setString(5, msgFields[4]);
                    numRows++;
                    totalRows++;
                    if (autoCommit) {
                        // single ins/up sert
                        pStmt.executeUpdate();
                    } else {
                        pStmt.addBatch();
                        if ((numRows % commitCount) == 0) {
                            numRows = 0;
                            batchResult = pStmt.executeBatch();
                            conn.commit();
                        }
                    }

                } // for each msg

            } // while true

            // get here when poll returns no records
            if (numRows > 0 && !autoCommit) {
                // remaining rows
                batchResult = pStmt.executeBatch();
                conn.commit();
            }
            long diff = (System.currentTimeMillis() - startTime);
            if (autoCommit)
                System.out.println("Total rows: " + totalRows + " in " + diff + " ms");
            else
                System.out.println(
                        "Total rows: " + totalRows + " in " + diff + " ms; batch size = " + commitCount);

            kafka.close();
        } // else

    } // try
    catch (ConsumerTimeoutException to) {
        System.out.println("consumer time out; " + to.getMessage());
        System.exit(1);
    } catch (BatchUpdateException bx) {
        int[] insertCounts = bx.getUpdateCounts();
        int count = 1;
        for (int i : insertCounts) {
            if (i == Statement.EXECUTE_FAILED) {
                System.out.println("Error on request #" + count + ": Execute failed");
            }
            count++; // advance for every entry so the request number stays accurate
        }
        System.out.println(bx.getMessage());
        System.exit(1);

    } catch (SQLException sx) {
        System.out.println("SQL error: " + sx.getMessage());
        System.exit(1);
    }

}

From source file:lib.JdbcTemplate.java

@Override
public int[] batchUpdate(final String... sql) throws DataAccessException {
    Assert.notEmpty(sql, "SQL array must not be empty");
    if (logger.isDebugEnabled()) {
        logger.debug("Executing SQL batch update of " + sql.length + " statements");
    }

    class BatchUpdateStatementCallback implements StatementCallback<int[]>, SqlProvider {

        private String currSql;

        @Override
        public int[] doInStatement(Statement stmt) throws SQLException, DataAccessException {
            int[] rowsAffected = new int[sql.length];
            if (JdbcUtils.supportsBatchUpdates(stmt.getConnection())) {
                for (String sqlStmt : sql) {
                    this.currSql = appendSql(this.currSql, sqlStmt);
                    stmt.addBatch(sqlStmt);
                }
                try {
                    rowsAffected = stmt.executeBatch();
                } catch (BatchUpdateException ex) {
                    String batchExceptionSql = null;
                    for (int i = 0; i < ex.getUpdateCounts().length; i++) {
                        if (ex.getUpdateCounts()[i] == Statement.EXECUTE_FAILED) {
                            batchExceptionSql = appendSql(batchExceptionSql, sql[i]);
                        }
                    }
                    if (StringUtils.hasLength(batchExceptionSql)) {
                        this.currSql = batchExceptionSql;
                    }
                    throw ex;
                }
            } else {
                for (int i = 0; i < sql.length; i++) {
                    this.currSql = sql[i];
                    if (!stmt.execute(sql[i])) {
                        rowsAffected[i] = stmt.getUpdateCount();
                    } else {
                        throw new InvalidDataAccessApiUsageException("Invalid batch SQL statement: " + sql[i]);
                    }
                }
            }
            return rowsAffected;
        }

        private String appendSql(String sql, String statement) {
            return (StringUtils.isEmpty(sql) ? statement : sql + "; " + statement);
        }

        @Override
        public String getSql() {
            return this.currSql;
        }
    }

    return execute(new BatchUpdateStatementCallback());
}
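
Because statements are added to the batch in the order of the sql array, the indices of EXECUTE_FAILED entries in BatchUpdateException.getUpdateCounts() line up with that array. That correspondence is what lets the callback collect exactly the failing statements into currSql before rethrowing, so the caller's exception translation can report which SQL failed.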

From source file:org.apache.ddlutils.platform.PlatformImplBase.java

/**
 * Performs the batch for the given statement, and checks that the specified number of rows has been changed.
 *
 * @param statement The prepared statement
 * @param numRows   The number of rows that should change
 * @param table     The changed table
 */
private void executeBatch(PreparedStatement statement, int numRows, Table table)
        throws DatabaseOperationException {
    if (statement != null) {
        try {
            Connection connection = statement.getConnection();

            beforeInsert(connection, table);

            int[] results = statement.executeBatch();

            closeStatement(statement);
            afterInsert(connection, table);

            boolean hasSum = true;
            int sum = 0;

            for (int idx = 0; (results != null) && (idx < results.length); idx++) {
                if (results[idx] < 0) {
                    hasSum = false;
                    if (results[idx] == Statement.EXECUTE_FAILED) {
                        _log.warn("The batch insertion of row " + idx + " into table " + table.getName()
                                + " failed but the driver is able to continue processing");
                    } else if (results[idx] != Statement.SUCCESS_NO_INFO) {
                        _log.warn("The batch insertion of row " + idx + " into table " + table.getName()
                                + " returned an undefined status value " + results[idx]);
                    }
                } else {
                    sum += results[idx];
                }
            }
            if (hasSum && (sum != numRows)) {
                _log.warn("Attempted to insert " + numRows + " rows into table " + table.getName()
                        + " but changed " + sum + " rows");
            }
        } catch (SQLException ex) {
            if (ex instanceof BatchUpdateException) {
                SQLException sqlEx = ((BatchUpdateException) ex).getNextException();

                throw new DatabaseOperationException("Error while inserting into the database", sqlEx);
            } else {
                throw new DatabaseOperationException("Error while inserting into the database", ex);
            }
        }
    }
}

From source file:org.cerberus.crud.dao.impl.TestCaseCountryPropertiesDAO.java

@Override
public Answer createTestCaseCountryPropertiesBatch(List<TestCaseCountryProperties> listOfPropertiesToInsert) {
    Answer answer = new Answer();
    MessageEvent rs = null;
    StringBuilder query = new StringBuilder();
    query.append(
            "INSERT INTO testcasecountryproperties (`Test`,`TestCase`,`Country`,`Property` , `Description`, `Type`");
    query.append(",`Database`,`Value1`,`Value2`,`Length`,`RowLimit`,`Nature`,`RetryNb`,`RetryPeriod`) ");
    query.append("VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)");

    Connection connection = this.databaseSpring.connect();
    try {
        PreparedStatement preStat = connection.prepareStatement(query.toString());
        try {
            for (TestCaseCountryProperties prop : listOfPropertiesToInsert) {
                preStat.setString(1, prop.getTest());
                preStat.setString(2, prop.getTestCase());
                preStat.setString(3, prop.getCountry());
                preStat.setString(4, prop.getProperty());
                preStat.setString(5, prop.getDescription());
                preStat.setString(6, prop.getType());
                preStat.setString(7, prop.getDatabase());
                preStat.setString(8, prop.getValue1());
                preStat.setString(9, prop.getValue2());
                preStat.setInt(10, prop.getLength());
                preStat.setInt(11, prop.getRowLimit());
                preStat.setString(12, prop.getNature());
                preStat.setInt(13, prop.getRetryNb());
                preStat.setInt(14, prop.getRetryPeriod());

                preStat.addBatch();
            }

            //execute the batch once and collect per-statement results
            //(calling executeBatch twice would also run an empty second batch)
            int[] affectedRows = preStat.executeBatch();

            //verify if some of the statements failed
            boolean someFailed = ArrayUtils.contains(affectedRows, 0)
                    || ArrayUtils.contains(affectedRows, Statement.EXECUTE_FAILED);

            if (!someFailed) {
                rs = new MessageEvent(MessageEventEnum.DATA_OPERATION_OK);
                rs.setDescription(
                        rs.getDescription().replace("%ITEM%", "Property").replace("%OPERATION%", "CREATE"));
            } else {
                rs = new MessageEvent(MessageEventEnum.DATA_OPERATION_ERROR_EXPECTED);
                rs.setDescription(rs.getDescription().replace("%ITEM%", "Property")
                        .replace("%OPERATION%", "CREATE")
                        .replace("%REASON%", "Some problem occurred while creating the new property! "));
            }

        } catch (SQLException exception) {
            rs = new MessageEvent(MessageEventEnum.DATA_OPERATION_ERROR_UNEXPECTED);
            rs.setDescription(
                    rs.getDescription().replace("%DESCRIPTION%", "It was not possible to update table."));
            LOG.error("Unable to execute query : " + exception.toString());
        } finally {
            if (preStat != null) {
                preStat.close();
            }
        }
    } catch (SQLException exception) {
        rs = new MessageEvent(MessageEventEnum.DATA_OPERATION_ERROR_UNEXPECTED);
        rs.setDescription(rs.getDescription().replace("%DESCRIPTION%", "It was not possible to update table."));
        LOG.error("Unable to execute query : " + exception.toString());
    } finally {
        try {
            if (!this.databaseSpring.isOnTransaction()) {
                if (connection != null) {
                    connection.close();
                }
            }
        } catch (SQLException e) {
            LOG.warn(e.toString());
        }
    }

    answer.setResultMessage(rs);
    return answer;
}

From source file:org.cerberus.crud.dao.impl.TestCaseExecutionInQueueDAO.java

@Override
public List<TestCaseExecutionInQueue> toQueued(int fetchSize) throws CerberusException {
    List<TestCaseExecutionInQueue> result = new ArrayList<>();
    final String selectByStateQuery = UNLIMITED_FETCH_SIZE == fetchSize ? QUERY_FIND_BY_STATE_WITH_DEPENDENCIES
            : QUERY_FIND_BY_STATE_WITH_DEPENDENCIES_LIMITED;

    try (Connection connection = this.databaseSpring.connect();
            PreparedStatement selectWaitingsStatement = connection.prepareStatement(selectByStateQuery);
            PreparedStatement updateStateStatement = connection
                    .prepareStatement(QUERY_UPDATE_STATE_FROM_STATE)) {
        // Select all executions in queue in WAITING state
        selectWaitingsStatement.setString(1, TestCaseExecutionInQueue.State.WAITING.name());
        if (UNLIMITED_FETCH_SIZE != fetchSize) {
            selectWaitingsStatement.setInt(2, fetchSize);
        }
        ResultSet waitings = selectWaitingsStatement.executeQuery();

        // Then set their state to QUEUED by checking state is still the same
        while (waitings.next()) {
            try {
                TestCaseExecutionInQueue waiting = loadWithDependenciesFromResultSet(waitings);
                fillUpdateStateFromStateStatement(waiting.getId(), TestCaseExecutionInQueue.State.WAITING,
                        TestCaseExecutionInQueue.State.QUEUED, updateStateStatement);
                updateStateStatement.addBatch();
                result.add(waiting);
            } catch (SQLException | FactoryCreationException e) {
                LOG.warn("Unable to add execution in queue id " + waitings.getLong(COLUMN_ID)
                        + " to the batch process from setting its state from WAITING to QUEUED", e);
            }
        }

        // And finally remove those which have not been updated
        int[] batchExecutionResult = updateStateStatement.executeBatch();
        for (int batchExecutionResultIndex = 0, removedCount = 0; batchExecutionResultIndex < batchExecutionResult.length; batchExecutionResultIndex++) {
            if (Statement.EXECUTE_FAILED == batchExecutionResult[batchExecutionResultIndex]) {
                LOG.warn("Unable to move execution state from WAITING to QUEUED for id "
                        + result.get(batchExecutionResultIndex));
            }
            if (batchExecutionResult[batchExecutionResultIndex] <= 0) {
                int resultIndexToRemove = batchExecutionResultIndex - removedCount++;
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Removing execution id " + result.get(resultIndexToRemove)
                            + " from result because of getting no successful result count ("
                            + batchExecutionResult[batchExecutionResultIndex]);
                }
                result.remove(resultIndexToRemove);
            }
        }
        return result;
    } catch (SQLException e) {
        LOG.warn("Unable to state from WAITING to QUEUED state for executions in queue", e);
        throw new CerberusException(new MessageGeneral(MessageGeneralEnum.DATA_OPERATION_ERROR));
    }
}

From source file:org.hyperic.hq.measurement.server.session.DataManagerImpl.java

private List<DataPoint> getRemainingDataPoints(List<DataPoint> data, int[] execInfo) {
    List<DataPoint> res = new ArrayList<DataPoint>();
    int idx = 0;

    // this is the case for mysql
    if (execInfo.length == 0) {
        return res;
    }

    for (Iterator<DataPoint> i = data.iterator(); i.hasNext(); idx++) {
        DataPoint pt = i.next();

        if (execInfo[idx] == Statement.EXECUTE_FAILED) {
            res.add(pt);
        }
    }

    if (log.isDebugEnabled()) {
        log.debug("Need to deal with " + res.size() + " unhandled " + "data points (out of " + execInfo.length
                + ")");
    }
    return res;
}
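
Per the JDBC specification, a driver may either continue processing after a failed statement (marking its slot in the result array as EXECUTE_FAILED, which is the case this method handles) or stop at the first failure and report a shorter or empty update-count array, which is why the empty-array case is treated separately above.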