Example usage for java.sql Statement CLOSE_CURRENT_RESULT

Introduction

This page collects usage examples for the java.sql.Statement constant CLOSE_CURRENT_RESULT.

Prototype

int CLOSE_CURRENT_RESULT = 1

Document

The constant indicating that the current ResultSet object should be closed when calling getMoreResults.
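
As a quick orientation, the sketch below shows the standard loop this constant is designed for; the Connection and SQL text are placeholders rather than code from the examples that follow, and the usual java.sql imports are assumed. Note that the no-argument getMoreResults() also implicitly closes the current ResultSet object.

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

// Sketch: drain every ResultSet a statement produces. Passing
// CLOSE_CURRENT_RESULT tells the driver to close the current ResultSet
// before advancing to the next result, so no explicit rs.close() is needed.
static void drainResults(Connection conn, String sql) throws SQLException {
    try (Statement stmt = conn.createStatement()) {
        boolean hasResultSet = stmt.execute(sql);
        while (hasResultSet) {
            ResultSet rs = stmt.getResultSet();
            while (rs.next()) {
                // consume the current row here
            }
            hasResultSet = stmt.getMoreResults(Statement.CLOSE_CURRENT_RESULT);
        }
    }
}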

Usage

From source file: com.alibaba.wasp.jdbc.JdbcStatement.java

/**
 * Move to the next result set. This method always returns false.
 * @param current
 *          Statement.CLOSE_CURRENT_RESULT, Statement.KEEP_CURRENT_RESULT, or
 *          Statement.CLOSE_ALL_RESULTS
 * @return false
 */
@Override
public boolean getMoreResults(int current) throws SQLException {
    try {
        switch (current) {
        case Statement.CLOSE_CURRENT_RESULT:
        case Statement.CLOSE_ALL_RESULTS:
            checkClosed();
            closeOldResultSet();
            break;
        case Statement.KEEP_CURRENT_RESULT:
            // nothing to do
            break;
        default:
            throw JdbcException.getInvalidValueException("current", current);
        }
        return false;
    } catch (Exception e) {
        throw Logger.logAndConvert(log, e);
    }
}
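
Since this implementation always returns false, callers invoke it chiefly for the closing side effect. A hypothetical invocation (the conn variable and JDBC 4's ResultSet.isClosed() are assumed):

// Hypothetical caller: CLOSE_CURRENT_RESULT closes the open ResultSet as a
// side effect; this implementation then always reports no further results.
try (Statement stat = conn.createStatement()) {
    ResultSet rs = stat.executeQuery("SELECT 1");
    boolean more = stat.getMoreResults(Statement.CLOSE_CURRENT_RESULT);
    assert !more;         // this driver never reports more results
    assert rs.isClosed(); // closed by the call above
}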

From source file: com.alibaba.wasp.jdbc.TestPreparedStatement.java

public void testGetMoreResults() throws SQLException {
    Statement stat = conn.createStatement();
    PreparedStatement prep;
    ResultSet rs;
    stat.execute("CREATE TABLE TEST(ID INT)");
    stat.execute("INSERT INTO TEST VALUES(1)");

    prep = conn.prepareStatement("SELECT * FROM TEST");
    // just to check if it doesn't throw an exception - it may be null
    prep.getMetaData();
    assertTrue(prep.execute());
    rs = prep.getResultSet();
    assertFalse(prep.getMoreResults());
    assertEquals(-1, prep.getUpdateCount());
    // supposed to be closed now
    assertThrows(SQLErrorCode.OBJECT_CLOSED, rs).next();
    assertEquals(-1, prep.getUpdateCount());

    prep = conn.prepareStatement("UPDATE TEST SET ID = 2");
    assertFalse(prep.execute());
    assertEquals(1, prep.getUpdateCount());
    assertFalse(prep.getMoreResults(Statement.CLOSE_CURRENT_RESULT));
    assertEquals(-1, prep.getUpdateCount());
    // supposed to be closed now
    assertThrows(SQLErrorCode.OBJECT_CLOSED, rs).next();
    assertEquals(-1, prep.getUpdateCount());

    prep = conn.prepareStatement("DELETE FROM TEST");
    prep.executeUpdate();
    assertFalse(prep.getMoreResults());
    assertEquals(-1, prep.getUpdateCount());
}

From source file: net.starschema.clouddb.jdbc.BQStatementRoot.java

/**
 * <p>
 * <h1>Implementation Details:</h1><br>
 * Multiple result sets are currently not supported. We check that the
 * statement is open and that the parameter is acceptable, then close the
 * current result or throw a BQSQLFeatureNotSupportedException.
 * </p>
 * 
 * @param current
 *            - one of the following Statement constants indicating what
 *            should happen to current ResultSet objects obtained using the
 *            method getResultSet: Statement.CLOSE_CURRENT_RESULT,
 *            Statement.KEEP_CURRENT_RESULT, or Statement.CLOSE_ALL_RESULTS
 * @throws BQSQLException
 */

public boolean getMoreResults(int current) throws SQLException {
    if (this.closed) {
        throw new BQSQLException("Statement is closed.");
    }
    if (current == Statement.CLOSE_CURRENT_RESULT || current == Statement.KEEP_CURRENT_RESULT
            || current == Statement.CLOSE_ALL_RESULTS) {

        if (BQDatabaseMetadata.multipleOpenResultsSupported
                && (current == Statement.KEEP_CURRENT_RESULT || current == Statement.CLOSE_ALL_RESULTS)) {
            throw new BQSQLFeatureNotSupportedException();
        }
        // Statement.CLOSE_CURRENT_RESULT: closing this statement also closes its single result
        this.close();
        return false;
    } else {
        throw new BQSQLException("Wrong parameter.");
    }
}
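
Because this driver effectively honors only the CLOSE_CURRENT_RESULT mode, a defensive caller can fall back to it when a richer mode is rejected. A sketch, assuming an open Statement stmt and a driver that signals rejection with java.sql.SQLFeatureNotSupportedException (which the name BQSQLFeatureNotSupportedException suggests it extends):

// Sketch: prefer keeping the current ResultSet open, but fall back to the
// universally supported CLOSE_CURRENT_RESULT mode if the driver refuses.
boolean more;
try {
    more = stmt.getMoreResults(Statement.KEEP_CURRENT_RESULT);
} catch (SQLFeatureNotSupportedException e) {
    more = stmt.getMoreResults(Statement.CLOSE_CURRENT_RESULT);
}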

From source file: org.apache.nifi.processors.standard.AbstractExecuteSQL.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile fileToProcess = null;
    if (context.hasIncomingConnection()) {
        fileToProcess = session.get();

        // If we have no FlowFile, and all incoming connections are self-loops then we can continue on.
        // However, if we have no FlowFile and we have connections coming from other Processors, then
        // we know that we should run only if we have a FlowFile.
        if (fileToProcess == null && context.hasNonLoopConnection()) {
            return;
        }
    }

    final List<FlowFile> resultSetFlowFiles = new ArrayList<>();

    final ComponentLog logger = getLogger();
    final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).asTimePeriod(TimeUnit.SECONDS).intValue();
    final Integer maxRowsPerFlowFile = context.getProperty(MAX_ROWS_PER_FLOW_FILE)
            .evaluateAttributeExpressions().asInteger();
    final Integer outputBatchSizeField = context.getProperty(OUTPUT_BATCH_SIZE).evaluateAttributeExpressions()
            .asInteger();
    final int outputBatchSize = outputBatchSizeField == null ? 0 : outputBatchSizeField;

    SqlWriter sqlWriter = configureSqlWriter(session, context, fileToProcess);

    final String selectQuery;
    if (context.getProperty(SQL_SELECT_QUERY).isSet()) {
        selectQuery = context.getProperty(SQL_SELECT_QUERY).evaluateAttributeExpressions(fileToProcess)
                .getValue();
    } else {
        // If the query is not set, then an incoming flow file is required, and expected to contain a valid SQL select query.
        // If there is no incoming connection, onTrigger will not be called as the processor will fail when scheduled.
        final StringBuilder queryContents = new StringBuilder();
        session.read(fileToProcess, in -> queryContents.append(IOUtils.toString(in, Charset.defaultCharset())));
        selectQuery = queryContents.toString();
    }

    int resultCount = 0;
    try (final Connection con = dbcpService
            .getConnection(fileToProcess == null ? Collections.emptyMap() : fileToProcess.getAttributes());
            final PreparedStatement st = con.prepareStatement(selectQuery)) {
        st.setQueryTimeout(queryTimeout); // timeout in seconds

        if (fileToProcess != null) {
            JdbcCommon.setParameters(st, fileToProcess.getAttributes());
        }
        logger.debug("Executing query {}", new Object[] { selectQuery });

        int fragmentIndex = 0;
        final String fragmentId = UUID.randomUUID().toString();

        final StopWatch executionTime = new StopWatch(true);

        boolean hasResults = st.execute();

        long executionTimeElapsed = executionTime.getElapsed(TimeUnit.MILLISECONDS);

        boolean hasUpdateCount = st.getUpdateCount() != -1;

        while (hasResults || hasUpdateCount) {
            //getMoreResults() and execute() return false to indicate that the result of the statement is just a number and not a ResultSet
            if (hasResults) {
                final AtomicLong nrOfRows = new AtomicLong(0L);

                try {
                    final ResultSet resultSet = st.getResultSet();
                    do {
                        final StopWatch fetchTime = new StopWatch(true);

                        FlowFile resultSetFF;
                        if (fileToProcess == null) {
                            resultSetFF = session.create();
                        } else {
                            resultSetFF = session.create(fileToProcess);
                            resultSetFF = session.putAllAttributes(resultSetFF, fileToProcess.getAttributes());
                        }

                        try {
                            resultSetFF = session.write(resultSetFF, out -> {
                                try {
                                    nrOfRows.set(sqlWriter.writeResultSet(resultSet, out, getLogger(), null));
                                } catch (Exception e) {
                                    throw (e instanceof ProcessException) ? (ProcessException) e
                                            : new ProcessException(e);
                                }
                            });

                            long fetchTimeElapsed = fetchTime.getElapsed(TimeUnit.MILLISECONDS);

                            // set attributes
                            final Map<String, String> attributesToAdd = new HashMap<>();
                            attributesToAdd.put(RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));
                            attributesToAdd.put(RESULT_QUERY_DURATION,
                                    String.valueOf(executionTimeElapsed + fetchTimeElapsed));
                            attributesToAdd.put(RESULT_QUERY_EXECUTION_TIME,
                                    String.valueOf(executionTimeElapsed));
                            attributesToAdd.put(RESULT_QUERY_FETCH_TIME, String.valueOf(fetchTimeElapsed));
                            attributesToAdd.put(RESULTSET_INDEX, String.valueOf(resultCount));
                            attributesToAdd.putAll(sqlWriter.getAttributesToAdd());
                            resultSetFF = session.putAllAttributes(resultSetFF, attributesToAdd);
                            sqlWriter.updateCounters(session);

                            // if fragmented ResultSet, determine if we should keep this fragment; set fragment attributes
                            if (maxRowsPerFlowFile > 0) {
                                // if row count is zero and this is not the first fragment, drop it instead of committing it.
                                if (nrOfRows.get() == 0 && fragmentIndex > 0) {
                                    session.remove(resultSetFF);
                                    break;
                                }

                                resultSetFF = session.putAttribute(resultSetFF, FRAGMENT_ID, fragmentId);
                                resultSetFF = session.putAttribute(resultSetFF, FRAGMENT_INDEX,
                                        String.valueOf(fragmentIndex));
                            }

                            logger.info("{} contains {} records; transferring to 'success'",
                                    new Object[] { resultSetFF, nrOfRows.get() });
                            // Report a FETCH event if there was an incoming flow file, or a RECEIVE event otherwise
                            if (context.hasIncomingConnection()) {
                                session.getProvenanceReporter().fetch(resultSetFF,
                                        "Retrieved " + nrOfRows.get() + " rows",
                                        executionTimeElapsed + fetchTimeElapsed);
                            } else {
                                session.getProvenanceReporter().receive(resultSetFF,
                                        "Retrieved " + nrOfRows.get() + " rows",
                                        executionTimeElapsed + fetchTimeElapsed);
                            }
                            resultSetFlowFiles.add(resultSetFF);

                            // If we've reached the batch size, send out the flow files
                            if (outputBatchSize > 0 && resultSetFlowFiles.size() >= outputBatchSize) {
                                session.transfer(resultSetFlowFiles, REL_SUCCESS);
                                session.commit();
                                resultSetFlowFiles.clear();
                            }

                            fragmentIndex++;
                        } catch (Exception e) {
                            // Remove the result set flow file and propagate the exception
                            session.remove(resultSetFF);
                            if (e instanceof ProcessException) {
                                throw (ProcessException) e;
                            } else {
                                throw new ProcessException(e);
                            }
                        }
                    } while (maxRowsPerFlowFile > 0 && nrOfRows.get() == maxRowsPerFlowFile);

                    // If we are splitting results but not outputting batches, set count on all FlowFiles
                    if (outputBatchSize == 0 && maxRowsPerFlowFile > 0) {
                        for (int i = 0; i < resultSetFlowFiles.size(); i++) {
                            resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i),
                                    FRAGMENT_COUNT, Integer.toString(fragmentIndex)));
                        }
                    }
                } catch (final SQLException e) {
                    throw new ProcessException(e);
                }

                resultCount++;
            }

            // are there any more result sets?
            try {
                hasResults = st.getMoreResults(Statement.CLOSE_CURRENT_RESULT);
                hasUpdateCount = st.getUpdateCount() != -1;
            } catch (SQLException ex) {
                hasResults = false;
                hasUpdateCount = false;
            }
        }

        // Transfer any remaining files to SUCCESS
        session.transfer(resultSetFlowFiles, REL_SUCCESS);
        resultSetFlowFiles.clear();

        //If we had at least one result then it's OK to drop the original file, but if we had no results then
        //  pass the original flow file down the line to trigger downstream processors
        if (fileToProcess != null) {
            if (resultCount > 0) {
                session.remove(fileToProcess);
            } else {
                fileToProcess = session.write(fileToProcess,
                        out -> sqlWriter.writeEmptyResultSet(out, getLogger()));
                fileToProcess = session.putAttribute(fileToProcess, RESULT_ROW_COUNT, "0");
                fileToProcess = session.putAttribute(fileToProcess, CoreAttributes.MIME_TYPE.key(),
                        sqlWriter.getMimeType());
                session.transfer(fileToProcess, REL_SUCCESS);
            }
        } else if (resultCount == 0) {
            //If we had no inbound FlowFile, no exceptions, and the SQL generated no result sets (Insert/Update/Delete statements only)
            // Then generate an empty Output FlowFile
            FlowFile resultSetFF = session.create();

            resultSetFF = session.write(resultSetFF, out -> sqlWriter.writeEmptyResultSet(out, getLogger()));
            resultSetFF = session.putAttribute(resultSetFF, RESULT_ROW_COUNT, "0");
            resultSetFF = session.putAttribute(resultSetFF, CoreAttributes.MIME_TYPE.key(),
                    sqlWriter.getMimeType());
            session.transfer(resultSetFF, REL_SUCCESS);
        }
    } catch (final ProcessException | SQLException e) {
        //If we had at least one result then it's OK to drop the original file, but if we had no results then
        //  pass the original flow file down the line to trigger downstream processors
        if (fileToProcess == null) {
            // This can happen if any exceptions occur while setting up the connection, statement, etc.
            logger.error("Unable to execute SQL select query {} due to {}. No FlowFile to route to failure",
                    new Object[] { selectQuery, e });
            context.yield();
        } else {
            if (context.hasIncomingConnection()) {
                logger.error("Unable to execute SQL select query {} for {} due to {}; routing to failure",
                        new Object[] { selectQuery, fileToProcess, e });
                fileToProcess = session.penalize(fileToProcess);
            } else {
                logger.error("Unable to execute SQL select query {} due to {}; routing to failure",
                        new Object[] { selectQuery, e });
                context.yield();
            }
            session.transfer(fileToProcess, REL_FAILURE);
        }
    }
}
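
Stripped of the NiFi plumbing, the loop above follows the standard JDBC idiom for a statement that may return any mix of ResultSets and update counts. A condensed sketch (usual java.sql imports assumed):

// Condensed form of the loop above: a statement is exhausted only when
// there is neither another ResultSet nor another update count. Each call to
// getMoreResults(CLOSE_CURRENT_RESULT) closes the previous ResultSet.
static void processAllResults(Statement st, String sql) throws SQLException {
    boolean hasResults = st.execute(sql);
    int updateCount = st.getUpdateCount();
    while (hasResults || updateCount != -1) {
        if (hasResults) {
            ResultSet rs = st.getResultSet();
            while (rs.next()) {
                // handle each row
            }
        } else {
            // handle an update count of updateCount affected rows
        }
        hasResults = st.getMoreResults(Statement.CLOSE_CURRENT_RESULT);
        updateCount = st.getUpdateCount();
    }
}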