Example usage for org.springframework.transaction.support TransactionTemplate TransactionTemplate

Introduction

On this page you can find example usages of the org.springframework.transaction.support.TransactionTemplate(PlatformTransactionManager, TransactionDefinition) constructor, collected from open-source projects.

Prototype

public TransactionTemplate(PlatformTransactionManager transactionManager,
        TransactionDefinition transactionDefinition) 

Document

Construct a new TransactionTemplate using the given transaction manager, taking its default settings from the given transaction definition.
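
Before the project-specific examples below, here is a minimal, self-contained sketch of the constructor. The JDBC transaction manager and the in-memory H2 DataSource are illustrative choices, not requirements; any PlatformTransactionManager works.

import org.springframework.jdbc.datasource.DataSourceTransactionManager;
import org.springframework.jdbc.datasource.DriverManagerDataSource;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.TransactionDefinition;
import org.springframework.transaction.support.DefaultTransactionDefinition;
import org.springframework.transaction.support.TransactionTemplate;

public class TransactionTemplateDemo {
    public static void main(String[] args) {
        // Any PlatformTransactionManager will do; a JDBC one against an in-memory
        // H2 database (assumed on the classpath) keeps the sketch self-contained.
        PlatformTransactionManager txManager = new DataSourceTransactionManager(
                new DriverManagerDataSource("jdbc:h2:mem:demo", "sa", ""));

        // The definition supplies the template's defaults: propagation, isolation, timeout, read-only.
        DefaultTransactionDefinition definition = new DefaultTransactionDefinition(
                TransactionDefinition.PROPAGATION_REQUIRES_NEW);
        definition.setTimeout(30); // seconds

        TransactionTemplate template = new TransactionTemplate(txManager, definition);
        String result = template.execute(status -> {
            // transactional work goes here; an exception (or status.setRollbackOnly()) rolls back
            return "done";
        });
        System.out.println(result);
    }
}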

Usage

From source file:alfio.manager.AdminReservationManager.java

public Result<Pair<TicketReservation, List<Ticket>>> createReservation(AdminReservationModification input,
        String eventName, String username) {
    DefaultTransactionDefinition definition = new DefaultTransactionDefinition();
    TransactionTemplate template = new TransactionTemplate(transactionManager, definition);
    return template.execute(status -> {
        try {
            Result<Pair<TicketReservation, List<Ticket>>> result = eventRepository
                    .findOptionalByShortNameForUpdate(eventName).map(e -> validateTickets(input, e))
                    .map(r -> r
                            .flatMap(p -> transactionalCreateReservation(p.getRight(), p.getLeft(), username)))
                    .orElse(Result.error(ErrorCode.EventError.NOT_FOUND));
            if (!result.isSuccess()) {
                log.debug("Error during update of reservation eventName: {}, username: {}, reservation: {}",
                        eventName, username, AdminReservationModification.summary(input));
                status.setRollbackOnly();
            }
            return result;
        } catch (Exception e) {
            log.error("Error during update of reservation eventName: {}, username: {}, reservation: {}",
                    eventName, username, AdminReservationModification.summary(input));
            status.setRollbackOnly();
            return Result.error(singletonList(ErrorCode.custom(
                    e instanceof DuplicateReferenceException ? "duplicate-reference" : "", e.getMessage())));
        }
    });
}

From source file:alfio.manager.TicketReservationManager.java

public TicketReservationManager(EventRepository eventRepository, OrganizationRepository organizationRepository,
        TicketRepository ticketRepository, TicketReservationRepository ticketReservationRepository,
        TicketCategoryRepository ticketCategoryRepository,
        TicketCategoryDescriptionRepository ticketCategoryDescriptionRepository,
        ConfigurationManager configurationManager, PaymentManager paymentManager,
        PromoCodeDiscountRepository promoCodeDiscountRepository, SpecialPriceRepository specialPriceRepository,
        TransactionRepository transactionRepository, NotificationManager notificationManager,
        MessageSource messageSource, TemplateManager templateManager,
        PlatformTransactionManager transactionManager, WaitingQueueManager waitingQueueManager,
        PluginManager pluginManager, TicketFieldRepository ticketFieldRepository,
        AdditionalServiceRepository additionalServiceRepository,
        AdditionalServiceItemRepository additionalServiceItemRepository,
        AdditionalServiceTextRepository additionalServiceTextRepository,
        InvoiceSequencesRepository invoiceSequencesRepository, AuditingRepository auditingRepository,
        UserRepository userRepository, ExtensionManager extensionManager) {
    this.eventRepository = eventRepository;
    this.organizationRepository = organizationRepository;
    this.ticketRepository = ticketRepository;
    this.ticketReservationRepository = ticketReservationRepository;
    this.ticketCategoryRepository = ticketCategoryRepository;
    this.ticketCategoryDescriptionRepository = ticketCategoryDescriptionRepository;
    this.configurationManager = configurationManager;
    this.paymentManager = paymentManager;
    this.promoCodeDiscountRepository = promoCodeDiscountRepository;
    this.specialPriceRepository = specialPriceRepository;
    this.transactionRepository = transactionRepository;
    this.notificationManager = notificationManager;
    this.messageSource = messageSource;
    this.templateManager = templateManager;
    this.waitingQueueManager = waitingQueueManager;
    this.pluginManager = pluginManager;
    this.requiresNewTransactionTemplate = new TransactionTemplate(transactionManager,
            new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRES_NEW));
    this.ticketFieldRepository = ticketFieldRepository;
    this.additionalServiceRepository = additionalServiceRepository;
    this.additionalServiceItemRepository = additionalServiceItemRepository;
    this.additionalServiceTextRepository = additionalServiceTextRepository;
    this.invoiceSequencesRepository = invoiceSequencesRepository;
    this.auditingRepository = auditingRepository;
    this.userRepository = userRepository;
    this.extensionManager = extensionManager;
}
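
The field initialized above wraps every callback in a brand-new transaction, suspending any transaction already in progress. A hedged sketch of a call site (the repository method and reservation ID are invented for illustration):

// Hypothetical call site; the callback body is illustrative, not taken from the project.
TicketReservation reservation = requiresNewTransactionTemplate.execute(
        status -> ticketReservationRepository.findReservationById(reservationId));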

From source file:org.cfr.capsicum.server.ServerRuntimeFactoryBean.java

@Override
public <T> T execute(final @Nonnull TransactionDefinition transactionDefinition,
        final TransactionCallback<T> action) throws TransactionException {
    TransactionTemplate transactionTemplate = new TransactionTemplate(transactionManager,
            Assert.notNull(transactionDefinition, "transactionDefinition is required"));
    return transactionTemplate.execute(action);
}
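
A possible call into this wrapper (only the execute signature comes from the source above; the variable names and callback body are invented):

// Hypothetical usage of the wrapper above.
Integer updated = serverRuntime.execute(
        new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRES_NEW),
        status -> jdbcTemplate.update("UPDATE audit_log SET flushed = 1"));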

From source file:org.apache.ctakes.ytex.uima.mapper.DocumentMapperServiceImpl.java

public Integer saveDocument(final JCas jcas, final String analysisBatch, final boolean bStoreDocText,
        final boolean bStoreCAS, final boolean bInsertAnnotationContainmentLinks,
        final Set<String> setTypesToIgnore) {
    if (log.isTraceEnabled())
        log.trace("begin saveDocument");
    // communicate options to mappers using thread local variable
    final DefaultTransactionDefinition txDef = new DefaultTransactionDefinition(
            TransactionDefinition.PROPAGATION_REQUIRES_NEW);
    txDef.setIsolationLevel("orcl".equals(this.dbType) ? TransactionDefinition.ISOLATION_READ_COMMITTED
            : TransactionDefinition.ISOLATION_READ_UNCOMMITTED);
    final TransactionTemplate txTemplate = new TransactionTemplate(this.getTransactionManager(), txDef);
    final int documentId = txTemplate.execute(new TransactionCallback<Integer>() {

        @Override
        public Integer doInTransaction(TransactionStatus arg0) {
            Document doc = createDocument(jcas, analysisBatch, bStoreDocText, bStoreCAS);
            sessionFactory.getCurrentSession().save(doc);
            // make sure the document has been saved
            getSessionFactory().getCurrentSession().flush();
            saveAnnotationsHib(jcas, bInsertAnnotationContainmentLinks, setTypesToIgnore, doc);
            extractAndSaveDocKey(jcas, doc);
            return doc.getDocumentID();
        }
    });
    if (log.isTraceEnabled())
        log.trace("end saveDocument");
    return documentId;
}

From source file:org.sakaiproject.tool.assessment.facade.ItemHashUtil.java

/**
 * Bit of a hack to allow reuse between {@link ItemFacadeQueries} and {@link PublishedItemFacadeQueries}.
 * Arguments are rather arbitrary extension points to support what we happen to <em>know</em> are the differences
 * between item and published item processing, as well as the common utilities/service dependencies.
 *
 * @param batchSize number of items to process per batch/transaction; non-positive values default to 100
 * @param hqlQueries named HQL queries used to count and look up the items
 * @param concreteType the concrete {@link ItemDataIfc} implementation being backfilled
 * @param hashAndAssignCallback callback that computes and assigns the hash for a single item
 * @param hibernateTemplate template providing Hibernate session access
 * @return a summary of the backfill run, including item counts and any per-item errors
 */
BackfillItemHashResult backfillItemHashes(int batchSize, Map<String, String> hqlQueries,
        Class<? extends ItemDataIfc> concreteType, Function<ItemDataIfc, ItemDataIfc> hashAndAssignCallback,
        HibernateTemplate hibernateTemplate) {

    final long startTime = System.currentTimeMillis();
    log.debug("Hash backfill starting for items of type [" + concreteType.getSimpleName() + "]");

    if (batchSize <= 0) {
        batchSize = 100;
    }
    final int flushSize = batchSize;

    final AtomicInteger totalItems = new AtomicInteger(0);
    final AtomicInteger totalItemsNeedingBackfill = new AtomicInteger(0);
    final AtomicInteger batchNumber = new AtomicInteger(0);
    final AtomicInteger recordsRead = new AtomicInteger(0);
    final AtomicInteger recordsUpdated = new AtomicInteger(0);
    final Map<Long, Throwable> hashingErrors = new TreeMap<>();
    final Map<Integer, Throwable> otherErrors = new TreeMap<>();
    final List<Long> batchElapsedTimes = new ArrayList<>();
    // always needed as *printable* average per-batch timing value, so just store as string. and cache at this
    // scope b/c we sometimes need to print a single calculation multiple times, e.g. in last batch and
    // at method exit
    final AtomicReference<String> currentAvgBatchElapsedTime = new AtomicReference<>("0.00");
    final AtomicBoolean areMoreItems = new AtomicBoolean(true);

    // Get the item totals up front since a) we know any questions created while the job is running will be
    // assigned hashes and thus won't need to be handled by the job and b) it makes bookkeeping within the job much
    // easier
    hibernateTemplate.execute(session -> {
        session.setDefaultReadOnly(true);
        totalItems.set(countItems(hqlQueries, session));
        totalItemsNeedingBackfill.set(countItemsNeedingHashBackfill(hqlQueries, session));
        log.debug("Hash backfill required for [" + totalItemsNeedingBackfill + "] of [" + totalItems
                + "] items of type [" + concreteType.getSimpleName() + "]");
        return null;
    });

    while (areMoreItems.get()) {
        long batchStartTime = System.currentTimeMillis();
        batchNumber.getAndIncrement();
        final AtomicInteger itemsHashedInBatch = new AtomicInteger(0);
        final AtomicInteger itemsReadInBatch = new AtomicInteger(0);
        final AtomicReference<Throwable> failure = new AtomicReference<>(null);

        // Idea here is a) avoid very long running transactions and b) avoid reading all items into memory
        // and c) avoid weirdness, e.g. duplicate results, when paginating complex hibernate objects. So
        // there's a per-batch transaction, and each batch re-runs the same two item lookup queries, one to
        // get the list of IDs for the next page of items, and one to resolve those IDs to items
        try {
            new TransactionTemplate(transactionManager, requireNewTransaction()).execute(status -> {
                hibernateTemplate.execute(session -> {
                    List<ItemDataIfc> itemsInBatch = null;
                    try { // resource cleanup block
                        session.setFlushMode(FlushMode.MANUAL);
                        try { // initial read block (failures here are fatal)

                            // set up the actual result set for this batch of items. use error count to skip over failed items
                            final List<Long> itemIds = itemIdsNeedingHashBackfill(hqlQueries, flushSize,
                                    hashingErrors.size(), session);
                            itemsInBatch = itemsById(itemIds, hqlQueries, session);

                        } catch (RuntimeException e) {
                            // Panic on failure to read counts and/or the actual items in the batch.
                            // Otherwise we would potentially loop indefinitely, since this design has no way to
                            // skip this page of results.
                            log.error("Failed to read batch of hashable items. Giving up at record ["
                                    + recordsRead + "] of [" + totalItemsNeedingBackfill + "] Type: ["
                                    + concreteType.getSimpleName() + "]", e);
                            areMoreItems.set(false); // force overall loop to exit
                            throw e; // force txn to give up
                        }

                        for (ItemDataIfc item : itemsInBatch) {
                            recordsRead.getAndIncrement();
                            itemsReadInBatch.getAndIncrement();

                            // Assign the item's hash/es
                            try {
                                log.debug("Backfilling hash for item [" + recordsRead + "] of ["
                                        + totalItemsNeedingBackfill + "] Type: [" + concreteType.getSimpleName()
                                        + "] ID: [" + item.getItemId() + "]");
                                hashAndAssignCallback.apply(item);
                                itemsHashedInBatch.getAndIncrement();
                            } catch (Throwable t) {
                                // Failures considered ignorable here... probably some unexpected item state
                                // that prevented hash calculation.
                                //
                                // Re the log statement... yes, the caller probably logs exceptions, but likely
                                // without stack traces, and we'd like to advertise failures as quickly as possible,
                                // so we go ahead and emit an error log here.
                                log.error("Item hash calculation failed for item [" + recordsRead + "] of ["
                                        + totalItemsNeedingBackfill + "] Type: [" + concreteType.getSimpleName()
                                        + "] ID: [" + (item == null ? "?" : item.getItemId()) + "]", t);
                                hashingErrors.put(item.getItemId(), t);
                            }

                        }
                        if (itemsHashedInBatch.get() > 0) {
                            session.flush();
                            recordsUpdated.getAndAdd(itemsHashedInBatch.get());
                        }
                        areMoreItems.set(itemsInBatch.size() >= flushSize);

                    } finally {
                        quietlyClear(session); // potentially very large, so clear aggressively
                    }
                    return null;
                }); // end session
                return null;
            }); // end transaction
        } catch (Throwable t) {
            // We're still in the loop over all batches, but something caused the current batch (and its
            // transaction) to exit abnormally. Logging of both success and failure cases is quite detailed,
            // and needs the same timing calcs, so is consolidated into the 'finally' block below.
            failure.set(t);
            otherErrors.put(batchNumber.get(), t);
        } finally {
            // Detailed batch-level reporting
            final long batchElapsed = (System.currentTimeMillis() - batchStartTime);
            batchElapsedTimes.add(batchElapsed);
            currentAvgBatchElapsedTime.set(new DecimalFormat("#.00")
                    .format(batchElapsedTimes.stream().collect(Collectors.averagingLong(l -> l))));
            if (failure.get() == null) {
                log.debug("Item hash backfill batch flushed to database. Type: [" + concreteType.getSimpleName()
                        + "] Batch number: [" + batchNumber + "] Items attempted in batch: [" + itemsReadInBatch
                        + "] Items succeeded in batch: [" + itemsHashedInBatch + "] Total items attempted: ["
                        + recordsRead + "] Total items succeeded: [" + recordsUpdated
                        + "] Total attemptable items: [" + totalItemsNeedingBackfill + "] Elapsed batch time: ["
                        + batchElapsed + "ms] Avg time/batch: [" + currentAvgBatchElapsedTime + "ms]");
            } else {
                // yes, caller probably logs exceptions later, but probably without stack traces, and we'd
                // like to advertise failures as quickly as possible, so we go ahead and emit an error log
                // here.
                log.error("Item hash backfill failed. Type: [" + concreteType.getSimpleName()
                        + "] Batch number: [" + batchNumber + "] Items attempted in batch: [" + itemsReadInBatch
                        + "] Items flushable (but failed) in batch: [" + itemsHashedInBatch
                        + "] Total items attempted: [" + recordsRead + "] Total items succeeded: ["
                        + recordsUpdated + "] Total attemptable items: [" + totalItemsNeedingBackfill
                        + "] Elapsed batch time: [" + batchElapsed + "ms] Avg time/batch: ["
                        + currentAvgBatchElapsedTime + "ms]", failure.get());
            }
        }
    } // end loop over all batches

    final long elapsedTime = System.currentTimeMillis() - startTime;
    log.debug("Hash backfill completed for items of type [" + concreteType.getSimpleName()
            + "]. Total items attempted: [" + recordsRead + "] Total items succeeded: [" + recordsUpdated
            + "] Target attemptable items: [" + totalItemsNeedingBackfill + "] Total elapsed time: ["
            + elapsedTime + "ms] Total batches: [" + batchNumber + "] Avg time/batch: ["
            + currentAvgBatchElapsedTime + "ms]");

    return new BackfillItemHashResult(elapsedTime, totalItems.get(), totalItemsNeedingBackfill.get(),
            recordsRead.get(), recordsUpdated.get(), flushSize, hashingErrors, otherErrors);
}
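
The requireNewTransaction() helper called at the top of the batch loop is not part of this excerpt; presumably it builds a REQUIRES_NEW definition along these lines:

// Hedged reconstruction of the helper used above; not taken from the Sakai source.
private static TransactionDefinition requireNewTransaction() {
    return new DefaultTransactionDefinition(TransactionDefinition.PROPAGATION_REQUIRES_NEW);
}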

From source file:org.springframework.amqp.rabbit.listener.SimpleMessageListenerContainer.java

private boolean receiveAndExecute(final BlockingQueueConsumer consumer) throws Throwable {

    if (this.transactionManager != null) {
        try {
            return new TransactionTemplate(this.transactionManager, this.transactionAttribute)
                    .execute(new TransactionCallback<Boolean>() {
                        @Override
                        public Boolean doInTransaction(TransactionStatus status) {
                            ConnectionFactoryUtils.bindResourceToTransaction(
                                    new RabbitResourceHolder(consumer.getChannel(), false),
                                    getConnectionFactory(), true);
                            try {
                                return doReceiveAndExecute(consumer);
                            } catch (RuntimeException e) {
                                throw e;
                            } catch (Throwable e) { //NOSONAR
                                // ok to catch Throwable here because we re-throw it below
                                throw new WrappedTransactionException(e);
                            }
                        }
                    });
        } catch (WrappedTransactionException e) {
            throw e.getCause();
        }
    }

    return doReceiveAndExecute(consumer);

}
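
The WrappedTransactionException detour exists because TransactionCallback#doInTransaction cannot throw checked exceptions. A minimal, self-contained sketch of the same wrap/unwrap pattern (all names here are illustrative, not Spring AMQP's):

import java.util.concurrent.Callable;
import org.springframework.transaction.support.TransactionTemplate;

final class WrappedException extends RuntimeException {
    WrappedException(Throwable cause) { super(cause); }
}

class RethrowingExecutor {
    // Runs work in a transaction while preserving checked exceptions across the callback boundary.
    static <T> T execute(TransactionTemplate template, Callable<T> work) throws Throwable {
        try {
            return template.execute(status -> {
                try {
                    return work.call();
                } catch (RuntimeException e) {
                    throw e; // unchecked: propagate as-is
                } catch (Throwable t) {
                    throw new WrappedException(t); // smuggle checked exceptions out of the callback
                }
            });
        } catch (WrappedException e) {
            throw e.getCause(); // restore the original exception for the caller
        }
    }
}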

From source file:org.springframework.batch.core.step.tasklet.TaskletStep.java

/**
 * Process the step and update its context so that progress can be monitored
 * by the caller. The step is broken down into chunks, each one executing in
 * a transaction. The step and its execution and execution context are all
 * given an up to date {@link BatchStatus}, and the {@link JobRepository} is
 * used to store the result. Various pieces of reporting information are also added to
 * the current context governing the step execution, which would normally be
 * available to the caller through the step's {@link ExecutionContext}.<br>
 *
 * @throws JobInterruptedException if the step or a chunk is interrupted
 * @throws RuntimeException if there is an exception during a chunk
 * execution
 *
 */
@Override
protected void doExecute(StepExecution stepExecution) throws Exception {
    stepExecution.getExecutionContext().put(TASKLET_TYPE_KEY, tasklet.getClass().getName());
    stepExecution.getExecutionContext().put(STEP_TYPE_KEY, this.getClass().getName());

    stream.update(stepExecution.getExecutionContext());
    getJobRepository().updateExecutionContext(stepExecution);

    // Shared semaphore per step execution, so other step executions can run
    // in parallel without needing the lock
    final Semaphore semaphore = createSemaphore();

    stepOperations.iterate(new StepContextRepeatCallback(stepExecution) {

        @Override
        public RepeatStatus doInChunkContext(RepeatContext repeatContext, ChunkContext chunkContext)
                throws Exception {

            StepExecution stepExecution = chunkContext.getStepContext().getStepExecution();

            // Before starting a new transaction, check for
            // interruption.
            interruptionPolicy.checkInterrupted(stepExecution);

            RepeatStatus result;
            try {
                result = new TransactionTemplate(transactionManager, transactionAttribute)
                        .execute(new ChunkTransactionCallback(chunkContext, semaphore));
            } catch (UncheckedTransactionException e) {
                // Allow checked exceptions to be thrown inside callback
                throw (Exception) e.getCause();
            }

            chunkListener.afterChunk(chunkContext);

            // Check for interruption after transaction as well, so that
            // the interrupted exception is correctly propagated up to
            // caller
            interruptionPolicy.checkInterrupted(stepExecution);

            return result;
        }

    });

}

From source file:org.springframework.test.context.jdbc.DatabaseInitializerTestExecutionListener.java

/**
 * Execute the SQL scripts configured via the supplied
 * {@link DatabaseInitializer @DatabaseInitializer} for the given
 * {@link ExecutionPhase} and {@link TestContext}.
 *
 * <p>Special care must be taken in order to properly support the
 * {@link DatabaseInitializer#requireNewTransaction requireNewTransaction}
 * flag.
 *
 * @param databaseInitializer the {@code @DatabaseInitializer} to parse
 * @param executionPhase the current execution phase
 * @param testContext the current {@code TestContext}
 * @param classLevel {@code true} if {@link DatabaseInitializer @DatabaseInitializer}
 * was declared at the class level
 */
@SuppressWarnings("serial")
private void executeDatabaseInitializer(DatabaseInitializer databaseInitializer, ExecutionPhase executionPhase,
        TestContext testContext, boolean classLevel) throws Exception {
    if (logger.isDebugEnabled()) {
        logger.debug(String.format("Processing %s for execution phase [%s] and test context %s.",
                databaseInitializer, executionPhase, testContext));
    }

    if (executionPhase != databaseInitializer.executionPhase()) {
        return;
    }

    final ResourceDatabasePopulator populator = new ResourceDatabasePopulator();
    populator.setSqlScriptEncoding(databaseInitializer.encoding());
    populator.setSeparator(databaseInitializer.separator());
    populator.setCommentPrefix(databaseInitializer.commentPrefix());
    populator.setBlockCommentStartDelimiter(databaseInitializer.blockCommentStartDelimiter());
    populator.setBlockCommentEndDelimiter(databaseInitializer.blockCommentEndDelimiter());
    populator.setContinueOnError(databaseInitializer.continueOnError());
    populator.setIgnoreFailedDrops(databaseInitializer.ignoreFailedDrops());

    String[] scripts = getScripts(databaseInitializer, testContext, classLevel);
    scripts = TestContextResourceUtils.convertToClasspathResourcePaths(testContext.getTestClass(), scripts);
    populator.setScripts(
            TestContextResourceUtils.convertToResources(testContext.getApplicationContext(), scripts));
    if (logger.isDebugEnabled()) {
        logger.debug("Executing SQL scripts: " + ObjectUtils.nullSafeToString(scripts));
    }

    final DataSource dataSource = TestContextTransactionUtils.retrieveDataSource(testContext,
            databaseInitializer.dataSource());
    final PlatformTransactionManager transactionManager = TestContextTransactionUtils
            .retrieveTransactionManager(testContext, databaseInitializer.transactionManager());

    int propagation = databaseInitializer.requireNewTransaction()
            ? TransactionDefinition.PROPAGATION_REQUIRES_NEW
            : TransactionDefinition.PROPAGATION_REQUIRED;

    TransactionAttribute transactionAttribute = TestContextTransactionUtils
            .createDelegatingTransactionAttribute(testContext, new DefaultTransactionAttribute(propagation));

    new TransactionTemplate(transactionManager, transactionAttribute)
            .execute(new TransactionCallbackWithoutResult() {

                @Override
                public void doInTransactionWithoutResult(TransactionStatus status) {
                    populator.execute(dataSource);
                }
            });
}

From source file:org.springframework.test.context.jdbc.SqlScriptsTestExecutionListener.java

/**
 * Execute the SQL scripts configured via the supplied {@link Sql @Sql}
 * annotation for the given {@link ExecutionPhase} and {@link TestContext}.
 * <p>Special care must be taken in order to properly support the configured
 * {@link SqlConfig#transactionMode}.
 * @param sql the {@code @Sql} annotation to parse
 * @param executionPhase the current execution phase
 * @param testContext the current {@code TestContext}
 * @param classLevel {@code true} if {@link Sql @Sql} was declared at the class level
 */
private void executeSqlScripts(Sql sql, ExecutionPhase executionPhase, TestContext testContext,
        boolean classLevel) throws Exception {

    if (executionPhase != sql.executionPhase()) {
        return;
    }

    MergedSqlConfig mergedSqlConfig = new MergedSqlConfig(sql.config(), testContext.getTestClass());
    if (logger.isDebugEnabled()) {
        logger.debug(String.format("Processing %s for execution phase [%s] and test context %s.",
                mergedSqlConfig, executionPhase, testContext));
    }

    final ResourceDatabasePopulator populator = new ResourceDatabasePopulator();
    populator.setSqlScriptEncoding(mergedSqlConfig.getEncoding());
    populator.setSeparator(mergedSqlConfig.getSeparator());
    populator.setCommentPrefix(mergedSqlConfig.getCommentPrefix());
    populator.setBlockCommentStartDelimiter(mergedSqlConfig.getBlockCommentStartDelimiter());
    populator.setBlockCommentEndDelimiter(mergedSqlConfig.getBlockCommentEndDelimiter());
    populator.setContinueOnError(mergedSqlConfig.getErrorMode() == ErrorMode.CONTINUE_ON_ERROR);
    populator.setIgnoreFailedDrops(mergedSqlConfig.getErrorMode() == ErrorMode.IGNORE_FAILED_DROPS);

    String[] scripts = getScripts(sql, testContext, classLevel);
    scripts = TestContextResourceUtils.convertToClasspathResourcePaths(testContext.getTestClass(), scripts);
    List<Resource> scriptResources = TestContextResourceUtils
            .convertToResourceList(testContext.getApplicationContext(), scripts);
    for (String stmt : sql.statements()) {
        if (StringUtils.hasText(stmt)) {
            stmt = stmt.trim();
            scriptResources.add(new ByteArrayResource(stmt.getBytes(), "from inlined SQL statement: " + stmt));
        }
    }
    populator.setScripts(scriptResources.toArray(new Resource[scriptResources.size()]));
    if (logger.isDebugEnabled()) {
        logger.debug("Executing SQL scripts: " + ObjectUtils.nullSafeToString(scriptResources));
    }

    String dsName = mergedSqlConfig.getDataSource();
    String tmName = mergedSqlConfig.getTransactionManager();
    DataSource dataSource = TestContextTransactionUtils.retrieveDataSource(testContext, dsName);
    PlatformTransactionManager txMgr = TestContextTransactionUtils.retrieveTransactionManager(testContext,
            tmName);
    boolean newTxRequired = (mergedSqlConfig.getTransactionMode() == TransactionMode.ISOLATED);

    if (txMgr == null) {
        Assert.state(!newTxRequired,
                () -> String.format(
                        "Failed to execute SQL scripts for test context %s: "
                                + "cannot execute SQL scripts using Transaction Mode "
                                + "[%s] without a PlatformTransactionManager.",
                        testContext, TransactionMode.ISOLATED));
        Assert.state(dataSource != null,
                () -> String.format("Failed to execute SQL scripts for test context %s: "
                        + "supply at least a DataSource or PlatformTransactionManager.", testContext));
        // Execute scripts directly against the DataSource
        populator.execute(dataSource);
    } else {
        DataSource dataSourceFromTxMgr = getDataSourceFromTransactionManager(txMgr);
        // Ensure user configured an appropriate DataSource/TransactionManager pair.
        if (dataSource != null && dataSourceFromTxMgr != null && !dataSource.equals(dataSourceFromTxMgr)) {
            throw new IllegalStateException(String.format(
                    "Failed to execute SQL scripts for test context %s: "
                            + "the configured DataSource [%s] (named '%s') is not the one associated with "
                            + "transaction manager [%s] (named '%s').",
                    testContext, dataSource.getClass().getName(), dsName, txMgr.getClass().getName(), tmName));
        }
        if (dataSource == null) {
            dataSource = dataSourceFromTxMgr;
            Assert.state(dataSource != null, () -> String.format("Failed to execute SQL scripts for "
                    + "test context %s: could not obtain DataSource from transaction manager [%s] (named '%s').",
                    testContext, txMgr.getClass().getName(), tmName));
        }
        final DataSource finalDataSource = dataSource;
        int propagation = (newTxRequired ? TransactionDefinition.PROPAGATION_REQUIRES_NEW
                : TransactionDefinition.PROPAGATION_REQUIRED);
        TransactionAttribute txAttr = TestContextTransactionUtils.createDelegatingTransactionAttribute(
                testContext, new DefaultTransactionAttribute(propagation));
        new TransactionTemplate(txMgr, txAttr).execute(status -> {
            populator.execute(finalDataSource);
            return null;
        });
    }
}
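
For reference, the test-side configuration that routes through this isolated-transaction branch might look like the following (the test method and script path are invented):

// Hypothetical test method; @Sql with TransactionMode.ISOLATED triggers the
// REQUIRES_NEW TransactionTemplate branch shown above.
@Sql(scripts = "/test-data.sql",
        config = @SqlConfig(transactionMode = SqlConfig.TransactionMode.ISOLATED))
@Test
public void runsScriptsInIsolatedTransaction() {
    // assertions against the inserted data would go here
}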