Example usage for org.springframework.kafka.transaction.KafkaTransactionManager: the KafkaTransactionManager(ProducerFactory) constructor

Introduction

On this page you can find example usage for the org.springframework.kafka.transaction.KafkaTransactionManager constructor, KafkaTransactionManager(ProducerFactory).

Prototype

public KafkaTransactionManager(ProducerFactory<K, V> producerFactory) 

Document

Create a new KafkaTransactionManager, given a ProducerFactory.
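
As a quick illustration of the constructor (a minimal sketch; the broker address, serializers, and the "tx-" transactional-id prefix are assumptions for this page, not taken from the examples below):

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.transaction.KafkaTransactionManager;

public class KafkaTransactionManagerSketch {

    public static void main(String[] args) {
        Map<String, Object> senderProps = new HashMap<>();
        senderProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        senderProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        senderProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);

        DefaultKafkaProducerFactory<String, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
        // Setting a transaction-id prefix makes the factory transaction-capable.
        pf.setTransactionIdPrefix("tx-");

        // The constructor documented above: one transaction manager per producer factory.
        KafkaTransactionManager<String, String> tm = new KafkaTransactionManager<>(pf);
        System.out.println("Created: " + tm);
    }
}

The resulting manager is a PlatformTransactionManager and is typically handed to ContainerProperties.setTransactionManager(...), as each example below does.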

Usage

From source file: org.springframework.kafka.listener.TransactionalContainerTests.java

@SuppressWarnings({ "rawtypes", "unchecked" })
private void testConsumeAndProduceTransactionGuts(boolean chained, boolean handleError) throws Exception {
    Consumer consumer = mock(Consumer.class);
    final TopicPartition topicPartition = new TopicPartition("foo", 0);
    willAnswer(i -> {
        ((ConsumerRebalanceListener) i.getArgument(1))
                .onPartitionsAssigned(Collections.singletonList(topicPartition));
        return null;
    }).given(consumer).subscribe(any(Collection.class), any(ConsumerRebalanceListener.class));
    ConsumerRecords records = new ConsumerRecords(Collections.singletonMap(topicPartition,
            Collections.singletonList(new ConsumerRecord<>("foo", 0, 0, "key", "value"))));
    final AtomicBoolean done = new AtomicBoolean();
    willAnswer(i -> {
        if (done.compareAndSet(false, true)) {
            return records;
        } else {
            Thread.sleep(500);
            return null;
        }
    }).given(consumer).poll(any(Duration.class));
    ConsumerFactory cf = mock(ConsumerFactory.class);
    willReturn(consumer).given(cf).createConsumer("group", "", null);
    Producer producer = mock(Producer.class);
    final CountDownLatch closeLatch = new CountDownLatch(2);
    willAnswer(i -> {
        closeLatch.countDown();
        return null;
    }).given(producer).close();
    ProducerFactory pf = mock(ProducerFactory.class);
    given(pf.transactionCapable()).willReturn(true);
    final List<String> transactionalIds = new ArrayList<>();
    willAnswer(i -> {
        transactionalIds.add(TransactionSupport.getTransactionIdSuffix());
        return producer;
    }).given(pf).createProducer();
    KafkaTransactionManager tm = new KafkaTransactionManager(pf);
    PlatformTransactionManager ptm = tm;
    if (chained) {
        ptm = new ChainedKafkaTransactionManager(new SomeOtherTransactionManager(), tm);
    }
    ContainerProperties props = new ContainerProperties("foo");
    props.setGroupId("group");
    props.setTransactionManager(ptm);
    final KafkaTemplate template = new KafkaTemplate(pf);
    props.setMessageListener((MessageListener) m -> {
        template.send("bar", "baz");
        if (handleError) {
            throw new RuntimeException("fail");
        }
    });
    KafkaMessageListenerContainer container = new KafkaMessageListenerContainer<>(cf, props);
    container.setBeanName("commit");
    if (handleError) {
        container.setErrorHandler((e, data) -> {
        });
    }
    container.start();
    assertThat(closeLatch.await(10, TimeUnit.SECONDS)).isTrue();
    InOrder inOrder = inOrder(producer);
    inOrder.verify(producer).beginTransaction();
    inOrder.verify(producer).sendOffsetsToTransaction(
            Collections.singletonMap(topicPartition, new OffsetAndMetadata(0)), "group");
    inOrder.verify(producer).commitTransaction();
    inOrder.verify(producer).close();
    inOrder.verify(producer).beginTransaction();
    ArgumentCaptor<ProducerRecord> captor = ArgumentCaptor.forClass(ProducerRecord.class);
    inOrder.verify(producer).send(captor.capture(), any(Callback.class));
    assertThat(captor.getValue()).isEqualTo(new ProducerRecord("bar", "baz"));
    inOrder.verify(producer).sendOffsetsToTransaction(
            Collections.singletonMap(topicPartition, new OffsetAndMetadata(1)), "group");
    inOrder.verify(producer).commitTransaction();
    inOrder.verify(producer).close();
    container.stop();
    verify(pf, times(2)).createProducer();
    verifyNoMoreInteractions(producer);
    assertThat(transactionalIds.get(0)).isEqualTo("group.foo.0");
    assertThat(transactionalIds.get(1)).isEqualTo("group.foo.0");
}
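
Note that this first example is a private helper (invoked from @Test methods with the chained and handleError flags) rather than a test itself. Its final assertions check the transactional-id suffix the container installs for consumer-initiated transactions, which is built as group.topic.partition, hence "group.foo.0" for both of the producers created here.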

From source file: org.springframework.kafka.listener.TransactionalContainerTests.java

@SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void testConsumeAndProduceTransactionRollback() throws Exception {
    Consumer consumer = mock(Consumer.class);
    final TopicPartition topicPartition0 = new TopicPartition("foo", 0);
    final TopicPartition topicPartition1 = new TopicPartition("foo", 1);
    Map<TopicPartition, List<ConsumerRecord<String, String>>> recordMap = new HashMap<>();
    recordMap.put(topicPartition0,
            Collections.singletonList(new ConsumerRecord<>("foo", 0, 0, "key", "value")));
    recordMap.put(topicPartition1,
            Collections.singletonList(new ConsumerRecord<>("foo", 1, 0, "key", "value")));
    ConsumerRecords records = new ConsumerRecords(recordMap);
    final AtomicBoolean done = new AtomicBoolean();
    willAnswer(i -> {
        if (done.compareAndSet(false, true)) {
            return records;
        } else {
            Thread.sleep(500);
            return null;
        }
    }).given(consumer).poll(any(Duration.class));
    final CountDownLatch seekLatch = new CountDownLatch(2);
    willAnswer(i -> {
        seekLatch.countDown();
        return null;
    }).given(consumer).seek(any(), anyLong());
    ConsumerFactory cf = mock(ConsumerFactory.class);
    willReturn(consumer).given(cf).createConsumer("group", "", null);
    Producer producer = mock(Producer.class);
    final CountDownLatch closeLatch = new CountDownLatch(1);
    willAnswer(i -> {
        closeLatch.countDown();
        return null;
    }).given(producer).close();
    ProducerFactory pf = mock(ProducerFactory.class);
    given(pf.transactionCapable()).willReturn(true);
    given(pf.createProducer()).willReturn(producer);
    KafkaTransactionManager tm = new KafkaTransactionManager(pf);
    ContainerProperties props = new ContainerProperties(new TopicPartitionInitialOffset("foo", 0),
            new TopicPartitionInitialOffset("foo", 1));
    props.setGroupId("group");
    props.setTransactionManager(tm);
    final KafkaTemplate template = new KafkaTemplate(pf);
    props.setMessageListener((MessageListener) m -> {
        template.send("bar", "baz");
        throw new RuntimeException("fail");
    });
    KafkaMessageListenerContainer container = new KafkaMessageListenerContainer<>(cf, props);
    container.setBeanName("rollback");
    container.start();
    assertThat(closeLatch.await(10, TimeUnit.SECONDS)).isTrue();
    assertThat(seekLatch.await(10, TimeUnit.SECONDS)).isTrue();
    InOrder inOrder = inOrder(producer);
    inOrder.verify(producer).beginTransaction();
    ArgumentCaptor<ProducerRecord> captor = ArgumentCaptor.forClass(ProducerRecord.class);
    verify(producer).send(captor.capture(), any(Callback.class));
    assertThat(captor.getValue()).isEqualTo(new ProducerRecord("bar", "baz"));
    inOrder.verify(producer, never()).sendOffsetsToTransaction(anyMap(), anyString());
    inOrder.verify(producer, never()).commitTransaction();
    inOrder.verify(producer).abortTransaction();
    inOrder.verify(producer).close();
    verify(consumer).seek(topicPartition0, 0);
    verify(consumer).seek(topicPartition1, 0);
    verify(consumer, never()).commitSync(anyMap());
    container.stop();
    verify(pf, times(1)).createProducer();
}

From source file: org.springframework.kafka.listener.TransactionalContainerTests.java

@SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void testConsumeAndProduceTransactionRollbackBatch() throws Exception {
    Consumer consumer = mock(Consumer.class);
    final TopicPartition topicPartition0 = new TopicPartition("foo", 0);
    final TopicPartition topicPartition1 = new TopicPartition("foo", 1);
    Map<TopicPartition, List<ConsumerRecord<String, String>>> recordMap = new HashMap<>();
    recordMap.put(topicPartition0,
            Collections.singletonList(new ConsumerRecord<>("foo", 0, 0, "key", "value")));
    recordMap.put(topicPartition1,
            Collections.singletonList(new ConsumerRecord<>("foo", 1, 0, "key", "value")));
    ConsumerRecords records = new ConsumerRecords(recordMap);
    final AtomicBoolean done = new AtomicBoolean();
    willAnswer(i -> {
        if (done.compareAndSet(false, true)) {
            return records;
        } else {
            Thread.sleep(500);
            return null;
        }
    }).given(consumer).poll(any(Duration.class));
    final CountDownLatch seekLatch = new CountDownLatch(2);
    willAnswer(i -> {
        seekLatch.countDown();
        return null;
    }).given(consumer).seek(any(), anyLong());
    ConsumerFactory cf = mock(ConsumerFactory.class);
    willReturn(consumer).given(cf).createConsumer("group", "", null);
    Producer producer = mock(Producer.class);
    final CountDownLatch closeLatch = new CountDownLatch(1);
    willAnswer(i -> {
        closeLatch.countDown();
        return null;
    }).given(producer).close();
    ProducerFactory pf = mock(ProducerFactory.class);
    given(pf.transactionCapable()).willReturn(true);
    given(pf.createProducer()).willReturn(producer);
    KafkaTransactionManager tm = new KafkaTransactionManager(pf);
    ContainerProperties props = new ContainerProperties(new TopicPartitionInitialOffset("foo", 0),
            new TopicPartitionInitialOffset("foo", 1));
    props.setGroupId("group");
    props.setTransactionManager(tm);
    final KafkaTemplate template = new KafkaTemplate(pf);
    props.setMessageListener((BatchMessageListener) recordlist -> {
        template.send("bar", "baz");
        throw new RuntimeException("fail");
    });
    KafkaMessageListenerContainer container = new KafkaMessageListenerContainer<>(cf, props);
    container.setBeanName("rollback");
    container.start();
    assertThat(closeLatch.await(10, TimeUnit.SECONDS)).isTrue();
    assertThat(seekLatch.await(10, TimeUnit.SECONDS)).isTrue();
    InOrder inOrder = inOrder(producer);
    inOrder.verify(producer).beginTransaction();
    ArgumentCaptor<ProducerRecord> captor = ArgumentCaptor.forClass(ProducerRecord.class);
    verify(producer).send(captor.capture(), any(Callback.class));
    assertThat(captor.getValue()).isEqualTo(new ProducerRecord("bar", "baz"));
    inOrder.verify(producer, never()).sendOffsetsToTransaction(anyMap(), anyString());
    inOrder.verify(producer, never()).commitTransaction();
    inOrder.verify(producer).abortTransaction();
    inOrder.verify(producer).close();
    verify(consumer).seek(topicPartition0, 0);
    verify(consumer).seek(topicPartition1, 0);
    verify(consumer, never()).commitSync(anyMap());
    container.stop();
    verify(pf, times(1)).createProducer();
}
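
This batch variant drives the same rollback path as the record-based test before it, differing only in its use of a BatchMessageListener: once the listener throws, the transaction is aborted, the consumer is sought back to the unprocessed offsets, and no offsets are committed, either transactionally or via commitSync.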

From source file: org.springframework.kafka.listener.TransactionalContainerTests.java

@SuppressWarnings("unchecked")
@Test
public void testRollbackRecord() throws Exception {
    logger.info("Start testRollbackRecord");
    Map<String, Object> props = KafkaTestUtils.consumerProps("txTest1", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "group");
    props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic1, topic2);
    containerProps.setGroupId("group");
    containerProps.setPollTimeout(10_000);

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    senderProps.put(ProducerConfig.RETRIES_CONFIG, 1);
    DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    pf.setTransactionIdPrefix("rr.");

    final KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    final AtomicBoolean failed = new AtomicBoolean();
    final CountDownLatch latch = new CountDownLatch(3);
    final AtomicReference<String> transactionalId = new AtomicReference<>();
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        latch.countDown();
        if (failed.compareAndSet(false, true)) {
            throw new RuntimeException("fail");
        }
        /*
         * Send a message to topic2 and wait for it so we don't stop the container too soon.
         */
        if (message.topic().equals(topic1)) {
            template.send(topic2, "bar");
            template.flush();
            transactionalId.set(KafkaTestUtils.getPropertyValue(
                    ProducerFactoryUtils.getTransactionalResourceHolder(pf).getProducer(),
                    "delegate.transactionManager.transactionalId", String.class));
        }
    });

    @SuppressWarnings({ "rawtypes" })
    KafkaTransactionManager tm = new KafkaTransactionManager(pf);
    containerProps.setTransactionManager(tm);
    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testRollbackRecord");
    container.start();

    template.setDefaultTopic(topic1);
    template.executeInTransaction(t -> {
        template.sendDefault(0, 0, "foo");
        return null;
    });
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    container.stop();
    Consumer<Integer, String> consumer = cf.createConsumer();
    final CountDownLatch subsLatch = new CountDownLatch(1);
    consumer.subscribe(Arrays.asList(topic1), new ConsumerRebalanceListener() {

        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            // empty
        }

        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            subsLatch.countDown();
        }

    });
    ConsumerRecords<Integer, String> records = null;
    int n = 0;
    while (subsLatch.getCount() > 0 && n++ < 600) {
        records = consumer.poll(Duration.ofMillis(100));
    }
    assertThat(subsLatch.await(1, TimeUnit.MILLISECONDS)).isTrue();
    assertThat(records.count()).isEqualTo(0);
    // depending on timing, the position might include the offset representing the commit in the log
    assertThat(consumer.position(new TopicPartition(topic1, 0))).isGreaterThanOrEqualTo(1L);
    assertThat(transactionalId.get()).startsWith("rr.group.txTopic");
    assertThat(KafkaTestUtils.getPropertyValue(pf, "consumerProducers", Map.class)).isEmpty();
    logger.info("Stop testRollbackRecord");
    pf.destroy();
    consumer.close();
}

From source file: org.springframework.kafka.listener.TransactionalContainerTests.java

@Test
public void testMaxFailures() throws Exception {
    logger.info("Start testMaxFailures");
    Map<String, Object> props = KafkaTestUtils.consumerProps("txTestMaxFailures", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "group");
    props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic3);
    containerProps.setPollTimeout(10_000);

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    senderProps.put(ProducerConfig.RETRIES_CONFIG, 1);
    DefaultKafkaProducerFactory<Object, Object> pf = new DefaultKafkaProducerFactory<>(senderProps);
    pf.setTransactionIdPrefix("maxAtt.");
    final KafkaTemplate<Object, Object> template = new KafkaTemplate<>(pf);
    final CountDownLatch latch = new CountDownLatch(1);
    AtomicReference<String> data = new AtomicReference<>();
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        data.set(message.value());
        if (message.offset() == 0) {
            throw new RuntimeException("fail for max failures");
        }
        latch.countDown();
    });

    @SuppressWarnings({ "rawtypes", "unchecked" })
    KafkaTransactionManager tm = new KafkaTransactionManager(pf);
    containerProps.setTransactionManager(tm);
    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testMaxFailures");
    final CountDownLatch recoverLatch = new CountDownLatch(1);
    DeadLetterPublishingRecoverer recoverer = new DeadLetterPublishingRecoverer(template) {

        @Override
        public void accept(ConsumerRecord<?, ?> record, Exception exception) {
            super.accept(record, exception);
            recoverLatch.countDown();
        }

    };
    DefaultAfterRollbackProcessor<Integer, String> afterRollbackProcessor = spy(
            new DefaultAfterRollbackProcessor<>(recoverer, 3));
    container.setAfterRollbackProcessor(afterRollbackProcessor);
    final CountDownLatch stopLatch = new CountDownLatch(1);
    container.setApplicationEventPublisher(e -> {
        if (e instanceof ConsumerStoppedEvent) {
            stopLatch.countDown();
        }
    });
    container.start();

    template.setDefaultTopic(topic3);
    template.executeInTransaction(t -> {
        RecordHeaders headers = new RecordHeaders(
                new RecordHeader[] { new RecordHeader("baz", "qux".getBytes()) });
        ProducerRecord<Object, Object> record = new ProducerRecord<>(topic3, 0, 0, "foo", headers);
        template.send(record);
        template.sendDefault(0, 0, "bar");
        return null;
    });
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(data.get()).isEqualTo("bar");
    assertThat(recoverLatch.await(10, TimeUnit.SECONDS)).isTrue();
    container.stop();
    Consumer<Integer, String> consumer = cf.createConsumer();
    embeddedKafka.consumeFromAnEmbeddedTopic(consumer, topic3DLT);
    ConsumerRecord<Integer, String> dltRecord = KafkaTestUtils.getSingleRecord(consumer, topic3DLT);
    assertThat(dltRecord.value()).isEqualTo("foo");
    DefaultKafkaHeaderMapper mapper = new DefaultKafkaHeaderMapper();
    Map<String, Object> map = new HashMap<>();
    mapper.toHeaders(dltRecord.headers(), map);
    MessageHeaders headers = new MessageHeaders(map);
    assertThat(new String(headers.get(KafkaHeaders.DLT_EXCEPTION_FQCN, byte[].class)))
            .contains("RuntimeException");
    assertThat(headers.get(KafkaHeaders.DLT_EXCEPTION_MESSAGE, byte[].class))
            .isEqualTo("fail for max failures".getBytes());
    assertThat(headers.get(KafkaHeaders.DLT_EXCEPTION_STACKTRACE)).isNotNull();
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_OFFSET, byte[].class)[3]).isEqualTo((byte) 0);
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_PARTITION, byte[].class)[3]).isEqualTo((byte) 0);
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP, byte[].class)).isNotNull();
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP_TYPE, byte[].class)).isNotNull();
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_TOPIC, byte[].class)).isEqualTo(topic3.getBytes());
    assertThat(headers.get("baz")).isEqualTo("qux".getBytes());
    pf.destroy();
    assertThat(stopLatch.await(10, TimeUnit.SECONDS)).isTrue();
    verify(afterRollbackProcessor).clearThreadState();
    logger.info("Stop testMaxAttempts");
}
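
The header assertions at the end of this test illustrate the DeadLetterPublishingRecoverer convention: the record published to the dead-letter topic keeps the original custom header ("baz") and gains kafka_dlt-prefixed diagnostic headers (exception class name, message, stack trace, and the original topic, partition, offset, and timestamp), which are read back here through a DefaultKafkaHeaderMapper.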