Example usage for org.springframework.kafka.listener DeadLetterPublishingRecoverer DeadLetterPublishingRecoverer

Introduction

On this page you can find example usage for the org.springframework.kafka.listener.DeadLetterPublishingRecoverer constructor.

Prototype

public DeadLetterPublishingRecoverer(
        Map<Class<?>, KafkaTemplate<? extends Object, ? extends Object>> templates) 

Document

Create an instance with the provided templates and a default destination resolving function that returns a TopicPartition based on the original topic (appended with ".DLT") from the failed record, and the same partition as the failed record.
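
For illustration, here is a minimal sketch of wiring this constructor, assuming two KafkaTemplate instances configured elsewhere; the class and parameter names in the sketch are hypothetical, and only the constructor signature shown above comes from the library:

import java.util.LinkedHashMap;
import java.util.Map;

import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.listener.DeadLetterPublishingRecoverer;

public class RecovererSketch {

    // stringTemplate and bytesTemplate are hypothetical, pre-configured templates.
    public DeadLetterPublishingRecoverer createRecoverer(KafkaTemplate<Object, String> stringTemplate,
            KafkaTemplate<Object, byte[]> bytesTemplate) {
        // A LinkedHashMap keeps the lookup order deterministic: register the more
        // specific record value types first, with a byte[] template as the fallback.
        Map<Class<?>, KafkaTemplate<? extends Object, ? extends Object>> templates = new LinkedHashMap<>();
        templates.put(String.class, stringTemplate);
        templates.put(byte[].class, bytesTemplate);
        // The default resolver publishes a failed record from topic "t", partition p
        // to topic "t.DLT", partition p, so the DLT needs at least as many partitions.
        return new DeadLetterPublishingRecoverer(templates);
    }

}

The test below uses the simpler single-template overload of the constructor, which applies the same default destination resolution.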

Usage

From source file: org.springframework.kafka.listener.TransactionalContainerTests.java

@Test
public void testMaxFailures() throws Exception {
    logger.info("Start testMaxFailures");
    Map<String, Object> props = KafkaTestUtils.consumerProps("txTestMaxFailures", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "group");
    props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic3);
    containerProps.setPollTimeout(10_000);

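    // Transactional producer: setting a transaction id prefix makes the factory (and template) transactional.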
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    senderProps.put(ProducerConfig.RETRIES_CONFIG, 1);
    DefaultKafkaProducerFactory<Object, Object> pf = new DefaultKafkaProducerFactory<>(senderProps);
    pf.setTransactionIdPrefix("maxAtt.");
    final KafkaTemplate<Object, Object> template = new KafkaTemplate<>(pf);
    final CountDownLatch latch = new CountDownLatch(1);
    AtomicReference<String> data = new AtomicReference<>();
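    // The listener permanently fails the record at offset 0; any later record stores its value and releases the latch.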
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        data.set(message.value());
        if (message.offset() == 0) {
            throw new RuntimeException("fail for max failures");
        }
        latch.countDown();
    });

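    // Running the listener in a Kafka transaction means a listener exception rolls the transaction back and triggers the after-rollback processor.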
    KafkaTransactionManager<Object, Object> tm = new KafkaTransactionManager<>(pf);
    containerProps.setTransactionManager(tm);
    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testMaxFailures");
    final CountDownLatch recoverLatch = new CountDownLatch(1);
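    // Subclass the recoverer so the test can observe when the failed record is published to the DLT.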
    DeadLetterPublishingRecoverer recoverer = new DeadLetterPublishingRecoverer(template) {

        @Override
        public void accept(ConsumerRecord<?, ?> record, Exception exception) {
            super.accept(record, exception);
            recoverLatch.countDown();
        }

    };
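    // After 3 failed delivery attempts, the record is handed to the recoverer instead of being retried again.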
    DefaultAfterRollbackProcessor<Integer, String> afterRollbackProcessor = spy(
            new DefaultAfterRollbackProcessor<>(recoverer, 3));
    container.setAfterRollbackProcessor(afterRollbackProcessor);
    final CountDownLatch stopLatch = new CountDownLatch(1);
    container.setApplicationEventPublisher(e -> {
        if (e instanceof ConsumerStoppedEvent) {
            stopLatch.countDown();
        }
    });
    container.start();

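    // Publish two records in one transaction: "foo" (lands at offset 0 and always fails) with a custom header, then "bar".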
    template.setDefaultTopic(topic3);
    template.executeInTransaction(t -> {
        RecordHeaders headers = new RecordHeaders(
                new RecordHeader[] { new RecordHeader("baz", "qux".getBytes()) });
        ProducerRecord<Object, Object> record = new ProducerRecord<>(topic3, 0, 0, "foo", headers);
        template.send(record);
        template.sendDefault(0, 0, "bar");
        return null;
    });
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(data.get()).isEqualTo("bar");
    assertThat(recoverLatch.await(10, TimeUnit.SECONDS)).isTrue();
    container.stop();
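    // The failed record should now be on the dead-letter topic, enhanced with diagnostic headers by the recoverer.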
    Consumer<Integer, String> consumer = cf.createConsumer();
    embeddedKafka.consumeFromAnEmbeddedTopic(consumer, topic3DLT);
    ConsumerRecord<Integer, String> dltRecord = KafkaTestUtils.getSingleRecord(consumer, topic3DLT);
    assertThat(dltRecord.value()).isEqualTo("foo");
    DefaultKafkaHeaderMapper mapper = new DefaultKafkaHeaderMapper();
    Map<String, Object> map = new HashMap<>();
    mapper.toHeaders(dltRecord.headers(), map);
    MessageHeaders headers = new MessageHeaders(map);
    assertThat(new String(headers.get(KafkaHeaders.DLT_EXCEPTION_FQCN, byte[].class)))
            .contains("RuntimeException");
    assertThat(headers.get(KafkaHeaders.DLT_EXCEPTION_MESSAGE, byte[].class))
            .isEqualTo("fail for max failures".getBytes());
    assertThat(headers.get(KafkaHeaders.DLT_EXCEPTION_STACKTRACE)).isNotNull();
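    // Both the original partition and offset were 0, so byte [3] of each big-endian header value is 0.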
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_OFFSET, byte[].class)[3]).isEqualTo((byte) 0);
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_PARTITION, byte[].class)[3]).isEqualTo((byte) 0);
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP, byte[].class)).isNotNull();
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP_TYPE, byte[].class)).isNotNull();
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_TOPIC, byte[].class)).isEqualTo(topic3.getBytes());
    assertThat(headers.get("baz")).isEqualTo("qux".getBytes());
    pf.destroy();
    assertThat(stopLatch.await(10, TimeUnit.SECONDS)).isTrue();
    verify(afterRollbackProcessor).clearThreadState();
    logger.info("Stop testMaxAttempts");
}