Usage examples for org.springframework.kafka.core.DefaultKafkaConsumerFactory#createConsumer()
default Consumer<K, V> createConsumer()
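Every example below uses the same pattern: build a DefaultKafkaConsumerFactory from consumer properties, drive a listener container, then call createConsumer() on the same factory to get a plain Consumer for verifying committed positions. A minimal sketch of that pattern, assuming a broker at localhost:9092 and a topic named "my-topic" (the broker address, topic, and group id are placeholders, not taken from the examples below):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;

Map<String, Object> props = new HashMap<>();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ConsumerConfig.GROUP_ID_CONFIG, "my-group");
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);

// Consumer extends Closeable, so try-with-resources closes it for us
try (Consumer<Integer, String> consumer = cf.createConsumer()) {
    TopicPartition tp = new TopicPartition("my-topic", 0);
    consumer.assign(Collections.singletonList(tp));
    // position() reports the offset of the next record to fetch; since nothing
    // has been polled yet, this is the group's committed offset for the partition
    // (or the auto.offset.reset position if nothing was ever committed)
    long next = consumer.position(tp);
}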
From source file:org.springframework.kafka.listener.ConcurrentMessageListenerContainerTests.java
@Test
public void testAckOnErrorRecord() throws Exception {
    logger.info("Start ack on error");
    Map<String, Object> props = KafkaTestUtils.consumerProps("test9", "false", embeddedKafka);
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    final CountDownLatch latch = new CountDownLatch(4);
    ContainerProperties containerProps = new ContainerProperties(topic9);
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("auto ack on error: " + message);
        latch.countDown();
        // fail on 'bar' and 'baz'
        if (message.value().startsWith("b")) {
            throw new RuntimeException();
        }
    });
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.RECORD);
    containerProps.setAckOnError(false);
    ConcurrentMessageListenerContainer<Integer, String> container =
            new ConcurrentMessageListenerContainer<>(cf, containerProps);
    container.setConcurrency(2);
    container.setBeanName("testAckOnError");
    container.start();
    ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic());
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic9);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "qux");
    template.flush();
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    container.stop();
    Consumer<Integer, String> consumer = cf.createConsumer();
    consumer.assign(Arrays.asList(new TopicPartition(topic9, 0), new TopicPartition(topic9, 1)));
    // partition 0 is positioned at 1, the next offset after the successfully
    // processed 'foo'; it was not advanced because 'baz' failed
    assertThat(consumer.position(new TopicPartition(topic9, 0))).isEqualTo(1);
    // partition 1 is positioned at 2, the next offset after the successfully
    // processed 'qux'; it was advanced even though 'bar' failed earlier
    assertThat(consumer.position(new TopicPartition(topic9, 1))).isEqualTo(2);
    consumer.close();
    logger.info("Stop ack on error");
}
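The same check can be made without assign()/position(): Consumer.committed(TopicPartition) asks the group coordinator for the group's committed offset directly. A variant sketch that reuses cf and topic9 from the test above (assumption: the single-partition committed(TopicPartition) overload available in the kafka-clients versions these tests target; OffsetAndMetadata is org.apache.kafka.clients.consumer.OffsetAndMetadata):

// Variant sketch, reusing cf and topic9 from the example above.
// committed() returns null when the group has no committed offset for the partition.
try (Consumer<Integer, String> verifier = cf.createConsumer()) {
    OffsetAndMetadata committed = verifier.committed(new TopicPartition(topic9, 0));
    long offset = (committed == null) ? -1L : committed.offset();
    // for partition 0 above this would be 1: 'foo' was committed, 'baz' was not
}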
From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java
@Test
public void testRecordAck() throws Exception {
    logger.info("Start record ack");
    Map<String, Object> props = KafkaTestUtils.consumerProps("test6", "false", embeddedKafka);
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic6);
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("record ack: " + message);
    });
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.RECORD);
    containerProps.setAckOnError(false);
    KafkaMessageListenerContainer<Integer, String> container =
            new KafkaMessageListenerContainer<>(cf, containerProps);
    container.setBeanName("testRecordAcks");
    container.start();
    Consumer<?, ?> containerConsumer = spyOnConsumer(container);
    final CountDownLatch latch = new CountDownLatch(2);
    // count down once per partition when the commit for offset 2 goes through
    willAnswer(invocation -> {
        @SuppressWarnings("unchecked")
        Map<TopicPartition, OffsetAndMetadata> map =
                (Map<TopicPartition, OffsetAndMetadata>) invocation.getArguments()[0];
        try {
            return invocation.callRealMethod();
        }
        finally {
            for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
                if (entry.getValue().offset() == 2) {
                    latch.countDown();
                }
            }
        }
    }).given(containerConsumer).commitSync(any());
    ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic());
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic6);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "qux");
    template.flush();
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    Consumer<Integer, String> consumer = cf.createConsumer();
    consumer.assign(Arrays.asList(new TopicPartition(topic6, 0), new TopicPartition(topic6, 1)));
    assertThat(consumer.position(new TopicPartition(topic6, 0))).isEqualTo(2);
    assertThat(consumer.position(new TopicPartition(topic6, 1))).isEqualTo(2);
    container.stop();
    consumer.close();
    logger.info("Stop record ack");
}
From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java
@Test
public void testBatchAck() throws Exception {
    logger.info("Start batch ack");
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic7);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(1, 0, "qux");
    template.flush();
    Map<String, Object> props = KafkaTestUtils.consumerProps("test6", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic7);
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("batch ack: " + message);
    });
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.BATCH);
    containerProps.setPollTimeout(10000);
    containerProps.setAckOnError(false);
    KafkaMessageListenerContainer<Integer, String> container =
            new KafkaMessageListenerContainer<>(cf, containerProps);
    container.setBeanName("testBatchAcks");
    container.start();
    Consumer<?, ?> containerConsumer = spyOnConsumer(container);
    final CountDownLatch firstBatchLatch = new CountDownLatch(1);
    final CountDownLatch latch = new CountDownLatch(2);
    // intercept commitSync() to detect the batch commits for offset 2
    willAnswer(invocation -> {
        @SuppressWarnings("unchecked")
        Map<TopicPartition, OffsetAndMetadata> map =
                (Map<TopicPartition, OffsetAndMetadata>) invocation.getArguments()[0];
        for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
            if (entry.getValue().offset() == 2) {
                firstBatchLatch.countDown();
            }
        }
        try {
            return invocation.callRealMethod();
        }
        finally {
            for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
                if (entry.getValue().offset() == 2) {
                    latch.countDown();
                }
            }
        }
    }).given(containerConsumer).commitSync(any());
    assertThat(firstBatchLatch.await(9, TimeUnit.SECONDS)).isTrue();
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    Consumer<Integer, String> consumer = cf.createConsumer();
    consumer.assign(Arrays.asList(new TopicPartition(topic7, 0), new TopicPartition(topic7, 1)));
    assertThat(consumer.position(new TopicPartition(topic7, 0))).isEqualTo(2);
    assertThat(consumer.position(new TopicPartition(topic7, 1))).isEqualTo(2);
    container.stop();
    consumer.close();
    logger.info("Stop batch ack");
}
From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java
@Test
public void testBatchListener() throws Exception {
    logger.info("Start batch listener");
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic8);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(1, 0, "qux");
    template.flush();
    Map<String, Object> props = KafkaTestUtils.consumerProps("test8", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic8);
    containerProps.setMessageListener((BatchMessageListener<Integer, String>) messages -> {
        logger.info("batch listener: " + messages);
    });
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.BATCH);
    containerProps.setPollTimeout(10000);
    containerProps.setAckOnError(false);
    KafkaMessageListenerContainer<Integer, String> container =
            new KafkaMessageListenerContainer<>(cf, containerProps);
    container.setBeanName("testBatchListener");
    container.start();
    Consumer<?, ?> containerConsumer = spyOnConsumer(container);
    final CountDownLatch firstBatchLatch = new CountDownLatch(1);
    final CountDownLatch latch = new CountDownLatch(2);
    // intercept commitSync() to detect the batch commits for offset 2
    willAnswer(invocation -> {
        @SuppressWarnings("unchecked")
        Map<TopicPartition, OffsetAndMetadata> map =
                (Map<TopicPartition, OffsetAndMetadata>) invocation.getArguments()[0];
        for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
            if (entry.getValue().offset() == 2) {
                firstBatchLatch.countDown();
            }
        }
        try {
            return invocation.callRealMethod();
        }
        finally {
            for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
                if (entry.getValue().offset() == 2) {
                    latch.countDown();
                }
            }
        }
    }).given(containerConsumer).commitSync(any());
    assertThat(firstBatchLatch.await(9, TimeUnit.SECONDS)).isTrue();
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    Consumer<Integer, String> consumer = cf.createConsumer();
    consumer.assign(Arrays.asList(new TopicPartition(topic8, 0), new TopicPartition(topic8, 1)));
    assertThat(consumer.position(new TopicPartition(topic8, 0))).isEqualTo(2);
    assertThat(consumer.position(new TopicPartition(topic8, 1))).isEqualTo(2);
    container.stop();
    consumer.close();
    logger.info("Stop batch listener");
}
From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java
@Test
public void testBatchListenerManual() throws Exception {
    logger.info("Start batch listener manual");
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic9);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(1, 0, "qux");
    template.flush();
    Map<String, Object> props = KafkaTestUtils.consumerProps("test9", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic9);
    final CountDownLatch latch = new CountDownLatch(4);
    // the listener acknowledges each batch explicitly (MANUAL_IMMEDIATE)
    containerProps.setMessageListener((BatchAcknowledgingMessageListener<Integer, String>) (messages, ack) -> {
        logger.info("batch listener manual: " + messages);
        for (int i = 0; i < messages.size(); i++) {
            latch.countDown();
        }
        ack.acknowledge();
    });
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.MANUAL_IMMEDIATE);
    containerProps.setPollTimeout(10000);
    containerProps.setAckOnError(false);
    KafkaMessageListenerContainer<Integer, String> container =
            new KafkaMessageListenerContainer<>(cf, containerProps);
    container.setBeanName("testBatchListenerManual");
    container.start();
    Consumer<?, ?> containerConsumer = spyOnConsumer(container);
    final CountDownLatch commitLatch = new CountDownLatch(2);
    willAnswer(invocation -> {
        @SuppressWarnings("unchecked")
        Map<TopicPartition, OffsetAndMetadata> map =
                (Map<TopicPartition, OffsetAndMetadata>) invocation.getArguments()[0];
        try {
            return invocation.callRealMethod();
        }
        finally {
            for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
                if (entry.getValue().offset() == 2) {
                    commitLatch.countDown();
                }
            }
        }
    }).given(containerConsumer).commitSync(any());
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(commitLatch.await(60, TimeUnit.SECONDS)).isTrue();
    Consumer<Integer, String> consumer = cf.createConsumer();
    consumer.assign(Arrays.asList(new TopicPartition(topic9, 0), new TopicPartition(topic9, 1)));
    assertThat(consumer.position(new TopicPartition(topic9, 0))).isEqualTo(2);
    assertThat(consumer.position(new TopicPartition(topic9, 1))).isEqualTo(2);
    container.stop();
    consumer.close();
    logger.info("Stop batch listener manual");
}
From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java
@Test
public void testBatchListenerErrors() throws Exception {
    logger.info("Start batch listener errors");
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic10);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(1, 0, "qux");
    template.flush();
    Map<String, Object> props = KafkaTestUtils.consumerProps("test9", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic10);
    containerProps.setMessageListener((BatchMessageListener<Integer, String>) messages -> {
        logger.info("batch listener errors: " + messages);
        throw new RuntimeException("intentional");
    });
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.BATCH);
    containerProps.setPollTimeout(10000);
    containerProps.setAckOnError(true);
    final CountDownLatch latch = new CountDownLatch(4);
    containerProps.setGenericErrorHandler((BatchErrorHandler) (t, messages) -> {
        new BatchLoggingErrorHandler().handle(t, messages);
        for (int i = 0; i < messages.count(); i++) {
            latch.countDown();
        }
    });
    KafkaMessageListenerContainer<Integer, String> container =
            new KafkaMessageListenerContainer<>(cf, containerProps);
    container.setBeanName("testBatchListenerErrors");
    container.start();
    Consumer<?, ?> containerConsumer = spyOnConsumer(container);
    final CountDownLatch commitLatch = new CountDownLatch(2);
    willAnswer(invocation -> {
        @SuppressWarnings("unchecked")
        Map<TopicPartition, OffsetAndMetadata> map =
                (Map<TopicPartition, OffsetAndMetadata>) invocation.getArguments()[0];
        try {
            return invocation.callRealMethod();
        }
        finally {
            for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
                if (entry.getValue().offset() == 2) {
                    commitLatch.countDown();
                }
            }
        }
    }).given(containerConsumer).commitSync(any());
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(commitLatch.await(60, TimeUnit.SECONDS)).isTrue();
    Consumer<Integer, String> consumer = cf.createConsumer();
    consumer.assign(Arrays.asList(new TopicPartition(topic10, 0), new TopicPartition(topic10, 1)));
    // the batches were acked despite the listener errors (ackOnError = true)
    assertThat(consumer.position(new TopicPartition(topic10, 0))).isEqualTo(2);
    assertThat(consumer.position(new TopicPartition(topic10, 1))).isEqualTo(2);
    container.stop();
    consumer.close();
    logger.info("Stop batch listener errors");
}
From source file:org.springframework.kafka.listener.TransactionalContainerTests.java
@SuppressWarnings("unchecked") @Test//from w w w.j av a2s. c o m public void testRollbackRecord() throws Exception { logger.info("Start testRollbackRecord"); Map<String, Object> props = KafkaTestUtils.consumerProps("txTest1", "false", embeddedKafka); props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); props.put(ConsumerConfig.GROUP_ID_CONFIG, "group"); props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"); DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props); ContainerProperties containerProps = new ContainerProperties(topic1, topic2); containerProps.setGroupId("group"); containerProps.setPollTimeout(10_000); Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka); senderProps.put(ProducerConfig.RETRIES_CONFIG, 1); DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps); pf.setTransactionIdPrefix("rr."); final KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf); final AtomicBoolean failed = new AtomicBoolean(); final CountDownLatch latch = new CountDownLatch(3); final AtomicReference<String> transactionalId = new AtomicReference<>(); containerProps.setMessageListener((MessageListener<Integer, String>) message -> { latch.countDown(); if (failed.compareAndSet(false, true)) { throw new RuntimeException("fail"); } /* * Send a message to topic2 and wait for it so we don't stop the container too soon. */ if (message.topic().equals(topic1)) { template.send(topic2, "bar"); template.flush(); transactionalId.set(KafkaTestUtils.getPropertyValue( ProducerFactoryUtils.getTransactionalResourceHolder(pf).getProducer(), "delegate.transactionManager.transactionalId", String.class)); } }); @SuppressWarnings({ "rawtypes" }) KafkaTransactionManager tm = new KafkaTransactionManager(pf); containerProps.setTransactionManager(tm); KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf, containerProps); container.setBeanName("testRollbackRecord"); container.start(); template.setDefaultTopic(topic1); template.executeInTransaction(t -> { template.sendDefault(0, 0, "foo"); return null; }); assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue(); container.stop(); Consumer<Integer, String> consumer = cf.createConsumer(); final CountDownLatch subsLatch = new CountDownLatch(1); consumer.subscribe(Arrays.asList(topic1), new ConsumerRebalanceListener() { @Override public void onPartitionsRevoked(Collection<TopicPartition> partitions) { // empty } @Override public void onPartitionsAssigned(Collection<TopicPartition> partitions) { subsLatch.countDown(); } }); ConsumerRecords<Integer, String> records = null; int n = 0; while (subsLatch.getCount() > 0 && n++ < 600) { records = consumer.poll(Duration.ofMillis(100)); } assertThat(subsLatch.await(1, TimeUnit.MILLISECONDS)).isTrue(); assertThat(records.count()).isEqualTo(0); // depending on timing, the position might include the offset representing the commit in the log assertThat(consumer.position(new TopicPartition(topic1, 0))).isGreaterThanOrEqualTo(1L); assertThat(transactionalId.get()).startsWith("rr.group.txTopic"); assertThat(KafkaTestUtils.getPropertyValue(pf, "consumerProducers", Map.class)).isEmpty(); logger.info("Stop testRollbackRecord"); pf.destroy(); consumer.close(); }
From source file:org.springframework.kafka.listener.TransactionalContainerTests.java
@Test
public void testMaxFailures() throws Exception {
    logger.info("Start testMaxFailures");
    Map<String, Object> props = KafkaTestUtils.consumerProps("txTestMaxFailures", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "group");
    props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic3);
    containerProps.setPollTimeout(10_000);
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    senderProps.put(ProducerConfig.RETRIES_CONFIG, 1);
    DefaultKafkaProducerFactory<Object, Object> pf = new DefaultKafkaProducerFactory<>(senderProps);
    pf.setTransactionIdPrefix("maxAtt.");
    final KafkaTemplate<Object, Object> template = new KafkaTemplate<>(pf);
    final CountDownLatch latch = new CountDownLatch(1);
    AtomicReference<String> data = new AtomicReference<>();
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        data.set(message.value());
        // the record at offset 0 ('foo') always fails; after the max failures
        // the after-rollback processor publishes it to the DLT
        if (message.offset() == 0) {
            throw new RuntimeException("fail for max failures");
        }
        latch.countDown();
    });
    @SuppressWarnings({ "rawtypes", "unchecked" })
    KafkaTransactionManager tm = new KafkaTransactionManager(pf);
    containerProps.setTransactionManager(tm);
    KafkaMessageListenerContainer<Integer, String> container =
            new KafkaMessageListenerContainer<>(cf, containerProps);
    container.setBeanName("testMaxFailures");
    final CountDownLatch recoverLatch = new CountDownLatch(1);
    DeadLetterPublishingRecoverer recoverer = new DeadLetterPublishingRecoverer(template) {

        @Override
        public void accept(ConsumerRecord<?, ?> record, Exception exception) {
            super.accept(record, exception);
            recoverLatch.countDown();
        }

    };
    DefaultAfterRollbackProcessor<Integer, String> afterRollbackProcessor =
            spy(new DefaultAfterRollbackProcessor<>(recoverer, 3));
    container.setAfterRollbackProcessor(afterRollbackProcessor);
    final CountDownLatch stopLatch = new CountDownLatch(1);
    container.setApplicationEventPublisher(e -> {
        if (e instanceof ConsumerStoppedEvent) {
            stopLatch.countDown();
        }
    });
    container.start();
    template.setDefaultTopic(topic3);
    template.executeInTransaction(t -> {
        RecordHeaders headers = new RecordHeaders(
                new RecordHeader[] { new RecordHeader("baz", "qux".getBytes()) });
        ProducerRecord<Object, Object> record = new ProducerRecord<>(topic3, 0, 0, "foo", headers);
        template.send(record);
        template.sendDefault(0, 0, "bar");
        return null;
    });
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(data.get()).isEqualTo("bar");
    assertThat(recoverLatch.await(10, TimeUnit.SECONDS)).isTrue();
    container.stop();
    Consumer<Integer, String> consumer = cf.createConsumer();
    embeddedKafka.consumeFromAnEmbeddedTopic(consumer, topic3DLT);
    ConsumerRecord<Integer, String> dltRecord = KafkaTestUtils.getSingleRecord(consumer, topic3DLT);
    assertThat(dltRecord.value()).isEqualTo("foo");
    DefaultKafkaHeaderMapper mapper = new DefaultKafkaHeaderMapper();
    Map<String, Object> map = new HashMap<>();
    mapper.toHeaders(dltRecord.headers(), map);
    MessageHeaders headers = new MessageHeaders(map);
    assertThat(new String(headers.get(KafkaHeaders.DLT_EXCEPTION_FQCN, byte[].class)))
            .contains("RuntimeException");
    assertThat(headers.get(KafkaHeaders.DLT_EXCEPTION_MESSAGE, byte[].class))
            .isEqualTo("fail for max failures".getBytes());
    assertThat(headers.get(KafkaHeaders.DLT_EXCEPTION_STACKTRACE)).isNotNull();
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_OFFSET, byte[].class)[3]).isEqualTo((byte) 0);
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_PARTITION, byte[].class)[3]).isEqualTo((byte) 0);
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP, byte[].class)).isNotNull();
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP_TYPE, byte[].class)).isNotNull();
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_TOPIC, byte[].class)).isEqualTo(topic3.getBytes());
    assertThat(headers.get("baz")).isEqualTo("qux".getBytes());
    pf.destroy();
    assertThat(stopLatch.await(10, TimeUnit.SECONDS)).isTrue();
    verify(afterRollbackProcessor).clearThreadState();
    logger.info("Stop testMaxFailures");
}