Usage examples for the org.springframework.kafka.core.KafkaTemplate constructor:
public KafkaTemplate(ProducerFactory<K, V> producerFactory)
From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java
/**
 * Verifies AckMode.RECORD: with per-record acks, offsets are committed after each record,
 * so after consuming two records per partition the committed position of both partitions is 2.
 */
@Test
public void testRecordAck() throws Exception {
    logger.info("Start record ack");
    Map<String, Object> props = KafkaTestUtils.consumerProps("test6", "false", embeddedKafka);
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic6);
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("record ack: " + message);
    });
    containerProps.setSyncCommits(true);
    // Commit after every record; do not ack on listener errors.
    containerProps.setAckMode(AckMode.RECORD);
    containerProps.setAckOnError(false);
    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testRecordAcks");
    container.start();
    Consumer<?, ?> containerConsumer = spyOnConsumer(container);
    final CountDownLatch latch = new CountDownLatch(2);
    // Spy on commitSync: count down once per partition when its committed offset reaches 2.
    willAnswer(invocation -> {
        @SuppressWarnings({ "unchecked" })
        Map<TopicPartition, OffsetAndMetadata> map = (Map<TopicPartition, OffsetAndMetadata>) invocation
                .getArguments()[0];
        try {
            return invocation.callRealMethod();
        }
        finally {
            for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
                if (entry.getValue().offset() == 2) {
                    latch.countDown();
                }
            }
        }
    }).given(containerConsumer).commitSync(any());
    ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic());
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic6);
    // Two records to each of partitions 0 and 1.
    template.sendDefault(0, 0, "foo");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "qux");
    template.flush();
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    // A fresh consumer in the same group should see committed position 2 on both partitions.
    Consumer<Integer, String> consumer = cf.createConsumer();
    consumer.assign(Arrays.asList(new TopicPartition(topic6, 0), new TopicPartition(topic6, 1)));
    assertThat(consumer.position(new TopicPartition(topic6, 0))).isEqualTo(2);
    assertThat(consumer.position(new TopicPartition(topic6, 1))).isEqualTo(2);
    container.stop();
    consumer.close();
    logger.info("Stop record ack");
}
From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java
/**
 * Verifies AckMode.BATCH: offsets are committed once per poll batch. Records are sent before
 * the container starts, so the first poll can return the whole batch; a first-batch latch
 * checks that offset 2 is committed within the (10s) poll timeout.
 */
@Test
public void testBatchAck() throws Exception {
    logger.info("Start batch ack");
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic7);
    // Two records to each of partitions 0 and 1, published before the container starts.
    template.sendDefault(0, 0, "foo");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(1, 0, "qux");
    template.flush();
    Map<String, Object> props = KafkaTestUtils.consumerProps("test6", "false", embeddedKafka);
    // Records already exist, so the new group must start from the beginning.
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic7);
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("batch ack: " + message);
    });
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.BATCH);
    containerProps.setPollTimeout(10000);
    containerProps.setAckOnError(false);
    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testBatchAcks");
    container.start();
    Consumer<?, ?> containerConsumer = spyOnConsumer(container);
    final CountDownLatch firstBatchLatch = new CountDownLatch(1);
    final CountDownLatch latch = new CountDownLatch(2);
    // Spy on commitSync: firstBatchLatch fires when offset 2 is about to be committed
    // (before the real commit); latch counts one completed offset-2 commit per partition.
    willAnswer(invocation -> {
        @SuppressWarnings({ "unchecked" })
        Map<TopicPartition, OffsetAndMetadata> map = (Map<TopicPartition, OffsetAndMetadata>) invocation
                .getArguments()[0];
        for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
            if (entry.getValue().offset() == 2) {
                firstBatchLatch.countDown();
            }
        }
        try {
            return invocation.callRealMethod();
        }
        finally {
            for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
                if (entry.getValue().offset() == 2) {
                    latch.countDown();
                }
            }
        }
    }).given(containerConsumer).commitSync(any());
    // 9s < 10s poll timeout: the batch commit must happen within a single poll cycle.
    assertThat(firstBatchLatch.await(9, TimeUnit.SECONDS)).isTrue();
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    Consumer<Integer, String> consumer = cf.createConsumer();
    consumer.assign(Arrays.asList(new TopicPartition(topic7, 0), new TopicPartition(topic7, 1)));
    assertThat(consumer.position(new TopicPartition(topic7, 0))).isEqualTo(2);
    assertThat(consumer.position(new TopicPartition(topic7, 1))).isEqualTo(2);
    container.stop();
    consumer.close();
    logger.info("Stop batch ack");
}
From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java
/**
 * Same scenario as testBatchAck but with a BatchMessageListener (the listener receives the
 * whole poll result at once). Commits still happen once per batch with AckMode.BATCH.
 */
@Test
public void testBatchListener() throws Exception {
    logger.info("Start batch listener");
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic8);
    // Two records to each of partitions 0 and 1, published before the container starts.
    template.sendDefault(0, 0, "foo");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(1, 0, "qux");
    template.flush();
    Map<String, Object> props = KafkaTestUtils.consumerProps("test8", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic8);
    // Batch listener: one callback per poll, receiving the full record list.
    containerProps.setMessageListener((BatchMessageListener<Integer, String>) messages -> {
        logger.info("batch listener: " + messages);
    });
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.BATCH);
    containerProps.setPollTimeout(10000);
    containerProps.setAckOnError(false);
    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testBatchListener");
    container.start();
    Consumer<?, ?> containerConsumer = spyOnConsumer(container);
    final CountDownLatch firstBatchLatch = new CountDownLatch(1);
    final CountDownLatch latch = new CountDownLatch(2);
    // Spy on commitSync: detect the offset-2 commit both before and after the real call.
    willAnswer(invocation -> {
        @SuppressWarnings({ "unchecked" })
        Map<TopicPartition, OffsetAndMetadata> map = (Map<TopicPartition, OffsetAndMetadata>) invocation
                .getArguments()[0];
        for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
            if (entry.getValue().offset() == 2) {
                firstBatchLatch.countDown();
            }
        }
        try {
            return invocation.callRealMethod();
        }
        finally {
            for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
                if (entry.getValue().offset() == 2) {
                    latch.countDown();
                }
            }
        }
    }).given(containerConsumer).commitSync(any());
    // Must commit within one poll cycle (9s < 10s poll timeout).
    assertThat(firstBatchLatch.await(9, TimeUnit.SECONDS)).isTrue();
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    Consumer<Integer, String> consumer = cf.createConsumer();
    consumer.assign(Arrays.asList(new TopicPartition(topic8, 0), new TopicPartition(topic8, 1)));
    assertThat(consumer.position(new TopicPartition(topic8, 0))).isEqualTo(2);
    assertThat(consumer.position(new TopicPartition(topic8, 1))).isEqualTo(2);
    container.stop();
    consumer.close();
    logger.info("Stop batch listener");
}
From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java
/**
 * Verifies MANUAL_IMMEDIATE acks with a batch listener: the listener acknowledges the whole
 * batch itself, and the commit (offset 2 per partition) happens via the Acknowledgment.
 */
@Test
public void testBatchListenerManual() throws Exception {
    logger.info("Start batch listener manual");
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic9);
    // Two records to each of partitions 0 and 1, published before the container starts.
    template.sendDefault(0, 0, "foo");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(1, 0, "qux");
    template.flush();
    Map<String, Object> props = KafkaTestUtils.consumerProps("test9", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic9);
    // One count per received record (4 total), then the listener acks the batch manually.
    final CountDownLatch latch = new CountDownLatch(4);
    containerProps.setMessageListener((BatchAcknowledgingMessageListener<Integer, String>) (messages, ack) -> {
        logger.info("batch listener manual: " + messages);
        for (int i = 0; i < messages.size(); i++) {
            latch.countDown();
        }
        ack.acknowledge();
    });
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.MANUAL_IMMEDIATE);
    containerProps.setPollTimeout(10000);
    containerProps.setAckOnError(false);
    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testBatchListenerManual");
    container.start();
    Consumer<?, ?> containerConsumer = spyOnConsumer(container);
    final CountDownLatch commitLatch = new CountDownLatch(2);
    // Spy on commitSync: one count per partition whose committed offset reaches 2.
    willAnswer(invocation -> {
        @SuppressWarnings({ "unchecked" })
        Map<TopicPartition, OffsetAndMetadata> map = (Map<TopicPartition, OffsetAndMetadata>) invocation
            .getArguments()[0];
        try {
            return invocation.callRealMethod();
        }
        finally {
            for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
                if (entry.getValue().offset() == 2) {
                    commitLatch.countDown();
                }
            }
        }
    }).given(containerConsumer).commitSync(any());
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(commitLatch.await(60, TimeUnit.SECONDS)).isTrue();
    Consumer<Integer, String> consumer = cf.createConsumer();
    consumer.assign(Arrays.asList(new TopicPartition(topic9, 0), new TopicPartition(topic9, 1)));
    assertThat(consumer.position(new TopicPartition(topic9, 0))).isEqualTo(2);
    assertThat(consumer.position(new TopicPartition(topic9, 1))).isEqualTo(2);
    container.stop();
    consumer.close();
    logger.info("Stop batch listener manual");
}
From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java
/**
 * Verifies error handling for batch listeners: the listener always throws, the configured
 * BatchErrorHandler is invoked with the failed batch, and because ackOnError is true the
 * offsets are still committed (position 2 per partition).
 */
@Test
public void testBatchListenerErrors() throws Exception {
    logger.info("Start batch listener errors");
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic10);
    // Two records to each of partitions 0 and 1, published before the container starts.
    template.sendDefault(0, 0, "foo");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(1, 0, "qux");
    template.flush();
    // NOTE(review): group id "test9" is also used by testBatchListenerManual (different topic,
    // so offsets don't collide) — TODO confirm this reuse is intentional.
    Map<String, Object> props = KafkaTestUtils.consumerProps("test9", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic10);
    // Listener fails on every batch to drive the error handler path.
    containerProps.setMessageListener((BatchMessageListener<Integer, String>) messages -> {
        logger.info("batch listener errors: " + messages);
        throw new RuntimeException("intentional");
    });
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.BATCH);
    containerProps.setPollTimeout(10000);
    // Commit even though the listener threw.
    containerProps.setAckOnError(true);
    final CountDownLatch latch = new CountDownLatch(4);
    // Error handler logs the failure and counts one per record in the failed batch.
    containerProps.setGenericErrorHandler((BatchErrorHandler) (t, messages) -> {
        new BatchLoggingErrorHandler().handle(t, messages);
        for (int i = 0; i < messages.count(); i++) {
            latch.countDown();
        }
    });
    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testBatchListenerErrors");
    container.start();
    Consumer<?, ?> containerConsumer = spyOnConsumer(container);
    final CountDownLatch commitLatch = new CountDownLatch(2);
    // Spy on commitSync: one count per partition whose committed offset reaches 2.
    willAnswer(invocation -> {
        @SuppressWarnings({ "unchecked" })
        Map<TopicPartition, OffsetAndMetadata> map = (Map<TopicPartition, OffsetAndMetadata>) invocation
                .getArguments()[0];
        try {
            return invocation.callRealMethod();
        }
        finally {
            for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
                if (entry.getValue().offset() == 2) {
                    commitLatch.countDown();
                }
            }
        }
    }).given(containerConsumer).commitSync(any());
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(commitLatch.await(60, TimeUnit.SECONDS)).isTrue();
    Consumer<Integer, String> consumer = cf.createConsumer();
    consumer.assign(Arrays.asList(new TopicPartition(topic10, 0), new TopicPartition(topic10, 1)));
    assertThat(consumer.position(new TopicPartition(topic10, 0))).isEqualTo(2);
    assertThat(consumer.position(new TopicPartition(topic10, 1))).isEqualTo(2);
    container.stop();
    consumer.close();
    logger.info("Stop batch listener errors");
}
From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java
private void testSeekGuts(Map<String, Object> props, String topic) throws Exception { logger.info("Start seek " + topic); DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props); ContainerProperties containerProps = new ContainerProperties(topic11); final AtomicReference<CountDownLatch> latch = new AtomicReference<>(new CountDownLatch(6)); final AtomicBoolean seekInitial = new AtomicBoolean(); final CountDownLatch idleLatch = new CountDownLatch(1); class Listener implements MessageListener<Integer, String>, ConsumerSeekAware { private ConsumerSeekCallback callback; private Thread registerThread; private Thread messageThread; @Override//w w w . j a v a2 s . c o m public void onMessage(ConsumerRecord<Integer, String> data) { messageThread = Thread.currentThread(); latch.get().countDown(); if (latch.get().getCount() == 2 && !seekInitial.get()) { callback.seek(topic11, 0, 1); callback.seek(topic11, 1, 1); } } @Override public void registerSeekCallback(ConsumerSeekCallback callback) { this.callback = callback; this.registerThread = Thread.currentThread(); } @Override public void onPartitionsAssigned(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback) { if (seekInitial.get()) { for (Entry<TopicPartition, Long> assignment : assignments.entrySet()) { callback.seek(assignment.getKey().topic(), assignment.getKey().partition(), assignment.getValue() - 1); } } } @Override public void onIdleContainer(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback) { for (Entry<TopicPartition, Long> assignment : assignments.entrySet()) { callback.seek(assignment.getKey().topic(), assignment.getKey().partition(), assignment.getValue() - 1); } idleLatch.countDown(); } } Listener messageListener = new Listener(); containerProps.setMessageListener(messageListener); containerProps.setSyncCommits(true); containerProps.setAckMode(AckMode.RECORD); containerProps.setAckOnError(false); containerProps.setIdleEventInterval(60000L); 
KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf, containerProps); container.setBeanName("testRecordAcks"); container.start(); ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic()); Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka); ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps); KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf); template.setDefaultTopic(topic11); template.sendDefault(0, 0, "foo"); template.sendDefault(1, 0, "bar"); template.sendDefault(0, 0, "baz"); template.sendDefault(1, 0, "qux"); template.flush(); assertThat(latch.get().await(60, TimeUnit.SECONDS)).isTrue(); container.stop(); assertThat(messageListener.registerThread).isSameAs(messageListener.messageThread); // Now test initial seek of assigned partitions. latch.set(new CountDownLatch(2)); seekInitial.set(true); container.start(); assertThat(latch.get().await(60, TimeUnit.SECONDS)).isTrue(); // Now seek on idle latch.set(new CountDownLatch(2)); seekInitial.set(true); container.getContainerProperties().setIdleEventInterval(100L); final AtomicBoolean idleEventPublished = new AtomicBoolean(); container.setApplicationEventPublisher(new ApplicationEventPublisher() { @Override public void publishEvent(Object event) { // NOSONAR } @Override public void publishEvent(ApplicationEvent event) { idleEventPublished.set(true); } }); assertThat(idleLatch.await(60, TimeUnit.SECONDS)); assertThat(idleEventPublished.get()).isTrue(); assertThat(latch.get().await(60, TimeUnit.SECONDS)).isTrue(); container.stop(); logger.info("Stop seek"); }
From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java
/**
 * Exercises manual partition assignment via TopicPartitionInitialOffset with every offset
 * mode: absolute (0L, 1L), negative-from-end (-1000L clamps to beginning, -1L = last record),
 * and relative-to-current (trailing {@code true} flag). Six containers (b1..b6) are started in
 * sequence against topic13, each verifying which records its initial offsets make visible.
 */
@Test
public void testDefinedPartitions() throws Exception {
    this.logger.info("Start defined parts");
    Map<String, Object> props = KafkaTestUtils.consumerProps("test3", "false", embeddedKafka);
    TopicPartitionInitialOffset topic1Partition0 = new TopicPartitionInitialOffset(topic13, 0, 0L);

    CountDownLatch initialConsumersLatch = new CountDownLatch(2);

    // Consumer factory whose consumers signal the latch after their first poll returns,
    // so the test knows both containers are actively polling before it sends records.
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<Integer, String>(props) {

        @Override
        public Consumer<Integer, String> createConsumer() {
            return new KafkaConsumer<Integer, String>(props) {

                @Override
                public ConsumerRecords<Integer, String> poll(long timeout) {
                    try {
                        return super.poll(timeout);
                    }
                    finally {
                        initialConsumersLatch.countDown();
                    }
                }

            };
        }

    };

    // Container b1: partition 0 only, from offset 0.
    ContainerProperties container1Props = new ContainerProperties(topic1Partition0);
    CountDownLatch latch1 = new CountDownLatch(2);
    container1Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part: " + message);
        latch1.countDown();
    });
    KafkaMessageListenerContainer<Integer, String> container1 = new KafkaMessageListenerContainer<>(cf,
            container1Props);
    container1.setBeanName("b1");
    container1.start();

    CountDownLatch stopLatch1 = new CountDownLatch(1);

    // Signal after a commit completes so the container can be stopped safely.
    willAnswer(invocation -> {
        try {
            return invocation.callRealMethod();
        }
        finally {
            stopLatch1.countDown();
        }
    }).given(spyOnConsumer(container1)).commitSync(any());

    // Container b2: partition 1 only, from offset 0.
    TopicPartitionInitialOffset topic1Partition1 = new TopicPartitionInitialOffset(topic13, 1, 0L);
    ContainerProperties container2Props = new ContainerProperties(topic1Partition1);
    CountDownLatch latch2 = new CountDownLatch(2);
    container2Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part: " + message);
        latch2.countDown();
    });
    KafkaMessageListenerContainer<Integer, String> container2 = new KafkaMessageListenerContainer<>(cf,
            container2Props);
    container2.setBeanName("b2");
    container2.start();

    CountDownLatch stopLatch2 = new CountDownLatch(1);

    willAnswer(invocation -> {
        try {
            return invocation.callRealMethod();
        }
        finally {
            stopLatch2.countDown();
        }
    }).given(spyOnConsumer(container2)).commitSync(any());

    assertThat(initialConsumersLatch.await(20, TimeUnit.SECONDS)).isTrue();

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic13);
    // Two records per partition: foo/baz -> partition 0, bar/qux -> partition 1.
    template.sendDefault(0, 0, "foo");
    template.sendDefault(1, 2, "bar");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 2, "qux");
    template.flush();

    assertThat(latch1.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(latch2.await(60, TimeUnit.SECONDS)).isTrue();

    assertThat(stopLatch1.await(60, TimeUnit.SECONDS)).isTrue();
    container1.stop();
    assertThat(stopLatch2.await(60, TimeUnit.SECONDS)).isTrue();
    container2.stop();

    cf = new DefaultKafkaConsumerFactory<>(props);
    // reset earliest — b3 re-reads all 4 records from offset 0 on both partitions.
    ContainerProperties container3Props = new ContainerProperties(topic1Partition0, topic1Partition1);

    CountDownLatch latch3 = new CountDownLatch(4);
    container3Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part e: " + message);
        latch3.countDown();
    });
    KafkaMessageListenerContainer<Integer, String> resettingContainer = new KafkaMessageListenerContainer<>(cf,
            container3Props);
    resettingContainer.setBeanName("b3");
    resettingContainer.start();

    CountDownLatch stopLatch3 = new CountDownLatch(1);

    willAnswer(invocation -> {
        try {
            return invocation.callRealMethod();
        }
        finally {
            stopLatch3.countDown();
        }
    }).given(spyOnConsumer(resettingContainer)).commitSync(any());

    assertThat(latch3.await(60, TimeUnit.SECONDS)).isTrue();

    assertThat(stopLatch3.await(60, TimeUnit.SECONDS)).isTrue();
    resettingContainer.stop();
    assertThat(latch3.getCount()).isEqualTo(0L);

    cf = new DefaultKafkaConsumerFactory<>(props);
    // reset beginning for part 0, minus one for part 1 — b4 sees 2 + 1 = 3 records;
    // -1000L falls before the log start so it clamps to the beginning, -1L means "last record".
    topic1Partition0 = new TopicPartitionInitialOffset(topic13, 0, -1000L);
    topic1Partition1 = new TopicPartitionInitialOffset(topic13, 1, -1L);
    ContainerProperties container4Props = new ContainerProperties(topic1Partition0, topic1Partition1);

    CountDownLatch latch4 = new CountDownLatch(3);
    AtomicReference<String> receivedMessage = new AtomicReference<>();
    container4Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part 0, -1: " + message);
        receivedMessage.set(message.value());
        latch4.countDown();
    });
    resettingContainer = new KafkaMessageListenerContainer<>(cf, container4Props);
    resettingContainer.setBeanName("b4");

    resettingContainer.start();

    CountDownLatch stopLatch4 = new CountDownLatch(1);

    willAnswer(invocation -> {
        try {
            return invocation.callRealMethod();
        }
        finally {
            stopLatch4.countDown();
        }
    }).given(spyOnConsumer(resettingContainer)).commitSync(any());

    assertThat(latch4.await(60, TimeUnit.SECONDS)).isTrue();

    assertThat(stopLatch4.await(60, TimeUnit.SECONDS)).isTrue();
    resettingContainer.stop();
    // Last record seen is the final one on whichever partition delivered last.
    assertThat(receivedMessage.get()).isIn("baz", "qux");
    assertThat(latch4.getCount()).isEqualTo(0L);

    // reset plus one — b5 starts at absolute offset 1 on both partitions: 2 old + 2 new records.
    template.sendDefault(0, 0, "FOO");
    template.sendDefault(1, 2, "BAR");
    template.flush();

    topic1Partition0 = new TopicPartitionInitialOffset(topic13, 0, 1L);
    topic1Partition1 = new TopicPartitionInitialOffset(topic13, 1, 1L);
    ContainerProperties container5Props = new ContainerProperties(topic1Partition0, topic1Partition1);

    final CountDownLatch latch5 = new CountDownLatch(4);
    final List<String> messages = new ArrayList<>();
    container5Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part 1: " + message);
        messages.add(message.value());
        latch5.countDown();
    });

    resettingContainer = new KafkaMessageListenerContainer<>(cf, container5Props);
    resettingContainer.setBeanName("b5");
    resettingContainer.start();

    CountDownLatch stopLatch5 = new CountDownLatch(1);

    willAnswer(invocation -> {
        try {
            return invocation.callRealMethod();
        }
        finally {
            stopLatch5.countDown();
        }
    }).given(spyOnConsumer(resettingContainer)).commitSync(any());

    assertThat(latch5.await(60, TimeUnit.SECONDS)).isTrue();

    assertThat(stopLatch5.await(60, TimeUnit.SECONDS)).isTrue();
    resettingContainer.stop();
    assertThat(messages).contains("baz", "qux", "FOO", "BAR");

    this.logger.info("+++++++++++++++++++++ Start relative reset");

    template.sendDefault(0, 0, "BAZ");
    template.sendDefault(1, 2, "QUX");
    template.sendDefault(0, 0, "FIZ");
    template.sendDefault(1, 2, "BUZ");
    template.flush();

    // Relative offsets (trailing 'true'): +1 from current on partition 0, -1 on partition 1.
    topic1Partition0 = new TopicPartitionInitialOffset(topic13, 0, 1L, true);
    topic1Partition1 = new TopicPartitionInitialOffset(topic13, 1, -1L, true);
    ContainerProperties container6Props = new ContainerProperties(topic1Partition0, topic1Partition1);

    final CountDownLatch latch6 = new CountDownLatch(4);
    final List<String> messages6 = new ArrayList<>();
    container6Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part relative: " + message);
        messages6.add(message.value());
        latch6.countDown();
    });

    resettingContainer = new KafkaMessageListenerContainer<>(cf, container6Props);
    resettingContainer.setBeanName("b6");
    resettingContainer.start();

    CountDownLatch stopLatch6 = new CountDownLatch(1);

    willAnswer(invocation -> {
        try {
            return invocation.callRealMethod();
        }
        finally {
            stopLatch6.countDown();
        }
    }).given(spyOnConsumer(resettingContainer)).commitSync(any());

    assertThat(latch6.await(60, TimeUnit.SECONDS)).isTrue();

    assertThat(stopLatch6.await(60, TimeUnit.SECONDS)).isTrue();
    resettingContainer.stop();
    assertThat(messages6).hasSize(4);
    assertThat(messages6).contains("FIZ", "BAR", "QUX", "BUZ");

    this.logger.info("Stop auto parts");
}
From source file:org.springframework.kafka.listener.TransactionalContainerTests.java
/**
 * Drives a consume-then-produce transaction entirely with mocks and verifies the producer
 * interaction order: begin / sendOffsets(0) / commit / close for the initial (empty) cycle,
 * then begin / send("bar","baz") / sendOffsets(1) / commit / close for the consumed record.
 * Both producers must get the transactional-id suffix "group.foo.0" (group.topic.partition).
 *
 * @param chained wrap the KafkaTransactionManager in a ChainedKafkaTransactionManager
 * @param handleError listener throws and an error handler is installed; the transaction still commits
 * @throws Exception if a latch wait is interrupted
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
private void testConsumeAndProduceTransactionGuts(boolean chained, boolean handleError) throws Exception {
    Consumer consumer = mock(Consumer.class);
    final TopicPartition topicPartition = new TopicPartition("foo", 0);
    // Trigger the rebalance listener immediately on subscribe.
    willAnswer(i -> {
        ((ConsumerRebalanceListener) i.getArgument(1))
                .onPartitionsAssigned(Collections.singletonList(topicPartition));
        return null;
    }).given(consumer).subscribe(any(Collection.class), any(ConsumerRebalanceListener.class));
    ConsumerRecords records = new ConsumerRecords(Collections.singletonMap(topicPartition,
            Collections.singletonList(new ConsumerRecord<>("foo", 0, 0, "key", "value"))));
    final AtomicBoolean done = new AtomicBoolean();
    // First poll returns the single record; later polls idle.
    willAnswer(i -> {
        if (done.compareAndSet(false, true)) {
            return records;
        }
        else {
            Thread.sleep(500);
            return null;
        }
    }).given(consumer).poll(any(Duration.class));
    ConsumerFactory cf = mock(ConsumerFactory.class);
    willReturn(consumer).given(cf).createConsumer("group", "", null);
    Producer producer = mock(Producer.class);
    // Two transactions are expected, hence two producer closes.
    final CountDownLatch closeLatch = new CountDownLatch(2);
    willAnswer(i -> {
        closeLatch.countDown();
        return null;
    }).given(producer).close();
    ProducerFactory pf = mock(ProducerFactory.class);
    given(pf.transactionCapable()).willReturn(true);
    // Capture the transactional-id suffix in effect for each producer created.
    final List<String> transactionalIds = new ArrayList<>();
    willAnswer(i -> {
        transactionalIds.add(TransactionSupport.getTransactionIdSuffix());
        return producer;
    }).given(pf).createProducer();
    KafkaTransactionManager tm = new KafkaTransactionManager(pf);
    PlatformTransactionManager ptm = tm;
    if (chained) {
        ptm = new ChainedKafkaTransactionManager(new SomeOtherTransactionManager(), tm);
    }
    ContainerProperties props = new ContainerProperties("foo");
    props.setGroupId("group");
    props.setTransactionManager(ptm);
    final KafkaTemplate template = new KafkaTemplate(pf);
    // Listener publishes within the transaction; optionally throws to exercise the error handler.
    props.setMessageListener((MessageListener) m -> {
        template.send("bar", "baz");
        if (handleError) {
            throw new RuntimeException("fail");
        }
    });
    KafkaMessageListenerContainer container = new KafkaMessageListenerContainer<>(cf, props);
    container.setBeanName("commit");
    if (handleError) {
        container.setErrorHandler((e, data) -> {
        });
    }
    container.start();
    assertThat(closeLatch.await(10, TimeUnit.SECONDS)).isTrue();
    InOrder inOrder = inOrder(producer);
    // First transaction: offsets-only commit at offset 0.
    inOrder.verify(producer).beginTransaction();
    inOrder.verify(producer).sendOffsetsToTransaction(
            Collections.singletonMap(topicPartition, new OffsetAndMetadata(0)), "group");
    inOrder.verify(producer).commitTransaction();
    inOrder.verify(producer).close();
    // Second transaction: the listener's send plus the offset-1 commit.
    inOrder.verify(producer).beginTransaction();
    ArgumentCaptor<ProducerRecord> captor = ArgumentCaptor.forClass(ProducerRecord.class);
    inOrder.verify(producer).send(captor.capture(), any(Callback.class));
    assertThat(captor.getValue()).isEqualTo(new ProducerRecord("bar", "baz"));
    inOrder.verify(producer).sendOffsetsToTransaction(
            Collections.singletonMap(topicPartition, new OffsetAndMetadata(1)), "group");
    inOrder.verify(producer).commitTransaction();
    inOrder.verify(producer).close();
    container.stop();
    verify(pf, times(2)).createProducer();
    verifyNoMoreInteractions(producer);
    assertThat(transactionalIds.get(0)).isEqualTo("group.foo.0");
    // BUG FIX: the original asserted get(0) twice; the second producer's suffix is get(1).
    assertThat(transactionalIds.get(1)).isEqualTo("group.foo.0");
}
From source file:org.springframework.kafka.listener.TransactionalContainerTests.java
/**
 * Verifies rollback of a consume-then-produce transaction (record listener): the listener
 * throws after sending, so the transaction must be aborted (never committed, no offsets sent),
 * the producer closed, and the consumer sought back to offset 0 on both partitions without
 * any commitSync.
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void testConsumeAndProduceTransactionRollback() throws Exception {
    Consumer consumer = mock(Consumer.class);
    final TopicPartition topicPartition0 = new TopicPartition("foo", 0);
    final TopicPartition topicPartition1 = new TopicPartition("foo", 1);
    // One record on each of two partitions.
    Map<TopicPartition, List<ConsumerRecord<String, String>>> recordMap = new HashMap<>();
    recordMap.put(topicPartition0,
            Collections.singletonList(new ConsumerRecord<>("foo", 0, 0, "key", "value")));
    recordMap.put(topicPartition1,
            Collections.singletonList(new ConsumerRecord<>("foo", 1, 0, "key", "value")));
    ConsumerRecords records = new ConsumerRecords(recordMap);
    final AtomicBoolean done = new AtomicBoolean();
    // First poll returns both records; later polls idle.
    willAnswer(i -> {
        if (done.compareAndSet(false, true)) {
            return records;
        }
        else {
            Thread.sleep(500);
            return null;
        }
    }).given(consumer).poll(any(Duration.class));
    // The rollback should seek both partitions back; latch counts the two seeks.
    final CountDownLatch seekLatch = new CountDownLatch(2);
    willAnswer(i -> {
        seekLatch.countDown();
        return null;
    }).given(consumer).seek(any(), anyLong());
    ConsumerFactory cf = mock(ConsumerFactory.class);
    willReturn(consumer).given(cf).createConsumer("group", "", null);
    Producer producer = mock(Producer.class);
    final CountDownLatch closeLatch = new CountDownLatch(1);
    willAnswer(i -> {
        closeLatch.countDown();
        return null;
    }).given(producer).close();
    ProducerFactory pf = mock(ProducerFactory.class);
    given(pf.transactionCapable()).willReturn(true);
    given(pf.createProducer()).willReturn(producer);
    KafkaTransactionManager tm = new KafkaTransactionManager(pf);
    ContainerProperties props = new ContainerProperties(new TopicPartitionInitialOffset("foo", 0),
            new TopicPartitionInitialOffset("foo", 1));
    props.setGroupId("group");
    props.setTransactionManager(tm);
    final KafkaTemplate template = new KafkaTemplate(pf);
    // Listener sends inside the transaction then fails, forcing a rollback.
    props.setMessageListener((MessageListener) m -> {
        template.send("bar", "baz");
        throw new RuntimeException("fail");
    });
    KafkaMessageListenerContainer container = new KafkaMessageListenerContainer<>(cf, props);
    container.setBeanName("rollback");
    container.start();
    assertThat(closeLatch.await(10, TimeUnit.SECONDS)).isTrue();
    assertThat(seekLatch.await(10, TimeUnit.SECONDS)).isTrue();
    InOrder inOrder = inOrder(producer);
    inOrder.verify(producer).beginTransaction();
    ArgumentCaptor<ProducerRecord> captor = ArgumentCaptor.forClass(ProducerRecord.class);
    verify(producer).send(captor.capture(), any(Callback.class));
    assertThat(captor.getValue()).isEqualTo(new ProducerRecord("bar", "baz"));
    // Rolled back: no offsets, no commit — only abort and close.
    inOrder.verify(producer, never()).sendOffsetsToTransaction(anyMap(), anyString());
    inOrder.verify(producer, never()).commitTransaction();
    inOrder.verify(producer).abortTransaction();
    inOrder.verify(producer).close();
    verify(consumer).seek(topicPartition0, 0);
    verify(consumer).seek(topicPartition1, 0);
    verify(consumer, never()).commitSync(anyMap());
    container.stop();
    verify(pf, times(1)).createProducer();
}
From source file:org.springframework.kafka.listener.TransactionalContainerTests.java
/**
 * Batch-listener variant of testConsumeAndProduceTransactionRollback: a BatchMessageListener
 * throws after sending, and the transaction must be aborted (no offsets, no commit), the
 * producer closed, and both partitions sought back to offset 0 with no commitSync.
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void testConsumeAndProduceTransactionRollbackBatch() throws Exception {
    Consumer consumer = mock(Consumer.class);
    final TopicPartition topicPartition0 = new TopicPartition("foo", 0);
    final TopicPartition topicPartition1 = new TopicPartition("foo", 1);
    // One record on each of two partitions.
    Map<TopicPartition, List<ConsumerRecord<String, String>>> recordMap = new HashMap<>();
    recordMap.put(topicPartition0,
            Collections.singletonList(new ConsumerRecord<>("foo", 0, 0, "key", "value")));
    recordMap.put(topicPartition1,
            Collections.singletonList(new ConsumerRecord<>("foo", 1, 0, "key", "value")));
    ConsumerRecords records = new ConsumerRecords(recordMap);
    final AtomicBoolean done = new AtomicBoolean();
    // First poll returns the batch; later polls idle.
    willAnswer(i -> {
        if (done.compareAndSet(false, true)) {
            return records;
        }
        else {
            Thread.sleep(500);
            return null;
        }
    }).given(consumer).poll(any(Duration.class));
    // Rollback should seek both partitions back; latch counts the two seeks.
    final CountDownLatch seekLatch = new CountDownLatch(2);
    willAnswer(i -> {
        seekLatch.countDown();
        return null;
    }).given(consumer).seek(any(), anyLong());
    ConsumerFactory cf = mock(ConsumerFactory.class);
    willReturn(consumer).given(cf).createConsumer("group", "", null);
    Producer producer = mock(Producer.class);
    final CountDownLatch closeLatch = new CountDownLatch(1);
    willAnswer(i -> {
        closeLatch.countDown();
        return null;
    }).given(producer).close();
    ProducerFactory pf = mock(ProducerFactory.class);
    given(pf.transactionCapable()).willReturn(true);
    given(pf.createProducer()).willReturn(producer);
    KafkaTransactionManager tm = new KafkaTransactionManager(pf);
    ContainerProperties props = new ContainerProperties(new TopicPartitionInitialOffset("foo", 0),
            new TopicPartitionInitialOffset("foo", 1));
    props.setGroupId("group");
    props.setTransactionManager(tm);
    final KafkaTemplate template = new KafkaTemplate(pf);
    // Batch listener sends inside the transaction then fails, forcing a rollback.
    props.setMessageListener((BatchMessageListener) recordlist -> {
        template.send("bar", "baz");
        throw new RuntimeException("fail");
    });
    KafkaMessageListenerContainer container = new KafkaMessageListenerContainer<>(cf, props);
    container.setBeanName("rollback");
    container.start();
    assertThat(closeLatch.await(10, TimeUnit.SECONDS)).isTrue();
    assertThat(seekLatch.await(10, TimeUnit.SECONDS)).isTrue();
    InOrder inOrder = inOrder(producer);
    inOrder.verify(producer).beginTransaction();
    ArgumentCaptor<ProducerRecord> captor = ArgumentCaptor.forClass(ProducerRecord.class);
    verify(producer).send(captor.capture(), any(Callback.class));
    assertThat(captor.getValue()).isEqualTo(new ProducerRecord("bar", "baz"));
    // Rolled back: no offsets, no commit — only abort and close.
    inOrder.verify(producer, never()).sendOffsetsToTransaction(anyMap(), anyString());
    inOrder.verify(producer, never()).commitTransaction();
    inOrder.verify(producer).abortTransaction();
    inOrder.verify(producer).close();
    verify(consumer).seek(topicPartition0, 0);
    verify(consumer).seek(topicPartition1, 0);
    verify(consumer, never()).commitSync(anyMap());
    container.stop();
    verify(pf, times(1)).createProducer();
}