Example usage for org.springframework.kafka.core DefaultKafkaConsumerFactory DefaultKafkaConsumerFactory

Introduction

On this page you can find example usage for the org.springframework.kafka.core DefaultKafkaConsumerFactory(Map&lt;String, Object&gt; configs) constructor.

Prototype

public DefaultKafkaConsumerFactory(Map<String, Object> configs) 

Document

Construct a factory with the provided configuration.
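Before the full test classes below, here is a minimal sketch of the constructor in isolation. The broker address, group id, and deserializers are illustrative assumptions, not values taken from the examples:

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;

import org.springframework.kafka.core.DefaultKafkaConsumerFactory;

public class ConsumerFactoryExample {

    public static void main(String[] args) {
        // Assumed local broker and example group id, for illustration only.
        Map<String, Object> configs = new HashMap<>();
        configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        configs.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");
        configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
        configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

        // Construct a factory with the provided configuration.
        DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(configs);

        // Each createConsumer() call builds a new KafkaConsumer from those configs.
        Consumer<Integer, String> consumer = cf.createConsumer();
        consumer.close();
    }
}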

Usage

From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java
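This test sends four records across two partitions and uses AckMode.RECORD with a spied consumer, counting down a latch each time commitSync() commits offset 2 and then asserting the committed positions on both partitions.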

@Test
public void testRecordAck() throws Exception {
    logger.info("Start record ack");
    Map<String, Object> props = KafkaTestUtils.consumerProps("test6", "false", embeddedKafka);
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic6);
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("record ack: " + message);
    });
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.RECORD);
    containerProps.setAckOnError(false);

    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testRecordAcks");
    container.start();
    Consumer<?, ?> containerConsumer = spyOnConsumer(container);
    final CountDownLatch latch = new CountDownLatch(2);
    willAnswer(invocation -> {

        @SuppressWarnings({ "unchecked" })
        Map<TopicPartition, OffsetAndMetadata> map = (Map<TopicPartition, OffsetAndMetadata>) invocation
                .getArguments()[0];
        try {
            return invocation.callRealMethod();
        } finally {
            for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
                if (entry.getValue().offset() == 2) {
                    latch.countDown();
                }
            }
        }

    }).given(containerConsumer).commitSync(any());
    ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic());
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic6);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "qux");
    template.flush();
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    Consumer<Integer, String> consumer = cf.createConsumer();
    consumer.assign(Arrays.asList(new TopicPartition(topic6, 0), new TopicPartition(topic6, 1)));
    assertThat(consumer.position(new TopicPartition(topic6, 0))).isEqualTo(2);
    assertThat(consumer.position(new TopicPartition(topic6, 1))).isEqualTo(2);
    container.stop();
    consumer.close();
    logger.info("Stop record ack");
}

From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java
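Similar to the previous example, but with AckMode.BATCH: the spy on commitSync() releases one latch when the first batch is committed and a second when both partitions reach offset 2.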

@Test
public void testBatchAck() throws Exception {
    logger.info("Start batch ack");

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic7);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(1, 0, "qux");
    template.flush();

    Map<String, Object> props = KafkaTestUtils.consumerProps("test6", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic7);
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("batch ack: " + message);
    });
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.BATCH);
    containerProps.setPollTimeout(10000);
    containerProps.setAckOnError(false);

    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testBatchAcks");
    container.start();
    Consumer<?, ?> containerConsumer = spyOnConsumer(container);
    final CountDownLatch firstBatchLatch = new CountDownLatch(1);
    final CountDownLatch latch = new CountDownLatch(2);
    willAnswer(invocation -> {

        @SuppressWarnings({ "unchecked" })
        Map<TopicPartition, OffsetAndMetadata> map = (Map<TopicPartition, OffsetAndMetadata>) invocation
                .getArguments()[0];
        for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
            if (entry.getValue().offset() == 2) {
                firstBatchLatch.countDown();
            }
        }
        try {
            return invocation.callRealMethod();
        } finally {
            for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
                if (entry.getValue().offset() == 2) {
                    latch.countDown();
                }
            }
        }

    }).given(containerConsumer).commitSync(any());

    assertThat(firstBatchLatch.await(9, TimeUnit.SECONDS)).isTrue();

    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    Consumer<Integer, String> consumer = cf.createConsumer();
    consumer.assign(Arrays.asList(new TopicPartition(topic7, 0), new TopicPartition(topic7, 1)));
    assertThat(consumer.position(new TopicPartition(topic7, 0))).isEqualTo(2);
    assertThat(consumer.position(new TopicPartition(topic7, 1))).isEqualTo(2);
    container.stop();
    consumer.close();
    logger.info("Stop batch ack");
}

From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java
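Here the records are delivered to a BatchMessageListener (a whole batch per call) instead of a record-at-a-time MessageListener; the commit verification is the same as in testBatchAck.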

@Test
public void testBatchListener() throws Exception {
    logger.info("Start batch listener");

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic8);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(1, 0, "qux");
    template.flush();

    Map<String, Object> props = KafkaTestUtils.consumerProps("test8", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic8);
    containerProps.setMessageListener((BatchMessageListener<Integer, String>) messages -> {
        logger.info("batch listener: " + messages);
    });
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.BATCH);
    containerProps.setPollTimeout(10000);
    containerProps.setAckOnError(false);

    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testBatchListener");
    container.start();
    Consumer<?, ?> containerConsumer = spyOnConsumer(container);
    final CountDownLatch firstBatchLatch = new CountDownLatch(1);
    final CountDownLatch latch = new CountDownLatch(2);
    willAnswer(invocation -> {

        @SuppressWarnings({ "unchecked" })
        Map<TopicPartition, OffsetAndMetadata> map = (Map<TopicPartition, OffsetAndMetadata>) invocation
                .getArguments()[0];
        for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
            if (entry.getValue().offset() == 2) {
                firstBatchLatch.countDown();
            }
        }
        try {
            return invocation.callRealMethod();
        } finally {
            for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
                if (entry.getValue().offset() == 2) {
                    latch.countDown();
                }
            }
        }

    }).given(containerConsumer).commitSync(any());

    assertThat(firstBatchLatch.await(9, TimeUnit.SECONDS)).isTrue();

    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    Consumer<Integer, String> consumer = cf.createConsumer();
    consumer.assign(Arrays.asList(new TopicPartition(topic8, 0), new TopicPartition(topic8, 1)));
    assertThat(consumer.position(new TopicPartition(topic8, 0))).isEqualTo(2);
    assertThat(consumer.position(new TopicPartition(topic8, 1))).isEqualTo(2);
    container.stop();
    consumer.close();
    logger.info("Stop batch listener");
}

From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java
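With AckMode.MANUAL_IMMEDIATE, the BatchAcknowledgingMessageListener calls ack.acknowledge() itself; the test waits for all four records and for both partitions to be committed at offset 2.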

@Test
public void testBatchListenerManual() throws Exception {
    logger.info("Start batch listener manual");

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic9);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(1, 0, "qux");
    template.flush();

    Map<String, Object> props = KafkaTestUtils.consumerProps("test9", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic9);
    final CountDownLatch latch = new CountDownLatch(4);
    containerProps.setMessageListener((BatchAcknowledgingMessageListener<Integer, String>) (messages, ack) -> {
        logger.info("batch listener manual: " + messages);
        for (int i = 0; i < messages.size(); i++) {
            latch.countDown();
        }
        ack.acknowledge();
    });
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.MANUAL_IMMEDIATE);
    containerProps.setPollTimeout(10000);
    containerProps.setAckOnError(false);

    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testBatchListenerManual");
    container.start();
    Consumer<?, ?> containerConsumer = spyOnConsumer(container);
    final CountDownLatch commitLatch = new CountDownLatch(2);
    willAnswer(invocation -> {

        @SuppressWarnings({ "unchecked" })
        Map<TopicPartition, OffsetAndMetadata> map = (Map<TopicPartition, OffsetAndMetadata>) invocation
                .getArguments()[0];
        try {
            return invocation.callRealMethod();
        } finally {
            for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
                if (entry.getValue().offset() == 2) {
                    commitLatch.countDown();
                }
            }
        }

    }).given(containerConsumer).commitSync(any());

    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(commitLatch.await(60, TimeUnit.SECONDS)).isTrue();
    Consumer<Integer, String> consumer = cf.createConsumer();
    consumer.assign(Arrays.asList(new TopicPartition(topic9, 0), new TopicPartition(topic9, 1)));
    assertThat(consumer.position(new TopicPartition(topic9, 0))).isEqualTo(2);
    assertThat(consumer.position(new TopicPartition(topic9, 1))).isEqualTo(2);
    container.stop();
    consumer.close();
    logger.info("Stop batch listener manual");
}

From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java
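This test throws from the batch listener; with setAckOnError(true) and a BatchErrorHandler in place, the failed batch is still committed, which the commit latch and the final positions confirm.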

@Test
public void testBatchListenerErrors() throws Exception {
    logger.info("Start batch listener errors");

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic10);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(1, 0, "qux");
    template.flush();

    Map<String, Object> props = KafkaTestUtils.consumerProps("test9", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic10);
    containerProps.setMessageListener((BatchMessageListener<Integer, String>) messages -> {
        logger.info("batch listener errors: " + messages);
        throw new RuntimeException("intentional");
    });
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.BATCH);
    containerProps.setPollTimeout(10000);
    containerProps.setAckOnError(true);
    final CountDownLatch latch = new CountDownLatch(4);
    containerProps.setGenericErrorHandler((BatchErrorHandler) (t, messages) -> {
        new BatchLoggingErrorHandler().handle(t, messages);
        for (int i = 0; i < messages.count(); i++) {
            latch.countDown();
        }
    });

    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testBatchListenerErrors");
    container.start();
    Consumer<?, ?> containerConsumer = spyOnConsumer(container);
    final CountDownLatch commitLatch = new CountDownLatch(2);
    willAnswer(invocation -> {

        @SuppressWarnings({ "unchecked" })
        Map<TopicPartition, OffsetAndMetadata> map = (Map<TopicPartition, OffsetAndMetadata>) invocation
                .getArguments()[0];
        try {
            return invocation.callRealMethod();
        } finally {
            for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
                if (entry.getValue().offset() == 2) {
                    commitLatch.countDown();
                }
            }
        }

    }).given(containerConsumer).commitSync(any());

    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(commitLatch.await(60, TimeUnit.SECONDS)).isTrue();
    Consumer<Integer, String> consumer = cf.createConsumer();
    consumer.assign(Arrays.asList(new TopicPartition(topic10, 0), new TopicPartition(topic10, 1)));
    assertThat(consumer.position(new TopicPartition(topic10, 0))).isEqualTo(2);
    assertThat(consumer.position(new TopicPartition(topic10, 1))).isEqualTo(2);
    container.stop();
    consumer.close();
    logger.info("Stop batch listener errors");
}

From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java
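This helper exercises ConsumerSeekAware in three ways: seeking from within onMessage(), seeking newly assigned partitions on restart, and seeking when the container publishes an idle event.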

private void testSeekGuts(Map<String, Object> props, String topic) throws Exception {
    logger.info("Start seek " + topic);
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic11);
    final AtomicReference<CountDownLatch> latch = new AtomicReference<>(new CountDownLatch(6));
    final AtomicBoolean seekInitial = new AtomicBoolean();
    final CountDownLatch idleLatch = new CountDownLatch(1);
    class Listener implements MessageListener<Integer, String>, ConsumerSeekAware {

        private ConsumerSeekCallback callback;

        private Thread registerThread;

        private Thread messageThread;

        @Override
        public void onMessage(ConsumerRecord<Integer, String> data) {
            messageThread = Thread.currentThread();
            latch.get().countDown();
            if (latch.get().getCount() == 2 && !seekInitial.get()) {
                callback.seek(topic11, 0, 1);
                callback.seek(topic11, 1, 1);
            }
        }

        @Override
        public void registerSeekCallback(ConsumerSeekCallback callback) {
            this.callback = callback;
            this.registerThread = Thread.currentThread();
        }

        @Override
        public void onPartitionsAssigned(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback) {
            if (seekInitial.get()) {
                for (Entry<TopicPartition, Long> assignment : assignments.entrySet()) {
                    callback.seek(assignment.getKey().topic(), assignment.getKey().partition(),
                            assignment.getValue() - 1);
                }
            }
        }

        @Override
        public void onIdleContainer(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback) {
            for (Entry<TopicPartition, Long> assignment : assignments.entrySet()) {
                callback.seek(assignment.getKey().topic(), assignment.getKey().partition(),
                        assignment.getValue() - 1);
            }
            idleLatch.countDown();
        }

    }
    Listener messageListener = new Listener();
    containerProps.setMessageListener(messageListener);
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.RECORD);
    containerProps.setAckOnError(false);
    containerProps.setIdleEventInterval(60000L);

    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testRecordAcks");
    container.start();
    ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic());
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic11);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "qux");
    template.flush();
    assertThat(latch.get().await(60, TimeUnit.SECONDS)).isTrue();
    container.stop();
    assertThat(messageListener.registerThread).isSameAs(messageListener.messageThread);

    // Now test initial seek of assigned partitions.
    latch.set(new CountDownLatch(2));
    seekInitial.set(true);
    container.start();
    assertThat(latch.get().await(60, TimeUnit.SECONDS)).isTrue();

    // Now seek on idle
    latch.set(new CountDownLatch(2));
    seekInitial.set(true);
    container.getContainerProperties().setIdleEventInterval(100L);
    final AtomicBoolean idleEventPublished = new AtomicBoolean();
    container.setApplicationEventPublisher(new ApplicationEventPublisher() {

        @Override
        public void publishEvent(Object event) {
            // NOSONAR
        }

        @Override
        public void publishEvent(ApplicationEvent event) {
            idleEventPublished.set(true);
        }

    });
    assertThat(idleLatch.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(idleEventPublished.get()).isTrue();
    assertThat(latch.get().await(60, TimeUnit.SECONDS)).isTrue();
    container.stop();
    logger.info("Stop seek");
}

From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java
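Rather than subscribing to a topic, this test assigns explicit TopicPartitionInitialOffset instances to each container and then replays the data with absolute, negative (relative to the end), and relative initial offsets.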

@Test
public void testDefinedPartitions() throws Exception {
    this.logger.info("Start defined parts");
    Map<String, Object> props = KafkaTestUtils.consumerProps("test3", "false", embeddedKafka);
    TopicPartitionInitialOffset topic1Partition0 = new TopicPartitionInitialOffset(topic13, 0, 0L);

    CountDownLatch initialConsumersLatch = new CountDownLatch(2);

    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<Integer, String>(props) {

        @Override
        public Consumer<Integer, String> createConsumer() {
            return new KafkaConsumer<Integer, String>(props) {

                @Override
                public ConsumerRecords<Integer, String> poll(long timeout) {
                    try {
                        return super.poll(timeout);
                    } finally {
                        initialConsumersLatch.countDown();
                    }
                }

            };
        }

    };

    ContainerProperties container1Props = new ContainerProperties(topic1Partition0);
    CountDownLatch latch1 = new CountDownLatch(2);
    container1Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part: " + message);
        latch1.countDown();
    });
    KafkaMessageListenerContainer<Integer, String> container1 = new KafkaMessageListenerContainer<>(cf,
            container1Props);
    container1.setBeanName("b1");
    container1.start();

    CountDownLatch stopLatch1 = new CountDownLatch(1);

    willAnswer(invocation -> {

        try {
            return invocation.callRealMethod();
        } finally {
            stopLatch1.countDown();
        }

    }).given(spyOnConsumer(container1)).commitSync(any());

    TopicPartitionInitialOffset topic1Partition1 = new TopicPartitionInitialOffset(topic13, 1, 0L);
    ContainerProperties container2Props = new ContainerProperties(topic1Partition1);
    CountDownLatch latch2 = new CountDownLatch(2);
    container2Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part: " + message);
        latch2.countDown();
    });
    KafkaMessageListenerContainer<Integer, String> container2 = new KafkaMessageListenerContainer<>(cf,
            container2Props);
    container2.setBeanName("b2");
    container2.start();

    CountDownLatch stopLatch2 = new CountDownLatch(1);

    willAnswer(invocation -> {

        try {
            return invocation.callRealMethod();
        } finally {
            stopLatch2.countDown();
        }

    }).given(spyOnConsumer(container2)).commitSync(any());

    assertThat(initialConsumersLatch.await(20, TimeUnit.SECONDS)).isTrue();

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic13);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(1, 2, "bar");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 2, "qux");
    template.flush();

    assertThat(latch1.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(latch2.await(60, TimeUnit.SECONDS)).isTrue();

    assertThat(stopLatch1.await(60, TimeUnit.SECONDS)).isTrue();
    container1.stop();
    assertThat(stopLatch2.await(60, TimeUnit.SECONDS)).isTrue();
    container2.stop();

    cf = new DefaultKafkaConsumerFactory<>(props);
    // reset earliest
    ContainerProperties container3Props = new ContainerProperties(topic1Partition0, topic1Partition1);

    CountDownLatch latch3 = new CountDownLatch(4);
    container3Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part e: " + message);
        latch3.countDown();
    });
    KafkaMessageListenerContainer<Integer, String> resettingContainer = new KafkaMessageListenerContainer<>(cf,
            container3Props);
    resettingContainer.setBeanName("b3");
    resettingContainer.start();

    CountDownLatch stopLatch3 = new CountDownLatch(1);

    willAnswer(invocation -> {

        try {
            return invocation.callRealMethod();
        } finally {
            stopLatch3.countDown();
        }

    }).given(spyOnConsumer(resettingContainer)).commitSync(any());

    assertThat(latch3.await(60, TimeUnit.SECONDS)).isTrue();

    assertThat(stopLatch3.await(60, TimeUnit.SECONDS)).isTrue();
    resettingContainer.stop();
    assertThat(latch3.getCount()).isEqualTo(0L);

    cf = new DefaultKafkaConsumerFactory<>(props);
    // reset beginning for part 0, minus one for part 1
    topic1Partition0 = new TopicPartitionInitialOffset(topic13, 0, -1000L);
    topic1Partition1 = new TopicPartitionInitialOffset(topic13, 1, -1L);
    ContainerProperties container4Props = new ContainerProperties(topic1Partition0, topic1Partition1);

    CountDownLatch latch4 = new CountDownLatch(3);
    AtomicReference<String> receivedMessage = new AtomicReference<>();
    container4Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part 0, -1: " + message);
        receivedMessage.set(message.value());
        latch4.countDown();
    });
    resettingContainer = new KafkaMessageListenerContainer<>(cf, container4Props);
    resettingContainer.setBeanName("b4");

    resettingContainer.start();

    CountDownLatch stopLatch4 = new CountDownLatch(1);

    willAnswer(invocation -> {

        try {
            return invocation.callRealMethod();
        } finally {
            stopLatch4.countDown();
        }

    }).given(spyOnConsumer(resettingContainer)).commitSync(any());

    assertThat(latch4.await(60, TimeUnit.SECONDS)).isTrue();

    assertThat(stopLatch4.await(60, TimeUnit.SECONDS)).isTrue();
    resettingContainer.stop();
    assertThat(receivedMessage.get()).isIn("baz", "qux");
    assertThat(latch4.getCount()).isEqualTo(0L);

    // reset plus one
    template.sendDefault(0, 0, "FOO");
    template.sendDefault(1, 2, "BAR");
    template.flush();

    topic1Partition0 = new TopicPartitionInitialOffset(topic13, 0, 1L);
    topic1Partition1 = new TopicPartitionInitialOffset(topic13, 1, 1L);
    ContainerProperties container5Props = new ContainerProperties(topic1Partition0, topic1Partition1);

    final CountDownLatch latch5 = new CountDownLatch(4);
    final List<String> messages = new ArrayList<>();
    container5Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part 1: " + message);
        messages.add(message.value());
        latch5.countDown();
    });

    resettingContainer = new KafkaMessageListenerContainer<>(cf, container5Props);
    resettingContainer.setBeanName("b5");
    resettingContainer.start();

    CountDownLatch stopLatch5 = new CountDownLatch(1);

    willAnswer(invocation -> {

        try {
            return invocation.callRealMethod();
        } finally {
            stopLatch5.countDown();
        }

    }).given(spyOnConsumer(resettingContainer)).commitSync(any());

    assertThat(latch5.await(60, TimeUnit.SECONDS)).isTrue();

    assertThat(stopLatch5.await(60, TimeUnit.SECONDS)).isTrue();
    resettingContainer.stop();
    assertThat(messages).contains("baz", "qux", "FOO", "BAR");

    this.logger.info("+++++++++++++++++++++ Start relative reset");

    template.sendDefault(0, 0, "BAZ");
    template.sendDefault(1, 2, "QUX");
    template.sendDefault(0, 0, "FIZ");
    template.sendDefault(1, 2, "BUZ");
    template.flush();

    topic1Partition0 = new TopicPartitionInitialOffset(topic13, 0, 1L, true);
    topic1Partition1 = new TopicPartitionInitialOffset(topic13, 1, -1L, true);
    ContainerProperties container6Props = new ContainerProperties(topic1Partition0, topic1Partition1);

    final CountDownLatch latch6 = new CountDownLatch(4);
    final List<String> messages6 = new ArrayList<>();
    container6Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part relative: " + message);
        messages6.add(message.value());
        latch6.countDown();
    });

    resettingContainer = new KafkaMessageListenerContainer<>(cf, container6Props);
    resettingContainer.setBeanName("b6");
    resettingContainer.start();

    CountDownLatch stopLatch6 = new CountDownLatch(1);

    willAnswer(invocation -> {

        try {
            return invocation.callRealMethod();
        } finally {
            stopLatch6.countDown();
        }

    }).given(spyOnConsumer(resettingContainer)).commitSync(any());

    assertThat(latch6.await(60, TimeUnit.SECONDS)).isTrue();

    assertThat(stopLatch6.await(60, TimeUnit.SECONDS)).isTrue();
    resettingContainer.stop();
    assertThat(messages6).hasSize(4);
    assertThat(messages6).contains("FIZ", "BAR", "QUX", "BUZ");

    this.logger.info("Stop auto parts");
}

From source file:org.springframework.kafka.listener.TransactionalContainerTests.java
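From the transactional tests: the listener fails on the first delivery, the transaction is rolled back, and the record is redelivered; a read_committed consumer then verifies that no uncommitted records are visible.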

@SuppressWarnings("unchecked")
@Test
public void testRollbackRecord() throws Exception {
    logger.info("Start testRollbackRecord");
    Map<String, Object> props = KafkaTestUtils.consumerProps("txTest1", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "group");
    props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic1, topic2);
    containerProps.setGroupId("group");
    containerProps.setPollTimeout(10_000);

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    senderProps.put(ProducerConfig.RETRIES_CONFIG, 1);
    DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    pf.setTransactionIdPrefix("rr.");

    final KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    final AtomicBoolean failed = new AtomicBoolean();
    final CountDownLatch latch = new CountDownLatch(3);
    final AtomicReference<String> transactionalId = new AtomicReference<>();
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        latch.countDown();
        if (failed.compareAndSet(false, true)) {
            throw new RuntimeException("fail");
        }
        /*
         * Send a message to topic2 and wait for it so we don't stop the container too soon.
         */
        if (message.topic().equals(topic1)) {
            template.send(topic2, "bar");
            template.flush();
            transactionalId.set(KafkaTestUtils.getPropertyValue(
                    ProducerFactoryUtils.getTransactionalResourceHolder(pf).getProducer(),
                    "delegate.transactionManager.transactionalId", String.class));
        }
    });

    @SuppressWarnings({ "rawtypes" })
    KafkaTransactionManager tm = new KafkaTransactionManager(pf);
    containerProps.setTransactionManager(tm);
    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testRollbackRecord");
    container.start();

    template.setDefaultTopic(topic1);
    template.executeInTransaction(t -> {
        template.sendDefault(0, 0, "foo");
        return null;
    });
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    container.stop();
    Consumer<Integer, String> consumer = cf.createConsumer();
    final CountDownLatch subsLatch = new CountDownLatch(1);
    consumer.subscribe(Arrays.asList(topic1), new ConsumerRebalanceListener() {

        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            // empty
        }

        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            subsLatch.countDown();
        }

    });
    ConsumerRecords<Integer, String> records = null;
    int n = 0;
    while (subsLatch.getCount() > 0 && n++ < 600) {
        records = consumer.poll(Duration.ofMillis(100));
    }
    assertThat(subsLatch.await(1, TimeUnit.MILLISECONDS)).isTrue();
    assertThat(records.count()).isEqualTo(0);
    // depending on timing, the position might include the offset representing the commit in the log
    assertThat(consumer.position(new TopicPartition(topic1, 0))).isGreaterThanOrEqualTo(1L);
    assertThat(transactionalId.get()).startsWith("rr.group.txTopic");
    assertThat(KafkaTestUtils.getPropertyValue(pf, "consumerProducers", Map.class)).isEmpty();
    logger.info("Stop testRollbackRecord");
    pf.destroy();
    consumer.close();
}

From source file:org.springframework.kafka.listener.TransactionalContainerTests.java
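Also transactional: after the configured maximum of three failed deliveries, the DefaultAfterRollbackProcessor gives up and the DeadLetterPublishingRecoverer publishes the failing record to a dead-letter topic, whose headers the test then inspects.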

@Test
public void testMaxFailures() throws Exception {
    logger.info("Start testMaxFailures");
    Map<String, Object> props = KafkaTestUtils.consumerProps("txTestMaxFailures", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "group");
    props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic3);
    containerProps.setPollTimeout(10_000);

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    senderProps.put(ProducerConfig.RETRIES_CONFIG, 1);
    DefaultKafkaProducerFactory<Object, Object> pf = new DefaultKafkaProducerFactory<>(senderProps);
    pf.setTransactionIdPrefix("maxAtt.");
    final KafkaTemplate<Object, Object> template = new KafkaTemplate<>(pf);
    final CountDownLatch latch = new CountDownLatch(1);
    AtomicReference<String> data = new AtomicReference<>();
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        data.set(message.value());
        if (message.offset() == 0) {
            throw new RuntimeException("fail for max failures");
        }
        latch.countDown();
    });

    @SuppressWarnings({ "rawtypes", "unchecked" })
    KafkaTransactionManager tm = new KafkaTransactionManager(pf);
    containerProps.setTransactionManager(tm);
    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testMaxFailures");
    final CountDownLatch recoverLatch = new CountDownLatch(1);
    DeadLetterPublishingRecoverer recoverer = new DeadLetterPublishingRecoverer(template) {

        @Override
        public void accept(ConsumerRecord<?, ?> record, Exception exception) {
            super.accept(record, exception);
            recoverLatch.countDown();
        }

    };
    DefaultAfterRollbackProcessor<Integer, String> afterRollbackProcessor = spy(
            new DefaultAfterRollbackProcessor<>(recoverer, 3));
    container.setAfterRollbackProcessor(afterRollbackProcessor);
    final CountDownLatch stopLatch = new CountDownLatch(1);
    container.setApplicationEventPublisher(e -> {
        if (e instanceof ConsumerStoppedEvent) {
            stopLatch.countDown();
        }
    });
    container.start();

    template.setDefaultTopic(topic3);
    template.executeInTransaction(t -> {
        RecordHeaders headers = new RecordHeaders(
                new RecordHeader[] { new RecordHeader("baz", "qux".getBytes()) });
        ProducerRecord<Object, Object> record = new ProducerRecord<>(topic3, 0, 0, "foo", headers);
        template.send(record);
        template.sendDefault(0, 0, "bar");
        return null;
    });
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(data.get()).isEqualTo("bar");
    assertThat(recoverLatch.await(10, TimeUnit.SECONDS)).isTrue();
    container.stop();
    Consumer<Integer, String> consumer = cf.createConsumer();
    embeddedKafka.consumeFromAnEmbeddedTopic(consumer, topic3DLT);
    ConsumerRecord<Integer, String> dltRecord = KafkaTestUtils.getSingleRecord(consumer, topic3DLT);
    assertThat(dltRecord.value()).isEqualTo("foo");
    DefaultKafkaHeaderMapper mapper = new DefaultKafkaHeaderMapper();
    Map<String, Object> map = new HashMap<>();
    mapper.toHeaders(dltRecord.headers(), map);
    MessageHeaders headers = new MessageHeaders(map);
    assertThat(new String(headers.get(KafkaHeaders.DLT_EXCEPTION_FQCN, byte[].class)))
            .contains("RuntimeException");
    assertThat(headers.get(KafkaHeaders.DLT_EXCEPTION_MESSAGE, byte[].class))
            .isEqualTo("fail for max failures".getBytes());
    assertThat(headers.get(KafkaHeaders.DLT_EXCEPTION_STACKTRACE)).isNotNull();
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_OFFSET, byte[].class)[3]).isEqualTo((byte) 0);
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_PARTITION, byte[].class)[3]).isEqualTo((byte) 0);
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP, byte[].class)).isNotNull();
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP_TYPE, byte[].class)).isNotNull();
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_TOPIC, byte[].class)).isEqualTo(topic3.getBytes());
    assertThat(headers.get("baz")).isEqualTo("qux".getBytes());
    pf.destroy();
    assertThat(stopLatch.await(10, TimeUnit.SECONDS)).isTrue();
    verify(afterRollbackProcessor).clearThreadState();
    logger.info("Stop testMaxAttempts");
}