Example usage for org.springframework.kafka.core KafkaTemplate sendDefault

List of usage examples for org.springframework.kafka.core KafkaTemplate sendDefault

Introduction

On this page you can find example usage of org.springframework.kafka.core KafkaTemplate sendDefault.

Prototype

@Override
public ListenableFuture<SendResult<K, V>> sendDefault(Integer partition, K key, @Nullable V data)
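
The sketch below is a minimal, self-contained illustration (not taken from the tests that follow) of how sendDefault is typically used: build a ProducerFactory, set a default topic on the KafkaTemplate, and send with an explicit partition and key. The broker address "localhost:9092", the topic name "example-topic", and the class name SendDefaultExample are illustrative assumptions, not values from the original tests.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.support.SendResult;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;

public class SendDefaultExample {

    public static void main(String[] args) {
        // Producer configuration; the bootstrap server address is an assumption.
        Map<String, Object> senderProps = new HashMap<>();
        senderProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        senderProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
        senderProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);

        ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
        KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
        template.setDefaultTopic("example-topic"); // assumed topic name

        // Send to partition 0 of the default topic with key 1 and value "hello".
        ListenableFuture<SendResult<Integer, String>> future = template.sendDefault(0, 1, "hello");
        future.addCallback(new ListenableFutureCallback<SendResult<Integer, String>>() {

            @Override
            public void onSuccess(SendResult<Integer, String> result) {
                System.out.println("sent to offset " + result.getRecordMetadata().offset());
            }

            @Override
            public void onFailure(Throwable ex) {
                ex.printStackTrace();
            }

        });
        template.flush();
    }

}

The examples below follow the same setDefaultTopic/sendDefault pattern, but send to an embedded Kafka broker from inside the spring-kafka listener-container tests.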

Usage

From source file:org.springframework.kafka.listener.ConcurrentMessageListenerContainerTests.java

@Test
public void testAckOnErrorRecord() throws Exception {
    logger.info("Start ack on error");
    Map<String, Object> props = KafkaTestUtils.consumerProps("test9", "false", embeddedKafka);
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    final CountDownLatch latch = new CountDownLatch(4);
    ContainerProperties containerProps = new ContainerProperties(topic9);
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("auto ack on error: " + message);
        latch.countDown();
        if (message.value().startsWith("b")) {
            throw new RuntimeException();
        }
    });
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.RECORD);
    containerProps.setAckOnError(false);
    ConcurrentMessageListenerContainer<Integer, String> container = new ConcurrentMessageListenerContainer<>(cf,
            containerProps);
    container.setConcurrency(2);
    container.setBeanName("testAckOnError");
    container.start();
    ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic());
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic9);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "qux");
    template.flush();
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    container.stop();
    Consumer<Integer, String> consumer = cf.createConsumer();
    consumer.assign(Arrays.asList(new TopicPartition(topic9, 0), new TopicPartition(topic9, 1)));
    // this consumer is positioned at 1, the next offset after the successfully
    // processed 'foo'; it has not been updated because 'baz' failed
    assertThat(consumer.position(new TopicPartition(topic9, 0))).isEqualTo(1);
    // this consumer is positioned at 2, the next offset after the successfully
    // processed 'qux'; it has been updated even though 'bar' failed
    assertThat(consumer.position(new TopicPartition(topic9, 1))).isEqualTo(2);
    consumer.close();
    logger.info("Stop ack on error");
}

From source file:org.springframework.kafka.listener.ConcurrentMessageListenerContainerTests.java

@Test
public void testRebalanceWithSlowConsumer() throws Exception {
    this.logger.info("Start auto");
    Map<String, Object> props = KafkaTestUtils.consumerProps("test101", "false", embeddedKafka);
    props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "20000");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic1);
    final CountDownLatch latch = new CountDownLatch(8);
    final Set<String> listenerThreadNames = Collections.synchronizedSet(new HashSet<String>());
    List<String> receivedMessages = Collections.synchronizedList(new ArrayList<>());
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        listenerThreadNames.add(Thread.currentThread().getName());
        try {
            Thread.sleep(2000);
        } catch (InterruptedException e) {
            // ignore
        }
        receivedMessages.add(message.value());
        listenerThreadNames.add(Thread.currentThread().getName());
        latch.countDown();
    });

    ConcurrentMessageListenerContainer<Integer, String> container = new ConcurrentMessageListenerContainer<>(cf,
            containerProps);
    ConcurrentMessageListenerContainer<Integer, String> container2 = new ConcurrentMessageListenerContainer<>(
            cf, containerProps);
    container.setConcurrency(1);
    container2.setConcurrency(1);
    container.setBeanName("testAuto");
    container2.setBeanName("testAuto2");
    container.start();
    ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic());
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic1);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(0, 2, "bar");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(0, 2, "qux");
    template.sendDefault(1, 2, "corge");
    template.sendDefault(1, 2, "grault");
    template.sendDefault(1, 2, "garply");
    template.sendDefault(1, 2, "waldo");
    template.flush();
    container2.start();
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(receivedMessages).containsOnlyOnce("foo", "bar", "baz", "qux", "corge", "grault", "garply",
            "waldo");
    // all messages are received
    assertThat(receivedMessages).hasSize(8);
    // messages are received on separate threads
    assertThat(listenerThreadNames.size()).isGreaterThanOrEqualTo(2);
    container.stop();
    container2.stop();
    this.logger.info("Stop auto");
}

From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java

@Test
public void testSlowConsumerCommitsAreProcessed() throws Exception {
    Map<String, Object> props = KafkaTestUtils.consumerProps("slow", "false", embeddedKafka);
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic5);
    containerProps.setAckCount(1);
    containerProps.setPauseAfter(100);
    containerProps.setAckMode(AckMode.MANUAL_IMMEDIATE);
    containerProps.setSyncCommits(true);

    containerProps.setMessageListener((AcknowledgingMessageListener<Integer, String>) (message, ack) -> {
        logger.info("slow: " + message);
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        ack.acknowledge();
    });

    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);

    container.setBeanName("testSlow");

    container.start();
    Consumer<?, ?> consumer = spyOnConsumer(container);

    final CountDownLatch latch = new CountDownLatch(3);

    willAnswer(invocation -> {

        try {
            return invocation.callRealMethod();
        } finally {
            latch.countDown();
        }

    }).given(consumer).commitSync(any());

    ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic());

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic5);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(1, 2, "bar");
    template.flush();
    Thread.sleep(300);
    template.sendDefault(0, 0, "fiz");
    template.sendDefault(1, 2, "buz");
    template.flush();

    // Verify that commitSync is called when paused
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    verify(consumer, atLeastOnce()).pause(anyObject());
    verify(consumer, atLeastOnce()).resume(anyObject());
    container.stop();
}

From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java

@Test
public void testCommitsAreFlushedOnStop() throws Exception {
    Map<String, Object> props = KafkaTestUtils.consumerProps("flushedOnStop", "false", embeddedKafka);
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic5);
    containerProps.setAckCount(1);
    containerProps.setPauseAfter(100);
    // set large values, ensuring that commits don't happen before `stop()`
    containerProps.setAckTime(20000);
    containerProps.setAckCount(20000);
    containerProps.setAckMode(AckMode.COUNT_TIME);

    final CountDownLatch latch = new CountDownLatch(4);
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("flushed: " + message);
        latch.countDown();
    });
    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testManualFlushed");

    container.start();
    Consumer<?, ?> consumer = spyOnConsumer(container);
    ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic());

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic5);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(1, 2, "bar");
    template.flush();
    Thread.sleep(300);
    template.sendDefault(0, 0, "fiz");
    template.sendDefault(1, 2, "buz");
    template.flush();

    // Wait until all four records have been delivered to the listener
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    // Verify that just the initial commit is processed before stop
    verify(consumer, times(1)).commitSync(any());
    container.stop();
    // Verify that a commit has been made on stop
    verify(consumer, times(2)).commitSync(any());
}

From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java

@Test
public void testRecordAck() throws Exception {
    logger.info("Start record ack");
    Map<String, Object> props = KafkaTestUtils.consumerProps("test6", "false", embeddedKafka);
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic6);
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("record ack: " + message);
    });
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.RECORD);
    containerProps.setAckOnError(false);

    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testRecordAcks");
    container.start();
    Consumer<?, ?> containerConsumer = spyOnConsumer(container);
    final CountDownLatch latch = new CountDownLatch(2);
    willAnswer(invocation -> {

        @SuppressWarnings({ "unchecked" })
        Map<TopicPartition, OffsetAndMetadata> map = (Map<TopicPartition, OffsetAndMetadata>) invocation
                .getArguments()[0];
        try {
            return invocation.callRealMethod();
        } finally {
            for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
                if (entry.getValue().offset() == 2) {
                    latch.countDown();
                }
            }
        }

    }).given(containerConsumer).commitSync(any());
    ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic());
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic6);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "qux");
    template.flush();
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    Consumer<Integer, String> consumer = cf.createConsumer();
    consumer.assign(Arrays.asList(new TopicPartition(topic6, 0), new TopicPartition(topic6, 1)));
    assertThat(consumer.position(new TopicPartition(topic6, 0))).isEqualTo(2);
    assertThat(consumer.position(new TopicPartition(topic6, 1))).isEqualTo(2);
    container.stop();
    consumer.close();
    logger.info("Stop record ack");
}

From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java

@Test
public void testBatchAck() throws Exception {
    logger.info("Start batch ack");

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic7);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(1, 0, "qux");
    template.flush();

    Map<String, Object> props = KafkaTestUtils.consumerProps("test6", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic7);
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("batch ack: " + message);
    });
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.BATCH);
    containerProps.setPollTimeout(10000);
    containerProps.setAckOnError(false);

    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testBatchAcks");
    container.start();
    Consumer<?, ?> containerConsumer = spyOnConsumer(container);
    final CountDownLatch firstBatchLatch = new CountDownLatch(1);
    final CountDownLatch latch = new CountDownLatch(2);
    willAnswer(invocation -> {

        @SuppressWarnings({ "unchecked" })
        Map<TopicPartition, OffsetAndMetadata> map = (Map<TopicPartition, OffsetAndMetadata>) invocation
                .getArguments()[0];
        for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
            if (entry.getValue().offset() == 2) {
                firstBatchLatch.countDown();
            }
        }
        try {
            return invocation.callRealMethod();
        } finally {
            for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
                if (entry.getValue().offset() == 2) {
                    latch.countDown();
                }
            }
        }

    }).given(containerConsumer).commitSync(any());

    assertThat(firstBatchLatch.await(9, TimeUnit.SECONDS)).isTrue();

    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    Consumer<Integer, String> consumer = cf.createConsumer();
    consumer.assign(Arrays.asList(new TopicPartition(topic7, 0), new TopicPartition(topic7, 1)));
    assertThat(consumer.position(new TopicPartition(topic7, 0))).isEqualTo(2);
    assertThat(consumer.position(new TopicPartition(topic7, 1))).isEqualTo(2);
    container.stop();
    consumer.close();
    logger.info("Stop batch ack");
}

From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java

@Test
public void testBatchListener() throws Exception {
    logger.info("Start batch listener");

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic8);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(1, 0, "qux");
    template.flush();

    Map<String, Object> props = KafkaTestUtils.consumerProps("test8", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic8);
    containerProps.setMessageListener((BatchMessageListener<Integer, String>) messages -> {
        logger.info("batch listener: " + messages);
    });
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.BATCH);
    containerProps.setPollTimeout(10000);
    containerProps.setAckOnError(false);

    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testBatchListener");
    container.start();
    Consumer<?, ?> containerConsumer = spyOnConsumer(container);
    final CountDownLatch firstBatchLatch = new CountDownLatch(1);
    final CountDownLatch latch = new CountDownLatch(2);
    willAnswer(invocation -> {

        @SuppressWarnings({ "unchecked" })
        Map<TopicPartition, OffsetAndMetadata> map = (Map<TopicPartition, OffsetAndMetadata>) invocation
                .getArguments()[0];
        for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
            if (entry.getValue().offset() == 2) {
                firstBatchLatch.countDown();
            }
        }
        try {
            return invocation.callRealMethod();
        } finally {
            for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
                if (entry.getValue().offset() == 2) {
                    latch.countDown();
                }
            }
        }

    }).given(containerConsumer).commitSync(any());

    assertThat(firstBatchLatch.await(9, TimeUnit.SECONDS)).isTrue();

    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    Consumer<Integer, String> consumer = cf.createConsumer();
    consumer.assign(Arrays.asList(new TopicPartition(topic8, 0), new TopicPartition(topic8, 1)));
    assertThat(consumer.position(new TopicPartition(topic8, 0))).isEqualTo(2);
    assertThat(consumer.position(new TopicPartition(topic8, 1))).isEqualTo(2);
    container.stop();
    consumer.close();
    logger.info("Stop batch listener");
}

From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java

@Test
public void testBatchListenerManual() throws Exception {
    logger.info("Start batch listener manual");

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic9);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(1, 0, "qux");
    template.flush();

    Map<String, Object> props = KafkaTestUtils.consumerProps("test9", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic9);
    final CountDownLatch latch = new CountDownLatch(4);
    containerProps.setMessageListener((BatchAcknowledgingMessageListener<Integer, String>) (messages, ack) -> {
        logger.info("batch listener manual: " + messages);
        for (int i = 0; i < messages.size(); i++) {
            latch.countDown();
        }
        ack.acknowledge();
    });
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.MANUAL_IMMEDIATE);
    containerProps.setPollTimeout(10000);
    containerProps.setAckOnError(false);

    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testBatchListenerManual");
    container.start();
    Consumer<?, ?> containerConsumer = spyOnConsumer(container);
    final CountDownLatch commitLatch = new CountDownLatch(2);
    willAnswer(invocation -> {

        @SuppressWarnings({ "unchecked" })
        Map<TopicPartition, OffsetAndMetadata> map = (Map<TopicPartition, OffsetAndMetadata>) invocation
                .getArguments()[0];
        try {
            return invocation.callRealMethod();
        } finally {
            for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
                if (entry.getValue().offset() == 2) {
                    commitLatch.countDown();
                }
            }
        }

    }).given(containerConsumer).commitSync(any());

    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(commitLatch.await(60, TimeUnit.SECONDS)).isTrue();
    Consumer<Integer, String> consumer = cf.createConsumer();
    consumer.assign(Arrays.asList(new TopicPartition(topic9, 0), new TopicPartition(topic9, 1)));
    assertThat(consumer.position(new TopicPartition(topic9, 0))).isEqualTo(2);
    assertThat(consumer.position(new TopicPartition(topic9, 1))).isEqualTo(2);
    container.stop();
    consumer.close();
    logger.info("Stop batch listener manual");
}

From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java

@Test
public void testBatchListenerErrors() throws Exception {
    logger.info("Start batch listener errors");

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic10);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(1, 0, "qux");
    template.flush();

    Map<String, Object> props = KafkaTestUtils.consumerProps("test9", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic10);
    containerProps.setMessageListener((BatchMessageListener<Integer, String>) messages -> {
        logger.info("batch listener errors: " + messages);
        throw new RuntimeException("intentional");
    });
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.BATCH);
    containerProps.setPollTimeout(10000);
    containerProps.setAckOnError(true);
    final CountDownLatch latch = new CountDownLatch(4);
    containerProps.setGenericErrorHandler((BatchErrorHandler) (t, messages) -> {
        new BatchLoggingErrorHandler().handle(t, messages);
        for (int i = 0; i < messages.count(); i++) {
            latch.countDown();
        }
    });

    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testBatchListenerErrors");
    container.start();
    Consumer<?, ?> containerConsumer = spyOnConsumer(container);
    final CountDownLatch commitLatch = new CountDownLatch(2);
    willAnswer(invocation -> {

        @SuppressWarnings({ "unchecked" })
        Map<TopicPartition, OffsetAndMetadata> map = (Map<TopicPartition, OffsetAndMetadata>) invocation
                .getArguments()[0];
        try {
            return invocation.callRealMethod();
        } finally {
            for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
                if (entry.getValue().offset() == 2) {
                    commitLatch.countDown();
                }
            }
        }

    }).given(containerConsumer).commitSync(any());

    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(commitLatch.await(60, TimeUnit.SECONDS)).isTrue();
    Consumer<Integer, String> consumer = cf.createConsumer();
    consumer.assign(Arrays.asList(new TopicPartition(topic10, 0), new TopicPartition(topic10, 1)));
    assertThat(consumer.position(new TopicPartition(topic10, 0))).isEqualTo(2);
    assertThat(consumer.position(new TopicPartition(topic10, 1))).isEqualTo(2);
    container.stop();
    consumer.close();
    logger.info("Stop batch listener errors");
}

From source file:org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java

private void testSeekGuts(Map<String, Object> props, String topic) throws Exception {
    logger.info("Start seek " + topic);
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic11);
    final AtomicReference<CountDownLatch> latch = new AtomicReference<>(new CountDownLatch(6));
    final AtomicBoolean seekInitial = new AtomicBoolean();
    final CountDownLatch idleLatch = new CountDownLatch(1);
    class Listener implements MessageListener<Integer, String>, ConsumerSeekAware {

        private ConsumerSeekCallback callback;

        private Thread registerThread;

        private Thread messageThread;

        @Override
        public void onMessage(ConsumerRecord<Integer, String> data) {
            messageThread = Thread.currentThread();
            latch.get().countDown();
            if (latch.get().getCount() == 2 && !seekInitial.get()) {
                callback.seek(topic11, 0, 1);
                callback.seek(topic11, 1, 1);
            }
        }

        @Override
        public void registerSeekCallback(ConsumerSeekCallback callback) {
            this.callback = callback;
            this.registerThread = Thread.currentThread();
        }

        @Override
        public void onPartitionsAssigned(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback) {
            if (seekInitial.get()) {
                for (Entry<TopicPartition, Long> assignment : assignments.entrySet()) {
                    callback.seek(assignment.getKey().topic(), assignment.getKey().partition(),
                            assignment.getValue() - 1);
                }
            }
        }

        @Override
        public void onIdleContainer(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback) {
            for (Entry<TopicPartition, Long> assignment : assignments.entrySet()) {
                callback.seek(assignment.getKey().topic(), assignment.getKey().partition(),
                        assignment.getValue() - 1);
            }
            idleLatch.countDown();
        }

    }
    Listener messageListener = new Listener();
    containerProps.setMessageListener(messageListener);
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.RECORD);
    containerProps.setAckOnError(false);
    containerProps.setIdleEventInterval(60000L);

    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testRecordAcks");
    container.start();
    ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic());
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic11);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "qux");
    template.flush();
    assertThat(latch.get().await(60, TimeUnit.SECONDS)).isTrue();
    container.stop();
    assertThat(messageListener.registerThread).isSameAs(messageListener.messageThread);

    // Now test initial seek of assigned partitions.
    latch.set(new CountDownLatch(2));
    seekInitial.set(true);
    container.start();
    assertThat(latch.get().await(60, TimeUnit.SECONDS)).isTrue();

    // Now seek on idle
    latch.set(new CountDownLatch(2));
    seekInitial.set(true);
    container.getContainerProperties().setIdleEventInterval(100L);
    final AtomicBoolean idleEventPublished = new AtomicBoolean();
    container.setApplicationEventPublisher(new ApplicationEventPublisher() {

        @Override
        public void publishEvent(Object event) {
            // NOSONAR
        }

        @Override
        public void publishEvent(ApplicationEvent event) {
            idleEventPublished.set(true);
        }

    });
    assertThat(idleLatch.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(idleEventPublished.get()).isTrue();
    assertThat(latch.get().await(60, TimeUnit.SECONDS)).isTrue();
    container.stop();
    logger.info("Stop seek");
}