Example usage for org.springframework.kafka.core KafkaTemplate flush

Introduction

This page collects example usages of org.springframework.kafka.core.KafkaTemplate.flush().

Prototype

@Override
public void flush() 

Document

Note: It only makes sense to invoke this method if the ProducerFactory serves up a singleton producer (such as the DefaultKafkaProducerFactory).
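
Before the longer test-suite examples below, here is a minimal sketch of the typical pattern: send a few records, then call flush() to force anything still buffered in the singleton producer out to the broker. The broker address and the topic name "demo" are placeholders for illustration; the classes come from org.apache.kafka.clients.producer, org.apache.kafka.common.serialization, and org.springframework.kafka.core.

    Map<String, Object> senderProps = new HashMap<>();
    senderProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
    senderProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
    senderProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic("demo"); // placeholder topic
    template.sendDefault(0, "hello");
    template.sendDefault(1, "world");
    template.flush(); // push any records still sitting in the producer's buffer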

Usage

From source file: org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java

@Test
public void testBatchListenerErrors() throws Exception {
    logger.info("Start batch listener errors");

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic10);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(1, 0, "qux");
    template.flush();

    Map<String, Object> props = KafkaTestUtils.consumerProps("test9", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic10);
    containerProps.setMessageListener((BatchMessageListener<Integer, String>) messages -> {
        logger.info("batch listener errors: " + messages);
        throw new RuntimeException("intentional");
    });
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.BATCH);
    containerProps.setPollTimeout(10000);
    containerProps.setAckOnError(true);
    final CountDownLatch latch = new CountDownLatch(4);
    containerProps.setGenericErrorHandler((BatchErrorHandler) (t, messages) -> {
        new BatchLoggingErrorHandler().handle(t, messages);
        for (int i = 0; i < messages.count(); i++) {
            latch.countDown();
        }
    });

    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testBatchListenerErrors");
    container.start();
    Consumer<?, ?> containerConsumer = spyOnConsumer(container);
    final CountDownLatch commitLatch = new CountDownLatch(2);
    willAnswer(invocation -> {

        @SuppressWarnings({ "unchecked" })
        Map<TopicPartition, OffsetAndMetadata> map = (Map<TopicPartition, OffsetAndMetadata>) invocation
                .getArguments()[0];
        try {
            return invocation.callRealMethod();
        } finally {
            for (Entry<TopicPartition, OffsetAndMetadata> entry : map.entrySet()) {
                if (entry.getValue().offset() == 2) {
                    commitLatch.countDown();
                }
            }
        }

    }).given(containerConsumer).commitSync(any());

    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(commitLatch.await(60, TimeUnit.SECONDS)).isTrue();
    Consumer<Integer, String> consumer = cf.createConsumer();
    consumer.assign(Arrays.asList(new TopicPartition(topic10, 0), new TopicPartition(topic10, 1)));
    assertThat(consumer.position(new TopicPartition(topic10, 0))).isEqualTo(2);
    assertThat(consumer.position(new TopicPartition(topic10, 1))).isEqualTo(2);
    container.stop();
    consumer.close();
    logger.info("Stop batch listener errors");
}
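
The test above batches four sends and issues a single flush(). When flushing after every send is acceptable, KafkaTemplate's two-argument constructor enables autoFlush instead; a short sketch reusing the producer factory from the example (note that flushing per send trades throughput for immediacy):

    KafkaTemplate<Integer, String> autoFlushTemplate = new KafkaTemplate<>(pf, true); // autoFlush = true
    autoFlushTemplate.setDefaultTopic(topic10);
    autoFlushTemplate.sendDefault(0, 0, "foo"); // flushed on every send; no explicit flush() needed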

From source file: org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java

private void testSeekGuts(Map<String, Object> props, String topic) throws Exception {
    logger.info("Start seek " + topic);
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic11);
    final AtomicReference<CountDownLatch> latch = new AtomicReference<>(new CountDownLatch(6));
    final AtomicBoolean seekInitial = new AtomicBoolean();
    final CountDownLatch idleLatch = new CountDownLatch(1);
    class Listener implements MessageListener<Integer, String>, ConsumerSeekAware {

        private ConsumerSeekCallback callback;

        private Thread registerThread;

        private Thread messageThread;

        @Override
        public void onMessage(ConsumerRecord<Integer, String> data) {
            messageThread = Thread.currentThread();
            latch.get().countDown();
            if (latch.get().getCount() == 2 && !seekInitial.get()) {
                callback.seek(topic11, 0, 1);
                callback.seek(topic11, 1, 1);
            }
        }

        @Override
        public void registerSeekCallback(ConsumerSeekCallback callback) {
            this.callback = callback;
            this.registerThread = Thread.currentThread();
        }

        @Override
        public void onPartitionsAssigned(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback) {
            if (seekInitial.get()) {
                for (Entry<TopicPartition, Long> assignment : assignments.entrySet()) {
                    callback.seek(assignment.getKey().topic(), assignment.getKey().partition(),
                            assignment.getValue() - 1);
                }
            }
        }

        @Override
        public void onIdleContainer(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback) {
            for (Entry<TopicPartition, Long> assignment : assignments.entrySet()) {
                callback.seek(assignment.getKey().topic(), assignment.getKey().partition(),
                        assignment.getValue() - 1);
            }
            idleLatch.countDown();
        }

    }
    Listener messageListener = new Listener();
    containerProps.setMessageListener(messageListener);
    containerProps.setSyncCommits(true);
    containerProps.setAckMode(AckMode.RECORD);
    containerProps.setAckOnError(false);
    containerProps.setIdleEventInterval(60000L);

    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testRecordAcks");
    container.start();
    ContainerTestUtils.waitForAssignment(container, embeddedKafka.getPartitionsPerTopic());
    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic11);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(1, 0, "bar");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 0, "qux");
    template.flush();
    assertThat(latch.get().await(60, TimeUnit.SECONDS)).isTrue();
    container.stop();
    assertThat(messageListener.registerThread).isSameAs(messageListener.messageThread);

    // Now test initial seek of assigned partitions.
    latch.set(new CountDownLatch(2));
    seekInitial.set(true);
    container.start();
    assertThat(latch.get().await(60, TimeUnit.SECONDS)).isTrue();

    // Now seek on idle
    latch.set(new CountDownLatch(2));
    seekInitial.set(true);
    container.getContainerProperties().setIdleEventInterval(100L);
    final AtomicBoolean idleEventPublished = new AtomicBoolean();
    container.setApplicationEventPublisher(new ApplicationEventPublisher() {

        @Override
        public void publishEvent(Object event) {
            // NOSONAR
        }

        @Override
        public void publishEvent(ApplicationEvent event) {
            idleEventPublished.set(true);
        }

    });
    assertThat(idleLatch.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(idleEventPublished.get()).isTrue();
    assertThat(latch.get().await(60, TimeUnit.SECONDS)).isTrue();
    container.stop();
    logger.info("Stop seek");
}
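
In this test flush() pushes all four records out before the latch wait. When confirmation of an individual record is needed rather than a bulk flush, the future returned by sendDefault() can be blocked on directly; a sketch with an illustrative timeout:

    // Wait for the broker to acknowledge this one record instead of flushing
    // the whole producer buffer (the 10-second timeout is illustrative).
    template.sendDefault(0, 0, "foo").get(10, TimeUnit.SECONDS);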

From source file: org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java

@Test
public void testDefinedPartitions() throws Exception {
    this.logger.info("Start defined parts");
    Map<String, Object> props = KafkaTestUtils.consumerProps("test3", "false", embeddedKafka);
    TopicPartitionInitialOffset topic1Partition0 = new TopicPartitionInitialOffset(topic13, 0, 0L);

    CountDownLatch initialConsumersLatch = new CountDownLatch(2);

    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<Integer, String>(props) {

        @Override
        public Consumer<Integer, String> createConsumer() {
            return new KafkaConsumer<Integer, String>(props) {

                @Override
                public ConsumerRecords<Integer, String> poll(long timeout) {
                    try {
                        return super.poll(timeout);
                    } finally {
                        initialConsumersLatch.countDown();
                    }
                }

            };
        }

    };

    ContainerProperties container1Props = new ContainerProperties(topic1Partition0);
    CountDownLatch latch1 = new CountDownLatch(2);
    container1Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part: " + message);
        latch1.countDown();
    });
    KafkaMessageListenerContainer<Integer, String> container1 = new KafkaMessageListenerContainer<>(cf,
            container1Props);
    container1.setBeanName("b1");
    container1.start();

    CountDownLatch stopLatch1 = new CountDownLatch(1);

    willAnswer(invocation -> {

        try {
            return invocation.callRealMethod();
        } finally {
            stopLatch1.countDown();
        }

    }).given(spyOnConsumer(container1)).commitSync(any());

    TopicPartitionInitialOffset topic1Partition1 = new TopicPartitionInitialOffset(topic13, 1, 0L);
    ContainerProperties container2Props = new ContainerProperties(topic1Partition1);
    CountDownLatch latch2 = new CountDownLatch(2);
    container2Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part: " + message);
        latch2.countDown();
    });
    KafkaMessageListenerContainer<Integer, String> container2 = new KafkaMessageListenerContainer<>(cf,
            container2Props);
    container2.setBeanName("b2");
    container2.start();

    CountDownLatch stopLatch2 = new CountDownLatch(1);

    willAnswer(invocation -> {

        try {
            return invocation.callRealMethod();
        } finally {
            stopLatch2.countDown();
        }

    }).given(spyOnConsumer(container2)).commitSync(any());

    assertThat(initialConsumersLatch.await(20, TimeUnit.SECONDS)).isTrue();

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic13);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(1, 2, "bar");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 2, "qux");
    template.flush();

    assertThat(latch1.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(latch2.await(60, TimeUnit.SECONDS)).isTrue();

    assertThat(stopLatch1.await(60, TimeUnit.SECONDS)).isTrue();
    container1.stop();
    assertThat(stopLatch2.await(60, TimeUnit.SECONDS)).isTrue();
    container2.stop();

    cf = new DefaultKafkaConsumerFactory<>(props);
    // reset earliest
    ContainerProperties container3Props = new ContainerProperties(topic1Partition0, topic1Partition1);

    CountDownLatch latch3 = new CountDownLatch(4);
    container3Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part e: " + message);
        latch3.countDown();
    });
    KafkaMessageListenerContainer<Integer, String> resettingContainer = new KafkaMessageListenerContainer<>(cf,
            container3Props);
    resettingContainer.setBeanName("b3");
    resettingContainer.start();

    CountDownLatch stopLatch3 = new CountDownLatch(1);

    willAnswer(invocation -> {

        try {
            return invocation.callRealMethod();
        } finally {
            stopLatch3.countDown();
        }

    }).given(spyOnConsumer(resettingContainer)).commitSync(any());

    assertThat(latch3.await(60, TimeUnit.SECONDS)).isTrue();

    assertThat(stopLatch3.await(60, TimeUnit.SECONDS)).isTrue();
    resettingContainer.stop();
    assertThat(latch3.getCount()).isEqualTo(0L);

    cf = new DefaultKafkaConsumerFactory<>(props);
    // reset beginning for part 0, minus one for part 1
    topic1Partition0 = new TopicPartitionInitialOffset(topic13, 0, -1000L);
    topic1Partition1 = new TopicPartitionInitialOffset(topic13, 1, -1L);
    ContainerProperties container4Props = new ContainerProperties(topic1Partition0, topic1Partition1);

    CountDownLatch latch4 = new CountDownLatch(3);
    AtomicReference<String> receivedMessage = new AtomicReference<>();
    container4Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part 0, -1: " + message);
        receivedMessage.set(message.value());
        latch4.countDown();
    });
    resettingContainer = new KafkaMessageListenerContainer<>(cf, container4Props);
    resettingContainer.setBeanName("b4");

    resettingContainer.start();

    CountDownLatch stopLatch4 = new CountDownLatch(1);

    willAnswer(invocation -> {

        try {
            return invocation.callRealMethod();
        } finally {
            stopLatch4.countDown();
        }

    }).given(spyOnConsumer(resettingContainer)).commitSync(any());

    assertThat(latch4.await(60, TimeUnit.SECONDS)).isTrue();

    assertThat(stopLatch4.await(60, TimeUnit.SECONDS)).isTrue();
    resettingContainer.stop();
    assertThat(receivedMessage.get()).isIn("baz", "qux");
    assertThat(latch4.getCount()).isEqualTo(0L);

    // reset plus one
    template.sendDefault(0, 0, "FOO");
    template.sendDefault(1, 2, "BAR");
    template.flush();

    topic1Partition0 = new TopicPartitionInitialOffset(topic13, 0, 1L);
    topic1Partition1 = new TopicPartitionInitialOffset(topic13, 1, 1L);
    ContainerProperties container5Props = new ContainerProperties(topic1Partition0, topic1Partition1);

    final CountDownLatch latch5 = new CountDownLatch(4);
    final List<String> messages = new ArrayList<>();
    container5Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part 1: " + message);
        messages.add(message.value());
        latch5.countDown();
    });

    resettingContainer = new KafkaMessageListenerContainer<>(cf, container5Props);
    resettingContainer.setBeanName("b5");
    resettingContainer.start();

    CountDownLatch stopLatch5 = new CountDownLatch(1);

    willAnswer(invocation -> {

        try {
            return invocation.callRealMethod();
        } finally {
            stopLatch5.countDown();
        }

    }).given(spyOnConsumer(resettingContainer)).commitSync(any());

    assertThat(latch5.await(60, TimeUnit.SECONDS)).isTrue();

    assertThat(stopLatch5.await(60, TimeUnit.SECONDS)).isTrue();
    resettingContainer.stop();
    assertThat(messages).contains("baz", "qux", "FOO", "BAR");

    this.logger.info("+++++++++++++++++++++ Start relative reset");

    template.sendDefault(0, 0, "BAZ");
    template.sendDefault(1, 2, "QUX");
    template.sendDefault(0, 0, "FIZ");
    template.sendDefault(1, 2, "BUZ");
    template.flush();

    topic1Partition0 = new TopicPartitionInitialOffset(topic13, 0, 1L, true);
    topic1Partition1 = new TopicPartitionInitialOffset(topic13, 1, -1L, true);
    ContainerProperties container6Props = new ContainerProperties(topic1Partition0, topic1Partition1);

    final CountDownLatch latch6 = new CountDownLatch(4);
    final List<String> messages6 = new ArrayList<>();
    container6Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part relative: " + message);
        messages6.add(message.value());
        latch6.countDown();
    });

    resettingContainer = new KafkaMessageListenerContainer<>(cf, container6Props);
    resettingContainer.setBeanName("b6");
    resettingContainer.start();

    CountDownLatch stopLatch6 = new CountDownLatch(1);

    willAnswer(invocation -> {

        try {
            return invocation.callRealMethod();
        } finally {
            stopLatch6.countDown();
        }

    }).given(spyOnConsumer(resettingContainer)).commitSync(any());

    assertThat(latch6.await(60, TimeUnit.SECONDS)).isTrue();

    assertThat(stopLatch6.await(60, TimeUnit.SECONDS)).isTrue();
    resettingContainer.stop();
    assertThat(messages6).hasSize(4);
    assertThat(messages6).contains("FIZ", "BAR", "QUX", "BUZ");

    this.logger.info("Stop auto parts");
}
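
The relative-reset stage above uses the four-argument TopicPartitionInitialOffset constructor, whose final boolean makes the initial offset relative to the current consumer position instead of absolute; the two forms side by side:

    new TopicPartitionInitialOffset(topic13, 0, 1L);        // absolute: start at offset 1
    new TopicPartitionInitialOffset(topic13, 1, -1L, true); // relative: one record before the current position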

From source file: org.springframework.kafka.listener.TransactionalContainerTests.java

@SuppressWarnings("unchecked")
@Test
public void testRollbackRecord() throws Exception {
    logger.info("Start testRollbackRecord");
    Map<String, Object> props = KafkaTestUtils.consumerProps("txTest1", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "group");
    props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic1, topic2);
    containerProps.setGroupId("group");
    containerProps.setPollTimeout(10_000);

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    senderProps.put(ProducerConfig.RETRIES_CONFIG, 1);
    DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    pf.setTransactionIdPrefix("rr.");

    final KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    final AtomicBoolean failed = new AtomicBoolean();
    final CountDownLatch latch = new CountDownLatch(3);
    final AtomicReference<String> transactionalId = new AtomicReference<>();
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        latch.countDown();
        if (failed.compareAndSet(false, true)) {
            throw new RuntimeException("fail");
        }
        /*
         * Send a message to topic2 and wait for it so we don't stop the container too soon.
         */
        if (message.topic().equals(topic1)) {
            template.send(topic2, "bar");
            template.flush();
            transactionalId.set(KafkaTestUtils.getPropertyValue(
                    ProducerFactoryUtils.getTransactionalResourceHolder(pf).getProducer(),
                    "delegate.transactionManager.transactionalId", String.class));
        }
    });

    @SuppressWarnings({ "rawtypes" })
    KafkaTransactionManager tm = new KafkaTransactionManager(pf);
    containerProps.setTransactionManager(tm);
    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testRollbackRecord");
    container.start();

    template.setDefaultTopic(topic1);
    template.executeInTransaction(t -> {
        template.sendDefault(0, 0, "foo");
        return null;
    });
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    container.stop();
    Consumer<Integer, String> consumer = cf.createConsumer();
    final CountDownLatch subsLatch = new CountDownLatch(1);
    consumer.subscribe(Arrays.asList(topic1), new ConsumerRebalanceListener() {

        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            // empty
        }

        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            subsLatch.countDown();
        }

    });
    ConsumerRecords<Integer, String> records = null;
    int n = 0;
    while (subsLatch.getCount() > 0 && n++ < 600) {
        records = consumer.poll(Duration.ofMillis(100));
    }
    assertThat(subsLatch.await(1, TimeUnit.MILLISECONDS)).isTrue();
    assertThat(records.count()).isEqualTo(0);
    // depending on timing, the position might include the offset representing the commit in the log
    assertThat(consumer.position(new TopicPartition(topic1, 0))).isGreaterThanOrEqualTo(1L);
    assertThat(transactionalId.get()).startsWith("rr.group.txTopic");
    assertThat(KafkaTestUtils.getPropertyValue(pf, "consumerProducers", Map.class)).isEmpty();
    logger.info("Stop testRollbackRecord");
    pf.destroy();
    consumer.close();
}
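
Here flush() is called on a transactional template from inside the listener, where the container has already begun a transaction. Outside a container, the same send-and-flush pairing can be wrapped explicitly in executeInTransaction(); a sketch using the template and topic from the example:

    template.executeInTransaction(t -> {
        t.send(topic2, "bar");
        t.flush(); // flushed inside the transaction boundary, before the commit
        return null;
    });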