Example usage for org.springframework.kafka.core KafkaTemplate sendDefault

Introduction

On this page you can find example usages for org.springframework.kafka.core KafkaTemplate sendDefault.

Prototype

@Override
public ListenableFuture<SendResult<K, V>> sendDefault(Integer partition, K key, @Nullable V data);
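
Before the full test-class examples below, here is a minimal sketch of calling sendDefault against the template's default topic. This snippet is not taken from the examples; the broker address and topic name are placeholder assumptions.

Map<String, Object> producerProps = new HashMap<>();
producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker
producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);

ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(producerProps);
KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
template.setDefaultTopic("example-topic"); // assumed topic name

// sendDefault(partition, key, value) publishes to the default topic.
ListenableFuture<SendResult<Integer, String>> future = template.sendDefault(0, 1, "hello");
future.addCallback(result -> System.out.println("sent: " + result.getRecordMetadata()),
        ex -> System.err.println("send failed: " + ex.getMessage()));
template.flush();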

Usage

From source file: org.springframework.kafka.listener.KafkaMessageListenerContainerTests.java

@Test
public void testDefinedPartitions() throws Exception {
    this.logger.info("Start defined parts");
    Map<String, Object> props = KafkaTestUtils.consumerProps("test3", "false", embeddedKafka);
    TopicPartitionInitialOffset topic1Partition0 = new TopicPartitionInitialOffset(topic13, 0, 0L);

    CountDownLatch initialConsumersLatch = new CountDownLatch(2);

    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<Integer, String>(props) {

        @Override
        public Consumer<Integer, String> createConsumer() {
            return new KafkaConsumer<Integer, String>(props) {

                @Override
                public ConsumerRecords<Integer, String> poll(long timeout) {
                    try {
                        return super.poll(timeout);
                    } finally {
                        initialConsumersLatch.countDown();
                    }
                }

            };
        }

    };

    ContainerProperties container1Props = new ContainerProperties(topic1Partition0);
    CountDownLatch latch1 = new CountDownLatch(2);
    container1Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part: " + message);
        latch1.countDown();
    });
    KafkaMessageListenerContainer<Integer, String> container1 = new KafkaMessageListenerContainer<>(cf,
            container1Props);
    container1.setBeanName("b1");
    container1.start();

    CountDownLatch stopLatch1 = new CountDownLatch(1);

    willAnswer(invocation -> {

        try {
            return invocation.callRealMethod();
        } finally {
            stopLatch1.countDown();
        }

    }).given(spyOnConsumer(container1)).commitSync(any());

    TopicPartitionInitialOffset topic1Partition1 = new TopicPartitionInitialOffset(topic13, 1, 0L);
    ContainerProperties container2Props = new ContainerProperties(topic1Partition1);
    CountDownLatch latch2 = new CountDownLatch(2);
    container2Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part: " + message);
        latch2.countDown();
    });
    KafkaMessageListenerContainer<Integer, String> container2 = new KafkaMessageListenerContainer<>(cf,
            container2Props);
    container2.setBeanName("b2");
    container2.start();

    CountDownLatch stopLatch2 = new CountDownLatch(1);

    willAnswer(invocation -> {

        try {
            return invocation.callRealMethod();
        } finally {
            stopLatch2.countDown();
        }

    }).given(spyOnConsumer(container2)).commitSync(any());

    assertThat(initialConsumersLatch.await(20, TimeUnit.SECONDS)).isTrue();

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    ProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    template.setDefaultTopic(topic13);
    template.sendDefault(0, 0, "foo");
    template.sendDefault(1, 2, "bar");
    template.sendDefault(0, 0, "baz");
    template.sendDefault(1, 2, "qux");
    template.flush();

    assertThat(latch1.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(latch2.await(60, TimeUnit.SECONDS)).isTrue();

    assertThat(stopLatch1.await(60, TimeUnit.SECONDS)).isTrue();
    container1.stop();
    assertThat(stopLatch2.await(60, TimeUnit.SECONDS)).isTrue();
    container2.stop();

    cf = new DefaultKafkaConsumerFactory<>(props);
    // reset earliest
    ContainerProperties container3Props = new ContainerProperties(topic1Partition0, topic1Partition1);

    CountDownLatch latch3 = new CountDownLatch(4);
    container3Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part e: " + message);
        latch3.countDown();
    });
    KafkaMessageListenerContainer<Integer, String> resettingContainer = new KafkaMessageListenerContainer<>(cf,
            container3Props);
    resettingContainer.setBeanName("b3");
    resettingContainer.start();

    CountDownLatch stopLatch3 = new CountDownLatch(1);

    willAnswer(invocation -> {

        try {
            return invocation.callRealMethod();
        } finally {
            stopLatch3.countDown();
        }

    }).given(spyOnConsumer(resettingContainer)).commitSync(any());

    assertThat(latch3.await(60, TimeUnit.SECONDS)).isTrue();

    assertThat(stopLatch3.await(60, TimeUnit.SECONDS)).isTrue();
    resettingContainer.stop();
    assertThat(latch3.getCount()).isEqualTo(0L);

    cf = new DefaultKafkaConsumerFactory<>(props);
    // reset beginning for part 0, minus one for part 1
    topic1Partition0 = new TopicPartitionInitialOffset(topic13, 0, -1000L);
    topic1Partition1 = new TopicPartitionInitialOffset(topic13, 1, -1L);
    ContainerProperties container4Props = new ContainerProperties(topic1Partition0, topic1Partition1);

    CountDownLatch latch4 = new CountDownLatch(3);
    AtomicReference<String> receivedMessage = new AtomicReference<>();
    container4Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part 0, -1: " + message);
        receivedMessage.set(message.value());
        latch4.countDown();
    });
    resettingContainer = new KafkaMessageListenerContainer<>(cf, container4Props);
    resettingContainer.setBeanName("b4");

    resettingContainer.start();

    CountDownLatch stopLatch4 = new CountDownLatch(1);

    willAnswer(invocation -> {

        try {
            return invocation.callRealMethod();
        } finally {
            stopLatch4.countDown();
        }

    }).given(spyOnConsumer(resettingContainer)).commitSync(any());

    assertThat(latch4.await(60, TimeUnit.SECONDS)).isTrue();

    assertThat(stopLatch4.await(60, TimeUnit.SECONDS)).isTrue();
    resettingContainer.stop();
    assertThat(receivedMessage.get()).isIn("baz", "qux");
    assertThat(latch4.getCount()).isEqualTo(0L);

    // reset plus one
    template.sendDefault(0, 0, "FOO");
    template.sendDefault(1, 2, "BAR");
    template.flush();

    topic1Partition0 = new TopicPartitionInitialOffset(topic13, 0, 1L);
    topic1Partition1 = new TopicPartitionInitialOffset(topic13, 1, 1L);
    ContainerProperties container5Props = new ContainerProperties(topic1Partition0, topic1Partition1);

    final CountDownLatch latch5 = new CountDownLatch(4);
    final List<String> messages = new ArrayList<>();
    container5Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part 1: " + message);
        messages.add(message.value());
        latch5.countDown();
    });

    resettingContainer = new KafkaMessageListenerContainer<>(cf, container5Props);
    resettingContainer.setBeanName("b5");
    resettingContainer.start();

    CountDownLatch stopLatch5 = new CountDownLatch(1);

    willAnswer(invocation -> {

        try {
            return invocation.callRealMethod();
        } finally {
            stopLatch5.countDown();
        }

    }).given(spyOnConsumer(resettingContainer)).commitSync(any());

    assertThat(latch5.await(60, TimeUnit.SECONDS)).isTrue();

    assertThat(stopLatch5.await(60, TimeUnit.SECONDS)).isTrue();
    resettingContainer.stop();
    assertThat(messages).contains("baz", "qux", "FOO", "BAR");

    this.logger.info("+++++++++++++++++++++ Start relative reset");

    template.sendDefault(0, 0, "BAZ");
    template.sendDefault(1, 2, "QUX");
    template.sendDefault(0, 0, "FIZ");
    template.sendDefault(1, 2, "BUZ");
    template.flush();

    topic1Partition0 = new TopicPartitionInitialOffset(topic13, 0, 1L, true);
    topic1Partition1 = new TopicPartitionInitialOffset(topic13, 1, -1L, true);
    ContainerProperties container6Props = new ContainerProperties(topic1Partition0, topic1Partition1);

    final CountDownLatch latch6 = new CountDownLatch(4);
    final List<String> messages6 = new ArrayList<>();
    container6Props.setMessageListener((MessageListener<Integer, String>) message -> {
        logger.info("defined part relative: " + message);
        messages6.add(message.value());
        latch6.countDown();
    });

    resettingContainer = new KafkaMessageListenerContainer<>(cf, container6Props);
    resettingContainer.setBeanName("b6");
    resettingContainer.start();

    CountDownLatch stopLatch6 = new CountDownLatch(1);

    willAnswer(invocation -> {

        try {
            return invocation.callRealMethod();
        } finally {
            stopLatch6.countDown();
        }

    }).given(spyOnConsumer(resettingContainer)).commitSync(any());

    assertThat(latch6.await(60, TimeUnit.SECONDS)).isTrue();

    assertThat(stopLatch6.await(60, TimeUnit.SECONDS)).isTrue();
    resettingContainer.stop();
    assertThat(messages6).hasSize(4);
    assertThat(messages6).contains("FIZ", "BAR", "QUX", "BUZ");

    this.logger.info("Stop auto parts");
}
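
The example above drives sendDefault(partition, key, value) against two fixed partitions and then replays them through several TopicPartitionInitialOffset seek modes. As a rough guide to the three constructions the test uses, here is a sketch rather than additional test code; the topic name below is a placeholder:

// Absolute: start partition 0 at offset 0.
new TopicPartitionInitialOffset("some-topic", 0, 0L);

// Negative offset without the flag: relative to the end of the partition
// (-1L replays the last record; a large rewind such as -1000L stops at the beginning).
new TopicPartitionInitialOffset("some-topic", 1, -1L);

// relativeToCurrent = true: relative to the consumer's current committed position
// (positive skips ahead, negative rewinds).
new TopicPartitionInitialOffset("some-topic", 0, 1L, true);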

From source file: org.springframework.kafka.listener.TransactionalContainerTests.java

@SuppressWarnings("unchecked")
@Test
public void testRollbackRecord() throws Exception {
    logger.info("Start testRollbackRecord");
    Map<String, Object> props = KafkaTestUtils.consumerProps("txTest1", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "group");
    props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic1, topic2);
    containerProps.setGroupId("group");
    containerProps.setPollTimeout(10_000);

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    senderProps.put(ProducerConfig.RETRIES_CONFIG, 1);
    DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    pf.setTransactionIdPrefix("rr.");

    final KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    final AtomicBoolean failed = new AtomicBoolean();
    final CountDownLatch latch = new CountDownLatch(3);
    final AtomicReference<String> transactionalId = new AtomicReference<>();
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        latch.countDown();
        if (failed.compareAndSet(false, true)) {
            throw new RuntimeException("fail");
        }
        /*
         * Send a message to topic2 and wait for it so we don't stop the container too soon.
         */
        if (message.topic().equals(topic1)) {
            template.send(topic2, "bar");
            template.flush();
            transactionalId.set(KafkaTestUtils.getPropertyValue(
                    ProducerFactoryUtils.getTransactionalResourceHolder(pf).getProducer(),
                    "delegate.transactionManager.transactionalId", String.class));
        }
    });

    @SuppressWarnings({ "rawtypes" })
    KafkaTransactionManager tm = new KafkaTransactionManager(pf);
    containerProps.setTransactionManager(tm);
    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testRollbackRecord");
    container.start();

    template.setDefaultTopic(topic1);
    template.executeInTransaction(t -> {
        template.sendDefault(0, 0, "foo");
        return null;
    });
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    container.stop();
    Consumer<Integer, String> consumer = cf.createConsumer();
    final CountDownLatch subsLatch = new CountDownLatch(1);
    consumer.subscribe(Arrays.asList(topic1), new ConsumerRebalanceListener() {

        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            // empty
        }

        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            subsLatch.countDown();
        }

    });
    ConsumerRecords<Integer, String> records = null;
    int n = 0;
    while (subsLatch.getCount() > 0 && n++ < 600) {
        records = consumer.poll(Duration.ofMillis(100));
    }
    assertThat(subsLatch.await(1, TimeUnit.MILLISECONDS)).isTrue();
    assertThat(records.count()).isEqualTo(0);
    // depending on timing, the position might include the offset representing the commit in the log
    assertThat(consumer.position(new TopicPartition(topic1, 0))).isGreaterThanOrEqualTo(1L);
    assertThat(transactionalId.get()).startsWith("rr.group.txTopic");
    assertThat(KafkaTestUtils.getPropertyValue(pf, "consumerProducers", Map.class)).isEmpty();
    logger.info("Stop testRollbackRecord");
    pf.destroy();
    consumer.close();
}
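
The essential transactional wiring in the test above is a producer factory with a transaction-id prefix plus executeInTransaction; sendDefault then participates in that transaction, and a listener exception triggers a rollback. A minimal sketch, assuming senderProps is configured as in the test, with placeholder prefix and topic names:

DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
pf.setTransactionIdPrefix("tx."); // placeholder prefix; enables transactions

KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
template.setDefaultTopic("tx-topic"); // placeholder topic

// Sends issued inside the callback are committed atomically;
// throwing from the callback aborts the transaction instead.
template.executeInTransaction(t -> {
    template.sendDefault(0, 1, "payload");
    return true;
});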

From source file: org.springframework.kafka.listener.TransactionalContainerTests.java

@Test
public void testMaxFailures() throws Exception {
    logger.info("Start testMaxFailures");
    Map<String, Object> props = KafkaTestUtils.consumerProps("txTestMaxFailures", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "group");
    props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic3);
    containerProps.setPollTimeout(10_000);

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    senderProps.put(ProducerConfig.RETRIES_CONFIG, 1);
    DefaultKafkaProducerFactory<Object, Object> pf = new DefaultKafkaProducerFactory<>(senderProps);
    pf.setTransactionIdPrefix("maxAtt.");
    final KafkaTemplate<Object, Object> template = new KafkaTemplate<>(pf);
    final CountDownLatch latch = new CountDownLatch(1);
    AtomicReference<String> data = new AtomicReference<>();
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        data.set(message.value());
        if (message.offset() == 0) {
            throw new RuntimeException("fail for max failures");
        }
        latch.countDown();
    });

    @SuppressWarnings({ "rawtypes", "unchecked" })
    KafkaTransactionManager tm = new KafkaTransactionManager(pf);
    containerProps.setTransactionManager(tm);
    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testMaxFailures");
    final CountDownLatch recoverLatch = new CountDownLatch(1);
    DeadLetterPublishingRecoverer recoverer = new DeadLetterPublishingRecoverer(template) {

        @Override
        public void accept(ConsumerRecord<?, ?> record, Exception exception) {
            super.accept(record, exception);
            recoverLatch.countDown();
        }

    };
    DefaultAfterRollbackProcessor<Integer, String> afterRollbackProcessor = spy(
            new DefaultAfterRollbackProcessor<>(recoverer, 3));
    container.setAfterRollbackProcessor(afterRollbackProcessor);
    final CountDownLatch stopLatch = new CountDownLatch(1);
    container.setApplicationEventPublisher(e -> {
        if (e instanceof ConsumerStoppedEvent) {
            stopLatch.countDown();
        }
    });
    container.start();

    template.setDefaultTopic(topic3);
    template.executeInTransaction(t -> {
        RecordHeaders headers = new RecordHeaders(
                new RecordHeader[] { new RecordHeader("baz", "qux".getBytes()) });
        ProducerRecord<Object, Object> record = new ProducerRecord<>(topic3, 0, 0, "foo", headers);
        template.send(record);
        template.sendDefault(0, 0, "bar");
        return null;
    });
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(data.get()).isEqualTo("bar");
    assertThat(recoverLatch.await(10, TimeUnit.SECONDS)).isTrue();
    container.stop();
    Consumer<Integer, String> consumer = cf.createConsumer();
    embeddedKafka.consumeFromAnEmbeddedTopic(consumer, topic3DLT);
    ConsumerRecord<Integer, String> dltRecord = KafkaTestUtils.getSingleRecord(consumer, topic3DLT);
    assertThat(dltRecord.value()).isEqualTo("foo");
    DefaultKafkaHeaderMapper mapper = new DefaultKafkaHeaderMapper();
    Map<String, Object> map = new HashMap<>();
    mapper.toHeaders(dltRecord.headers(), map);
    MessageHeaders headers = new MessageHeaders(map);
    assertThat(new String(headers.get(KafkaHeaders.DLT_EXCEPTION_FQCN, byte[].class)))
            .contains("RuntimeException");
    assertThat(headers.get(KafkaHeaders.DLT_EXCEPTION_MESSAGE, byte[].class))
            .isEqualTo("fail for max failures".getBytes());
    assertThat(headers.get(KafkaHeaders.DLT_EXCEPTION_STACKTRACE)).isNotNull();
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_OFFSET, byte[].class)[3]).isEqualTo((byte) 0);
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_PARTITION, byte[].class)[3]).isEqualTo((byte) 0);
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP, byte[].class)).isNotNull();
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP_TYPE, byte[].class)).isNotNull();
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_TOPIC, byte[].class)).isEqualTo(topic3.getBytes());
    assertThat(headers.get("baz")).isEqualTo("qux".getBytes());
    pf.destroy();
    assertThat(stopLatch.await(10, TimeUnit.SECONDS)).isTrue();
    verify(afterRollbackProcessor).clearThreadState();
    logger.info("Stop testMaxAttempts");
}
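
The dead-letter wiring in this test can be distilled as follows; this is a sketch of the same pattern, not additional test code. By default, DeadLetterPublishingRecoverer publishes the failed record to "<original-topic>.DLT" on the same partition (topic3DLT above), and the after-rollback processor hands the record to the recoverer once the configured number of delivery attempts is exhausted:

// Publish failed records to the dead-letter topic via the same transactional template.
DeadLetterPublishingRecoverer recoverer = new DeadLetterPublishingRecoverer(template);

// Give up after 3 delivery attempts, then invoke the recoverer.
DefaultAfterRollbackProcessor<Integer, String> afterRollbackProcessor =
        new DefaultAfterRollbackProcessor<>(recoverer, 3);
container.setAfterRollbackProcessor(afterRollbackProcessor);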