Example usage for the org.springframework.kafka.listener.ContainerProperties constructor

Introduction

This page lists example usages of the org.springframework.kafka.listener.ContainerProperties constructor, taken from the Spring Kafka test sources.

Prototype

public ContainerProperties(String... topics)

Document

Create properties for a container that will subscribe to the specified topics.
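
The constructor is overloaded; all of the usages below pass topic names. As a minimal orientation sketch (topic and partition names here are illustrative, not from the examples below), the two most common overloads look like this:

    // Subscribe by topic name(s); the consumer group manages partition assignment.
    ContainerProperties byTopic = new ContainerProperties("someTopic");
    // Or assign specific partitions explicitly (no group rebalancing for these).
    ContainerProperties byPartition = new ContainerProperties(
            new TopicPartitionOffset("someTopic", 0));
    // A message listener must be set before the container starts.
    byTopic.setMessageListener((MessageListener<String, String>) rec ->
            System.out.println(rec.value()));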

Usage

From source file:org.springframework.kafka.listener.TransactionalContainerTests.java
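
This helper, shared by several @Test methods, mocks the Consumer, ConsumerFactory, Producer, and ProducerFactory, then verifies that a container configured with a KafkaTransactionManager (optionally wrapped in a ChainedKafkaTransactionManager) drives the producer through the expected in-order beginTransaction, send, sendOffsetsToTransaction, commitTransaction, and close calls for the delivered record.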

@SuppressWarnings({ "rawtypes", "unchecked" })
private void testConsumeAndProduceTransactionGuts(boolean chained, boolean handleError) throws Exception {
    Consumer consumer = mock(Consumer.class);
    final TopicPartition topicPartition = new TopicPartition("foo", 0);
    willAnswer(i -> {
        ((ConsumerRebalanceListener) i.getArgument(1))
                .onPartitionsAssigned(Collections.singletonList(topicPartition));
        return null;
    }).given(consumer).subscribe(any(Collection.class), any(ConsumerRebalanceListener.class));
    ConsumerRecords records = new ConsumerRecords(Collections.singletonMap(topicPartition,
            Collections.singletonList(new ConsumerRecord<>("foo", 0, 0, "key", "value"))));
    final AtomicBoolean done = new AtomicBoolean();
    willAnswer(i -> {
        if (done.compareAndSet(false, true)) {
            return records;
        } else {
            Thread.sleep(500);
            return null;
        }
    }).given(consumer).poll(any(Duration.class));
    ConsumerFactory cf = mock(ConsumerFactory.class);
    willReturn(consumer).given(cf).createConsumer("group", "", null);
    Producer producer = mock(Producer.class);
    final CountDownLatch closeLatch = new CountDownLatch(2);
    willAnswer(i -> {
        closeLatch.countDown();
        return null;
    }).given(producer).close();
    ProducerFactory pf = mock(ProducerFactory.class);
    given(pf.transactionCapable()).willReturn(true);
    final List<String> transactionalIds = new ArrayList<>();
    willAnswer(i -> {
        transactionalIds.add(TransactionSupport.getTransactionIdSuffix());
        return producer;
    }).given(pf).createProducer();
    KafkaTransactionManager tm = new KafkaTransactionManager(pf);
    PlatformTransactionManager ptm = tm;
    if (chained) {
        ptm = new ChainedKafkaTransactionManager(new SomeOtherTransactionManager(), tm);
    }
    ContainerProperties props = new ContainerProperties("foo");
    props.setGroupId("group");
    props.setTransactionManager(ptm);
    final KafkaTemplate template = new KafkaTemplate(pf);
    props.setMessageListener((MessageListener) m -> {
        template.send("bar", "baz");
        if (handleError) {
            throw new RuntimeException("fail");
        }
    });
    KafkaMessageListenerContainer container = new KafkaMessageListenerContainer<>(cf, props);
    container.setBeanName("commit");
    if (handleError) {
        container.setErrorHandler((e, data) -> {
        });
    }
    container.start();
    assertThat(closeLatch.await(10, TimeUnit.SECONDS)).isTrue();
    InOrder inOrder = inOrder(producer);
    inOrder.verify(producer).beginTransaction();
    inOrder.verify(producer).sendOffsetsToTransaction(
            Collections.singletonMap(topicPartition, new OffsetAndMetadata(0)), "group");
    inOrder.verify(producer).commitTransaction();
    inOrder.verify(producer).close();
    inOrder.verify(producer).beginTransaction();
    ArgumentCaptor<ProducerRecord> captor = ArgumentCaptor.forClass(ProducerRecord.class);
    inOrder.verify(producer).send(captor.capture(), any(Callback.class));
    assertThat(captor.getValue()).isEqualTo(new ProducerRecord("bar", "baz"));
    inOrder.verify(producer).sendOffsetsToTransaction(
            Collections.singletonMap(topicPartition, new OffsetAndMetadata(1)), "group");
    inOrder.verify(producer).commitTransaction();
    inOrder.verify(producer).close();
    container.stop();
    verify(pf, times(2)).createProducer();
    verifyNoMoreInteractions(producer);
    assertThat(transactionalIds.get(0)).isEqualTo("group.foo.0");
    assertThat(transactionalIds.get(1)).isEqualTo("group.foo.0");
}
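
Note the asserted transactional.id suffix, "group.foo.0": for container-managed transactions the suffix recorded by TransactionSupport is built as <group>.<topic>.<partition>, and a fresh producer is created per transaction, which is why createProducer is verified twice.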

From source file:org.springframework.kafka.listener.TransactionalContainerTests.java
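
In this variant the container is given a non-Kafka transaction manager (SomeOtherTransactionManager), so the listener forwards the consumed offset to the Kafka transaction itself via KafkaTemplate.sendOffsetsToTransaction.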

@SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void testConsumeAndProduceTransactionExternalTM() throws Exception {
    Consumer consumer = mock(Consumer.class);
    final TopicPartition topicPartition = new TopicPartition("foo", 0);
    willAnswer(i -> {
        ((ConsumerRebalanceListener) i.getArgument(1))
                .onPartitionsAssigned(Collections.singletonList(topicPartition));
        return null;
    }).given(consumer).subscribe(any(Collection.class), any(ConsumerRebalanceListener.class));
    final ConsumerRecords records = new ConsumerRecords(Collections.singletonMap(topicPartition,
            Collections.singletonList(new ConsumerRecord<>("foo", 0, 0, "key", "value"))));
    final AtomicBoolean done = new AtomicBoolean();
    willAnswer(i -> {
        if (done.compareAndSet(false, true)) {
            return records;
        } else {
            Thread.sleep(500);
            return null;
        }
    }).given(consumer).poll(any(Duration.class));
    ConsumerFactory cf = mock(ConsumerFactory.class);
    willReturn(consumer).given(cf).createConsumer("group", "", null);
    Producer producer = mock(Producer.class);

    final CountDownLatch closeLatch = new CountDownLatch(1);

    willAnswer(i -> {
        closeLatch.countDown();
        return null;
    }).given(producer).close();

    final ProducerFactory pf = mock(ProducerFactory.class);
    given(pf.transactionCapable()).willReturn(true);
    given(pf.createProducer()).willReturn(producer);
    ContainerProperties props = new ContainerProperties("foo");
    props.setGroupId("group");
    props.setTransactionManager(new SomeOtherTransactionManager());
    final KafkaTemplate template = new KafkaTemplate(pf);
    props.setMessageListener((MessageListener<String, String>) m -> {
        template.send("bar", "baz");
        template.sendOffsetsToTransaction(Collections.singletonMap(new TopicPartition(m.topic(), m.partition()),
                new OffsetAndMetadata(m.offset() + 1)));
    });
    KafkaMessageListenerContainer container = new KafkaMessageListenerContainer<>(cf, props);
    container.setBeanName("commit");
    container.start();

    assertThat(closeLatch.await(10, TimeUnit.SECONDS)).isTrue();

    InOrder inOrder = inOrder(producer);
    inOrder.verify(producer).beginTransaction();
    ArgumentCaptor<ProducerRecord> captor = ArgumentCaptor.forClass(ProducerRecord.class);
    inOrder.verify(producer).send(captor.capture(), any(Callback.class));
    assertThat(captor.getValue()).isEqualTo(new ProducerRecord("bar", "baz"));
    inOrder.verify(producer).sendOffsetsToTransaction(
            Collections.singletonMap(topicPartition, new OffsetAndMetadata(1)), "group");
    inOrder.verify(producer).commitTransaction();
    inOrder.verify(producer).close();
    container.stop();
    verify(pf).createProducer();
}

From source file:org.springframework.kafka.listener.TransactionalContainerTests.java
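
Unlike the mock-based tests above, this test runs against an embedded broker: the listener throws for the record at offset 0, the DefaultAfterRollbackProcessor gives up after three attempts, and the DeadLetterPublishingRecoverer republishes the failed record to topic3DLT, carrying the DLT_* diagnostic headers asserted at the end.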

@Test
public void testMaxFailures() throws Exception {
    logger.info("Start testMaxFailures");
    Map<String, Object> props = KafkaTestUtils.consumerProps("txTestMaxFailures", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "group");
    props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic3);
    containerProps.setPollTimeout(10_000);

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    senderProps.put(ProducerConfig.RETRIES_CONFIG, 1);
    DefaultKafkaProducerFactory<Object, Object> pf = new DefaultKafkaProducerFactory<>(senderProps);
    pf.setTransactionIdPrefix("maxAtt.");
    final KafkaTemplate<Object, Object> template = new KafkaTemplate<>(pf);
    final CountDownLatch latch = new CountDownLatch(1);
    AtomicReference<String> data = new AtomicReference<>();
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        data.set(message.value());
        if (message.offset() == 0) {
            throw new RuntimeException("fail for max failures");
        }
        latch.countDown();
    });

    @SuppressWarnings({ "rawtypes", "unchecked" })
    KafkaTransactionManager tm = new KafkaTransactionManager(pf);
    containerProps.setTransactionManager(tm);
    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testMaxFailures");
    final CountDownLatch recoverLatch = new CountDownLatch(1);
    DeadLetterPublishingRecoverer recoverer = new DeadLetterPublishingRecoverer(template) {

        @Override
        public void accept(ConsumerRecord<?, ?> record, Exception exception) {
            super.accept(record, exception);
            recoverLatch.countDown();
        }

    };
    DefaultAfterRollbackProcessor<Integer, String> afterRollbackProcessor = spy(
            new DefaultAfterRollbackProcessor<>(recoverer, 3));
    container.setAfterRollbackProcessor(afterRollbackProcessor);
    final CountDownLatch stopLatch = new CountDownLatch(1);
    container.setApplicationEventPublisher(e -> {
        if (e instanceof ConsumerStoppedEvent) {
            stopLatch.countDown();
        }
    });
    container.start();

    template.setDefaultTopic(topic3);
    template.executeInTransaction(t -> {
        RecordHeaders headers = new RecordHeaders(
                new RecordHeader[] { new RecordHeader("baz", "qux".getBytes()) });
        ProducerRecord<Object, Object> record = new ProducerRecord<>(topic3, 0, 0, "foo", headers);
        template.send(record);
        template.sendDefault(0, 0, "bar");
        return null;
    });
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    assertThat(data.get()).isEqualTo("bar");
    assertThat(recoverLatch.await(10, TimeUnit.SECONDS)).isTrue();
    container.stop();
    Consumer<Integer, String> consumer = cf.createConsumer();
    embeddedKafka.consumeFromAnEmbeddedTopic(consumer, topic3DLT);
    ConsumerRecord<Integer, String> dltRecord = KafkaTestUtils.getSingleRecord(consumer, topic3DLT);
    assertThat(dltRecord.value()).isEqualTo("foo");
    DefaultKafkaHeaderMapper mapper = new DefaultKafkaHeaderMapper();
    Map<String, Object> map = new HashMap<>();
    mapper.toHeaders(dltRecord.headers(), map);
    MessageHeaders headers = new MessageHeaders(map);
    assertThat(new String(headers.get(KafkaHeaders.DLT_EXCEPTION_FQCN, byte[].class)))
            .contains("RuntimeException");
    assertThat(headers.get(KafkaHeaders.DLT_EXCEPTION_MESSAGE, byte[].class))
            .isEqualTo("fail for max failures".getBytes());
    assertThat(headers.get(KafkaHeaders.DLT_EXCEPTION_STACKTRACE)).isNotNull();
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_OFFSET, byte[].class)[3]).isEqualTo((byte) 0);
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_PARTITION, byte[].class)[3]).isEqualTo((byte) 0);
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP, byte[].class)).isNotNull();
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_TIMESTAMP_TYPE, byte[].class)).isNotNull();
    assertThat(headers.get(KafkaHeaders.DLT_ORIGINAL_TOPIC, byte[].class)).isEqualTo(topic3.getBytes());
    assertThat(headers.get("baz")).isEqualTo("qux".getBytes());
    pf.destroy();
    assertThat(stopLatch.await(10, TimeUnit.SECONDS)).isTrue();
    verify(afterRollbackProcessor).clearThreadState();
    logger.info("Stop testMaxAttempts");
}
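
The tests above exercise the constructor through mocks and an embedded broker. For reference, a minimal standalone sketch of the same wiring against a real broker might look as follows; this assumes a broker on localhost:9092 and topics "input" and "output", and all names are illustrative rather than taken from the test source:

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.kafka.listener.KafkaMessageListenerContainer;
import org.springframework.kafka.listener.MessageListener;
import org.springframework.kafka.transaction.KafkaTransactionManager;

public class TransactionalContainerSketch {

    public static void main(String[] args) {
        // Consumer factory: reads String records; read_committed hides aborted records.
        Map<String, Object> consumerProps = new HashMap<>();
        consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        consumerProps.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        DefaultKafkaConsumerFactory<String, String> cf = new DefaultKafkaConsumerFactory<>(consumerProps);

        // Producer factory: setting a transactionIdPrefix makes it transaction-capable.
        Map<String, Object> producerProps = new HashMap<>();
        producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        DefaultKafkaProducerFactory<String, String> pf = new DefaultKafkaProducerFactory<>(producerProps);
        pf.setTransactionIdPrefix("tx.");

        KafkaTemplate<String, String> template = new KafkaTemplate<>(pf);
        KafkaTransactionManager<String, String> tm = new KafkaTransactionManager<>(pf);

        // The constructor under discussion: subscribe to "input" by topic name.
        ContainerProperties props = new ContainerProperties("input");
        props.setGroupId("group");
        props.setTransactionManager(tm);
        props.setMessageListener((MessageListener<String, String>) rec ->
                template.send("output", rec.value())); // runs inside the container's transaction

        KafkaMessageListenerContainer<String, String> container =
                new KafkaMessageListenerContainer<>(cf, props);
        container.setBeanName("txContainer");
        container.start();
    }
}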