Example usage for org.springframework.kafka.support KafkaHeaders OFFSET

List of usage examples for org.springframework.kafka.support KafkaHeaders OFFSET

Introduction

On this page you can find example usage for org.springframework.kafka.support KafkaHeaders OFFSET.

Prototype

String OFFSET

Document

The header for the partition offset.
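
For orientation, the snippet below is a minimal sketch (it is not taken from the source files listed under Usage) showing how this header can be read back from a single-record converted Message; the helper class and method names are illustrative assumptions. Note that the batch converter stores a List<Long> under the same header, so this helper only applies to single-record messages.

import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.Message;

public class OffsetHeaderExample {

    // Illustrative helper: reads the partition offset that the single-record
    // converter stores under KafkaHeaders.OFFSET; returns -1 if the header
    // is absent.
    static long offsetOf(Message<?> message) {
        Long offset = message.getHeaders().get(KafkaHeaders.OFFSET, Long.class);
        return offset != null ? offset : -1L;
    }
}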

Usage

From source file: org.springframework.kafka.support.converter.BatchMessagingMessageConverter.java

@Override
public Message<?> toMessage(List<ConsumerRecord<?, ?>> records, Acknowledgment acknowledgment,
        Consumer<?, ?> consumer, Type type) {
    KafkaMessageHeaders kafkaMessageHeaders = new KafkaMessageHeaders(this.generateMessageId,
            this.generateTimestamp);

    Map<String, Object> rawHeaders = kafkaMessageHeaders.getRawHeaders();
    List<Object> payloads = new ArrayList<>();
    List<Object> keys = new ArrayList<>();
    List<String> topics = new ArrayList<>();
    List<Integer> partitions = new ArrayList<>();
    List<Long> offsets = new ArrayList<>();
    List<String> timestampTypes = new ArrayList<>();
    List<Long> timestamps = new ArrayList<>();
    List<Map<String, Object>> convertedHeaders = new ArrayList<>();
    List<Headers> natives = new ArrayList<>();
    rawHeaders.put(KafkaHeaders.RECEIVED_MESSAGE_KEY, keys);
    rawHeaders.put(KafkaHeaders.RECEIVED_TOPIC, topics);
    rawHeaders.put(KafkaHeaders.RECEIVED_PARTITION_ID, partitions);
    rawHeaders.put(KafkaHeaders.OFFSET, offsets);
    rawHeaders.put(KafkaHeaders.TIMESTAMP_TYPE, timestampTypes);
    rawHeaders.put(KafkaHeaders.RECEIVED_TIMESTAMP, timestamps);
    if (this.headerMapper != null) {
        rawHeaders.put(KafkaHeaders.BATCH_CONVERTED_HEADERS, convertedHeaders);
    } else {
        rawHeaders.put(KafkaHeaders.NATIVE_HEADERS, natives);
    }

    if (acknowledgment != null) {
        rawHeaders.put(KafkaHeaders.ACKNOWLEDGMENT, acknowledgment);
    }
    if (consumer != null) {
        rawHeaders.put(KafkaHeaders.CONSUMER, consumer);
    }

    boolean logged = false;
    for (ConsumerRecord<?, ?> record : records) {
        payloads.add(this.recordConverter == null || !containerType(type) ? extractAndConvertValue(record, type)
                : convert(record, type));
        keys.add(record.key());
        topics.add(record.topic());
        partitions.add(record.partition());
        offsets.add(record.offset());
        timestampTypes.add(record.timestampType().name());
        timestamps.add(record.timestamp());
        if (this.headerMapper != null) {
            Map<String, Object> converted = new HashMap<>();
            this.headerMapper.toHeaders(record.headers(), converted);
            convertedHeaders.add(converted);
        } else {
            if (this.logger.isDebugEnabled() && !logged) {
                this.logger.debug("No header mapper is available; Jackson is required for the default mapper; "
                        + "headers (if present) are not mapped but provided raw in "
                        + KafkaHeaders.NATIVE_HEADERS);
                logged = true;
            }
            natives.add(record.headers());
        }
    }
    return MessageBuilder.createMessage(payloads, kafkaMessageHeaders);
}
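
Because the batch converter above collects the per-record offsets into a single list under KafkaHeaders.OFFSET, a batch @KafkaListener can bind that list with @Header. The listener below is a minimal sketch of that pattern; the topic name, group id, and class name are placeholders, and a batch-enabled listener container factory is assumed to be configured.

import java.util.List;

import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.stereotype.Component;

@Component
public class BatchOffsetListener {

    // "demo-topic" and "demo-group" are placeholders for illustration only;
    // the converter supplies one offset and partition entry per record.
    @KafkaListener(topics = "demo-topic", groupId = "demo-group")
    public void listen(List<String> payloads,
            @Header(KafkaHeaders.OFFSET) List<Long> offsets,
            @Header(KafkaHeaders.RECEIVED_PARTITION_ID) List<Integer> partitions) {
        for (int i = 0; i < payloads.size(); i++) {
            System.out.println("partition " + partitions.get(i)
                    + " offset " + offsets.get(i) + ": " + payloads.get(i));
        }
    }
}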

From source file: org.springframework.kafka.support.converter.MessagingMessageConverter.java

@Override
public Message<?> toMessage(ConsumerRecord<?, ?> record, Acknowledgment acknowledgment, Consumer<?, ?> consumer,
        Type type) {
    KafkaMessageHeaders kafkaMessageHeaders = new KafkaMessageHeaders(this.generateMessageId,
            this.generateTimestamp);

    Map<String, Object> rawHeaders = kafkaMessageHeaders.getRawHeaders();
    if (this.headerMapper != null) {
        this.headerMapper.toHeaders(record.headers(), rawHeaders);
    } else {
        if (this.logger.isDebugEnabled()) {
            this.logger.debug("No header mapper is available; Jackson is required for the default mapper; "
                    + "headers (if present) are not mapped but provided raw in " + KafkaHeaders.NATIVE_HEADERS);
        }
        rawHeaders.put(KafkaHeaders.NATIVE_HEADERS, record.headers());
    }
    rawHeaders.put(KafkaHeaders.RECEIVED_MESSAGE_KEY, record.key());
    rawHeaders.put(KafkaHeaders.RECEIVED_TOPIC, record.topic());
    rawHeaders.put(KafkaHeaders.RECEIVED_PARTITION_ID, record.partition());
    rawHeaders.put(KafkaHeaders.OFFSET, record.offset());
    rawHeaders.put(KafkaHeaders.TIMESTAMP_TYPE, record.timestampType().name());
    rawHeaders.put(KafkaHeaders.RECEIVED_TIMESTAMP, record.timestamp());

    if (acknowledgment != null) {
        rawHeaders.put(KafkaHeaders.ACKNOWLEDGMENT, acknowledgment);
    }
    if (consumer != null) {
        rawHeaders.put(KafkaHeaders.CONSUMER, consumer);
    }

    return MessageBuilder.createMessage(extractAndConvertValue(record, type), kafkaMessageHeaders);
}
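
For a single record converted by MessagingMessageConverter, the offset arrives as a scalar Long, so a record @KafkaListener can bind it directly with @Header. The listener below is a minimal sketch; the topic name, group id, and class name are placeholders.

import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.messaging.handler.annotation.Payload;
import org.springframework.stereotype.Component;

@Component
public class RecordOffsetListener {

    // "demo-topic" and "demo-group" are placeholders for illustration only.
    @KafkaListener(topics = "demo-topic", groupId = "demo-group")
    public void listen(@Payload String payload,
            @Header(KafkaHeaders.OFFSET) long offset,
            @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) {
        System.out.println("Received " + payload + " from " + topic + " at offset " + offset);
    }
}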