record, @Header(KafkaHeaders.RECEIVED_TOPIC) String topic, Consumer consumer) {
+ System.out.println("消费者收到消息:" + record.value() + "; topic:" + topic);
+ /*
+ * 如果需要手工提交异步 consumer.commitSync();
+ * 手工同步提交 consumer.commitAsync()
+ */
+ }
+}
+```
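+
+With auto-commit enabled (the project's application.yml sets `enable-auto-commit: true`), offsets are committed by the Kafka client itself. If auto-commit is switched back to `false`, the injected `Consumer` can be used to commit by hand. A minimal sketch (the method name is illustrative, not part of the project):
+
+```java
+@KafkaListener(groupId = "simpleGroup", topics = Topic.SIMPLE)
+public void consumeWithManualCommit(ConsumerRecord record, Consumer consumer) {
+    System.out.println("Consumer received message: " + record.value());
+    // synchronous commit: blocks until the broker acknowledges the offsets
+    consumer.commitSync();
+    // asynchronous commit: returns immediately; failures are reported to the optional callback
+    // consumer.commitAsync((offsets, exception) -> { if (exception != null) exception.printStackTrace(); });
+}
+```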
+
+#### 2.4 Testing the Integration
+
+```java
+@Slf4j
+@RestController
+public class SendMsgController {
+
+ @Autowired
+ private KafKaCustomrProducer producer;
+ @Autowired
+ private KafkaTemplate kafkaTemplate;
+
+ /***
+ * Send a message whose body is a basic type
+ */
+ @GetMapping("sendSimple")
+ public void sendSimple() {
+ producer.sendMessage(Topic.SIMPLE, "hello spring boot kafka");
+ }
+}
+```
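+
+The `KafKaCustomrProducer` injected above only appears as a diff hunk further down; the sketch below reconstructs the class around that hunk, so the generic parameters and the exact log messages are assumptions:
+
+```java
+@Slf4j
+@Component
+public class KafKaCustomrProducer {
+
+    @Autowired
+    private KafkaTemplate<String, Object> kafkaTemplate;
+
+    public void sendMessage(String topic, Object object) {
+        // the key/value types of the ProducerRecord follow the KafkaTemplate generics
+        ListenableFuture<SendResult<String, Object>> future = kafkaTemplate.send(topic, object);
+
+        future.addCallback(new ListenableFutureCallback<SendResult<String, Object>>() {
+            @Override
+            public void onFailure(Throwable throwable) {
+                log.info("Failed to send message: " + throwable.getMessage());
+            }
+
+            @Override
+            public void onSuccess(SendResult<String, Object> result) {
+                System.out.println("Send result: " + result.toString());
+            }
+        });
+    }
+}
+```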
+
+
+
+## 3. Testing Multiple Consumer Groups
+
+#### 3.1 Creating a Multi-Partition Topic
+
+```java
+/**
+ * @author : heibaiying
+ * @description : Kafka configuration class
+ */
+@Configuration
+public class KafkaConfig {
+
+ @Bean
+ public NewTopic groupTopic() {
+ // specify the topic name, the number of partitions, and the replication factor
+ return new NewTopic(Topic.GROUP, 10, (short) 2);
+ }
+
+}
+```
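+
+Note that a replication factor of 2 requires at least two brokers; on a single-broker setup the topic creation will fail. The resulting layout can be checked with the Kafka admin client, sketched below (not part of the project; the broker address is an assumption):
+
+```java
+import java.util.Collections;
+import java.util.Properties;
+import org.apache.kafka.clients.admin.AdminClient;
+import org.apache.kafka.clients.admin.AdminClientConfig;
+import org.apache.kafka.clients.admin.TopicDescription;
+
+public class DescribeGroupTopic {
+
+    public static void main(String[] args) throws Exception {
+        Properties props = new Properties();
+        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
+        try (AdminClient admin = AdminClient.create(props)) {
+            TopicDescription description = admin
+                    .describeTopics(Collections.singleton("spring.boot.kafka.newGroup"))
+                    .all().get()
+                    .get("spring.boot.kafka.newGroup");
+            // should report 10 partitions once the KafkaConfig bean has been applied
+            System.out.println("Partitions: " + description.partitions().size());
+        }
+    }
+}
+```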
+
+#### 3.2 Multiple Consumer Groups Listening to the Same Topic
+
+1. Consumer 1-1 listens to partitions 0 and 1 of the topic
+2. Consumer 1-2 listens to partitions 2 and 3 of the topic
+3. Consumer 1-3 listens to partitions 0 and 1 of the topic
+4. Consumer 2-1 listens to all partitions of the topic
+
+```java
+/**
+ * @author : heibaiying
+ * @description : Kafka consumer groups
+ *
+ * Multiple consumer groups can read the same topic independently, without affecting one another.
+ */
+@Component
+@Slf4j
+public class KafkaGroupConsumer {
+
+ // consumer 1 in group 1
+ @KafkaListener(id = "consumer1-1", groupId = "group1", topicPartitions =
+ {@TopicPartition(topic = Topic.GROUP, partitions = {"0", "1"})
+ })
+ public void consumer1_1(ConsumerRecord record) {
+ System.out.println("consumer1-1 收到消息:" + record.value());
+ }
+
+ // consumer 2 in group 1
+ @KafkaListener(id = "consumer1-2", groupId = "group1", topicPartitions =
+ {@TopicPartition(topic = Topic.GROUP, partitions = {"2", "3"})
+ })
+ public void consumer1_2(ConsumerRecord record) {
+ System.out.println("consumer1-2 收到消息:" + record.value());
+ }
+
+ // consumer 3 in group 1
+ @KafkaListener(id = "consumer1-3", groupId = "group1", topicPartitions =
+ {@TopicPartition(topic = Topic.GROUP, partitions = {"0", "1"})
+ })
+ public void consumer1_3(ConsumerRecord record) {
+ System.out.println("consumer1-3 收到消息:" + record.value());
+ }
+
+ // the consumer in group 2
+ @KafkaListener(id = "consumer2-1", groupId = "group2", topics = Topic.GROUP)
+ public void consumer2_1(ConsumerRecord record) {
+ System.err.println("consumer2-1 收到消息:" + record.value());
+ }
+}
+
+```
+
+#### 3.3 Specifying the Target Partition When Sending Messages
+
+```java
+/***
+ * Consumption of the same topic by multiple consumer groups and by multiple consumers within a group
+ */
+@GetMapping("sendGroup")
+public void sendGroup() {
+ for (int i = 0; i < 4; i++) {
+ // the second argument specifies the partition and the third the message key; the explicit partition takes precedence over the key
+ ListenableFuture<SendResult<String, Object>> future = kafkaTemplate.send(Topic.GROUP, i % 4, "key", "hello group " + i);
+ future.addCallback(new ListenableFutureCallback<SendResult<String, Object>>() {
+ @Override
+ public void onFailure(Throwable throwable) {
+ log.info("发送消息失败:" + throwable.getMessage());
+ }
+
+ @Override
+ public void onSuccess(SendResult<String, Object> sendResult) {
+ System.out.println("Send result: " + sendResult.toString());
+ }
+ });
+ }
+}
+```
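+
+If no partition is given, the producer's default partitioner derives the partition from the hash of the message key, so records that share a key always land on the same partition. A sketch of the key-based variant (the endpoint name is illustrative, not part of the project):
+
+```java
+@GetMapping("sendGroupByKey")
+public void sendGroupByKey() {
+    for (int i = 0; i < 4; i++) {
+        // send(topic, key, value): no partition given, so it is chosen by hashing the key;
+        // every record here uses the same key and therefore goes to the same partition
+        kafkaTemplate.send(Topic.GROUP, "key", "hello group " + i);
+    }
+}
+```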
+
+Test results:
+
+```yaml
+# Note the partition attribute in each send result: the four messages were sent to partitions 0, 1, 2 and 3 of the topic
+Send result: SendResult [producerRecord=ProducerRecord(topic=spring.boot.kafka.newGroup, partition=1, headers=RecordHeaders(headers = [], isReadOnly = true), key=key, value=hello group 1, timestamp=null), recordMetadata=spring.boot.kafka.newGroup-1@13]
+Send result: SendResult [producerRecord=ProducerRecord(topic=spring.boot.kafka.newGroup, partition=0, headers=RecordHeaders(headers = [], isReadOnly = true), key=key, value=hello group 0, timestamp=null), recordMetadata=spring.boot.kafka.newGroup-0@19]
+Send result: SendResult [producerRecord=ProducerRecord(topic=spring.boot.kafka.newGroup, partition=3, headers=RecordHeaders(headers = [], isReadOnly = true), key=key, value=hello group 3, timestamp=null), recordMetadata=spring.boot.kafka.newGroup-3@13]
+Send result: SendResult [producerRecord=ProducerRecord(topic=spring.boot.kafka.newGroup, partition=2, headers=RecordHeaders(headers = [], isReadOnly = true), key=key, value=hello group 2, timestamp=null), recordMetadata=spring.boot.kafka.newGroup-2@13]
+# Messages received by consumer group 2
+consumer2-1 received message: hello group 1
+consumer2-1 received message: hello group 0
+consumer2-1 received message: hello group 2
+consumer2-1 received message: hello group 3
+# Messages received by consumer 1-1
+consumer1-1 received message: hello group 1
+consumer1-1 received message: hello group 0
+# Messages received by consumer 1-3
+consumer1-3 received message: hello group 1
+consumer1-3 received message: hello group 0
+# Messages received by consumer 1-2
+consumer1-2 received message: hello group 3
+consumer1-2 received message: hello group 2
+```
+
+#### 3.4 Test Findings
+
+1. As with Kafka's native mechanism, different consumer groups consume the same topic independently and do not affect one another.
+2. Unlike Kafka's native mechanism, consumer 1-1 and consumer 1-3 here belong to the same consumer group and listen to the same partitions. Natively, a group guarantees that each partition is read by only one consumer within that group, yet with Spring's declarative listeners both consumers receive the messages. The reason is that listing partitions via @TopicPartition makes Spring Kafka assign them manually (the equivalent of Consumer.assign()) instead of subscribing through the group coordinator, so the one-consumer-per-partition guarantee does not apply; the sketch below shows the difference with the plain client.
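+
+The sketch below uses the plain Kafka client (not part of the project; the broker address is an assumption) to contrast the two modes:
+
+```java
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Properties;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.serialization.StringDeserializer;
+
+public class AssignVsSubscribe {
+
+    public static void main(String[] args) {
+        Properties props = new Properties();
+        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
+        props.put(ConsumerConfig.GROUP_ID_CONFIG, "group1");
+        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
+        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
+
+        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
+            // subscribe(): the group coordinator distributes partitions among the group's members,
+            // so each partition is read by only one consumer of the group
+            consumer.subscribe(Collections.singletonList("spring.boot.kafka.newGroup"));
+
+            // assign(): the listed partitions are taken unconditionally, ignoring other group members;
+            // this is what @KafkaListener(topicPartitions = ...) does under the hood
+            // consumer.assign(Arrays.asList(
+            //         new TopicPartition("spring.boot.kafka.newGroup", 0),
+            //         new TopicPartition("spring.boot.kafka.newGroup", 1)));
+        }
+    }
+}
+```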
+
+
+
+## 4. Serialization and Deserialization
+
+This example uses the third-party fastjson library to serialize the entity class to JSON before sending it. The implementation is as follows:
+
+```java
+/***
+ * Send a message whose body is a bean
+ */
+@GetMapping("sendBean")
+public void sendBean() {
+ Programmer programmer = new Programmer("xiaoming", 12, 21212.33f, new Date());
+ producer.sendMessage(Topic.BEAN, JSON.toJSON(programmer).toString());
+}
+
+```
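+
+The `Programmer` entity itself is not shown in this document; a minimal sketch inferred from the constructor call above (the field names and Lombok annotations are assumptions):
+
+```java
+@Data
+@NoArgsConstructor
+@AllArgsConstructor
+public class Programmer {
+
+    private String name;
+    private int age;
+    private float salary;
+    private Date birthday;
+}
+```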
+
+```java
+@Component
+@Slf4j
+public class KafkaBeanConsumer {
+
+ @KafkaListener(groupId = "beanGroup",topics = Topic.BEAN)
+ public void consumer(ConsumerRecord record) {
+ System.out.println("消费者收到消息:" + JSON.parseObject(record.value().toString(), Programmer.class));
+ }
+}
+```
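+
+As an alternative to building JSON strings with fastjson by hand, Spring Kafka also ships its own JSON (de)serializers that can be wired in through the producer factory; a sketch (not used by this project; the broker address is an assumption):
+
+```java
+@Bean
+public ProducerFactory<String, Programmer> programmerProducerFactory() {
+    Map<String, Object> config = new HashMap<>();
+    config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
+    config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
+    // org.springframework.kafka.support.serializer.JsonSerializer writes the bean as JSON
+    config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
+    return new DefaultKafkaProducerFactory<>(config);
+}
+
+@Bean
+public KafkaTemplate<String, Programmer> programmerKafkaTemplate() {
+    return new KafkaTemplate<>(programmerProducerFactory());
+}
+```
+
+On the consumer side the matching `JsonDeserializer` (with its trusted-packages setting) would be configured the same way.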
+
diff --git a/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/Producer/KafKaCustomrProducer.java b/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/Producer/KafKaCustomrProducer.java
index 9c9cd07..3265a2f 100644
--- a/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/Producer/KafKaCustomrProducer.java
+++ b/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/Producer/KafKaCustomrProducer.java
@@ -28,6 +28,7 @@ public class KafKaCustomrProducer {
* used for ProducerRecord producerRecord, i.e. the key/value types of the messages the producer sends
*/
ListenableFuture> future = kafkaTemplate.send(topic, object);
+
future.addCallback(new ListenableFutureCallback<SendResult<String, Object>>() {
@Override
public void onFailure(Throwable throwable) {
diff --git a/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/config/KafkaConfig.java b/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/config/KafkaConfig.java
new file mode 100644
index 0000000..130fd8c
--- /dev/null
+++ b/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/config/KafkaConfig.java
@@ -0,0 +1,21 @@
+package com.heibaiying.springboot.config;
+
+import com.heibaiying.springboot.constant.Topic;
+import org.apache.kafka.clients.admin.NewTopic;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+/**
+ * @author : heibaiying
+ * @description : Kafka configuration class
+ */
+@Configuration
+public class KafkaConfig {
+
+ @Bean
+ public NewTopic groupTopic() {
+ // specify the topic name, the number of partitions, and the replication factor
+ return new NewTopic(Topic.GROUP, 10, (short) 2);
+ }
+
+}
diff --git a/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/constant/Topic.java b/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/constant/Topic.java
index 8dd0e03..7659df1 100644
--- a/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/constant/Topic.java
+++ b/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/constant/Topic.java
@@ -3,7 +3,7 @@ package com.heibaiying.springboot.constant;
// in real projects these values are better kept in a configuration file
public class Topic {
- public static final String GROUP="spring.boot.kafka.group";
+ public static final String GROUP="spring.boot.kafka.newGroup";
public static final String SIMPLE="spring.boot.kafka.simple";
public static final String BEAN="spring.boot.kafka.bean";
diff --git a/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/consumer/KafkaBeanConsumer.java b/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/consumer/KafkaBeanConsumer.java
index 4bef598..f6a0100 100644
--- a/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/consumer/KafkaBeanConsumer.java
+++ b/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/consumer/KafkaBeanConsumer.java
@@ -17,7 +17,7 @@ import org.springframework.stereotype.Component;
@Slf4j
public class KafkaBeanConsumer {
- @KafkaListener(groupId = "beanGroup", topics = Topic.BEAN)
+ @KafkaListener(groupId = "beanGroup",topics = Topic.BEAN)
public void consumer(ConsumerRecord record) {
System.out.println("消费者收到消息:" + JSON.parseObject(record.value().toString(), Programmer.class));
}
diff --git a/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/consumer/KafkaGroupConsumer.java b/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/consumer/KafkaGroupConsumer.java
index 7019486..1b6320e 100644
--- a/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/consumer/KafkaGroupConsumer.java
+++ b/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/consumer/KafkaGroupConsumer.java
@@ -4,34 +4,46 @@ import com.heibaiying.springboot.constant.Topic;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
+import org.springframework.kafka.annotation.TopicPartition;
import org.springframework.stereotype.Component;
/**
* @author : heibaiying
* @description : Kafka consumer groups
- *
+ *
* Multiple consumer groups can read the same topic independently, without affecting one another.
- * However, a single partition of the topic can only be read by one consumer within the same consumer group.
*/
@Component
@Slf4j
public class KafkaGroupConsumer {
// consumer 1 in group 1
- @KafkaListener(id = "consumer1-1", groupId = "group1", topics = Topic.GROUP)
+ @KafkaListener(id = "consumer1-1", groupId = "group1", topicPartitions =
+ {@TopicPartition(topic = Topic.GROUP, partitions = {"0", "1"})
+ })
public void consumer1_1(ConsumerRecord record) {
System.out.println("consumer1-1 收到消息:" + record.value());
}
// consumer 2 in group 1
- @KafkaListener(id = "consumer1-2", groupId = "group1", topics = Topic.GROUP)
+ @KafkaListener(id = "consumer1-2", groupId = "group1", topicPartitions =
+ {@TopicPartition(topic = Topic.GROUP, partitions = {"2", "3"})
+ })
public void consumer1_2(ConsumerRecord record) {
System.out.println("consumer1-2 收到消息:" + record.value());
}
+ // consumer 3 in group 1
+ @KafkaListener(id = "consumer1-3", groupId = "group1", topicPartitions =
+ {@TopicPartition(topic = Topic.GROUP, partitions = {"0", "1"})
+ })
+ public void consumer1_3(ConsumerRecord record) {
+ System.out.println("consumer1-3 收到消息:" + record.value());
+ }
+
// the consumer in group 2
@KafkaListener(id = "consumer2-1", groupId = "group2", topics = Topic.GROUP)
public void consumer2_1(ConsumerRecord record) {
- System.out.println("consumer2-1 收到消息:" + record.value());
+ System.err.println("consumer2-1 收到消息:" + record.value());
}
}
diff --git a/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/consumer/KafkaSimpleConsumer.java b/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/consumer/KafkaSimpleConsumer.java
index dfb191b..b53d6d1 100644
--- a/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/consumer/KafkaSimpleConsumer.java
+++ b/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/consumer/KafkaSimpleConsumer.java
@@ -4,11 +4,16 @@ import com.heibaiying.springboot.constant.Topic;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.PartitionInfo;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.stereotype.Component;
+import java.util.List;
+import java.util.Map;
+
/**
* @author : heibaiying
* @description : simple Kafka message consumer
@@ -20,8 +25,12 @@ public class KafkaSimpleConsumer {
// simple consumer
@KafkaListener(groupId = "simpleGroup", topics = Topic.SIMPLE)
- public void consumer1_1(ConsumerRecord record, @Header(KafkaHeaders.RECEIVED_TOPIC) String topic,Consumer consumer) {
+ public void consumer1_1(ConsumerRecord record, @Header(KafkaHeaders.RECEIVED_TOPIC) String topic, Consumer consumer) {
System.out.println("消费者收到消息:" + record.value() + "; topic:" + topic);
- consumer.commitSync();
+
+ /*
+ * for manual commits: consumer.commitSync() commits synchronously;
+ * consumer.commitAsync() commits asynchronously
+ */
}
}
diff --git a/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/controller/SendMsgController.java b/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/controller/SendMsgController.java
new file mode 100644
index 0000000..41e02d3
--- /dev/null
+++ b/spring-boot/spring-boot-kafka/src/main/java/com/heibaiying/springboot/controller/SendMsgController.java
@@ -0,0 +1,70 @@
+package com.heibaiying.springboot.controller;
+
+import com.alibaba.fastjson.JSON;
+import com.heibaiying.springboot.Producer.KafKaCustomrProducer;
+import com.heibaiying.springboot.bean.Programmer;
+import com.heibaiying.springboot.constant.Topic;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.kafka.core.KafkaTemplate;
+import org.springframework.kafka.support.SendResult;
+import org.springframework.util.concurrent.ListenableFuture;
+import org.springframework.util.concurrent.ListenableFutureCallback;
+import org.springframework.web.bind.annotation.GetMapping;
+import org.springframework.web.bind.annotation.RestController;
+
+import java.util.Date;
+
+/**
+ * @author : heibaiying
+ * @description : test message sending
+ */
+@Slf4j
+@RestController
+public class SendMsgController {
+
+ @Autowired
+ private KafKaCustomrProducer producer;
+ @Autowired
+ private KafkaTemplate kafkaTemplate;
+
+ /***
+ * Send a message whose body is a basic type
+ */
+
+ @GetMapping("sendSimple")
+ public void sendSimple() {
+ producer.sendMessage(Topic.SIMPLE, "hello spring boot kafka");
+ }
+
+ /***
+ * Send a message whose body is a bean
+ */
+ @GetMapping("sendBean")
+ public void sendBean() {
+ Programmer programmer = new Programmer("xiaoming", 12, 21212.33f, new Date());
+ producer.sendMessage(Topic.BEAN, JSON.toJSON(programmer).toString());
+ }
+
+
+ /***
+ * Consumption of the same topic by multiple consumer groups and by multiple consumers within a group
+ */
+ @GetMapping("sendGroup")
+ public void sendGroup() {
+ for (int i = 0; i < 4; i++) {
+ ListenableFuture> future = kafkaTemplate.send(Topic.GROUP, i % 4, "key", "hello group " + i);
+ future.addCallback(new ListenableFutureCallback>() {
+ @Override
+ public void onFailure(Throwable throwable) {
+ log.info("发送消息失败:" + throwable.getMessage());
+ }
+
+ @Override
+ public void onSuccess(SendResult<String, Object> sendResult) {
+ System.out.println("Send result: " + sendResult.toString());
+ }
+ });
+ }
+ }
+}
diff --git a/spring-boot/spring-boot-kafka/src/main/resources/application.yml b/spring-boot/spring-boot-kafka/src/main/resources/application.yml
index cc921dd..56e2db4 100644
--- a/spring-boot/spring-boot-kafka/src/main/resources/application.yml
+++ b/spring-boot/spring-boot-kafka/src/main/resources/application.yml
@@ -25,7 +25,7 @@ spring:
# earliest: when there is no valid offset, the consumer reads the partition's records from the beginning
auto-offset-reset: earliest
# whether offsets are committed automatically; the default is true. To avoid duplicate or lost data, set it to false and commit offsets manually
- enable-auto-commit: false
+ enable-auto-commit: true
# key deserializer
key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
# value deserializer
diff --git a/spring-boot/spring-boot-kafka/src/test/java/com/heibaiying/springboot/KafkaTests.java b/spring-boot/spring-boot-kafka/src/test/java/com/heibaiying/springboot/KafkaTests.java
deleted file mode 100644
index 82f8bcc..0000000
--- a/spring-boot/spring-boot-kafka/src/test/java/com/heibaiying/springboot/KafkaTests.java
+++ /dev/null
@@ -1,51 +0,0 @@
-package com.heibaiying.springboot;
-
-import com.alibaba.fastjson.JSON;
-import com.heibaiying.springboot.Producer.KafKaCustomrProducer;
-import com.heibaiying.springboot.bean.Programmer;
-import com.heibaiying.springboot.constant.Topic;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.boot.test.context.SpringBootTest;
-import org.springframework.kafka.core.KafkaTemplate;
-import org.springframework.test.context.junit4.SpringRunner;
-
-import java.util.Date;
-
-@RunWith(SpringRunner.class)
-@SpringBootTest
-public class KafkaTests {
-
- @Autowired
- private KafKaCustomrProducer producer;
-
- /***
- * Send a message whose body is a basic type
- */
- @Test
- public void sendSimple() {
- producer.sendMessage(Topic.SIMPLE, "hello spring boot kafka");
- }
-
- /***
- * Send a message whose body is a bean
- */
- @Test
- public void sendBean() {
- Programmer programmer = new Programmer("xiaoming", 12, 21212.33f, new Date());
- producer.sendMessage(Topic.BEAN, JSON.toJSON(programmer).toString());
- }
-
-
- /***
- * Consumption of the same topic by multiple consumer groups and by multiple consumers within a group
- */
- @Test
- public void sendGroup() {
- for (int i = 0; i < 5; i++) {
- producer.sendMessage(Topic.GROUP, "hello group " + i);
- }
- }
-}
-