├── src
│   └── main
│       ├── resources
│       │   └── META-INF
│       │       └── spring.factories
│       └── java
│           └── paas
│               └── foundation
│                   ├── mq
│                   │   ├── exception
│                   │   │   └── MessageQueueException.java
│                   │   ├── producer
│                   │   │   ├── Producer.java
│                   │   │   ├── internal
│                   │   │   │   └── kafka
│                   │   │   │       ├── KafkaProducerServer.java
│                   │   │   │       └── AbstractKafkaMqProducer.java
│                   │   │   └── ProduceMessage.java
│                   │   ├── consumer
│                   │   │   ├── MessageListener.java
│                   │   │   ├── ConsumeMessage.java
│                   │   │   └── internal
│                   │   │       └── kafka
│                   │   │           ├── KafkaConsumerServer.java
│                   │   │           ├── KafkaRetryConsumerServer.java
│                   │   │           └── AbstractKafkaConsumer.java
│                   │   └── utils
│                   │       └── KafkaConstantsUtil.java
│                   └── autoconfigure
│                       └── mq
│                           └── kafka
│                               ├── KafkaProperties.java
│                               └── KafkaAutoConfiguration.java
├── README.md
└── pom.xml

/src/main/resources/META-INF/spring.factories:
--------------------------------------------------------------------------------
org.springframework.boot.autoconfigure.EnableAutoConfiguration=\
paas.foundation.autoconfigure.mq.kafka.KafkaAutoConfiguration
--------------------------------------------------------------------------------

/src/main/java/paas/foundation/mq/exception/MessageQueueException.java:
--------------------------------------------------------------------------------
package paas.foundation.mq.exception;


public class MessageQueueException extends Exception {

    public MessageQueueException(String message) {
        super(message);
    }

}
--------------------------------------------------------------------------------

/src/main/java/paas/foundation/mq/producer/Producer.java:
--------------------------------------------------------------------------------
package paas.foundation.mq.producer;

public interface Producer {

    /**
     * Sends a message synchronously.
     *
     * @param message the message to send
     * @return the id assigned by the message queue once the message has been
     *         delivered; it can be used later to look the message up and to
     *         troubleshoot failures
     */
    String send(ProduceMessage message);
}
--------------------------------------------------------------------------------
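`Producer` is the sending contract of the starter; the auto-configured `KafkaProducerServer` bean implements it. A minimal usage sketch (the `DemoPublisher` class and the topic name `demo_topic` are hypothetical, not part of the starter):

```java
import org.springframework.stereotype.Service;
import paas.foundation.mq.producer.ProduceMessage;
import paas.foundation.mq.producer.Producer;

// Hypothetical caller, assuming the starter has auto-configured a Producer
// bean (i.e. paas.mq.kafka.bootstrap-server is set).
@Service
public class DemoPublisher {

    private final Producer producer;

    public DemoPublisher(Producer producer) {
        this.producer = producer;
    }

    public String publish(String json) {
        ProduceMessage message = ProduceMessage.fromString("demo_topic", json);
        // send() blocks until the broker acknowledges; KafkaProducerServer
        // logs and returns null when sending fails, so check the result.
        return producer.send(message);
    }
}
```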
/src/main/java/paas/foundation/mq/consumer/MessageListener.java:
--------------------------------------------------------------------------------
package paas.foundation.mq.consumer;

/**
 * Message listener contributed by the user's application.
 *
 * @author wangpengpeng
 * @date 2020-07-01 11:49
 */
public interface MessageListener {
    /**
     * The topic to listen on; only messages published under this topic are
     * delivered to this listener.
     *
     * @return the topic name
     */
    String getTopic();

    /**
     * Handles a received message.
     *
     * @param message the received message
     */
    void process(ConsumeMessage message);

}
--------------------------------------------------------------------------------

/src/main/java/paas/foundation/mq/utils/KafkaConstantsUtil.java:
--------------------------------------------------------------------------------
package paas.foundation.mq.utils;


/**
 * Shared constants: the internal retry/dead-letter topic names, the header
 * keys reserved by the starter, and the retry back-off ladder (delays in
 * milliseconds).
 *
 * @author wangpengpeng
 * @date 2020-07-04 14:55
 */
public class KafkaConstantsUtil {
    public static final String RETRY_TOPIC = "RETRY_TOPIC";
    public static final String DEAD_TOPIC = "DEAD_TOPIC";
    public static final String TOPIC_CAPTION = "topic";
    public static final String RETRY_COUNT = "retry_count";
    public static final Integer RETRY_10_SECOND = 10000;
    public static final Integer RETRY_30_SECOND = 30000;
    public static final Integer RETRY_1_MIN = 60000;
    public static final Integer RETRY_2_MIN = 120000;
    public static final Integer RETRY_3_MIN = 180000;
    public static final Integer RETRY_4_MIN = 240000;
    public static final Integer RETRY_5_MIN = 300000;
    public static final Integer RETRY_6_MIN = 360000;
    public static final Integer RETRY_7_MIN = 420000;
    public static final Integer RETRY_8_MIN = 480000;
    public static final Integer RETRY_9_MIN = 540000;
    public static final Integer RETRY_10_MIN = 600000;
    public static final Integer RETRY_20_MIN = 1200000;
    public static final Integer RETRY_30_MIN = 1800000;
    public static final Integer RETRY_1_H = 3600000;
    public static final Integer RETRY_2_H = 7200000;
}
--------------------------------------------------------------------------------
/src/main/java/paas/foundation/autoconfigure/mq/kafka/KafkaProperties.java:
--------------------------------------------------------------------------------
package paas.foundation.autoconfigure.mq.kafka;

import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;

/**
 * Properties bound from the user's application.yaml.
 *
 * @author wangpengpeng
 * @date 2020-07-01 11:49
 */
@Data
@ConfigurationProperties(prefix = "paas.mq.kafka")
public class KafkaProperties {

    /**
     * Address of the Kafka brokers.
     */
    private String bootstrapServer;

    /**
     * Kafka security protocol (only needed for public-network access).
     */
    private String securityProtocol;

    /**
     * SASL mechanism (only needed for public-network access).
     */
    private String saslMechanism;

    /**
     * SASL JAAS configuration (only needed for public-network access).
     */
    private String saslJaasConfig;

    /**
     * Session timeout in milliseconds.
     */
    private Integer sessionTimeoutMs;

    /**
     * Value serializer class. Currently not applied: the internal clients
     * hard-code their serializers.
     */
    private String valueSerializerClassConfig;

    /**
     * Key serializer class. Currently not applied: the internal clients
     * hard-code their serializers.
     */
    private String keySerializerClassConfig;

    /**
     * Acknowledgement mode for sent messages:
     * 0 - success is reported as soon as the message has been sent;
     * 1 - success is reported once the leader has confirmed;
     * -1 - success is reported only after the leader and all followers have confirmed.
     */
    private String acksConfig;

    /**
     * Consumer group. Currently not applied: each consumer uses its topic
     * name as the group id.
     */
    private String groupId;

    /**
     * Number of retries.
     */
    private Integer retryCount;

}
--------------------------------------------------------------------------------

/src/main/java/paas/foundation/autoconfigure/mq/kafka/KafkaAutoConfiguration.java:
--------------------------------------------------------------------------------
package paas.foundation.autoconfigure.mq.kafka;

import paas.foundation.mq.consumer.internal.kafka.KafkaConsumerServer;
import paas.foundation.mq.producer.internal.kafka.KafkaProducerServer;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.annotation.EnableScheduling;

/**
 * Spring Boot starter entry point:
 * 1. registers KafkaProperties in the Spring context via
 *    EnableConfigurationProperties;
 * 2. creates the producer/consumer beans only when the configuration
 *    contains "paas.mq.kafka.bootstrap-server".
 *
 * @author wangpengpeng
 * @date 2020-07-02 12:49
 */
@Configuration
@EnableConfigurationProperties(KafkaProperties.class)
@ConditionalOnClass({KafkaProducerServer.class, KafkaConsumer.class})
@EnableScheduling
public class KafkaAutoConfiguration {

    @ConditionalOnProperty("paas.mq.kafka.bootstrap-server")
    @Bean(initMethod = "init")
    public KafkaProducerServer kafkaProducerServer() {
        return new KafkaProducerServer();
    }

    @ConditionalOnProperty("paas.mq.kafka.bootstrap-server")
    @Bean(initMethod = "init")
    public KafkaConsumerServer kafkaConsumerServer() {
        return new KafkaConsumerServer();
    }
}
--------------------------------------------------------------------------------
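Spring's relaxed binding maps kebab-case keys in application.yaml onto the camelCase fields of `KafkaProperties`; without `bootstrap-server`, the `@ConditionalOnProperty` beans above are never created. A hypothetical configuration exercising the commonly used properties (addresses and values are placeholders):

```yaml
# Hypothetical application.yaml; only bootstrap-server is required.
paas:
  mq:
    kafka:
      bootstrap-server: 10.0.0.1:9092   # binds to bootstrapServer
      session-timeout-ms: 30000         # binds to sessionTimeoutMs
      acks-config: "1"                  # binds to acksConfig
      retry-count: 2                    # binds to retryCount (at most 16)
```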
/src/main/java/paas/foundation/mq/producer/internal/kafka/KafkaProducerServer.java:
--------------------------------------------------------------------------------
package paas.foundation.mq.producer.internal.kafka;

import paas.foundation.mq.producer.ProduceMessage;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;

import java.util.Map;


/**
 * Kafka producer implementation.
 *
 * @author wangpengpeng
 * @create 2020-07-05 14:34
 */
@Slf4j
public class KafkaProducerServer extends AbstractKafkaMqProducer {

    @Override
    public String send(ProduceMessage message) {
        // Partition, key and timestamp are left null, so Kafka picks the
        // partition; delayTime/atTime/shardingKey of ProduceMessage are not
        // applied on the Kafka path.
        ProducerRecord<String, byte[]> producerRecord = new ProducerRecord<>(message.getTopic(), null, null, null, message.getPayload(), getUserProperties(message));
        try {
            RecordMetadata recordMetadata = producer.send(producerRecord).get();
            return String.valueOf(recordMetadata);
        } catch (Exception e) {
            log.error("Failed to send message: {}", e.getMessage());
        }
        return null;
    }

    /**
     * Builds the Kafka record headers from the user properties.
     *
     * @param message ProduceMessage
     * @return RecordHeaders
     */
    private RecordHeaders getUserProperties(ProduceMessage message) {
        RecordHeader[] recordHeaders = null;
        Map<String, String> messageUserProperties = message.getUserProperties();
        int messageUserPropertiesSize = messageUserProperties.size();

        if (messageUserPropertiesSize > 0) {
            recordHeaders = new RecordHeader[messageUserPropertiesSize];
            for (Map.Entry<String, String> entry : messageUserProperties.entrySet()) {
                recordHeaders[--messageUserPropertiesSize] = new RecordHeader(entry.getKey(), entry.getValue().getBytes());
            }
        }
        return new RecordHeaders(recordHeaders);
    }

}
--------------------------------------------------------------------------------

/src/main/java/paas/foundation/mq/producer/ProduceMessage.java:
--------------------------------------------------------------------------------
package paas.foundation.mq.producer;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.aliyun.openservices.shade.org.apache.commons.codec.Charsets;

import java.util.HashMap;
import java.util.Map;

import lombok.Data;


@Data
public class ProduceMessage {

    /**
     * Topic the message belongs to.
     */
    private final String topic;
    /**
     * Message body as binary data.
     */
    private final byte[] payload;

    /**
     * Message tag, usable on the consuming side for a second-level
     * classification; it has no effect on the sending side. Consumers
     * normally receive every message under a topic; a consumer that sets a
     * tag only receives messages under the same topic carrying that tag.
     */
    private String tag;
    /**
     * Delay before the message is sent. By default messages are sent
     * immediately; when this is set, the message is sent only after the
     * given interval. If neither this nor {@link #getAtTime()} is set,
     * the message is delivered to consumers immediately.
     */
    private Integer delayTime;
    /**
     * Absolute time at which the message, once in the queue, is pushed to
     * consumers. This has lower priority than {@link #getDelayTime()}.
     */
    private Long atTime;

    /**
     * When not null, messages are sent in order: messages sharing the same
     * sharding key are guaranteed to be ordered.
     */
    private String shardingKey = "";

    /**
     * Extra properties attached to the message. They expose selected
     * information (for example tenant id or project id) without parsing the
     * body, and can be used as filters when querying message history.
     */
    private Map<String, String> userProperties = new HashMap<>();

    private ProduceMessage(String topic, byte[] data) {
        this.topic = topic;
        this.payload = data;
    }

    private ProduceMessage(String topic, String tag, byte[] data) {
        this.topic = topic;
        this.tag = tag;
        this.payload = data;
    }

    public static ProduceMessage fromBinary(String topic, byte[] data) {
        return new ProduceMessage(topic, data);
    }

    public static ProduceMessage fromString(String topic, String data) {
        byte[] binary = data.getBytes(Charsets.UTF_8);
        return new ProduceMessage(topic, binary);
    }

    public static ProduceMessage fromString(String topic, String tag, String data) {
        byte[] binary = data.getBytes(Charsets.UTF_8);
        return new ProduceMessage(topic, tag, binary);
    }

    public static ProduceMessage fromJSON(String topic, JSONObject data) {
        byte[] binary = JSON.toJSONBytes(data);
        return fromBinary(topic, binary);
    }

    public static ProduceMessage fromObject(String topic, Object obj) {
        byte[] binary = JSON.toJSONBytes(obj);
        return fromBinary(topic, binary);
    }
}
--------------------------------------------------------------------------------

/src/main/java/paas/foundation/mq/consumer/ConsumeMessage.java:
--------------------------------------------------------------------------------
package paas.foundation.mq.consumer;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;

import java.io.Serializable;
import java.nio.charset.Charset;
import java.util.Properties;

import lombok.Getter;
import lombok.Setter;

/**
 * Unified upper-layer data structure for received messages.
 *
 * @author wangpengpeng
 * @date 2020-07-02 12:49
 */
public class ConsumeMessage implements Serializable {
    /**
     * Id of this message as assigned by the message queue.
     */
    @Getter
    private String messageId;

    /**
     * Message body as a binary array.
     */
    private final byte[] payload;

    /**
     * Message topic.
     */
    @Getter
    private String topic;

    /**
     * Message tag.
     */
    @Getter
    private String tag;
    /**
     * Whether the message has been marked as committed.
     */
    @Getter
    private boolean committed;
    /**
     * Whether the framework commits the message automatically when the
     * listener returns normally.
     */
    @Getter
    @Setter
    private boolean autoCommit = true;

    /**
     * Number of failed redelivery attempts for this message.
     */
    @Getter
    @Setter
    private int reconsumeTimes;

    /**
     * userProperties carries the original attributes of the ONS/RabbitMQ
     * message; values can be read directly by key.
     */
    @Getter
    @Setter
    private Properties userProperties;

    public ConsumeMessage(String messageId, byte[] payload) {
        this.messageId = messageId;
        this.payload = payload;
    }

    public ConsumeMessage(String messageId, byte[] payload, String topic, String tag) {
        this.messageId = messageId;
        this.payload = payload;
        this.topic = topic;
        this.tag = tag;
    }

    /**
     * Commits the message, marking it as fully processed.
     */
    public final void commit() {
        this.committed = true;
    }

    /**
     * Returns the message body as a byte array.
     */
    public byte[] getValueAsBytes() {
        return payload;
    }

    /**
     * Returns the message body as a JSON object.
     */
    public JSONObject getValueAsJson() {
        return (JSONObject) JSON.parse(payload);
    }

    /**
     * Returns the message body deserialized into an object.
     */
    public <T> T getValueAsObject(Class<T> cls) {
        return JSON.parseObject(payload, cls);
    }

    /**
     * Returns the message body as a string.
     */
    public String getValueAsString() {
        return new String(payload, Charset.defaultCharset());
    }
}
--------------------------------------------------------------------------------
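`ProduceMessage` and `ConsumeMessage` are the two halves of one payload contract: fastjson serializes on the producing side and deserializes on the consuming side. A minimal round-trip sketch (the `Order` bean, `RoundTripSketch` class, and topic name are hypothetical):

```java
import paas.foundation.mq.consumer.ConsumeMessage;
import paas.foundation.mq.producer.ProduceMessage;

// Hypothetical payload type; any fastjson-serializable bean works.
class Order {
    public String orderId;
    public int amount;
}

class RoundTripSketch {
    // Producing side: fastjson turns the bean into the binary payload.
    ProduceMessage toMessage(Order order) {
        return ProduceMessage.fromObject("demo_topic", order);
    }

    // Consuming side (inside MessageListener.process): fastjson decodes the
    // same payload back into the bean; getValueAsString() yields the raw JSON.
    Order fromMessage(ConsumeMessage message) {
        return message.getValueAsObject(Order.class);
    }
}
```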
/src/main/java/paas/foundation/mq/producer/internal/kafka/AbstractKafkaMqProducer.java:
--------------------------------------------------------------------------------
package paas.foundation.mq.producer.internal.kafka;

import paas.foundation.autoconfigure.mq.kafka.KafkaProperties;
import paas.foundation.mq.producer.Producer;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.producer.KafkaProducer;

import javax.annotation.Resource;
import java.util.Properties;

/**
 * Abstract producer.
 *
 * @author wangpengpeng
 * @create 2020-07-05 14:55
 */
public abstract class AbstractKafkaMqProducer implements Producer {

    @Resource
    private KafkaProperties kafkaProperties;

    public KafkaProducer<String, byte[]> producer;

    public void init() {
        Properties properties = new Properties();
        // Serializers: string keys, binary values.
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
        // Acknowledgement mode, defaulting to leader-only confirmation.
        properties.put("acks", kafkaProperties.getAcksConfig() == null ? "1" : kafkaProperties.getAcksConfig());
        // Number of retries.
        properties.put("retries", 0);
        // Bootstrap servers (required).
        properties.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, kafkaProperties.getBootstrapServer());
        // Authentication (public network only).
        if (kafkaProperties.getSecurityProtocol() != null) {
            properties.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, kafkaProperties.getSecurityProtocol());
        }
        if (kafkaProperties.getSaslJaasConfig() != null) {
            properties.put("sasl.jaas.config", kafkaProperties.getSaslJaasConfig());
        }
        if (kafkaProperties.getSaslMechanism() != null) {
            properties.put("sasl.mechanism", kafkaProperties.getSaslMechanism());
        }
        this.producer = new KafkaProducer<>(properties);
    }
}
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Spring-Boot-Starter-Kafka

spring-boot-starter-kafka lets you quickly integrate the Kafka or CKafka stream-processing platform into a Spring Boot project. It supports a failure-compensation (retry) mechanism and dead-letter messages, and is designed to avoid repeated consumption and message loss.

### Start using the spring-boot-starter
##### 1. Add the starter to your project's pom.xml (run `mvn install` on this project first so that spring-boot-starter-kafka exists in your local Maven repository)
```xml
<dependency>
    <groupId>paas.foundation</groupId>
    <artifactId>spring-boot-starter-kafka</artifactId>
    <version>1.0.0-RELEASE</version>
</dependency>
```
##### 2. Configure the Kafka bootstrap-server in application.yml

```yaml
paas:
  mq:
    kafka:
      bootstrap-server: xxx.xx.xx.xx:9092
```
##### 3. Start using: producers send messages
```java
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;
import paas.foundation.mq.producer.ProduceMessage;
import paas.foundation.mq.producer.Producer;
import java.util.HashMap;
import java.util.Map;

@RestController
public class Controller {
    @Autowired
    private Producer producer;

    @GetMapping("/send")
    void sendMsg() {
        for (int i = 1; i <= 30; i++) {
            ProduceMessage message = ProduceMessage.fromString("wpp_test_01", "hello world!");
            Map<String, String> userProperties = new HashMap<>();
            userProperties.put("testHeader01", "1");
            userProperties.put("testHeader02", "2");
            message.setUserProperties(userProperties);
            producer.send(message);
        }
    }
}
```

##### 4. Consumers consume messages
```java
import org.springframework.stereotype.Component;
import paas.foundation.mq.consumer.ConsumeMessage;
import paas.foundation.mq.consumer.MessageListener;

@Component
public class KafkaListener implements MessageListener {

    /**
     * Declare which topic to listen on.
     */
    @Override
    public String getTopic() {
        return "wpp_test_01";
    }

    @Override
    public void process(ConsumeMessage message) {
        System.out.println("The received message is " + message.getValueAsString());
        System.out.println("The received context information is " + message.getUserProperties());
    }
}
```
### Advanced Functions

- Message retry

  Failed consumption can be retried up to 16 times in total, with back-offs of 10s, 30s, 1min, 2min ... 10min, 20min, 30min, 1h and 2h. Messages that still fail after the configured number of retries are put into the dead-letter topic (DEAD_TOPIC) to wait for manual compensation (see the sketch after this configuration). `retry-count: 2` means a failed message is retried twice.
```yaml
paas:
  mq:
    kafka:
      bootstrap-server: xxx.xx.xx.xx:9092
      retry-count: 2
```
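Dead-lettered records keep their payload and carry the reserved headers, notably `topic` (the topic the record originally belonged to), so manual compensation can be implemented as an ordinary listener on DEAD_TOPIC. A hedged sketch; what you do with the record (alerting, persisting, re-publishing) is application policy, not part of the starter:

```java
import org.springframework.stereotype.Component;
import paas.foundation.mq.consumer.ConsumeMessage;
import paas.foundation.mq.consumer.MessageListener;

// Hypothetical compensation listener for dead-lettered messages.
@Component
public class DeadLetterListener implements MessageListener {

    @Override
    public String getTopic() {
        return "DEAD_TOPIC"; // KafkaConstantsUtil.DEAD_TOPIC
    }

    @Override
    public void process(ConsumeMessage message) {
        // The "topic" header carries the record's original topic; it is set
        // by the retry consumer before dead-lettering.
        String originalTopic = message.getUserProperties().getProperty("topic");
        System.out.println("Dead letter from " + originalTopic + ": " + message.getValueAsString());
    }
}
```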
- Message acknowledgement mechanism (acks-config)

  0 means success is reported as soon as the message has been sent out;

  1 means success is reported once the leader has confirmed the message;

  -1 means success is reported only after the leader and all followers have confirmed the message.
```yaml
paas:
  mq:
    kafka:
      bootstrap-server: xxx.xx.xx.xx:9092
      acks-config: 1
```
- Integrating CKafka
```yaml
paas:
  mq:
    kafka:
      bootstrap-server: ckafka-xxxxxx.xx-beijing.ckafka.tencentcloudmq.com:6007
      security-protocol: SASL_PLAINTEXT
      sasl-mechanism: PLAIN
      sasl-jaas-config: org.apache.kafka.common.security.plain.PlainLoginModule required username="xxxxxx#root" password="xxxxxx";
```
--------------------------------------------------------------------------------

/pom.xml:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>paas.foundation</groupId>
    <artifactId>spring-boot-starter-kafka</artifactId>
    <version>1.0.0-RELEASE</version>

    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>1.5.6.RELEASE</version>
    </parent>

    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>com.aliyun.openservices</groupId>
            <artifactId>ons-client</artifactId>
            <version>1.8.0.Final</version>
        </dependency>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <version>1.16.18</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <version>1.2.28</version>
        </dependency>
        <dependency>
            <groupId>org.javassist</groupId>
            <artifactId>javassist</artifactId>
            <version>3.21.0-GA</version>
        </dependency>
        <dependency>
            <groupId>commons-lang</groupId>
            <artifactId>commons-lang</artifactId>
            <version>2.6</version>
        </dependency>
        <dependency>
            <groupId>commons-logging</groupId>
            <artifactId>commons-logging</artifactId>
            <version>1.2</version>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-configuration-processor</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.11</artifactId>
            <version>1.0.0</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                </configuration>
            </plugin>
            <plugin>
                <artifactId>maven-source-plugin</artifactId>
                <configuration>
                    <attach>true</attach>
                </configuration>
                <version>2.2.1</version>
                <executions>
                    <execution>
                        <phase>compile</phase>
                        <goals>
                            <goal>jar</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>

    <distributionManagement>
        <repository>
            <id>multicloud-release</id>
            <name>multicloud-release</name>
            <url>https://packages.glodon.com/artifactory/maven-multicloud-release</url>
        </repository>
        <snapshotRepository>
            <id>multicloud-snapshot</id>
            <name>multicloud-snapshot</name>
            <url>https://packages.glodon.com/artifactory/maven-multicloud-snapshot</url>
        </snapshotRepository>
    </distributionManagement>
</project>
--------------------------------------------------------------------------------
/src/main/java/paas/foundation/mq/consumer/internal/kafka/KafkaConsumerServer.java:
--------------------------------------------------------------------------------
package paas.foundation.mq.consumer.internal.kafka;

import lombok.extern.slf4j.Slf4j;
import paas.foundation.mq.consumer.ConsumeMessage;
import paas.foundation.mq.utils.KafkaConstantsUtil;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.scheduling.annotation.Scheduled;

import java.util.*;

import static paas.foundation.mq.utils.KafkaConstantsUtil.RETRY_TOPIC;
import static paas.foundation.mq.utils.KafkaConstantsUtil.TOPIC_CAPTION;


/**
 * The actual consumer.
 *
 * @author wangpengpeng
 * @date 2020-07-05 14:55
 */
@Slf4j
@EnableScheduling
public class KafkaConsumerServer extends AbstractKafkaConsumer {

    @Scheduled(fixedRate = 1)
    public void trigger() {
        this.topicKafkaConsumerConcurrentHashMap.forEach((itemTopic, itemKafkaConsumer) -> {

            Map<TopicPartition, OffsetAndMetadata> currentOffsets = new HashMap<>();
            ConsumerRecords<String, String> records = itemKafkaConsumer.poll(1000);
            if (records.count() > 0) {
                log.info("Fetched {} records", records.count());
            }
            for (ConsumerRecord<String, String> record : records) {
                // Parse the Kafka headers.
                RecordHeaders recordHeaders = (RecordHeaders) record.headers();
                Properties properties = getProperties(recordHeaders);
                // Convert the Kafka record into a ConsumeMessage.
                ConsumeMessage consumeMessage = new ConsumeMessage(record.partition() + "@" + record.offset(), String.valueOf(record.value()).getBytes(), record.topic(), null);
                consumeMessage.setUserProperties(properties);
                log.info("Consuming message: timestamp={}, topic={}, partition={}, offset={}, value={}, properties={}\n", record.timestamp(), record.topic(), record.partition(), record.offset(), record.value(), consumeMessage.getUserProperties());

                try {
                    this.topicMessageListenerListConcurrentHashMap.get(itemTopic).process(consumeMessage);
                    currentOffsets.put(new TopicPartition(record.topic(), record.partition()), new OffsetAndMetadata(record.offset() + 1, "no metadata"));
                    itemKafkaConsumer.commitSync(currentOffsets);
                } catch (Exception e) {
                    // The offset is committed even on failure: redelivery is
                    // handled via RETRY_TOPIC rather than by Kafka itself.
                    currentOffsets.put(new TopicPartition(record.topic(), record.partition()), new OffsetAndMetadata(record.offset() + 1, "no metadata"));
                    itemKafkaConsumer.commitSync(currentOffsets);
                    log.error("Business exception, consumption failed: timestamp={}, topic={}, partition={}, offset={}, value={}, properties={}\n", record.timestamp(), record.topic(), record.partition(), record.offset(), record.value(), consumeMessage.getUserProperties());
                    if (retryProducer != null) {
                        int retryCount = Integer.parseInt(properties.getOrDefault(KafkaConstantsUtil.RETRY_COUNT, "0").toString());
                        recordHeaders.add(KafkaConstantsUtil.RETRY_COUNT, String.valueOf(retryCount + 1).getBytes());
                        recordHeaders.add(TOPIC_CAPTION, String.valueOf(record.topic()).getBytes());
                        ProducerRecord<String, byte[]> producerRecord = new ProducerRecord<>(RETRY_TOPIC, null, null, null, consumeMessage.getValueAsBytes(), recordHeaders);
                        retryProducer.send(producerRecord);
                    }
                }
            }
            itemKafkaConsumer.commitAsync();
        });
    }

    /**
     * Converts Kafka RecordHeaders into Properties.
     *
     * @param recordHeaders recordHeaders
     * @return Properties
     */
    public static Properties getProperties(RecordHeaders recordHeaders) {
        Properties properties = new Properties();
        for (Header header : recordHeaders) {
            properties.setProperty(header.key(), new String(header.value()));
        }
        return properties;
    }
}
--------------------------------------------------------------------------------
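Note the design choice above: the offset is committed whether or not `process()` throws, so Kafka itself never redelivers a failed record; redelivery happens only through RETRY_TOPIC when `retry-count` is configured. With retries disabled, an uncaught exception therefore drops the message. A hedged sketch of a defensive listener for that case (`handleFailure` is hypothetical application code, not part of the starter):

```java
import org.springframework.stereotype.Component;
import paas.foundation.mq.consumer.ConsumeMessage;
import paas.foundation.mq.consumer.MessageListener;

// Hypothetical listener for deployments that run without retry-count.
@Component
public class DefensiveListener implements MessageListener {

    @Override
    public String getTopic() {
        return "wpp_test_01";
    }

    @Override
    public void process(ConsumeMessage message) {
        try {
            // business logic ...
        } catch (Exception e) {
            handleFailure(message, e); // park the payload instead of losing it
        }
    }

    private void handleFailure(ConsumeMessage message, Exception e) {
        // e.g. persist message.getValueAsBytes() for later replay
    }
}
```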
/src/main/java/paas/foundation/mq/consumer/internal/kafka/KafkaRetryConsumerServer.java:
--------------------------------------------------------------------------------
package paas.foundation.mq.consumer.internal.kafka;


import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.internals.RecordHeaders;
import paas.foundation.mq.utils.KafkaConstantsUtil;

import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;

/**
 * Retry consumer.
 *
 * @author wangpengpeng
 * @create 2020-07-17 10:11
 */
@Slf4j
public class KafkaRetryConsumerServer implements Runnable {

    private final int propertiesRetryCount;
    private final KafkaConsumer<String, String> retryConsumer;
    private final KafkaProducer<String, byte[]> retryProducer;

    public KafkaRetryConsumerServer(KafkaProducer<String, byte[]> retryProducer, KafkaConsumer<String, String> retryConsumer, int propertiesRetryCount) {
        this.retryProducer = retryProducer;
        this.retryConsumer = retryConsumer;
        this.propertiesRetryCount = propertiesRetryCount;
    }

    @Override
    public void run() {
        log.info("Starting the message retry thread");
        while (true) {
            if (retryConsumer != null) {
                Map<TopicPartition, OffsetAndMetadata> currentOffsets = new ConcurrentHashMap<>();
                ConsumerRecords<String, String> records = retryConsumer.poll(10000);
                for (ConsumerRecord<String, String> record : records) {
                    currentOffsets.put(new TopicPartition(record.topic(), record.partition()), new OffsetAndMetadata(record.offset() + 1, "retry commit"));
                    retryConsumer.commitSync(currentOffsets);
                    RecordHeaders recordHeaders = (RecordHeaders) record.headers();
                    Properties properties = KafkaConsumerServer.getProperties(recordHeaders);
                    int retryCount = Integer.parseInt(properties.getProperty(KafkaConstantsUtil.RETRY_COUNT));
                    if (retryCount <= propertiesRetryCount) {
                        Long oldTime = record.timestamp();
                        Long rightTime = getNeedRetryTime(record.timestamp(), retryCount);
                        if (rightTime > System.currentTimeMillis()) {
                            // Not due yet: re-enqueue on RETRY_TOPIC, keeping the
                            // original timestamp so the due time stays stable.
                            ProducerRecord<String, byte[]> producerRecord = new ProducerRecord<>(KafkaConstantsUtil.RETRY_TOPIC, null, oldTime, null, String.valueOf(record.value()).getBytes(), recordHeaders);
                            retryProducer.send(producerRecord);
                        } else {
                            // Due: send the record back to its original topic.
                            ProducerRecord<String, byte[]> producerRecord = new ProducerRecord<>(properties.getProperty(KafkaConstantsUtil.TOPIC_CAPTION), null, null, null, String.valueOf(record.value()).getBytes(), recordHeaders);
                            log.info("==============[topic={}, retry #{}]: value={}, properties={}\n", properties.getProperty(KafkaConstantsUtil.TOPIC_CAPTION), retryCount, record.value(), properties);
                            retryProducer.send(producerRecord);
                        }
                    } else if (retryCount == propertiesRetryCount + 1) {
                        recordHeaders.add(KafkaConstantsUtil.RETRY_COUNT, String.valueOf(retryCount).getBytes());
                        recordHeaders.add(KafkaConstantsUtil.TOPIC_CAPTION, String.valueOf(properties.getProperty(KafkaConstantsUtil.TOPIC_CAPTION)).getBytes());
                        ProducerRecord<String, byte[]> producerRecord = new ProducerRecord<>(KafkaConstantsUtil.DEAD_TOPIC, null, null, null, String.valueOf(record.value()).getBytes(), recordHeaders);
                        properties.remove(KafkaConstantsUtil.RETRY_COUNT);
                        log.warn("============== Giving up after {} retries ======= moving to the dead-letter topic: topic={}, value={}, properties={}\n", propertiesRetryCount, KafkaConstantsUtil.DEAD_TOPIC, record.value(), properties);
                        retryProducer.send(producerRecord);
                    }
                }
                retryConsumer.commitSync();
            }
        }
    }

    /**
     * Computes the next retry time.
     *
     * @param timestamp  time the message was originally sent
     * @param retryCount how many times the message has failed so far
     * @return the time at which the message should be re-sent
     */
    private Long getNeedRetryTime(Long timestamp, int retryCount) {
        Integer[] retryTimeArray = new Integer[]{
                KafkaConstantsUtil.RETRY_10_SECOND,
                KafkaConstantsUtil.RETRY_30_SECOND,
                KafkaConstantsUtil.RETRY_1_MIN,
                KafkaConstantsUtil.RETRY_2_MIN,
                KafkaConstantsUtil.RETRY_3_MIN,
                KafkaConstantsUtil.RETRY_4_MIN,
                KafkaConstantsUtil.RETRY_5_MIN,
                KafkaConstantsUtil.RETRY_6_MIN,
                KafkaConstantsUtil.RETRY_7_MIN,
                KafkaConstantsUtil.RETRY_8_MIN,
                KafkaConstantsUtil.RETRY_9_MIN,
                KafkaConstantsUtil.RETRY_10_MIN,
                KafkaConstantsUtil.RETRY_20_MIN,
                KafkaConstantsUtil.RETRY_30_MIN,
                KafkaConstantsUtil.RETRY_1_H,
                KafkaConstantsUtil.RETRY_2_H
        };
        return retryTimeArray[retryCount - 1] + timestamp;
    }
}
--------------------------------------------------------------------------------
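A worked example of the arithmetic in `getNeedRetryTime`: the ladder is indexed by how many times the record has already failed, and the delay is measured from the record's original timestamp (the concrete values below are hypothetical):

```java
// Minimal sketch, assuming a record first sent at t0 that has failed twice.
public class RetryTimeExample {
    public static void main(String[] args) {
        long t0 = 1_600_000_000_000L; // hypothetical original timestamp
        int retryCount = 2;           // second failure -> ladder step 2
        long dueAt = t0 + 30_000L;    // retryTimeArray[1] == RETRY_30_SECOND
        // Until System.currentTimeMillis() passes dueAt, the record keeps
        // cycling through RETRY_TOPIC with its original timestamp preserved;
        // once due, it is re-published to its original topic.
        System.out.println("redeliver at " + dueAt + " (t0 + 30s)");
    }
}
```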
/src/main/java/paas/foundation/mq/consumer/internal/kafka/AbstractKafkaConsumer.java:
--------------------------------------------------------------------------------
package paas.foundation.mq.consumer.internal.kafka;

import paas.foundation.autoconfigure.mq.kafka.KafkaProperties;
import paas.foundation.mq.consumer.MessageListener;
import paas.foundation.mq.exception.MessageQueueException;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.KafkaAdminClient;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.admin.TopicListing;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.context.ApplicationContext;
import org.springframework.scheduling.concurrent.CustomizableThreadFactory;

import javax.annotation.Resource;
import java.util.*;
import java.util.concurrent.*;
import java.util.stream.Collectors;

import static paas.foundation.mq.utils.KafkaConstantsUtil.DEAD_TOPIC;
import static paas.foundation.mq.utils.KafkaConstantsUtil.RETRY_TOPIC;

/**
 * Abstract consumer.
 *
 * @author wangpengpeng
 * @date 2020-07-04 14:55
 */
@Slf4j
abstract class AbstractKafkaConsumer {

    /**
     * Topic -> message listener.
     */
    Map<String, MessageListener> topicMessageListenerListConcurrentHashMap = new ConcurrentHashMap<>();
    /**
     * Topic -> consumer.
     */
    Map<String, KafkaConsumer<String, String>> topicKafkaConsumerConcurrentHashMap = new ConcurrentHashMap<>();

    /**
     * Retry support.
     */
    protected Properties properties;
    protected KafkaProducer<String, byte[]> retryProducer;
    private Integer propertiesRetryCount;
    private KafkaConsumer<String, String> retryConsumer;

    /**
     * All topics known to the Kafka instance.
     */
    Set<String> topicSet = new CopyOnWriteArraySet<>();

    @Resource
    private ApplicationContext applicationContext;
    @Resource
    private KafkaProperties kafkaProperties;

    /**
     * Initializes the consumer side.
     */
    void init() throws MessageQueueException {
        // 1. Initialize the Kafka properties.
        initKafkaProperties();
        // 2. Validate the topics.
        topicCreateAndGet();
        // 3. Initialize the Kafka consumers.
        initKafkaConsumer();
        // 4. Initialize the Kafka retry machinery.
        initKafkaRetryConsumer();
    }

    /**
     * Initializes the Kafka properties.
     */
    private void initKafkaProperties() {
        properties = new Properties();
        // Bootstrap servers (required).
        properties.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, kafkaProperties.getBootstrapServer());
        // Public-network access (optional).
        if (kafkaProperties.getSecurityProtocol() != null) {
            properties.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, kafkaProperties.getSecurityProtocol());
        }
        if (kafkaProperties.getSaslMechanism() != null) {
            properties.put("sasl.mechanism", kafkaProperties.getSaslMechanism());
        }
        if (kafkaProperties.getSaslJaasConfig() != null) {
            properties.put("sasl.jaas.config", kafkaProperties.getSaslJaasConfig());
        }
        properties.put(org.apache.kafka.clients.consumer.ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(org.apache.kafka.clients.consumer.ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(org.apache.kafka.clients.consumer.ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        properties.put("enable.auto.commit", "false");
        if (kafkaProperties.getSessionTimeoutMs() != null) {
            properties.put(org.apache.kafka.clients.consumer.ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, kafkaProperties.getSessionTimeoutMs());
        }
        // Serializers for the retry producer.
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
    }

    /**
     * Creates the retry and dead-letter topics and fetches all topics.
     */
    private void topicCreateAndGet() throws MessageQueueException {
        AdminClient adminClient = KafkaAdminClient.create(properties);
        createTopics(adminClient, RETRY_TOPIC);
        createTopics(adminClient, DEAD_TOPIC);
        try {
            topicSet = getAllTopic(adminClient);
            log.info("Topics in the Kafka instance: {}", topicSet);
        } catch (Exception e) {
            log.error("Failed to fetch the topic list from Kafka: {}", e.getMessage());
            throw new MessageQueueException("Failed to fetch the topic list from Kafka");
        }
    }

    /**
     * Initializes the Kafka consumers.
     */
    private void initKafkaConsumer() throws MessageQueueException {
        Map<String, MessageListener> messageListenerBeans = applicationContext.getBeansOfType(MessageListener.class);
        Collection<MessageListener> messageListenerCollection = messageListenerBeans.values();

        if (!messageListenerCollection.isEmpty()) {
            for (MessageListener messageListener : messageListenerCollection) {
                String topic = messageListener.getTopic();
                if (topicSet.contains(topic)) {
                    // Each topic gets its own consumer, using the topic name as group id.
                    properties.put(org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_ID_CONFIG, messageListener.getTopic());
                    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
                    consumer.subscribe(Collections.singleton(messageListener.getTopic()));
                    this.topicMessageListenerListConcurrentHashMap.put(messageListener.getTopic(), messageListener);
                    this.topicKafkaConsumerConcurrentHashMap.put(messageListener.getTopic(), consumer);
                } else {
                    log.error("Topic {} does not exist in the Kafka instance; please create it there, or remove the listener registered for it", topic);
                    throw new MessageQueueException("Topic " + topic + " does not exist in the Kafka instance; please create it there, or remove the listener registered for it");
                }
            }
        }
    }

    /**
     * Initializes the Kafka retry machinery.
     */
    private void initKafkaRetryConsumer() throws MessageQueueException {
        propertiesRetryCount = kafkaProperties.getRetryCount();
        if (propertiesRetryCount != null && propertiesRetryCount > 0) {
            if (propertiesRetryCount > 16) {
                throw new MessageQueueException("retry-count must be less than or equal to 16");
            }
            if (topicSet.contains(RETRY_TOPIC) && topicSet.contains(DEAD_TOPIC)) {
                initRetryProducer();
                initRetryConsumer();
            } else {
                throw new MessageQueueException("RETRY_TOPIC or DEAD_TOPIC does not exist in the Kafka instance; please create them manually");
            }
        } else {
            log.info("Failed messages will not be retried");
        }
    }

    /**
     * Initializes the retry producer.
     */
    private void initRetryProducer() {
        this.retryProducer = new org.apache.kafka.clients.producer.KafkaProducer<>(properties);
    }

    /**
     * Initializes the retry consumer.
     */
    private void initRetryConsumer() {
        properties.put(org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_ID_CONFIG, RETRY_TOPIC);
        retryConsumer = new KafkaConsumer<>(properties);
        retryConsumer.subscribe(Collections.singleton(RETRY_TOPIC));
        ThreadFactory namedThreadFactory = new CustomizableThreadFactory("retry-thread-pool");
        ExecutorService singleThreadPool = new ThreadPoolExecutor(1, 1,
                0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<>(1024), namedThreadFactory, new ThreadPoolExecutor.AbortPolicy());
        singleThreadPool.submit(new KafkaRetryConsumerServer(this.retryProducer, retryConsumer, propertiesRetryCount));
        singleThreadPool.shutdown();
    }

    /**
     * Lists all topics.
     */
    private static Set<String> getAllTopic(AdminClient client) throws InterruptedException, ExecutionException {
        return client.listTopics().listings().get().stream().map(TopicListing::name).collect(Collectors.toSet());
    }

    /**
     * Creates a topic. (Tencent Cloud instances do not allow this.)
     */
    private static void createTopics(AdminClient adminClient, String name) {
        NewTopic newTopic = new NewTopic(name, 1, (short) 1);
        Collection<NewTopic> newTopicList = new ArrayList<>();
        newTopicList.add(newTopic);
        adminClient.createTopics(newTopicList);
    }
}
--------------------------------------------------------------------------------