├── README.md
├── awesome-netty-common
├── pom.xml
└── src
│ └── main
│ └── java
│ └── org
│ └── north
│ └── netty
│ └── common
│ └── utils
│ ├── ProtostuffHelper.java
│ └── SerializeUtils.java
├── awesome-netty-elasticsearch
└── pom.xml
├── awesome-netty-kafka
├── pom.xml
└── src
│ ├── main
│ └── java
│ │ └── com
│ │ └── north
│ │ └── netty
│ │ └── kafka
│ │ ├── KafkaClient.java
│ │ ├── bean
│ │ ├── AbstractKafkaResponse.java
│ │ ├── KafkaRequest.java
│ │ ├── KafkaRequestHeader.java
│ │ ├── KafkaResponse.java
│ │ ├── KafkaResponseHeader.java
│ │ ├── broker
│ │ │ └── Broker.java
│ │ ├── fetch
│ │ │ ├── AbortedTransaction.java
│ │ │ ├── FetchPartitionHeader.java
│ │ │ ├── FetchPartitionResp.java
│ │ │ ├── FetchRequest.java
│ │ │ ├── FetchResponse.java
│ │ │ ├── FetchTopicPartitionRequest.java
│ │ │ ├── FetchTopicRequest.java
│ │ │ └── FetchTopicResponse.java
│ │ ├── meta
│ │ │ ├── KafkaMetaRequest.java
│ │ │ └── KafkaMetaResponse.java
│ │ ├── msg
│ │ │ ├── ConsumerRecord.java
│ │ │ ├── KafkaMsgRecordBatch.java
│ │ │ └── KafkaMsgRecordV2.java
│ │ ├── partition
│ │ │ └── PartitionMateData.java
│ │ ├── produce
│ │ │ ├── PartitionData.java
│ │ │ ├── PartitionResponse.java
│ │ │ ├── ProduceRequest.java
│ │ │ ├── ProduceResponse.java
│ │ │ ├── Record.java
│ │ │ ├── TopicProduceData.java
│ │ │ └── TopicProduceRes.java
│ │ └── topic
│ │ │ └── TopicMetaData.java
│ │ ├── caches
│ │ └── RequestCacheCenter.java
│ │ ├── codec
│ │ └── KafkaResponseDecoder.java
│ │ ├── config
│ │ ├── KafkaConsumerConfig.java
│ │ └── KafkaProduceConfig.java
│ │ ├── enums
│ │ ├── ApiKeys.java
│ │ └── Errors.java
│ │ └── utils
│ │ ├── Crc32C.java
│ │ ├── PureJavaCrc32C.java
│ │ ├── SimplePartitioner.java
│ │ ├── StringSerializer.java
│ │ └── VarLengthUtils.java
│ └── test
│ └── java
│ └── test
│ └── kafkaClientTest.java
├── awesome-netty-mysql
└── pom.xml
├── awesome-netty-redis
├── pom.xml
└── src
│ ├── main
│ └── java
│ │ └── com
│ │ └── north
│ │ └── netty
│ │ └── redis
│ │ ├── clients
│ │ ├── AbstractRedisClient.java
│ │ ├── RedisBinaryClient.java
│ │ ├── RedisClient.java
│ │ └── RedisStringClient.java
│ │ ├── cmd
│ │ ├── AbstractCmd.java
│ │ ├── Cmd.java
│ │ ├── CmdResp.java
│ │ └── impl
│ │ │ ├── getcmd
│ │ │ ├── AbstractGetCmd.java
│ │ │ ├── binary
│ │ │ │ └── GetBinaryCmd.java
│ │ │ └── str
│ │ │ │ └── GetStringCmd.java
│ │ │ └── setcmd
│ │ │ ├── AbstractSetCmd.java
│ │ │ ├── binary
│ │ │ └── SetBinaryCmd.java
│ │ │ └── str
│ │ │ └── SetStringCmd.java
│ │ ├── codecs
│ │ ├── ByteBufToByteDecoder.java
│ │ ├── ByteToByteBufEncoder.java
│ │ └── RedisRespHandler.java
│ │ ├── config
│ │ └── RedisConfig.java
│ │ ├── connections
│ │ ├── ConnectionPool.java
│ │ └── RedisConnection.java
│ │ ├── enums
│ │ ├── ClientType.java
│ │ ├── ExpireMode.java
│ │ └── Xmode.java
│ │ ├── exceptions
│ │ ├── AwesomeNettyRedisException.java
│ │ └── FailedToGetConnectionException.java
│ │ └── utils
│ │ ├── CmdBuildUtils.java
│ │ ├── EncodeUtils.java
│ │ └── SymbolUtils.java
│ └── test
│ └── java
│ └── org
│ └── nort
│ └── netty
│ └── redis
│ └── test
│ ├── RedisBinaryClientTest.java
│ └── RedisStringClientTest.java
├── awesome-netty-zk
├── pom.xml
└── src
│ ├── main
│ └── java
│ │ └── org
│ │ └── north
│ │ └── netty
│ │ └── zk
│ │ ├── NettyZkClient.java
│ │ ├── bean
│ │ ├── AbstractZkResonse.java
│ │ ├── RequestHeader.java
│ │ ├── ZkRequest.java
│ │ ├── ZkResponse.java
│ │ ├── create
│ │ │ ├── ZkAcl.java
│ │ │ ├── ZkAclId.java
│ │ │ ├── ZkCreateRequest.java
│ │ │ └── ZkCreateResponse.java
│ │ ├── getchildren
│ │ │ ├── ZkGetChildrenRequest.java
│ │ │ └── ZkGetChildrenResponse.java
│ │ └── login
│ │ │ ├── ZkLoginRequest.java
│ │ │ └── ZkLoginResp.java
│ │ ├── factories
│ │ └── ZkCodecFactories.java
│ │ ├── registrys
│ │ └── ZkCaches.java
│ │ ├── utils
│ │ ├── CreateMode.java
│ │ └── OpCode.java
│ │ └── zkcodec
│ │ ├── ZkAbstractCodec.java
│ │ ├── ZkCodec.java
│ │ ├── createcodec
│ │ └── ZkCreateCodec.java
│ │ ├── getchildren
│ │ └── ZkGetChildrenCodec.java
│ │ └── login
│ │ ├── ZkLoginCodec.java
│ │ └── ZkLoginHandler.java
│ └── test
│ └── java
│ └── zk
│ └── ZkClientTest.java
└── pom.xml
/README.md:
--------------------------------------------------------------------------------
1 | ## Introduction:
2 |
3 | > What can Netty actually do? Everyone who is new to Netty, or who wants to understand it
4 | more deeply, has to face this soul-searching question. Unlike most Netty tutorials, which start
5 | from the server side, this project goes the other way: starting from the client side, it uses Netty to talk to the servers of open-source systems such as Kafka, ZooKeeper and Redis.
6 | This gives you a concrete feel for Netty's great strength in implementing custom protocols, teaches you how these frameworks communicate on the wire,
7 | and in turn helps you appreciate Netty's advantages on the server side as well.
8 | ------------------------------
9 |
10 | ## Documentation:
11 |
12 | ### zookeeper
13 | [https://juejin.im/post/5dd296c0e51d4508182449a6](https://juejin.im/post/5dd296c0e51d4508182449a6)
14 |
15 | ### redis
16 | [https://juejin.im/post/5dd33ebde51d4508587c0d7a](https://juejin.im/post/5dd33ebde51d4508587c0d7a)
17 |
18 | ### kafka
19 | [https://juejin.im/post/5ddb5605e51d4523551669b3](https://juejin.im/post/5ddb5605e51d4523551669b3)
20 |
21 |
22 |
23 | #### Frameworks covered, including but not limited to:
24 | - zookeeper
25 | - redis
26 | - kafka
27 | - mysql (not started yet)
28 | - elasticsearch (not started yet)
29 | - dubbo (not started yet)
30 |
--------------------------------------------------------------------------------
/awesome-netty-common/pom.xml:
--------------------------------------------------------------------------------
 1 | <?xml version="1.0" encoding="UTF-8"?>
 2 | <project xmlns="http://maven.apache.org/POM/4.0.0"
 3 |          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
 4 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
 5 |     <parent>
 6 |         <artifactId>awesome-netty</artifactId>
 7 |         <groupId>org.north.netty</groupId>
 8 |         <version>1.0-SNAPSHOT</version>
 9 |     </parent>
10 |     <modelVersion>4.0.0</modelVersion>
11 |
12 |     <artifactId>awesome-netty-common</artifactId>
13 |     <dependencies>
14 |         <dependency>
15 |             <groupId>io.netty</groupId>
16 |             <artifactId>netty-all</artifactId>
17 |             <version>4.1.42.Final</version>
18 |         </dependency>
19 |         <dependency>
20 |             <groupId>com.google.code.gson</groupId>
21 |             <artifactId>gson</artifactId>
22 |             <version>2.2.4</version>
23 |         </dependency>
24 |         <dependency>
25 |             <groupId>junit</groupId>
26 |             <artifactId>junit</artifactId>
27 |             <version>4.12</version>
28 |         </dependency>
29 |
30 |         <dependency>
31 |             <groupId>com.google.guava</groupId>
32 |             <artifactId>guava</artifactId>
33 |             <version>25.0-jre</version>
34 |         </dependency>
35 |
36 |         <dependency>
37 |             <groupId>com.dyuproject.protostuff</groupId>
38 |             <artifactId>protostuff-core</artifactId>
39 |             <version>1.0.8</version>
40 |         </dependency>
41 |
42 |         <dependency>
43 |             <groupId>com.dyuproject.protostuff</groupId>
44 |             <artifactId>protostuff-runtime</artifactId>
45 |             <version>1.0.8</version>
46 |         </dependency>
47 |     </dependencies>
48 |
49 |
50 |     <build>
51 |         <plugins>
52 |             <plugin>
53 |                 <groupId>org.apache.maven.plugins</groupId>
54 |                 <artifactId>maven-compiler-plugin</artifactId>
55 |                 <configuration>
56 |                     <source>1.8</source>
57 |                     <target>1.8</target>
58 |                 </configuration>
59 |             </plugin>
60 |         </plugins>
61 |     </build>
62 |
63 | </project>
64 |
--------------------------------------------------------------------------------
/awesome-netty-common/src/main/java/org/north/netty/common/utils/ProtostuffHelper.java:
--------------------------------------------------------------------------------
1 | package org.north.netty.common.utils;
2 | import com.dyuproject.protostuff.LinkedBuffer;
3 | import com.dyuproject.protostuff.ProtostuffIOUtil;
4 | import com.dyuproject.protostuff.Schema;
5 | import com.dyuproject.protostuff.runtime.RuntimeSchema;
6 | /**
7 | * Protostuff-based serialization helper (protobuf runtime schemas)
8 | * @author laihaohua
9 | */
10 | public class ProtostuffHelper {
11 |     private ProtostuffHelper() {
12 |     }
13 |
14 |     public static <T> byte[] serializeObject(T object, Class<T> clz) {
15 |         Schema<T> schema = RuntimeSchema.createFrom(clz);
16 |         LinkedBuffer buffer = LinkedBuffer.allocate(512);
17 |         return ProtostuffIOUtil.toByteArray(object, schema, buffer);
18 |     }
19 |
20 |     public static <T> T deSerializeObject(byte[] bytes, Class<T> clz) {
21 |         RuntimeSchema<T> schema = RuntimeSchema.createFrom(clz);
22 |
23 |         T t;
24 |         try {
25 |             t = clz.newInstance();
26 |         } catch (Exception e) {
27 |             throw new RuntimeException("init object failed.", e);
28 |         }
29 |
30 |         ProtostuffIOUtil.mergeFrom(bytes, t, schema);
31 |         return t;
32 |     }
33 | }
35 |
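
A minimal round trip with the helper above; the `User` bean here is purely illustrative (any class with a public no-arg constructor works, since the helper calls `clz.newInstance()`):

```java
import org.north.netty.common.utils.ProtostuffHelper;

public class ProtostuffHelperDemo {
    public static class User {
        public String name;
    }

    public static void main(String[] args) {
        User u = new User();
        u.name = "north";
        // the schema is derived from the class at runtime, so no .proto file is needed
        byte[] bytes = ProtostuffHelper.serializeObject(u, User.class);
        User copy = ProtostuffHelper.deSerializeObject(bytes, User.class);
        System.out.println(copy.name); // north
    }
}
```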
--------------------------------------------------------------------------------
/awesome-netty-common/src/main/java/org/north/netty/common/utils/SerializeUtils.java:
--------------------------------------------------------------------------------
1 | package org.north.netty.common.utils;
2 |
3 | import com.google.common.collect.Lists;
4 | import io.netty.buffer.ByteBuf;
5 |
6 | import java.io.*;
7 | import java.nio.charset.StandardCharsets;
8 | import java.util.List;
9 |
10 | public class SerializeUtils {
11 | public static byte[] toByteArray(Object obj){
12 | try {
13 | ByteArrayOutputStream byteArrayOS = new ByteArrayOutputStream();
14 | ObjectOutputStream stream = new ObjectOutputStream(byteArrayOS);
15 | stream.writeObject(obj);
16 | stream.close();
17 | return byteArrayOS.toByteArray();
18 | } catch (IOException e) {
19 | e.printStackTrace();
20 | }
21 | return null;
22 | }
23 |
24 | public static Object byteArrayToObj(byte[] bytes) {
25 | Object obj = null;
26 | try {
27 | ByteArrayInputStream bis = new ByteArrayInputStream (bytes);
28 | ObjectInputStream ois = new ObjectInputStream (bis);
29 | obj = ois.readObject();
30 | ois.close();
31 | bis.close();
32 | } catch (IOException ex) {
33 | ex.printStackTrace();
34 | } catch (ClassNotFoundException ex) {
35 | ex.printStackTrace();
36 | }
37 | return obj;
38 | }
39 |
40 | /**
41 | * The length prefix is an int (4 bytes).
42 | * @param msg
43 | * @param out
44 | */
45 | public static void writeStringToBuffer(String msg, ByteBuf out){
46 | if (msg == null) {
47 | out.writeInt(-1);
48 | return;
49 | }
50 | byte [] bytes = msg.getBytes(StandardCharsets.UTF_8);
51 | // length of the string in bytes
52 | out.writeInt(bytes.length);
53 | out.writeBytes(bytes);
54 | }
55 |
56 | /**
57 | * The length prefix is a short (2 bytes).
58 | * @param msg
59 | * @param out
60 | */
61 | public static void writeStringToBuffer2(String msg, ByteBuf out){
62 | if (msg == null) {
63 | out.writeShort(-1);
64 | return;
65 | }
66 | byte [] bytes = msg.getBytes(StandardCharsets.UTF_8);
67 | // length of the string in bytes
68 | out.writeShort(bytes.length);
69 | out.writeBytes(bytes);
70 | }
71 |
72 | /**
73 | * Reads a string whose length prefix is 4 bytes.
74 | * @param in
75 | * @return
76 | */
77 | public static String readStringToBuffer( ByteBuf in){
78 | int strLen = in.readInt();
79 | if(strLen < 0){
80 | return null;
81 | }
82 | byte [] bytes = new byte[strLen];
83 | in.readBytes(bytes);
84 | String s = new String(bytes, StandardCharsets.UTF_8);
85 | return s;
86 | }
87 |
88 | /**
89 | * Reads a string whose length prefix is 2 bytes.
90 | * @param in
91 | * @return
92 | */
93 | public static String readStringToBuffer2( ByteBuf in){
94 | int strLen = in.readShort();
95 | if(strLen < 0){
96 | return null;
97 | }
98 | byte [] bytes = new byte[strLen];
99 | in.readBytes(bytes);
100 | String s = new String(bytes, StandardCharsets.UTF_8);
101 | return s;
102 | }
103 | public static void writeStringListToBuffer(List<String> lists, ByteBuf out){
104 | if(lists == null){
105 | out.writeInt(-1);
106 | return;
107 | }
108 | out.writeInt(lists.size());
109 | for(String s : lists){
110 | writeStringToBuffer2(s, out);
111 | }
112 |
113 | }
114 |
115 | public static void writeByteArrToBuffer(byte [] buffer, ByteBuf out){
116 | if (buffer == null) {
117 | out.writeInt(-1);
118 | return;
119 | }
120 | // length of the byte array
121 | out.writeInt(buffer.length);
122 | out.writeBytes(buffer);
123 | }
124 |
125 | }
126 |
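
A quick sketch of how the length-prefixed encodings above round-trip (the buffer and strings are illustrative):

```java
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import org.north.netty.common.utils.SerializeUtils;

public class SerializeUtilsDemo {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.buffer();
        // a 4-byte int length prefix followed by the UTF-8 bytes
        SerializeUtils.writeStringToBuffer("hello", buf);
        // null is written as a length prefix of -1
        SerializeUtils.writeStringToBuffer(null, buf);
        System.out.println(SerializeUtils.readStringToBuffer(buf)); // hello
        System.out.println(SerializeUtils.readStringToBuffer(buf)); // null
        buf.release();
    }
}
```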
--------------------------------------------------------------------------------
/awesome-netty-elasticsearch/pom.xml:
--------------------------------------------------------------------------------
 1 | <?xml version="1.0" encoding="UTF-8"?>
 2 | <project xmlns="http://maven.apache.org/POM/4.0.0"
 3 |          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
 4 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
 5 |     <parent>
 6 |         <artifactId>awesome-netty</artifactId>
 7 |         <groupId>org.north.netty</groupId>
 8 |         <version>1.0-SNAPSHOT</version>
 9 |     </parent>
10 |     <modelVersion>4.0.0</modelVersion>
11 |
12 |     <artifactId>awesome-netty-elasticsearch</artifactId>
13 |
14 |
19 |
20 | </project>
21 |
--------------------------------------------------------------------------------
/awesome-netty-kafka/pom.xml:
--------------------------------------------------------------------------------
 1 | <?xml version="1.0" encoding="UTF-8"?>
 2 | <project xmlns="http://maven.apache.org/POM/4.0.0"
 3 |          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
 4 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
 5 |     <parent>
 6 |         <artifactId>awesome-netty</artifactId>
 7 |         <groupId>org.north.netty</groupId>
 8 |         <version>1.0-SNAPSHOT</version>
 9 |     </parent>
10 |     <modelVersion>4.0.0</modelVersion>
11 |
12 |     <artifactId>awesome-netty-kafka</artifactId>
13 |     <dependencies>
14 |         <dependency>
15 |             <groupId>org.north.netty</groupId>
16 |             <artifactId>awesome-netty-common</artifactId>
17 |             <version>1.0-SNAPSHOT</version>
18 |         </dependency>
19 |     </dependencies>
20 |
21 |     <build>
22 |         <plugins>
23 |             <plugin>
24 |                 <groupId>org.apache.maven.plugins</groupId>
25 |                 <artifactId>maven-compiler-plugin</artifactId>
26 |                 <configuration>
27 |                     <source>8</source>
28 |                     <target>8</target>
29 |                 </configuration>
30 |             </plugin>
31 |         </plugins>
32 |     </build>
33 | </project>
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/KafkaClient.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka;
2 |
3 | import com.google.common.collect.Lists;
4 | import com.north.netty.kafka.bean.fetch.*;
5 | import com.north.netty.kafka.bean.meta.KafkaMetaRequest;
6 | import com.north.netty.kafka.bean.meta.KafkaMetaResponse;
7 | import com.north.netty.kafka.bean.msg.ConsumerRecord;
8 | import com.north.netty.kafka.bean.msg.KafkaMsgRecordBatch;
9 | import com.north.netty.kafka.bean.msg.KafkaMsgRecordV2;
10 | import com.north.netty.kafka.bean.produce.*;
11 | import com.north.netty.kafka.caches.RequestCacheCenter;
12 | import com.north.netty.kafka.codec.KafkaResponseDecoder;
13 | import com.north.netty.kafka.config.KafkaConsumerConfig;
14 | import com.north.netty.kafka.config.KafkaProduceConfig;
15 | import com.north.netty.kafka.enums.Errors;
16 | import com.north.netty.kafka.utils.SimplePartitioner;
17 | import com.north.netty.kafka.utils.StringSerializer;
18 | import io.netty.bootstrap.Bootstrap;
19 | import io.netty.buffer.ByteBuf;
20 | import io.netty.buffer.PooledByteBufAllocator;
21 | import io.netty.channel.*;
22 | import io.netty.channel.nio.NioEventLoopGroup;
23 | import io.netty.channel.socket.nio.NioSocketChannel;
24 | import io.netty.handler.codec.LengthFieldBasedFrameDecoder;
25 | import io.netty.handler.codec.LengthFieldPrepender;
26 |
27 | import java.util.ArrayList;
28 | import java.util.HashMap;
29 | import java.util.List;
30 | import java.util.Map;
31 | import java.util.concurrent.atomic.AtomicInteger;
32 |
33 | /**
34 | * @author laihaohua
35 | */
36 | public class KafkaClient {
37 | private Channel channel;
38 | private String clientId;
39 | private RequestCacheCenter requestCacheCenter = new RequestCacheCenter();
40 | private AtomicInteger requestId = new AtomicInteger(1);
41 |
42 | public KafkaClient(String clientId, String host, int port){
43 | this.clientId = clientId;
44 | EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
45 | Bootstrap bootstrap = new Bootstrap();
46 | bootstrap.option(ChannelOption.SO_KEEPALIVE, true);
47 | bootstrap.channel(NioSocketChannel.class);
48 | bootstrap.group(eventLoopGroup);
49 | bootstrap.handler(new ChannelInitializer<NioSocketChannel>() {
50 | @Override
51 | protected void initChannel(NioSocketChannel ch) throws Exception {
52 | ch.pipeline()
53 | .addLast(new LengthFieldPrepender(4))
54 | .addLast(new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE,0,4,0,4))
55 | .addLast(new KafkaResponseDecoder(requestCacheCenter));
56 | }
57 | });
58 | ChannelFuture channelFuture = null;
59 | try {
60 | channelFuture = bootstrap.connect(host, port).sync();
61 | this.channel = channelFuture.channel();
62 | } catch (InterruptedException e) {
63 | e.printStackTrace();
64 | }
65 |
66 | }
67 |
68 | public KafkaMetaResponse fetchMataData(String topic){
69 | Integer xId = requestId.getAndIncrement();
70 | KafkaMetaRequest kafkaMetaRequest = new KafkaMetaRequest(clientId, xId);
71 | kafkaMetaRequest.setTopics(Lists.newArrayList(topic));
72 | ByteBuf byteBuf = PooledByteBufAllocator.DEFAULT.buffer();
73 | kafkaMetaRequest.serializable(byteBuf);
74 | try {
75 | requestCacheCenter.putKafkaResponse(xId, new KafkaMetaResponse());
76 | this.channel.writeAndFlush(byteBuf).sync();
77 | KafkaMetaResponse response = (KafkaMetaResponse)requestCacheCenter.waitForResp(xId, 400000);
78 | return response;
79 | } catch (InterruptedException e) {
80 | e.printStackTrace();
81 | }
82 | return null;
83 | }
84 | public ProduceResponse send(KafkaProduceConfig config, String topic , String key, String val){
85 |
86 | if(config == null || topic == null || topic.isEmpty() || val == null || val.isEmpty()){
87 | throw new IllegalArgumentException("topic and val must not be empty");
88 | }
89 |
90 | // serializers: both key and val simply use the String serializer
91 | byte [] keyBytes = StringSerializer.getBytes(key);
92 | byte [] valBytes = StringSerializer.getBytes(val);
93 |
94 | // partitioner
95 | int partition = SimplePartitioner.getPartion(topic, keyBytes, valBytes);
96 |
97 | KafkaMsgRecordV2 kafkaMsgRecordV2 = new KafkaMsgRecordV2(keyBytes, valBytes , null);
98 | KafkaMsgRecordBatch kafkaMsgRecordBatch = new KafkaMsgRecordBatch(kafkaMsgRecordV2);
99 |
100 | Record record = new Record();
101 | // pin the record to the chosen partition
102 | record.setPartition(partition);
103 | record.setKafkaMsgRecordBatchList(new ArrayList<>());
104 | record.getKafkaMsgRecordBatchList().add(kafkaMsgRecordBatch);
105 |
106 | PartitionData partitionData = new PartitionData();
107 | partitionData.setRecordSset(record);
108 |
109 | TopicProduceData topicProduceData = new TopicProduceData();
110 | topicProduceData.setTopic(topic);
111 | topicProduceData.setData(Lists.newArrayList(partitionData));
112 |
113 | Integer xid = requestId.getAndIncrement();
114 | ProduceRequest produceRequest = new ProduceRequest(clientId, xid);
115 | produceRequest.setAcks(config.getAck());
116 | produceRequest.setTimeOut(config.getTimeout());
117 | produceRequest.setTransactionalId(null);
118 | produceRequest.setTopicData(Lists.newArrayList(topicProduceData));
119 |
120 |
121 | ByteBuf byteBuf = PooledByteBufAllocator.DEFAULT.buffer();
122 | produceRequest.serializable(byteBuf);
123 | try {
124 | requestCacheCenter.putKafkaResponse(xid, new ProduceResponse());
125 | this.channel.writeAndFlush(byteBuf).sync();
126 | ProduceResponse response = (ProduceResponse)requestCacheCenter.waitForResp(xid, config.getTimeout());
127 | return response;
128 | } catch (InterruptedException e) {
129 | e.printStackTrace();
130 | }
131 | return null;
132 |
133 | }
134 |
135 |
136 | public Map<Integer, List<ConsumerRecord>> poll(KafkaConsumerConfig consumerConfig, String topic, int partition, long fetchOffset){
137 | if(consumerConfig == null || topic == null){
138 | throw new IllegalArgumentException("required parameters must not be null");
139 | }
140 |
141 | Integer xid = requestId.getAndIncrement();
142 | FetchRequest fetchRequest = new FetchRequest(this.clientId, xid);
143 |
144 | FetchTopicPartitionRequest fetchTopicPartitionRequest = new FetchTopicPartitionRequest();
145 |
146 | fetchTopicPartitionRequest.setPartition(partition);
147 | fetchTopicPartitionRequest.setFetchOffset(fetchOffset);
148 | fetchTopicPartitionRequest.setLogStartOffset(0L);
149 | fetchTopicPartitionRequest.setMaxBytes(consumerConfig.getMaxBytes());
150 |
151 | FetchTopicRequest fetchTopicRequest = new FetchTopicRequest();
152 | fetchTopicRequest.setTopic(topic);
153 | fetchTopicRequest.setPartitions(new ArrayList<>());
154 | fetchTopicRequest.getPartitions().add(fetchTopicPartitionRequest);
155 |
156 | fetchRequest.setReplicaId(-1);
157 | fetchRequest.setMaxBytes(consumerConfig.getMaxBytes());
158 | fetchRequest.setMaxWaitTime(consumerConfig.getMaxWaitTime());
159 | fetchRequest.setMinBytes(consumerConfig.getMinBytes());
160 | byte b = 0;
161 | fetchRequest.setIsolationLevel(b);
162 | fetchRequest.setTopics(new ArrayList<>());
163 | fetchRequest.getTopics().add(fetchTopicRequest);
164 |
165 |
166 | ByteBuf byteBuf = PooledByteBufAllocator.DEFAULT.buffer();
167 | fetchRequest.serializable(byteBuf);
168 |
169 |
170 | try {
171 | requestCacheCenter.putKafkaResponse(xid, new FetchResponse());
172 | this.channel.writeAndFlush(byteBuf).sync();
173 | FetchResponse response = (FetchResponse)requestCacheCenter.waitForResp(xid, consumerConfig.getMaxWaitTime());
174 | return parseResp(response);
175 | } catch (InterruptedException e) {
176 | e.printStackTrace();
177 | }
178 | return null;
179 |
180 | }
181 |
182 |
183 | /**
184 | * The structure of a Kafka fetch response is deeply nested; this pulls out just the data we want.
185 | * @param response
186 | * @return
187 | */
188 | private Map<Integer, List<ConsumerRecord>> parseResp(FetchResponse response){
189 | if(response == null){
190 | return null;
191 | }
192 | Map<Integer, List<ConsumerRecord>> partitionRecordMap = new HashMap<>();
193 | for(FetchTopicResponse fetchTopicResponse : response.getResponses()){
194 | // since this implementation only ever subscribes to a single topic, the topic name is not used here
195 | String topic = fetchTopicResponse.getTopic();
196 | List<FetchPartitionResp> partitionResps = fetchTopicResponse.getPartitionResps();
197 | assert partitionResps != null;
198 | // walk every partition of this topic
199 | for(FetchPartitionResp partitionResp : partitionResps){
200 | FetchPartitionHeader fetchPartitionHeader = partitionResp.getPartitionHeaders();
201 | short errorCode = fetchPartitionHeader.getErrorCode();
202 | if(errorCode != Errors.NONE.code()){
203 | // if this partition's response carries an error, surface it as an exception
204 | throw new IllegalArgumentException("broker returned an error: " + Errors.forCode(errorCode).message());
205 | }
206 | // initialize the record list for this partition
207 | Integer partition = fetchPartitionHeader.getPartition();
208 | if(!partitionRecordMap.containsKey(partition)){
209 | partitionRecordMap.putIfAbsent(partition, new ArrayList<>());
210 | }
211 | Record record = partitionResp.getRecordSset();
212 | assert record != null;
213 | List<KafkaMsgRecordBatch> kafkaMsgRecordBatchList = record.getKafkaMsgRecordBatchList();
214 | // the response may be split into many batches, so process them batch by batch
215 | for(KafkaMsgRecordBatch kafkaMsgRecordBatch : kafkaMsgRecordBatchList){
216 | Long baseOffset = kafkaMsgRecordBatch.getBaseOffset();
217 | long firstTimestamp = kafkaMsgRecordBatch.getFirstTimestamp();
218 | List<KafkaMsgRecordV2> kafkaMsgRecordV2List = kafkaMsgRecordBatch.getMsgs();
219 | for(KafkaMsgRecordV2 kafkaMsgRecordV2 : kafkaMsgRecordV2List){
220 | ConsumerRecord consumerRecord = new ConsumerRecord();
221 | long offset = baseOffset + kafkaMsgRecordV2.getOffsetDelta();
222 | consumerRecord.setOffset(offset);
223 | long timestamp = firstTimestamp + kafkaMsgRecordV2.getTimestampDelta();
224 | consumerRecord.setTimeStamp(timestamp);
225 | consumerRecord.setKey(StringSerializer.getString(kafkaMsgRecordV2.getKey()));
226 | consumerRecord.setVal(StringSerializer.getString(kafkaMsgRecordV2.getValues()));
227 | partitionRecordMap.get(partition).add(consumerRecord);
228 | }
229 | }
230 |
231 | }
232 | }
233 | return partitionRecordMap;
234 | }
235 | }
236 |
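
A hedged usage sketch of the client above. The broker address is illustrative, and the setters on KafkaProduceConfig/KafkaConsumerConfig are assumptions inferred from the getters used in send(...) and poll(...) (those classes are listed in the tree but not shown in this dump):

```java
import com.north.netty.kafka.KafkaClient;
import com.north.netty.kafka.bean.msg.ConsumerRecord;
import com.north.netty.kafka.bean.produce.ProduceResponse;
import com.north.netty.kafka.config.KafkaConsumerConfig;
import com.north.netty.kafka.config.KafkaProduceConfig;

import java.util.List;
import java.util.Map;

public class KafkaClientDemo {
    public static void main(String[] args) {
        KafkaClient client = new KafkaClient("demo-client", "127.0.0.1", 9092);

        KafkaProduceConfig produceConfig = new KafkaProduceConfig();
        produceConfig.setAck((short) 1); // assumed setter matching the getAck() used in send(...)
        produceConfig.setTimeout(3000);  // assumed setter matching getTimeout()
        ProduceResponse produceResponse = client.send(produceConfig, "test-topic", "k", "v");

        KafkaConsumerConfig consumerConfig = new KafkaConsumerConfig();
        consumerConfig.setMaxBytes(1024 * 1024); // assumed setters matching the getters used in poll(...)
        consumerConfig.setMinBytes(1);
        consumerConfig.setMaxWaitTime(500);
        Map<Integer, List<ConsumerRecord>> records =
                client.poll(consumerConfig, "test-topic", 0, 0L);
        System.out.println(records);
    }
}
```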
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/AbstractKafkaResponse.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean;
2 |
3 | /**
4 | * @author laihaohua
5 | */
6 | public abstract class AbstractKafkaResponse implements KafkaResponse{
7 | protected Integer correlationId;
8 | protected KafkaResponseHeader kafkaResponseHeader;
9 |
10 | public KafkaResponseHeader getKafkaResponseHeader() {
11 | return kafkaResponseHeader;
12 | }
13 |
14 | public void setKafkaResponseHeader(KafkaResponseHeader kafkaResponseHeader) {
15 | this.kafkaResponseHeader = kafkaResponseHeader;
16 | }
17 |
18 | public Integer getCorrelationId() {
19 | return correlationId;
20 | }
21 |
22 | public void setCorrelationId(Integer correlationId) {
23 | this.correlationId = correlationId;
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/KafkaRequest.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean;
2 |
3 |
4 | import io.netty.buffer.ByteBuf;
5 |
6 | public interface KafkaRequest {
7 | void serializable(ByteBuf out);
8 | }
9 |
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/KafkaRequestHeader.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean;
2 |
3 | import io.netty.buffer.ByteBuf;
4 | import org.north.netty.common.utils.SerializeUtils;
5 |
6 | import java.io.Serializable;
7 | import java.util.Arrays;
8 |
9 | /**
10 | * @author laihaohua
11 | */
12 | public class KafkaRequestHeader implements Serializable {
13 | private Short apiKey;
14 | private Short apiVersion;
15 | private Integer correlationId;
16 | private String clientId;
17 | public void serializable(ByteBuf out){
18 | out.writeShort(apiKey);
19 | out.writeShort(apiVersion);
20 | out.writeInt(correlationId);
21 | SerializeUtils.writeStringToBuffer2(clientId, out);
22 | }
23 |
24 | public Short getApiKey() {
25 | return apiKey;
26 | }
27 |
28 | public void setApiKey(Short apiKey) {
29 | this.apiKey = apiKey;
30 | }
31 |
32 | public Short getApiVersion() {
33 | return apiVersion;
34 | }
35 |
36 | public void setApiVersion(Short apiVersion) {
37 | this.apiVersion = apiVersion;
38 | }
39 |
40 | public Integer getCorrelationId() {
41 | return correlationId;
42 | }
43 |
44 | public void setCorrelationId(Integer correlationId) {
45 | this.correlationId = correlationId;
46 | }
47 |
48 | public String getClientId() {
49 | return clientId;
50 | }
51 |
52 | public void setClientId(String clientId) {
53 | this.clientId = clientId;
54 | }
55 | }
56 |
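
To make the wire layout concrete: serializable(...) writes 2 bytes of apiKey, 2 of apiVersion, 4 of correlationId, then the clientId with a 2-byte length prefix. A small sketch using only the setters shown above (the apiKey/apiVersion values are illustrative; the real ones come from the ApiKeys enum, which is not shown in this dump):

```java
import com.north.netty.kafka.bean.KafkaRequestHeader;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class KafkaRequestHeaderDemo {
    public static void main(String[] args) {
        KafkaRequestHeader header = new KafkaRequestHeader();
        header.setApiKey((short) 3);     // illustrative
        header.setApiVersion((short) 5); // illustrative
        header.setCorrelationId(1);
        header.setClientId("c1");

        ByteBuf buf = Unpooled.buffer();
        header.serializable(buf);
        // 2 (apiKey) + 2 (apiVersion) + 4 (correlationId) + 2 (length prefix) + 2 ("c1") = 12
        System.out.println(buf.readableBytes()); // 12
        buf.release();
    }
}
```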
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/KafkaResponse.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean;
2 |
3 | import io.netty.buffer.ByteBuf;
4 |
5 | /**
6 | * @author laihaohua
7 | */
8 | public interface KafkaResponse {
9 |
10 | /**
11 | * Deserialize this response body.
12 | * @param byteBuf
13 | */
14 | void deserialize(ByteBuf byteBuf);
15 |
16 | }
17 |
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/KafkaResponseHeader.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean;
2 |
3 | import io.netty.buffer.ByteBuf;
4 |
5 | import java.io.Serializable;
6 |
7 | /**
8 | * @author laihaohua
9 | */
10 | public class KafkaResponseHeader implements Serializable {
11 | private Integer correlationId;
12 | public void deserialize(ByteBuf byteBuf){
13 | this.correlationId = byteBuf.readInt();
14 | }
15 |
16 | public Integer getCorrelationId() {
17 | return correlationId;
18 | }
19 |
20 | public void setCorrelationId(Integer correlationId) {
21 | this.correlationId = correlationId;
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/broker/Broker.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean.broker;
2 |
3 | import io.netty.buffer.ByteBuf;
4 | import org.north.netty.common.utils.SerializeUtils;
5 |
6 | import java.io.Serializable;
7 |
8 | /**
9 | * @author laihaohua
10 | */
11 | public class Broker implements Serializable {
12 | private Integer nodeId;
13 | private String host;
14 | private Integer port;
15 | private String rack;
16 |
17 | public Integer getNodeId() {
18 | return nodeId;
19 | }
20 |
21 | public void setNodeId(Integer nodeId) {
22 | this.nodeId = nodeId;
23 | }
24 |
25 | public String getHost() {
26 | return host;
27 | }
28 |
29 | public void setHost(String host) {
30 | this.host = host;
31 | }
32 |
33 | public Integer getPort() {
34 | return port;
35 | }
36 |
37 | public void setPort(Integer port) {
38 | this.port = port;
39 | }
40 |
41 | public String getRack() {
42 | return rack;
43 | }
44 |
45 | public void setRack(String rack) {
46 | this.rack = rack;
47 | }
48 |
49 | public void deserialize(ByteBuf byteBuf) {
50 | this.nodeId = byteBuf.readInt();
51 | this.host = SerializeUtils.readStringToBuffer2(byteBuf);
52 | this.port = byteBuf.readInt();
53 | this.rack = SerializeUtils.readStringToBuffer2(byteBuf);
54 | }
55 | }
56 |
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/fetch/AbortedTransaction.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean.fetch;
2 |
3 | import io.netty.buffer.ByteBuf;
4 |
5 | public class AbortedTransaction {
6 | private Long producerId;
7 | private Long firstOffset;
8 | public void deserialize(ByteBuf in){
9 | this.producerId = in.readLong();
10 | this.firstOffset = in.readLong();
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/fetch/FetchPartitionHeader.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean.fetch;
2 |
3 | import io.netty.buffer.ByteBuf;
4 |
5 | import java.io.Serializable;
6 | import java.util.ArrayList;
7 | import java.util.List;
8 |
9 | public class FetchPartitionHeader implements Serializable {
10 | private Integer partition;
11 | private Short errorCode;
12 | private Long highWaterMark;
13 | private Long lastStableOffset;
14 | private Long logStartOffset;
15 | private List<AbortedTransaction> abortedTransactions;
16 | public void deserialize(ByteBuf in){
17 | this.partition = in.readInt();
18 | this.errorCode = in.readShort();
19 | this.highWaterMark = in.readLong();
20 | this.lastStableOffset = in.readLong();
21 | this.logStartOffset = in.readLong();
22 | int count = in.readInt();
23 | if(count >= 0){
24 | abortedTransactions = new ArrayList<>(count);
25 | for(int i =0 ; i < count; i++){
26 | AbortedTransaction abortedTransaction = new AbortedTransaction();
27 | abortedTransaction.deserialize(in);
28 | abortedTransactions.add(abortedTransaction);
29 | }
30 | }
31 |
32 | }
33 |
34 | public Integer getPartition() {
35 | return partition;
36 | }
37 |
38 | public void setPartition(Integer partition) {
39 | this.partition = partition;
40 | }
41 |
42 | public Short getErrorCode() {
43 | return errorCode;
44 | }
45 |
46 | public void setErrorCode(Short errorCode) {
47 | this.errorCode = errorCode;
48 | }
49 |
50 | public Long getHighWaterMark() {
51 | return highWaterMark;
52 | }
53 |
54 | public void setHighWaterMark(Long highWaterMark) {
55 | this.highWaterMark = highWaterMark;
56 | }
57 |
58 | public Long getLastStableOffset() {
59 | return lastStableOffset;
60 | }
61 |
62 | public void setLastStableOffset(Long lastStableOffset) {
63 | this.lastStableOffset = lastStableOffset;
64 | }
65 |
66 | public Long getLogStartOffset() {
67 | return logStartOffset;
68 | }
69 |
70 | public void setLogStartOffset(Long logStartOffset) {
71 | this.logStartOffset = logStartOffset;
72 | }
73 |
74 | public List<AbortedTransaction> getAbortedTransactions() {
75 | return abortedTransactions;
76 | }
77 |
78 | public void setAbortedTransactions(List<AbortedTransaction> abortedTransactions) {
79 | this.abortedTransactions = abortedTransactions;
80 | }
81 | }
82 |
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/fetch/FetchPartitionResp.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean.fetch;
2 |
3 | import com.north.netty.kafka.bean.produce.Record;
4 | import io.netty.buffer.ByteBuf;
5 | import org.north.netty.common.utils.SerializeUtils;
6 |
7 | import java.io.Serializable;
8 | import java.util.ArrayList;
9 | import java.util.Arrays;
10 | import java.util.List;
11 |
12 | public class FetchPartitionResp implements Serializable {
13 | private FetchPartitionHeader partitionHeaders;
14 | private Record recordSset;
15 | public void deserialize(ByteBuf in){
16 | partitionHeaders = new FetchPartitionHeader();
17 | partitionHeaders.deserialize(in);
18 | recordSset = new Record();
19 | recordSset.deserialize(in);
20 | }
21 |
22 | public FetchPartitionHeader getPartitionHeaders() {
23 | return partitionHeaders;
24 | }
25 |
26 | public void setPartitionHeaders(FetchPartitionHeader partitionHeaders) {
27 | this.partitionHeaders = partitionHeaders;
28 | }
29 |
30 | public Record getRecordSset() {
31 | return recordSset;
32 | }
33 |
34 | public void setRecordSset(Record recordSset) {
35 | this.recordSset = recordSset;
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/fetch/FetchRequest.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean.fetch;
2 |
3 | import com.north.netty.kafka.bean.KafkaRequest;
4 | import com.north.netty.kafka.bean.KafkaRequestHeader;
5 | import com.north.netty.kafka.enums.ApiKeys;
6 | import io.netty.buffer.ByteBuf;
7 |
8 | import java.io.Serializable;
9 | import java.util.List;
10 |
11 | public class FetchRequest implements Serializable, KafkaRequest {
12 |
13 | private KafkaRequestHeader header;
14 | public FetchRequest(String clientId, Integer correlationId){
15 | super();
16 | header = new KafkaRequestHeader();
17 | header.setClientId(clientId);
18 | header.setCorrelationId(correlationId);
19 | header.setApiKey(ApiKeys.FETCH.id);
20 | header.setApiVersion(ApiKeys.FETCH.apiVersion);
21 | }
22 |
23 | /**
24 | * The replica's broker id; an ordinary consumer just passes -1.
25 | */
26 | private Integer replicaId;
27 | /**
28 | * Maximum time in ms to wait for the response.
29 | */
30 | private Integer maxWaitTime;
31 | /**
32 | * Minimum number of bytes in the response.
33 | */
34 | private Integer minBytes;
35 | /**
36 | * Maximum number of bytes in the response.
37 | */
38 | private Integer maxBytes;
39 | /**
40 | * Transaction isolation level: 0 = read uncommitted, 1 = read committed.
41 | */
42 | private Byte isolationLevel;
43 | /**
44 | * The topics to fetch from.
45 | */
46 | private List<FetchTopicRequest> topics;
47 |
48 | @Override
49 | public void serializable(ByteBuf out){
50 | header.serializable(out);
51 | out.writeInt(replicaId);
52 | out.writeInt(maxWaitTime);
53 | out.writeInt(minBytes);
54 | out.writeInt(maxBytes);
55 | out.writeByte(isolationLevel);
56 | if(topics == null){
57 | out.writeInt(-1);
58 | }else {
59 | out.writeInt(topics.size());
60 | for(FetchTopicRequest fetchTopicRequest : topics){
61 | fetchTopicRequest.serializable(out);
62 | }
63 | }
64 | }
65 |
66 |
67 | public KafkaRequestHeader getHeader() {
68 | return header;
69 | }
70 |
71 | public void setHeader(KafkaRequestHeader header) {
72 | this.header = header;
73 | }
74 |
75 | public Integer getReplicaId() {
76 | return replicaId;
77 | }
78 |
79 | public void setReplicaId(Integer replicaId) {
80 | this.replicaId = replicaId;
81 | }
82 |
83 | public Integer getMaxWaitTime() {
84 | return maxWaitTime;
85 | }
86 |
87 | public void setMaxWaitTime(Integer maxWaitTime) {
88 | this.maxWaitTime = maxWaitTime;
89 | }
90 |
91 | public Integer getMinBytes() {
92 | return minBytes;
93 | }
94 |
95 | public void setMinBytes(Integer minBytes) {
96 | this.minBytes = minBytes;
97 | }
98 |
99 | public Integer getMaxBytes() {
100 | return maxBytes;
101 | }
102 |
103 | public void setMaxBytes(Integer maxBytes) {
104 | this.maxBytes = maxBytes;
105 | }
106 |
107 | public Byte getIsolationLevel() {
108 | return isolationLevel;
109 | }
110 |
111 | public void setIsolationLevel(Byte isolationLevel) {
112 | this.isolationLevel = isolationLevel;
113 | }
114 |
115 | public List<FetchTopicRequest> getTopics() {
116 | return topics;
117 | }
118 |
119 | public void setTopics(List<FetchTopicRequest> topics) {
120 | this.topics = topics;
121 | }
122 | }
123 |
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/fetch/FetchResponse.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean.fetch;
2 |
3 | import com.north.netty.kafka.bean.AbstractKafkaResponse;
4 | import com.north.netty.kafka.bean.KafkaResponse;
5 | import io.netty.buffer.ByteBuf;
6 |
7 | import java.io.Serializable;
8 | import java.util.ArrayList;
9 | import java.util.Arrays;
10 | import java.util.List;
11 |
12 | public class FetchResponse extends AbstractKafkaResponse implements Serializable, KafkaResponse {
13 | /**
14 | * Throttle time in ms.
15 | */
16 | private Integer throttleTime;
17 | private List<FetchTopicResponse> responses;
18 |
19 | @Override
20 | public void deserialize(ByteBuf in) {
21 | this.throttleTime = in.readInt();
22 | int resCount = in.readInt();
23 | if(resCount >= 0){
24 | this.responses = new ArrayList<>(resCount);
25 | for(int i =0 ; i < resCount; i++){
26 | FetchTopicResponse fetchTopicResponse = new FetchTopicResponse();
27 | fetchTopicResponse.deserialize(in);
28 | responses.add(fetchTopicResponse);
29 | }
30 | }
31 | }
32 |
33 | public Integer getThrottleTime() {
34 | return throttleTime;
35 | }
36 |
37 | public void setThrottleTime(Integer throttleTime) {
38 | this.throttleTime = throttleTime;
39 | }
40 |
41 | public List<FetchTopicResponse> getResponses() {
42 | return responses;
43 | }
44 |
45 | public void setResponses(List<FetchTopicResponse> responses) {
46 | this.responses = responses;
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/fetch/FetchTopicPartitionRequest.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean.fetch;
2 |
3 | import io.netty.buffer.ByteBuf;
4 |
5 | import java.io.Serializable;
6 |
7 | public class FetchTopicPartitionRequest implements Serializable {
8 | private Integer partition;
9 | private Long fetchOffset;
10 | private Long logStartOffset;
11 | private Integer maxBytes;
12 | public void serializable(ByteBuf out){
13 | out.writeInt(partition);
14 | out.writeLong(fetchOffset);
15 | out.writeLong(logStartOffset);
16 | out.writeInt(maxBytes);
17 | }
18 |
19 | public Integer getPartition() {
20 | return partition;
21 | }
22 |
23 | public void setPartition(Integer partition) {
24 | this.partition = partition;
25 | }
26 |
27 | public Long getFetchOffset() {
28 | return fetchOffset;
29 | }
30 |
31 | public void setFetchOffset(Long fetchOffset) {
32 | this.fetchOffset = fetchOffset;
33 | }
34 |
35 | public Long getLogStartOffset() {
36 | return logStartOffset;
37 | }
38 |
39 | public void setLogStartOffset(Long logStartOffset) {
40 | this.logStartOffset = logStartOffset;
41 | }
42 |
43 | public Integer getMaxBytes() {
44 | return maxBytes;
45 | }
46 |
47 | public void setMaxBytes(Integer maxBytes) {
48 | this.maxBytes = maxBytes;
49 | }
50 | }
51 |
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/fetch/FetchTopicRequest.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean.fetch;
2 |
3 | import io.netty.buffer.ByteBuf;
4 | import org.north.netty.common.utils.SerializeUtils;
5 |
6 | import java.io.Serializable;
7 | import java.util.List;
8 |
9 | public class FetchTopicRequest implements Serializable {
10 | private String topic;
11 | private List<FetchTopicPartitionRequest> partitions;
12 | public void serializable(ByteBuf out){
13 | SerializeUtils.writeStringToBuffer2(topic, out);
14 | if(partitions == null){
15 | out.writeInt(-1);
16 | }else {
17 | out.writeInt(partitions.size());
18 | for(FetchTopicPartitionRequest fetchTopicRequest : partitions){
19 | fetchTopicRequest.serializable(out);
20 | }
21 | }
22 | }
23 |
24 | public String getTopic() {
25 | return topic;
26 | }
27 |
28 | public void setTopic(String topic) {
29 | this.topic = topic;
30 | }
31 |
32 | public List<FetchTopicPartitionRequest> getPartitions() {
33 | return partitions;
34 | }
35 |
36 | public void setPartitions(List<FetchTopicPartitionRequest> partitions) {
37 | this.partitions = partitions;
38 | }
39 | }
40 |
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/fetch/FetchTopicResponse.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean.fetch;
2 |
3 |
4 | import io.netty.buffer.ByteBuf;
5 | import org.north.netty.common.utils.SerializeUtils;
6 |
7 | import java.io.Serializable;
8 | import java.util.ArrayList;
9 | import java.util.List;
10 |
11 | public class FetchTopicResponse implements Serializable {
12 | private String topic;
13 | private List<FetchPartitionResp> partitionResps;
14 |
15 | public void deserialize(ByteBuf in){
16 | this.topic = SerializeUtils.readStringToBuffer2(in);
17 | int partitionResCount = in.readInt();
18 | if(partitionResCount > 0){
19 | this.partitionResps = new ArrayList<>(partitionResCount);
20 | for(int i = 0; i < partitionResCount; i++){
21 | FetchPartitionResp fetchPartitionResp = new FetchPartitionResp();
22 | fetchPartitionResp.deserialize(in);
23 | partitionResps.add(fetchPartitionResp);
24 | }
25 | }
26 | }
27 |
28 | public String getTopic() {
29 | return topic;
30 | }
31 |
32 | public void setTopic(String topic) {
33 | this.topic = topic;
34 | }
35 |
36 | public List<FetchPartitionResp> getPartitionResps() {
37 | return partitionResps;
38 | }
39 |
40 | public void setPartitionResps(List<FetchPartitionResp> partitionResps) {
41 | this.partitionResps = partitionResps;
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/meta/KafkaMetaRequest.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean.meta;
2 |
3 | import com.north.netty.kafka.bean.KafkaRequest;
4 | import com.north.netty.kafka.bean.KafkaRequestHeader;
5 | import com.north.netty.kafka.enums.ApiKeys;
6 | import io.netty.buffer.ByteBuf;
7 | import org.north.netty.common.utils.SerializeUtils;
8 |
9 | import java.io.Serializable;
10 | import java.util.List;
11 |
12 | /**
13 | * @author laihaohua
14 | */
15 | public class KafkaMetaRequest implements Serializable, KafkaRequest {
16 | private List<String> topics;
17 | private boolean allowAutoTopicCreation = true;
18 | private KafkaRequestHeader header;
19 | public KafkaMetaRequest(String clientId, Integer correlationId){
20 | super();
21 | header = new KafkaRequestHeader();
22 | header.setClientId(clientId);
23 | header.setCorrelationId(correlationId);
24 | header.setApiKey(ApiKeys.METADATA.id);
25 | header.setApiVersion(ApiKeys.METADATA.apiVersion);
26 | }
27 | public List<String> getTopics() {
28 | return topics;
29 | }
30 |
31 | public void setTopics(List<String> topics) {
32 | this.topics = topics;
33 | }
34 |
35 | public boolean isAllowAutoTopicCreation() {
36 | return allowAutoTopicCreation;
37 | }
38 |
39 | public void setAllowAutoTopicCreation(boolean allowAutoTopicCreation) {
40 | this.allowAutoTopicCreation = allowAutoTopicCreation;
41 | }
42 |
43 | @Override
44 | public void serializable(ByteBuf out){
45 | header.serializable(out);
46 | SerializeUtils.writeStringListToBuffer(topics, out);
47 | out.writeBoolean(allowAutoTopicCreation);
48 | }
49 | }
50 |
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/meta/KafkaMetaResponse.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean.meta;
2 |
3 | import com.north.netty.kafka.bean.AbstractKafkaResponse;
4 | import com.north.netty.kafka.bean.KafkaResponse;
5 | import com.north.netty.kafka.bean.broker.Broker;
6 | import com.north.netty.kafka.bean.topic.TopicMetaData;
7 | import io.netty.buffer.ByteBuf;
8 |
9 | import java.io.Serializable;
10 | import java.util.ArrayList;
11 | import java.util.List;
12 |
13 | /**
14 | * @author laihaohua
15 | */
16 | public class KafkaMetaResponse extends AbstractKafkaResponse implements Serializable, KafkaResponse {
17 | private Integer throttleTimeMs;
18 | private List<Broker> brokers;
19 | private Integer controllerId = -1;
20 | /**
21 | * Only present in protocol v2 and above.
22 | */
23 | private String clusterId;
24 | private List<TopicMetaData> topicMetadata;
25 |
26 |
27 | public Integer getThrottleTimeMs() {
28 | return throttleTimeMs;
29 | }
30 |
31 | public void setThrottleTimeMs(Integer throttleTimeMs) {
32 | this.throttleTimeMs = throttleTimeMs;
33 | }
34 |
35 | public List<Broker> getBrokers() {
36 | return brokers;
37 | }
38 |
39 | public void setBrokers(List<Broker> brokers) {
40 | this.brokers = brokers;
41 | }
42 |
43 | public Integer getControllerId() {
44 | return controllerId;
45 | }
46 |
47 | public void setControllerId(Integer controllerId) {
48 | this.controllerId = controllerId;
49 | }
50 |
51 | public String getClusterId() {
52 | return clusterId;
53 | }
54 |
55 | public void setClusterId(String clusterId) {
56 | this.clusterId = clusterId;
57 | }
58 |
59 | public List<TopicMetaData> getTopicMetadata() {
60 | return topicMetadata;
61 | }
62 |
63 | public void setTopicMetadata(List<TopicMetaData> topicMetadata) {
64 | this.topicMetadata = topicMetadata;
65 | }
66 |
67 | @Override
68 | public void deserialize(ByteBuf byteBuf) {
69 | int brokerIdCount = byteBuf.readInt();
70 | if(brokerIdCount >= 0){
71 | brokers = new ArrayList<>(brokerIdCount);
72 | for(int i =0 ; i < brokerIdCount; i++){
73 | Broker broker = new Broker();
74 | broker.deserialize(byteBuf);
75 | brokers.add(broker);
76 | }
77 | }
78 | this.controllerId = byteBuf.readInt();
79 | int topicCount = byteBuf.readInt();
80 | if(topicCount >= 0){
81 | topicMetadata = new ArrayList<>(topicCount);
82 | for(int i =0 ; i < topicCount; i++){
83 | TopicMetaData topicMetaData = new TopicMetaData();
84 | topicMetaData.deserialize(byteBuf);
85 | topicMetadata.add(topicMetaData);
86 | }
87 | }
88 | }
89 |
90 |
91 | }
92 |
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/msg/ConsumerRecord.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean.msg;
2 |
3 | import java.io.Serializable;
4 |
5 | public class ConsumerRecord implements Serializable {
6 | private long offset;
7 | private long timeStamp;
8 | private String key;
9 | private String val;
10 |
11 | public long getOffset() {
12 | return offset;
13 | }
14 |
15 | public void setOffset(long offset) {
16 | this.offset = offset;
17 | }
18 |
19 | public long getTimeStamp() {
20 | return timeStamp;
21 | }
22 |
23 | public void setTimeStamp(long timeStamp) {
24 | this.timeStamp = timeStamp;
25 | }
26 |
27 | public String getKey() {
28 | return key;
29 | }
30 |
31 | public void setKey(String key) {
32 | this.key = key;
33 | }
34 |
35 | public String getVal() {
36 | return val;
37 | }
38 |
39 | public void setVal(String val) {
40 | this.val = val;
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/msg/KafkaMsgRecordBatch.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean.msg;
2 |
3 | import com.google.common.collect.Lists;
4 | import com.north.netty.kafka.utils.Crc32C;
5 | import io.netty.buffer.ByteBuf;
6 | import io.netty.buffer.PooledByteBufAllocator;
7 |
8 | import java.io.Serializable;
9 | import java.util.ArrayList;
10 | import java.util.List;
11 |
12 | public class KafkaMsgRecordBatch implements Serializable {
13 | private int totalSize;
14 | public KafkaMsgRecordBatch(){
15 |
16 | }
17 | public KafkaMsgRecordBatch(KafkaMsgRecordV2 kafkaMsgRecordV2){
18 | this.msgs = Lists.newArrayList(kafkaMsgRecordV2);
19 | this.baseOffset = 0L;
20 | this.firstTimestamp = this.maxTimestamp = System.currentTimeMillis();
21 | /**
22 | * 61 is the size of the whole KafkaMsgRecordBatch header; 12 is the combined size of baseOffset and length.
23 | * So the batch's length field covers header + message body - 12.
24 | */
25 | this.length = 61 + kafkaMsgRecordV2.getMsgSize() - 12;
26 | this.totalSize = 61 + kafkaMsgRecordV2.getMsgSize();
27 | }
28 |
29 | public void deserialize(ByteBuf in){
30 | this.baseOffset = in.readLong();
31 | this.length = in.readInt();
32 | this.partitionLeaderVersion = in.readInt();
33 | this.magic = in.readByte();
34 | this.crc = in.readInt();
35 | this.attributes = in.readShort();
36 | this.lastOffsetDelta = in.readInt();
37 | this.firstTimestamp = in.readLong();
38 | this.maxTimestamp = in.readLong();
39 | this.producerId = in.readLong();
40 | this.epoch = in.readShort();
41 | this.sequence = in.readInt();
42 | this.numRecords = in.readInt();
43 | if(numRecords >= 0){
44 | msgs = new ArrayList<>(numRecords);
45 | for(int i=0; i < numRecords; i++){
46 | KafkaMsgRecordV2 kafkaMsgRecordV2 = new KafkaMsgRecordV2();
47 | kafkaMsgRecordV2.deserialize(in);
48 | msgs.add(kafkaMsgRecordV2);
49 | }
50 | }
51 | }
52 |
53 | private Long baseOffset;
54 | private Integer length;
55 | private Integer partitionLeaderVersion;
56 | private byte magic;
57 | private Integer crc;
58 | private short attributes;
59 | private int lastOffsetDelta;
60 | private long firstTimestamp;
61 | private long maxTimestamp;
62 | private long producerId;
63 | private short epoch;
64 | private int sequence;
65 | private int numRecords;
66 | // ... the serializable(ByteBuf) implementation (which writes the fields above and a Crc32C
67 | // checksum over the batch) is missing from this dump and is not reconstructed here ...
149 | private List<KafkaMsgRecordV2> msgs;
150 |
151 | public int getTotalSize() {
152 | return totalSize;
153 | }
154 |
155 | public void setTotalSize(int totalSize) {
156 | this.totalSize = totalSize;
157 | }
158 |
159 | public Long getBaseOffset() {
160 | return baseOffset;
161 | }
162 |
163 | public void setBaseOffset(Long baseOffset) {
164 | this.baseOffset = baseOffset;
165 | }
166 |
167 | public Integer getLength() {
168 | return length;
169 | }
170 |
171 | public void setLength(Integer length) {
172 | this.length = length;
173 | }
174 |
175 | public Integer getPartitionLeaderVersion() {
176 | return partitionLeaderVersion;
177 | }
178 |
179 | public void setPartitionLeaderVersion(Integer partitionLeaderVersion) {
180 | this.partitionLeaderVersion = partitionLeaderVersion;
181 | }
182 |
183 | public byte getMagic() {
184 | return magic;
185 | }
186 |
187 | public void setMagic(byte magic) {
188 | this.magic = magic;
189 | }
190 |
191 | public Integer getCrc() {
192 | return crc;
193 | }
194 |
195 | public void setCrc(Integer crc) {
196 | this.crc = crc;
197 | }
198 |
199 | public short getAttributes() {
200 | return attributes;
201 | }
202 |
203 | public void setAttributes(short attributes) {
204 | this.attributes = attributes;
205 | }
206 |
207 | public int getLastOffsetDelta() {
208 | return lastOffsetDelta;
209 | }
210 |
211 | public void setLastOffsetDelta(int lastOffsetDelta) {
212 | this.lastOffsetDelta = lastOffsetDelta;
213 | }
214 |
215 | public long getFirstTimestamp() {
216 | return firstTimestamp;
217 | }
218 |
219 | public void setFirstTimestamp(long firstTimestamp) {
220 | this.firstTimestamp = firstTimestamp;
221 | }
222 |
223 | public long getMaxTimestamp() {
224 | return maxTimestamp;
225 | }
226 |
227 | public void setMaxTimestamp(long maxTimestamp) {
228 | this.maxTimestamp = maxTimestamp;
229 | }
230 |
231 | public long getProducerId() {
232 | return producerId;
233 | }
234 |
235 | public void setProducerId(long producerId) {
236 | this.producerId = producerId;
237 | }
238 |
239 | public short getEpoch() {
240 | return epoch;
241 | }
242 |
243 | public void setEpoch(short epoch) {
244 | this.epoch = epoch;
245 | }
246 |
247 | public int getSequence() {
248 | return sequence;
249 | }
250 |
251 | public void setSequence(int sequence) {
252 | this.sequence = sequence;
253 | }
254 |
255 | public int getNumRecords() {
256 | return numRecords;
257 | }
258 |
259 | public void setNumRecords(int numRecords) {
260 | this.numRecords = numRecords;
261 | }
262 |
263 | public List getMsgs() {
264 | return msgs;
265 | }
266 |
267 | public void setMsgs(List msgs) {
268 | this.msgs = msgs;
269 | }
270 | }
271 |
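
Working through the constructor's size arithmetic with an illustrative message size:

```java
public class BatchSizeDemo {
    public static void main(String[] args) {
        int msgSize = 27;               // illustrative: varint-prefixed size of one KafkaMsgRecordV2
        int totalSize = 61 + msgSize;   // 61-byte batch header + message body
        int length = 61 + msgSize - 12; // the length field excludes the 8-byte baseOffset and the 4-byte length field itself
        System.out.println(totalSize + " / " + length); // 88 / 76
    }
}
```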
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/msg/KafkaMsgRecordV2.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean.msg;
2 |
3 | import com.north.netty.kafka.utils.VarLengthUtils;
4 | import io.netty.buffer.ByteBuf;
5 |
6 | import java.io.Serializable;
7 | import java.nio.charset.StandardCharsets;
8 | import java.util.HashMap;
9 | import java.util.Map;
10 |
11 | /**
12 | * @author laihaohua
13 | */
14 | public class KafkaMsgRecordV2 implements Serializable {
15 |
16 | private int msgBodySize;
17 |
18 | public KafkaMsgRecordV2(){
19 |
20 | }
21 | public KafkaMsgRecordV2(byte [] key, byte [] values, Map<String, byte[]> headers){
22 | this.key = key;
23 | this.values = values;
24 | if (headers == null){
25 | headers = new HashMap<>();
26 | }
27 | this.headers = headers;
28 | calcMsgSize();
29 |
30 | }
31 | private void calcMsgSize(){
32 | int size = 0;
33 | // size of the attributes byte
34 | size += 1;
35 | // size of offsetDelta
36 | size += VarLengthUtils.sizeOfVarint(this.offsetDelta);
37 | // size of timestampDelta
38 | size += VarLengthUtils.sizeOfVarlong(this.timestampDelta);
39 | if(this.key == null){
40 | // null is encoded as -1, which takes one byte
41 | size += VarLengthUtils.NULL_VARINT_SIZE_BYTES;
42 | }else{
43 | size += VarLengthUtils.sizeOfVarint(this.key.length);
44 | size += this.key.length;
45 | }
46 | if(this.values == null){
47 | // null is encoded as -1, which takes one byte
48 | size += VarLengthUtils.NULL_VARINT_SIZE_BYTES;
49 | }else{
50 | size += VarLengthUtils.sizeOfVarint(this.values.length);
51 | size += this.values.length;
52 | }
53 | size += VarLengthUtils.sizeOfVarint(headers.size());
54 | for (Map.Entry<String, byte[]> header : headers.entrySet()) {
55 | String headerKey = header.getKey();
56 | if (headerKey == null) {
57 | throw new IllegalArgumentException("Invalid null header key found in headers");
58 | }
59 |
60 | int headerKeySize = VarLengthUtils.utf8Length(headerKey);
61 | size += VarLengthUtils.sizeOfVarint(headerKeySize) + headerKeySize;
62 |
63 | byte[] headerValue = header.getValue();
64 | if (headerValue == null) {
65 | size += VarLengthUtils.NULL_VARINT_SIZE_BYTES;
66 | } else {
67 | size += VarLengthUtils.sizeOfVarint(headerValue.length) + headerValue.length;
68 | }
69 | }
70 |
71 | this.msgBodySize = size;
72 | this.msgSize = VarLengthUtils.sizeOfVarint(size) + size;
73 | }
74 | public void serializable(ByteBuf out){
75 | VarLengthUtils.writeVarint(msgBodySize, out);
76 | out.writeByte(attributes);
77 | VarLengthUtils.writeVarlong(timestampDelta, out);
78 | VarLengthUtils.writeVarint(offsetDelta, out);
79 | if(key == null){
80 | VarLengthUtils.writeVarint(-1, out);
81 | }else{
82 | VarLengthUtils.writeVarint(key.length, out);
83 | out.writeBytes(key);
84 | }
85 | if(values == null){
86 | VarLengthUtils.writeVarint(-1, out);
87 | }else{
88 | VarLengthUtils.writeVarint(values.length, out);
89 | out.writeBytes(values);
90 | }
91 |
92 | VarLengthUtils.writeVarint(headers.size(), out);
93 |
94 | for (Map.Entry<String, byte[]> header : headers.entrySet()) {
95 | String headerKey = header.getKey();
96 | if (headerKey == null) {
97 | throw new IllegalArgumentException("Invalid null header key found in headers");
98 | }
99 |
100 | byte[] utf8Bytes = headerKey.getBytes(StandardCharsets.UTF_8);
101 | VarLengthUtils.writeVarint(utf8Bytes.length, out);
102 | out.writeBytes(utf8Bytes);
103 |
104 | byte[] headerValue = header.getValue();
105 | if (headerValue == null) {
106 | VarLengthUtils.writeVarint(-1, out);
107 | } else {
108 | VarLengthUtils.writeVarint(headerValue.length, out);
109 | out.writeBytes(headerValue);
110 | }
111 | }
112 |
113 | }
114 | public void deserialize(ByteBuf in){
115 | this.msgBodySize = VarLengthUtils.readVarint(in);
116 | this.attributes = in.readByte();
117 | timestampDelta = VarLengthUtils.readVarlong(in);
118 | offsetDelta = VarLengthUtils.readVarint(in);
119 | int keyLen = VarLengthUtils.readVarint(in);
120 | if(keyLen >= 0){
121 | this.key = new byte[keyLen];
122 | in.readBytes(key);
123 | }
124 |
125 | int valueLen = VarLengthUtils.readVarint(in);
126 | if(valueLen >= 0){
127 | this.values = new byte[valueLen];
128 | in.readBytes(values);
129 | }
130 | int headerSize = VarLengthUtils.readVarint(in);
131 | if(headerSize >= 0){
132 | headers = new HashMap<>(headerSize);
133 | for(int i=0; i < headerSize; i++){
134 | keyLen = VarLengthUtils.readVarint(in);
135 | byte [] bs = new byte[keyLen];
136 | in.readBytes(bs);
137 | String key = new String(bs);
138 | valueLen = VarLengthUtils.readVarint(in);
139 | bs = new byte[valueLen];
140 | in.readBytes(bs);
141 | headers.put(key, bs);
142 | }
143 | }
144 | }
145 | /**
146 | * Total size of the message (varint length prefix plus body).
147 | */
148 | private int msgSize;
149 | /**
150 | * One attributes byte; compression is not used here, so it is hardcoded to 0.
151 | */
152 | private byte attributes = 0;
153 | /**
154 | * Delta between this message's timestamp and that of the first message in the batch.
155 | * In this implementation each batch carries exactly one message, so this is always 0.
156 | */
157 | private long timestampDelta = 0;
158 |
159 | /**
160 | * Delta between this message's offset and that of the first message in the batch.
161 | * In this implementation each batch carries exactly one message, so this is always 0.
162 | */
163 | private int offsetDelta = 0;
164 | /**
165 | * The message key.
166 | */
167 | private byte [] key;
168 | /**
169 | * The message value.
170 | */
171 | private byte [] values;
172 |
173 | /**
174 | * The message headers.
175 | */
176 | private Map<String, byte[]> headers;
177 |
178 |
179 | public int getMsgSize() {
180 | return msgSize;
181 | }
182 |
183 | public void setMsgSize(int msgSize) {
184 | this.msgSize = msgSize;
185 | }
186 |
187 | public byte getAttributes() {
188 | return attributes;
189 | }
190 |
191 | public void setAttributes(byte attributes) {
192 | this.attributes = attributes;
193 | }
194 |
195 | public long getTimestampDelta() {
196 | return timestampDelta;
197 | }
198 |
199 | public void setTimestampDelta(long timestampDelta) {
200 | this.timestampDelta = timestampDelta;
201 | }
202 |
203 | public int getOffsetDelta() {
204 | return offsetDelta;
205 | }
206 |
207 | public void setOffsetDelta(int offsetDelta) {
208 | this.offsetDelta = offsetDelta;
209 | }
210 |
211 | public byte[] getKey() {
212 | return key;
213 | }
214 |
215 | public void setKey(byte[] key) {
216 | this.key = key;
217 | }
218 |
219 | public byte[] getValues() {
220 | return values;
221 | }
222 |
223 | public void setValues(byte[] values) {
224 | this.values = values;
225 | }
226 |
227 | public Map<String, byte[]> getHeaders() {
228 | return headers;
229 | }
230 |
231 | public void setHeaders(Map<String, byte[]> headers) {
232 | this.headers = headers;
233 | }
234 | }
235 |
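
calcMsgSize leans on VarLengthUtils, which is listed in the tree but not shown in this dump. Below is a sketch of the standard zigzag varint sizing that such a helper typically implements; it is an assumption about its internals, not a copy of it:

```java
public class VarintSizeSketch {
    // zigzag-encode so small negative values stay small, then count 7-bit groups;
    // assumed to match VarLengthUtils.sizeOfVarint
    static int sizeOfVarint(int value) {
        int v = (value << 1) ^ (value >> 31);
        int bytes = 1;
        while ((v & 0xFFFFFF80) != 0) {
            bytes++;
            v >>>= 7;
        }
        return bytes;
    }

    public static void main(String[] args) {
        System.out.println(sizeOfVarint(0));   // 1
        System.out.println(sizeOfVarint(-1));  // 1
        System.out.println(sizeOfVarint(300)); // 2
    }
}
```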
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/partition/PartitionMateData.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean.partition;
2 |
3 | import io.netty.buffer.ByteBuf;
4 |
5 | import java.io.Serializable;
6 | import java.util.ArrayList;
7 | import java.util.List;
8 |
9 | /**
10 | * @author laihaohua
11 | */
12 | public class PartitionMateData implements Serializable {
13 | private Short errorCode;
14 | private Integer partitionId;
15 | private Integer leader;
16 | private List<Integer> replicas;
17 | private List<Integer> isr;
18 | /**
19 | * offline_replicas only exists in the v2 response
20 | */
21 | private List<Integer> offlineReplicas;
22 |
23 | public Short getErrorCode() {
24 | return errorCode;
25 | }
26 |
27 | public void setErrorCode(Short errorCode) {
28 | this.errorCode = errorCode;
29 | }
30 |
31 | public Integer getPartitionId() {
32 | return partitionId;
33 | }
34 |
35 | public void setPartitionId(Integer partitionId) {
36 | this.partitionId = partitionId;
37 | }
38 |
39 | public Integer getLeader() {
40 | return leader;
41 | }
42 |
43 | public void setLeader(Integer leader) {
44 | this.leader = leader;
45 | }
46 |
47 | public List<Integer> getReplicas() {
48 | return replicas;
49 | }
50 |
51 | public void setReplicas(List<Integer> replicas) {
52 | this.replicas = replicas;
53 | }
54 |
55 | public List<Integer> getIsr() {
56 | return isr;
57 | }
58 |
59 | public void setIsr(List<Integer> isr) {
60 | this.isr = isr;
61 | }
62 |
63 | public List<Integer> getOfflineReplicas() {
64 | return offlineReplicas;
65 | }
66 |
67 | public void setOfflineReplicas(List<Integer> offlineReplicas) {
68 | this.offlineReplicas = offlineReplicas;
69 | }
70 |
71 | public void deserialize(ByteBuf byteBuf) {
72 | this.errorCode = byteBuf.readShort();
73 | this.partitionId = byteBuf.readInt();
74 | this.leader = byteBuf.readInt();
75 | int replicasCount = byteBuf.readInt();
76 | if(replicasCount >= 0){
77 | replicas = new ArrayList<>(replicasCount);
78 | for(int i=0; i< replicasCount; i++){
79 | replicas.add(byteBuf.readInt());
80 | }
81 | }
82 | int isrCount = byteBuf.readInt();
83 | if(isrCount >= 0){
84 | isr = new ArrayList<>(isrCount);
85 | for(int i=0; i< isrCount; i++){
86 | isr.add(byteBuf.readInt());
87 | }
88 | }
89 | }
90 | }
91 |
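
deserialize above reads the protocol's standard array encoding: an int32 count (negative meaning a null array) followed by that many elements. The same pattern recurs in ProduceResponse, TopicProduceRes and TopicMetaData below. A self-contained sketch of the pattern with illustrative values (not repo code):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.util.ArrayList;
import java.util.List;

public class Int32ArraySketch {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.buffer();
        buf.writeInt(2).writeInt(100).writeInt(101); // [count][replica id][replica id]
        List<Integer> ids = null;
        int count = buf.readInt();
        if (count >= 0) {                            // a negative count encodes a null array
            ids = new ArrayList<>(count);
            for (int i = 0; i < count; i++) {
                ids.add(buf.readInt());
            }
        }
        System.out.println(ids);                     // prints [100, 101]
    }
}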
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/produce/PartitionData.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean.produce;
2 |
3 | import io.netty.buffer.ByteBuf;
4 |
5 | import java.io.Serializable;
6 |
7 | public class PartitionData implements Serializable {
8 | private Record recordSset;
9 | public void serializable(ByteBuf out){
10 | recordSset.serializable(out);
11 | }
12 |
13 | public Record getRecordSset() {
14 | return recordSset;
15 | }
16 |
17 | public void setRecordSset(Record recordSset) {
18 | this.recordSset = recordSset;
19 | }
20 | }
21 |
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/produce/PartitionResponse.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean.produce;
2 |
3 | import io.netty.buffer.ByteBuf;
4 |
5 | import java.io.Serializable;
6 |
7 | public class PartitionResponse implements Serializable {
8 | private Integer partitionId;
9 | private Short errorCode;
10 | private Long baseOffset;
11 | private Long logAppendTime;
12 | private Long logStartOffset;
13 | public void deserialize(ByteBuf byteBuf) {
14 | this.partitionId = byteBuf.readInt();
15 | this.errorCode = byteBuf.readShort();
16 | this.baseOffset = byteBuf.readLong();
17 | this.logAppendTime = byteBuf.readLong();
18 | this.logStartOffset = byteBuf.readLong();
19 | }
20 |
21 | public Integer getPartitionId() {
22 | return partitionId;
23 | }
24 |
25 | public void setPartitionId(Integer partitionId) {
26 | this.partitionId = partitionId;
27 | }
28 |
29 | public Short getErrorCode() {
30 | return errorCode;
31 | }
32 |
33 | public void setErrorCode(Short errorCode) {
34 | this.errorCode = errorCode;
35 | }
36 |
37 | public Long getBaseOffset() {
38 | return baseOffset;
39 | }
40 |
41 | public void setBaseOffset(Long baseOffset) {
42 | this.baseOffset = baseOffset;
43 | }
44 |
45 | public Long getLogAppendTime() {
46 | return logAppendTime;
47 | }
48 |
49 | public void setLogAppendTime(Long logAppendTime) {
50 | this.logAppendTime = logAppendTime;
51 | }
52 |
53 | public Long getLogStartOffset() {
54 | return logStartOffset;
55 | }
56 |
57 | public void setLogStartOffset(Long logStartOffset) {
58 | this.logStartOffset = logStartOffset;
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/produce/ProduceRequest.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean.produce;
2 |
3 | import com.north.netty.kafka.bean.KafkaRequest;
4 | import com.north.netty.kafka.bean.KafkaRequestHeader;
5 | import com.north.netty.kafka.enums.ApiKeys;
6 | import io.netty.buffer.ByteBuf;
7 | import org.north.netty.common.utils.SerializeUtils;
8 |
9 | import java.util.Arrays;
10 | import java.util.List;
11 |
12 | public class ProduceRequest implements KafkaRequest {
13 | private String transactionalId;
14 | private Short acks;
15 | private Integer timeOut;
16 | private List<TopicProduceData> topicData;
17 | private KafkaRequestHeader requestHeader;
18 | public ProduceRequest(String clientId, Integer correlationId) {
19 | super();
20 | this.requestHeader = new KafkaRequestHeader();
21 | this.requestHeader.setClientId(clientId);
22 | this.requestHeader.setCorrelationId(correlationId);
23 | this.requestHeader.setApiKey(ApiKeys.PRODUCE.id);
24 | this.requestHeader.setApiVersion(ApiKeys.PRODUCE.apiVersion);
25 |
26 | }
27 |
28 | @Override
29 | public void serializable(ByteBuf out) {
30 | requestHeader.serializable(out);
31 | SerializeUtils.writeStringToBuffer2(transactionalId, out);
32 | out.writeShort(acks);
33 | out.writeInt(timeOut);
34 | if(topicData == null){
35 | out.writeInt(-1);
36 | }else {
37 | out.writeInt(topicData.size());
38 | for(TopicProduceData topicProduceData: topicData){
39 | topicProduceData.serializable(out);
40 |
41 | }
42 | }
43 |
44 | }
45 |
46 | public String getTransactionalId() {
47 | return transactionalId;
48 | }
49 |
50 | public void setTransactionalId(String transactionalId) {
51 | this.transactionalId = transactionalId;
52 | }
53 |
54 | public Short getAcks() {
55 | return acks;
56 | }
57 |
58 | public void setAcks(Short acks) {
59 | this.acks = acks;
60 | }
61 |
62 | public Integer getTimeOut() {
63 | return timeOut;
64 | }
65 |
66 | public void setTimeOut(Integer timeOut) {
67 | this.timeOut = timeOut;
68 | }
69 |
70 | public List<TopicProduceData> getTopicData() {
71 | return topicData;
72 | }
73 |
74 | public void setTopicData(List<TopicProduceData> topicData) {
75 | this.topicData = topicData;
76 | }
77 | }
78 |
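
A sketch of how this request might be assembled before being written to the channel. The client id, correlation id, topic name and timeout are illustrative, no partition data is attached, and it is assumed that SerializeUtils.writeStringToBuffer2 accepts a null transactionalId (encoding it as a null string):

import com.north.netty.kafka.bean.produce.ProduceRequest;
import com.north.netty.kafka.bean.produce.TopicProduceData;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.util.Arrays;

public class ProduceRequestSketch {
    public static void main(String[] args) {
        TopicProduceData topicProduceData = new TopicProduceData();
        topicProduceData.setTopic("demo-topic");   // illustrative; data stays null and is encoded as -1
        ProduceRequest request = new ProduceRequest("awesome-netty-kafka", 1);
        request.setTransactionalId(null);          // no transaction
        request.setAcks((short) 1);                // wait for the partition leader only
        request.setTimeOut(3000);                  // broker-side timeout in ms
        request.setTopicData(Arrays.asList(topicProduceData));
        ByteBuf out = Unpooled.buffer();
        request.serializable(out);                 // request header first, then the produce body
        System.out.println("encoded " + out.readableBytes() + " bytes");
    }
}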
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/produce/ProduceResponse.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean.produce;
2 |
3 | import com.north.netty.kafka.bean.AbstractKafkaResponse;
4 | import com.north.netty.kafka.bean.KafkaResponse;
5 | import io.netty.buffer.ByteBuf;
6 |
7 | import java.io.Serializable;
8 | import java.util.ArrayList;
9 | import java.util.List;
10 |
11 | public class ProduceResponse extends AbstractKafkaResponse implements Serializable, KafkaResponse {
12 | private List<TopicProduceRes> topicProduceResList;
13 | private Integer throttleTimeMs;
14 |
15 | @Override
16 | public void deserialize(ByteBuf byteBuf) {
17 | int topicCount = byteBuf.readInt();
18 | if(topicCount >= 0){
19 | topicProduceResList = new ArrayList<>(topicCount);
20 | for(int i=0; i < topicCount; i++){
21 | TopicProduceRes topicProduceRes = new TopicProduceRes();
22 | topicProduceRes.deserialize(byteBuf);
23 | topicProduceResList.add(topicProduceRes);
24 | }
25 | }
26 | this.throttleTimeMs = byteBuf.readInt();
27 | }
28 | }
29 |
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/produce/Record.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean.produce;
2 |
3 | import com.google.gson.Gson;
4 | import com.north.netty.kafka.bean.msg.KafkaMsgRecordBatch;
5 | import io.netty.buffer.ByteBuf;
6 |
7 | import java.io.Serializable;
8 | import java.util.ArrayList;
9 | import java.util.List;
10 |
11 | public class Record implements Serializable {
12 | private Integer partition;
13 | private List<KafkaMsgRecordBatch> kafkaMsgRecordBatchList;
14 |
15 | public void serializable(ByteBuf out){
16 | out.writeInt(partition);
17 | if(kafkaMsgRecordBatchList != null){
18 | for(KafkaMsgRecordBatch kafkaMsgRecordBatch : kafkaMsgRecordBatchList){
19 | if(kafkaMsgRecordBatch == null){
20 | out.writeInt(-1);
21 | }else{
22 | out.writeInt(kafkaMsgRecordBatch.getTotalSize());
23 | kafkaMsgRecordBatch.serializable(out);
24 | }
25 | }
26 | }
27 |
28 |
29 | }
30 | public void deserialize(ByteBuf in){
31 | int totalSize = in.readInt(); // declared size in bytes of the record set; the loop below simply drains the buffer
32 | kafkaMsgRecordBatchList = new ArrayList<>();
33 | while(in.isReadable()){
34 | KafkaMsgRecordBatch kafkaMsgRecordBatch = new KafkaMsgRecordBatch();
35 | kafkaMsgRecordBatch.deserialize(in);
36 | kafkaMsgRecordBatchList.add(kafkaMsgRecordBatch);
37 | }
38 | }
39 |
40 | public Integer getPartition() {
41 | return partition;
42 | }
43 |
44 | public void setPartition(Integer partition) {
45 | this.partition = partition;
46 | }
47 |
48 | public List<KafkaMsgRecordBatch> getKafkaMsgRecordBatchList() {
49 | return kafkaMsgRecordBatchList;
50 | }
51 |
52 | public void setKafkaMsgRecordBatchList(List<KafkaMsgRecordBatch> kafkaMsgRecordBatchList) {
53 | this.kafkaMsgRecordBatchList = kafkaMsgRecordBatchList;
54 | }
55 | }
56 |
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/produce/TopicProduceData.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean.produce;
2 |
3 | import io.netty.buffer.ByteBuf;
4 | import org.north.netty.common.utils.SerializeUtils;
5 |
6 | import java.io.Serializable;
7 | import java.util.List;
8 |
9 | public class TopicProduceData implements Serializable {
10 | private String topic;
11 | private List<PartitionData> data;
12 | public void serializable(ByteBuf out){
13 | SerializeUtils.writeStringToBuffer2(topic, out);
14 | if(data == null){
15 | out.writeInt(-1);
16 | }else{
17 | out.writeInt(data.size());
18 | for(PartitionData partitionData : data){
19 | partitionData.serializable(out);
20 | }
21 | }
22 | }
23 |
24 | public String getTopic() {
25 | return topic;
26 | }
27 |
28 | public void setTopic(String topic) {
29 | this.topic = topic;
30 | }
31 |
32 | public List<PartitionData> getData() {
33 | return data;
34 | }
35 |
36 | public void setData(List<PartitionData> data) {
37 | this.data = data;
38 | }
39 | }
40 |
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/produce/TopicProduceRes.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean.produce;
2 |
3 | import io.netty.buffer.ByteBuf;
4 | import org.north.netty.common.utils.SerializeUtils;
5 |
6 | import java.io.Serializable;
7 | import java.util.ArrayList;
8 | import java.util.List;
9 |
10 | public class TopicProduceRes implements Serializable {
11 | private String topic;
12 | private List<PartitionResponse> partitionResponseList;
13 | public void deserialize(ByteBuf byteBuf) {
14 | this.topic = SerializeUtils.readStringToBuffer2(byteBuf);
15 | int partitionCount = byteBuf.readInt();
16 | if(partitionCount >= 0){
17 | partitionResponseList = new ArrayList<>();
18 | for(int i = 0; i < partitionCount; i++){
19 | PartitionResponse partitionResponse = new PartitionResponse();
20 | partitionResponse.deserialize(byteBuf);
21 | partitionResponseList.add(partitionResponse);
22 | }
23 | }
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/bean/topic/TopicMetaData.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.bean.topic;
2 |
3 | import com.north.netty.kafka.bean.partition.PartitionMateData;
4 | import io.netty.buffer.ByteBuf;
5 | import org.north.netty.common.utils.SerializeUtils;
6 |
7 | import java.io.Serializable;
8 | import java.util.ArrayList;
9 | import java.util.List;
10 |
11 | /**
12 | * @author laihaohua
13 | */
14 | public class TopicMetaData implements Serializable {
15 | private Short errorCode;
16 | private String topicName;
17 | /**
18 | * Whether this is an internal topic
19 | */
20 | private Boolean isInternal;
21 |
22 | private List<PartitionMateData> partitionMateDataList;
23 |
24 |
25 | public Short getErrorCode() {
26 | return errorCode;
27 | }
28 |
29 | public void setErrorCode(Short errorCode) {
30 | this.errorCode = errorCode;
31 | }
32 |
33 | public String getTopicName() {
34 | return topicName;
35 | }
36 |
37 | public void setTopicName(String topicName) {
38 | this.topicName = topicName;
39 | }
40 |
41 | public Boolean getInternal() {
42 | return isInternal;
43 | }
44 |
45 | public void setInternal(Boolean internal) {
46 | isInternal = internal;
47 | }
48 |
49 | public List<PartitionMateData> getPartitionMateDataList() {
50 | return partitionMateDataList;
51 | }
52 |
53 | public void setPartitionMateDataList(List<PartitionMateData> partitionMateDataList) {
54 | this.partitionMateDataList = partitionMateDataList;
55 | }
56 |
57 | public void deserialize(ByteBuf byteBuf) {
58 | this.errorCode = byteBuf.readShort();
59 | this.topicName = SerializeUtils.readStringToBuffer2(byteBuf);
60 | this.isInternal = byteBuf.readBoolean();
61 | int count = byteBuf.readInt();
62 | if(count >= 0){
63 | partitionMateDataList = new ArrayList<>(count);
64 | for(int i=0; i < count; i++){
65 | PartitionMateData partitionMateData = new PartitionMateData();
66 | partitionMateData.deserialize(byteBuf);
67 | partitionMateDataList.add(partitionMateData);
68 | }
69 | }
70 | }
71 | }
72 |
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/caches/RequestCacheCenter.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.caches;
2 |
3 | import com.google.common.cache.Cache;
4 | import com.google.common.cache.CacheBuilder;
5 | import com.north.netty.kafka.bean.AbstractKafkaResponse;
6 |
7 | import java.util.concurrent.TimeUnit;
8 |
9 | /**
10 | * @author laihaohua
11 | */
12 | public final class RequestCacheCenter {
13 | private Cache<Integer, AbstractKafkaResponse> cache = CacheBuilder.newBuilder()
14 | .concurrencyLevel(Runtime.getRuntime().availableProcessors())
15 | .expireAfterAccess(5, TimeUnit.MINUTES)
16 | .build();
17 |
18 | public void putKafkaResponse(Integer correlationId, AbstractKafkaResponse kafkaResponse){
19 | cache.put(correlationId, kafkaResponse);
20 | }
21 |
22 | public AbstractKafkaResponse getKafkaResponse(Integer correlationId){
23 | return cache.getIfPresent(correlationId);
24 | }
25 |
26 | public AbstractKafkaResponse waitForResp(Integer correlationId, long timeoutMs){
27 | long bt = System.currentTimeMillis();
28 | AbstractKafkaResponse response = null;
29 | while(response == null || response.getCorrelationId() == null){ // busy-poll until the decoder completes the cached response or the timeout elapses
30 | long et = System.currentTimeMillis();
31 | if(et > bt + timeoutMs){
32 | return null;
33 | }
34 | response = cache.getIfPresent(correlationId);
35 | }
36 | return response;
37 | }
38 |
39 |
40 | }
41 |
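
This cache is what pairs asynchronous responses with their requests: the client registers a placeholder response under the request's correlationId before writing to the channel, KafkaResponseDecoder (below) completes the placeholder when the broker replies, and the caller busy-polls with waitForResp. A minimal sketch of that flow (not repo code); with no decoder running it simply times out:

import com.north.netty.kafka.bean.AbstractKafkaResponse;
import com.north.netty.kafka.bean.produce.ProduceResponse;
import com.north.netty.kafka.caches.RequestCacheCenter;

public class RequestCacheCenterSketch {
    public static void main(String[] args) {
        RequestCacheCenter cacheCenter = new RequestCacheCenter();
        int correlationId = 1;                     // must match the id placed in the request header
        cacheCenter.putKafkaResponse(correlationId, new ProduceResponse());
        // ... the request would be written on the channel here; the decoder
        // would fill in the cached placeholder once the broker replies ...
        AbstractKafkaResponse resp = cacheCenter.waitForResp(correlationId, 1000L);
        System.out.println(resp == null ? "timed out" : "completed");
    }
}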
--------------------------------------------------------------------------------
/awesome-netty-kafka/src/main/java/com/north/netty/kafka/codec/KafkaResponseDecoder.java:
--------------------------------------------------------------------------------
1 | package com.north.netty.kafka.codec;
2 |
3 | import com.north.netty.kafka.bean.AbstractKafkaResponse;
4 | import com.north.netty.kafka.bean.KafkaResponseHeader;
5 | import com.north.netty.kafka.caches.RequestCacheCenter;
6 | import io.netty.buffer.ByteBuf;
7 | import io.netty.channel.ChannelHandlerContext;
8 | import io.netty.handler.codec.ByteToMessageDecoder;
9 |
10 | import java.util.List;
11 |
12 | public class KafkaResponseDecoder extends ByteToMessageDecoder {
13 | private RequestCacheCenter requestCacheCenter;
14 | public KafkaResponseDecoder(RequestCacheCenter requestCacheCenter){
15 | this.requestCacheCenter = requestCacheCenter;
16 | }
17 | @Override
18 | protected void decode(ChannelHandlerContext ctx, ByteBuf in, List