├── .gitignore
├── README.MD
├── pom.xml
└── src
    ├── main
    │   ├── java
    │   │   └── simplerpc
    │   │       ├── AutoBatchMode.java
    │   │       ├── AutoBatchWriteHandler.java
    │   │       ├── Commands.java
    │   │       ├── HandShakeHandler.java
    │   │       ├── IndexThreadFactory.java
    │   │       ├── NettyConnectManageHandler.java
    │   │       ├── NettyServer.java
    │   │       ├── NettyServerConfig.java
    │   │       ├── NettyTcpClient.java
    │   │       ├── NettyTcpClientConfig.java
    │   │       ├── NettyTcpClientRequest.java
    │   │       ├── RequestHandler.java
    │   │       ├── RequestProcessor.java
    │   │       └── benchmark
    │   │           ├── BenchBase.java
    │   │           ├── ClientStarter.java
    │   │           ├── RmqClient.java
    │   │           ├── RmqServer.java
    │   │           └── ServerStarter.java
    │   └── resources
    │       └── logback.xml
    └── test
        └── java
            └── simplerpc
                ├── BIOBenchmark.java
                ├── NettyClientBenchmark.java
                └── RmqBenchmark.java
/.gitignore:
--------------------------------------------------------------------------------
1 | target/
2 | *.iml
3 | .idea
4 | .DS_Store
--------------------------------------------------------------------------------
/README.MD:
--------------------------------------------------------------------------------
1 | This project is NOT production-ready!
2 |
3 | ## Introduction
4 |
5 | This project is a point-to-point RPC demo, optimized for performance as far as practical and benchmarked against rocketmq remoting. The goal is to use the analysis of this simple model to find directions for improving rocketmq remoting's performance.
6 |
7 | Compared with rocketmq remoting, this project makes three improvements:
8 |
9 | 1. Minimize object creation, array copying, and thread switching. As the tables below show, even with batching disabled, most scenarios improve slightly over rocketmq remoting.
10 | 2. Batched network I/O writes, on both the client and the server. The server currently has three modes: AUTO, ENABLE, and DISABLE. In asynchronous scenarios TPS improves more than tenfold.
11 | 3. Improved asynchronous backpressure; see the dedicated section below.
12 |
13 | Batching in rocketmq can happen at three levels: the business/user level, the message-processing level, and the network level, each with its own trade-offs. Letting users batch (the business level) gives the best results, but it raises usage complexity and is simply impossible in some business scenarios.
14 |
15 | This project batches automatically at the network I/O level, much like TCP's Nagle algorithm: when a packet arrives, do not write it out immediately; wait 1ms to see whether follow-up packets can be flushed together. This sometimes improves throughput, but the drawback mirrors Nagle's: in some scenarios it sharply degrades performance.
16 | For this project, that scenario is single-threaded (or few-threaded) synchronous sending. If the client is single-threaded and synchronous, it must receive a result before making the next call, so the server waits an extra 1ms in vain (no follow-up data ever arrives) and TPS theoretically cannot exceed 1000.
17 | So this batching feature cannot be on by default, and letting users decide is also hard: most users cannot tell whether it should be on, so to play it safe they would turn it off, which defeats the purpose.
18 |
19 | To address this, the project provides an AUTO mode: the program keeps recent statistics per TCP connection and uses an algorithm to estimate whether batching pays off for that connection, adjusting continuously at runtime.
20 | The effect can be seen in the benchmark tables below: in AUTO mode, throughput for single-threaded synchronous sending is close to DISABLE mode, while for asynchronous sending it is close to ENABLE mode. A minimal configuration sketch follows.
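A minimal sketch of selecting the server-side batch mode through this project's own `NettyServerConfig` (the port value is illustrative; the setters and defaults come from the source):

```java
import simplerpc.AutoBatchMode;
import simplerpc.NettyServer;
import simplerpc.NettyServerConfig;

public class ServerDemo {
    public static void main(String[] args) throws Exception {
        NettyServerConfig config = new NettyServerConfig();
        config.setPort(12345);
        // Choose one of the three server-side batch modes:
        config.setAutoBatchMode(AutoBatchMode.MODE_AUTO);      // adaptive, per connection
        // config.setAutoBatchMode(AutoBatchMode.MODE_ENABLE);  // always batch
        // config.setAutoBatchMode(AutoBatchMode.MODE_DISABLE); // never batch
        config.setBatchTimeWindowsNanos(1_000_000L); // 1ms batch window, the Nagle-style delay
        new NettyServer(config).start();
    }
}
```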
21 |
22 |
23 | ## Benchmark setup and results
24 |
25 | Run the main methods of NettyClientBenchmark and RmqBenchmark to reproduce the results. The tests ran on a Mac, 10 seconds per run.
26 | The Simple RPC client's auto-batch threshold is 200, so for synchronous sending the program logic differs depending on whether the thread count exceeds 200.
27 |
28 | Client: 128 threads
29 |
30 | | RPC framework | Call style | Parameters | Server batch mode | Result (TPS) |
31 | | --- | --- | --- | --- | ---: |
32 | | RMQ Remoting | sync | default | | 101,374 |
33 | | RMQ Remoting | async | default | | 101,997 |
34 | | Simple RPC | sync | default | AUTO | 118,032 |
35 | | Simple RPC | sync | default | ENABLE | 121,644 |
36 | | Simple RPC | sync | default | DISABLE | 103,662 |
37 | | Simple RPC | async | default | AUTO | 1,132,824 |
38 | | Simple RPC | async | default | ENABLE | 1,176,219 |
39 | | Simple RPC | async | default | DISABLE | 107,453 |
40 | | Simple RPC | async | requests handled on IO threads | ENABLE | 1,343,342 |
41 |
42 | Client: 256 threads
43 |
44 | | RPC framework | Call style | Parameters | Server batch mode | Result (TPS) |
45 | | --- | --- | --- | --- | ---: |
46 | | RMQ Remoting | sync | default | | 108,762 |
47 | | RMQ Remoting | async | default | | 96,250 |
48 | | Simple RPC | sync | default | AUTO | 241,368 |
49 | | Simple RPC | sync | default | ENABLE | 242,345 |
50 | | Simple RPC | sync | default | DISABLE | 97,612 |
51 | | Simple RPC | async | default | AUTO | 1,188,924 |
52 | | Simple RPC | async | default | ENABLE | 1,171,117 |
53 | | Simple RPC | async | default | DISABLE | 103,415 |
54 |
55 | Client: 32 threads
56 |
57 | | RPC framework | Call style | Parameters | Server batch mode | Result (TPS) |
58 | | --- | --- | --- | --- | ---: |
59 | | RMQ Remoting | sync | default | | 84,892 |
60 | | RMQ Remoting | async | default | | 104,381 |
61 | | Simple RPC | sync | default | AUTO | 92,168 |
62 | | Simple RPC | sync | default | ENABLE | **22,147** |
63 | | Simple RPC | sync | default | DISABLE | 94,399 |
64 | | Simple RPC | async | default | AUTO | 1,155,895 |
65 | | Simple RPC | async | default | ENABLE | 1,179,657 |
66 | | Simple RPC | async | default | DISABLE | 108,141 |
67 |
68 | Client: 1 thread
69 |
70 | | RPC framework | Call style | Parameters | Server batch mode | Result (TPS) |
71 | | --- | --- | --- | --- | ---: |
72 | | RMQ Remoting | sync | default | | 11,192 |
73 | | RMQ Remoting | async | default | | 114,825 |
74 | | Simple RPC | sync | default | AUTO | 13,240 |
75 | | Simple RPC | sync | default | ENABLE | **668** |
76 | | Simple RPC | sync | default | DISABLE | 13,520 |
77 | | Simple RPC | async | default | AUTO | **1,246,949** |
78 | | Simple RPC | async | default | ENABLE | **1,320,533** |
79 | | Simple RPC | async | default | DISABLE | 111,999 |
80 | | Simple RPC | async | requests handled on IO threads | ENABLE | **1,528,428** |
81 |
82 | For convenience, the tests above were run locally on a Mac.
83 | Tests were also run on Linux (using ServerStarter and ClientStarter) with the client and server on two different machines: without tuning any parameters TPS was about 800,000, and with a little tuning it exceeded one million.
84 |
85 | ## On asynchronous backpressure
86 | When an RPC client (message producer) issues asynchronous calls faster than responses come back (a rate bounded by both server processing speed and network speed), the client lowers its call rate so that TPS adapts automatically to the maximum response rate instead of failing. That is asynchronous backpressure.
87 |
88 | A classic example is TCP flow control: the negotiated window size caps the sender's rate so the receiver is never overwhelmed, and the application above gets a reliable service from TCP.
89 | But a TCP client and server are one-to-one, whereas a message broker's clients and servers are many-to-many, which makes window negotiation very difficult.
90 |
91 | There is no simple way to provide absolutely reliable asynchronous backpressure, but a few techniques and configuration choices keep high-TPS asynchronous calls from failing in most cases.
92 | You can run an experiment with this project: make the server sleep 1ms while handling each RPC request. Under that load, rocketmq remoting asynchronous calls fail in large numbers, while simple rpc calls all succeed.
93 |
94 | Simple RPC's basic asynchronous backpressure rests on three points (a runnable sketch follows the two examples below):
95 | 1. A semaphore caps the number of pending requests (sent but not yet answered). When no permit is available, even an asynchronous call must block until a permit is released.
96 | 2. The client buffer (the semaphore) must be smaller than the server buffer.
97 | 3. The client's asynchronous send timeout must be long enough: longer than the time the server needs to process all of the client's pending requests.
98 |
99 | Example for point 2: suppose the client may have 1000 requests in flight and fires all 1000 within 1ms; from request 1001 on, it blocks waiting for a semaphore permit.
100 | If the server's request Executor has 10 threads (each request taking 5ms) and a queue of 100, it holds at most 110 requests, so the other 890 requests never enter the Executor and fail fast.
101 | When those fast-fail responses reach the client, they release 890 permits; the blocked sender (request 1001) acquires a permit, keeps sending, and keeps failing. The net result: most requests fail.
102 |
103 | Example for point 3: if the client semaphore is 1000 and the server can process 500 TPS, the timeout must be at least 2 seconds. An asynchronous call itself takes almost no time, so all 1000 requests go out immediately and the sender then blocks on permits.
104 | At the server's processing rate, the last request sent returns only after 2 seconds, so the timeout must be at least 2 seconds.
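A minimal sketch of those sizing rules using this project's client API (the server address and the assumed 500 TPS server are illustrative, not from the source):

```java
import java.util.Arrays;
import java.util.concurrent.CompletableFuture;

import io.netty.buffer.ByteBuf;

import simplerpc.Commands;
import simplerpc.NettyTcpClient;
import simplerpc.NettyTcpClientConfig;

public class BackpressureDemo {
    public static void main(String[] args) throws Exception {
        NettyTcpClientConfig config = new NettyTcpClientConfig();
        // Points 1 and 2: the client buffer (semaphore permits); keep it below the server's buffer.
        config.setMaxPending(1000);

        NettyTcpClient client = new NettyTcpClient(() -> Arrays.asList("127.0.0.1:12345"), config);
        client.start();

        // Point 3: with 1000 permits and an assumed 500 TPS server, the last pending
        // request returns only after ~2s, so the timeout must exceed 2000ms.
        long timeoutMillis = 3000;
        CompletableFuture<Void> f = client.sendRequest(Commands.COMMAND_PING, new NettyTcpClient.Callback<Void>() {
            @Override
            public void encode(ByteBuf out) {
                // empty ping body
            }

            @Override
            public Void decode(ByteBuf in) {
                return null; // ignore the response body
            }
        }, timeoutMillis);
        f.get();
        client.close();
    }
}
```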
105 |
106 |
107 | Concretely for rocketmq, an asynchronous send can fail for any of these reasons:
108 | 1. DefaultMQProducerImpl.send submits the async send task to a thread pool, which may reject it (default queue size 50000).
109 | 2. Acquiring semaphoreAsync times out (default 65535 permits); this acquisition waits, using tryAcquire(long timeout, TimeUnit unit).
110 | 3. The server's pending-request queue is full (default queue size 10000).
111 | 4. Client timeout (default 3000ms).
112 | 5. Server timeout (controlled by brokerFastFailureEnable and osPageCacheBusyTimeOutMills, default 1000ms).
113 | 6. Other cross-thread handoffs can be rejected when a thread pool's queue is full, e.g. between different netty event loops (depends on the configuration; I have not examined it closely).
114 |
115 | This raises the following problems:
116 |
117 | 1. The Executor in cause 1 does not block, which works against backpressure;
118 | 2. The 50000 queue in cause 1 is smaller than the 65535 permits in cause 2, so the semaphore can still have permits while the Executor no longer accepts tasks;
119 | 3. The client buffer is clearly larger than the server's;
120 | 4. The 1000ms server timeout is shorter than the client's 3000ms: the client is still willing to wait for a request the server has already discarded. That said, osPageCacheBusy rarely triggers, so this case is uncommon;
121 |
122 |
123 | ## TODO
124 | 1. simple rpc needs a better AUTO algorithm that adapts to more workloads.
125 | 2. The current AutoBatchWriteHandler does not properly notify listeners on the futures of write operations.
126 | 3. The simple rpc server and client should ideally have a simple window mechanism.
127 | 4. During rocketmq tests, "createChannel: connect remote host[127.0.0.1:8888] success, AbstractBootstrap$PendingRegistrationPromise@4f40e8cb(success)" showed up repeatedly, suggesting rocketmq has a minor concurrency issue here.
128 | 5. Auto batching does not help synchronous calls much (unless there are many threads), and rocketmq asynchronous calls fail easily at high TPS; it needs modest changes to gain basic backpressure capability.
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0"
3 |          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
5 |     <modelVersion>4.0.0</modelVersion>
6 |
7 |     <groupId>huangli</groupId>
8 |     <artifactId>simple-rpc</artifactId>
9 |     <version>1.0-SNAPSHOT</version>
10 |
11 |     <properties>
12 |         <maven.compiler.source>8</maven.compiler.source>
13 |         <maven.compiler.target>8</maven.compiler.target>
14 |         <rmqVersion>4.9.2</rmqVersion>
15 |     </properties>
16 |
17 |     <dependencies>
18 |         <dependency>
19 |             <groupId>io.netty</groupId>
20 |             <artifactId>netty-all</artifactId>
21 |             <version>4.1.65.Final</version>
22 |         </dependency>
23 |         <dependency>
24 |             <groupId>commons-cli</groupId>
25 |             <artifactId>commons-cli</artifactId>
26 |             <version>1.4</version>
27 |         </dependency>
28 |         <dependency>
29 |             <groupId>org.slf4j</groupId>
30 |             <artifactId>slf4j-api</artifactId>
31 |             <version>1.7.30</version>
32 |         </dependency>
33 |         <dependency>
34 |             <groupId>ch.qos.logback</groupId>
35 |             <artifactId>logback-classic</artifactId>
36 |             <version>1.2.3</version>
37 |         </dependency>
38 |         <dependency>
39 |             <groupId>org.junit.jupiter</groupId>
40 |             <artifactId>junit-jupiter-engine</artifactId>
41 |             <version>5.7.1</version>
42 |             <scope>test</scope>
43 |         </dependency>
44 |
45 |         <dependency>
46 |             <groupId>org.apache.rocketmq</groupId>
47 |             <artifactId>rocketmq-remoting</artifactId>
48 |             <version>${rmqVersion}</version>
49 |         </dependency>
50 |     </dependencies>
51 |
52 |     <profiles> <!-- the mainClass property name is inferred; the original tags were lost in extraction -->
53 |         <profile>
54 |             <id>client</id>
55 |             <properties>
56 |                 <mainClass>simplerpc.benchmark.ClientStarter</mainClass>
57 |             </properties>
58 |         </profile>
59 |         <profile>
60 |             <id>server</id>
61 |             <properties>
62 |                 <mainClass>simplerpc.benchmark.ServerStarter</mainClass>
63 |             </properties>
64 |         </profile>
65 |
66 |         <profile>
67 |             <id>rmqClient</id>
68 |             <properties>
69 |                 <mainClass>simplerpc.benchmark.RmqClient</mainClass>
70 |             </properties>
71 |         </profile>
72 |         <profile>
73 |             <id>rmqServer</id>
74 |             <properties>
75 |                 <mainClass>simplerpc.benchmark.RmqServer</mainClass>
76 |             </properties>
77 |         </profile>
78 |     </profiles>
79 |
80 | </project>
--------------------------------------------------------------------------------
/src/main/java/simplerpc/AutoBatchMode.java:
--------------------------------------------------------------------------------
1 | package simplerpc;
2 |
3 | import java.util.concurrent.ThreadLocalRandom;
4 |
5 | /**
6 | * @author huangli
7 | */
8 | @SuppressWarnings("checkstyle:MagicNumber")
9 | public class AutoBatchMode {
10 |
11 | public static final int MODE_DISABLE = 0;
12 | public static final int MODE_ENABLE = 1;
13 | public static final int MODE_AUTO = 2;
14 |
15 | private static final float MAX_FACTOR = 1.5f;
16 | private static final float UPDATE_MIN = -0.5f;
17 | private static final float UPDATE_MAX = 0.2f;
18 |
19 | // 1 second, in nanoseconds
20 | private static final long RECOVER_INTERVAL = 1000 * 1000 * 1000;
21 | private static final float RECOVER_FACTOR = 0.1f;
22 |
23 | private final int mode;
24 | private final int maxBatchCount;
25 | private final int maxBufferSize;
26 | private final long timeWindowsNanos;
27 |
28 | private float factor = 1.0f;
29 |
30 | private boolean batchStarted;
31 | private boolean fullBatch;
32 | private int pendingCount;
33 | private int pendingSize;
34 |
35 | private long lastFlushTime = 0;
36 | private long lastDownTime = 0;
37 |
38 | public AutoBatchMode(int mode, int maxBatchCount, int maxBufferSize, long timeWindowsNanos) {
39 | this.mode = mode;
40 | this.maxBatchCount = maxBatchCount;
41 | this.maxBufferSize = maxBufferSize;
42 | this.timeWindowsNanos = timeWindowsNanos;
43 | }
44 |
45 | public boolean isBatchStarted() {
46 | return batchStarted;
47 | }
48 |
49 | public int getPendingSize() {
50 | return pendingSize;
51 | }
52 |
53 | public boolean startBatchIfNecessary() {
54 | if (batchStarted) {
55 | return false;
56 | }
57 | batchStarted = startBatchIfNecessary0();
58 | return batchStarted;
59 | }
60 |
61 | private boolean startBatchIfNecessary0() {
62 | if (mode == MODE_DISABLE) {
63 | return false;
64 | }
65 | if (mode == MODE_ENABLE) {
66 | return true;
67 | }
68 |
69 | long time = System.nanoTime();
70 | if (time - lastFlushTime > timeWindowsNanos) {
71 | // The first write always flushes directly. For later ones, look back one time window from the current moment and check whether a flush happened within it.
72 | // If no flush happened in that window, TPS is low: a batch would likely catch nothing, the benefit would be tiny even if it did, and it would only stretch the RT.
73 | return false;
74 | }
75 | if (factor >= 1.0f) {
76 | return true;
77 | }
78 | if (factor == 0.0f && (time - lastDownTime) > RECOVER_INTERVAL) {
79 | // Every so often, give the factor a chance to recover
80 | factor = RECOVER_FACTOR;
81 | }
82 | return ThreadLocalRandom.current().nextFloat() < factor;
83 | }
84 |
85 | public void addCount() {
86 | pendingCount++;
87 | }
88 |
89 | public void addSize(int size) {
90 | pendingSize += size;
91 | }
92 |
93 | public boolean shouldFlush() {
94 | boolean b = pendingCount >= maxBatchCount || pendingSize >= maxBufferSize;
95 | if (b) {
96 | fullBatch = true;
97 | }
98 | return b;
99 | }
100 |
101 | public void finish(boolean finishWindow) {
102 | long time = System.nanoTime();
103 | if (finishWindow && mode == MODE_AUTO) {
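// AUTO-mode feedback: a full batch raises the factor by UPDATE_MAX; otherwise the window's
// fill rate maps linearly into [UPDATE_MIN, UPDATE_MAX], so sparse windows drive the factor
// toward 0 (batching off) and busy windows drive it up, capped at MAX_FACTOR.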
104 | if (fullBatch) {
105 | factor += UPDATE_MAX;
106 | } else {
107 | float batchRate = Math.max((float) pendingCount / maxBatchCount, (float) pendingSize / maxBufferSize);
108 | float delta = (UPDATE_MAX - UPDATE_MIN) * batchRate + UPDATE_MIN;
109 | factor = factor + delta;
110 | factor = Math.min(MAX_FACTOR, factor);
111 | if (factor <= 0) {
112 | factor = 0.0f;
113 | lastDownTime = time;
114 | }
115 | }
116 | }
117 |
118 | lastFlushTime = time;
119 | pendingCount = 0;
120 | pendingSize = 0;
121 |
122 | if (finishWindow) {
123 | fullBatch = false;
124 | batchStarted = false;
125 | }
126 | }
127 | }
128 |
--------------------------------------------------------------------------------
/src/main/java/simplerpc/AutoBatchWriteHandler.java:
--------------------------------------------------------------------------------
1 | package simplerpc;
2 |
3 | import java.util.LinkedList;
4 | import java.util.concurrent.TimeUnit;
5 |
6 | import org.slf4j.Logger;
7 | import org.slf4j.LoggerFactory;
8 |
9 | import io.netty.buffer.ByteBuf;
10 | import io.netty.channel.ChannelDuplexHandler;
11 | import io.netty.channel.ChannelHandler;
12 | import io.netty.channel.ChannelHandlerContext;
13 | import io.netty.channel.ChannelPromise;
14 | import io.netty.util.AttributeKey;
15 |
16 | /**
17 | * @author huangli
18 | */
19 | @ChannelHandler.Sharable
20 | @SuppressWarnings("checkstyle:MagicNumber")
21 | public class AutoBatchWriteHandler extends ChannelDuplexHandler {
22 | private static final Logger logger = LoggerFactory.getLogger(AutoBatchWriteHandler.class);
23 |
24 | private static final AttributeKey<AutoBatchStatus> STATUS = AttributeKey.valueOf("AutoBatchStatus");
25 |
26 | private final int autoBatchMode;
27 | private final int maxBufferSize;
28 | private final int maxBatchCount;
29 | private final long batchTimeWindowNanos;
30 |
31 | @SuppressWarnings("checkstyle:VisibilityModifier")
32 | private static class AutoBatchStatus {
33 | LinkedList<ByteBuf> msgs = new LinkedList<>();
34 | final AutoBatchMode mode;
35 |
36 | private long totalBytes;
37 | private long totalRequestFlushCount;
38 | private long totalActualFlushCount;
39 |
40 | public AutoBatchStatus(AutoBatchMode mode) {
41 | this.mode = mode;
42 | }
43 | }
44 |
45 | public AutoBatchWriteHandler(int autoBatchMode, int maxBatchCount, int maxBufferSize, long batchTimeWindowNanos) {
46 | this.autoBatchMode = autoBatchMode;
47 | this.maxBatchCount = maxBatchCount;
48 | this.maxBufferSize = maxBufferSize;
49 | this.batchTimeWindowNanos = batchTimeWindowNanos;
50 | }
51 |
52 | @Override
53 | public void channelActive(ChannelHandlerContext ctx) throws Exception {
54 | super.channelActive(ctx);
55 | AutoBatchMode mode = new AutoBatchMode(autoBatchMode, maxBatchCount, maxBufferSize, batchTimeWindowNanos);
56 | ctx.channel().attr(STATUS).set(new AutoBatchStatus(mode));
57 | }
58 |
59 | @Override
60 | public void channelInactive(ChannelHandlerContext ctx) throws Exception {
61 | AutoBatchStatus status = ctx.channel().attr(STATUS).get();
62 | super.channelInactive(ctx);
63 | logger.debug("[server] channelInactive avgBatchCount={}, avgBatchSize={}",
64 | (float) status.totalRequestFlushCount / status.totalActualFlushCount,
65 | (float) status.totalBytes / status.totalActualFlushCount);
66 | }
67 |
68 | @Override
69 | public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
70 | if (msg instanceof ByteBuf) {
71 | AutoBatchStatus status = ctx.channel().attr(STATUS).get();
72 | ByteBuf buffer = (ByteBuf) msg;
73 | status.totalBytes += buffer.readableBytes();
74 | if (status.mode.isBatchStarted()) {
75 | status.msgs.add(buffer);
76 | status.mode.addSize(buffer.readableBytes());
77 | // if (status.mode.shouldFlush()) {
78 | // doFlush(ctx, status);
79 | // }
80 | } else {
81 | super.write(ctx, msg, promise);
82 | }
83 | } else {
84 | super.write(ctx, msg, promise);
85 | }
86 | }
87 |
88 | @Override
89 | public void flush(ChannelHandlerContext ctx) throws Exception {
90 | AutoBatchStatus status = ctx.channel().attr(STATUS).get();
91 | status.totalRequestFlushCount++;
92 | if (status.mode.isBatchStarted()) {
93 | status.mode.addCount();
94 | if (status.mode.shouldFlush()) {
95 | doFlush(ctx, status, false);
96 | }
97 | } else if (status.mode.startBatchIfNecessary()) {
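// A new batch window opens: subsequent writes are buffered and a single real flush
// is scheduled one time window from now (the Nagle-style delay).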
98 | status.mode.addCount();
99 | ctx.executor().schedule(() -> {
100 | doFlush(ctx, status, true);
101 | }, batchTimeWindowNanos, TimeUnit.NANOSECONDS);
102 | } else {
103 | status.totalActualFlushCount++;
104 | super.flush(ctx);
105 | status.mode.finish(false);
106 | }
107 | }
108 |
109 | private void doFlush(ChannelHandlerContext ctx, AutoBatchStatus status, boolean finishWindow) {
110 | LinkedList<ByteBuf> queue = status.msgs;
111 | status.totalActualFlushCount++;
112 | if (queue.size() > 0) {
113 | int bytes = status.mode.getPendingSize();
114 | ByteBuf buffer = ctx.alloc().directBuffer(bytes);
115 | for (ByteBuf buf : queue) {
116 | buffer.writeBytes(buf);
117 | buf.release();
118 | }
119 | queue.clear();
120 | ctx.writeAndFlush(buffer);
121 | } else {
122 | ctx.flush();
123 | }
124 | status.mode.finish(finishWindow);
125 | }
126 | }
127 |
--------------------------------------------------------------------------------
/src/main/java/simplerpc/Commands.java:
--------------------------------------------------------------------------------
1 | package simplerpc;
2 |
3 | /**
4 | * Defines the protocol constants.
5 | *
6 | * Frame layout:
7 | * ------------------------------------------------------------------------------------------------
8 | * | frame length, 4 bytes | frame type, 1 byte | command code, 2 bytes | request seq, 4 bytes | body |
9 | * ------------------------------------------------------------------------------------------------
10 | *
11 | * The 4 frame-length bytes do not count themselves. The frame type distinguishes requests from responses, as defined by TYPE_REQ/TYPE_RESP. Application command codes should be 1000 and above; lower codes are reserved for the system.
12 | *
13 | * The frame type currently uses only the lowest 2 bits; the upper 6 bits are reserved and should be ignored if not supported.
14 | *
15 | * The request body layout is user-defined. The response body layout is:
16 | * ------------------------------------
17 | * | response code, 2 bytes | response content |
18 | * ------------------------------------
19 | *
20 | * On a success code, a user-defined response content follows; on a failure code, an error message string follows. Strings are UTF-8 encoded and prefixed with a 4-byte length.
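* As an illustrative example (not from the source): a PING request carrying a 3-byte body would be encoded as
* 0x0000000A (length = 7-byte head + 3-byte body) | 0x01 (TYPE_REQ) | 0x0001 (COMMAND_PING) | 0x00000001 (seq) | body.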
21 | *
22 | * @author huangli
23 | */
24 | public interface Commands {
25 | byte[] HAND_SHAKE_BYTES = new byte[] {
26 | -64, 2, -98, 0, -89, -8, 40, 111, 80, -61, 78, 59, 26, -104, -38, 29, 104, -100, 66, -78, -94, 3, 40, -33,
27 | -101, 52, 90, 60, 109, 53, 77, -12
28 | };
29 |
30 | short COMMAND_PING = 1;
31 | short COMMAND_CLOSE = 2;
32 |
33 | short CODE_SUCCESS = 1;
34 | short CODE_FAIL = 2;
35 |
36 | // 1 byte for request/response type, 2 bytes for the command, 4 bytes for the request/response sequence number
37 | int HEAD_LENGTH = 7;
38 |
39 | // It would be better to change these to 0 and 1 later, saving one bit
40 | byte TYPE_REQ = 1;
41 | byte TYPE_RESP = 2;
42 |
43 | int TYPE_MASK = 3;
44 | }
45 |
--------------------------------------------------------------------------------
/src/main/java/simplerpc/HandShakeHandler.java:
--------------------------------------------------------------------------------
1 | package simplerpc;
2 |
3 | import java.util.Arrays;
4 |
5 | import org.slf4j.Logger;
6 | import org.slf4j.LoggerFactory;
7 |
8 | import io.netty.buffer.ByteBuf;
9 | import io.netty.channel.ChannelHandlerContext;
10 | import io.netty.channel.ChannelInboundHandlerAdapter;
11 |
12 | /**
13 | * @author huangli
14 | */
15 | public class HandShakeHandler extends ChannelInboundHandlerAdapter {
16 | private static final Logger logger = LoggerFactory.getLogger(HandShakeHandler.class);
17 | private final byte[] handShakeBytes;
18 | private final boolean writeBack;
19 |
20 | private byte[] readBuffer;
21 | private int writeIndex;
22 |
23 | public HandShakeHandler(byte[] handShakeBytes, boolean writeBack) {
24 | this.handShakeBytes = handShakeBytes;
25 | this.readBuffer = new byte[handShakeBytes.length];
26 | this.writeBack = writeBack;
27 | }
28 |
29 | @Override
30 | public void channelRead(ChannelHandlerContext ctx, Object obj) {
31 | ByteBuf msg = (ByteBuf) obj;
32 | boolean needRelease = true;
33 | try {
34 | int readLen = readBuffer.length - writeIndex;
35 | readLen = Math.min(readLen, msg.readableBytes());
36 | msg.readBytes(readBuffer, writeIndex, readLen);
37 | writeIndex += readLen;
38 | if (writeIndex < readBuffer.length) {
39 | // Buffer not full yet; keep reading
40 | return;
41 | }
42 | if (!Arrays.equals(readBuffer, handShakeBytes)) {
43 | logger.error("handshake mismatch: {}", ctx.channel().remoteAddress());
44 | ctx.close();
45 | } else {
46 | ctx.pipeline().remove(this);
47 | if (writeBack) {
48 | ByteBuf buf = ctx.alloc().buffer();
49 | buf.writeBytes(handShakeBytes);
50 | ctx.writeAndFlush(buf);
51 | }
52 | readBuffer = null;
53 | logger.info("[{}] handshake success: {}", writeBack ? "server" : "client", ctx.channel());
54 | if (msg.readableBytes() > 0) {
55 | needRelease = false;
56 | // The remaining bytes are still needed, so don't release here; let the rest of the pipeline release them
57 | ctx.fireChannelRead(msg);
58 | }
59 | }
60 | } finally {
61 | if (needRelease) {
62 | msg.release();
63 | }
64 | }
65 | }
66 | }
67 |
--------------------------------------------------------------------------------
/src/main/java/simplerpc/IndexThreadFactory.java:
--------------------------------------------------------------------------------
1 | package simplerpc;
2 |
3 | import java.util.concurrent.ThreadFactory;
4 | import java.util.concurrent.atomic.AtomicInteger;
5 |
6 | /**
7 | * @author huangli
8 | */
9 | class IndexThreadFactory implements ThreadFactory {
10 |
11 | private final AtomicInteger threadIndex = new AtomicInteger(0);
12 | private final String name;
13 |
14 | public IndexThreadFactory(String name) {
15 | this.name = name;
16 | }
17 |
18 | @Override
19 | public Thread newThread(Runnable r) {
20 | return new Thread(r, name + "_" + this.threadIndex.incrementAndGet());
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/src/main/java/simplerpc/NettyConnectManageHandler.java:
--------------------------------------------------------------------------------
1 | package simplerpc;
2 |
3 | import java.net.SocketAddress;
4 | import java.util.function.Consumer;
5 |
6 | import io.netty.channel.Channel;
7 | import io.netty.channel.ChannelDuplexHandler;
8 | import io.netty.channel.ChannelHandlerContext;
9 | import io.netty.channel.ChannelPromise;
10 | import io.netty.handler.timeout.IdleStateEvent;
11 | import io.netty.util.internal.logging.InternalLogger;
12 | import io.netty.util.internal.logging.InternalLoggerFactory;
13 |
14 | /**
15 | * @author huangli
16 | */
17 | public class NettyConnectManageHandler extends ChannelDuplexHandler {
18 |
19 | private static final InternalLogger logger =
20 | InternalLoggerFactory.getInstance(NettyConnectManageHandler.class.getName());
21 |
22 | private final String prefix;
23 | private final Consumer<Channel> closeAction;
24 |
25 | public NettyConnectManageHandler(boolean server, Consumer<Channel> closeAction) {
26 | this.prefix = server ? "[server]" : "[client]";
27 | this.closeAction = closeAction;
28 | }
29 |
30 | @Override
31 | public void connect(ChannelHandlerContext ctx, SocketAddress remoteAddress, SocketAddress localAddress,
32 | ChannelPromise promise) throws Exception {
33 | super.connect(ctx, remoteAddress, localAddress, promise);
34 | logger.info("{} connected. remote={}", prefix, remoteAddress);
35 | }
36 |
37 | @Override
38 | public void disconnect(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception {
39 | super.disconnect(ctx, promise);
40 | if (logger.isInfoEnabled()) {
41 | SocketAddress s = ctx.channel() == null ? null : ctx.channel().remoteAddress();
42 | logger.info("{} disconnected. remote={}", prefix, s);
43 | }
44 | }
45 |
46 | @Override
47 | public void close(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception {
48 | super.close(ctx, promise);
49 | if (logger.isInfoEnabled()) {
50 | SocketAddress s = ctx.channel() == null ? null : ctx.channel().remoteAddress();
51 | logger.info("{} closed. remote={}", prefix, s);
52 | }
53 | }
54 |
55 | @Override
56 | public void channelActive(ChannelHandlerContext ctx) throws Exception {
57 | super.channelActive(ctx);
58 | if (logger.isInfoEnabled()) {
59 | SocketAddress s = ctx.channel() == null ? null : ctx.channel().remoteAddress();
60 | logger.info("{} channelActive. remote={}", prefix, s);
61 | }
62 | }
63 |
64 | @Override
65 | public void channelInactive(ChannelHandlerContext ctx) throws Exception {
66 | super.channelInactive(ctx);
67 | if (logger.isInfoEnabled()) {
68 | SocketAddress s = ctx.channel() == null ? null : ctx.channel().remoteAddress();
69 | logger.info("{} channelInactive. remote={}", prefix, s);
70 | }
71 | }
72 |
73 | @Override
74 | public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
75 | if (evt instanceof IdleStateEvent) {
76 | IdleStateEvent event = (IdleStateEvent) evt;
77 | SocketAddress s = ctx.channel() == null ? null : ctx.channel().remoteAddress();
78 | logger.warn("{} detect {}. remote={}", prefix, event.state(), s);
79 | closeAction.accept(ctx.channel());
80 | }
81 | super.userEventTriggered(ctx, evt);
82 | }
83 |
84 | @Override
85 | public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
86 | SocketAddress s = ctx.channel() == null ? null : ctx.channel().remoteAddress();
87 | logger.warn(prefix + " exceptionCaught. remote=" + s, cause);
88 | closeAction.accept(ctx.channel());
89 | }
90 | }
91 |
--------------------------------------------------------------------------------
/src/main/java/simplerpc/NettyServer.java:
--------------------------------------------------------------------------------
1 | package simplerpc;
2 |
3 | import java.util.concurrent.TimeUnit;
4 |
5 | import org.slf4j.Logger;
6 | import org.slf4j.LoggerFactory;
7 |
8 | import io.netty.bootstrap.ServerBootstrap;
9 | import io.netty.buffer.PooledByteBufAllocator;
10 | import io.netty.channel.Channel;
11 | import io.netty.channel.ChannelFutureListener;
12 | import io.netty.channel.ChannelHandler;
13 | import io.netty.channel.ChannelHandlerContext;
14 | import io.netty.channel.ChannelInitializer;
15 | import io.netty.channel.ChannelOption;
16 | import io.netty.channel.ChannelOutboundHandlerAdapter;
17 | import io.netty.channel.ChannelPromise;
18 | import io.netty.channel.EventLoopGroup;
19 | import io.netty.channel.epoll.EpollEventLoopGroup;
20 | import io.netty.channel.epoll.EpollServerSocketChannel;
21 | import io.netty.channel.nio.NioEventLoopGroup;
22 | import io.netty.channel.socket.SocketChannel;
23 | import io.netty.channel.socket.nio.NioServerSocketChannel;
24 | import io.netty.handler.codec.LengthFieldBasedFrameDecoder;
25 | import io.netty.handler.timeout.IdleStateHandler;
26 | import io.netty.util.concurrent.DefaultEventExecutorGroup;
27 |
28 | /**
29 | * @author huangli
30 | */
31 | public class NettyServer {
32 |
33 | private static final Logger logger = LoggerFactory.getLogger(NettyServer.class);
34 | private final NettyServerConfig config;
35 |
36 | private EventLoopGroup eventLoopGroupBoss;
37 | private EventLoopGroup eventLoopGroupSelector;
38 | private DefaultEventExecutorGroup bizExecutorGroup;
39 |
40 | // TODO: this should be injected
41 | private final RequestHandler requestHandler = new RequestHandler();
42 |
43 | private final WriteExHandler writeExHandler = new WriteExHandler();
44 | private final AutoBatchWriteHandler autoBatchWriteHandler;
45 |
46 | public NettyServer(NettyServerConfig config) {
47 | this.config = config;
48 | if (config.getAutoBatchMode() != AutoBatchMode.MODE_DISABLE) {
49 | autoBatchWriteHandler = new AutoBatchWriteHandler(config.getAutoBatchMode(), config.getMaxBatchCount(),
50 | config.getMaxBufferSize(), config.getBatchTimeWindowsNanos());
51 | } else {
52 | autoBatchWriteHandler = null;
53 | }
54 | }
55 |
56 | public void start() throws Exception {
57 | ServerBootstrap serverBootstrap = new ServerBootstrap();
58 | if (config.isEpoll()) {
59 | this.eventLoopGroupBoss = new EpollEventLoopGroup(1, new IndexThreadFactory("ServerBoss"));
60 | this.eventLoopGroupSelector =
61 | new EpollEventLoopGroup(config.getIoThreads(), new IndexThreadFactory("ServerSelector"));
62 | } else {
63 | this.eventLoopGroupBoss = new NioEventLoopGroup(1, new IndexThreadFactory("ServerBoss"));
64 | this.eventLoopGroupSelector =
65 | new NioEventLoopGroup(config.getIoThreads(), new IndexThreadFactory("ServerSelector"));
66 | }
67 |
68 | if (config.getBizThreads() > 0) {
69 | this.bizExecutorGroup = new DefaultEventExecutorGroup(config.getBizThreads(), new IndexThreadFactory("ServerBiz"));
70 | }
71 |
72 | serverBootstrap.group(this.eventLoopGroupBoss, this.eventLoopGroupSelector)
73 | .channel(config.isEpoll() ? EpollServerSocketChannel.class : NioServerSocketChannel.class)
74 | .option(ChannelOption.SO_REUSEADDR, true)
75 | .childOption(ChannelOption.SO_KEEPALIVE, false)
76 | .childOption(ChannelOption.TCP_NODELAY, true)
77 | .childOption(ChannelOption.SO_SNDBUF, 65535)
78 | .childOption(ChannelOption.SO_RCVBUF, 65535)
79 | .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
80 | .localAddress(config.getPort())
81 | .childHandler(new ChannelInitializer<SocketChannel>() {
82 | @Override
83 | public void initChannel(SocketChannel ch) {
84 | if (config.getMaxIdleSeconds() > 0) {
85 | ch.pipeline().addLast(new IdleStateHandler(config.getMaxIdleSeconds(), 0, 0));
86 | }
87 | ch.pipeline().addLast(new HandShakeHandler(config.getHandShakeBytes(), true));
88 | ch.pipeline().addLast(writeExHandler);
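// The next handler caps frames at 2MB, reads the 4-byte length prefix at offset 0, and strips it (initialBytesToStrip = 4).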
89 | ch.pipeline().addLast(new LengthFieldBasedFrameDecoder(2 * 1024 * 1024, 0, 4, 0, 4));
90 |
91 | if (bizExecutorGroup != null) {
92 | if (autoBatchWriteHandler != null) {
93 | ch.pipeline().addLast(bizExecutorGroup, autoBatchWriteHandler);
94 | }
95 | ch.pipeline().addLast(bizExecutorGroup, requestHandler);
96 | } else {
97 | if (autoBatchWriteHandler != null) {
98 | ch.pipeline().addLast(autoBatchWriteHandler);
99 | }
100 | ch.pipeline().addLast(requestHandler);
101 | }
102 | ch.pipeline().addLast(new NettyConnectManageHandler(true, channel -> closeChannel(channel)));
103 | }
104 | });
105 |
106 | serverBootstrap.bind().sync();
107 | }
108 |
109 | private void closeChannel(Channel channel) {
110 | if (channel != null && channel.isActive()) {
111 | logger.info("closing channel {}", channel);
112 | channel.close();
113 | }
114 | }
115 |
116 | public void shutdown() {
117 | try {
118 | this.eventLoopGroupBoss.shutdownGracefully();
119 | this.eventLoopGroupBoss.awaitTermination(1000, TimeUnit.MILLISECONDS);
120 | this.eventLoopGroupSelector.shutdownGracefully();
121 | this.eventLoopGroupSelector.awaitTermination(1000, TimeUnit.MILLISECONDS);
122 | if (bizExecutorGroup != null) {
123 | this.bizExecutorGroup.shutdownGracefully();
124 | this.bizExecutorGroup.awaitTermination(1000, TimeUnit.MILLISECONDS);
125 | }
126 | } catch (Exception e) {
127 | logger.error("NettyRemotingServer shutdown exception, ", e);
128 | }
129 | }
130 |
131 | @ChannelHandler.Sharable
132 | private static class WriteExHandler extends ChannelOutboundHandlerAdapter {
133 | @Override
134 | public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) {
135 | ctx.write(msg, promise.addListener((ChannelFutureListener) future -> {
136 | if (!future.isSuccess()) {
137 | Throwable failureCause = future.cause();
138 | logger.warn("write fail. {}, msg: {}", ctx.channel().remoteAddress(), failureCause.toString());
139 | if (ctx.channel().isActive()) {
140 | logger.warn("close channel:" + ctx.channel());
141 | ctx.close();
142 | }
143 | }
144 | }));
145 | }
146 | }
147 |
148 | public RequestHandler getRequestHandler() {
149 | return requestHandler;
150 | }
151 |
152 | public NettyServerConfig getConfig() {
153 | return config;
154 | }
155 | }
156 |
--------------------------------------------------------------------------------
/src/main/java/simplerpc/NettyServerConfig.java:
--------------------------------------------------------------------------------
1 | package simplerpc;
2 |
3 | /**
4 | * @author huangli
5 | */
6 | public class NettyServerConfig {
7 | private int port = 12345;
8 |
9 | private byte[] handShakeBytes = Commands.HAND_SHAKE_BYTES;
10 |
11 | private boolean epoll;
12 |
13 | private int autoBatchMode;
14 | private int maxBufferSize = 32 * 1024;
15 | private int maxBatchCount = 100;
16 | private long batchTimeWindowsNanos = 1000 * 1000;
17 |
18 | private int maxIdleSeconds = 120;
19 |
20 | private int ioThreads = 4;
21 | private int bizThreads = 100;
22 |
23 | public int getPort() {
24 | return port;
25 | }
26 |
27 | public void setPort(int port) {
28 | this.port = port;
29 | }
30 |
31 | public int getAutoBatchMode() {
32 | return autoBatchMode;
33 | }
34 |
35 | public void setAutoBatchMode(int autoBatchMode) {
36 | this.autoBatchMode = autoBatchMode;
37 | }
38 |
39 | public int getMaxBufferSize() {
40 | return maxBufferSize;
41 | }
42 |
43 | public void setMaxBufferSize(int maxBufferSize) {
44 | this.maxBufferSize = maxBufferSize;
45 | }
46 |
47 | public int getMaxBatchCount() {
48 | return maxBatchCount;
49 | }
50 |
51 | public void setMaxBatchCount(int maxBatchCount) {
52 | this.maxBatchCount = maxBatchCount;
53 | }
54 |
55 | public long getBatchTimeWindowsNanos() {
56 | return batchTimeWindowsNanos;
57 | }
58 |
59 | public void setBatchTimeWindowsNanos(long batchTimeWindowsNanos) {
60 | this.batchTimeWindowsNanos = batchTimeWindowsNanos;
61 | }
62 |
63 | public int getMaxIdleSeconds() {
64 | return maxIdleSeconds;
65 | }
66 |
67 | public void setMaxIdleSeconds(int maxIdleSeconds) {
68 | this.maxIdleSeconds = maxIdleSeconds;
69 | }
70 |
71 | public int getIoThreads() {
72 | return ioThreads;
73 | }
74 |
75 | public void setIoThreads(int ioThreads) {
76 | this.ioThreads = ioThreads;
77 | }
78 |
79 | public int getBizThreads() {
80 | return bizThreads;
81 | }
82 |
83 | public void setBizThreads(int bizThreads) {
84 | this.bizThreads = bizThreads;
85 | }
86 |
87 | public byte[] getHandShakeBytes() {
88 | return handShakeBytes;
89 | }
90 |
91 | public void setHandShakeBytes(byte[] handShakeBytes) {
92 | this.handShakeBytes = handShakeBytes;
93 | }
94 |
95 | public boolean isEpoll() {
96 | return epoll;
97 | }
98 |
99 | public void setEpoll(boolean epoll) {
100 | this.epoll = epoll;
101 | }
102 | }
103 |
--------------------------------------------------------------------------------
/src/main/java/simplerpc/NettyTcpClient.java:
--------------------------------------------------------------------------------
1 | package simplerpc;
2 |
3 | import java.io.IOException;
4 | import java.util.ArrayList;
5 | import java.util.Iterator;
6 | import java.util.List;
7 | import java.util.Map.Entry;
8 | import java.util.concurrent.CompletableFuture;
9 | import java.util.concurrent.ConcurrentHashMap;
10 | import java.util.concurrent.LinkedBlockingQueue;
11 | import java.util.concurrent.Semaphore;
12 | import java.util.concurrent.TimeUnit;
13 | import java.util.function.Supplier;
14 |
15 | import io.netty.bootstrap.Bootstrap;
16 | import io.netty.buffer.ByteBuf;
17 | import io.netty.channel.Channel;
18 | import io.netty.channel.ChannelFuture;
19 | import io.netty.channel.ChannelHandlerContext;
20 | import io.netty.channel.ChannelInboundHandlerAdapter;
21 | import io.netty.channel.ChannelInitializer;
22 | import io.netty.channel.ChannelOption;
23 | import io.netty.channel.ChannelPipeline;
24 | import io.netty.channel.EventLoopGroup;
25 | import io.netty.channel.epoll.EpollEventLoopGroup;
26 | import io.netty.channel.epoll.EpollSocketChannel;
27 | import io.netty.channel.nio.NioEventLoopGroup;
28 | import io.netty.channel.socket.SocketChannel;
29 | import io.netty.channel.socket.nio.NioSocketChannel;
30 | import io.netty.handler.codec.LengthFieldBasedFrameDecoder;
31 | import io.netty.handler.codec.MessageToByteEncoder;
32 | import io.netty.handler.timeout.IdleStateHandler;
33 | import io.netty.util.concurrent.DefaultEventExecutorGroup;
34 | import io.netty.util.concurrent.EventExecutorGroup;
35 | import io.netty.util.concurrent.Future;
36 | import io.netty.util.internal.logging.InternalLogger;
37 | import io.netty.util.internal.logging.InternalLoggerFactory;
38 |
39 | /**
40 | * @author huangli
41 | */
42 | public class NettyTcpClient implements AutoCloseable {
43 |
44 | private static final InternalLogger logger = InternalLoggerFactory.getInstance(NettyTcpClient.class.getName());
45 |
46 | private final Supplier<List<String>> servers;
47 | private final NettyTcpClientConfig config;
48 | private final Semaphore semaphore;
49 | private int requestId;
50 |
51 | private int serverIndex;
52 |
53 | private volatile DefaultEventExecutorGroup workerLoop;
54 | private volatile EventLoopGroup ioLoop;
55 | private volatile Bootstrap bootstrap;
56 |
57 | private volatile int status = STATUS_INIT;
58 | private static final int STATUS_INIT = 0;
59 | private static final int STATUS_STARTED = 1;
60 | private static final int STATUS_STOPPING = 2;
61 | private static final int STATUS_STOPPED = 4;
62 |
63 |
64 | private volatile ChannelFuture channelFuture;
65 | private final ConcurrentHashMap<Integer, NettyTcpClientRequest> waitForResponseMap = new ConcurrentHashMap<>();
66 | private final LinkedBlockingQueue<NettyTcpClientRequest> waitForWriteQueue = new LinkedBlockingQueue<>();
67 |
68 | private final Thread thread;
69 | private long lastCleanTimeNano = System.nanoTime();
70 |
71 | private final CompletableFuture<Void> closeFuture = new CompletableFuture<>();
72 |
73 | public interface Callback<T> {
74 |
75 | void encode(ByteBuf out);
76 |
77 | T decode(ByteBuf in);
78 |
79 | }
80 |
81 | public NettyTcpClient(Supplier<List<String>> servers, NettyTcpClientConfig config) {
82 | this.servers = servers;
83 | this.config = config;
84 | this.semaphore = new Semaphore(config.getMaxPending());
85 | this.thread = new Thread(this::run);
86 | }
87 |
88 | public CompletableFuture<Void> getCloseFuture() {
89 | return closeFuture;
90 | }
91 |
92 | private Channel connect() throws InterruptedException, IOException {
93 | List<String> serversCopy = new ArrayList<>(servers.get());
94 | String[] serverAndPort = serversCopy.get(serverIndex).split(":");
95 | String server = serverAndPort[0];
96 | int port = Integer.parseInt(serverAndPort[1]);
97 | serverIndex++;
98 | if (serverIndex >= serversCopy.size()) {
99 | serverIndex = 0;
100 | }
101 | ChannelFuture f = this.bootstrap.connect(server, port);
102 | f.sync();
103 | if (!f.isSuccess()) {
104 | throw new IOException("[client] connect to " + server + ":" + port + " fail: " + f.cause());
105 | }
106 | ByteBuf buf = f.channel().alloc().buffer();
107 | buf.writeBytes(config.getHandShakeBytes());
108 | ChannelFuture handshakeFuture = f.channel()
109 | .writeAndFlush(buf)
110 | .sync();
111 | if (!handshakeFuture.isSuccess()) {
112 | throw new IOException("[client] handshake with " + server + ":" + port + " fail: " + handshakeFuture.cause());
113 | }
114 | Channel channel = f.channel();
115 | this.channelFuture = f;
116 | return channel;
117 | }
118 |
119 | @SuppressWarnings("checkstyle:MagicNumber")
120 | public void start() throws Exception {
121 | this.thread.start();
122 | this.bootstrap = new Bootstrap();
123 | if (config.isEpoll()) {
124 | this.ioLoop = new EpollEventLoopGroup(config.getIoLoopThread(), new IndexThreadFactory("NettyClientIO"));
125 | } else {
126 | this.ioLoop = new NioEventLoopGroup(config.getIoLoopThread(), new IndexThreadFactory("NettyClientIO"));
127 | }
128 | if (config.getWorkLoopThread() > 0) {
129 | this.workerLoop = new DefaultEventExecutorGroup(config.getWorkLoopThread(), new IndexThreadFactory("NettyClientWorker"));
130 | }
131 |
132 |
133 | this.bootstrap.group(this.ioLoop).channel(config.isEpoll() ? EpollSocketChannel.class : NioSocketChannel.class)
134 | .option(ChannelOption.TCP_NODELAY, true)
135 | .option(ChannelOption.SO_KEEPALIVE, false)
136 | .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 3000)
137 | .option(ChannelOption.SO_SNDBUF, 65535)
138 | .option(ChannelOption.SO_RCVBUF, 65535)
139 | .handler(new ChannelInitializer<SocketChannel>() {
140 | @Override
141 | public void initChannel(SocketChannel ch) {
142 | ChannelPipeline pipeline = ch.pipeline();
143 | EventExecutorGroup group = workerLoop == null ? ioLoop : workerLoop;
144 | if (config.getMaxIdleSeconds() > 0) {
145 | pipeline.addLast(group, new IdleStateHandler(config.getMaxIdleSeconds(), 0, 0));
146 | }
147 | pipeline.addLast(group, new HandShakeHandler(config.getHandShakeBytes(), false));
148 | pipeline.addLast(group, new LengthFieldBasedFrameDecoder(config.getMaxFrameSize(), 0, 4, 0, 4));
149 | pipeline.addLast(group, new NettyTcpClientDecoder());
150 | pipeline.addLast(group, new NettyTcpClientEncoder());
151 | pipeline.addLast(group, new NettyConnectManageHandler(false, channel -> closeChannel(channel)));
152 | }
153 | });
154 | try {
155 | connect();
156 | status = STATUS_STARTED;
157 | } catch (Exception e) {
158 | close();
159 | throw e;
160 | }
161 | }
162 |
163 | private <T> CompletableFuture<T> errorFuture(Throwable e) {
164 | CompletableFuture<T> f = new CompletableFuture<>();
165 | f.completeExceptionally(e);
166 | return f;
167 | }
168 |
169 | public <T> CompletableFuture<T> sendRequest(short command, Callback<T> callback, long timeoutMillis) {
170 | boolean needRelease = false;
171 | try {
172 | if (status >= STATUS_STOPPING) {
173 | return errorFuture(new IOException("closed"));
174 | }
175 | if (status == STATUS_INIT) {
176 | return errorFuture(new IOException("not start"));
177 | }
177 | }
178 | long deadLine = System.nanoTime() + (timeoutMillis << 20);
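// Note: (timeoutMillis << 20) approximates milliseconds-to-nanoseconds (2^20 ≈ 1e6), slightly overestimating the deadline.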
179 | // If too many requests are pending, the semaphore runs out of permits and this call blocks until timeout.
180 | // But because the sender blocks while a fast server keeps releasing permits quickly, individual requests do not time out; this is how asynchronous backpressure is achieved.
181 | boolean acquire = this.semaphore.tryAcquire(timeoutMillis, TimeUnit.MILLISECONDS);
182 | if (acquire && (deadLine - System.nanoTime() > 1_000_000)) {
183 | CompletableFuture<T> future = new CompletableFuture<>();
184 | NettyTcpClientRequest request = new NettyTcpClientRequest(command, callback,
185 | timeoutMillis, deadLine, future, this);
186 | // This queue is unbounded, so the add always succeeds immediately
187 | waitForWriteQueue.add(request);
188 | return future;
189 | } else {
190 | needRelease = acquire;
191 | return errorFuture(new IOException("too many pending requests"));
192 | }
193 | } catch (InterruptedException e) {
194 | return errorFuture(new IOException("InterruptedException"));
195 | } catch (Throwable e) {
196 | return errorFuture(new IOException("submit task error:" + e, e));
197 | } finally {
198 | if (needRelease) {
199 | this.semaphore.release();
200 | }
201 | }
202 | }
203 |
204 | private void setRequestId(NettyTcpClientRequest request) {
205 | while (request != null) {
206 | requestId++;
207 | if (requestId < 0) {
208 | requestId = 1;
209 | }
210 | request.setSeqId(requestId);
211 | request = request.getNext();
212 | }
213 | }
214 |
215 | private static void notifyError(NettyTcpClientRequest request, Throwable ex) {
216 | if (request.getNotified().compareAndSet(false, true)) {
217 | logger.info("error : {}", ex.toString());
218 | request.getClient().semaphore.release();
219 | request.getFuture().completeExceptionally(ex);
220 | }
221 | }
222 |
223 | @SuppressWarnings("unchecked")
224 | private static void notifySuccess(NettyTcpClientRequest request, Object data) {
225 | if (request.getNotified().compareAndSet(false, true)) {
226 | request.getClient().semaphore.release();
227 | request.getFuture().complete(data);
228 | }
229 | }
230 |
231 | private void run() {
232 | final int maxBatchSize = config.getMaxBatchSize();
233 | final int enableBatchPermits = config.getMaxPending() - config.getAutoBatchConcurrencyThreshold();
234 | final long maxBatchPendingNanos = config.getMaxAutoBatchPendingNanos();
235 | long totalRequest = 0;
236 | long totalBatch = 0;
237 | while (status <= STATUS_STOPPING) {
238 | try {
239 | NettyTcpClientRequest request;
240 | if (status < STATUS_STOPPING) {
241 | request = waitForWriteQueue.poll(1, TimeUnit.SECONDS);
242 | } else {
243 | request = waitForWriteQueue.poll();
244 | }
245 |
246 | if (request != null) {
247 | totalRequest++;
248 | NettyTcpClientRequest current = request;
249 | // If the pending count exceeds the threshold, coalesce requests into one batched send
250 | int restPermits = semaphore.availablePermits();
251 | if (restPermits < enableBatchPermits) {
252 | long restTime = maxBatchPendingNanos;
253 | for (int i = 0; i < maxBatchSize - 1; i++) {
254 | if (restTime < 1) {
255 | break;
256 | }
257 | long start = System.nanoTime();
258 | NettyTcpClientRequest next;
259 | if (status < STATUS_STOPPING) {
260 | next = waitForWriteQueue.poll(restTime, TimeUnit.NANOSECONDS);
261 | } else {
262 | next = waitForWriteQueue.poll();
263 | }
264 | if (next != null) {
265 | totalRequest++;
266 | current.setNext(next);
267 | current = next;
268 | } else {
269 | break;
270 | }
271 | restTime = restTime - (System.nanoTime() - start);
272 | }
273 | }
274 | totalBatch++;
275 | sendRequest0(channelFuture, request);
276 | } else if (status >= STATUS_STOPPING) {
277 | break;
278 | }
279 | cleanExpireRequest();
280 | } catch (InterruptedException e) {
281 | // ignore
282 | }
283 | }
284 | logger.debug("client socket write thread finished. avg batch size is " + 1.0 * totalRequest / totalBatch);
285 | doClose();
286 | }
287 |
288 | private void sendRequest0(ChannelFuture currentChannelFuture, NettyTcpClientRequest request) {
289 | try {
290 | Channel channel;
291 | if (currentChannelFuture == null) {
292 | channel = connect();
293 | } else {
294 | channel = currentChannelFuture.channel();
295 | if (channel == null || !channel.isActive()) {
296 | channel = connect();
297 | }
298 | }
299 | setRequestId(request);
300 |
301 | if (true) {
302 | NettyTcpClientRequest tmp = request;
303 | while (tmp != null) {
304 | waitForResponseMap.put(tmp.getSeqId(), tmp);
305 | tmp = tmp.getNext();
306 | }
307 | ChannelFuture writeResult = channel.writeAndFlush(request);
308 | Channel selectChannel = channel;
309 | writeResult.addListener(future -> processWriteError(future, request, selectChannel));
310 | } else {
311 | // this branch is for testing only
312 | NettyTcpClientRequest tmp = request;
313 | while (tmp != null) {
314 | waitForResponseMap.put(tmp.getSeqId(), tmp);
315 | if (tmp.getNotified().compareAndSet(false, true)) {
316 | semaphore.release();
317 | tmp.getFuture().complete(null);
318 | }
319 | waitForResponseMap.remove(tmp.getSeqId());
320 | tmp = tmp.getNext();
321 | }
322 | }
323 | } catch (Throwable e) {
324 | NettyTcpClientRequest tmp = request;
325 | while (tmp != null) {
326 | if (tmp.getSeqId() > 0) {
327 | waitForResponseMap.remove(tmp.getSeqId());
328 | }
329 | notifyError(tmp, new IOException(e.toString(), e));
330 | tmp = tmp.getNext();
331 | }
332 | }
333 | }
334 |
335 | private void processWriteError(Future<? super Void> future, NettyTcpClientRequest request, Channel selectChannel) {
336 | if (!future.isSuccess()) {
337 | while (request != null) {
338 | waitForResponseMap.remove(request.getSeqId());
339 | notifyError(request, new IOException("write fail: ex=" + future.cause()));
340 | request = request.getNext();
341 | }
342 | logger.error("[client] write error: " + future.cause());
343 | closeChannel(selectChannel);
344 | }
345 | }
346 |
347 | private void cleanExpireRequest() {
348 | long currentNano = System.nanoTime();
349 | if (currentNano - lastCleanTimeNano > 1000 * 1000 * 1000) {
350 | doCleanExpireData(currentNano);
351 | lastCleanTimeNano = currentNano;
352 | }
353 | }
354 |
355 | private void doCleanExpireData(long currentNano) {
356 | Iterator<Entry<Integer, NettyTcpClientRequest>> iterator = waitForResponseMap.entrySet().iterator();
357 | while (iterator.hasNext()) {
358 | Entry<Integer, NettyTcpClientRequest> entry = iterator.next();
359 | NettyTcpClientRequest req = entry.getValue();
360 | if (currentNano > req.getDeadlineNano()) {
361 | notifyError(req, new IOException("timeout: " + req.getTimeout() + "ms"));
362 | iterator.remove();
363 | }
364 | }
365 | }
366 |
367 | private synchronized void closeChannel(Channel oldChannel) {
368 | ChannelFuture cf = this.channelFuture;
369 | if (oldChannel != null && cf != null && oldChannel == cf.channel()) {
370 | channelFuture = null;
371 | if (oldChannel.isActive()) {
372 | logger.info("closing channel {}", oldChannel);
373 | oldChannel.close();
374 | }
375 | }
376 | }
377 |
378 | @Override
379 | public void close() {
380 | if (this.status >= STATUS_STOPPING) {
381 | return;
382 | }
383 | this.status = STATUS_STOPPING;
384 | logger.info("netty tcp client closing: begin shutdown");
385 | this.thread.interrupt();
386 | }
387 |
388 | private void doClose() {
389 | long deadline = System.nanoTime() + config.getCloseTimeoutMillis() * 1000 * 1000;
390 | try {
391 | logger.debug("netty tcp client closing: clean waitForWriteQueue ...");
392 | waitForWriteQueue.forEach(c -> notifyError(c, new IOException("closed")));
393 |
394 | while (!waitForResponseMap.isEmpty()) {
395 | doCleanExpireData(System.nanoTime());
396 | if (System.nanoTime() > deadline) {
397 | break;
398 | }
399 | try {
400 | Thread.sleep(10);
401 | } catch (InterruptedException e) {
402 | //ignore
403 | }
404 | }
405 |
406 | logger.debug("netty tcp client closing: clean waitForResponseMap ...");
407 | waitForResponseMap.values().forEach(c -> notifyError(c, new IOException("closed")));
408 |
409 | logger.debug("netty tcp client closing: shutdown event loop ...");
410 | Future<?> f1 = this.ioLoop.shutdownGracefully(config.getCloseSilenceMillis(),
411 | config.getCloseTimeoutMillis(), TimeUnit.MILLISECONDS);
412 | Future<?> f2 = null;
413 | if (workerLoop != null) {
414 | f2 = this.workerLoop.shutdownGracefully(config.getCloseSilenceMillis(),
415 | config.getCloseTimeoutMillis(), TimeUnit.MILLISECONDS);
416 | }
417 |
418 | f1.sync();
419 | if (f2 != null) {
420 | f2.sync();
421 | }
422 |
423 | logger.info("netty tcp client closing: finish shutdown");
424 | closeFuture.complete(null);
425 | } catch (Throwable e) {
426 | logger.warn("netty tcp client close fail: {}", e.toString());
427 | closeFuture.completeExceptionally(e);
428 | }
429 | this.status = STATUS_STOPPED;
430 | }
431 |
432 | private class NettyTcpClientDecoder extends ChannelInboundHandlerAdapter {
433 | @Override
434 | public void channelRead(ChannelHandlerContext ctx, Object msg) {
435 | ByteBuf inBuffer = (ByteBuf) msg;
436 | try {
437 | int type = inBuffer.readByte() & Commands.TYPE_MASK;
438 | inBuffer.readShort(); // the command
439 | int seqId = inBuffer.readInt();
440 | if (type == Commands.TYPE_RESP) {
441 | NettyTcpClientRequest request = waitForResponseMap.remove(seqId);
442 | if (request != null) {
443 | try {
444 | Object result = request.getCallback().decode(inBuffer);
445 | notifySuccess(request, result);
446 | } catch (Throwable e) {
447 | notifyError(request, e);
448 | }
449 | } else {
450 | logger.debug("the request expired: {}", seqId);
451 | }
452 | } else {
453 | // TODO server push is not supported yet
454 | }
455 | } finally {
456 | inBuffer.release();
457 | }
458 | }
459 | }
460 |
461 | private static class NettyTcpClientEncoder extends MessageToByteEncoder<NettyTcpClientRequest> {
462 |
463 | @Override
464 | protected void encode(ChannelHandlerContext ctx, NettyTcpClientRequest msg, ByteBuf out) {
465 | while (msg != null) {
466 | int startIndex = out.writerIndex();
467 | out.writeInt(0);
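// 0 is a placeholder for the 4-byte frame length; it is back-patched below once the body size is known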
468 | int startReadableBytes = out.readableBytes();
469 |
470 | out.writeByte(Commands.TYPE_REQ);
471 | out.writeShort(msg.getCommand());
472 | out.writeInt(msg.getSeqId());
473 | msg.getCallback().encode(out);
474 |
475 | int endIndex = out.writerIndex();
476 | int len = out.readableBytes() - startReadableBytes;
477 | out.writerIndex(startIndex);
478 | out.writeInt(len);
479 | out.writerIndex(endIndex);
480 | msg = msg.getNext();
481 | }
482 | }
483 | }
484 |
485 | }
486 |
--------------------------------------------------------------------------------
/src/main/java/simplerpc/NettyTcpClientConfig.java:
--------------------------------------------------------------------------------
1 | package simplerpc;
2 |
3 | /**
4 | * @author huangli
5 | */
6 | public class NettyTcpClientConfig {
7 | private boolean epoll;
8 | private int maxPending = 1000;
9 | private int autoBatchConcurrencyThreshold = 200;
10 | private long closeSilenceMillis = 1000;
11 | private long closeTimeoutMillis = 5000;
12 | private byte[] handShakeBytes = Commands.HAND_SHAKE_BYTES;
13 |
14 | private int ioLoopThread = 1;
15 | private int workLoopThread = 1;
16 |
17 | private int maxBatchSize = 100;
18 | private long maxAutoBatchPendingNanos = 1_000_000; // 1 millisecond
19 |
20 | private int maxFrameSize = 2 * 1024 * 1024;
21 |
22 | private int maxIdleSeconds = 120;
23 |
24 | public boolean isEpoll() {
25 | return epoll;
26 | }
27 |
28 | public void setEpoll(boolean epoll) {
29 | this.epoll = epoll;
30 | }
31 |
32 | public int getMaxPending() {
33 | return maxPending;
34 | }
35 |
36 | public void setMaxPending(int maxPending) {
37 | this.maxPending = maxPending;
38 | }
39 |
40 | public int getAutoBatchConcurrencyThreshold() {
41 | return autoBatchConcurrencyThreshold;
42 | }
43 |
44 | public void setAutoBatchConcurrencyThreshold(int autoBatchConcurrencyThreshold) {
45 | this.autoBatchConcurrencyThreshold = autoBatchConcurrencyThreshold;
46 | }
47 |
48 | public long getCloseTimeoutMillis() {
49 | return closeTimeoutMillis;
50 | }
51 |
52 | public void setCloseTimeoutMillis(long closeTimeoutMillis) {
53 | this.closeTimeoutMillis = closeTimeoutMillis;
54 | }
55 |
56 | public long getCloseSilenceMillis() {
57 | return closeSilenceMillis;
58 | }
59 |
60 | public void setCloseSilenceMillis(long closeSilenceMillis) {
61 | this.closeSilenceMillis = closeSilenceMillis;
62 | }
63 |
64 | public byte[] getHandShakeBytes() {
65 | return handShakeBytes;
66 | }
67 |
68 | public void setHandShakeBytes(byte[] handShakeBytes) {
69 | this.handShakeBytes = handShakeBytes;
70 | }
71 |
72 | public int getIoLoopThread() {
73 | return ioLoopThread;
74 | }
75 |
76 | public void setIoLoopThread(int ioLoopThread) {
77 | this.ioLoopThread = ioLoopThread;
78 | }
79 |
80 | public int getWorkLoopThread() {
81 | return workLoopThread;
82 | }
83 |
84 | public void setWorkLoopThread(int workLoopThread) {
85 | this.workLoopThread = workLoopThread;
86 | }
87 |
88 | public int getMaxBatchSize() {
89 | return maxBatchSize;
90 | }
91 |
92 | public void setMaxBatchSize(int maxBatchSize) {
93 | this.maxBatchSize = maxBatchSize;
94 | }
95 |
96 | public long getMaxAutoBatchPendingNanos() {
97 | return maxAutoBatchPendingNanos;
98 | }
99 |
100 | public void setMaxAutoBatchPendingNanos(long maxAutoBatchPendingNanos) {
101 | this.maxAutoBatchPendingNanos = maxAutoBatchPendingNanos;
102 | }
103 |
104 | public int getMaxFrameSize() {
105 | return maxFrameSize;
106 | }
107 |
108 | public void setMaxFrameSize(int maxFrameSize) {
109 | this.maxFrameSize = maxFrameSize;
110 | }
111 |
112 | public int getMaxIdleSeconds() {
113 | return maxIdleSeconds;
114 | }
115 |
116 | public void setMaxIdleSeconds(int maxIdleSeconds) {
117 | this.maxIdleSeconds = maxIdleSeconds;
118 | }
119 |
120 | }
121 |
--------------------------------------------------------------------------------
/src/main/java/simplerpc/NettyTcpClientRequest.java:
--------------------------------------------------------------------------------
1 | package simplerpc;
2 |
3 | import java.util.concurrent.CompletableFuture;
4 | import java.util.concurrent.atomic.AtomicBoolean;
5 |
6 | import simplerpc.NettyTcpClient.Callback;
7 |
8 | /**
9 | * @author huangli
10 | */
11 | @SuppressWarnings("rawtypes")
12 | class NettyTcpClientRequest {
13 | private final AtomicBoolean notified = new AtomicBoolean(false);
14 | private final short command;
15 | private final Callback callback;
16 | private final long timeout;
17 | private final long deadlineNano;
18 | private volatile int seqId;
19 | private final CompletableFuture future;
20 | private final NettyTcpClient client;
21 | private volatile NettyTcpClientRequest next;
22 |
23 | public NettyTcpClientRequest(short command, Callback callback, long timeout, long deadlineNano,
24 | CompletableFuture future, NettyTcpClient client) {
25 | this.command = command;
26 | this.callback = callback;
27 | this.timeout = timeout;
28 | this.deadlineNano = deadlineNano;
29 | this.future = future;
30 | this.client = client;
31 | }
32 |
33 | public AtomicBoolean getNotified() {
34 | return notified;
35 | }
36 |
37 | public Callback getCallback() {
38 | return callback;
39 | }
40 |
41 | public long getDeadlineNano() {
42 | return deadlineNano;
43 | }
44 |
45 | public long getTimeout() {
46 | return timeout;
47 | }
48 |
49 | public int getSeqId() {
50 | return seqId;
51 | }
52 |
53 | public void setSeqId(int seqId) {
54 | this.seqId = seqId;
55 | }
56 |
57 | public short getCommand() {
58 | return command;
59 | }
60 |
61 | public CompletableFuture getFuture() {
62 | return future;
63 | }
64 |
65 | public NettyTcpClient getClient() {
66 | return client;
67 | }
68 |
69 | public NettyTcpClientRequest getNext() {
70 | return next;
71 | }
72 |
73 | public void setNext(NettyTcpClientRequest next) {
74 | this.next = next;
75 | }
76 | }
77 |
--------------------------------------------------------------------------------
/src/main/java/simplerpc/RequestHandler.java:
--------------------------------------------------------------------------------
1 | package simplerpc;
2 |
3 | import org.slf4j.Logger;
4 | import org.slf4j.LoggerFactory;
5 |
6 | import io.netty.buffer.ByteBuf;
7 | import io.netty.channel.ChannelHandler;
8 | import io.netty.channel.ChannelHandlerContext;
9 | import io.netty.channel.ChannelInboundHandlerAdapter;
10 |
11 | /**
12 | * @author huangli
13 | */
14 | @ChannelHandler.Sharable
15 | public class RequestHandler extends ChannelInboundHandlerAdapter {
16 | private static final Logger logger = LoggerFactory.getLogger(RequestHandler.class);
17 |
18 | static class PingProcessor implements RequestProcessor {
19 | @Override
20 | public short getCommand() {
21 | return Commands.COMMAND_PING;
22 | }
23 |
24 | @Override
25 | public ByteBuf process(ChannelHandlerContext ctx, ByteBuf msg) {
26 | int len = msg.readableBytes();
27 | ByteBuf buffer = ctx.alloc().buffer(2 + len);
28 | buffer.writeShort(Commands.CODE_SUCCESS);
29 | buffer.writeBytes(msg);
30 | return buffer;
31 | }
32 | }
33 |
34 | private static final RequestProcessor PING_PROCESSOR = new PingProcessor();
35 |
36 | private static final RequestProcessor CLOSE_PROCESSOR = new RequestProcessor() {
37 | @Override
38 | public short getCommand() {
39 | return Commands.COMMAND_CLOSE;
40 | }
41 |
42 | @Override
43 | public ByteBuf process(ChannelHandlerContext ctx, ByteBuf msg) {
44 | ByteBuf buffer = ctx.alloc().buffer(2);
45 | buffer.writeShort(Commands.CODE_SUCCESS);
46 | return buffer;
47 | }
48 | };
49 |
50 | private final RequestProcessor[] requestProcessors = new RequestProcessor[Short.MAX_VALUE];
51 |
52 | public RequestHandler() {
53 | registerProcessor(PING_PROCESSOR);
54 | registerProcessor(CLOSE_PROCESSOR);
55 | }
56 |
57 | @Override
58 | public void channelRead(ChannelHandlerContext ctx, Object obj) {
59 | ByteBuf msg = (ByteBuf) obj;
60 | int type = msg.readByte() & Commands.TYPE_MASK;
61 | short command = msg.readShort();
62 | int seqId = msg.readInt();
63 | ByteBuf resp;
64 | try {
65 | if (type == Commands.TYPE_REQ) {
66 | RequestProcessor c = null;
67 | try {
68 | c = processReq(command);
69 | resp = c.process(ctx, msg);
70 | } catch (Exception e) {
71 | // Unified error handling for command processing; the connection is not closed in this case
72 | resp = ctx.alloc().buffer();
73 | resp.writeShort(Commands.CODE_FAIL);
74 | // borrow PING_PROCESSOR for its writeString default method
75 | PING_PROCESSOR.writeString(resp, e.toString());
76 | }
77 |
78 | // Errors anywhere else are handled by ExHandler, which closes the connection
79 | ByteBuf header = ctx.alloc().buffer(4 + Commands.HEAD_LENGTH);
80 | header.writeInt(resp.readableBytes() + Commands.HEAD_LENGTH);
81 | header.writeByte(Commands.TYPE_RESP);
82 | header.writeShort(command);
83 | header.writeInt(seqId);
84 |
85 | ctx.write(header);
86 | ctx.writeAndFlush(resp);
87 |
88 | if (c == CLOSE_PROCESSOR) {
89 | logger.info("connection close. {}", ctx.channel());
90 | ctx.close();
91 | }
92 | } else {
93 | // TODO server push is not supported yet
94 | }
95 | } finally {
96 | msg.release();
97 | }
98 | }
99 |
100 | private RequestProcessor processReq(short command) throws Exception {
101 | try {
102 | RequestProcessor c = requestProcessors[command];
103 | if (c == null) {
104 | throw new Exception("unknown command: " + command);
105 | }
106 | return c;
107 | } catch (IndexOutOfBoundsException e) {
108 | throw new Exception("unknown command: " + command);
109 | }
110 | }
111 |
112 | public void registerProcessor(RequestProcessor processor) {
113 | requestProcessors[processor.getCommand()] = processor;
114 | }
115 | }
116 |
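The channelRead above also implies the frame layout: an int length (apparently stripped by an upstream frame decoder before this handler runs, since only the outbound path writes it), then a type byte, a short command, an int seqId, and the body. Registering additional commands only requires implementing RequestProcessor and calling registerProcessor, exactly as PING_PROCESSOR and CLOSE_PROCESSOR do. A minimal sketch with a hypothetical command id (COMMAND_ECHO is not defined in Commands; pick a value that does not clash with the existing commands):

```java
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import simplerpc.Commands;
import simplerpc.RequestProcessor;

// Hypothetical echo processor: returns CODE_SUCCESS followed by the
// request payload, mirroring PingProcessor above.
public class EchoProcessor implements RequestProcessor {
    private static final short COMMAND_ECHO = 100; // assumed id, for illustration only

    @Override
    public short getCommand() {
        return COMMAND_ECHO;
    }

    @Override
    public ByteBuf process(ChannelHandlerContext ctx, ByteBuf msg) {
        ByteBuf buffer = ctx.alloc().buffer(2 + msg.readableBytes());
        buffer.writeShort(Commands.CODE_SUCCESS);
        buffer.writeBytes(msg); // echo the remaining payload back
        return buffer;
    }
}

// usage, given a RequestHandler instance:
// requestHandler.registerProcessor(new EchoProcessor());
```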
--------------------------------------------------------------------------------
/src/main/java/simplerpc/RequestProcessor.java:
--------------------------------------------------------------------------------
1 | package simplerpc;
2 |
3 | import java.nio.charset.StandardCharsets;
4 | import java.util.Map;
5 | import java.util.Map.Entry;
6 |
7 | import io.netty.buffer.ByteBuf;
8 | import io.netty.channel.ChannelHandlerContext;
9 |
10 | /**
11 | * @author huangli
12 | */
13 | public interface RequestProcessor {
14 |
15 | short getCommand();
16 |
17 | ByteBuf process(ChannelHandlerContext ctx, ByteBuf msg) throws Exception;
18 |
19 | default void writeString(ByteBuf msg, String str) {
20 | if (str == null) {
21 | msg.writeInt(-1);
22 | } else {
23 | msg.markWriterIndex();
24 | msg.writeInt(0);
25 | int len = msg.writeCharSequence(str, StandardCharsets.UTF_8);
26 | int newIndex = msg.writerIndex();
27 | msg.resetWriterIndex();
28 | msg.writeInt(len);
29 | msg.writerIndex(newIndex);
30 | }
31 | }
32 |
33 | default String readString(ByteBuf msg) {
34 | int len = msg.readInt();
35 | if (len == -1) {
36 | return null;
37 | }
38 | byte[] bs = new byte[len];
39 | msg.readBytes(bs);
40 | return new String(bs, StandardCharsets.UTF_8);
41 | }
42 |
43 | default void writeMap(ByteBuf buffer, Map<String, String> map) {
44 | if (map == null) {
45 | buffer.writeInt(-1);
46 | return;
47 | }
48 | buffer.writeInt(map.size());
49 | for (Entry<String, String> en : map.entrySet()) {
50 | writeString(buffer, en.getKey());
51 | writeString(buffer, en.getValue());
52 | }
53 | }
54 | }
55 |
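The interface ships writeMap but no symmetric reader. Given the format above (an int size, -1 for a null map, then size key/value pairs written with writeString), a readMap counterpart is mechanical. A sketch of a default method that could sit next to writeMap; it is not part of the original interface, and java.util.HashMap is fully qualified to keep the snippet self-contained:

```java
// Sketch of a readMap matching writeMap's wire format.
default Map<String, String> readMap(ByteBuf buffer) {
    int size = buffer.readInt();
    if (size == -1) {
        return null; // writeMap encodes a null map as -1
    }
    Map<String, String> map = new java.util.HashMap<>(size * 2);
    for (int i = 0; i < size; i++) {
        String key = readString(buffer);
        String value = readString(buffer);
        map.put(key, value);
    }
    return map;
}
```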
--------------------------------------------------------------------------------
/src/main/java/simplerpc/benchmark/BenchBase.java:
--------------------------------------------------------------------------------
1 | package simplerpc.benchmark;
2 |
3 | import java.text.DecimalFormat;
4 | import java.util.concurrent.atomic.LongAdder;
5 |
6 | /**
7 | * @author huangli
8 | * Created on 2021-09-14
9 | */
10 | public abstract class BenchBase {
11 |
12 | protected final int threadCount;
13 | private final long testTime;
14 | private final long warmupTime;
15 | private Thread[] threads;
16 | protected volatile boolean stop = false;
17 | protected LongAdder successCount = new LongAdder();
18 | protected LongAdder failCount = new LongAdder();
19 |
20 | public BenchBase(int threadCount, long testTime) {
21 | this(threadCount, testTime, 5000);
22 | }
23 |
24 | public BenchBase(int threadCount, long testTime, long warmupTime) {
25 | this.threadCount = threadCount;
26 | this.testTime = testTime;
27 | this.warmupTime = warmupTime;
28 | }
29 |
30 | public void init() throws Exception {
31 | }
32 |
33 | public void shutdown() throws Exception {
34 | }
35 |
36 | public void start() throws Exception {
37 | init();
38 | threads = new Thread[threadCount];
39 | for (int i = 0; i < threadCount; i++) {
40 | int threadIndex = i;
41 | threads[i] = new Thread(() -> run(threadIndex));
42 | threads[i].start();
43 | }
44 | Thread.sleep(warmupTime);
45 | long warmupCount = successCount.sum();
46 | long warmupFailCount = failCount.sum();
47 | Thread.sleep(testTime);
48 | stop = true;
49 | long sc = successCount.sum() - warmupCount;
50 | long fc = failCount.sum() - warmupFailCount;
51 | for (Thread t : threads) {
52 | t.join();
53 | }
54 | shutdown();
55 |
56 | double ops = sc * 1.0 / testTime * 1000;
57 | System.out.println("success sc:" + sc + ", ops=" + new DecimalFormat(",###").format(ops));
58 |
59 | ops = fc * 1.0 / testTime * 1000;
60 | System.out.println("fail sc:" + fc + ", ops=" + new DecimalFormat(",###").format(ops));
61 | }
62 |
63 | public void run(int threadIndex) {
64 | while (!stop) {
65 | test(threadIndex);
66 | }
67 | }
68 |
69 | public abstract void test(int threadIndex);
70 | }
71 |
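BenchBase drives the whole lifecycle: init(), spawn threadCount threads that loop over test(), a warmup window (5 seconds by default) whose counts are subtracted out, a testTime measurement window, then shutdown() and the TPS printout. A minimal subclass sketch (the class name is illustrative):

```java
// Smallest possible benchmark: counts no-op iterations per second on
// 4 threads for 10 seconds, after the default 5s warmup.
public class NoopBenchmark extends BenchBase {
    public NoopBenchmark(int threadCount, long testTime) {
        super(threadCount, testTime);
    }

    @Override
    public void test(int threadIndex) {
        successCount.add(1); // one "operation" per loop iteration
    }

    public static void main(String[] args) throws Exception {
        new NoopBenchmark(4, 10000).start();
    }
}
```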
--------------------------------------------------------------------------------
/src/main/java/simplerpc/benchmark/ClientStarter.java:
--------------------------------------------------------------------------------
1 | package simplerpc.benchmark;
2 |
3 | import java.nio.charset.StandardCharsets;
4 | import java.util.Collections;
5 | import java.util.Random;
6 | import java.util.concurrent.CompletableFuture;
7 |
8 | import org.apache.commons.cli.CommandLine;
9 | import org.apache.commons.cli.DefaultParser;
10 | import org.apache.commons.cli.Options;
11 |
12 | import io.netty.buffer.ByteBuf;
13 | import simplerpc.Commands;
14 | import simplerpc.NettyTcpClient;
15 | import simplerpc.NettyTcpClient.Callback;
16 | import simplerpc.NettyTcpClientConfig;
17 |
18 | /**
19 | * @author huangli
20 | * Created on 2021-10-06
21 | */
22 | public class ClientStarter extends BenchBase {
23 |
24 | private static CommandLine commandLine;
25 | private static boolean sync;
26 |
27 | private static int clientCount = 1;
28 | private NettyTcpClient[] client;
29 | private static byte[] DATA = "hello".getBytes(StandardCharsets.UTF_8);
30 |
31 | public ClientStarter(int threadCount, long time) {
32 | super(threadCount, time);
33 | }
34 |
35 | @Override
36 | public void init() throws Exception {
37 | client = new NettyTcpClient[clientCount];
38 | String host = commandLine.getOptionValue('h', "127.0.0.1");
39 | int port = Integer.parseInt(commandLine.getOptionValue('p', "12345"));
40 | for (int i = 0; i < clientCount; i++) {
41 | NettyTcpClientConfig c = new NettyTcpClientConfig();
42 | if (commandLine.hasOption("autoBatchConcurrencyThreshold")) {
43 | c.setAutoBatchConcurrencyThreshold(Integer.parseInt(commandLine.getOptionValue("autoBatchConcurrencyThreshold")));
44 | }
45 | if (commandLine.hasOption("maxBatchSize")) {
46 | c.setMaxBatchSize(Integer.parseInt(commandLine.getOptionValue("maxBatchSize")));
47 | }
48 | if (commandLine.hasOption("maxPending")) {
49 | c.setMaxPending(Integer.parseInt(commandLine.getOptionValue("maxPending")));
50 | }
51 | if (commandLine.hasOption('e')) {
52 | c.setEpoll(true);
53 | }
54 | client[i] = new NettyTcpClient(() -> Collections.singletonList(host + ":" + port), c);
55 | client[i].start();
56 | }
57 | }
58 |
59 | @Override
60 | public void shutdown() {
61 | for (int i = 0; i < clientCount; i++) {
62 | client[i].close();
63 | }
64 | }
65 |
66 | @Override
67 | public void test(int threadIndex) {
68 | NettyTcpClient c = client[threadIndex % clientCount];
69 |
70 | CompletableFuture<Void> fu = c.sendRequest(Commands.COMMAND_PING, new Callback<Void>() {
71 | @Override
72 | public void encode(ByteBuf out) {
73 | out.writeBytes(DATA);
74 | }
75 |
76 | @Override
77 | public Void decode(ByteBuf in) {
78 | short code = in.readShort();
79 | if (code != Commands.CODE_SUCCESS) {
80 | throw new RuntimeException();
81 | }
82 | byte[] bs = new byte[DATA.length];
83 | in.readBytes(bs);
84 | return null;
85 | }
86 | }, 10 * 1000);
87 |
88 | if (sync) {
89 | // sync call
90 | try {
91 | fu.get();
92 | successCount.add(1);
93 | } catch (Exception e) {
94 | failCount.add(1);
95 | }
96 | } else {
97 | // async call
98 | fu.handle((unused, throwable) -> {
99 | if (throwable != null) {
100 | failCount.add(1);
101 | } else {
102 | successCount.add(1);
103 | }
104 | return null;
105 | });
106 | }
107 | }
108 |
109 | public static void main(String[] args) throws Exception {
110 | Options options = new Options();
111 | options.addOption("e", "epoll", false, "use epoll");
112 | options.addOption("h", "host", true, "server ip");
113 | options.addOption("p", "port", true, "port");
114 | options.addOption("d", "duration", true, "test time in millis");
115 | options.addOption("t", "thread", true, "thread count");
116 | options.addOption("s", "sync", false, "sync mode");
117 | options.addOption(null, "autoBatchConcurrencyThreshold", true, "autoBatchConcurrencyThreshold");
118 | options.addOption(null, "maxBatchSize", true, "maxBatchSize");
119 | options.addOption(null, "maxPending", true, "maxPending");
120 | options.addOption("l", "length", true, "message size");
121 | options.addOption("c", "client", true, "client count");
122 |
123 |
124 | DefaultParser parser = new DefaultParser();
125 | commandLine = parser.parse(options, args, true);
126 |
127 | sync = commandLine.hasOption('s');
128 | if (commandLine.hasOption("l")) {
129 | byte[] b = new byte[Integer.parseInt(commandLine.getOptionValue("l"))];
130 | new Random().nextBytes(b);
131 | DATA = b;
132 | }
133 | if (commandLine.hasOption('c')) {
134 | clientCount = Integer.parseInt(commandLine.getOptionValue('c'));
135 | }
136 |
137 | int thread = Integer.parseInt(commandLine.getOptionValue('t', "1"));
138 | long duration = Long.parseLong(commandLine.getOptionValue('d', "10000"));
139 | new ClientStarter(thread, duration).start();
140 | }
141 | }
142 |
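All of the options above are optional. A typical launch against a local ServerStarter might look like the following (classpath elided; drop -s to measure the async path instead of the sync one):

```
java -cp <classpath> simplerpc.benchmark.ClientStarter -h 127.0.0.1 -p 12345 -t 128 -d 10000 -s
```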
--------------------------------------------------------------------------------
/src/main/java/simplerpc/benchmark/RmqClient.java:
--------------------------------------------------------------------------------
1 | package simplerpc.benchmark;
2 |
3 | import java.nio.charset.StandardCharsets;
4 | import java.util.Random;
5 |
6 | import org.apache.commons.cli.CommandLine;
7 | import org.apache.commons.cli.DefaultParser;
8 | import org.apache.commons.cli.Options;
9 | import org.apache.rocketmq.remoting.netty.NettyClientConfig;
10 | import org.apache.rocketmq.remoting.netty.NettyRemotingClient;
11 | import org.apache.rocketmq.remoting.protocol.RemotingCommand;
12 |
13 | import simplerpc.Commands;
14 | import simplerpc.benchmark.RmqServer.ReqHeader;
15 |
16 | /**
17 | * @author huangli
18 | * Created on 2021-10-06
19 | */
20 | public class RmqClient extends BenchBase {
21 |
22 | private static CommandLine commandLine;
23 | private static boolean sync;
24 |
25 | private static int clientCount = 1;
26 | private NettyRemotingClient[] client;
27 | private static byte[] DATA = "hello".getBytes(StandardCharsets.UTF_8);
28 |
29 | private final String remoteAddr;
30 |
31 | public RmqClient(int threadCount, long time) {
32 | super(threadCount, time);
33 | String host = commandLine.getOptionValue('h', "127.0.0.1");
34 | int port = Integer.parseInt(commandLine.getOptionValue('p', "12345"));
35 | remoteAddr = host + ":" + port;
36 | }
37 |
38 | @Override
39 | public void init() throws Exception {
40 | client = new NettyRemotingClient[clientCount];
41 | for (int i = 0; i < clientCount; i++) {
42 | NettyClientConfig c = new NettyClientConfig();
43 | client[i] = new NettyRemotingClient(c);
44 | client[i].start();
45 | }
46 | }
47 |
48 | @Override
49 | public void shutdown() {
50 | for (int i = 0; i < clientCount; i++) {
51 | client[i].shutdown();
52 | }
53 | }
54 |
55 | @Override
56 | public void test(int threadIndex) {
57 | NettyRemotingClient c = client[threadIndex % clientCount];
58 |
59 | RemotingCommand req = RemotingCommand.createRequestCommand(Commands.COMMAND_PING, new ReqHeader());
60 | req.setBody(DATA);
61 | try {
62 | if (sync) {
63 | // sync call
64 | c.invokeSync(remoteAddr, req, 3000);
65 | successCount.add(1);
66 | } else {
67 | // async call
68 | c.invokeAsync(remoteAddr, req, 3000, responseFuture -> {
69 | if (responseFuture.isSendRequestOK()) {
70 | successCount.add(1);
71 | } else {
72 | failCount.add(1);
73 | }
74 | });
75 | }
76 | } catch (Exception e) {
77 | e.printStackTrace();
78 | failCount.add(1);
79 | }
80 | }
81 |
82 | public static void main(String[] args) throws Exception {
83 | Options options = new Options();
84 | options.addOption("h", "host", true, "server ip");
85 | options.addOption("p", "port", true, "port");
86 | options.addOption("d", "duration", true, "test time in millis");
87 | options.addOption("t", "thread", true, "thread count");
88 | options.addOption("s", "sync", false, "sync mode");
89 | options.addOption("l", "length", true, "message size");
90 | options.addOption("c", "client", true, "client count");
91 |
92 |
93 | DefaultParser parser = new DefaultParser();
94 | commandLine = parser.parse(options, args, true);
95 |
96 | sync = commandLine.hasOption('s');
97 | if (commandLine.hasOption("l")) {
98 | byte[] b = new byte[Integer.parseInt(commandLine.getOptionValue("l"))];
99 | new Random().nextBytes(b);
100 | DATA = b;
101 | }
102 | if (commandLine.hasOption('c')) {
103 | clientCount = Integer.parseInt(commandLine.getOptionValue('c'));
104 | }
105 |
106 | int thread = Integer.parseInt(commandLine.getOptionValue('t', "1"));
107 | long duration = Long.parseLong(commandLine.getOptionValue('d', "10000"));
108 | new RmqClient(thread, duration).start();
109 | }
110 | }
111 |
--------------------------------------------------------------------------------
/src/main/java/simplerpc/benchmark/RmqServer.java:
--------------------------------------------------------------------------------
1 | package simplerpc.benchmark;
2 |
3 | import java.util.Random;
4 | import java.util.concurrent.Executors;
5 |
6 | import org.apache.commons.cli.CommandLine;
7 | import org.apache.commons.cli.DefaultParser;
8 | import org.apache.commons.cli.Options;
9 | import org.apache.rocketmq.remoting.CommandCustomHeader;
10 | import org.apache.rocketmq.remoting.exception.RemotingCommandException;
11 | import org.apache.rocketmq.remoting.netty.NettyRemotingServer;
12 | import org.apache.rocketmq.remoting.netty.NettyRequestProcessor;
13 | import org.apache.rocketmq.remoting.netty.NettyServerConfig;
14 | import org.apache.rocketmq.remoting.protocol.RemotingCommand;
15 |
16 | import io.netty.channel.ChannelHandlerContext;
17 | import simplerpc.Commands;
18 |
19 | /**
20 | * @author huangli
21 | * Created on 2021-10-06
22 | */
23 | public class RmqServer {
24 |
25 | public static class ReqHeader implements CommandCustomHeader {
26 | @Override
27 | public void checkFields() throws RemotingCommandException {
28 | }
29 | }
30 |
31 | public static class RespHeader implements CommandCustomHeader {
32 | @Override
33 | public void checkFields() throws RemotingCommandException {
34 | }
35 | }
36 |
37 | public static void main(String[] args) throws Exception {
38 | Options options = new Options();
39 | options.addOption("p", "port", true, "port");
40 | options.addOption("l", "length", true, "message size");
41 |
42 |
43 | DefaultParser parser = new DefaultParser();
44 | CommandLine commandLine = parser.parse(options, args, true);
45 | final byte[] DATA;
46 | if (commandLine.hasOption("l")) {
47 | DATA = new byte[Integer.parseInt(commandLine.getOptionValue("l"))];
48 | new Random().nextBytes(DATA);
49 | } else {
50 | DATA = null;
51 | }
52 |
53 | NettyServerConfig config = new NettyServerConfig();
54 | config.setListenPort(Integer.parseInt(commandLine.getOptionValue("p", "12345")));
55 | NettyRemotingServer server = new NettyRemotingServer(config);
56 | server.registerProcessor(Commands.COMMAND_PING, new NettyRequestProcessor() {
57 | @Override
58 | public RemotingCommand processRequest(ChannelHandlerContext ctx, RemotingCommand request) throws Exception {
59 | try {
60 | request.decodeCommandCustomHeader(ReqHeader.class);
61 | RemotingCommand resp =
62 | RemotingCommand.createResponseCommand(Commands.COMMAND_PING, null, RespHeader.class);
63 | if (DATA == null) {
64 | resp.setBody(request.getBody());
65 | } else {
66 | resp.setBody(DATA);
67 | }
68 |
69 | // If we sleep 1ms here, rocketmq remoting async calls will fail, because there is no backpressure capability
70 | // Thread.sleep(1);
71 |
72 | return resp;
73 | } catch (Exception e) {
74 | e.printStackTrace();
75 | return null;
76 | }
77 | }
78 |
79 | @Override
80 | public boolean rejectRequest() {
81 | return false;
82 | }
83 | }, Executors.newFixedThreadPool(4));
84 | server.start();
85 |
86 | Runtime.getRuntime().addShutdownHook(new Thread(() -> {
87 | server.shutdown();
88 | }));
89 | }
90 | }
91 |
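The commented-out Thread.sleep(1) above is the backpressure probe: with rocketmq remoting, a slightly slow server makes async callers fail outright instead of slowing them down. A generic illustration of the missing piece, i.e. capping in-flight async requests with a semaphore; this is a concept sketch, not this project's actual mechanism (on the client side, the NettyTcpClientConfig maxPending option appears to play that role):

```java
import java.util.concurrent.Semaphore;

// Concept sketch of client-side backpressure for async RPC: bound the
// number of outstanding requests so callers block instead of queueing
// unbounded work against a slow server.
public class BoundedAsyncInvoker {
    private final Semaphore inFlight;

    public BoundedAsyncInvoker(int maxPending) {
        this.inFlight = new Semaphore(maxPending);
    }

    public void invoke(Runnable sendAsync) throws InterruptedException {
        inFlight.acquire(); // blocks once maxPending requests are outstanding
        try {
            sendAsync.run(); // fire the async request
        } catch (RuntimeException e) {
            inFlight.release(); // the request never went out; return the permit
            throw e;
        }
    }

    // Must be called exactly once per request, from the response or
    // timeout callback, to return the permit.
    public void onComplete() {
        inFlight.release();
    }
}
```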
--------------------------------------------------------------------------------
/src/main/java/simplerpc/benchmark/ServerStarter.java:
--------------------------------------------------------------------------------
1 | package simplerpc.benchmark;
2 |
3 | import org.apache.commons.cli.CommandLine;
4 | import org.apache.commons.cli.DefaultParser;
5 | import org.apache.commons.cli.Options;
6 |
7 | import simplerpc.AutoBatchMode;
8 | import simplerpc.NettyServer;
9 | import simplerpc.NettyServerConfig;
10 |
11 | /**
12 | * @author huangli
13 | * Created on 2021-10-06
14 | */
15 | public class ServerStarter {
16 | public static void main(String[] args) throws Exception {
17 | Options options = new Options();
18 | options.addOption("e", "epoll", false, "use epoll");
19 | options.addOption("p", "port", true, "port");
20 | options.addOption("m", "mode", true, "auto batch mode");
21 | options.addOption("t", "bizThreads", true, "biz thread count");
22 | options.addOption(null, "maxBufferSize", true, "maxBufferSize");
23 | options.addOption(null, "maxBatchCount", true, "maxBatchCount");
24 |
25 |
26 | DefaultParser parser = new DefaultParser();
27 | CommandLine commandLine = parser.parse(options, args, true);
28 |
29 | NettyServerConfig config = new NettyServerConfig();
30 | config.setPort(Integer.parseInt(commandLine.getOptionValue("p", "12345")));
31 | if (commandLine.hasOption("m")) {
32 | if ("enable".equalsIgnoreCase(commandLine.getOptionValue("m"))) {
33 | config.setAutoBatchMode(AutoBatchMode.MODE_ENABLE);
34 | } else if ("disable".equalsIgnoreCase(commandLine.getOptionValue("m"))) {
35 | config.setAutoBatchMode(AutoBatchMode.MODE_DISABLE);
36 | } else {
37 | config.setAutoBatchMode(AutoBatchMode.MODE_AUTO);
38 | }
39 | } else {
40 | config.setAutoBatchMode(AutoBatchMode.MODE_AUTO);
41 | }
42 | if (commandLine.hasOption('e')) {
43 | config.setEpoll(true);
44 | }
45 | if (commandLine.hasOption("t")) {
46 | config.setBizThreads(Integer.parseInt(commandLine.getOptionValue("t")));
47 | }
48 | if (commandLine.hasOption("maxBufferSize")) {
49 | config.setMaxBufferSize(Integer.parseInt(commandLine.getOptionValue("maxBufferSize")));
50 | }
51 | if (commandLine.hasOption("maxBatchCount")) {
52 | config.setMaxBatchCount(Integer.parseInt(commandLine.getOptionValue("maxBatchCount")));
53 | }
54 | NettyServer server = new NettyServer(config);
55 |
56 | server.start();
57 |
58 | Runtime.getRuntime().addShutdownHook(new Thread(() -> {
59 | server.shutdown();
60 | }));
61 | }
62 | }
63 |
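A matching server launch for the client benchmarks above (classpath elided); note that, per the parsing logic, any -m value other than enable/disable falls back to AUTO:

```
java -cp <classpath> simplerpc.benchmark.ServerStarter -p 12345 -m auto -t 4
```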
--------------------------------------------------------------------------------
/src/main/resources/logback.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <configuration>
3 |     <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
4 |         <encoder>
5 |             <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
6 |         </encoder>
7 |     </appender>
8 |     <root level="TRACE">
9 |         <appender-ref ref="STDOUT"/>
10 |     </root>
11 | </configuration>
--------------------------------------------------------------------------------
/src/test/java/simplerpc/BIOBenchmark.java:
--------------------------------------------------------------------------------
1 | package simplerpc;
2 |
3 | import java.io.DataInputStream;
4 | import java.io.DataOutputStream;
5 | import java.io.EOFException;
6 | import java.io.IOException;
7 | import java.net.ServerSocket;
8 | import java.net.Socket;
9 | import java.util.concurrent.ConcurrentHashMap;
10 | import java.util.concurrent.ExecutorService;
11 | import java.util.concurrent.Executors;
12 | import java.util.function.BiConsumer;
13 |
14 | import simplerpc.benchmark.BenchBase;
15 |
16 | /**
17 | * @author huangli
18 | */
19 | public class BIOBenchmark extends BenchBase {
20 |
21 | private ServerSocket serverSocket;
22 | private Socket socketForServer;
23 | private Socket socketForClient;
24 | private Thread serverReadThread;
25 | private Thread clientReadThread;
26 | private DataOutputStream serverOutputStream;
27 | private DataOutputStream clientOutputStream;
28 | private ConcurrentHashMap<Long, BiConsumer<Long, String>> requestCallback = new ConcurrentHashMap<>();
29 | private ExecutorService serverExecutor = Executors.newFixedThreadPool(100);
30 | private long requestId;
31 |
32 | public BIOBenchmark(int threadCount, long time) {
33 | super(threadCount, time);
34 | }
35 |
36 | @Override
37 | public void init() throws Exception {
38 | int port = 23456;
39 | serverSocket = new ServerSocket(port);
40 | new Thread(() -> {
41 | try {
42 | socketForServer = serverSocket.accept();
43 |
44 | serverOutputStream = new DataOutputStream(socketForServer.getOutputStream());
45 | serverReadThread = new Thread(this::serverReadLoop);
46 | serverReadThread.start();
47 | } catch (IOException e) {
48 | e.printStackTrace();
49 | }
50 | }).start();
51 | socketForClient = new Socket("127.0.0.1", port);
52 | clientOutputStream = new DataOutputStream(socketForClient.getOutputStream());
53 | clientReadThread = new Thread(this::clientReadLoop);
54 | clientReadThread.start();
55 | }
56 |
57 | @Override
58 | public void shutdown() throws Exception {
59 | System.out.println("shutdown start");
60 | serverExecutor.shutdownNow();
61 | socketForClient.close();
62 | socketForServer.close();
63 | serverSocket.close();
64 | System.out.println("shutdown finish");
65 | }
66 |
67 | private void serverReadLoop() {
68 | try {
69 | DataInputStream dis = new DataInputStream(socketForServer.getInputStream());
70 | while (!stop) {
71 | long header = dis.readLong();
72 | String body = dis.readUTF();
73 | serverExecutor.submit(() -> {
74 | try {
75 | serverWrite(header, body);
76 | } catch (IOException e) {
77 | e.printStackTrace();
78 | }
79 | });
80 | }
81 | } catch (EOFException e) {
82 | System.out.println("[server] input stream closed");
83 | } catch (Exception e) {
84 | e.printStackTrace();
85 | }
86 | System.out.println("server read thread exit");
87 | }
88 |
89 | private void clientReadLoop() {
90 | try {
91 | DataInputStream dis = new DataInputStream(socketForClient.getInputStream());
92 | while (!stop) {
93 | long header = dis.readLong();
94 | String body = dis.readUTF();
95 | BiConsumer<Long, String> callback = requestCallback.remove(header);
96 | if (callback != null) {
97 | callback.accept(header, body);
98 | }
99 | }
100 | } catch (EOFException e) {
101 | System.out.println("[client] input stream closed");
102 | } catch (Exception e) {
103 | e.printStackTrace();
104 | }
105 | System.out.println("client read thread exit");
106 | }
107 |
108 | private void serverWrite(long header, String body) throws IOException {
109 | synchronized (serverOutputStream) {
110 | serverOutputStream.writeLong(header);
111 | serverOutputStream.writeUTF(body);
112 | serverOutputStream.flush();
113 | }
114 | }
115 |
116 | @Override
117 | public void test(int threadIndex) {
118 | try {
119 | synchronized (clientOutputStream) {
120 | requestId++;
121 | requestCallback.put(requestId, (header, body) -> {
122 | if (!stop) {
123 | successCount.add(1);
124 | }
125 | });
126 | clientOutputStream.writeLong(requestId);
127 | clientOutputStream.writeUTF("hello");
128 | clientOutputStream.flush();
129 | }
130 | } catch (Exception e) {
131 | e.printStackTrace();
132 | }
133 | }
134 |
135 | public static void main(String[] args) throws Exception {
136 | new BIOBenchmark(64, 10000).start();
137 | }
138 | }
139 |
--------------------------------------------------------------------------------
/src/test/java/simplerpc/NettyClientBenchmark.java:
--------------------------------------------------------------------------------
1 | package simplerpc;
2 |
3 | import java.nio.charset.StandardCharsets;
4 | import java.util.Collections;
5 | import java.util.concurrent.CompletableFuture;
6 |
7 | import org.junit.jupiter.api.Assertions;
8 |
9 | import io.netty.buffer.ByteBuf;
10 | import simplerpc.NettyTcpClient.Callback;
11 | import simplerpc.benchmark.BenchBase;
12 |
13 | /**
14 | * @author huangli
15 | * Created on 2021-09-14
16 | */
17 | public class NettyClientBenchmark extends BenchBase {
18 |
19 | private final int clientCount = 1;
20 | private NettyServer server;
21 | private NettyTcpClient[] client;
22 | private final static byte[] DATA = "hello".getBytes(StandardCharsets.UTF_8);
23 |
24 | public NettyClientBenchmark(int threadCount, int time) {
25 | super(threadCount, time);
26 | }
27 |
28 | @Override
29 | public void init() throws Exception {
30 | NettyServerConfig config = new NettyServerConfig();
31 | config.setPort(12345);
32 | config.setAutoBatchMode(AutoBatchMode.MODE_AUTO);
33 | // config.setBizThreads(0);
34 | server = new NettyServer(config);
35 |
36 | server.start();
37 | client = new NettyTcpClient[clientCount];
38 | for (int i = 0; i < clientCount; i++) {
39 | NettyTcpClientConfig c = new NettyTcpClientConfig();
40 | client[i] = new NettyTcpClient(() -> Collections.singletonList("127.0.0.1:12345"), c);
41 | client[i].start();
42 | }
43 | }
44 |
45 | @Override
46 | public void shutdown() {
47 | for (int i = 0; i < clientCount; i++) {
48 | client[i].close();
49 | }
50 | server.shutdown();
51 | }
52 |
53 | @Override
54 | public void test(int threadIndex) {
55 | for (int i = 0; i < clientCount; i++) {
56 | CompletableFuture<Void> fu = client[i].sendRequest(Commands.COMMAND_PING, new Callback<Void>() {
57 | @Override
58 | public void encode(ByteBuf out) {
59 | out.writeBytes(DATA);
60 | }
61 |
62 | @Override
63 | public Void decode(ByteBuf in) {
64 | short code = in.readShort();
65 | if (code != Commands.CODE_SUCCESS) {
66 | throw new RuntimeException();
67 | }
68 | byte[] bs = new byte[DATA.length];
69 | in.readBytes(bs);
70 | Assertions.assertArrayEquals(DATA, bs);
71 | return null;
72 | }
73 | }, 3500);
74 |
75 | // sync call
76 | // try {
77 | // fu.get();
78 | // successCount.add(1);
79 | // } catch (Exception e) {
80 | // failCount.add(1);
81 | // }
82 |
83 | // async call
84 | fu.handle((unused, throwable) -> {
85 | if (throwable != null) {
86 | failCount.add(1);
87 | } else {
88 | successCount.add(1);
89 | }
90 | return null;
91 | });
92 | }
93 | }
94 |
95 | public static void main(String[] args) throws Exception {
96 | new NettyClientBenchmark(128, 10000).start();
97 | }
98 | }
99 |
--------------------------------------------------------------------------------
/src/test/java/simplerpc/RmqBenchmark.java:
--------------------------------------------------------------------------------
1 | package simplerpc;
2 |
3 | import java.nio.charset.StandardCharsets;
4 | import java.util.concurrent.Executors;
5 |
6 | import org.apache.rocketmq.remoting.CommandCustomHeader;
7 | import org.apache.rocketmq.remoting.exception.RemotingCommandException;
8 | import org.apache.rocketmq.remoting.netty.NettyClientConfig;
9 | import org.apache.rocketmq.remoting.netty.NettyRemotingClient;
10 | import org.apache.rocketmq.remoting.netty.NettyRemotingServer;
11 | import org.apache.rocketmq.remoting.netty.NettyRequestProcessor;
12 | import org.apache.rocketmq.remoting.netty.NettyServerConfig;
13 | import org.apache.rocketmq.remoting.protocol.RemotingCommand;
14 |
15 | import io.netty.channel.ChannelHandlerContext;
16 | import simplerpc.benchmark.BenchBase;
17 |
18 | /**
19 | * @author huangli
20 | * Created on 2021-09-14
21 | */
22 | public class RmqBenchmark extends BenchBase {
23 |
24 | private NettyRemotingServer server;
25 | private NettyRemotingClient client;
26 | private static byte[] data = "hello".getBytes(StandardCharsets.UTF_8);
27 |
28 | public RmqBenchmark(int threadCount, int time) {
29 | super(threadCount, time);
30 | }
31 |
32 | public static class ReqHeader implements CommandCustomHeader {
33 | @Override
34 | public void checkFields() throws RemotingCommandException {
35 | }
36 | }
37 |
38 | public static class RespHeader implements CommandCustomHeader {
39 | @Override
40 | public void checkFields() throws RemotingCommandException {
41 | }
42 | }
43 |
44 |
45 | @Override
46 | public void init() throws Exception {
47 | server = new NettyRemotingServer(new NettyServerConfig());
48 | server.registerProcessor(Commands.COMMAND_PING, new NettyRequestProcessor() {
49 | @Override
50 | public RemotingCommand processRequest(ChannelHandlerContext ctx, RemotingCommand request) throws Exception {
51 | try {
52 | request.decodeCommandCustomHeader(ReqHeader.class);
53 | RemotingCommand resp =
54 | RemotingCommand.createResponseCommand(Commands.COMMAND_PING, null, RespHeader.class);
55 | resp.setBody(request.getBody());
56 |
57 | // If we sleep 1ms here, rocketmq remoting async calls will fail, because there is no backpressure capability
58 | // Thread.sleep(1);
59 |
60 | return resp;
61 | } catch (Exception e) {
62 | e.printStackTrace();
63 | return null;
64 | }
65 | }
66 |
67 | @Override
68 | public boolean rejectRequest() {
69 | return false;
70 | }
71 | }, Executors.newFixedThreadPool(4));
72 | server.start();
73 |
74 | client = new NettyRemotingClient(new NettyClientConfig());
75 | client.start();
76 | }
77 |
78 | @Override
79 | public void shutdown() throws Exception {
80 | Thread.sleep(1000);
81 | client.shutdown();
82 | server.shutdown();
83 | }
84 |
85 | @Override
86 | public void test(int threadIndex) {
87 | RemotingCommand req = RemotingCommand.createRequestCommand(Commands.COMMAND_PING, new ReqHeader());
88 | req.setBody(data);
89 | try {
90 | // sync call
91 | // client.invokeSync("127.0.0.1:8888", req, 3000);
92 | // successCount.add(1);
93 |
94 | // async call
95 | client.invokeAsync("127.0.0.1:8888", req, 3000, responseFuture -> {
96 | if (responseFuture.isSendRequestOK()) {
97 | successCount.add(1);
98 | } else {
99 | failCount.add(1);
100 | }
101 | });
102 | } catch (Exception e) {
103 | e.printStackTrace();
104 | failCount.add(1);
105 | }
106 | }
107 |
108 | public static void main(String[] args) throws Exception {
109 | new RmqBenchmark(128, 10000).start();
110 | }
111 | }
112 |
--------------------------------------------------------------------------------