├── .gitignore
├── Client
├── pom.xml
└── src
│ ├── main
│ ├── java
│ │ └── cn
│ │ │ └── mageek
│ │ │ └── client
│ │ │ ├── Client.java
│ │ │ ├── Connection.java
│ │ │ ├── handler
│ │ │ ├── DataHandler.java
│ │ │ └── WatchHandler.java
│ │ │ └── log
│ │ │ └── keep.txt
│ └── resources
│ │ ├── app.properties
│ │ └── log4j.properties
│ └── test
│ └── java
│ └── cn
│ └── mageek
│ └── client
│ ├── AppleAndOrangeWithnotGenerics.java
│ ├── ConnectionTest.java
│ ├── ZKClientTest.java
│ ├── ZKServerTest.java
│ ├── ZooKeeperClientTest.java
│ └── ZooKeeperServerTest.java
├── Common
├── pom.xml
└── src
│ ├── main
│ ├── java
│ │ └── cn
│ │ │ └── mageek
│ │ │ └── common
│ │ │ ├── command
│ │ │ ├── AbstractClientCommand.java
│ │ │ └── AbstractDataNodeCommand.java
│ │ │ ├── ha
│ │ │ ├── HAThirdParty.java
│ │ │ ├── NameNodeMaster.java
│ │ │ ├── NameNodeWatcher.java
│ │ │ └── ZKThirdParty.java
│ │ │ ├── helper
│ │ │ ├── KillDataNode.java
│ │ │ └── tcpclient.js
│ │ │ ├── log
│ │ │ └── keep.txt
│ │ │ ├── model
│ │ │ ├── DataRequest.java
│ │ │ ├── DataResponse.java
│ │ │ ├── DataType.java
│ │ │ ├── HeartbeatRequest.java
│ │ │ ├── HeartbeatResponse.java
│ │ │ ├── HeartbeatType.java
│ │ │ ├── LineType.java
│ │ │ ├── WatchReqProto.java
│ │ │ ├── WatchRequest.java
│ │ │ ├── WatchRespProto.java
│ │ │ ├── WatchResponse.java
│ │ │ ├── WebMsgObject.java
│ │ │ └── proto
│ │ │ │ ├── WatchReqProto.proto
│ │ │ │ └── WatchRespProto.proto
│ │ │ ├── res
│ │ │ └── Constants.java
│ │ │ └── util
│ │ │ ├── ConsistHash.java
│ │ │ ├── Decoder.java
│ │ │ ├── Encoder.java
│ │ │ ├── HAHelper.java
│ │ │ └── PropertyLoader.java
│ └── resources
│ │ └── log4j.properties
│ └── test
│ └── java
│ └── cn
│ └── mageek
│ └── common
│ ├── model
│ └── WatchProtoTest.java
│ └── util
│ └── ConsistHashTest.java
├── DataNode
├── pom.xml
└── src
│ └── main
│ ├── java
│ └── cn
│ │ └── mageek
│ │ └── datanode
│ │ ├── command
│ │ ├── CommandAPPEND.java
│ │ ├── CommandCOMMAND.java
│ │ ├── CommandDECR.java
│ │ ├── CommandDECRBY.java
│ │ ├── CommandDEL.java
│ │ ├── CommandEXPIRE.java
│ │ ├── CommandGET.java
│ │ ├── CommandINCR.java
│ │ ├── CommandINCRBY.java
│ │ ├── CommandKEYS.java
│ │ ├── CommandSET.java
│ │ └── CommandSETNX.java
│ │ ├── handler
│ │ ├── BusinessHandler.java
│ │ ├── ClientHandler.java
│ │ ├── DataTransferHandler.java
│ │ ├── HeartBeatHandler.java
│ │ ├── OtherHandler.java
│ │ ├── PushMsgHandler.java
│ │ ├── RcvMsgHandler.java
│ │ └── SendMsgHandler.java
│ │ ├── job
│ │ ├── DataRunnable.java
│ │ ├── DataTransfer.java
│ │ ├── ExpireChecking.java
│ │ ├── Heartbeat.java
│ │ └── MSSync.java
│ │ ├── main
│ │ └── DataNode.java
│ │ ├── res
│ │ ├── CommandFactory.java
│ │ ├── ConstPool.java
│ │ └── JobFactory.java
│ │ └── service
│ │ ├── CronJobManager.java
│ │ └── DataManager.java
│ └── resources
│ ├── app.properties
│ └── log4j.properties
├── NameNode
├── pom.xml
└── src
│ └── main
│ ├── java
│ └── cn
│ │ └── mageek
│ │ └── namenode
│ │ ├── handler
│ │ ├── ClientWatcherHandler.java
│ │ └── DataNodeHeartBeatHandler.java
│ │ ├── log
│ │ └── keep.txt
│ │ ├── main
│ │ └── NameNode.java
│ │ ├── res
│ │ ├── CommandFactory.java
│ │ ├── ConstPool.java
│ │ └── CronJobFactory.java
│ │ └── service
│ │ ├── ClientManager.java
│ │ ├── CronJobManager.java
│ │ └── DataNodeManager.java
│ └── resources
│ ├── app.properties
│ └── log4j.properties
├── Readme.md
├── pom.xml
└── src
├── assembly
└── bin.xml
└── main
└── java
└── cn
└── mageek
└── CHKV
└── Main.java
/.gitignore:
--------------------------------------------------------------------------------
1 | # Created by .ignore support plugin (hsz.mobi)
2 | ### Java template
3 | # Compiled class file
4 | *.class
5 |
6 | # Log file
7 | *.log
8 |
9 | # BlueJ files
10 | *.ctxt
11 |
12 | # Mobile Tools for Java (J2ME)
13 | .mtj.tmp/
14 |
15 | # Package Files #
16 | *.jar
17 | *.war
18 | *.ear
19 | *.zip
20 | *.tar.gz
21 | *.rar
22 |
23 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
24 | hs_err_pid*
25 |
26 | # IntelliJ project files
27 | .idea
28 | *.iml
29 | out
30 | gen
31 |
32 | /Common/target/
33 | /Client/target/
34 | /DataNode/target/
35 | /NameNode/target/
36 | /target/
37 |
--------------------------------------------------------------------------------
/Client/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 |
6 | CHKV
7 | cn.mageek
8 | 1.0-SNAPSHOT
9 | ../pom.xml
10 |
11 | 4.0.0
12 | Client
13 | jar
14 |
15 |
16 |
17 |
18 | cn.mageek
19 | Common
20 | 1.0-SNAPSHOT
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 | org.apache.maven.plugins
29 | maven-compiler-plugin
30 | 3.7.0
31 |
32 | 1.8
33 | 1.8
34 |
35 |
36 |
37 | maven-assembly-plugin
38 | 3.1.0
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 | com.spotify
48 | dockerfile-maven-plugin
49 | 1.3.4
50 |
51 | ${docker.image.prefix}/${project.artifactId}
52 |
53 |
54 |
55 |
56 |
57 |
--------------------------------------------------------------------------------
/Client/src/main/java/cn/mageek/client/Client.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.client;
2 |
3 | import cn.mageek.common.model.DataRequest;
4 | import cn.mageek.common.model.DataResponse;
5 |
6 | import java.util.List;
7 |
8 | import static cn.mageek.common.model.LineType.SINGLE_ERROR;
9 |
10 | /**
11 | * @author Mageek Chiu
12 | * @date 2018/5/10 0010:19:17
13 | */
14 | public class Client extends Connection {
15 |
16 | public Client() {
17 |
18 | }
19 |
20 | public Client(String nameNodeIP,String nameNodePort) {
21 | super(nameNodeIP,nameNodePort);
22 | }
23 |
24 | /**
25 | *
26 | * @param key key
27 | * @param value value
28 | * @return 设置是否成功
29 | */
30 | public boolean set(String key, String value){
31 | DataRequest request = new DataRequest("SET",key,value);
32 | DataResponse r = sendCommand(request);// 自动生成ID
33 | return !r.getLineType().equals(SINGLE_ERROR);
34 | }
35 |
36 | public int setnx(String key, String value){
37 | DataRequest request = new DataRequest("SETNX",key,value);
38 | DataResponse r = sendCommand(request);// 自动生成ID
39 | return r.getLineType().equals(SINGLE_ERROR) ? -1 : Integer.parseInt(r.getMsg());
40 | }
41 |
42 | /**
43 | *
44 | * @param key key
45 | * @return 删除键个数
46 | */
47 | public int del(String key){
48 | DataRequest request = new DataRequest("DEL",key,"");
49 | DataResponse r = sendCommand(request);
50 | return r.getLineType().equals(SINGLE_ERROR) ? -1 : Integer.parseInt(r.getMsg());
51 | }
52 |
53 | public String get(String key){
54 | DataRequest request = new DataRequest("GET",key,"");
55 | DataResponse r = sendCommand(request);
56 | return r.getMsg();
57 | }
58 |
59 | public List keys(String key){
60 | DataRequest request = new DataRequest("KEYS",key,"");
61 | DataResponse r = sendCommand(request);
62 | return r.getMsgList();
63 | }
64 |
65 | public int expire(String key,long value){
66 | DataRequest request = new DataRequest("EXPIRE",key,String.valueOf(value));
67 | DataResponse r = sendCommand(request);
68 | return r.getLineType().equals(SINGLE_ERROR) ? -1 : Integer.parseInt(r.getMsg());
69 | }
70 |
71 | public int incr(String key){
72 | DataRequest request = new DataRequest("INCR",key,"");
73 | DataResponse r = sendCommand(request);
74 | return r.getLineType().equals(SINGLE_ERROR) ? -1 : Integer.parseInt(r.getMsg());
75 | }
76 | public int decr(String key){
77 | DataRequest request = new DataRequest("DECR",key,"");
78 | DataResponse r = sendCommand(request);
79 | return r.getLineType().equals(SINGLE_ERROR) ? -1 : Integer.parseInt(r.getMsg());
80 | }
81 |
82 | /**
83 | *
84 | * @param key key
85 | * @param value must > 0
86 | * @return
87 | */
88 | public int incrby(String key,int value){
89 | DataRequest request = new DataRequest("INCRBY",key,String.valueOf(value));
90 | DataResponse r = sendCommand(request);
91 | return r.getLineType().equals(SINGLE_ERROR) ? -1 : Integer.parseInt(r.getMsg());
92 | }
93 |
94 | /**
95 | *
96 | * @param key key
97 | * @param value must > 0
98 | * @return
99 | */
100 | public int decrby(String key,int value){
101 | DataRequest request = new DataRequest("DECRBY",key,String.valueOf(value));
102 | DataResponse r = sendCommand(request);
103 | return r.getLineType().equals(SINGLE_ERROR) ? -1 : Integer.parseInt(r.getMsg());
104 | }
105 |
106 | public String append(String key,String value){
107 | DataRequest request = new DataRequest("APPEND",key,value);
108 | DataResponse r = sendCommand(request);
109 | return r.getMsg();
110 | }
111 |
112 | }
113 |
--------------------------------------------------------------------------------
/Client/src/main/java/cn/mageek/client/handler/DataHandler.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.client.handler;
2 |
3 | import cn.mageek.common.model.DataResponse;
4 | import cn.mageek.common.util.Decoder;
5 | import io.netty.buffer.ByteBuf;
6 | import io.netty.channel.ChannelHandlerContext;
7 | import io.netty.channel.ChannelInboundHandlerAdapter;
8 | import io.netty.util.CharsetUtil;
9 | import org.slf4j.Logger;
10 | import org.slf4j.LoggerFactory;
11 |
12 | import java.util.List;
13 | import java.util.Map;
14 | import java.util.concurrent.ConcurrentHashMap;
15 |
16 | /**
17 | * 处理 来自NameNode 的 DataNode 监视事件
18 | * @author Mageek Chiu
19 | * @date 2018/3/10 0010:20:33
20 | */
21 | public class DataHandler extends ChannelInboundHandlerAdapter {
22 | private static final Logger logger = LoggerFactory.getLogger(DataHandler.class);
23 |
24 | private volatile Map dataResponseMap;// 存放所有响应
25 |
26 | public DataHandler(Map dataResponseMap) {
27 | this.dataResponseMap = dataResponseMap;
28 | }
29 |
30 | @Override
31 | public void channelActive(ChannelHandlerContext ctx) throws Exception {
32 | logger.info("opened connection to: {}",ctx.channel().remoteAddress());
33 | }
34 |
35 | @Override
36 | public void channelInactive(ChannelHandlerContext ctx) throws Exception {
37 | logger.info("closed connection: {}",ctx.channel().remoteAddress());
38 | }
39 |
40 | @Override
41 | public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
42 | ByteBuf buf = (ByteBuf) msg;
43 | // logger.debug("raw msg {},length,{}",buf.toString(CharsetUtil.UTF_8),buf.readableBytes());
44 | List responses = Decoder.bytesToDataResponse(buf);
45 | responses.forEach((response)->{
46 | logger.debug("DataNode received: {}",response);
47 | dataResponseMap.put(response.getID(),response);// 放置结果
48 | });
49 | // ctx.close();// 收到响应就关闭连接
50 | }
51 |
52 | @Override
53 | public void channelReadComplete(ChannelHandlerContext ctx) throws Exception {
54 |
55 | }
56 |
57 | @Override
58 | public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
59 | logger.error("connection to: {},error: ",ctx.channel().remoteAddress(),cause);
60 | ctx.close();
61 | }
62 | }
63 |
--------------------------------------------------------------------------------
/Client/src/main/java/cn/mageek/client/handler/WatchHandler.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.client.handler;
2 |
3 | import cn.mageek.client.Connection;
4 | import cn.mageek.common.model.WatchRequest;
5 | import cn.mageek.common.model.WatchResponse;
6 | import io.netty.channel.ChannelHandlerContext;
7 | import io.netty.channel.ChannelInboundHandlerAdapter;
8 | import org.slf4j.Logger;
9 | import org.slf4j.LoggerFactory;
10 |
11 | /**
12 | * 处理 来自NameNode 的 DataNode 监视事件
13 | * @author Mageek Chiu
14 | * @date 2018/3/10 0010:16:22
15 | */
/**
 * Handles DataNode watch events pushed by the NameNode: whenever the set of
 * live DataNodes changes, the NameNode sends a WatchResponse carrying the new
 * consistent-hash circle, and this handler installs it on the owning
 * Connection so subsequent requests are routed against the fresh topology.
 * @author Mageek Chiu
 * @date 2018/3/10 0010:16:22
 */
public class WatchHandler extends ChannelInboundHandlerAdapter {
    private static final Logger logger = LoggerFactory.getLogger(WatchHandler.class);

    // Owning connection whose server map (hash circle) this handler updates.
    private Connection connection;

    public WatchHandler(Connection connection) {
        this.connection = connection;
    }

    @Override
    public void channelActive(ChannelHandlerContext ctx) throws Exception {
        logger.info("opened connection to: {}",ctx.channel().remoteAddress());
        // ctx.writeAndFlush(new WatchRequest(true));// immediately send a request once online
    }

    @Override
    public void channelInactive(ChannelHandlerContext ctx) throws Exception {
        logger.info("closed connection: {}",ctx.channel().remoteAddress());
    }

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
        WatchResponse response = (WatchResponse) msg;//
        logger.debug("received DataNode list: {}",response);
        // Replace the local hash circle with the one pushed by the NameNode.
        // NOTE(review): plain field write — cross-thread visibility depends on
        // how Connection declares sortedServerMap; confirm it is volatile.
        connection.sortedServerMap = response.getHashCircle();
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
        logger.error("connection to: {},error: ",ctx.channel().remoteAddress(),cause);
        ctx.close();
    }
}
50 |
--------------------------------------------------------------------------------
/Client/src/main/java/cn/mageek/client/log/keep.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MageekChiu/CHKV/82204cb94fee417e983c9cd602031ef0d8f2948b/Client/src/main/java/cn/mageek/client/log/keep.txt
--------------------------------------------------------------------------------
/Client/src/main/resources/app.properties:
--------------------------------------------------------------------------------
1 | client.namenode.ip=192.168.0.136
2 | client.namenode.port=10102
--------------------------------------------------------------------------------
/Client/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | ### 设置root###
2 | log4j.rootLogger = debug,stdout,D,E
3 |
4 |
5 | ### 输出信息到控制台 ###
6 | log4j.appender.stdout = org.apache.log4j.ConsoleAppender
7 | log4j.appender.stdout.Target = System.out
8 | log4j.appender.stdout.layout = org.apache.log4j.PatternLayout
9 | log4j.appender.stdout.layout.ConversionPattern = [%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} %l:%m%n
10 |
11 |
12 | ### 输出DEBUG 级别以上的日志到=debug.log ###
13 | # daily 表示一天一个文件
14 | log4j.appender.D = org.apache.log4j.DailyRollingFileAppender
15 | log4j.appender.D.File = ./Client/src/main/java/cn/mageek/client/log/debug.log
16 | log4j.appender.D.DatePattern=yyyy-MM-dd-HH'.log'
17 | log4j.appender.D.Append = true
18 | log4j.appender.D.Threshold = DEBUG
19 | log4j.appender.D.layout = org.apache.log4j.PatternLayout
20 | log4j.appender.D.layout.ConversionPattern = [%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} %l:%m%n
21 |
22 |
23 | ### 输出ERROR 级别以上的日志到=error.log ###
24 | log4j.appender.E = org.apache.log4j.DailyRollingFileAppender
25 | log4j.appender.E.File = ./Client/src/main/java/cn/mageek/client/log/error.log
26 | log4j.appender.E.DatePattern=yyyy-MM-dd-HH'.log'
27 | log4j.appender.E.Append = true
28 | log4j.appender.E.Threshold = ERROR
29 | log4j.appender.E.layout = org.apache.log4j.PatternLayout
30 | log4j.appender.E.layout.ConversionPattern = [%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} %l:%m%n
31 |
32 |
33 | ## 调整每个模块的日志级别##
34 | log4j.logger.cn.mageek.client=debug
35 | log4j.logger.cn.mageek.common=debug
36 |
37 | log4j.logger.io.netty=warn
38 |
39 | log4j.logger.org.reflections=info
40 |
41 | log4j.logger.org.apache.zookeeper=info
--------------------------------------------------------------------------------
/Client/src/test/java/cn/mageek/client/AppleAndOrangeWithnotGenerics.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.client;
2 |
3 | /**
4 | * @author Mageek Chiu
5 | * @date 2018/5/20 0020:19:50
6 | */
7 |
8 | import java.util.*;
9 |
// Toy class hierarchy used to observe what element type the compiler infers
// for heterogeneous Arrays.asList(...) arguments.
class Snow {}

class Powder extends Snow {}
class Light extends Powder {}
class Heavy extends Powder {}
class Crusty extends Snow {}
class Slush extends Snow {}

/**
 * Demo of collection typing without explicit generics (per the class name).
 * NOTE(review): the dump this file came from stripped generic parameters
 * elsewhere, so some of the raw List declarations below may originally have
 * been parameterized (e.g. List&lt;Snow&gt;) — confirm against the repository.
 */
public class AppleAndOrangeWithnotGenerics {

    public static void main(String[] args) {

        // Mixed direct Snow subclasses: the common supertype is Snow.
        List snow1 = Arrays.asList(
                new Crusty(), new Slush(), new Powder()
        );

        // Both elements extend Powder, so the inferred element type is Powder.
        List snow2 = Arrays.asList(
                new Light(), new Heavy()
        );

        // Collections.addAll avoids the fixed-size view Arrays.asList returns.
        List snow3 = new ArrayList();
        Collections.addAll(snow3, new Light(), new Heavy());

        List snow4 = Arrays.asList(new Light(), new Heavy());
    }
}
36 |
--------------------------------------------------------------------------------
/Client/src/test/java/cn/mageek/client/ConnectionTest.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.client;
2 |
3 | import org.junit.Test;
4 | import org.slf4j.Logger;
5 | import org.slf4j.LoggerFactory;
6 |
7 | import java.util.TreeMap;
8 | import java.util.regex.Matcher;
9 | import java.util.regex.Pattern;
10 |
11 | /**
12 | * @author Mageek Chiu
13 | * @date 2018/5/10 0010:20:51
14 | */
/**
 * Manual smoke tests for the Client API (main() needs a live NameNode at
 * 192.168.0.136:10102) plus two small unit tests pinning String.split and
 * regex Matcher.matches behaviour.
 * @author Mageek Chiu
 * @date 2018/5/10 0010:20:51
 */
public class ConnectionTest {

    private static final Logger logger = LoggerFactory.getLogger(ConnectionTest.class);


    public static void main(String... arg) throws Exception {
//        Connection connection = new Connection();
//        connection.connect();

        Client client = new Client();
        client.connect("192.168.0.136","10102");

//        logger.debug(client.set("192.168.0.136:10099","123456")+"");
//        logger.debug(client.get("192.168.0.136:10099")+"");
//        logger.debug(client.set("112","23")+"");
//        logger.debug(client.setnx("112","2333323")+"");
//        logger.debug(client.setnx("112","2333")+"");
//        logger.debug(client.setnx("112","232323")+"");
//        logger.debug(client.get("112")+"");
//        logger.debug(client.set("nba","23")+"");
//        logger.debug(client.set("nba rock","23")+"");
//        logger.debug(client.del("1321")+"");
//        logger.debug(client.keys("nba").toString());
//        logger.debug(client.keys("*").toString());
//        logger.debug(client.del("112")+"");
//        logger.debug(client.expire("nba",5)+"");// expires after 5 seconds
//        logger.debug(client.get("nba")+"");
//        Thread.sleep(6000);
//        logger.debug(client.get("nba")+"");

        logger.debug(client.incrby("nba",10)+"");
        logger.debug(client.incr("nba")+"");
//        logger.debug(client.append("112","a")+"");
//        logger.debug(client.append("112","a")+"");

        // Second thread issuing increments/decrements concurrently with the
        // statements below, to eyeball that numeric updates are serialized.
        new Thread(() -> {
            logger.debug(client.incrby("nba",3)+"");
            logger.debug(client.incr("nba")+"");
            logger.debug(client.decrby("nba",4)+"");
            logger.debug(client.incr("nba")+"");
            logger.debug(client.incr("nba")+"");
            logger.debug(client.incrby("nba",2)+"");
//            logger.debug(client.append("112","b")+"");
//            logger.debug(client.append("112","b")+"");
//            logger.debug(client.append("112","b")+"");
//            logger.debug(client.append("112","b")+"");
            logger.debug(client.incrby("nba",4)+"");
        }).start();

        logger.debug(client.incr("nba")+"");
        logger.debug(client.decr("nba")+"");
        logger.debug(client.decrby("nba",3)+"");
        logger.debug(client.decr("nba")+"");
        logger.debug(client.incrby("nba",6)+"");
        logger.debug(client.decr("nba")+"");

//        logger.debug(client.append("112","a")+"");
//        logger.debug(client.append("112","a")+"");



//        client.close();



//        try(Client client = new Client("192.168.0.136","10102")){
//            logger.debug(client.set("192.168.0.136:10099","123456")+"");
//            logger.debug(client.get("192.168.0.136:10099")+"");
//            logger.debug(client.set("112","23")+"");
//            logger.debug(client.del("1321")+"");
//            logger.debug(client.del("112")+"");
//        }

    }

    @Test
    public void SplitTest(){
        // Every result is 2, so a trailing separator makes no difference.
        String a = "aa\r\nbb";
        logger.debug("{}",a.split("\r\n").length);//2
        a = "aa\r\nbb\r\n";
        logger.debug("{}",a.split("\r\n").length);//2


        a = "aa";
        logger.debug("{}",a.split("\r\n").length);//1

    }

    @Test
    public void regexTest(){
        // Matcher.matches() must match the WHOLE input, so "sssa" against
        // pattern "a" prints false (use find() for substring search).
        String regex = "a";
        Matcher matcher = Pattern.compile(regex).matcher("sssa");
        logger.debug(String.valueOf(matcher.matches()));

    }
}
--------------------------------------------------------------------------------
/Client/src/test/java/cn/mageek/client/ZKClientTest.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.client;
2 |
3 | import cn.mageek.common.ha.HAThirdParty;
4 | import cn.mageek.common.ha.ZKThirdParty;
5 | import org.slf4j.Logger;
6 | import org.slf4j.LoggerFactory;
7 |
8 | import java.util.function.Consumer;
9 |
10 | import static cn.mageek.client.ZKServerTest.*;
11 |
12 | /**
13 | * @author Mageek Chiu
14 | * @date 2018/5/21 0010:18:51
15 | */
/**
 * Demo of a ZK "client": watches the master node registered by ZKServerTest
 * through the HAThirdParty abstraction and blocks until a master appears.
 * @author Mageek Chiu
 * @date 2018/5/21 0010:18:51
 */
public class ZKClientTest {

    private static final Logger logger = LoggerFactory.getLogger(ZKClientTest.class);

    public static void main(String... arg) {
        HAThirdParty party = new ZKThirdParty(CONNECT_ADDR,SESSION_TIMEOUT,CONNECTION_TIMEOUT,MASTER_NODE_PATH,1000,10);
        party.getInstantMaster();

        // Callback invoked on master-node changes; null means the master vanished.
        Consumer consumer = s -> {
            if (s==null){
                logger.error("masterNode is down, waiting");
            }else{
                logger.info("masterNode may have changed:{}",s);
            }
        };

        party.beginWatch(consumer);
        // Busy-wait until a master shows up.
        // NOTE(review): spins at 100% CPU; a latch or short sleep would be kinder.
        while (party.getMasterNode()==null);
        logger.debug(party.getMasterNode());

        while (true){// keep the process alive so the watch stays registered
            try {
                Thread.sleep(2000);
            } catch (InterruptedException e) {
                e.printStackTrace();
                break;
            }
        }
    }

}
--------------------------------------------------------------------------------
/Client/src/test/java/cn/mageek/client/ZKServerTest.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.client;
2 |
3 | import cn.mageek.common.ha.HAThirdParty;
4 | import cn.mageek.common.ha.ZKThirdParty;
5 | import org.slf4j.Logger;
6 | import org.slf4j.LoggerFactory;
7 |
8 | import java.lang.management.ManagementFactory;
9 | import java.util.function.Consumer;
10 |
11 | /**
12 | * @author Mageek Chiu
13 | * @date 2018/5/21 0010:18:51
14 | */
/**
 * Demo of a ZK "server": registers itself as master through the HAThirdParty
 * abstraction, confirms who won the election, then keeps watching the master
 * node so it can retry the election whenever the current master disappears.
 * @author Mageek Chiu
 * @date 2018/5/21 0010:18:51
 */
public class ZKServerTest {

    private static final Logger logger = LoggerFactory.getLogger(ZKServerTest.class);

    // ZooKeeper ensemble address list.
    public static final String CONNECT_ADDR = "127.0.0.1:2181,127.0.0.1:3181,127.0.0.1:4181";
    public static final int SESSION_TIMEOUT = 2000;
    public static final int CONNECTION_TIMEOUT = 8000;
    public static final String MASTER_NODE_PATH = "/CHKV/masterNode";
    public static String thisNode;
    public static String masterNode;

    public static void main(String... arg) {
        thisNode = ManagementFactory.getRuntimeMXBean().getName();
        logger.debug("thisNode: {}",thisNode);// a real NameNode would store its DataNode-facing ip:port plus its client-facing ip:port here

        HAThirdParty party = new ZKThirdParty(CONNECT_ADDR,SESSION_TIMEOUT,CONNECTION_TIMEOUT,MASTER_NODE_PATH,1000,10);
        party.setThisNode(thisNode);
        // Try to become master (create the master node).
        boolean result = party.becomeMaster();
        if (result){
            logger.info("Successfully Became Master");
        }else {
            logger.info("Failed to Became Master");
        }
        // Read back who actually holds the master node and confirm.
        masterNode = party.getInstantMaster();
        boolean result1 = thisNode.equals(masterNode);
        if (result1){
            logger.info("Confirmed, I am the Master,masterNode;{}",masterNode);
        }else {
            logger.info("Confirmed,I am the Standby,masterNode;{}",masterNode);
        }

        // On master loss (null) retry the election; otherwise just log the change.
        Consumer consumer = s -> {
            if (s==null){
                logger.error("masterNode is down, try to become Master");
                if (party.becomeMaster()){
                    logger.info("Successfully tried to Became Master");
                }else {
                    logger.info("Failed to try to Became Master");
                }
            }else{
                logger.info("masterNode:{}",s);
            }
        };
        party.beginWatch(consumer);

        while (true){// keep the process alive so the watch stays registered
            try {
                Thread.sleep(2000);
            } catch (InterruptedException e) {
                e.printStackTrace();
                break;
            }
        }
    }

}
--------------------------------------------------------------------------------
/Client/src/test/java/cn/mageek/client/ZooKeeperClientTest.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.client;
2 |
3 | import org.apache.curator.framework.CuratorFramework;
4 | import org.apache.curator.framework.CuratorFrameworkFactory;
5 | import org.apache.curator.framework.recipes.cache.ChildData;
6 | import org.apache.curator.framework.recipes.cache.NodeCache;
7 | import org.apache.curator.framework.recipes.cache.NodeCacheListener;
8 | import org.apache.zookeeper.data.Stat;
9 | import org.slf4j.Logger;
10 | import org.slf4j.LoggerFactory;
11 |
12 | import static cn.mageek.client.ZooKeeperServerTest.*;
13 |
14 | /**
15 | * @author Mageek Chiu
16 | * @date 2018/5/10 0010:20:51
17 | */
/**
 * Raw-Curator counterpart of ZKClientTest: connects straight to ZooKeeper,
 * watches the master node, and blocks until master info becomes available.
 * @author Mageek Chiu
 * @date 2018/5/10 0010:20:51
 */
public class ZooKeeperClientTest {

    private static final Logger logger = LoggerFactory.getLogger(ZooKeeperClientTest.class);

    // volatile: written by the Curator listener thread, read by main's spin loop.
    private static volatile String masterInfo = null;

    public static void main(String... arg) throws Exception {
        // Build the connection
        CuratorFramework curator = CuratorFrameworkFactory
                .builder()
                .connectString(CONNECT_ADDR)
                .connectionTimeoutMs(CONNECTION_TIMEOUT)// connection creation timeout
                .sessionTimeoutMs(SESSION_TIMEOUT)// session timeout
                .retryPolicy(policy)
                .build();
        curator.start();

        // Watch the master node for changes
        NodeCache cache = new NodeCache(curator, MASTER_NODE_PATH,false);
        cache.getListenable().addListener(()->{
            ChildData data = cache.getCurrentData();
            if (data != null){
                String path = data.getPath();
                Stat stat = data.getStat();
                String dataString = new String(data.getData());
                logger.debug("masterNode info, path:{},data:{},stat,{}",path,dataString,stat);
                masterInfo = dataString;
            }else {
                logger.info("masterNode is down, waiting");
            }
        });
        cache.start(true);


        // Fetch the current master, then block until one exists
        try {
            masterInfo = new String(curator.getData().forPath(MASTER_NODE_PATH));
        }catch (Exception e){
            logger.error("no masterInfo");
            masterInfo = null;
        }
        // NOTE(review): busy-wait spins at 100% CPU until the listener sets masterInfo.
        while (masterInfo==null);
        logger.info("masterInfo:{}",masterInfo);

        while (true){// keep the process alive so the watch stays registered
            Thread.sleep(2000);
        }

    }

}
--------------------------------------------------------------------------------
/Client/src/test/java/cn/mageek/client/ZooKeeperServerTest.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.client;
2 |
3 | import org.apache.curator.RetryPolicy;
4 | import org.apache.curator.framework.CuratorFramework;
5 | import org.apache.curator.framework.CuratorFrameworkFactory;
6 | import org.apache.curator.framework.recipes.cache.ChildData;
7 | import org.apache.curator.framework.recipes.cache.NodeCache;
8 | import org.apache.curator.retry.ExponentialBackoffRetry;
9 | import org.apache.zookeeper.CreateMode;
10 | import org.apache.zookeeper.data.Stat;
11 | import org.slf4j.Logger;
12 | import org.slf4j.LoggerFactory;
13 | import java.lang.management.ManagementFactory;
14 |
15 | /**
16 | * @author Mageek Chiu
17 | * @date 2018/5/10 0010:20:51
18 | */
/**
 * Raw-Curator counterpart of ZKServerTest: elects a master by creating an
 * ephemeral node, confirms who won, and re-runs the election whenever the
 * current master's node disappears.
 * @author Mageek Chiu
 * @date 2018/5/10 0010:20:51
 */
public class ZooKeeperServerTest {

    private static final Logger logger = LoggerFactory.getLogger(ZooKeeperServerTest.class);

    // ZooKeeper ensemble address list.
    public static final String CONNECT_ADDR = "127.0.0.1:2181,127.0.0.1:3181,127.0.0.1:4181";
    public static final int SESSION_TIMEOUT = 2000;
    public static final int CONNECTION_TIMEOUT = 8000;
    public static final String MASTER_NODE_PATH = "/example/masterNode";
    public static final RetryPolicy policy = new ExponentialBackoffRetry(1000,10);
    public static String thisNode;
    public static String masterNode;

    public static void main(String... arg) throws Exception {

        thisNode = ManagementFactory.getRuntimeMXBean().getName();
        logger.debug("my pid: {}",thisNode);

        // Build the connection
        CuratorFramework curator = CuratorFrameworkFactory
                .builder()
                .connectString(CONNECT_ADDR)
                .connectionTimeoutMs(CONNECTION_TIMEOUT)// connection creation timeout
                .sessionTimeoutMs(SESSION_TIMEOUT)// session timeout
                .retryPolicy(policy)
                .build();
        curator.start();

        // Create the node, i.e. try to become master (blocking)
        boolean result = becomeMaster(curator);
        if (result){
            logger.info("Successfully Became Master");
        }else {
            logger.info("Failed to Became Master");
        }

        // Read the result back and confirm it
        boolean result1 = confirm(curator);
        if (result1){
            logger.info("Confirmed, I am the Master,masterNode;{}",masterNode);
        }else {
            logger.info("Confirmed,I am the Standby,masterNode;{}",masterNode);
        }

        // Watch the master node; on loss, retry the election
        NodeCache cache = new NodeCache(curator, MASTER_NODE_PATH,false);
        cache.getListenable().addListener(()->{
            ChildData data = cache.getCurrentData();
            if (data != null){
                String path = data.getPath();
                Stat stat = data.getStat();
                String dataString = new String(data.getData());
                logger.debug("masterNode info, path:{},data:{},stat,{}",path,dataString,stat);
            }else {
                logger.info("masterNode is down, try to become Master");
                if (becomeMaster(curator)){
                    logger.info("Successfully tried to Became Master");
                }else {
                    logger.info("Failed to try to Became Master");
                }
            }
        });
        cache.start(true);

        while (true){// keep the process alive so the watch stays registered
            Thread.sleep(2000);
        }


    }

    // Confirm who the master is by reading the master node's content.
    private static boolean confirm(CuratorFramework curator) throws Exception {
        masterNode = new String(curator.getData().forPath(MASTER_NODE_PATH));
        logger.info("masterNode: {}",masterNode);
        // data = new String(curator.getData().forPath("/none"));// reading a missing key throws an exception outright
        return thisNode.equals(masterNode);

    }

    // Try to become master by creating the ephemeral master node.
    private static boolean becomeMaster(CuratorFramework curator) throws Exception {
//        AtomicBoolean result = new AtomicBoolean(false);
////
////        // Create the node asynchronously; does not throw on failure
////        Executor executor = Executors.newFixedThreadPool(1);
////        curator.create()
////                .creatingParentContainersIfNeeded()
////                .withMode(CreateMode.EPHEMERAL)
////                .inBackground((curatorFramework,event)->{
////                    // getResultCode is 0 on success, -110 on failure
////                    logger.debug("getType: {}, getResultCode: {}, getName: {}, getData: {}",event.getType(),event.getResultCode(),event.getName(),event.getData());
////                    if (event.getResultCode()==0) result.set(true);
////                },executor)
////                .forPath(MASTER_NODE_PATH,thisNode.getBytes());
////
////        // Wait for the result
////        Thread.sleep(5000);
////        return result.get();

        // Create the node synchronously; throws if it already exists
        String path= "";
        try {
            path = curator.create()
                    .creatingParentContainersIfNeeded()
                    .withMode(CreateMode.EPHEMERAL)
                    .forPath(MASTER_NODE_PATH,thisNode.getBytes());// the stored value should identify this server (and, later, the application servers behind it)
            logger.debug(path);
        }catch (Exception e){
            // NOTE(review): broad catch logging only the message; the stack
            // trace is lost, so non-"node exists" failures are hard to diagnose.
            logger.error(e.getMessage());
        }
        return MASTER_NODE_PATH.equals(path);
    }

}
--------------------------------------------------------------------------------
/Common/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 |
6 | CHKV
7 | cn.mageek
8 | 1.0-SNAPSHOT
9 | ../pom.xml
10 |
11 | 4.0.0
12 | Common
13 | jar
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 | org.apache.maven.plugins
24 | maven-compiler-plugin
25 | 3.7.0
26 |
27 | 1.8
28 | 1.8
29 |
30 |
31 |
32 | maven-assembly-plugin
33 | 3.1.0
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 | com.spotify
43 | dockerfile-maven-plugin
44 | 1.3.4
45 |
46 | ${docker.image.prefix}/${project.artifactId}
47 |
48 |
49 |
50 |
51 |
52 |
--------------------------------------------------------------------------------
/Common/src/main/java/cn/mageek/common/command/AbstractClientCommand.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.common.command;
2 |
3 | import cn.mageek.common.model.DataRequest;
4 | import cn.mageek.common.model.DataResponse;
5 | import cn.mageek.common.model.WebMsgObject;
6 | import org.slf4j.Logger;
7 | import org.slf4j.LoggerFactory;
8 |
9 | import java.util.Map;
10 |
/**
 * Abstract client-side command: one concrete subclass per request type.
 * Subclasses build outgoing {@link DataRequest}s and parse incoming {@link DataResponse}s.
 * @author Mageek Chiu
 * @date 2018/5/10 0007:19:27
 */
public abstract class AbstractClientCommand {

    // Client side: parse a received data response.
    public abstract void receive(DataResponse response);

    // Client side: assemble a DataRequest from the command name and its arguments.
    public abstract DataRequest send(String CMD,String... args);


}
26 |
--------------------------------------------------------------------------------
/Common/src/main/java/cn/mageek/common/command/AbstractDataNodeCommand.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.common.command;
2 |
3 | import cn.mageek.common.model.DataRequest;
4 | import cn.mageek.common.model.DataResponse;
5 | import cn.mageek.common.model.WebMsgObject;
6 | import org.slf4j.Logger;
7 | import org.slf4j.LoggerFactory;
8 |
9 | import java.util.Map;
10 |
/**
 * Server-side (DataNode) abstract command: one concrete subclass per request type.
 * @author Mageek Chiu
 * @date 2018/5/6 0007:16:27
 */
public abstract class AbstractDataNodeCommand {

    // Historical design kept for reference: commands used to share one data pool map
    // (a ConcurrentHashMap, since commands run on multiple threads):
    // public Map DATA_POOL;
    // public void setDataPool(Map dataPool){
    //     this.DATA_POOL = dataPool;
    ////     logger.debug("AbstractDataNodeCommand DATA_POOL initialized:{}",DATA_POOL.hashCode());
    // }

    // Handles a message received from a client; returns the response to send, or null when no reply is needed.
    public abstract DataResponse receive(DataRequest dataRequest);
    // Builds the message to push to a client from the given web message object.
    public abstract DataResponse send(WebMsgObject webMsgObject);

}
32 |
--------------------------------------------------------------------------------
/Common/src/main/java/cn/mageek/common/ha/HAThirdParty.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.common.ha;
2 |
/**
 * Base class for an HA coordination backend (e.g. ZooKeeper): combines master
 * election ({@link NameNodeMaster}) and master-change watching ({@link NameNodeWatcher}),
 * and caches the last known master node.
 * @author Mageek Chiu
 * @date 2018/5/21 0021:12:26
 */
public abstract class HAThirdParty implements NameNodeWatcher,NameNodeMaster{

    // The third-party component's connection:
    // public Object con;
    // Acquire the connection to the third-party coordination service.
    public abstract void getCon();
    // Release the connection to the third-party coordination service.
    public abstract void releaseCon();

    // Cached master node (ip:port); volatile because watcher callbacks update it from other threads.
    protected volatile String masterNode;
    // This node's own ip:port.
    protected String thisNode;

    // May be stale — this returns the cached value, not an instant lookup (see getInstantMaster).
    public String getMasterNode() {
        return masterNode;
    }

    public void setMasterNode(String masterNode) {
        this.masterNode = masterNode;
    }

    public String getThisNode() {
        return thisNode;
    }

    public void setThisNode(String thisNode) {
        this.thisNode = thisNode;
    }
}
38 |
--------------------------------------------------------------------------------
/Common/src/main/java/cn/mageek/common/ha/NameNodeMaster.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.common.ha;
2 |
/**
 * Master-election side of the HA contract.
 * @author Mageek Chiu
 * @date 2018/5/21 0021:12:15
 */
public interface NameNodeMaster {

    // Try to become master; returns true when this instance won the election.
    boolean becomeMaster();

    // Fetch the current master's ip:port directly from the coordination service (not the cache).
    String getInstantMaster();

}
16 |
--------------------------------------------------------------------------------
/Common/src/main/java/cn/mageek/common/ha/NameNodeWatcher.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.common.ha;
2 |
3 | import java.util.function.Consumer;
4 |
/**
 * Watching side of the HA contract.
 * @author Mageek Chiu
 * @date 2018/5/21 0021:12:09
 */
public interface NameNodeWatcher {

    // Start watching the NameNode master; on every change the callback receives the
    // latest master ip:port (see ZKThirdParty, which passes a String or null).
    // NOTE(review): raw Consumer — presumably Consumer<String>; generics appear to have
    // been stripped from this dump, confirm against the original source.
    void beginWatch(Consumer nameNodeChanged);

}
15 |
--------------------------------------------------------------------------------
/Common/src/main/java/cn/mageek/common/ha/ZKThirdParty.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.common.ha;
2 |
3 | import org.apache.curator.RetryPolicy;
4 | import org.apache.curator.framework.CuratorFramework;
5 | import org.apache.curator.framework.CuratorFrameworkFactory;
6 | import org.apache.curator.framework.recipes.cache.ChildData;
7 | import org.apache.curator.framework.recipes.cache.NodeCache;
8 | import org.apache.curator.retry.ExponentialBackoffRetry;
9 | import org.apache.zookeeper.CreateMode;
10 | import org.slf4j.Logger;
11 | import org.slf4j.LoggerFactory;
12 |
13 | import java.util.function.Consumer;
14 |
15 |
16 | /**
17 | * @author Mageek Chiu
18 | * @date 2018/5/21 0021:12:35
19 | */
20 | public class ZKThirdParty extends HAThirdParty {
21 |
22 | private static final Logger logger = LoggerFactory.getLogger(ZKThirdParty.class);
23 |
24 | private CuratorFramework curator ;
25 |
26 | private String connectAddr;
27 | private int sessionTimeout;
28 | private int connectionTimeout;
29 | private String masterNodePath;
30 | private RetryPolicy policy;
31 |
32 | public ZKThirdParty(String connectAddr, int sessionTimeout, int connectionTimeout, String masterNodePath,int baseSleepTimeMs,int maxRetries) {
33 | this.connectAddr = connectAddr;
34 | this.sessionTimeout = sessionTimeout;
35 | this.connectionTimeout = connectionTimeout;
36 | this.masterNodePath = masterNodePath;
37 | this.policy = new ExponentialBackoffRetry(baseSleepTimeMs,maxRetries);
38 | getCon();
39 | }
40 |
41 | @Override
42 | public void getCon() {
43 | CuratorFramework curator = CuratorFrameworkFactory
44 | .builder()
45 | .connectString(connectAddr)
46 | .connectionTimeoutMs(connectionTimeout)//连接创建超时时间
47 | .sessionTimeoutMs(sessionTimeout)//会话超时时间
48 | .retryPolicy(policy)
49 | .build();
50 | curator.start();
51 | this.curator = curator;
52 | }
53 |
54 | @Override
55 | public void releaseCon() {
56 | connectAddr = null;
57 | masterNode = null;
58 | policy = null;
59 | curator.close();
60 | }
61 |
62 | @Override
63 | public boolean becomeMaster() {
64 | String path= "";
65 | try {
66 | path = curator.create()
67 | .creatingParentContainersIfNeeded()
68 | .withMode(CreateMode.EPHEMERAL)
69 | .forPath(masterNodePath,thisNode.getBytes());// 存的值应该是本服务器的信息以及后面所有应用服务器的信息
70 | logger.debug(path);
71 | }catch (Exception e){
72 | logger.error("becomeMaster Exception: {}",e.getMessage());
73 | }
74 | return masterNodePath.equals(path);
75 | }
76 |
77 | @Override
78 | public String getInstantMaster() {
79 | try {
80 | setMasterNode(new String(curator.getData().forPath(masterNodePath)));
81 | }catch (Exception e){
82 | logger.error("getInstantMaster Exception:{} ",e.getMessage());
83 | setMasterNode(null);
84 | }
85 | return getMasterNode();
86 | }
87 |
88 | @Override
89 | public void beginWatch(Consumer nameNodeChanged) {
90 | NodeCache cache = new NodeCache(curator, masterNodePath,false);
91 | cache.getListenable().addListener(()->{
92 | ChildData data = cache.getCurrentData();
93 | if (data != null){
94 | // String path = data.getPath();
95 | // Stat stat = data.getStat();
96 | String dataString = new String(data.getData());
97 | // logger.debug("masterNode info, path:{},data:{},stat,{}",path,dataString,stat);
98 | setMasterNode(dataString);
99 | }else {
100 | // logger.info("masterNode is down, waiting");
101 | setMasterNode(null);
102 | }
103 | nameNodeChanged.accept(getMasterNode());
104 | });
105 | try {
106 | cache.start(true);
107 | } catch (Exception e) {
108 | logger.error("beginWatch Exception",e);
109 | }
110 | }
111 | }
112 |
--------------------------------------------------------------------------------
/Common/src/main/java/cn/mageek/common/helper/KillDataNode.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.common.helper;
2 |
3 | //import com.sun.jna.win32.W32APIFunctionMapper.*;
4 | ///**
5 | // * @author Mageek Chiu
6 | // * @date 2018/5/9 0009:19:53
7 | // */
8 | //public class KillDataNode {
9 | // public static void main(String args[]) {
10 | // int processId = 34567;
11 | // Kernel32.INSTANCE.AttachConsole(processId);
12 | // Kernel32.INSTANCE.GenerateConsoleCtrlEvent(Kernel32.CTRL_C_EVENT, 0);
13 | // }
14 | //}
15 |
--------------------------------------------------------------------------------
/Common/src/main/java/cn/mageek/common/helper/tcpclient.js:
--------------------------------------------------------------------------------
1 | var net = require('net');
2 |
3 | // var HOST = '127.0.0.1';var PORT = 10100;// 自己实现的兼容redis服务 会收到消息 +OK $3 gem $-1 :1 :0
4 | // var HOST = '1*3.*0*.*4.6*';var PORT = 6***;//一个远程实际的redis服务器,会收到消息: -NOAUTH Authentication required. 因为没发送auth 信息
5 | var HOST = '127.0.0.1';var PORT = 6379;// 本机redis服务器 会收到消息 +OK $3 gem $-1 :1 :0
6 |
7 | var client = new net.Socket();
8 |
9 | client.connect(PORT, HOST, function() {
10 | console.log('连接到: ' + HOST + ':' + PORT);
11 | // set
12 | var msg = "*3\r\n" +
13 | "$3\r\n" +
14 | "SET\r\n" +
15 | "$2\r\n" +
16 | "sm\r\n" +
17 | "$3\r\n" +
18 | "gem\r\n";
19 | console.log("发送"+msg);
20 | client.write( msg );
21 |
22 | // set
23 | msg = "*3\r\n" +
24 | "$6\r\n" +
25 | "EXPIRE\r\n" +
26 | "$2\r\n" +
27 | "sm\r\n" +
28 | "$2\r\n" +
29 | "80\r\n";
30 | console.log("发送"+msg);
31 | client.write( msg );
32 |
33 |
34 | // set 多个命令,一次发六条收到消息就会粘包
35 | // 收到消息: +OK
36 | // 收到消息: +OK
    // 收到消息: +OK
    // 收到消息: +OK
    // 收到消息: +OK
    // 收到消息: +OK
37 |
38 | // var msg = "*3\r\n" +
39 | // "$3\r\n" +
40 | // "SET\r\n" +
41 | // "$2\r\n" +
42 | // "sm\r\n" +
43 | // "$3\r\n" +
44 | // "gem\r\n\t\n"+
45 | // "*3\r\n" +
46 | // "$3\r\n" +
47 | // "SET\r\n" +
48 | // "$2\r\n" +
49 | // "sf\r\n" +
50 | // "$3\r\n" +
51 | // "gem\r\n\t\n"+
52 | // "*3\r\n" +
53 | // "$3\r\n" +
54 | // "SET\r\n" +
55 | // "$2\r\n" +
56 | // "rm\r\n" +
57 | // "$3\r\n" +
58 | // "gem\r\n\t\n"+
59 | // "*3\r\n" +
60 | // "$3\r\n" +
61 | // "SET\r\n" +
62 | // "$2\r\n" +
63 | // "tm\r\n" +
64 | // "$3\r\n" +
65 | // "gem\r\n\t\n"+
66 | // "*3\r\n" +
67 | // "$3\r\n" +
68 | // "SET\r\n" +
69 | // "$2\r\n" +
70 | // "sw\r\n" +
71 | // "$3\r\n" +
72 | // "gem\r\n\t\n"+
73 | // "*3\r\n" +
74 | // "$3\r\n" +
75 | // "SET\r\n" +
76 | // "$2\r\n" +
77 | // "sg\r\n" +
78 | // "$3\r\n" +
79 | // "gam\r\n";
80 | // console.log("发送"+msg);
81 | // client.write( msg );
82 |
83 |
84 | // // get 存在
85 | // setTimeout(function () {
86 | // msg = "*2\r\n" +
87 | // "$3\r\n" +
88 | // "GET\r\n" +
89 | // "$2\r\n" +
90 | // "sm\r\n";
91 | // console.log("发送"+msg);
92 | // client.write(msg);
93 | // },1500);
94 | //
95 | // // get 不存在
96 | // setTimeout(function () {
97 | // msg = "*2\r\n" +
98 | // "$3\r\n" +
99 | // "GET\r\n" +
100 | // "$2\r\n" +
101 | // "em\r\n";
102 | // console.log("发送"+msg);
103 | // client.write(msg);
104 | // },2300);
105 | //
106 | // // del 存在
107 | // setTimeout(function () {
108 | // msg = "*2\r\n" +
109 | // "$3\r\n" +
110 | // "del\r\n" +
111 | // "$2\r\n" +
112 | // "sm\r\n";
113 | // console.log("发送"+msg);
114 | // client.write(msg);
115 | // },3200);
116 | //
117 | // // del 不存在
118 | // setTimeout(function () {
119 | // msg = "*2\r\n" +
120 | // "$3\r\n" +
121 | // "del\r\n" +
122 | // "$2\r\n" +
123 | // "sg\r\n";
124 | // console.log("发送"+msg);
125 | // client.write(msg);
126 | // },4200);
127 |
128 | // COMMAND
129 | // setTimeout(function () {
130 | // msg = "*1\r\n" +
131 | // "$7\r\n" +
132 | // "COMMAND\r\n" +
133 | // console.log("发送"+msg);
134 | // client.write(msg);
135 | // },5200);// 返回所有命令 很长很长
136 | });
137 |
// "data" event: handle each chunk the server sends back
client.on('data', function(data) {
    data = data.toString("utf8");
    if (data.length>160)
        console.log('收到缩略消息: ' + data.substring(0,160));
    else
        // fix: the replacement string was a broken (unterminated) literal spanning two
        // lines; restore it as an explicit "\n" escape with the same runtime value
        console.log('收到消息: ' + data.replace(/\r\n/g,"\n"));
    // console.log(data.length)
});
148 |
// "error" event: report connection failures
client.on('error', function(e) {
    console.log('连接出错'+e);
});

// "close" event: fired when the connection is torn down
client.on('close', function() {
    console.log('连接关闭');
});
157 |
--------------------------------------------------------------------------------
/Common/src/main/java/cn/mageek/common/log/keep.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MageekChiu/CHKV/82204cb94fee417e983c9cd602031ef0d8f2948b/Common/src/main/java/cn/mageek/common/log/keep.txt
--------------------------------------------------------------------------------
/Common/src/main/java/cn/mageek/common/model/DataRequest.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.common.model;
2 |
3 | import static cn.mageek.common.res.Constants.IDSplitter;
4 |
/**
 * A client request carrying a redis-style command.
 * @author Mageek Chiu
 * @date 2018/5/6 0006:13:28
 */
public class DataRequest {

    private String command;
    private String key;
    private String value;
    // data type of the value; defaults to STRING (see DataType)
    private String dataType = DataType.A_STRING;

    private String ID;// required to pair each response with its request

    // Generates the ID at construction time as currentTimeMillis + IDSplitter + hashCode().
    // NOTE(review): String.valueOf is redundant here — the concatenation is already a String.
    public DataRequest(String command, String key, String value) {
        this.command = command;
        this.key = key;
        this.value = value;

        setID(String.valueOf(System.currentTimeMillis()+IDSplitter+hashCode()));// set the ID when constructing
    }

    // Used when the ID is already known, e.g. the Decoder parses it from the wire.
    public DataRequest(String command, String key, String value, String ID) {
        this.command = command;
        this.key = key;
        this.value = value;
        this.ID = ID;
    }

    public String getCommand() {
        return command;
    }

    public void setCommand(String command) {
        this.command = command;
    }

    public String getKey() {
        return key;
    }

    public void setKey(String key) {
        this.key = key;
    }

    public String getValue() {
        return value;
    }

    public void setValue(String value) {
        this.value = value;
    }

    public String getDataType() {
        return dataType;
    }

    public void setDataType(String dataType) {
        this.dataType = dataType;
    }

    public String getID() {
        return ID;
    }

    public void setID(String ID) {
        this.ID = ID;
    }

    @Override
    public String toString() {
        return "DataRequest -- command: "+command+", key: "+key+", value: "+value+", dataType: "+dataType+", ID: "+ID;
    }
}
79 |
--------------------------------------------------------------------------------
/Common/src/main/java/cn/mageek/common/model/DataResponse.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.common.model;
2 |
3 | import java.io.Serializable;
4 | import java.util.List;
5 |
6 | /**
7 | * 客户端获得数据
8 | * @author Mageek Chiu
9 | * @date 2018/5/6 0006:13:28
10 | */
11 | public class DataResponse implements Serializable {
12 | private String lineType ;// +OK,-error msg,:number,$length\r\nstring\r\n
13 | private String msg;
14 | private List msgList;
15 |
16 | private String ID;// 要实现请求响应的对应,必须要ID
17 |
18 | public DataResponse(String lineType, String msg) {
19 | this.lineType = lineType;
20 | this.msg = msg;
21 | }
22 |
23 | public DataResponse(String lineType, String msg, String ID) {
24 | this.lineType = lineType;
25 | this.msg = msg;
26 | this.ID = ID;
27 | }
28 |
29 | public DataResponse(String lineType, List msgList) {
30 | this.lineType = lineType;
31 | this.msgList = msgList;
32 | }
33 |
34 | public DataResponse(String lineType, List msgList, String ID) {
35 | this.lineType = lineType;
36 | this.msgList = msgList;
37 | this.ID = ID;
38 | }
39 |
40 | public DataResponse(String lineType, String msg, List msgList, String ID) {
41 | this.lineType = lineType;
42 | this.msg = msg;
43 | this.msgList = msgList;
44 | this.ID = ID;
45 | }
46 |
47 | public List getMsgList() {
48 | return msgList;
49 | }
50 |
51 | public void setMsgList(List msgList) {
52 | this.msgList = msgList;
53 | }
54 |
55 | public String getLineType() {
56 | return lineType;
57 | }
58 |
59 | public void setLineType(String lineType) {
60 | this.lineType = lineType;
61 | }
62 |
63 | public String getMsg() {
64 | return msg;
65 | }
66 |
67 | public void setMsg(String msg) {
68 | this.msg = msg;
69 | }
70 |
71 | public String getID() {
72 | return ID;
73 | }
74 |
75 | public void setID(String ID) {
76 | this.ID = ID;
77 | }
78 |
79 | @Override
80 | public String toString() {
81 | return "DataResponse -- lineType:"+getLineType()+",msg:"+getMsg()+",msgList:"+getMsgList()+", ID: "+ID;
82 | }
83 | }
84 |
--------------------------------------------------------------------------------
/Common/src/main/java/cn/mageek/common/model/DataType.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.common.model;
2 |
/**
 * Data type tags for stored values (redis-style type names);
 * DataRequest defaults to {@link #A_STRING}.
 * @author Mageek Chiu
 * @date 2018/5/6 0006:13:41
 */
public class DataType {
    public static final String A_STRING = "STRING";
    public static final String A_SET = "SET";
    public static final String A_LIST = "LIST";
    public static final String A_HASH = "HASH";
    public static final String A_ZSET = "ZSET";
}
14 |
--------------------------------------------------------------------------------
/Common/src/main/java/cn/mageek/common/model/HeartbeatRequest.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.common.model;
2 |
3 | import java.io.Serializable;
4 |
/**
 * Heartbeat report sent from a DataNode to the NameNode.
 * @author Mageek Chiu
 * @date 2018/5/6 0006:13:29
 */
public class HeartbeatRequest implements Serializable {

    // Client-facing ip:port of this DataNode; also reused when nodes migrate data between each other.
    private String IPPort;
    // Lifecycle status, one of HeartbeatType: ONLINE, OFFLINE, RUNNING.
    private String status;
    // Free memory on this DataNode, in bytes.
    private long memoryAvailable;

    public HeartbeatRequest(String IPPort, String status, long memoryAvailable) {
        this.IPPort = IPPort;
        this.status = status;
        this.memoryAvailable = memoryAvailable;
    }

    public String getIPPort() {
        return IPPort;
    }

    public void setIPPort(String IPPort) {
        this.IPPort = IPPort;
    }

    public String getStatus() {
        return status;
    }

    public void setStatus(String status) {
        this.status = status;
    }

    public long getMemoryAvailable() {
        return memoryAvailable;
    }

    public void setMemoryAvailable(long memoryAvailable) {
        this.memoryAvailable = memoryAvailable;
    }

    @Override
    public String toString() {
        return "HeartbeatRequest -- IPPort:" + IPPort
                + ",memoryAvailable Byte:" + memoryAvailable
                + ",status:" + status;
    }
}
56 |
--------------------------------------------------------------------------------
/Common/src/main/java/cn/mageek/common/model/HeartbeatResponse.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.common.model;
2 |
3 | import java.io.Serializable;
4 |
/**
 * NameNode's reply to a DataNode heartbeat.
 * @author Mageek Chiu
 * @date 2018/5/6 0006:13:29
 */
public class HeartbeatResponse implements Serializable {

    // Whether the node should keep running.
    private boolean ok;
    // Current number of DataNodes.
    private int dataNodeNumber;
    // When non-null, data must be migrated to this node; transfers are batched
    // (multiple entries per packet) to cut down network round trips.
    private String IPPort;

    public HeartbeatResponse(boolean ok, int dataNodeNumber, String IPPort) {
        this.ok = ok;
        this.dataNodeNumber = dataNodeNumber;
        this.IPPort = IPPort;
    }

    public boolean isOk() {
        return ok;
    }

    public void setOk(boolean ok) {
        this.ok = ok;
    }

    public int getDataNodeNumber() {
        return dataNodeNumber;
    }

    public void setDataNodeNumber(int dataNodeNumber) {
        this.dataNodeNumber = dataNodeNumber;
    }

    public String getIPPort() {
        return IPPort;
    }

    public void setIPPort(String IPPort) {
        this.IPPort = IPPort;
    }

    @Override
    public String toString() {
        // keeps the historical "dataNodeNumber :" spacing so log output is unchanged
        return "HeartbeatResponse -- ok:" + ok + ",dataNodeNumber :" + dataNodeNumber + ",IPPort:" + IPPort;
    }
}
51 |
--------------------------------------------------------------------------------
/Common/src/main/java/cn/mageek/common/model/HeartbeatType.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.common.model;
2 |
/**
 * DataNode lifecycle states carried in heartbeat status fields.
 * @author Mageek Chiu
 * @date 2018/5/8 0008:15:30
 */
public class HeartbeatType {
    public static final String ONLINE = "ONLINE";// requesting to come online
    public static final String OFFLINE = "OFFLINE";// requesting to go offline
    public static final String RUNNING = "RUNNING";// running normally
    public static final String TRANSFERRING = "TRANSFERRING";// migrating data

    // Commented-out candidate states, kept for reference:
    // public static final String OFFTransfering = "OFFTransfering";// migrating while going offline
    // public static final String ONTransfering = "ONTransfering";// previous node migrating while coming online
    // public static final String SYNCTransfering = "SYNCTransfering";// master-slave replication migration
}
17 |
--------------------------------------------------------------------------------
/Common/src/main/java/cn/mageek/common/model/LineType.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.common.model;
2 |
/**
 * Leading characters of the redis wire protocol's line types.
 * @author Mageek Chiu
 * @date 2018/5/6 0006:13:32
 */
public class LineType {

    public static final String SINGLE_RIGHT = "+"; // success status line; the message is the rest of the line after '+'
    public static final String SINGLE_ERROR = "-"; // error line; the message is the rest of the line after '-'
    public static final String LINE_NUM = "*"; // number of lines in the message body (excluding this line) follows '*'
    public static final String NEXT_LEN = "$"; // length of the next line's data (excluding the trailing \r\n) follows '$'
    public static final String INT_NUM = ":"; // an integer value follows ':'

}
16 |
--------------------------------------------------------------------------------
/Common/src/main/java/cn/mageek/common/model/WatchRequest.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.common.model;
2 |
3 | import java.io.Serializable;
4 |
/**
 * Client-to-NameNode watch registration.
 * @author Mageek Chiu
 * @date 2018/5/6 0006:13:28
 */
public class WatchRequest implements Serializable {

    // When true the NameNode replies with the hash ring immediately;
    // when false it stays silent and only pushes once the ring changes.
    private boolean immediately;

    public WatchRequest(boolean immediately) {
        this.immediately = immediately;
    }

    public boolean isImmediately() {
        return immediately;
    }

    public void setImmediately(boolean immediately) {
        this.immediately = immediately;
    }

    @Override
    public String toString() {
        return "WatchRequest -- immediately:" + immediately;
    }
}
29 |
--------------------------------------------------------------------------------
/Common/src/main/java/cn/mageek/common/model/WatchResponse.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.common.model;
2 |
3 | import java.io.Serializable;
4 | import java.util.concurrent.ConcurrentSkipListMap;
5 |
/**
 * NameNode-to-client push of the current DataNode hash ring.
 * @author Mageek Chiu
 * @date 2018/5/6 0006:13:28
 */
public class WatchResponse implements Serializable {

    // The ring of DataNodes (hash -> ip:port).
    private ConcurrentSkipListMap hashCircle;

    public WatchResponse(ConcurrentSkipListMap hashCircle) {
        this.hashCircle = hashCircle;
    }

    public ConcurrentSkipListMap getHashCircle() {
        return hashCircle;
    }

    public void setHashCircle(ConcurrentSkipListMap hashCircle) {
        this.hashCircle = hashCircle;
    }

    @Override
    public String toString() {
        return "WatchResponse -- hashCircle:" + hashCircle;
    }
}
30 |
--------------------------------------------------------------------------------
/Common/src/main/java/cn/mageek/common/model/WebMsgObject.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.common.model;
2 |
3 | import java.util.Map;
4 |
/**
 * Message object pushed from the web layer.
 * @author Mageek Chiu
 * @date 2018/3/8 0008:20:14
 */
public class WebMsgObject {

    // Client identifier.
    private String clientId;
    // Control code / command name.
    private String command;
    // Command parameters.
    private Map data;
    // Push reference tag.
    private String ref;

    public String getClientId() {
        return clientId;
    }

    public void setClientId(String clientId) {
        this.clientId = clientId;
    }

    public String getCommand() {
        return command;
    }

    public void setCommand(String command) {
        this.command = command;
    }

    public Map getData() {
        return data;
    }

    public void setData(Map data) {
        this.data = data;
    }

    public String getRef() {
        return ref;
    }

    public void setRef(String ref) {
        this.ref = ref;
    }

    @Override
    public String toString() {
        // data is intentionally omitted, matching the established output format
        StringBuilder sb = new StringBuilder();
        sb.append("\tclientId:").append(clientId).append('\n');
        sb.append("\tcommand:").append(command).append('\n');
        sb.append("\tref:").append(ref).append('\n');
        return sb.toString();
    }
}
67 |
--------------------------------------------------------------------------------
/Common/src/main/java/cn/mageek/common/model/proto/WatchReqProto.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 | option java_package = "cn.mageek.common.model";
3 | option java_outer_classname = "WatchReqProto";
4 |
5 | /*
6 | F:
7 | F:\workspace\java\CHKV\Common\src\main\java\cn\mageek\common\model\proto>
8 |
9 | D:/proto/bin/protoc.exe -I=./ --java_out=../../../../../ ./WatchReqProto.proto
10 |
11 | */
12 |
13 | message WatchReq {
14 | bool immediately = 1;
15 | }
16 |
17 |
--------------------------------------------------------------------------------
/Common/src/main/java/cn/mageek/common/model/proto/WatchRespProto.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 | option java_package = "cn.mageek.common.model";
3 | option java_outer_classname = "WatchRespProto";
4 |
5 | /*
6 | F:
7 | F:\workspace\java\CHKV\Common\src\main\java\cn\mageek\common\model\proto>
8 |
9 | D:/proto/bin/protoc.exe -I=./ --java_out=../../../../../ ./WatchRespProto.proto
10 |
11 | */
// Hash ring pushed to clients: hash value of a server -> its "ip:port".
// fix: the map's type arguments were missing (invalid proto3); int32 keys match the
// Integer hashes used by ConsistHash, string values are the server addresses.
message WatchResp {
    map<int32, string> hashCircle = 1;
}
15 |
--------------------------------------------------------------------------------
/Common/src/main/java/cn/mageek/common/res/Constants.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.common.res;
2 |
/**
 * Protocol and shared constants.
 * @author Mageek Chiu
 * @date 2018/5/9 0009:9:26
 */
public class Constants {
    public static final String innerSplit = "\r\n";// separator between the lines of a single command
    public static final String outerSplit = "\t\n";// separator between different commands in one packet

    // How many entries to transfer per migration batch.
    // NOTE(review): this is a count but declared double — presumably to force floating-point
    // arithmetic at call sites; confirm before changing the type.
    public static final double pageSize = 5;

    public static final String offlineKey = "*#*OFFLINE*#*";
    public static final String offlineValue = "true";
    public static final String onlineValue = "false";

    // Separator inside a request ID (timestamp + splitter + hashCode, see DataRequest).
    public static final String IDSplitter = "__@__";

    // Separator inside "ip:port" strings.
    public static final String IPSplitter = ":";
}
21 |
--------------------------------------------------------------------------------
/Common/src/main/java/cn/mageek/common/util/ConsistHash.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.common.util;
2 |
3 | import org.slf4j.Logger;
4 | import org.slf4j.LoggerFactory;
5 |
6 | import java.util.Iterator;
7 | import java.util.Map;
8 | import java.util.SortedMap;
9 | import java.util.TreeMap;
10 | import java.util.concurrent.ConcurrentSkipListMap;
11 |
12 | /**
13 | * 一致性hash 辅助类
14 | * @author Mageek Chiu
15 | * @date 2018/5/5 0005:20:17
16 | */
17 | public class ConsistHash {
18 | private static final Logger logger = LoggerFactory.getLogger(ConsistHash.class);
19 |
20 | /**
21 | * 将dataNode 加入到 hash 环中
22 | * @param IPPort 刚加入的dataNode节点
23 | * @param sortedServerMap dataNode hash 环
24 | */
25 | public static void circleAdd(ConcurrentSkipListMap sortedServerMap,String IPPort) {
26 | //key表示服务器的hash值,value表示服务器 ip:port
27 | int hash = getHash(IPPort);
28 | logger.debug("{} 入环, Hash {}",IPPort,hash);
29 | sortedServerMap.put(hash,IPPort);
30 | }
31 | /**
32 | * 将dataNode从hash 环中删除
33 | * @param IPPort 刚加入的dataNode节点
34 | * @param sortedServerMap dataNode hash 环
35 | */
36 | public static void cirlceDel(ConcurrentSkipListMap sortedServerMap,String IPPort){
37 | sortedServerMap.forEach((k,v)->{
38 | if (v.equals(IPPort)) {sortedServerMap.remove(k);}
39 | });
40 | }
41 |
42 | /**
43 | * 得到数据根据key应当路由到的server结点,也就是 >= hash(key) 的server
44 | * 如果是server节点,就要不能包含,防止找到本身
45 | * @param sortedServerMap server 的hash 环
46 | * @param key data的key 或者 server 的 ip:port
47 | * @param isServer 是server还是data
48 | * @return 服务器节点
49 | */
50 | public static String getServer(ConcurrentSkipListMap sortedServerMap,String key, boolean isServer) {
51 | if (sortedServerMap.isEmpty()) return key;// 环为空,返回自己就行了
52 | //得到该key的hash值
53 | int keyHash = getHash(key);
54 | //得到 >= 该Hash值的所有节点构成的子map
55 | SortedMap subMap = sortedServerMap.tailMap(keyHash,!isServer);// submap中所有节点的hash值 >= keyHash 是server就不包含,不是server是data就包含
56 | if(subMap.isEmpty()){
57 | //如果没有比该key的hash值大的,则从第一个node开始
58 | Integer i = sortedServerMap.firstKey();
59 | //返回对应的服务器
60 | return sortedServerMap.get(i);
61 | }else{
62 | //第一个Key就是顺时针过去离node最近的那个结点
63 | Integer i = subMap.firstKey();
64 | //返回对应的服务器
65 | return subMap.get(i);
66 | }
67 | }
68 |
69 |
70 |
71 | /**
72 | * 使用FNV1_32_HASH算法计算字符串的Hash值
73 | * @param str 服务器 ip:port 或者数据的 key
74 | * @return hash 值
75 | */
76 | public static int getHash(String str) {
77 | final int p = 16777619;
78 | int hash = (int) 2166136261L;
79 | for (int i = 0; i < str.length(); i++)
80 | hash = (hash ^ str.charAt(i)) * p;
81 | hash += hash << 13;
82 | hash ^= hash >> 7;
83 | hash += hash << 3;
84 | hash ^= hash >> 17;
85 | hash += hash << 5;
86 | // 如果算出来的值为负数则取其绝对值
87 | if (hash < 0)
88 | hash = Math.abs(hash);
89 | return hash;
90 | }
91 |
92 | }
93 |
94 | //public class ConsistentHashingWithVirtualNode {
95 | //
96 | // //待添加入Hash环的服务器列表
97 | // private static String[] servers = {"192.168.0.0:111", "192.168.0.1:111", "192.168.0.2:111",
98 | // "192.168.0.3:111", "192.168.0.4:111"};
99 | //
100 | // //真实结点列表,考虑到服务器上线、下线的场景,即添加、删除的场景会比较频繁,这里使用LinkedList会更好
101 | // private static List realNodes = new LinkedList();
102 | //
103 | // //虚拟节点,key表示虚拟节点的hash值,value表示虚拟节点的名称
104 | // private static SortedMap virtualNodes = new TreeMap();
105 | //
106 | // //虚拟节点的数目,这里写死,为了演示需要,一个真实结点对应5个虚拟节点
107 | // private static final int VIRTUAL_NODES = 5;
108 | //
109 | // static{
110 | // //先把原始的服务器添加到真实结点列表中
111 | // for(int i=0; i> 7;
134 | // hash += hash << 3;
135 | // hash ^= hash >> 17;
136 | // hash += hash << 5;
137 | //
138 | // // 如果算出来的值为负数则取其绝对值
139 | // if (hash < 0)
140 | // hash = Math.abs(hash);
141 | // return hash;
142 | // }
143 | //
144 | // //得到应当路由到的结点
145 | // private static String getServer(String key){
146 | // //得到该key的hash值
147 | // int hash = getHash(key);
148 | // // 得到大于该Hash值的所有Map
149 | // SortedMap subMap = virtualNodes.tailMap(hash);
150 | // String virtualNode;
151 | // if(subMap.isEmpty()){
152 | // //如果没有比该key的hash值大的,则从第一个node开始
153 | // Integer i = virtualNodes.firstKey();
154 | // //返回对应的服务器
155 | // virtualNode = virtualNodes.get(i);
156 | // }else{
157 | // //第一个Key就是顺时针过去离node最近的那个结点
158 | // Integer i = subMap.firstKey();
159 | // //返回对应的服务器
160 | // virtualNode = subMap.get(i);
161 | // }
162 | // //virtualNode虚拟节点名称要截取一下
163 | // if(StringUtils.isNotBlank(virtualNode)){
164 | // return virtualNode.substring(0, virtualNode.indexOf("&&"));
165 | // }
166 | // return null;
167 | // }
168 | //
169 | // public static void main(String[] args){
170 | // String[] keys = {"太阳", "月亮", "星星"};
171 | // for(int i=0; i bytesToDataRequests(ByteBuf in) throws Exception{
33 | String msg = in.toString(CharsetUtil.UTF_8);
34 | if (in.readableBytes() getDataRequests(String s) throws Exception {
42 |
43 | List dataRequestList = new LinkedList<>();
44 |
45 | String[] msgs = s.split(outerSplit);// 多个命令之间用 \t\n 分割 // 这个不属于redis,是我为了自己方便加的,redis的没有/t/n 这里也会解析出一个命令
46 | logger.debug("Decoder 获得 {} 条命令",msgs.length);
47 |
48 | for (String msg : msgs) {
49 | String[] strings = msg.split(innerSplit);// 单个命令内部用 \r\n 分割
50 |
51 | // 简单的校验
52 | int allLineNumber = strings.length;
53 | String ID = strings[allLineNumber-1];// Client发过来的才是ID,redis 协议没有这一段,所以可能是任何字段
54 | int ckvLineNumber = Integer.parseInt(strings[0].substring(1));
55 | // if (allLineNumber != 7 && allLineNumber != 5 && allLineNumber != 3) throw new Exception("all line number Exception");// 报文总行数
56 | // if (ckvLineNumber != 3 && ckvLineNumber != 2 && ckvLineNumber != 1) throw new Exception("command、key、value line number Exception");// command、key、value 的行数
57 |
58 | // command
59 | String command = strings[2].toUpperCase();// 命令全部转大写
60 | if (Integer.parseInt(strings[1].substring(1)) != command.length()) throw new Exception("command length Exception");
61 | if (command.equals("COMMAND")){// 没有 key
62 | dataRequestList.add(new DataRequest(command,"none","none",ID));
63 | continue;
64 | }
65 |
66 | // command key
67 | String key = strings[4];
68 | if (Integer.parseInt(strings[3].substring(1)) != key.length()) throw new Exception("key length Exception");
69 | String value = "none";// 没有 value
70 |
71 | // command key value
72 | if (allLineNumber >= 7){// 有 value
73 | value = strings[6];
74 | if (Integer.parseInt(strings[5].substring(1)) != value.length()) throw new Exception("value length Exception");
75 | }
76 |
77 | dataRequestList.add(new DataRequest(command,key,value,ID));
78 | }
79 | return dataRequestList;
80 | }
81 |
82 | /**
83 | * 将接收到的bit数据解析为消息对象DataResponse的列表,是redis协议的子集
84 | * @param in 输入buffer
85 | * @return DataRequest
86 | */
87 | public static List bytesToDataResponse(ByteBuf in) throws Exception{
88 |
89 | List responses = new LinkedList<>();
90 |
91 | String dataAll = in.toString(CharsetUtil.UTF_8);
92 | String[] datas = dataAll.split(outerSplit);
93 | // logger.debug("Decoder 获得 {} 条响应,{}",datas.length,dataAll);
94 |
95 | for (String data: datas){
96 | String[] lines = data.split(innerSplit);
97 | String ID = lines[lines.length-1];// Client请求的响应才是ID,redis 协议没有这一段,所以可能是任何字段
98 | String lineType = data.substring(0,1);
99 | logger.debug("Decoder 本条响应,lineType:{}",lineType);// keys 命令结果太长,导致DataHandler的channelRead触发了两次,这里自然解码出错,看长度正好在512 byte截断了,也就是分包了(与粘包相反),所以应该是可以配置的,结论是 RCVBUF_ALLOCATOR
100 | DataResponse response;
101 | String msg = "";
102 | List msgList = new LinkedList<>();
103 | switch (lineType){
104 | case SINGLE_RIGHT:
105 | case SINGLE_ERROR:
106 | case INT_NUM:
107 | msg = lines[0].substring(1);//+OK\r\n123123__@@__4r53243\r\n
108 | break;
109 | case NEXT_LEN:
110 | if( data.contains("-1")) msg="-1";// $-1\r\n123123__@@__4r53243\r\n
111 | else msg = lines[1];// $3\r\nGGG\r\n123123__@@__4r53243\r\n
112 | break;
113 | case LINE_NUM:
114 | // *3\r\n$2\r\nff\r\n$3\r\nddd\r\n$4\r\ntttt\r\n123123__@@__4r53243\r\n
115 | for (int i = 2;i 是因为解码 的时候把多个命令拆分了,分成多次回复,这样每次就只需要回复一个DataResponse就行,redis里面是有Bulk Reply的,也就是LINE_NUM
31 | * @param dataResponse 待编码对象
32 | * @return 返回buffer
33 | */
public static ByteBuf dataResponseToBytes(DataResponse dataResponse){
    // Default reply; every switch arm below overwrites it for its own line type.
    String response = "+OK"+ innerSplit;
    String lineType = dataResponse.getLineType(),msg = dataResponse.getMsg(),ID = dataResponse.getID();
    // Encode according to the Redis (RESP) reply formats.
    switch (dataResponse.getLineType()){
        case SINGLE_RIGHT://+
        case SINGLE_ERROR://-
        case INT_NUM:// :
            response = lineType+msg+ innerSplit;// these three reply kinds share one shape: <type><msg>\r\n
            break;
        case NEXT_LEN:// $
            if( msg.equals("-1")) response = lineType+"-1"+ innerSplit;// "$-1\r\n" is the nil bulk reply; every line ends with \r\n
            else response = lineType+msg.length()+ innerSplit +msg+ innerSplit;// "$<len>\r\n<msg>\r\n"
            break;
        case LINE_NUM:// *
            // Multi-bulk reply: "*<count>\r\n" followed by one "$<len>\r\n<item>\r\n" per element.
            // Each element carries its own length prefix, so nothing extra is appended here.
            List msgList = dataResponse.getMsgList();
            response = lineType+msgList.size()+innerSplit;
            StringBuilder builder = new StringBuilder();
            msgList.forEach((v)->{
                builder.append("$").append(v.length()).append(innerSplit).append(v).append(innerSplit);
            });
            response += builder.toString();
            break;
    }
    if (ID.contains(IDSplitter)){// reply to a Client-originated request: append the request ID and the inter-command separator so the client can split sticky (concatenated) packets
        response += (ID +innerSplit+outerSplit);
    }
    // NOTE(review): the replacement literal below appears garbled in extraction (the
    // second argument likely lost a token, e.g. an HTML-like "<br>") — confirm against VCS.
    logger.debug("Encoded response:{}",response.replace("\r\n","
"));
    return Unpooled.copiedBuffer(response,CharsetUtil.UTF_8);
}
65 |
/**
 * Encodes a list of DataRequest objects into wire bytes: Redis request framing
 * ("*<argc>\r\n" then "$<len>\r\n<arg>\r\n" per argument), plus a trailing
 * request ID line and an inter-command separator that are NOT part of the
 * Redis protocol (used by this project's client to match replies and split packets).
 * @param dataRequests requests to encode
 * @return buffer holding all encoded requests concatenated
 */
public static ByteBuf dataRequestToBytes(List dataRequests){
    StringBuilder requests = new StringBuilder();// not thread-safe, but method-local so it does not need to be
    for (DataRequest dataRequest : dataRequests) {
        String request = "";
        String key = dataRequest.getKey();
        String value = dataRequest.getValue();
        String command = dataRequest.getCommand();
        switch (command){
            // command key value -> 3 arguments
            case "SET":
            case "SETNX":
            case "EXPIRE":
            case "APPEND":
            case "INCRBY":
            case "DECRBY":
                request = "*3"+ innerSplit +"$"+command.length()+ innerSplit +command+ innerSplit +"$"+key.length()+ innerSplit +key+ innerSplit +"$"+value.length()+ innerSplit +value+ innerSplit ;
                break;
            // command key -> 2 arguments
            case "GET":
            case "INCR":
            case "DECR":
            case "KEYS":
            case "DEL":
                request = "*2"+ innerSplit +"$"+command.length()+ innerSplit +command+ innerSplit +"$"+key.length()+ innerSplit +key+ innerSplit ;
                break;
            // bare command -> 1 argument
            case "COMMAND":
                request = "*1"+ innerSplit +"$"+command.length()+ innerSplit +command+ innerSplit ;
                break;
            // NOTE(review): an unrecognized command falls through with request == "" so only
            // the ID and separator are emitted — confirm that is intended.
        }
        request += (dataRequest.getID()+innerSplit);// append the request ID; deliberately no length prefix (not Redis protocol)
        request += outerSplit;// separator between consecutive commands
        requests.append(request);
    }
    String finalString = requests.toString();
    return Unpooled.copiedBuffer(finalString,CharsetUtil.UTF_8);
}
110 |
111 | /**
112 | * 将消息对象HeartbeatResponse编码为bit数据
113 | * @param heartbeatResponse 待编码对象
114 | * @return 返回buffer
115 | */
116 | public static ByteBuf heartbeatResponseToBytes(HeartbeatResponse heartbeatResponse){
117 | String response = "";
118 | return Unpooled.copiedBuffer(response,CharsetUtil.UTF_8);
119 | }
120 |
121 | /**
122 | * 将消息对象HeartbeatRequest编码为bit数据
123 | * @param heartbeatRequest 待编码对象
124 | * @return 返回buffer
125 | */
126 | public static ByteBuf heartbeatRequestToBytes(HeartbeatRequest heartbeatRequest){
127 | String response = "";
128 | return Unpooled.copiedBuffer(response,CharsetUtil.UTF_8);
129 | }
130 | }
131 |
--------------------------------------------------------------------------------
/Common/src/main/java/cn/mageek/common/util/HAHelper.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.common.util;
2 |
3 | import static cn.mageek.common.res.Constants.IPSplitter;
4 |
5 | /**
6 | * @author Mageek Chiu
7 | * @date 2018/5/22 0022:15:35
8 | */
9 | public class HAHelper {
10 |
11 | private String[] strings;
12 |
13 | public static String getNodeString(String dataNodeIP,String dataNodePort,String clientIP,String clientPort){
14 | return dataNodeIP+IPSplitter+dataNodePort+IPSplitter+clientIP+IPSplitter+clientPort;
15 | }
16 |
17 | public HAHelper(String s) {
18 | strings = s.split(IPSplitter);
19 | }
20 |
21 | public String getDataNodeIP(){
22 | return strings[0];
23 | }
24 |
25 | public String getDataNodePort(){
26 | return strings[1];
27 | }
28 |
29 | public String getClientIP(){
30 | return strings[2];
31 | }
32 |
33 | public String getClientPort(){
34 | return strings[3];
35 | }
36 | }
37 |
--------------------------------------------------------------------------------
/Common/src/main/java/cn/mageek/common/util/PropertyLoader.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.common.util;
2 |
3 | import java.util.Properties;
4 |
5 | /**
6 | * @author Mageek Chiu
7 | * @date 2018/5/8 0008:13:04
8 | */
public class PropertyLoader {

    /**
     * Reads a configuration value, letting a JVM system property override the file.
     * NOTE(review): the original Chinese comment said "environment variable", but the
     * code reads JVM system properties (-Dkey=value) via System.getProperty, not
     * System.getenv — documented as implemented to avoid a behavior change.
     * @param pop properties loaded from the config file (fallback source)
     * @param key property key
     * @return the system-property value if set, else the file value, else null
     */
    public static String load(Properties pop, String key) {
        String value = System.getProperty(key);
        return value == null ? pop.getProperty(key) : value;
    }

    /**
     * Resolves the worker-thread count.
     * A configured value > 0 wins; otherwise falls back to (available processors - 1),
     * clamped to at least 1 so a single-core host still gets a worker thread
     * (the previous code could return 0 there).
     * @param pop properties loaded from the config file
     * @param key property key holding the configured thread count
     * @return a thread count >= 1
     * @throws NumberFormatException if the configured value is missing or not an integer
     */
    public static int loadWorkThread(Properties pop, String key){
        int defaultNum = Math.max(1, Runtime.getRuntime().availableProcessors() - 1);
        int loadNum = Integer.parseInt(load(pop, key));
        return loadNum > 0 ? loadNum : defaultNum;
    }
}
38 |
--------------------------------------------------------------------------------
/Common/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | ### 设置root###
2 | log4j.rootLogger = debug,stdout,D,E
3 |
4 |
### 输出信息到控制台 ###
6 | log4j.appender.stdout = org.apache.log4j.ConsoleAppender
7 | log4j.appender.stdout.Target = System.out
8 | log4j.appender.stdout.layout = org.apache.log4j.PatternLayout
9 | log4j.appender.stdout.layout.ConversionPattern = [%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} %l:%m%n
10 |
11 |
12 | ### 输出DEBUG 级别以上的日志到=debug.log ###
13 | # daily 表示一天一个文件
14 | log4j.appender.D = org.apache.log4j.DailyRollingFileAppender
15 | log4j.appender.D.File = ./Common/src/main/java/cn/mageek/common/log/debug.log
16 | log4j.appender.D.DatePattern=yyyy-MM-dd-HH'.log'
17 | log4j.appender.D.Append = true
18 | log4j.appender.D.Threshold = DEBUG
19 | log4j.appender.D.layout = org.apache.log4j.PatternLayout
20 | log4j.appender.D.layout.ConversionPattern = [%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} %l:%m%n
21 |
22 |
23 | ### 输出ERROR 级别以上的日志到=error.log ###
24 | log4j.appender.E = org.apache.log4j.DailyRollingFileAppender
25 | log4j.appender.E.File = ./Common/src/main/java/cn/mageek/common/log/error.log
26 | log4j.appender.E.DatePattern=yyyy-MM-dd-HH'.log'
27 | log4j.appender.E.Append = true
28 | log4j.appender.E.Threshold = ERROR
29 | log4j.appender.E.layout = org.apache.log4j.PatternLayout
30 | log4j.appender.E.layout.ConversionPattern = [%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} %l:%m%n
31 |
32 |
33 | ## 调整每个模块的日志级别##
34 | log4j.logger.cn.mageek.common=info
35 |
36 | log4j.logger.io.netty=warn
37 |
38 | log4j.logger.org.reflections=info
--------------------------------------------------------------------------------
/Common/src/test/java/cn/mageek/common/model/WatchProtoTest.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.common.model;
2 |
3 | import org.junit.Test;
4 | import org.slf4j.Logger;
5 | import org.slf4j.LoggerFactory;
6 | import static org.junit.Assert.*;
7 |
8 | /**
9 | * @author Mageek Chiu
10 | * @date 2018/6/1 0001:13:14
11 | */
12 | public class WatchProtoTest {
13 | private static final Logger logger = LoggerFactory.getLogger(WatchProtoTest.class);
14 |
15 | @Test
16 | public void protoTest(){
17 | WatchReqProto.WatchReq.Builder builder = WatchReqProto.WatchReq.newBuilder();
18 | builder.setImmediately(false);
19 | WatchReqProto.WatchReq watchReq = builder.build();
20 | logger.info("req:{}",watchReq.getImmediately());
21 |
22 | WatchRespProto.WatchResp.Builder builder1 = WatchRespProto.WatchResp.newBuilder();
23 | builder1.putHashCircle(1,"asdasd");
24 | WatchRespProto.WatchResp watchResp = builder1.build();
25 | logger.info("resp:{}",watchResp.getHashCircleMap());
26 | }
27 |
28 | }
--------------------------------------------------------------------------------
/Common/src/test/java/cn/mageek/common/util/ConsistHashTest.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.common.util;
2 |
3 | import org.junit.Test;
4 |
5 | import static org.junit.Assert.*;
6 |
7 | /**
8 | * @author Mageek Chiu
9 | * @date 2018/5/8 0008:20:04
10 | */
public class ConsistHashTest {

    // TODO(review): empty placeholder — ConsistHash.getServer is currently untested.
    // Suggested cases: the same key always routes to the same node; keys redistribute
    // correctly after a node joins/leaves the ring; behavior on an empty ring.
    @Test
    public void getServer() {

    }
}
--------------------------------------------------------------------------------
/DataNode/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 |
6 | CHKV
7 | cn.mageek
8 | 1.0-SNAPSHOT
9 | ../pom.xml
10 |
11 | 4.0.0
12 | DataNode
13 | jar
14 |
15 |
16 |
17 |
18 | cn.mageek
19 | Common
20 | 1.0-SNAPSHOT
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 | org.apache.maven.plugins
29 | maven-compiler-plugin
30 | 3.7.0
31 |
32 | 1.8
33 | 1.8
34 |
35 |
36 |
37 | maven-assembly-plugin
38 | 3.1.0
39 |
40 |
41 |
42 | cn.mageek.datanode.main.DataNode
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 | com.spotify
53 | dockerfile-maven-plugin
54 | 1.3.4
55 |
56 | ${docker.image.prefix}/${project.artifactId}
57 |
58 |
59 |
60 |
61 |
62 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/command/CommandAPPEND.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.command;
2 |
3 | import cn.mageek.common.command.AbstractDataNodeCommand;
4 | import cn.mageek.common.model.DataRequest;
5 | import cn.mageek.common.model.DataResponse;
6 | import cn.mageek.common.model.LineType;
7 | import cn.mageek.common.model.WebMsgObject;
8 | import org.slf4j.Logger;
9 | import org.slf4j.LoggerFactory;
10 |
11 | import static cn.mageek.datanode.main.DataNode.DATA_POOL;
12 |
13 | /**
14 | * 单增 用CAS保证原子性
15 | * 这种当前操作依赖于之前值的命令都要这样保证
16 | * @author Mageek Chiu
17 | * @date 2018/5/6 0007:13:49
18 | */
19 | public class CommandAPPEND extends AbstractDataNodeCommand {
20 |
21 | // private static final Logger logger = LoggerFactory.getLogger(CommandAPPEND.class);
22 |
23 | @Override
24 | public DataResponse receive(DataRequest dataRequest) {
25 | String key = dataRequest.getKey();
26 | String value = dataRequest.getValue();
27 | if (DATA_POOL.putIfAbsent(key,value)==null){// 之前不存在
28 | return new DataResponse(LineType.SINGLE_RIGHT, value);
29 | }
30 |
31 | String oldValue;
32 | String newValue;
33 |
34 | do {// 之前存在
35 | oldValue = DATA_POOL.get(key);
36 | newValue = oldValue+value;
37 | } while (!DATA_POOL.replace(key, oldValue, newValue));
38 | // logger.debug(oldValue+"-"+newValue);
39 | return new DataResponse(LineType.SINGLE_RIGHT, newValue);
40 | }
41 |
42 | @Override
43 | public DataResponse send(WebMsgObject webMsgObject) {
44 | return null;
45 | }
46 |
47 | }
48 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/command/CommandCOMMAND.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.command;
2 |
3 | import cn.mageek.common.command.AbstractDataNodeCommand;
4 | import cn.mageek.common.model.DataRequest;
5 | import cn.mageek.common.model.DataResponse;
6 | import cn.mageek.common.model.WebMsgObject;
7 | import org.slf4j.Logger;
8 | import org.slf4j.LoggerFactory;
9 |
10 | import static cn.mageek.common.model.LineType.INT_NUM;
11 |
12 | /**
13 | * @author Mageek Chiu
14 | * @date 2018/5/6 0007:13:49
15 | */
16 | public class CommandCOMMAND extends AbstractDataNodeCommand {
17 |
18 | private static final Logger logger = LoggerFactory.getLogger(CommandCOMMAND.class);
19 |
20 | @Override
21 | public DataResponse receive(DataRequest dataRequest) {
22 | return new DataResponse(INT_NUM,"1");//
23 | }
24 |
25 | @Override
26 | public DataResponse send(WebMsgObject webMsgObject) {
27 | return null;
28 | }
29 |
30 | }
31 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/command/CommandDECR.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.command;
2 |
3 | import cn.mageek.common.command.AbstractDataNodeCommand;
4 | import cn.mageek.common.model.DataRequest;
5 | import cn.mageek.common.model.DataResponse;
6 | import cn.mageek.common.model.LineType;
7 | import cn.mageek.common.model.WebMsgObject;
8 |
9 | import static cn.mageek.datanode.main.DataNode.DATA_POOL;
10 |
11 | /**
12 | * 单增 用CAS保证原子性
13 | * 这种当前操作依赖于之前值的命令都要这样保证
14 | * @author Mageek Chiu
15 | * @date 2018/5/6 0007:13:49
16 | */
17 | public class CommandDECR extends AbstractDataNodeCommand {
18 |
19 | // private static final Logger logger = LoggerFactory.getLogger(CommandSET.class);
20 |
21 | @Override
22 | public DataResponse receive(DataRequest dataRequest) {
23 | String key = dataRequest.getKey();
24 | return CommandINCRBY.incrBy(key,"-1");
25 | }
26 |
27 | @Override
28 | public DataResponse send(WebMsgObject webMsgObject) {
29 | return null;
30 | }
31 |
32 | }
33 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/command/CommandDECRBY.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.command;
2 |
3 | import cn.mageek.common.command.AbstractDataNodeCommand;
4 | import cn.mageek.common.model.DataRequest;
5 | import cn.mageek.common.model.DataResponse;
6 | import cn.mageek.common.model.LineType;
7 | import cn.mageek.common.model.WebMsgObject;
8 |
9 | import static cn.mageek.datanode.main.DataNode.DATA_POOL;
10 |
11 | /**
12 | * 单增 用CAS保证原子性
13 | * 这种当前操作依赖于之前值的命令都要这样保证
14 | * @author Mageek Chiu
15 | * @date 2018/5/6 0007:13:49
16 | */
17 | public class CommandDECRBY extends AbstractDataNodeCommand {
18 |
19 | // private static final Logger logger = LoggerFactory.getLogger(CommandSET.class);
20 |
21 | @Override
22 | public DataResponse receive(DataRequest dataRequest) {
23 | String key = dataRequest.getKey();
24 | String val = "-"+dataRequest.getValue();
25 | return CommandINCRBY.incrBy(key,val);
26 | }
27 |
28 | @Override
29 | public DataResponse send(WebMsgObject webMsgObject) {
30 | return null;
31 | }
32 |
33 |
34 | }
35 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/command/CommandDEL.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.command;
2 |
3 | import cn.mageek.common.command.AbstractDataNodeCommand;
4 | import cn.mageek.common.model.DataRequest;
5 | import cn.mageek.common.model.DataResponse;
6 | import cn.mageek.common.model.WebMsgObject;
7 | import org.slf4j.Logger;
8 | import org.slf4j.LoggerFactory;
9 | import static cn.mageek.common.model.LineType.INT_NUM;
10 | import static cn.mageek.datanode.main.DataNode.DATA_POOL;
11 |
12 |
13 | /**
14 | * @author Mageek Chiu
15 | * @date 2018/5/6 0007:13:49
16 | */
17 | public class CommandDEL extends AbstractDataNodeCommand {
18 |
19 | // private static final Logger logger = LoggerFactory.getLogger(CommandDEL.class);
20 |
21 | @Override
22 | public DataResponse receive(DataRequest dataRequest) {
23 | String oldValue = DATA_POOL.remove(dataRequest.getKey());//
24 | if (oldValue==null) return new DataResponse(INT_NUM,"0");// 不存在
25 | return new DataResponse(INT_NUM,"1");// 成功删除
26 | }
27 |
28 | @Override
29 | public DataResponse send(WebMsgObject webMsgObject) {
30 | return null;
31 | }
32 |
33 | }
34 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/command/CommandEXPIRE.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.command;
2 |
3 | import cn.mageek.common.command.AbstractDataNodeCommand;
4 | import cn.mageek.common.model.DataRequest;
5 | import cn.mageek.common.model.DataResponse;
6 | import cn.mageek.common.model.LineType;
7 | import cn.mageek.common.model.WebMsgObject;
8 |
9 | import static cn.mageek.datanode.main.DataNode.DATA_EXPIRE;
10 | import static cn.mageek.datanode.main.DataNode.DATA_POOL;
11 |
12 | /**
13 | * 具体策略类
14 | * @author Mageek Chiu
15 | * @date 2018/5/6 0007:13:49
16 | */
17 | public class CommandEXPIRE extends AbstractDataNodeCommand {
18 |
19 | // private static final Logger logger = LoggerFactory.getLogger(CommandSET.class);
20 |
21 | @Override
22 | public DataResponse receive(DataRequest dataRequest) {
23 |
24 | if (!DATA_POOL.containsKey(dataRequest.getKey())) return new DataResponse(LineType.INT_NUM,"0");//不存在返回0
25 |
26 | long seconds = Long.parseLong((dataRequest.getValue()))*1000;// 待存活秒数*1000 亦即 存活毫秒数
27 | seconds += (System.currentTimeMillis()); // 存活截止时间戳,单位毫秒
28 | DATA_EXPIRE.put(dataRequest.getKey(),seconds);//
29 | return new DataResponse(LineType.INT_NUM,"1");// 存在返回设置成功的数量
30 | }
31 |
32 | @Override
33 | public DataResponse send(WebMsgObject webMsgObject) {
34 | return null;
35 | }
36 |
37 | }
38 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/command/CommandGET.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.command;
2 |
3 | import cn.mageek.common.command.AbstractDataNodeCommand;
4 | import cn.mageek.common.model.*;
5 | import org.slf4j.Logger;
6 | import org.slf4j.LoggerFactory;
7 |
8 | import static cn.mageek.common.model.LineType.NEXT_LEN;
9 | import static cn.mageek.datanode.main.DataNode.DATA_EXPIRE;
10 | import static cn.mageek.datanode.main.DataNode.DATA_POOL;
11 |
12 |
13 | /**
14 | * @author Mageek Chiu
15 | * @date 2018/5/6 0007:13:49
16 | */
17 | public class CommandGET extends AbstractDataNodeCommand {
18 |
19 | // private static final Logger logger = LoggerFactory.getLogger(CommandGET.class);
20 |
21 | @Override
22 | public DataResponse receive(DataRequest dataRequest) {
23 | String key = dataRequest.getKey();
24 |
25 | Long expireTime = DATA_EXPIRE.get(key);
26 | if (expireTime != null && ( expireTime< System.currentTimeMillis() )){// 已经过期
27 | DATA_POOL.remove(key);
28 | DATA_EXPIRE.remove(key);
29 | return new DataResponse(NEXT_LEN,"-1");
30 | }
31 |
32 | String answer = DATA_POOL.get(key);
33 | if(answer==null) return new DataResponse(NEXT_LEN,"-1");// 键不存在
34 | return new DataResponse(NEXT_LEN,answer);
35 | }
36 |
37 | @Override
38 | public DataResponse send(WebMsgObject webMsgObject) {
39 | return null;
40 | }
41 |
42 |
43 |
44 | }
45 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/command/CommandINCR.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.command;
2 |
3 | import cn.mageek.common.command.AbstractDataNodeCommand;
4 | import cn.mageek.common.model.DataRequest;
5 | import cn.mageek.common.model.DataResponse;
6 | import cn.mageek.common.model.LineType;
7 | import cn.mageek.common.model.WebMsgObject;
8 |
9 | import java.util.concurrent.atomic.AtomicLong;
10 |
11 | import static cn.mageek.datanode.main.DataNode.DATA_POOL;
12 |
13 | /**
14 | * 单增 用CAS保证原子性
15 | * 这种当前操作依赖于之前值的命令都要这样保证
16 | * @author Mageek Chiu
17 | * @date 2018/5/6 0007:13:49
18 | */
19 | public class CommandINCR extends AbstractDataNodeCommand {
20 |
21 | // private static final Logger logger = LoggerFactory.getLogger(CommandSET.class);
22 |
23 | @Override
24 | public DataResponse receive(DataRequest dataRequest) {
25 | String key = dataRequest.getKey();
26 | return CommandINCRBY.incrBy(key,"1");
27 | }
28 |
29 | @Override
30 | public DataResponse send(WebMsgObject webMsgObject) {
31 | return null;
32 | }
33 |
34 | }
35 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/command/CommandINCRBY.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.command;
2 |
3 | import cn.mageek.common.command.AbstractDataNodeCommand;
4 | import cn.mageek.common.model.DataRequest;
5 | import cn.mageek.common.model.DataResponse;
6 | import cn.mageek.common.model.LineType;
7 | import cn.mageek.common.model.WebMsgObject;
8 |
9 | import static cn.mageek.datanode.main.DataNode.DATA_POOL;
10 |
11 | /**
12 | * 单增 用CAS保证原子性
13 | * 这种当前操作依赖于之前值的命令都要这样保证
14 | * @author Mageek Chiu
15 | * @date 2018/5/6 0007:13:49
16 | */
17 | public class CommandINCRBY extends AbstractDataNodeCommand {
18 |
19 | // private static final Logger logger = LoggerFactory.getLogger(CommandSET.class);
20 |
21 | @Override
22 | public DataResponse receive(DataRequest dataRequest) {
23 | String key = dataRequest.getKey();
24 | String val = dataRequest.getValue();
25 | return CommandINCRBY.incrBy(key,val);
26 | }
27 |
28 | @Override
29 | public DataResponse send(WebMsgObject webMsgObject) {
30 | return null;
31 | }
32 |
33 | public static DataResponse incrBy(String key,String val){
34 | int value = Integer.parseInt(val);
35 | if (DATA_POOL.putIfAbsent(key,val)==null){// 之前不存在,置为value
36 | return new DataResponse(LineType.INT_NUM, val);
37 | }
38 |
39 | String oldValue;
40 | int newValue;
41 | do {// 之前存在
42 | oldValue = DATA_POOL.get(key);
43 | newValue = Integer.parseInt(oldValue) + value;
44 | } while (!DATA_POOL.replace(key, oldValue, String.valueOf(newValue)));
45 |
46 | return new DataResponse(LineType.INT_NUM, String.valueOf(newValue));
47 | }
48 |
49 | }
50 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/command/CommandKEYS.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.command;
2 |
3 | import cn.mageek.common.command.AbstractDataNodeCommand;
4 | import cn.mageek.common.model.DataRequest;
5 | import cn.mageek.common.model.DataResponse;
6 | import cn.mageek.common.model.WebMsgObject;
7 | import org.slf4j.Logger;
8 | import org.slf4j.LoggerFactory;
9 |
10 | import java.util.ArrayList;
11 | import java.util.LinkedList;
12 | import java.util.List;
13 | import java.util.stream.Collectors;
14 |
15 | import static cn.mageek.common.model.LineType.INT_NUM;
16 | import static cn.mageek.common.model.LineType.LINE_NUM;
17 | import static cn.mageek.common.res.Constants.innerSplit;
18 | import static cn.mageek.datanode.main.DataNode.DATA_POOL;
19 |
20 | /**
21 |
22 | * @author Mageek Chiu
23 | * @date 2018/5/6 0007:13:49
24 | */
25 | public class CommandKEYS extends AbstractDataNodeCommand {
26 |
27 | private static final Logger logger = LoggerFactory.getLogger(CommandKEYS.class);
28 |
29 | @Override
30 | public DataResponse receive(DataRequest dataRequest) {
31 |
32 | // StringBuilder builder = new StringBuilder();
33 | // DATA_POOL.forEach((k,v)->{
34 | // builder.append("$").append(k.length()).append(innerSplit).append(k).append(innerSplit);// 这样就和协议耦合了,应该定义一个list的结果,让eocoder自己去编码,才能和协议解耦
35 | // });
36 | // return new DataResponse(LINE_NUM,builder.toString());//
37 |
38 | List msgList;
39 |
40 | String key = dataRequest.getKey();
41 | if (key.equals("*")){
42 | msgList = new ArrayList<>(DATA_POOL.keySet());
43 | }else {
44 | // DATA_POOL.forEach((k,v)->{
45 | // if (k.startsWith(dataRequest.getKey()))
46 | // msgList.add(k);
47 | // });
48 | msgList = DATA_POOL.keySet().stream().filter((k)->k.startsWith(dataRequest.getKey())).collect(Collectors.toList());
49 | }
50 |
51 | return new DataResponse(LINE_NUM,msgList);//
52 | }
53 |
54 | @Override
55 | public DataResponse send(WebMsgObject webMsgObject) {
56 | return null;
57 | }
58 |
59 | }
60 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/command/CommandSET.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.command;
2 |
3 | import cn.mageek.common.command.AbstractDataNodeCommand;
4 | import cn.mageek.common.model.*;
5 | import org.slf4j.Logger;
6 | import org.slf4j.LoggerFactory;
7 | import static cn.mageek.datanode.main.DataNode.DATA_POOL;
8 |
9 | /**
10 | * 具体策略类
11 | * @author Mageek Chiu
12 | * @date 2018/5/6 0007:13:49
13 | */
14 | public class CommandSET extends AbstractDataNodeCommand {
15 |
16 | // private static final Logger logger = LoggerFactory.getLogger(CommandSET.class);
17 |
18 | @Override
19 | public DataResponse receive(DataRequest dataRequest) {
20 | DATA_POOL.put(dataRequest.getKey(),dataRequest.getValue());
21 | return new DataResponse(LineType.SINGLE_RIGHT,"OK");
22 | }
23 |
24 | @Override
25 | public DataResponse send(WebMsgObject webMsgObject) {
26 | return null;
27 | }
28 |
29 | }
30 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/command/CommandSETNX.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.command;
2 |
3 | import cn.mageek.common.command.AbstractDataNodeCommand;
4 | import cn.mageek.common.model.DataRequest;
5 | import cn.mageek.common.model.DataResponse;
6 | import cn.mageek.common.model.LineType;
7 | import cn.mageek.common.model.WebMsgObject;
8 |
9 | import static cn.mageek.datanode.main.DataNode.DATA_POOL;
10 |
11 | /**
12 | * 具体策略类
13 | * @author Mageek Chiu
14 | * @date 2018/5/6 0007:13:49
15 | */
16 | public class CommandSETNX extends AbstractDataNodeCommand {
17 |
18 | // private static final Logger logger = LoggerFactory.getLogger(CommandSET.class);
19 |
20 | @Override
21 | public DataResponse receive(DataRequest dataRequest) {
22 |
23 | String key = dataRequest.getKey();
24 | String value = dataRequest.getValue();
25 |
26 | String res = DATA_POOL.putIfAbsent(key, value);// 不存在则存储并返回null,否则返回已存在的value
27 | return new DataResponse(LineType.INT_NUM,res==null?"1":"0");
28 | }
29 |
30 | @Override
31 | public DataResponse send(WebMsgObject webMsgObject) {
32 | return null;
33 | }
34 |
35 | }
36 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/handler/BusinessHandler.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.handler;
2 |
3 | import cn.mageek.common.model.DataRequest;
4 | import cn.mageek.common.model.DataResponse;
5 | import cn.mageek.common.model.LineType;
6 | import cn.mageek.datanode.res.CommandFactory;
7 | import cn.mageek.common.command.AbstractDataNodeCommand;
8 | import io.netty.channel.ChannelHandlerContext;
9 | import io.netty.channel.ChannelInboundHandlerAdapter;
10 | import org.slf4j.Logger;
11 | import org.slf4j.LoggerFactory;
12 |
13 | import java.util.HashSet;
14 | import java.util.Set;
15 |
16 | import static cn.mageek.common.model.HeartbeatType.TRANSFERRING;
17 | import static cn.mageek.datanode.main.DataNode.dataNodeStatus;
18 |
19 | /**
20 | * 处理server接受到来自client的消息对象的handler,业务逻辑的核心
21 | * @author Mageek Chiu
22 | * @date 2018/3/10 0010:16:22
23 | */
24 | public class BusinessHandler extends ChannelInboundHandlerAdapter {
25 | private static final Logger logger = LoggerFactory.getLogger(BusinessHandler.class);
26 |
27 | private static final Set UPDATE_COMMAND = new HashSet(){{
28 | add("SET");add("SETNX");add("EXPIRE");add("APPEND");add("INCRBY");add("INCR");
29 | add("DECRBY");add("DECR");add("DEL");
30 | //add("GET");add("KEYS");add("COMMAND");
31 | }};
32 |
33 | @Override
34 | public void channelRead(ChannelHandlerContext ctx, Object obj) throws Exception {
35 | try {
36 | DataRequest dataRequest = (DataRequest)obj;//转换消息对象
37 | String commandString = dataRequest.getCommand();
38 | // 数据迁移中,禁止修改,可以查看
39 | if (dataNodeStatus.equals(TRANSFERRING) && UPDATE_COMMAND.contains(commandString)){
40 | DataResponse dataResponse = new DataResponse(
41 | LineType.SINGLE_ERROR,"DATA TRANSFERRING",dataRequest.getID());
42 | ctx.writeAndFlush(dataResponse);//从当前位置往上找outBound
43 | return;
44 | }
45 | AbstractDataNodeCommand command = CommandFactory.getCommand(commandString);
46 | if(command==null){
47 | logger.error("error command: {}",dataRequest);
48 | return;
49 | }
50 | // logger.debug("command:{}",command.getClass().getName());
51 | logger.debug("dataRequest:{}",dataRequest);
52 | DataResponse dataResponse = command.receive(dataRequest);// 处理请求 获得响应
53 | dataResponse.setID(dataRequest.getID());//设置响应ID
54 | logger.debug("dataResponse:{}",dataResponse);
55 | ctx.writeAndFlush(dataResponse);//从当前位置往上找outBound
56 |
57 | }catch (Exception e){
58 | logger.error("parse data :{} , from: {} , error: ", obj,ctx.channel().remoteAddress(),e);
59 | }
60 |
61 | }
62 |
63 | }
64 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/handler/ClientHandler.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.handler;
2 |
3 | import io.netty.channel.ChannelHandlerContext;
4 | import io.netty.channel.ChannelInboundHandlerAdapter;
5 | import org.slf4j.Logger;
6 | import org.slf4j.LoggerFactory;
7 |
8 | import static cn.mageek.datanode.main.DataNode.clientMap;
9 |
10 |
11 | /**
12 | * 管理客户端的在线状态的handler
13 | * @author Mageek Chiu
14 | * @date 2018/3/5 0005:19:02
15 | */
16 | //@ChannelHandler.Sharable//必须是线程安全的
17 | public class ClientHandler extends ChannelInboundHandlerAdapter {
18 |
19 | private static final Logger logger = LoggerFactory.getLogger(ClientHandler.class);
20 |
21 | @Override
22 | public void channelActive(ChannelHandlerContext ctx) throws Exception {
23 | String connection = ctx.channel().remoteAddress().toString();
24 | clientMap.put(connection,ctx.channel());
25 | logger.info("new connection arrived: {} clients living {}",connection, clientMap.size());
26 | }
27 |
28 | @Override
29 | public void channelInactive(ChannelHandlerContext ctx) throws Exception {
30 | String connection = ctx.channel().remoteAddress().toString();
31 | clientMap.remove(connection);
32 | logger.info("connection closed: {},uuid:{}, clients living {}",connection, clientMap.size());
33 | }
34 |
35 | @Override
36 | public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
37 | ctx.fireChannelRead(msg);//传输到下一个inBound
38 | }
39 |
40 | @Override
41 | public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
42 | logger.error("receiveMsg from: {},error: ",ctx.channel().remoteAddress(),cause);//ReadTimeoutException 会出现在这里,亦即事件会传递到handler链中最后一个事件处理中
43 | ctx.close();//这时一般就会自动关闭连接了。手动关闭的目的是避免偶尔情况下会处于未知状态
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/handler/DataTransferHandler.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.handler;
2 |
3 | import cn.mageek.common.model.DataRequest;
4 | import cn.mageek.common.model.DataResponse;
5 | import cn.mageek.common.model.HeartbeatResponse;
6 | import cn.mageek.common.model.LineType;
7 | import cn.mageek.common.util.Decoder;
8 | import cn.mageek.common.util.Encoder;
9 | import io.netty.buffer.ByteBuf;
10 | import io.netty.channel.*;
11 | import io.netty.util.CharsetUtil;
12 | import org.slf4j.Logger;
13 | import org.slf4j.LoggerFactory;
14 |
15 | import java.util.ArrayList;
16 | import java.util.LinkedList;
17 | import java.util.List;
18 | import java.util.Map;
19 | import java.util.concurrent.atomic.AtomicInteger;
20 |
21 | import static cn.mageek.common.res.Constants.offlineKey;
22 | import static cn.mageek.common.res.Constants.offlineValue;
23 | import static cn.mageek.common.res.Constants.pageSize;
24 | import static cn.mageek.common.util.ConsistHash.getHash;
25 | import static cn.mageek.datanode.main.DataNode.DATA_POOL;
26 |
27 |
/**
 * Data-migration handler: pushes this node's key/value entries to another
 * DataNode (full hand-off when going offline, partial hand-off on rebalance,
 * or master->slave replication) and tracks per-request acknowledgements.
 * @author Mageek Chiu
 * @date 2018/5/7 0007:12:41
 */
public class DataTransferHandler extends ChannelInboundHandlerAdapter {
    private static final Logger logger = LoggerFactory.getLogger(DataTransferHandler.class);
    private static final String SET = "SET";
    // Count of responses still expected; initialised in dataTransfer().
    // NOTE(review): channelInactive() dereferences it, so a channel that dies
    // before dataTransfer() ran would NPE there -- confirm handler lifecycle.
    private AtomicInteger ok;
    private boolean isAll;          // true: transfer every key (offline / full sync)
    private boolean isSync;         // true: master-slave replication, keep local data afterwards
    private String dataNodeIPPort;  // target "ip:port"; its hash splits the partial transfer
    private List transList;// IDs of requests still awaiting a response



    public DataTransferHandler(String dataNodeIPPort,boolean isAll,boolean isSync) {
        this.isAll = isAll;
        this.dataNodeIPPort = dataNodeIPPort;
        this.isSync = isSync;
    }

    /** Starts pushing data as soon as the connection to the target node is up. */
    @Override
    public void channelActive(ChannelHandlerContext ctx) throws Exception {
        logger.info("opened dataTransfer connection to: {}",ctx.channel().remoteAddress());
        dataTransfer(ctx.channel());// begin the transfer right after connecting
    }

    @Override
    public void channelInactive(ChannelHandlerContext ctx) throws Exception {
        logger.info("closed dataTransfer connection: {}",ctx.channel().remoteAddress());
        if (ok.get()>0){
            logger.info("dataTransfer connection closed, some failed,{}",transList);
        }
    }

    /**
     * Consumes acknowledgements from the receiving node: each SINGLE_RIGHT
     * response removes its request ID from the wait list; when the expected
     * count hits zero the channel is closed and, for a full non-replication
     * transfer, the data pool is released so this node can go offline.
     */
    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
        ByteBuf in = (ByteBuf) msg;
        List responses = Decoder.bytesToDataResponse(in);
        responses.forEach((response)->{
            logger.debug("DataNode dataTransfer received : {}",response);
            if (LineType.SINGLE_RIGHT.equals(response.getLineType())){
                transList.remove(response.getID());// acknowledged: drop from the wait list

                if(ok.decrementAndGet() == 0){// all expected responses received
                    if(transList.isEmpty()) logger.info("dataTransfer completed, all succeeded");
                    else logger.info("dataTransfer completed, some failed,{}",transList);
                    ctx.channel().close();// closing the channel lets the transfer job finish
                    if (isAll && !isSync){// full transfer and not replication: node may go offline
                        DATA_POOL = null;
                    }
                }
                // If responses are missing (sticky-packet under-parse or a genuinely
                // failed key) this channel idles out; the sending side only sees
                // channelInactive, the receiving side reports the timeout.
            }
        });
        in.release();
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
        logger.error("connection to: {},error: ",ctx.channel().remoteAddress(),cause);
        ctx.close();
    }

    /**
     * Builds the SET requests to transfer (all keys, or only the keys whose
     * hash is at or below the target node's hash) and sends them in pages of
     * pageSize, counting down acknowledgements via {@code ok}.
     */
    private void dataTransfer(Channel channel) throws InterruptedException {

        int allNum = DATA_POOL.size();
        List requests = new ArrayList<>(allNum);
        transList = new ArrayList<>(allNum);

        if (isAll){// hand every entry to the next node
            DATA_POOL.forEach((k,v)->{
                DataRequest r = new DataRequest(SET,k,v);
                requests.add(r);
                transList.add(r.getID());
            });
        }else {// hand only the lower hash range to the previous node
            int serverHash = getHash(dataNodeIPPort);
            DATA_POOL.forEach((k,v)->{
                int keyHash = getHash(k);
                if (keyHash <= serverHash){// this key must move
                    DataRequest r = new DataRequest(SET,k,v);
                    requests.add(r);
                    transList.add(r.getID());
                }
            });
        }

        int listSize = requests.size();
        int transTime = (int) Math.ceil(listSize/pageSize);// number of pages to send
        // ok = new AtomicInteger(transTime) ;
        ok = new AtomicInteger(listSize) ;
        logger.info("all data:{}, transfer data :{},pageSize:{},transfer time: {}",allNum,listSize,pageSize,transTime);

        for (int i = 0 ; i < transTime;i++){
            List requests1 = new ArrayList<>((int) pageSize);
            int index;// index of the entry currently being paged
            for (int j = 0; j < pageSize ; j++){
                index = (int) (i*pageSize+j);
                // NOTE(review): this listing is truncated/garbled from here to the
                // send listener below -- the bounds check and the batched send are
                // missing in this copy; verify against the repository before editing.
                if (index {
            logger.debug("sent buf length:{}",num);
//            if(ok.decrementAndGet() == 0){// everything sent
//                Thread.sleep(8000);// wait a bit so all responses arrive // actually indeterminate: 8000ms without a reply just counts as timeout // and wrong anyway: same thread, sleeping blocks response handling
//
//                if(transList.isEmpty()) logger.info("dataTransfer completed, all succeeded");
//                else logger.info("dataTransfer completed, some failed,{}",transList);
//
//                channel.close();// closing the channel ends the transfer naturally
//
//                if (isAll){
//                    DATA_POOL = null;// whole DataNode may now go offline
//                }
//            }
        });
        }


    }

}
160 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/handler/HeartBeatHandler.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.handler;
2 |
3 | import cn.mageek.common.model.HeartbeatResponse;
4 | import cn.mageek.common.model.HeartbeatType;
5 | import cn.mageek.datanode.job.DataTransfer;
6 | import cn.mageek.datanode.res.JobFactory;
7 | import io.netty.channel.ChannelHandlerContext;
8 | import io.netty.channel.ChannelInboundHandlerAdapter;
9 | import org.slf4j.Logger;
10 | import org.slf4j.LoggerFactory;
11 | import java.util.Map;
12 |
13 | import static cn.mageek.datanode.main.DataNode.DATA_POOL;
14 | import static cn.mageek.datanode.main.DataNode.dataNodeStatus;
15 |
16 | /**
17 | * @author Mageek Chiu
18 | * @date 2018/5/7 0007:13:52
19 | */
20 | public class HeartBeatHandler extends ChannelInboundHandlerAdapter {
21 | private static final Logger logger = LoggerFactory.getLogger(HeartBeatHandler.class);
22 |
23 | @Override
24 | public void channelActive(ChannelHandlerContext ctx) throws Exception {
25 | logger.info("opened connection to: {}",ctx.channel().remoteAddress());
26 | }
27 |
28 | @Override
29 | public void channelInactive(ChannelHandlerContext ctx) throws Exception {
30 | logger.info("closed connection: {}",ctx.channel().remoteAddress());
31 | }
32 |
33 | @Override
34 | public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
35 | HeartbeatResponse response = (HeartbeatResponse) msg;// 因为这个in 上面的 in 是decoder,所以直接可以获得对象
36 | logger.debug("DataNode received: {}",response);
37 | handleResponse(response);
38 | }
39 |
40 | @Override
41 | public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
42 | logger.error("connection to: {},error: ",ctx.channel().remoteAddress(),cause);
43 | ctx.close();
44 | }
45 |
46 | private void handleResponse(HeartbeatResponse response){
47 | DataTransfer dataTransfer = null;
48 | String IPPort = response.getIPPort();
49 | if (response.isOk()){// 继续运行
50 | if (IPPort!=null){
51 | logger.info("DataNode 需要转移部分数据给上一个节点");
52 | dataTransfer = (DataTransfer) JobFactory.getJob("DataTransfer");
53 | dataTransfer.connect(IPPort,false);
54 | }else{
55 | logger.debug("DataNode 不需要转移数据");
56 | }
57 | }else{// 不再运行
58 | if (IPPort!=null){
59 | logger.info("DataNode 数据全部迁移给下一个节点,{}",IPPort);
60 | dataTransfer = (DataTransfer) JobFactory.getJob("DataTransfer");
61 | dataTransfer.connect(IPPort,true);
62 | }else{
63 | logger.info("DataNode 最后一台下线,不需要转移数据");
64 | DATA_POOL = null;
65 | }
66 | }
67 |
68 | if(dataTransfer != null){
69 | dataNodeStatus = HeartbeatType.TRANSFERRING;
70 | Thread transfer = new Thread(dataTransfer,"dataTransfer");
71 | transfer.start();// 新起一个线程,但是这样可能不稳定,待改进
72 | }
73 | }
74 |
75 | }
76 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/handler/OtherHandler.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.CHKV.handler;
2 |
3 | import io.netty.channel.ChannelInboundHandlerAdapter;
4 |
5 | /**
6 | * 占位
7 | * @author Mageek Chiu
8 | * @date 2018/3/10 0010:16:22
9 | */
10 | public class OtherHandler extends ChannelInboundHandlerAdapter {
11 |
12 | }
13 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/handler/PushMsgHandler.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.handler;
2 |
3 | import cn.mageek.common.command.AbstractDataNodeCommand;
4 | import cn.mageek.common.model.DataResponse;
5 | import cn.mageek.common.model.WebMsgObject;
6 | import io.netty.channel.ChannelHandlerContext;
7 | import io.netty.channel.ChannelOutboundHandlerAdapter;
8 | import io.netty.channel.ChannelPromise;
9 | import org.slf4j.Logger;
10 | import org.slf4j.LoggerFactory;
11 |
12 | /**
13 | * 处理server推送消息的handler,负责根据netMsg得到待发送消息并传递给下一个handler
14 | * @author Mageek Chiu
15 | * @date 2018/3/6 0006:19:59
16 | */
17 | public class PushMsgHandler extends ChannelOutboundHandlerAdapter {
18 |
19 | private static final Logger logger = LoggerFactory.getLogger(PushMsgHandler.class);
20 |
21 |
22 | @Override
23 | public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
24 | // 这种判断的方式比较浪费性能,是否有更优雅的解决方式???
25 | // 交换 PushMsgHandler 和 BusinessHandler 顺序即可,但是BusinessHandler必须处于最后??使得回复的报文不需要经过PushMsgHandler
26 | // 可以再加一个InboundHandler
27 | if(msg instanceof WebMsgObject ){//是主动推送,需要编码
28 | WebMsgObject webMsgObject = (WebMsgObject)msg;;//根据消息字符串解析成消息对象
29 | DataResponse dataResponse = ((AbstractDataNodeCommand)Class.forName("cn.mageek.common.command.AbstractDataNodeCommand"+webMsgObject.getCommand()).newInstance()).send(webMsgObject);
30 | logger.debug("pushMsg: {} to {}",dataResponse,ctx.channel().remoteAddress());
31 | // super.write(ctx,rcvMsgObject,promise);
32 | ctx.writeAndFlush(dataResponse);
33 | }else{
34 | logger.error("error pushMsg: {} to {}",msg,ctx.channel().remoteAddress());
35 | }
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/handler/RcvMsgHandler.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.handler;
2 |
3 | import cn.mageek.common.model.DataRequest;
4 | import cn.mageek.common.util.Decoder;
5 | import io.netty.buffer.ByteBuf;
6 | import io.netty.buffer.ByteBufUtil;
7 | import io.netty.channel.ChannelHandlerContext;
8 | import io.netty.channel.ChannelInboundHandlerAdapter;
9 | import io.netty.util.CharsetUtil;
10 | import org.slf4j.Logger;
11 | import org.slf4j.LoggerFactory;
12 |
13 | import java.util.List;
14 |
15 | /**
16 | * server收到的buffer转换为消息对象
17 | * @author Mageek Chiu
18 | * @date 2018/5/5 0005:14:32
19 | */
20 | public class RcvMsgHandler extends ChannelInboundHandlerAdapter {
21 |
22 | private static final Logger logger = LoggerFactory.getLogger(RcvMsgHandler.class);
23 |
24 |
25 | @Override
26 | public void channelRead(ChannelHandlerContext ctx, Object msg) {
27 |
28 | ByteBuf buf = (ByteBuf) msg;
29 | try {
30 | List dataRequestList = Decoder.bytesToDataRequests(buf);
31 | dataRequestList.forEach(ctx::fireChannelRead);// 多个命令多次次传输给下一个handler
32 | }catch (Exception e){
33 | logger.error("parse data :{} , from: {} , error: ", ByteBufUtil.hexDump(buf),ctx.channel().remoteAddress(),e);
34 | }finally {
35 | buf.release();
36 | }
37 | }
38 |
39 | @Override
40 | public void channelReadComplete(ChannelHandlerContext ctx) throws Exception {
41 |
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/handler/SendMsgHandler.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.handler;
2 |
3 | import cn.mageek.common.model.DataResponse;
4 | import cn.mageek.common.util.Encoder;
5 | import io.netty.buffer.ByteBuf;
6 | import io.netty.channel.ChannelHandlerContext;
7 | import io.netty.channel.ChannelOutboundHandlerAdapter;
8 | import io.netty.channel.ChannelPromise;
9 | import org.slf4j.Logger;
10 | import org.slf4j.LoggerFactory;
11 |
12 | /**
13 | * 处理server发出给client的消息的handler,负责把消息对象转换为buffer并发送给客户端
14 | * @author Mageek Chiu
15 | * @date 2018/3/6 0006:19:59
16 | */
17 | public class SendMsgHandler extends ChannelOutboundHandlerAdapter {
18 |
19 | private static final Logger logger = LoggerFactory.getLogger(SendMsgHandler.class);
20 |
21 | @Override
22 | public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
23 | DataResponse dataResponse = (DataResponse)msg;
24 | ByteBuf buf = Encoder.dataResponseToBytes(dataResponse);//把消息对象dataResponse转换为buffer
25 | ctx.writeAndFlush(buf);
26 | // logger.debug("sendMsg: {} to {}",dataResponse,ctx.channel().remoteAddress());
27 | }
28 |
29 | }
30 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/job/DataRunnable.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.job;
2 |
3 |
/**
 * Base class for DataNode background jobs (see DataTransfer, ExpireChecking,
 * MSSync in this package). Subclasses override run() with the job body and
 * optionally a connect(...) variant to set a target before running.
 * @author Mageek Chiu
 * @date 2018/5/9 0009:10:24
 */
public class DataRunnable implements Runnable {


    // Default job body: intentionally empty; subclasses supply the work.
    @Override
    public void run() {

    }

    /**
     * Optional pre-run hook for jobs that need a connection/target prepared
     * in advance; the default does nothing.
     */
    public void connect(){
    }
}
22 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/job/DataTransfer.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.job;
2 |
3 | import cn.mageek.datanode.handler.DataTransferHandler;
4 | import io.netty.bootstrap.Bootstrap;
5 | import io.netty.channel.*;
6 | import io.netty.channel.nio.NioEventLoopGroup;
7 | import io.netty.channel.socket.SocketChannel;
8 | import io.netty.channel.socket.nio.NioSocketChannel;
9 | import io.netty.util.concurrent.DefaultEventExecutorGroup;
10 | import io.netty.util.concurrent.EventExecutorGroup;
11 | import org.slf4j.Logger;
12 | import org.slf4j.LoggerFactory;
13 | import java.net.InetSocketAddress;
14 |
15 | /**
16 | * 向dataNode转移数据
17 | * @author Mageek Chiu
18 | * @date 2018/5/7 0007:10:44
19 | */
20 | public class DataTransfer extends DataRunnable{
21 | //public class DataTransfer implements Runnable{
22 | private static final Logger logger = LoggerFactory.getLogger(DataTransfer.class);
23 |
24 | private boolean isAll;
25 | private boolean isSync;
26 | private String dataNodeIP;
27 | private String dataNodePort;
28 | private String dataNodeIPPort;
29 |
30 | @Override
31 | public void run(){
32 | // logger.debug("DataTransfer instance {},dataPool {}",this,DATA_POOL);
33 | // 其他节点上线能正确传输,然后其他节点再上线也能正确传输
34 | // 其他节点上线能正确传输,然后自己下线就不能正确传输了,DataTransferHandler 里面 DATA_POOL为 null,但是这里每次都不是null,和handler周期有关?
35 |
36 | EventLoopGroup group = new NioEventLoopGroup();
37 | Bootstrap b = new Bootstrap();
38 | EventExecutorGroup businessGroup = new DefaultEventExecutorGroup(1);//处理耗时业务逻辑,不占用IO线程
39 | b.group(group)
40 | .channel(NioSocketChannel.class)
41 | .remoteAddress(new InetSocketAddress(dataNodeIP, Integer.parseInt(dataNodePort)))
42 | .handler(new ChannelInitializer() {
43 | @Override
44 | public void initChannel(SocketChannel ch) throws Exception {
45 | ChannelPipeline p = ch.pipeline();
46 | // p.addLast(new ObjectDecoder(2048, ClassResolvers.cacheDisabled(this.getClass().getClassLoader())));// in 进制缓存类加载器
47 | // p.addLast(new ObjectEncoder());// out
48 | // p.addLast(businessGroup,"DataTransferHandler",new DataTransferHandler(dataNodeIPPort,DATA_POOL,isAll));// in
49 | p.addLast(businessGroup,"DataTransferHandler",new DataTransferHandler(dataNodeIPPort,isAll,isSync));// in
50 |
51 | }
52 | });
53 | try {
54 | ChannelFuture f = b.connect().sync();// 发起连接,阻塞等待
55 | logger.debug("DataTransfer connection established");
56 | f.channel().closeFuture().sync();// 这是一段阻塞的代码,除非链路断了,否则是不会停止阻塞的,我们可以在handler中手动关闭,达到关闭客户端的效果
57 | group.shutdownGracefully().sync();
58 | logger.debug("DataTransfer connection closed");
59 | } catch (InterruptedException e) {
60 | logger.error("DataTransfer connection InterruptedException",e);
61 | }
62 | }
63 |
64 | // @Override
65 | // public void connect(Map dataPool){
66 | // this.DATA_POOL = dataPool;
67 | // }
68 |
69 | /**
70 | * 转移数据
71 | * @param nextIPPort 转移到目标节点
72 | * @param isAll 是否全部转移
73 | */
74 | public void connect(String nextIPPort,boolean isAll,boolean isSync){
75 | dataNodeIPPort = nextIPPort;
76 | String[] strings = dataNodeIPPort.split(":");
77 | dataNodeIP = strings[0];
78 | dataNodePort = strings[1];
79 | this.isAll = isAll;
80 | this.isSync = isSync;
81 | }
82 |
83 | public void connect(String nextIPPort,boolean isAll){
84 | connect(nextIPPort,isAll,false);
85 | }
86 |
87 | }
88 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/job/ExpireChecking.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.job;
2 |
3 | import org.slf4j.Logger;
4 | import org.slf4j.LoggerFactory;
5 | import java.util.Iterator;
6 | import java.util.Map;
7 | import static cn.mageek.datanode.main.DataNode.DATA_EXPIRE;
8 | import static cn.mageek.datanode.main.DataNode.DATA_POOL;
9 |
10 | /**
11 | * 检查过期键
12 | * @author Mageek Chiu
13 | * @date 2018/5/19 0007:16:36
14 | */
15 | public class ExpireChecking extends DataRunnable{
16 |
17 | private static final Logger logger = LoggerFactory.getLogger(ExpireChecking.class);
18 |
19 | @Override
20 | public void run(){
21 |
22 | Long noTime = System.currentTimeMillis();
23 | int i = 0;
24 |
25 | Iterator> it = DATA_EXPIRE.entrySet().iterator();
26 | while (it.hasNext()){
27 | Map.Entry e = it.next();
28 | String k = e.getKey();
29 | Long v = e.getValue();
30 | if (v= 如 java -Ddatanode.client.ip=192.168.0.136 -Ddatanode.client.port=10099 DataNode
38 | //// 来传入JVM,传入的参数作为system的property。因此在程序中可以通过下面的语句获取参数值:
39 | //// System.getProperty()
40 | // }
41 |
    /**
     * Establishes (or re-establishes) the heartbeat connection to the NameNode.
     * Idempotent: returns immediately when an active channel already exists.
     * On success the listener sends an ONLINE heartbeat and records the status;
     * on close the group is shut down and the channel field is reset.
     */
    @Override
    public void connect(){
        if (nameNodeChannel != null && nameNodeChannel.isActive()) return;// already connected: avoid duplicate reconnects
        // A fresh group is required each time: after repeated failures the old one may already be shut down/collected.
        group = new NioEventLoopGroup();
        Bootstrap b = new Bootstrap();

        b.group(group)
                .channel(NioSocketChannel.class)
//                .option(ChannelOption.CONNECT_TIMEOUT_MILLIS,5000)// connect timeout
                .remoteAddress(new InetSocketAddress(nameNodeIP, Integer.parseInt(nameNodePort)))// NameNode heartbeat ip/port
                .handler(new ChannelInitializer() {
                    @Override
                    public void initChannel(SocketChannel ch) {
                        ChannelPipeline p = ch.pipeline();
                        p.addLast(new ObjectDecoder(2048, ClassResolvers.cacheDisabled(this.getClass().getClassLoader())));// in; non-caching class resolver
                        p.addLast(new ObjectEncoder());// out
                        p.addLast("HeartBeatHandler",new HeartBeatHandler());// in
                    }
                });

        ChannelFuture f= b.connect();
        f.addListener((ChannelFutureListener) channelFuture -> {
            if (!channelFuture.isSuccess()) {
                logger.warn("connection to NameNode failed");
                group.shutdownGracefully();
            }else {
                nameNodeChannel = channelFuture.channel();
                logger.info("Heartbeat connection established");
                run1(ONLINE);// announce this node as soon as the connection is up
                dataNodeStatus = ONLINE;
            }
        });// connect listener
        ChannelFuture future = f.channel().closeFuture();// async callback instead of blocking
        future.addListener((ChannelFutureListener) channelFuture -> {// closed, actively or passively
            // NOTE(review): sync() inside a listener runs on the event loop thread -- confirm this cannot deadlock.
            group.shutdownGracefully().sync();
            nameNodeChannel = null;
            logger.debug("Heartbeat connection closed");
        });
    }
83 |
84 | public void disconnect(){
85 | nameNodeChannel.close();
86 | }
87 |
88 | public boolean isConnected(){
89 | return nameNodeChannel!=null&&nameNodeChannel.isActive();
90 | }
91 |
    /** Periodic entry point: send a routine RUNNING heartbeat, then record the status. */
    @Override
    public void run(){
        run1(RUNNING);// routine heartbeat while the node is serving
        dataNodeStatus = RUNNING;
    }
97 |
98 | /**
99 | * 发送不同状态的心跳
100 | * @param status 状态
101 | */
102 | public void run1(String status){
103 | if (!useHA || isMaster) {// 不使用HA 或者使用HA但是是master才发送心跳
104 | long memoryAvailable = Runtime.getRuntime().freeMemory();
105 | HeartbeatRequest request = new HeartbeatRequest(clientIP+":"+clientPort,status,memoryAvailable);
106 | if (nameNodeChannel == null || !nameNodeChannel.isActive()){
107 | logger.error("Connection to NameNode lost, retrying......");
108 | connect();// 断线重连
109 | }else{
110 | nameNodeChannel.writeAndFlush(request);
111 | logger.debug("DataNode sent: " + request);
112 | }
113 | }
114 | }
115 |
116 | }
117 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/job/MSSync.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.job;
2 |
3 | import cn.mageek.datanode.res.JobFactory;
4 | import cn.mageek.datanode.service.CronJobManager;
5 | import org.slf4j.Logger;
6 | import org.slf4j.LoggerFactory;
7 |
8 | import java.util.concurrent.ExecutorService;
9 | import java.util.concurrent.Executors;
10 |
11 | import static cn.mageek.datanode.service.CronJobManager.isMaster;
12 | import static cn.mageek.datanode.service.CronJobManager.useHA;
13 |
14 | /**
15 | * 主从同步,用于实现DataNode的主从复制,高可用
16 | * @author Mageek Chiu
17 | * @date 2018/5/25 0025:10:40
18 | */
19 | public class MSSync extends DataRunnable{
20 | private static final Logger logger = LoggerFactory.getLogger(MSSync.class);
21 |
22 |
23 | private String IPPort;
24 | // private ExecutorService service = Executors.newFixedThreadPool(1);
25 |
26 | @Override
27 | public void run() {
28 | if (useHA && isMaster) {// 使用HA且是master才同步到slave节点,暂时采用全量复制
29 | DataTransfer dataTransfer = (DataTransfer) JobFactory.getJob("DataTransfer");
30 | dataTransfer.connect(IPPort, true, true);
31 | try {
32 | dataTransfer.run();// 直接在本线程运行
33 | }catch (Exception e){
34 | logger.error("sync error:{}",e.getMessage());
35 | }
36 |
37 | // service.execute(dataTransfer);// 用线程池运行
38 | }
39 | }
40 |
41 | public void connect(String IPPort) {
42 | this.IPPort = IPPort;
43 | }
44 | }
45 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/main/DataNode.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.main;
2 |
3 | import cn.mageek.common.ha.HAThirdParty;
4 | import cn.mageek.common.ha.ZKThirdParty;
5 | import cn.mageek.common.model.HeartbeatType;
6 | import cn.mageek.common.util.HAHelper;
7 | import cn.mageek.datanode.job.Heartbeat;
8 | import cn.mageek.datanode.res.CommandFactory;
9 | import cn.mageek.datanode.res.JobFactory;
10 | import cn.mageek.datanode.service.DataManager;
11 | import cn.mageek.datanode.service.CronJobManager;
12 | import io.netty.channel.Channel;
13 | import org.slf4j.Logger;
14 | import org.slf4j.LoggerFactory;
15 | import java.io.*;
16 | import java.lang.management.ManagementFactory;
17 | import java.net.ServerSocket;
18 | import java.net.Socket;
19 | import java.util.Map;
20 | import java.util.Properties;
21 | import java.util.concurrent.ConcurrentHashMap;
22 | import java.util.concurrent.CountDownLatch;
23 | import java.util.function.Consumer;
24 |
25 | import static cn.mageek.common.model.HeartbeatType.OFFLINE;
26 | import static cn.mageek.common.util.PropertyLoader.load;
27 |
28 | /**
29 | * 管理本应用的所有服务
30 | * redis-cli -h 127.0.0.1 -p 10100
31 | * @author Mageek Chiu
32 | * @date 2018/3/5 0005:19:26
33 | */
34 |
35 | public class DataNode {
36 |
37 | private static final Logger logger = LoggerFactory.getLogger(DataNode.class);
38 | private static final String pid = ManagementFactory.getRuntimeMXBean().getName();
39 |
40 | private static int offlinePort = 6666;// 默认值,可调
41 | private static String offlineCmd = "k";
42 |
43 | public static String nameNodeIP ;
44 | public static String nameNodePort ;
45 | public static String clientPort;
46 | public static String clientIP;
47 |
48 | // HA 信息
49 | private static boolean useNameNodeHA;
50 | private static String connectAddr ;
51 | private static int sessionTimeout;
52 | private static int connectionTimeout;
53 | private static String masterNodePath ;
54 | private static int baseSleepTimeMs ;
55 | private static int maxRetries ;
56 |
57 | //本节点的数据存储,ConcurrentHashMap 访问效率高于 ConcurrentSkipListMap,但是转移数据时就需要遍历而不能直接排序了,考虑到转移数据情况并不多,访问次数远大于转移次数,所以就不用ConcurrentSkipListMap
58 | public static volatile Map DATA_POOL = new ConcurrentHashMap<>(1024) ;//被置为null 则意味着节点该下线了
59 | public static volatile Map DATA_EXPIRE = new ConcurrentHashMap<>(1024);// 键-失效时间的秒表示
60 | public static final Map clientMap = new ConcurrentHashMap<>();//管理所有客户端连接
61 |
62 | public static volatile CountDownLatch countDownLatch;//任务个数
63 |
64 | public static volatile String dataNodeStatus = OFFLINE;// 节点状态
65 |
    /**
     * Boots the DataNode: loads configuration, builds the command/job
     * singletons, starts the service threads, optionally wires up NameNode HA,
     * then blocks listening for the offline signal. On startup failure the
     * factories are torn down again.
     */
    public static void main(String[] args){
        Thread currentThread = Thread.currentThread();
        currentThread.setName(("DataNode"+Math.random()*100).substring(0,10));// quasi-unique node/thread name
        String threadName = currentThread.getName();
        logger.debug("current thread {}" ,threadName);

        Thread dataManager,cronJobManager;
        int jobNumber = 2;countDownLatch = new CountDownLatch(jobNumber);// one count per service thread started below

        // NOTE(review): ClassLoader.class.getResourceAsStream resolves through the
        // bootstrap loader; the usual idiom is DataNode.class.getResourceAsStream.
        // It appears to work for an absolute classpath path -- confirm before changing.
        try(InputStream in = ClassLoader.class.getResourceAsStream("/app.properties")){
            Properties pop = new Properties(); pop.load(in);
            offlinePort = Integer.parseInt(load(pop,"datanode.offline.port")); // port watched for the offline command
            offlineCmd = load(pop,"datanode.offline.cmd"); // the offline command word
            nameNodeIP = load(pop,"datanode.namenode.ip");// NameNode heartbeat IP exposed to DataNodes
            nameNodePort = load(pop,"datanode.namenode.port");// NameNode heartbeat port exposed to DataNodes
            clientIP = load(pop,"datanode.client.ip");// IP this DataNode exposes to clients
            clientPort = load(pop,"datanode.client.port");// port this DataNode exposes to clients
            logger.debug("Heartbeat config nameNodeIP:{},nameNodePort:{},clientIP:{},clientPort:{},offlinePort:{},offlineCmd:{}", nameNodeIP, nameNodePort,clientIP,clientPort,offlinePort,offlineCmd);

            useNameNodeHA = Boolean.parseBoolean(load(pop,"datanode.useNameNodeHA"));
            if (useNameNodeHA) {
                logger.info("using NameNodeHA");
                connectAddr = load(pop, "datanode.zk.connectAddr");// ZooKeeper ensemble address
                sessionTimeout = Integer.parseInt(load(pop, "datanode.zk.sessionTimeout"));
                connectionTimeout = Integer.parseInt(load(pop, "datanode.zk.connectionTimeout"));
                masterNodePath = load(pop, "datanode.zk.masterNodePath");// znode holding the master NameNode address
                baseSleepTimeMs = Integer.parseInt(load(pop, "datanode.zk.baseSleepTimeMs"));
                maxRetries = Integer.parseInt(load(pop, "datanode.zk.maxRetries"));
            }else{
                logger.info("not using NameNodeHA");
            }

            // Build the command singletons (every command object is a singleton).
            CommandFactory.construct();

            // Build the job singletons.
            JobFactory.construct();

            // Seed some test data.
            for (int i = 1 ; i <= 25 ; i++ ) DATA_POOL.put(threadName+i,threadName);

            // Start each service on its own thread.
            dataManager = new Thread(new DataManager(),"DataManager");dataManager.start();
            cronJobManager = new Thread(new CronJobManager(),"CronJobManager");cronJobManager.start();

            // Wait until all service threads are fully up before serving.
            countDownLatch.await();
            logger.info("DataNode is fully up now, pid:{}",pid);

            if (useNameNodeHA){
                HAThirdParty party = new ZKThirdParty(connectAddr,sessionTimeout,connectionTimeout,masterNodePath,baseSleepTimeMs,maxRetries);
                dataNodeNameNodeHA(party);
            }

            // Open a plain socket so the offline command can be sent via telnet.
            signalHandler();

        }catch(Exception ex) {
            logger.error("DataNode start error:",ex);
            CommandFactory.destruct();
            JobFactory.destruct();
        }
    }
129 |
130 | private static void signalHandler() {
131 | ServerSocket serverSocket;
132 | try {
133 | serverSocket = new ServerSocket(offlinePort);// 监听信号端口,telnet 127.0.0.1 6666 。 输入字母 k 按回车就行
134 | for (;;) {
135 | //接收客户端连接的socket对象
136 | try (Socket connection = serverSocket.accept()) {// 使用这个方式,try结束后会自动断开连接
137 | //接收客户端传过来的数据,会阻塞
138 | BufferedReader br = new BufferedReader(new InputStreamReader(connection.getInputStream()));
139 | String msg = br.readLine();
140 | logger.info("signalHandler received msg: {}",msg );
141 | Writer writer = new OutputStreamWriter(connection.getOutputStream());
142 | if (offlineCmd.equals(msg)){
143 | writer.append("going down");writer.flush();writer.close();
144 | dataTransfer();
145 | break;// 可以下线了,所以也不必监听这个端口了
146 | }else{
147 | writer.append("bad option");writer.flush();writer.close();
148 | }
149 | } catch (Exception e) {
150 | logger.error("signalHandler connection error", e);
151 | }
152 | }
153 | } catch (IOException e) {
154 | logger.error("signalHandler ServerSocket error",e);
155 | }
156 | }
157 |
158 | /**
159 | * 收到下线信号,先转移数据,阻塞至数据转移完成
160 | */
161 | private static void dataTransfer() throws InterruptedException {
162 | logger.warn("get offline signal");
163 | Heartbeat heartbeat = (Heartbeat) JobFactory.getJob("Heartbeat");
164 | heartbeat.run1(OFFLINE);// heartbeat对象的连接早已打开并且由定时任务一直保持着,所以主线程直接发起下线请示与数据转移工作
165 | dataNodeStatus = OFFLINE;
166 | while (DATA_POOL != null){// 依然在线
167 | Thread.sleep(5000);// 睡5秒再检查
168 | logger.debug("waiting for dataTransfer to complete");
169 | }
170 | // 数据已转移完毕并清空,可以下线
171 | logger.info("DataNode can be safely shutdown now,{}",pid);// DATA_POOL == null,数据转移完成,可以让运维手动关闭本进程了
172 | }
173 |
174 | private static void dataNodeNameNodeHA(HAThirdParty party ){
175 |
176 | party.getInstantMaster();
177 | Consumer consumer = s -> {
178 | if (s==null){
179 | logger.error("masterNode is down, waiting");
180 | }else{
181 | logger.info("masterNode may have changed:{}",s);
182 | HAHelper helper = new HAHelper(s);
183 | String thisNameNodeIP = helper.getDataNodeIP();
184 | String thisNameNodePort = helper.getDataNodePort();
185 |
186 | Heartbeat heartbeat = (Heartbeat) JobFactory.getJob("Heartbeat");
187 |
188 | if (!(thisNameNodeIP.equals(nameNodeIP)&&thisNameNodePort.equals(nameNodePort))){// 不同,那肯定需要重连
189 | logger.info("masterNode indeed have changed,reconnecting");
190 | heartbeat.disconnect();// 可能已经断掉了,但是加一下确保
191 | nameNodeIP = thisNameNodeIP;nameNodePort = thisNameNodePort;
192 | // heartbeat.connect();// 不需要在这里连接 定时任务心跳自己会去连接
193 | }else {// 相同,有可能断线后又上线
194 | if (!heartbeat.isConnected()){//断线了就重连
195 | heartbeat.disconnect();
196 | // heartbeat.connect();// 不需要在这里连接 定时任务心跳自己会去连接
197 | }
198 | // 否则就没断线,只是nameNode和高可用注册中心连接抖动
199 | }
200 | }
201 | };
202 |
203 | party.beginWatch(consumer);
204 | while (party.getMasterNode()==null);//忙等待
205 | logger.debug("present master NameNode:{}",party.getMasterNode());
206 |
207 | }
208 |
209 | }
210 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/res/CommandFactory.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.res;
2 |
3 | import cn.mageek.common.command.AbstractDataNodeCommand;
4 | import org.reflections.Reflections;
5 | import org.slf4j.Logger;
6 | import org.slf4j.LoggerFactory;
7 |
8 | import java.util.Map;
9 | import java.util.Set;
10 | import java.util.concurrent.ConcurrentHashMap;
11 | import static cn.mageek.datanode.main.DataNode.DATA_POOL;
12 |
13 | /**
14 | * Command 工厂类
15 | * @author Mageek Chiu
16 | * @date 2018/3/13 0013:21:49
17 | */
18 | public class CommandFactory {
19 | private static final String packagePrefix = "cn.mageek.datanode.command.";
20 | private static final Logger logger = LoggerFactory.getLogger(CommandFactory.class);
21 |
22 | private static volatile Map commandMap ;// 存储所有命令
23 | // private static volatile Map DATA_POOL ;// 数据存储池
24 |
25 |
26 | // public static void construct(Map dataPool) throws Exception {
27 | public static void construct() throws Exception {
28 | if(commandMap==null){//volatile+双重检查来实现单例模式
29 | synchronized (CommandFactory.class){
30 | if (commandMap==null){
31 | // Command 池 如果初始化不成功 整个程序就无法正常运转,所以不用try catch, 直接采用快速失败原则
32 | // DATA_POOL = dataPool;
33 | getAllCommands();
34 | logger.info("Command pool initialized, number : {}, DATA_POOL :{}",commandMap.size(),DATA_POOL.hashCode());
35 | }
36 | }
37 | }
38 | }
39 |
40 | public static AbstractDataNodeCommand getCommand(String commandId){
41 | return commandMap.get(commandId);
42 | }
43 |
44 |
45 | public static void destruct(){
46 | commandMap = null;
47 | }
48 |
49 |
50 | private static void getAllCommands() throws Exception {
51 | commandMap = new ConcurrentHashMap<>();
52 |
53 | Reflections reflections = new Reflections(packagePrefix);
54 |
55 | Set> subTypes = reflections.getSubTypesOf(AbstractDataNodeCommand.class);
56 |
57 | int idStart = packagePrefix.length()+7;
58 | for(Class clazz : subTypes){
59 | String className = clazz.getName();
60 | String commandId = className.substring(idStart);
61 | logger.debug("Command class found: {} , Id: {}",className,commandId);
62 | AbstractDataNodeCommand command = (AbstractDataNodeCommand)clazz.newInstance();
63 | // command.setDataPool(DATA_POOL);
64 | commandMap.put(commandId,command);
65 | }
66 | }
67 |
68 | }
69 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/res/ConstPool.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.res;
2 |
3 | /**
4 | * @author Mageek Chiu
5 | * @date 2018/4/2 0002:10:35
6 | */
/**
 * Shared string constants for the DataNode module.
 */
public final class ConstPool {

    private ConstPool() {
        // constants holder — no instances
    }

    // log-message prefixes ("L"): "executed successfully:" / "execution error:"
    public static final String L_SUCCESS = "执行成功:";
    public static final String L_ERROR = "执行错误:";

}
14 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/res/JobFactory.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.res;
2 |
3 | import cn.mageek.datanode.job.DataRunnable;
4 | import org.reflections.Reflections;
5 | import org.slf4j.Logger;
6 | import org.slf4j.LoggerFactory;
7 | import java.util.Map;
8 | import java.util.Set;
9 | import java.util.concurrent.ConcurrentHashMap;
10 |
11 | /**
12 | * AbstractDataNodeCommand 工厂类
13 | * @author Mageek Chiu
14 | * @date 2018/3/13 0013:21:49
15 | */
16 | public class JobFactory {
17 | private static String packagePrefix = "cn.mageek.datanode.job.";
18 | private static final Logger logger = LoggerFactory.getLogger(JobFactory.class);
19 | private static volatile Map jobMap;
20 | // private static volatile Map jobMap;
21 | // private static volatile Map DATA_POOL ;// 数据存储池
22 |
23 |
24 | // public static void construct(Map dataPool) throws Exception {
25 | public static void construct() throws Exception {
26 |
27 | if(jobMap ==null){//volatile+双重检查来实现单例模式
28 | synchronized (JobFactory.class){
29 | if (jobMap == null){
30 | // DATA_POOL = dataPool;
31 | getAllJobs();
32 | logger.info("Job pool initialized, number : {}", jobMap.size());
33 | }
34 | }
35 | }
36 | }
37 |
38 | public static Runnable getJob(String jobName){
39 | return jobMap.get(jobName);
40 | }
41 |
42 | public static void destruct(){
43 | jobMap = null;
44 | }
45 |
46 |
47 | private static void getAllJobs() throws Exception {
48 | jobMap = new ConcurrentHashMap<>();
49 |
50 | Reflections reflections = new Reflections(packagePrefix);
51 |
52 | Set> subTypes = reflections.getSubTypesOf(DataRunnable.class);
53 | // Set> subTypes = reflections.getSubTypesOf(Runnable.class);
54 | int idStart = packagePrefix.length();
55 | for(Class clazz : subTypes){
56 | String className = clazz.getName();
57 | String jobName = className.substring(idStart);
58 | logger.debug("Job class found: {} , jobName: {}",className,jobName);
59 | DataRunnable r = (DataRunnable) clazz.newInstance();
60 | r.connect();//
61 | // Runnable r = (Runnable) clazz.newInstance();
62 | jobMap.put(jobName,r);
63 | }
64 | }
65 |
66 | }
67 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/service/CronJobManager.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.service;
2 |
3 | import cn.mageek.datanode.job.DataRunnable;
4 | import cn.mageek.datanode.job.MSSync;
5 | import cn.mageek.datanode.res.CommandFactory;
6 | import cn.mageek.datanode.res.JobFactory;
7 | import org.slf4j.Logger;
8 | import org.slf4j.LoggerFactory;
9 |
10 | import java.io.InputStream;
11 | import java.net.InetAddress;
12 | import java.net.NetworkInterface;
13 | import java.net.UnknownHostException;
14 | import java.util.Enumeration;
15 | import java.util.Properties;
16 | import java.util.concurrent.CountDownLatch;
17 | import java.util.concurrent.Executors;
18 | import java.util.concurrent.ScheduledExecutorService;
19 | import java.util.concurrent.TimeUnit;
20 |
21 | import static cn.mageek.common.util.PropertyLoader.load;
22 | import static cn.mageek.datanode.main.DataNode.countDownLatch;
23 |
24 | /**
25 | * @author Mageek Chiu
26 | * @date 2018/3/7 0007:20:24
27 | */
28 | public class CronJobManager implements Runnable {
29 |
30 | private static final Logger logger = LoggerFactory.getLogger(CronJobManager.class);
31 | // 如果任务执行过程中抛出了异常,那么过ScheduledExecutorService就会停止执行任务,且也不会再周期地执行该任务了。所以你如果想保住任务,那么要在任务里面catch住一切可能的异常。
32 | private static ScheduledExecutorService scheduledExecutorService = Executors.newScheduledThreadPool(3);
33 |
34 | private static int heartbeat = 10;
35 | private static int expireChecking = -1;
36 | private static int sync = 10;
37 |
38 | public static boolean useHA = false;
39 | private static String slaveIPPort = "";
40 | private static String vip = "";
41 | public static volatile boolean isMaster = false;
42 |
43 |
44 | static {
45 | try (InputStream in = ClassLoader.class.getResourceAsStream("/app.properties")){
46 | Properties pop = new Properties();
47 | pop.load(in);
48 | expireChecking = Integer.parseInt(load(pop, "datanode.interval.expireChecking"));
49 | heartbeat = Integer.parseInt(load(pop, "datanode.interval.heartbeat"));
50 | useHA = Boolean.parseBoolean(load(pop, "datanode.useHA"));
51 | if (useHA){
52 | sync = Integer.parseInt(load(pop, "datanode.interval.sync"));
53 | heartbeat = Integer.parseInt(load(pop, "datanode.interval.heartbeat"));
54 | isMaster = Boolean.parseBoolean(load(pop, "datanode.isMaster"));
55 | slaveIPPort = load(pop, "datanode.slave");
56 | vip = load(pop, "datanode.vip");
57 |
58 | }
59 | logger.debug("config interval heartbeat:{},expireChecking:{},useHA:{},isMaster:{},slaveIPPort:{},sync:{}", heartbeat, expireChecking,useHA,isMaster,slaveIPPort,sync);
60 | } catch (Exception ex) {
61 | logger.error("tart error:", ex);
62 | CommandFactory.destruct();
63 | JobFactory.destruct();
64 | }
65 | }
66 |
67 | public void run() {
68 |
69 | // 定时向namenode发起心跳
70 | scheduledExecutorService.scheduleAtFixedRate( JobFactory.getJob("Heartbeat"),1,heartbeat, TimeUnit.SECONDS);// 固定每隔多少时间执行一下任务,但是上一个任务结束才会执行下一个,保证任务执行的频率
71 | // scheduledExecutorService.scheduleWithFixedDelay( JobFactory.getJob("Heartbeat"),2,10, TimeUnit.SECONDS);// 上一个任务结束后延迟delay时间执行下一个任务,保证任务执行的间隔
72 |
73 | // 定时同步到从节点
74 | MSSync msSync = (MSSync) JobFactory.getJob("MSSync");
75 | msSync.connect(slaveIPPort);
76 | scheduledExecutorService.scheduleAtFixedRate( msSync,15,sync, TimeUnit.SECONDS);
77 |
78 | // 定时检查过期键
79 | if (expireChecking>0)// 大于0 才使用定期检查,否则就是 lazy delete
80 | scheduledExecutorService.scheduleAtFixedRate( JobFactory.getJob("ExpireChecking"),2,expireChecking, TimeUnit.SECONDS);//
81 |
82 | logger.info("CronJobManager is up now");
83 | countDownLatch.countDown();
84 |
85 | // 定期检查自己是否被keepalived分配了vip
86 | if (useHA){
87 | for (;;){
88 | try {
89 | InetAddress thisIP = null ;
90 | // 遍历所有的网络接口
91 | for (Enumeration ifaces = NetworkInterface.getNetworkInterfaces();ifaces.hasMoreElements();){
92 | NetworkInterface iface = (NetworkInterface) ifaces.nextElement();
93 | // 在所有的接口下再遍历IP,如果找到vip就break
94 | for (Enumeration inetAddrs = iface.getInetAddresses(); inetAddrs.hasMoreElements(); ) {
95 | InetAddress inetAddr = (InetAddress) inetAddrs.nextElement();
96 | if (inetAddr.isSiteLocalAddress() && inetAddr.toString().substring(1).equals(vip)) {//
97 | thisIP = inetAddr;
98 | break;
99 | }
100 | }
101 | }
102 | if(thisIP != null){// 找到vip
103 | if (isMaster){// 继续运行
104 | logger.debug("continue running");
105 | }else{// 备机该上线了
106 | logger.info("slave is up");
107 | isMaster = true;
108 | }
109 | }else {// 没找到
110 | if (isMaster){// 主机宕机
111 | logger.error("master is down");
112 | isMaster = false;
113 | }else{// 是备机
114 | logger.debug("continue standby");
115 | }
116 | }
117 | Thread.sleep(3000);
118 | if (Thread.interrupted()) break;
119 | } catch (Exception e) {
120 | logger.error("checking vip error",e);
121 | break;
122 | }
123 | }
124 | }
125 | }
126 |
127 | }
128 |
--------------------------------------------------------------------------------
/DataNode/src/main/java/cn/mageek/datanode/service/DataManager.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.datanode.service;
2 |
3 | import cn.mageek.datanode.handler.*;
4 | import io.netty.bootstrap.ServerBootstrap;
5 | import io.netty.channel.*;
6 | import io.netty.channel.nio.NioEventLoopGroup;
7 | import io.netty.channel.socket.SocketChannel;
8 | import io.netty.channel.socket.nio.NioServerSocketChannel;
9 | import io.netty.handler.timeout.ReadTimeoutHandler;
10 | import org.slf4j.Logger;
11 | import org.slf4j.LoggerFactory;
12 |
13 | import java.io.IOException;
14 | import java.io.InputStream;
15 | import java.util.Properties;
16 |
17 | import static cn.mageek.common.util.PropertyLoader.loadWorkThread;
18 | import static cn.mageek.datanode.main.DataNode.clientPort;
19 | import static cn.mageek.datanode.main.DataNode.countDownLatch;
20 |
21 | /**
22 | * 管理来自client或者其他DataNode的data请求,保持连接
23 | * @author Mageek Chiu
24 | * @date 2018/5/5 0007:20:18
25 | */
26 | public class DataManager implements Runnable{
27 | private static final Logger logger = LoggerFactory.getLogger(DataManager.class);
28 | private static int workThread ;
29 |
30 | static {
31 | try( InputStream in = ClassLoader.class.getResourceAsStream("/app.properties")) {
32 | Properties pop = new Properties(); pop.load(in);
33 | workThread = loadWorkThread(pop,"datanode.workThread"); // IO线程
34 | logger.debug("config clientPort:{},workThread:{}", clientPort,workThread);
35 | } catch (IOException e) {
36 | logger.error("read config error",e);
37 | }
38 | }
39 |
40 | public void run() {
41 |
42 | EventLoopGroup bossGroup = new NioEventLoopGroup(1);//接收连接
43 | EventLoopGroup workerGroup = new NioEventLoopGroup(workThread);//处理连接的I/O事件
44 |
45 | try {
46 | ServerBootstrap b = new ServerBootstrap();
47 | b.group(bossGroup, workerGroup)
48 | .channel(NioServerSocketChannel.class) // 新建一个channel
49 | .option(ChannelOption.SO_BACKLOG, 512)//最大等待连接
50 | .childHandler(new ChannelInitializer() {
51 | @Override
52 | public void initChannel(SocketChannel ch){
53 | ChannelPipeline p = ch.pipeline();
54 | p.addLast("ReadTimeoutHandler",new ReadTimeoutHandler(100));// in // 多少秒超时
55 | p.addLast("SendMsgHandler",new SendMsgHandler());// out //发送消息编码
56 | p.addLast("ClientHandler",new ClientHandler());// in //连接管理
57 | // 下面这两都不管用,尚不清楚原因
58 | // p.addLast("DelimiterBasedFrameDecoder",new DelimiterBasedFrameDecoder(2048,true,true,Unpooled.copiedBuffer("\r\n".getBytes())));// in //基于分隔符的协议解码
59 | // p.addLast("StringDecoder",new StringDecoder());// in // 字符串解码器
60 | // p.addLast("LineBasedFrameDecoder",new LineBasedFrameDecoder(1024,true,true));// in //基于行的协议解码
61 | // p.addLast("StringDecoder",new StringDecoder());// in // 字符串解码器
62 | p.addLast("RcvMsgHandler",new RcvMsgHandler());// in //将行数据解码为消息对象
63 | p.addLast("BusinessHandler",new BusinessHandler());// in //解析业务数据,没有特别耗时的操作,还是不要切换线程
64 |
65 | }
66 | });
67 |
68 | // Start the server. 采用同步等待的方式
69 | ChannelFuture f = b.bind(Integer.parseInt(clientPort)).sync();
70 | logger.info("DataManager is up now and listens on {}", f.channel().localAddress());
71 | countDownLatch.countDown();
72 |
73 | // Wait until the server socket is closed.
74 | f.channel().closeFuture().sync();
75 | logger.info("DataManager is down");
76 |
77 | } catch (InterruptedException e) {
78 | logger.error("DataManager start error: ", e);
79 | } finally {
80 | workerGroup.shutdownGracefully();
81 | bossGroup.shutdownGracefully();
82 | }
83 |
84 | }
85 |
86 |
87 |
88 |
89 | }
90 |
91 |
92 |
93 |
94 |
--------------------------------------------------------------------------------
/DataNode/src/main/resources/app.properties:
--------------------------------------------------------------------------------
# graceful-offline control: telnet to this port and send the command below to take the node offline; must differ between nodes on the same host
2 | datanode.offline.port=20000
3 | datanode.offline.cmd=k
4 |
5 | # workThread number
6 | datanode.workThread=2
7 |
8 | # expire strategy, <= 0 : lazy deleting only ; > 0 lazy and periodically deleting both ,interval seconds
9 | datanode.interval.expireChecking=5
10 | datanode.interval.heartbeat=10
11 | datanode.useHA=false
12 | datanode.isMaster=true
13 | datanode.slave=192.168.0.136:10099
14 | datanode.interval.sync=10
15 | datanode.vip=192.168.0.136
16 |
17 |
18 | #datanode export this ip to client; different among any node
19 | datanode.client.ip=192.168.0.136
20 | datanode.client.port=10100
21 |
22 | #datanode use this ip to connect to namenode; same among all node
23 | datanode.namenode.ip=192.168.0.136
24 | datanode.namenode.port=10101
25 |
26 | datanode.useNameNodeHA=false
27 | datanode.zk.connectAddr=127.0.0.1:2181,127.0.0.1:3181,127.0.0.1:4181
28 | datanode.zk.sessionTimeout=2000
29 | datanode.zk.connectionTimeout=8000
30 | datanode.zk.masterNodePath=/CHKV/masterNode
31 | datanode.zk.baseSleepTimeMs=1000
32 | datanode.zk.maxRetries=10
33 |
34 |
--------------------------------------------------------------------------------
/DataNode/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | ### 设置root###
2 | log4j.rootLogger = debug,stdout,D,E
3 |
4 |
5 | ### 输出信息到控制抬 ###
6 | log4j.appender.stdout = org.apache.log4j.ConsoleAppender
7 | log4j.appender.stdout.Target = System.out
8 | log4j.appender.stdout.layout = org.apache.log4j.PatternLayout
9 | log4j.appender.stdout.layout.ConversionPattern = [%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} %l:%m%n
10 |
11 |
12 | ### 输出DEBUG 级别以上的日志到=debug.log ###
13 | # daily 表示一天一个文件
14 | log4j.appender.D = org.apache.log4j.DailyRollingFileAppender
15 | log4j.appender.D.File = ./DataNode/src/main/java/cn/mageek/datanode/log/debug.log
16 | log4j.appender.D.DatePattern=yyyy-MM-dd-HH'.log'
17 | log4j.appender.D.Append = true
18 | log4j.appender.D.Threshold = DEBUG
19 | log4j.appender.D.layout = org.apache.log4j.PatternLayout
20 | log4j.appender.D.layout.ConversionPattern = [%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} %l:%m%n
21 |
22 |
23 | ### 输出ERROR 级别以上的日志到=error.log ###
24 | log4j.appender.E = org.apache.log4j.DailyRollingFileAppender
25 | log4j.appender.E.File = ./DataNode/src/main/java/cn/mageek/datanode/log/error.log
26 | log4j.appender.E.DatePattern=yyyy-MM-dd-HH'.log'
27 | log4j.appender.E.Append = true
28 | log4j.appender.E.Threshold = ERROR
29 | log4j.appender.E.layout = org.apache.log4j.PatternLayout
30 | log4j.appender.E.layout.ConversionPattern = [%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} %l:%m%n
31 |
32 |
33 | ## 调整每个模块的日志级别##
34 | log4j.logger.cn.mageek.datanode=debug
35 | log4j.logger.cn.mageek.common=debug
36 | #log4j.logger.cn.mageek.datanode=info
37 | #log4j.logger.cn.mageek.common=info
38 | #log4j.logger.cn.mageek.datanode=error
39 | #log4j.logger.cn.mageek.common=error
40 |
41 | log4j.logger.io.netty=warn
42 |
43 | log4j.logger.org.reflections=info
44 |
45 | log4j.logger.org.apache.zookeeper=info
--------------------------------------------------------------------------------
/NameNode/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 |
6 | CHKV
7 | cn.mageek
8 | 1.0-SNAPSHOT
9 | ../pom.xml
10 |
11 | 4.0.0
12 | NameNode
13 | jar
14 |
15 |
16 |
17 |
18 | cn.mageek
19 | Common
20 | 1.0-SNAPSHOT
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 | org.apache.maven.plugins
29 | maven-compiler-plugin
30 | 3.7.0
31 |
32 | 1.8
33 | 1.8
34 |
35 |
36 |
37 | maven-assembly-plugin
38 | 3.1.0
39 |
40 |
41 |
42 | cn.mageek.namenode.main.NameNode
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 | com.spotify
53 | dockerfile-maven-plugin
54 | 1.3.4
55 |
56 | ${docker.image.prefix}/${project.artifactId}
57 |
58 |
59 |
60 |
61 |
62 |
--------------------------------------------------------------------------------
/NameNode/src/main/java/cn/mageek/namenode/handler/ClientWatcherHandler.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.namenode.handler;
2 |
3 | import cn.mageek.common.model.WatchRequest;
4 | import cn.mageek.common.model.WatchResponse;
5 | import io.netty.channel.Channel;
6 | import io.netty.channel.ChannelHandlerContext;
7 | import io.netty.channel.ChannelInboundHandlerAdapter;
8 | import org.slf4j.Logger;
9 | import org.slf4j.LoggerFactory;
10 | import java.util.Map;
11 | import java.util.concurrent.ConcurrentSkipListMap;
12 |
13 | import static cn.mageek.namenode.main.NameNode.clientMap;
14 | import static cn.mageek.namenode.main.NameNode.sortedServerMap;
15 |
16 | /**
17 | * @author Mageek Chiu
18 | * @date 2018/5/7 0007:13:52
19 | */
20 | public class ClientWatcherHandler extends ChannelInboundHandlerAdapter {
21 | private static final Logger logger = LoggerFactory.getLogger(ClientWatcherHandler.class);
22 | // private ConcurrentSkipListMap sortedServerMap;//管理所有datanode key:DataNode hash ,value: IP:port
23 | // private Map clientMap;// 管理所有client,key ip:port,value chanel
24 |
25 | // public ClientWatcherHandler(ConcurrentSkipListMap sortedServerMap, Map clientMap){
26 | // this.sortedServerMap = sortedServerMap;
27 | // this.clientMap = clientMap;
28 | // }
29 |
30 | @Override
31 | public void channelActive(ChannelHandlerContext ctx) throws Exception {
32 | String connection = ctx.channel().remoteAddress().toString();
33 | clientMap.put(connection,ctx.channel());
34 | logger.info("new connection arrived: {} clients living {}",connection, clientMap.size());
35 | }
36 |
37 | @Override
38 | public void channelInactive(ChannelHandlerContext ctx) throws Exception {
39 | String connection = ctx.channel().remoteAddress().toString();
40 | clientMap.remove(connection);
41 | logger.info("connection closed: {},uuid:{}, clients living {}",connection, clientMap.size());
42 | }
43 |
44 | @Override
45 | public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
46 | WatchRequest request = (WatchRequest)msg ;
47 | logger.debug("NameNode received: {}" , request);
48 | if (request.isImmediately()){// 需要立即回复,一般是刚上线的时候;否则就等节点变化时NameNode主动通知就行了
49 | WatchResponse response = new WatchResponse(sortedServerMap);
50 | logger.debug("NameNode answered: {}" , response);
51 | ctx.writeAndFlush(response);
52 | }
53 | }
54 |
55 | @Override
56 | public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
57 | logger.error("receiveMsg from: {},error: ",ctx.channel().remoteAddress(),cause);
58 | ctx.close();
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/NameNode/src/main/java/cn/mageek/namenode/handler/DataNodeHeartBeatHandler.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.namenode.handler;
2 |
3 | import cn.mageek.common.model.HeartbeatRequest;
4 | import cn.mageek.common.model.HeartbeatResponse;
5 | import cn.mageek.common.util.ConsistHash;
6 | import cn.mageek.namenode.main.NameNode;
7 | import io.netty.channel.Channel;
8 | import io.netty.channel.ChannelHandlerContext;
9 | import io.netty.channel.ChannelInboundHandlerAdapter;
10 | import org.slf4j.Logger;
11 | import org.slf4j.LoggerFactory;
12 | import java.util.concurrent.atomic.AtomicReference;
13 |
14 | import static cn.mageek.common.model.HeartbeatType.*;
15 | import static cn.mageek.namenode.main.NameNode.dataNodeClientMap;
16 | import static cn.mageek.namenode.main.NameNode.dataNodeMap;
17 | import static cn.mageek.namenode.main.NameNode.sortedServerMap;
18 |
19 | /**
20 | * @author Mageek Chiu
21 | * @date 2018/5/7 0007:13:52
22 | */
23 | public class DataNodeHeartBeatHandler extends ChannelInboundHandlerAdapter {
24 | private static final Logger logger = LoggerFactory.getLogger(DataNodeHeartBeatHandler.class);
25 |
26 | @Override
27 | public void channelActive(ChannelHandlerContext ctx) throws Exception {
28 | String connection = ctx.channel().remoteAddress().toString();
29 | // 维护集合
30 | dataNodeMap.put(connection,ctx.channel());
31 | logger.info("new connection arrived: {}, DataNode living {}",connection, dataNodeMap.size());//包含ip:port
32 | }
33 |
34 | @Override
35 | public void channelInactive(ChannelHandlerContext ctx) throws Exception {
36 | String connection = ctx.channel().remoteAddress().toString();
37 | // 维护集合
38 | dataNodeMap.remove(connection);//
39 | logger.info("connection closed: {}, DataNode living {}",connection,dataNodeMap.size());
40 |
41 | String IPPort = dataNodeClientMap.get(connection);
42 | int hash = ConsistHash.getHash(IPPort);
43 | if (sortedServerMap.containsKey(hash)){//该节点未发送OFFLINE请求就下线了
44 | logger.error("DataNode {} get offline before request from NameNode, IPPort {}",connection,IPPort);
45 | }
46 | }
47 |
48 | @Override
49 | public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
50 | String connection = ctx.channel().remoteAddress().toString();
51 | HeartbeatRequest request = (HeartbeatRequest)msg ;
52 | // logger.debug("NameNode received: {}" , request);
53 | HeartbeatResponse response = handleStatus(connection,request);
54 | logger.debug("NameNode answered: {}" , response);
55 | ctx.writeAndFlush(response);
56 | }
57 |
58 | @Override
59 | public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
60 | logger.error("receiveMsg from: {},error: ",ctx.channel().remoteAddress(),cause);
61 | ctx.close();
62 | }
63 |
64 | private HeartbeatResponse handleStatus(String connection,HeartbeatRequest request){
65 | HeartbeatResponse response;
66 | String IPPort,nextIPPort;
67 | // ConsistHash hash = new ConsistHash(sortedServerMap,dataNodeClientMap);// 此时dataNodeClientMap是最新的,据此更新sortedServerMap// 频繁创建对象不合适,还是换成静态函数比较好
68 | switch (request.getStatus()){
69 | case ONLINE : // 上线,要通知环中下一台dataNode分一部分数据给本个dataNode
70 | // 维护集合,加入
71 | IPPort = request.getIPPort();
72 | dataNodeClientMap.put(connection,IPPort);
73 | ConsistHash.circleAdd(sortedServerMap,IPPort);// 将IPPort加入hash环sortedServerMap
74 | NameNode.dataNodeChanged = true;// 维护后后发出通知
75 |
76 | nextIPPort = ConsistHash.getServer(sortedServerMap,IPPort,true);// 根据hash环找到下一台服务器
77 | // 根据nextIPPort找到nextConnection,再找到nextChannel,然后就可以发消息了
78 | AtomicReference nextConnection = new AtomicReference<>("");
79 | dataNodeClientMap.forEach((k,v)->{
80 | if (v.equals(nextIPPort)) nextConnection.set(k);
81 | });
82 | logger.debug("{} ONLINE, nextIPPort {}, nextConnection {}",IPPort,nextIPPort,nextConnection.get());
83 | response = new HeartbeatResponse(true, dataNodeClientMap.size(),null);//connection依然运行,不需要转移数据
84 | if (IPPort.equals(nextIPPort)){// 是第一台上线,跳过处理步骤
85 | logger.info("only 1 dataNode {}, Skip the procedure",IPPort);
86 | break;
87 | }
88 | // 否则就不是第一台上线,给下一个节点发消息
89 | Channel nextChannel = dataNodeMap.get(nextConnection.get());
90 | if (nextChannel != null){// 正常
91 | HeartbeatResponse response1 = new HeartbeatResponse(true, dataNodeClientMap.size(),IPPort);// nextConnection依然运行,只是分一部分数据给connection
92 | logger.debug("NameNode pushed : {} to {}" ,response1,connection);
93 | nextChannel.writeAndFlush(response1);
94 | }else {// 下一个节点已经下线,但是没有维护hash环。也就是该节点未发送OFFLINE请求就下线了,属于失效
95 | logger.error("next DataNode get offline before request from NameNode");
96 | }
97 | break;
98 |
99 | case OFFLINE : // 下线,要通知本个dataNode把数据全部转移至环中下一台dataNode
100 | // 维护集合,删除
101 | IPPort = request.getIPPort();
102 | dataNodeClientMap.remove(connection);
103 | ConsistHash.cirlceDel(sortedServerMap,IPPort);
104 | NameNode.dataNodeChanged = true;// 维护后发出通知
105 |
106 | nextIPPort = ConsistHash.getServer(sortedServerMap,IPPort,true);
107 | logger.debug("{} OFFLINE, nextIPPort {}",IPPort,nextIPPort);
108 | if (IPPort.equals(nextIPPort)){// 是最后一台下线
109 | logger.info("only 1 dataNode {}, Skip the procedure",IPPort);
110 | response = new HeartbeatResponse(false, dataNodeClientMap.size(),null);/// connection不再运行,不需要转移数据
111 | break;
112 | }
113 | response = new HeartbeatResponse(false, dataNodeClientMap.size(),nextIPPort);// connection不再运行,全部数据转移给nextConnection
114 | break;
115 |
116 | case RUNNING : // 在线,无变化
117 | logger.debug("{} RUNNING",request.getIPPort());
118 | response = new HeartbeatResponse(true, dataNodeClientMap.size(),null);//connection依然运行,不需要转移数据
119 | break;
120 |
121 | default:
122 | logger.error("error status {}",request);
123 | response = new HeartbeatResponse(false, dataNodeClientMap.size(),null);// 非正常数据
124 | }
125 |
126 | return response;
127 | }
128 |
129 | }
130 |
--------------------------------------------------------------------------------
/NameNode/src/main/java/cn/mageek/namenode/log/keep.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MageekChiu/CHKV/82204cb94fee417e983c9cd602031ef0d8f2948b/NameNode/src/main/java/cn/mageek/namenode/log/keep.txt
--------------------------------------------------------------------------------
/NameNode/src/main/java/cn/mageek/namenode/main/NameNode.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.namenode.main;
2 |
3 | import cn.mageek.common.ha.HAThirdParty;
4 | import cn.mageek.common.ha.ZKThirdParty;
5 | import cn.mageek.common.model.WatchResponse;
6 | import cn.mageek.namenode.res.CommandFactory;
7 | import cn.mageek.namenode.res.CronJobFactory;
8 | import cn.mageek.namenode.service.ClientManager;
9 | import cn.mageek.namenode.service.CronJobManager;
10 | import cn.mageek.namenode.service.DataNodeManager;
11 | import io.netty.channel.Channel;
12 | import org.slf4j.Logger;
13 | import org.slf4j.LoggerFactory;
14 |
15 | import java.io.InputStream;
16 | import java.lang.management.ManagementFactory;
17 | import java.util.Map;
18 | import java.util.Properties;
19 | import java.util.concurrent.ConcurrentHashMap;
20 | import java.util.concurrent.ConcurrentSkipListMap;
21 | import java.util.concurrent.CountDownLatch;
22 | import java.util.function.Consumer;
23 |
24 | import static cn.mageek.common.util.HAHelper.getNodeString;
25 | import static cn.mageek.common.util.PropertyLoader.load;
26 | import static cn.mageek.common.util.PropertyLoader.loadWorkThread;
27 |
28 | /**
29 | * 管理本应用的所有服务
30 | * @author Mageek Chiu
31 | * @date 2018/3/5 0005:19:26
32 | */
33 |
34 | public class NameNode {
35 |
36 | private static final Logger logger = LoggerFactory.getLogger(NameNode.class);
37 |
38 | // 存储信息
39 | // key表示dataNode的/ip:port,value表示对应信道 , 上线就有,并且维护,保证map里面的连接都是活跃的
40 | public static volatile Map dataNodeMap = new ConcurrentHashMap<>();//管理所有datanode 连接
41 | // key表示dataNode的/ip:port,value表示其对客户端开放的ip:port , 发送心跳就有
42 | public static volatile Map dataNodeClientMap = new ConcurrentHashMap<>();//管理所有datanode 及其对client开放的IP与端口
43 | // key表示dataNode的hash值,value表示其对客户端开放的ip:port , 发送心跳状态更新后就有
44 | public static volatile ConcurrentSkipListMap sortedServerMap = new ConcurrentSkipListMap<>();//管理所有datanode 对应 hash 和client开放的IP与端口
45 | // key表示client的/ip:port,value表示对应信道
46 | public static volatile Map clientMap = new ConcurrentHashMap<>();//管理所有client 连接
47 |
48 | // 辅助参数
49 | public static volatile boolean dataNodeChanged = false;// DataNode有无变化
50 | public static volatile CountDownLatch countDownLatch;//任务个数
51 |
52 | // 配置信息
53 | public static String dataNodePort;
54 | public static int dataNodeThread;
55 | public static String clientPort;
56 | public static int clientThread;
57 |
58 | // HA 信息
59 | private static boolean useHA;
60 | private static String thisNode;
61 | private static String masterNode;
62 | private static String connectAddr ;
63 | private static int sessionTimeout;
64 | private static int connectionTimeout;
65 | private static String masterNodePath ;
66 | private static int baseSleepTimeMs ;
67 | private static int maxRetries ;
68 |
69 |
70 | static {
71 | try( InputStream in = ClassLoader.class.getResourceAsStream("/app.properties")) {
72 | Properties pop = new Properties();
73 | pop.load(in);
74 | dataNodePort = load(pop,"namenode.datanode.port");// 对dataNode开放的端口
75 | dataNodeThread = loadWorkThread(pop,"namenode.datanode.workThread");
76 | clientPort = load(pop,"namenode.client.port");// 对client开放的端口
77 | clientThread = loadWorkThread(pop,"namenode.client.workThread");
78 | logger.debug("config dataNodePort:{},dataNodeThread:{},clientPort:{},clientThread:{}", dataNodePort,dataNodeThread,clientPort,clientThread);
79 |
80 | useHA = Boolean.parseBoolean(load(pop,"namenode.useHA"));
81 | if (useHA){
82 | logger.info("using HA");
83 | String dataNodeIP = load(pop,"namenode.datanode.ip");
84 | String clientIP = load(pop,"namenode.client.ip");
85 | thisNode = getNodeString(dataNodeIP,dataNodePort,clientIP,clientPort);
86 |
87 | connectAddr = load(pop,"namenode.zk.connectAddr");//
88 | sessionTimeout = Integer.parseInt(load(pop,"namenode.zk.sessionTimeout")); //
89 | connectionTimeout = Integer.parseInt(load(pop,"namenode.zk.connectionTimeout")); //
90 | masterNodePath = load(pop,"namenode.zk.masterNodePath"); //
91 | baseSleepTimeMs = Integer.parseInt(load(pop,"namenode.zk.baseSleepTimeMs")); //
92 | maxRetries = Integer.parseInt(load(pop,"namenode.zk.maxRetries")); //
93 | logger.debug("config connectAddr:{},sessionTimeout:{},connectionTimeout{},masterNodePath:{},baseSleepTimeMs:{},maxRetries:{}", connectAddr,sessionTimeout,connectionTimeout,masterNodePath,baseSleepTimeMs,maxRetries);
94 | }else {
95 | logger.info("not using HA");
96 | }
97 | } catch (Exception e) {
98 | logger.error("read config error",e);
99 | }
100 | }
101 |
102 | public static void main(String[] args){
103 | Thread.currentThread().setName("NameNode");
104 |
105 | int jobNumber = 3;countDownLatch = new CountDownLatch(jobNumber);
106 | Thread dataNodeManager,clientManager,cronJobManager;
107 |
108 | try{
109 | // 初始化命令对象
110 | CommandFactory.construct();
111 | // 初始化定时任务对象
112 | CronJobFactory.construct();
113 |
114 | dataNodeManager = new Thread(new DataNodeManager(),"DataNodeManager");dataNodeManager.start();
115 | clientManager = new Thread(new ClientManager(),"ClientManager");clientManager.start();
116 | cronJobManager = new Thread(new CronJobManager(),"CronJobManager");cronJobManager.start();
117 |
118 | countDownLatch.await();//等待其他几个线程完全启动,然后才能对外提供服务
119 | logger.info("NameNode is fully up now ,jobNumber :{},pid:{}",jobNumber,ManagementFactory.getRuntimeMXBean().getName());
120 |
121 | //HA相关
122 | if (useHA){
123 | HAThirdParty party = new ZKThirdParty(connectAddr,sessionTimeout,connectionTimeout,masterNodePath,baseSleepTimeMs,maxRetries);
124 | nameNodeHA(party);
125 | }
126 |
127 | // 开始监控DataNode变更事件
128 | dataNodeWatcher();
129 |
130 | }catch(Exception ex) {
131 | logger.error("NameNode start error:",ex);
132 | CommandFactory.destruct();
133 | CronJobFactory.destruct();
134 | }
135 | }
136 |
137 | private static void dataNodeWatcher() throws InterruptedException {
138 | //noinspection InfiniteLoopStatement
139 | for (;;){
140 | if (dataNodeChanged){
141 | dataNodeChanged = false;
142 | logger.info("DataNode dataNodeChanged,now {},\r\n client:{}",sortedServerMap,clientMap);
143 | WatchResponse watchResponse = new WatchResponse(sortedServerMap);
144 | clientMap.forEach((k,v)-> v.writeAndFlush(watchResponse));
145 | }
146 | Thread.sleep(5000);// 每隔5秒检测一次是否有变化
147 | }
148 | }
149 |
150 | private static void nameNodeHA(HAThirdParty party ){
151 |
152 | // 下面代码都与具体HA实现无关,能够复用
153 | party.setThisNode(thisNode);
154 | boolean result = party.becomeMaster();
155 | if (result){
156 | logger.info("Successfully Became Master");
157 | }else {
158 | logger.info("Failed to Became Master");
159 | }
160 | masterNode = party.getInstantMaster();
161 | boolean result1 = thisNode.equals(masterNode);
162 | if (result1){
163 | logger.info("Confirmed, I am the Master,masterNode;{}",masterNode);
164 | }else {
165 | logger.info("Confirmed,I am the Standby,masterNode;{}",masterNode);
166 | }
167 |
168 | Consumer consumer = s -> {
169 | if (s==null){
170 | logger.error("master NameNode is down, try to become Master");
171 | if (party.becomeMaster()){
172 | logger.info("Successfully tried to Became Master");
173 | }else {
174 | logger.info("Failed to try to Became Master");
175 | }
176 | }else{
177 | masterNode = s;
178 | logger.info("masterNode may changed:{}",masterNode);
179 | }
180 | };
181 | party.beginWatch(consumer);
182 | }
183 | }
184 |
--------------------------------------------------------------------------------
/NameNode/src/main/java/cn/mageek/namenode/res/CommandFactory.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.namenode.res;
2 |
3 | import cn.mageek.common.command.AbstractDataNodeCommand;
4 | import org.reflections.Reflections;
5 | import org.slf4j.Logger;
6 | import org.slf4j.LoggerFactory;
7 |
8 | import java.util.Map;
9 | import java.util.Set;
10 | import java.util.concurrent.ConcurrentHashMap;
11 |
12 | /**
13 | * AbstractDataNodeCommand 工厂类
14 | * @author Mageek Chiu
15 | * @date 2018/3/13 0013:21:49
16 | */
17 | public class CommandFactory {
18 | private static final String packagePrefix = "cn.mageek.datanode.command.";
19 | private static final Logger logger = LoggerFactory.getLogger(CommandFactory.class);
20 |
21 | private static volatile Map commandMap ;// 存储所有命令
22 |
23 | public static void construct() throws Exception {
24 | if(commandMap==null){//volatile+双重检查来实现单例模式
25 | synchronized (CommandFactory.class){
26 | if (commandMap==null){
27 | // AbstractDataNodeCommand 池 如果初始化不成功 整个程序就无法正常运转,所以不用try catch, 直接采用快速失败原则
28 | getAllCommands();
29 | logger.info("AbstractDataNodeCommand pool initialized, number : {}",commandMap.size());
30 | }
31 | }
32 | }
33 | }
34 |
35 | public static AbstractDataNodeCommand getCommand(String commandId){
36 | return commandMap.get(commandId);
37 | }
38 |
39 | public static void destruct(){
40 | commandMap = null;
41 | }
42 |
43 |
44 | private static void getAllCommands() throws Exception {
45 | commandMap = new ConcurrentHashMap<>();
46 |
47 | Reflections reflections = new Reflections(packagePrefix);
48 |
49 | Set> subTypes = reflections.getSubTypesOf(AbstractDataNodeCommand.class);
50 |
51 | int idStart = packagePrefix.length()+7;
52 | for(Class clazz : subTypes){
53 | String className = clazz.getName();
54 | String commandId = className.substring(idStart);
55 | logger.debug("AbstractDataNodeCommand class found: {} , Id: {}",className,commandId);
56 | AbstractDataNodeCommand command = (AbstractDataNodeCommand)clazz.newInstance();
57 | commandMap.put(commandId,command);
58 | }
59 | }
60 |
61 | }
62 |
--------------------------------------------------------------------------------
/NameNode/src/main/java/cn/mageek/namenode/res/ConstPool.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.namenode.res;
2 |
/**
 * Shared string constants for the NameNode module.
 * Non-instantiable utility holder.
 * @author Mageek Chiu
 * @date 2018/4/2 0002:10:35
 */
public final class ConstPool {

    /** Prevents instantiation of this constants-only class. */
    private ConstPool() {
        throw new AssertionError("ConstPool is a constants holder and must not be instantiated");
    }

    // Log message prefixes (L)
    public static final String L_SUCCESS = "执行成功:";
    public static final String L_ERROR = "执行错误:";

}
14 |
--------------------------------------------------------------------------------
/NameNode/src/main/java/cn/mageek/namenode/res/CronJobFactory.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.namenode.res;
2 |
3 | import org.reflections.Reflections;
4 | import org.slf4j.Logger;
5 | import org.slf4j.LoggerFactory;
6 |
7 | import java.util.Map;
8 | import java.util.Set;
9 | import java.util.concurrent.ConcurrentHashMap;
10 |
11 | /**
12 | * AbstractDataNodeCommand 工厂类
13 | * @author Mageek Chiu
14 | * @date 2018/3/13 0013:21:49
15 | */
16 | public class CronJobFactory {
17 | private static String packagePrefix = "cn.mageek.datanode.cron.";
18 | private static final Logger logger = LoggerFactory.getLogger(CronJobFactory.class);
19 | private static volatile Map cronJobMap;
20 |
21 | public static void construct() throws Exception {
22 | if(cronJobMap ==null){//volatile+双重检查来实现单例模式
23 | synchronized (CronJobFactory.class){
24 | if (cronJobMap ==null){
25 | cronJobMap = new ConcurrentHashMap<>();
26 | getAllCronJobs(cronJobMap);
27 | logger.info("CronJob pool initialized, number : {}", cronJobMap.size());
28 | }
29 | }
30 | }
31 | }
32 |
33 | public static Runnable getCronJob(String jobName){
34 | return cronJobMap.get(jobName);
35 | }
36 |
37 | public static void destruct(){
38 | cronJobMap = null;
39 | }
40 |
41 |
42 | private static void getAllCronJobs( Map cronJobMap) throws Exception {
43 |
44 | Reflections reflections = new Reflections(packagePrefix);
45 |
46 | Set> subTypes = reflections.getSubTypesOf(Runnable.class);
47 | int idStart = packagePrefix.length();
48 | for(Class clazz : subTypes){
49 | String className = clazz.getName();
50 | String jobName = className.substring(idStart);
51 | logger.debug("CronJob class found: {} , jobName: {}",className,jobName);
52 | cronJobMap.put(jobName,(Runnable)clazz.newInstance());
53 | }
54 | }
55 |
56 | }
57 |
--------------------------------------------------------------------------------
/NameNode/src/main/java/cn/mageek/namenode/service/ClientManager.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.namenode.service;
2 |
3 | import cn.mageek.namenode.handler.ClientWatcherHandler;
4 | import io.netty.bootstrap.ServerBootstrap;
5 | import io.netty.channel.*;
6 | import io.netty.channel.nio.NioEventLoopGroup;
7 | import io.netty.channel.socket.SocketChannel;
8 | import io.netty.channel.socket.nio.NioServerSocketChannel;
9 | import io.netty.handler.codec.serialization.ClassResolvers;
10 | import io.netty.handler.codec.serialization.ObjectDecoder;
11 | import io.netty.handler.codec.serialization.ObjectEncoder;
12 | import io.netty.handler.timeout.ReadTimeoutHandler;
13 | import org.slf4j.Logger;
14 | import org.slf4j.LoggerFactory;
15 |
16 | import static cn.mageek.namenode.main.NameNode.clientPort;
17 | import static cn.mageek.namenode.main.NameNode.clientThread;
18 | import static cn.mageek.namenode.main.NameNode.countDownLatch;
19 |
20 | /**
21 | * 管理所有client
22 | * @author Mageek Chiu
23 | * @date 2018/5/7 0007:20:18
24 | */
25 | public class ClientManager implements Runnable{
26 | private static final Logger logger = LoggerFactory.getLogger(DataNodeManager.class);
27 |
28 | public void run() {
29 | // Configure the server.
30 | EventLoopGroup bossGroup = new NioEventLoopGroup(1);//接收连接
31 | EventLoopGroup workerGroup = new NioEventLoopGroup(clientThread);//处理连接的I/O事件
32 | try {
33 | ServerBootstrap b = new ServerBootstrap();
34 | b.group(bossGroup, workerGroup)
35 | .channel(NioServerSocketChannel.class)//新建一个channel
36 | .option(ChannelOption.SO_BACKLOG, 512)//最大等待连接
37 | .childHandler(new ChannelInitializer() {
38 | @Override
39 | public void initChannel(SocketChannel ch) {
40 | ChannelPipeline p = ch.pipeline();
41 | p.addLast("ReadTimeoutHandler",new ReadTimeoutHandler(100));// in // 多少秒超时
42 | p.addLast(new ObjectDecoder(2048, ClassResolvers.cacheDisabled(this.getClass().getClassLoader())));// in 进制缓存类加载器
43 | p.addLast(new ObjectEncoder());// out
44 | p.addLast(new ClientWatcherHandler());// in
45 | }
46 | });
47 |
48 | // Start the server. 采用同步等待的方式
49 | ChannelFuture f = b.bind(Integer.parseInt(clientPort)).sync();
50 | logger.info("ClientManager is up now and listens on {}", f.channel().localAddress());
51 | countDownLatch.countDown();
52 |
53 | // Wait until the server socket is closed.
54 | f.channel().closeFuture().sync();
55 | logger.info("ClientManager is down");
56 |
57 | } catch (InterruptedException e) {
58 | logger.error("ClientManager start error: ", e);
59 | } finally {
60 | workerGroup.shutdownGracefully();
61 | bossGroup.shutdownGracefully();
62 | }
63 | }
64 | }
65 |
66 |
67 |
68 |
69 |
--------------------------------------------------------------------------------
/NameNode/src/main/java/cn/mageek/namenode/service/CronJobManager.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.namenode.service;
2 |
3 | import org.slf4j.Logger;
4 | import org.slf4j.LoggerFactory;
5 | import java.util.concurrent.CountDownLatch;
6 | import java.util.concurrent.Executors;
7 | import java.util.concurrent.ScheduledExecutorService;
8 |
9 | import static cn.mageek.namenode.main.NameNode.countDownLatch;
10 |
11 | /**
12 | * @author Mageek Chiu
13 | * @date 2018/3/7 0007:20:24
14 | */
15 | public class CronJobManager implements Runnable {
16 |
17 | private static final Logger logger = LoggerFactory.getLogger(CronJobManager.class);
18 | // private static ScheduledExecutorService scheduledExecutorService = Executors.newScheduledThreadPool(5);
19 |
20 | // private CountDownLatch countDownLatch;
21 |
22 | // public CronJobManager(CountDownLatch countDownLatch) {
23 | // this.countDownLatch = countDownLatch;
24 | // }
25 |
26 |
27 | public void run() {
28 |
29 | logger.info("CronJobManager is up now");
30 | countDownLatch.countDown();
31 | }
32 |
33 | }
34 |
--------------------------------------------------------------------------------
/NameNode/src/main/java/cn/mageek/namenode/service/DataNodeManager.java:
--------------------------------------------------------------------------------
1 | package cn.mageek.namenode.service;
2 |
3 | import cn.mageek.namenode.handler.DataNodeHeartBeatHandler;
4 | import io.netty.bootstrap.ServerBootstrap;
5 | import io.netty.channel.*;
6 | import io.netty.channel.nio.NioEventLoopGroup;
7 | import io.netty.channel.socket.SocketChannel;
8 | import io.netty.channel.socket.nio.NioServerSocketChannel;
9 | import io.netty.handler.codec.serialization.ClassResolvers;
10 | import io.netty.handler.codec.serialization.ObjectDecoder;
11 | import io.netty.handler.codec.serialization.ObjectEncoder;
12 | import io.netty.handler.timeout.ReadTimeoutHandler;
13 | import org.slf4j.Logger;
14 | import org.slf4j.LoggerFactory;
15 |
16 | import java.io.IOException;
17 | import java.io.InputStream;
18 | import java.util.Properties;
19 |
20 | import static cn.mageek.common.util.PropertyLoader.load;
21 | import static cn.mageek.common.util.PropertyLoader.loadWorkThread;
22 | import static cn.mageek.namenode.main.NameNode.countDownLatch;
23 | import static cn.mageek.namenode.main.NameNode.dataNodePort;
24 | import static cn.mageek.namenode.main.NameNode.dataNodeThread;
25 |
26 | /**
27 | * 管理所有dataNode
28 | * @author Mageek Chiu
29 | * @date 2018/5/7 0007:20:18
30 | */
31 | public class DataNodeManager implements Runnable{
32 | private static final Logger logger = LoggerFactory.getLogger(DataNodeManager.class);
33 |
34 | // private static String dataNodePort;
35 | // private static int dataNodeThread;
36 |
37 | // private Map dataNodeMap;//管理所有datanode
38 | // private Map dataNodeClientMap ;//管理所有datanode 对client开放的IP与端口
39 | // private ConcurrentSkipListMap sortedServerMap ;//管理所有datanode 对应 hash 和 ip:port
40 |
41 | // private CountDownLatch countDownLatch;//
42 |
43 | // static {
44 | // try( InputStream in = ClassLoader.class.getResourceAsStream("/app.properties")) {
45 | // Properties pop = new Properties();
46 | // pop.load(in);
47 | // dataNodePort = load(pop,"namenode.datanode.port");// 对dataNode开放的端口
48 | // dataNodeThread = loadWorkThread(pop,"namenode.datanode.workThread");
49 | // logger.debug("config dataNodePort:{},dataNodeThread:{}", dataNodePort,dataNodeThread);
50 | // } catch (IOException e) {
51 | // logger.error("read config error",e);
52 | // }
53 | // }
54 |
55 | // public DataNodeManager(Map dataNodeMap, Map dataNodeClientMap , ConcurrentSkipListMap sortedServerMap, CountDownLatch countDownLatch) {
56 | // this.dataNodeMap = dataNodeMap;
57 | // this.dataNodeClientMap = dataNodeClientMap;
58 | // this.countDownLatch = countDownLatch;
59 | // this.sortedServerMap = sortedServerMap;
60 | // }
61 |
62 | // public DataNodeManager(CountDownLatch countDownLatch) {
63 | // this.countDownLatch = countDownLatch;
64 | // }
65 |
66 | public void run() {
67 | // Configure the server.
68 | EventLoopGroup bossGroup = new NioEventLoopGroup(1);//接收连接
69 | EventLoopGroup workerGroup = new NioEventLoopGroup(dataNodeThread);//处理连接的I/O事件
70 | try {
71 | ServerBootstrap b = new ServerBootstrap();
72 | b.group(bossGroup, workerGroup)
73 | .channel(NioServerSocketChannel.class)//新建一个channel
74 | .option(ChannelOption.SO_BACKLOG, 64)//最大等待连接
75 | .childHandler(new ChannelInitializer() {
76 | @Override
77 | public void initChannel(SocketChannel ch) throws Exception {
78 | ChannelPipeline p = ch.pipeline();
79 | p.addLast("ReadTimeoutHandler",new ReadTimeoutHandler(31));// in // 多少秒超时
80 | p.addLast(new ObjectDecoder(2048, ClassResolvers.cacheDisabled(this.getClass().getClassLoader())));// in 禁止缓存类加载器
81 | p.addLast(new ObjectEncoder());// out
82 | // p.addLast(new DataNodeHeartBeatHandler(dataNodeMap,dataNodeClientMap,sortedServerMap));// in
83 | p.addLast(new DataNodeHeartBeatHandler());// in
84 | }
85 | });
86 |
87 | // Start the server. 采用同步等待的方式
88 | ChannelFuture f = b.bind(Integer.parseInt(dataNodePort)).sync();
89 | logger.info("DataNodeManager is up now and listens on {}", f.channel().localAddress());
90 | countDownLatch.countDown();
91 |
92 | // Wait until the server socket is closed.
93 | f.channel().closeFuture().sync();
94 | logger.info("DataNodeManager is down");
95 |
96 | } catch (InterruptedException e) {
97 | logger.error("DataNodeManager start error: ", e);
98 | } finally {
99 | workerGroup.shutdownGracefully();
100 | bossGroup.shutdownGracefully();
101 | }
102 |
103 | }
104 |
105 |
106 |
107 |
108 | }
109 |
110 |
111 |
112 |
113 |
--------------------------------------------------------------------------------
/NameNode/src/main/resources/app.properties:
--------------------------------------------------------------------------------
1 | #namenode export this ip to datanode
2 | namenode.datanode.ip=192.168.0.136
3 | namenode.datanode.port=10101
4 | namenode.datanode.workThread=2
5 |
6 | #namenode export this ip to client
7 | namenode.client.ip=192.168.0.136
8 | namenode.client.port=10102
9 | namenode.client.workThread=2
10 |
11 |
12 | # 1:cache, 2:database,
13 | namenode.mode=1
14 |
15 | # 是否使用HA
16 | namenode.useHA=false
17 | # HA相关配置
18 | namenode.zk.connectAddr=127.0.0.1:2181,127.0.0.1:3181,127.0.0.1:4181
19 | namenode.zk.sessionTimeout=2000
20 | namenode.zk.connectionTimeout=8000
21 | namenode.zk.masterNodePath=/CHKV/masterNode
22 | namenode.zk.baseSleepTimeMs=1000
23 | namenode.zk.maxRetries=10
--------------------------------------------------------------------------------
/NameNode/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
1 | ### 设置root###
2 | log4j.rootLogger = debug,stdout,D,E
3 |
4 |
5 | ### 输出信息到控制台 ###
6 | log4j.appender.stdout = org.apache.log4j.ConsoleAppender
7 | log4j.appender.stdout.Target = System.out
8 | log4j.appender.stdout.layout = org.apache.log4j.PatternLayout
9 | log4j.appender.stdout.layout.ConversionPattern = [%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} %l:%m%n
10 |
11 |
12 | ### 输出DEBUG 级别以上的日志到=debug.log ###
13 | # daily 表示一天一个文件
14 | log4j.appender.D = org.apache.log4j.DailyRollingFileAppender
15 | log4j.appender.D.File = ./NameNode/src/main/java/cn/mageek/namenode/log/debug.log
16 | log4j.appender.D.DatePattern=yyyy-MM-dd-HH'.log'
17 | log4j.appender.D.Append = true
18 | log4j.appender.D.Threshold = DEBUG
19 | log4j.appender.D.layout = org.apache.log4j.PatternLayout
20 | log4j.appender.D.layout.ConversionPattern = [%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} %l:%m%n
21 |
22 |
23 | ### 输出ERROR 级别以上的日志到=error.log ###
24 | log4j.appender.E = org.apache.log4j.DailyRollingFileAppender
25 | log4j.appender.E.File = ./NameNode/src/main/java/cn/mageek/namenode/log/error.log
26 | log4j.appender.E.DatePattern=yyyy-MM-dd-HH'.log'
27 | log4j.appender.E.Append = true
28 | log4j.appender.E.Threshold = ERROR
29 | log4j.appender.E.layout = org.apache.log4j.PatternLayout
30 | log4j.appender.E.layout.ConversionPattern = [%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} %l:%m%n
31 |
32 |
33 | ## 调整每个模块的日志级别##
34 | log4j.logger.cn.mageek.namenode=debug
35 | log4j.logger.cn.mageek.common=debug
36 | #log4j.logger.cn.mageek.namenode=info
37 | #log4j.logger.cn.mageek.common=info
38 | #log4j.logger.cn.mageek.namenode=error
39 | #log4j.logger.cn.mageek.common=error
40 |
41 | log4j.logger.io.netty=warn
42 |
43 | log4j.logger.org.reflections=info
44 |
45 | log4j.logger.org.apache.zookeeper=info
--------------------------------------------------------------------------------
/Readme.md:
--------------------------------------------------------------------------------
1 | # Consistent Hashing based Key-Value Memory Storage #
2 |
3 | 基于[一致性哈希][5]的分布式内存键值存储——**CHKV**。
4 | 目前的定位就是作为 **Cache**,**DataBase** 的功能先不考虑。
5 |
6 | ## 系统设计 ##
7 |
8 | - **NameNode** : 维护 **DataNode节点** 列表,用心跳检测 **DataNode**(一般被动,被动失效时主动询问三次),节点增减等系统信息变化时调整数据并通知 **Client**;
9 | - **DataNode** : 存储具体的数据,向 **NameNode** 主动发起心跳并采用请求响应的方式来实现上下线,便于 **NameNode** 发起挪动数据指令,实际挪动操作由 **DataNode** 自行完成;
10 | - **Client** : 负责向 **NameNode** 请求 **DataNode** 相关信息并监听其变化,操纵数据时直接向对应 **DataNode** 发起请求就行,
11 | 目前支持`set,setnx,get,delete,keys,expire,incr,incrby,decr,decrby,append`几个操作;
12 |
13 | **NameNode** 失效则整个系统不可用。
14 |
15 | 若当成内存数据库使用,则要注意持久化,而且只要有一个 **DataNode** 失效(未经请求与数据转移就下线了)整个系统就不可对外服务;
16 | 若当成内存缓存使用,则 **DataNode** 失效只是失去了一部分缓存,系统仍然可用。
17 |
18 |
19 | **DataNode** 失效(未经请求与数据转移就断开了和 **NameNode** 的连接)则 **NameNode** 需要及时通知 **Client**。
20 |
21 | **客户** 要使用 **CHKV** 就必须使用 **Client** 库或者自己依据协议(兼容redis)实现,可以是多种语言的API。
22 | 当然也可以把 **Client** 当做 **Proxy**,使得 **CHKV** 内部结构对 **客户** 透明,亦即有如下两种方式:
23 |
24 | 方式1:
25 |
26 | 用户直接使用Client库
27 | ||
28 | || ||
29 | || ||
30 | NameNode || || || ||
31 | DataNode DataNode DataNode DataNode ......
32 |
33 | 方式2:
34 |
35 | 用户通过Proxy访问
36 | ||
37 | Client库构建的Proxy
38 | ||
39 | || ||
40 | || ||
41 | NameNode || || || ||
42 | DataNode DataNode DataNode DataNode ......
43 |
44 | ## 可用性分析 ##
45 |
46 | ### 高可用分析 ###
47 | 要想实现高可用有两点: **NameNode** 要主从双机备,避免单点失效;
48 | 每个 **DataNode** 可以做成主从复制甚至集群。
49 |
50 | 目前实现了**NameNode**多机热备的高可用,如下图:
51 |
52 | Client
53 | ||
54 | || ||
55 | || ||
56 | || || || || || ||
57 | NameNode0 NameNode1 DataNode DataNode DataNode DataNode ......
58 |
59 | 默认情况下 **Client** 和 **DataNode** 都与 **Master**:**NameNode0** 保持连接,**NameNode1** 作为 **Standby**。
60 | 一旦 **NameNode0** 不可用,**Client** 和 **DataNode** 都能收到消息并开始和 **NameNode1** 建立连接。
61 | 高可用依赖于第三方组件如**ZooKeeper、Redis**等,用户可以根据需要自行选择,
62 | 只需要基于特定第三方组件实现 [`cn.mageek.common.ha.HAThirdParty`][6] 抽象类即可,如本项目提供的例子 `cn.mageek.common.ha.ZKThirdParty`。
63 |
64 | **DataNode** 由于需要IP入环,所以其本身高可用建议使用**主从复制**(代码本身实现,暂时采用全量复制)和**IP漂移**( **Keepalived** 实现),可以参考[该文章][7]。
65 |
66 |
67 | 所以数据总共有两种状态需要迁移,一种是**DataNode**节点上下线,一种是**DataNode**节点主从复制。
68 | 对于**主从复制迁移**由于目前采取的是定时全量复制,所以是不一致的,这个要做改进,可以强行每次请求同步到从节点。
69 | **节点之间迁移**的一致性策略还是可以配置的:
70 | - 如果是强一致 CP,那么在迁移状态就可以简单的不接受修改;也可以优化为不接受迁移数据的修改,但是代码更复杂
71 | - 如果是弱一致 AP,那么迁移过程就要划分状态了:
72 | - 下线迁移中,不接受修改
73 | - 上一个节点上线迁移中,不接受修改
74 |
75 |
76 |
77 | 目前简单的采取迁移过程中不接受修改。
78 |
79 | ### 连接分析 ###
80 |
81 | 各个组件之间的连接情况:
82 |
83 | - **NameNode** 要保持和 **N** 个 **Client** 的TCP长连接,但是只有在集群发生变化时才有交互,所以使用IO多路复用负载就不大
84 | - **NameNode** 要和 **M** 个 **DataNode** 保持心跳,TCP请求响应式,负载与 **M** 和心跳间隔秒数 **interval** 有关
85 | - **DataNode** 与 **Client** 是TCP请求响应式操作,**Client** 请求完毕后保留与该 **DataNode** TCP连接一段时间,以备后续访问复用连接,连接采取自动过期策略,类似于LRU
86 | - **DataNode** 与 **NameNode** 保持心跳 `Heartbeat`
87 | - **Client** 与 **NameNode** 保持TCP长连接,`Watch` **DataNode** 的变化
88 | - **Client** 与 **DataNode** TCP请求响应式操作,`Data` 的请求响应
89 |
90 | 如下图所示,有4个连接:其中1、2要主动心跳来保持连接;3保持连接以备复用并可以自动超时断开,再次使用时重连;4完成数据转移后就断开连接。
91 |
92 | NameNode
93 | || ||
94 | 1、心跳请求响应|| ||2、监听长连接
95 | || 3、数据请求响应 ||
96 | DataNodes ========== Clients
97 | || ||
98 | ||
99 | 4、数据转移,可复用3
100 |
101 | 开发优先级:3、1、4、2
102 |
103 | ## 代码结构 ##
104 |
105 | - **NameNode** : 实现 NameNode 功能
106 |
107 | - handler : handler
108 | - res : 资源,如常量,命令工厂
109 | - service : 服务,含Client管理,DataNode管理
110 |
111 | - **DataNode** : 实现 DataNode 功能
112 |
113 | - command : 处理客户端各个命令的具体命令对象
114 | - job : 一些的任务如心跳、数据迁移
115 | - handler : 处理连接的handler
116 | - service : 服务,含定时任务管理,数据请求管理
117 |
118 | - **Client** : 实现 Client 功能
119 |
120 | - handler : handler
121 | - Client : 暴露给用户的命令管理
122 | - Connection : 发出网络请求
123 |
124 | - **Common** : 实现一些公共的功能,上面三个模块依赖于此模块
125 |
126 | - command : 命令抽象类
127 | - ha : HA相关类
128 | - model : 一些公用的pojo,如请求响应对象
129 | - util : 一些工具类
130 | - helper : 辅助脚本
131 |
132 | ## 使用方法 ##
133 |
134 | **DataNode** 运行起来就可以直接使用 **redis-cli** 连接,如`redis-cli -h 127.0.0.1 -p 10100`,并进行`set、get、del`等操作;
135 |
136 | 注意:要首先运行 **NameNode**,然后可以通过JVM参数的方式调整端口,在同一台机器上运行多个 **DataNode**,
137 | 若要在不同机器上运行 **DataNode** 也可以直接修改配置文件。
138 |
139 | 新的 **DataNode** 可以直接上线,**NameNode** 会自动通知下一个节点转移相应数据给新节点;**DataNode** 若要下线,
140 | 则可以通过 **telnet DataNode** 节点的下线监听端口(TCP监听) 如 `telnet 127.0.0.1 20000` ,
141 | 并发送 **k** 字符即可,待下线的DataNode收到命令 **k** 后会自动把数据全部转移给下一个 **DataNode**
142 | 然后提示**进程pid**,用户就可以关闭该DataNode进程了,如 **Linux**: `kill -s 9 23456`,**Windows**:`taskkill /pid 23456`
143 |
144 | **DataNode** 支持Expire,包含lazy与periodical两种删除策略,默认lazy,expireChecking大于0就是periodical+lazy
145 |
146 | **NameNode** 和 **DataNode** 启动后就可以使用 **Client** 了,代码示例如下:
147 |
148 | **Client** 代码示例[在此,关键如下:][4]
149 |
150 | try(Client client = new Client("192.168.0.136","10102")){// 支持自动关闭
151 | logger.debug(client.set("192.168.0.136:10099","123456")+"");
152 | logger.debug(client.get("192.168.0.136:10099")+"");
153 | logger.debug(client.set("112","23")+"");
154 | logger.debug(client.del("1321")+"");
155 | logger.debug(client.del("112")+"");
156 | }
157 |
158 | ## 压力测试 ##
159 |
160 | 在本机开启1个 **NameNode** 和1个 **DataNode** 直接压测,4次
161 |
162 | `redis-benchmark -h 127.0.0.1 -p 10100 -c 100 -t set -q`
163 | - SET: 5006.76 requests per second
164 | - SET: 5056.43 requests per second
165 | - SET: 5063.55 requests per second
166 | - SET: 5123.74 requests per second
167 |
168 |
169 | 把以上2个节点日志级别都调整为 `info`(实际上 **DataNode** 节点才会影响 **qps**),重启
170 |
171 | `redis-benchmark -h 127.0.0.1 -p 10100 -c 100 -t set -q`
172 | - SET: 62421.97 requests per second
173 | - SET: 87260.03 requests per second
174 | - SET: 92592.59 requests per second
175 | - SET: 94517.96 requests per second
176 |
177 | 可见日志对**qps**影响很大,是 **几k** 与 **几十k** 的不同数量级的概念,若把级别改成 `error`,**平均qps**还能提升 **几k**,所以生产环境一定要注意日志级别。
178 |
179 | 此外观察,不重启并且每次压测间隔都很小的话,qps一般会从 **65k** 附近开始,经过1、2次的 **88k** 左右,最终稳定在 **98k** 附近,数十次测试,最低 **62.4k**,最高**101.2k**。
180 |
181 | 重启的话,**qps**就会重复上述变化过程,这应该是和内存分配等初始化工作有关,第1次压测有大量的初始化,而后面就没了,所以第一次**qps**都比较低;还可能与 **JIT** 有关,所以 **Java** 的性能测试严格上来说要忽略掉最初的几个样本才对。
182 |
183 | 经观察,DataNode进程启动后,内存消耗在59M附近,第1次压测飙升到134M然后稳定到112M,第2次上升到133M然后稳定到116M,后面每次压测内存都是先增加几M然后减小更多,最终稳定在76M。
184 |
185 |
186 | 在本机运行一个redis-server进程,然后压测一下
187 |
188 | `redis-benchmark -h 127.0.0.1 -p 6379 -c 100 -t set -q`
189 | - SET: 129032.27 requests per second
190 | - SET: 124533.27 requests per second
191 | - SET: 130208.34 requests per second
192 | - SET: 132450.33 requests per second
193 |
194 | 经数十次测试,**qps** 稳定在 **128k** 附近,最高 **132.3k** ,最低 **122.7k** 可见**CHKV**的单个 **DataNode** 目前性能还比不过单个 **redis**。
195 |
196 | **DataNode** 经过重构后,现在的压测结果如下
197 |
198 | `redis-benchmark -h 127.0.0.1 -p 10100 -c 100 -t set -q`
199 |
200 | - SET: 78554.59 requests per second
201 | - SET: 114285.71 requests per second
202 | - SET: 119047.63 requests per second
203 | - SET: 123628.14 requests per second
204 |
205 | 经过多次测试,**qps** 稳定在 **125k** 附近,最高 **131.9k** ,最低 **78.6k**(这是启动后第一次压测的特例,后期稳定时最低是 **114.3k**),可见重构后
206 | 单个 **DataNode** 和单个 **redis-server** 的 **qps** 差距已经很小了,优化效果还是比较明显的。
207 |
208 | 主要优化两个:去掉单独的 **BusinessHandler** 的单独逻辑线程,因为没有耗时操作,直接在IO线程操作反而能省掉切换时间;
209 | **DataNode** 通过 `public static volatile Map DATA_POOL` 共享数据池,其他相关操作类减少了这个域,省一些内存;
210 | 第一条对比明显,很容易直接测试,第二条没直接测,只是分析。
211 |
212 | 然后通过` -Xint` 或者 `-Djava.compiler=NONE` 关闭 **JIT** 使用 **解释模式**,再压测试试。
213 |
214 | `redis-benchmark -h 127.0.0.1 -p 10100 -c 100 -t set -q`
215 | - SET: 16105.65 requests per second
216 | - SET: 16244.31 requests per second
217 | - SET: 16183.85 requests per second
218 | - SET: 16170.76 requests per second
219 |
220 | 可见关闭 **JIT** 后 **qps** 降低了 **7倍多**,而且每次差别不大(即使是第一次),这也能说明上面(默认是**混合模式**)第一次压测的 **qps** 比后面低了那么多的原因确实和 **JIT** 有关。
221 |
222 | 通过 `-Xcomp` 使用 **编译模式** ,启动会很慢。
223 |
224 | `redis-benchmark -h 127.0.0.1 -p 10100 -c 100 -t set -q`
225 | - SET: 83612.04 requests per second
226 | - SET: 117647.05 requests per second
227 | - SET: 121802.68 requests per second
228 | - SET: 120048.02 requests per second
229 |
230 | 可见 **编译模式** 并没有比 **混合模式** 效果好,因为即使是不热点的代码也要编译,反而浪费时间,所以一般还是选择默认的 **混合模式** 较好。
231 |
232 | 然后来验证**线程数、客户端操作**与 **qps** 的关系,实验机器是 `4 core、8 processor`,我把 **DataNode** 的 `DataManager` 中 `workerGroup`的线程数依次减少从 **8** 调到为 **1** (之前的测试都是 **4** ),
233 | 发现 **qps** 先升后降,在值为 **2** 的时候达到最大值,**超过了redis**,下面是数据
234 |
235 | `redis-benchmark -h 127.0.0.1 -p 10100 -c 100 -t set -q`
236 | - SET: 93283.04 requests per second
237 | - SET: 141043.05 requests per second
238 | - SET: 145560.68 requests per second
239 | - SET: 145384.02 requests per second
240 |
241 | 经数十次测试,**qps** 稳定在 **142k** 附近,最高 **150.6k** ,稳定后最低 **137.2k**。
242 | **Netty** 本身使用了**IO多路复用**,在客户端操作都比较轻量(压测这个 **set** 也确实比较轻量)时选择线程数较少是合理的,
243 | 因为这时候线程切换的代价超过了多线程带来的好处,这样我们也能理解 **redis** 单线程设计的初衷了,
244 | 单线程虽然有些极端,但是如果考虑 **面向快速轻量操作的客户端** 和 **单线程的安全与简洁特性**,也是最佳的选择。
245 |
246 | 但是如果客户端操作不是轻量级的,比如我们把 `set` 数据大小调为`500bytes`,再对 **CHKV** 不同的 `workerGroup`线程数进行压测
247 |
248 | 2 `redis-benchmark -h 127.0.0.1 -p 10100 -c 100 -t set -d 500 -q`
249 |
250 | - SET: 80450.52 requests per second
251 | - SET: 102459.02 requests per second
252 | - SET: 108813.92 requests per second
253 | - SET: 99206.34 requests per second
254 |
255 | 3 `redis-benchmark -h 127.0.0.1 -p 10100 -c 100 -t set -d 500 -q`
256 | - SET: 92592.59 requests per second
257 | - SET: 133868.81 requests per second
258 | - SET: 133868.81 requests per second
259 | - SET: 135685.22 requests per second
260 |
261 | 4 `redis-benchmark -h 127.0.0.1 -p 10100 -c 100 -t set -d 500 -q`
262 | - SET: 72046.11 requests per second
263 | - SET: 106723.59 requests per second
264 | - SET: 114810.56 requests per second
265 | - SET: 119047.63 requests per second
266 |
267 | 可见这个时候4、3个线程**qps**都大于2个线程,符合验证,但是4的**qps**又比3少,说明线程太多反而不好,
268 | 然而把数据大小调到`900byte`时,4个线程又比3个线程的**qps**大了,
269 | 所以这个参数真的要针对不同的应用场景做出不同的调整,总结起来就是轻量快速的操作适宜线程 **适当少**,重量慢速操作适宜线程 **适当多**。
270 | **DataNode** 中的 `workThread` 配置参数决定了IO线程数
271 |
272 |
273 | ## 未来工作 ##
274 |
275 | 水平有限,目前项目的问题还很多,可以改进的地方还很多,先列个清单:
276 |
277 | - 高可用性保证
278 | - 断线重连
279 | - DataNode迁移数据的正确性保障
280 | - DataNode迁移数据过程的一致性保障
281 | - DataNode迁移数据后清理空间节约内存
282 | - 键空间通知事件,初步设计NameNode下发待监控的keys,DataNode在心跳中上报对应keys的事件,由NameNode下发给client,可以复用已有连接
283 | - 对于WeakReference的支持
284 | - 更多数据类型
285 | - 更多操作
286 | - 完整的校验机制
287 | - 等等......
288 |
289 | 全部代码在[Github][1]上,欢迎 **star**,欢迎 **issue**,欢迎 **fork**,欢迎 **pull request**......
290 | 总之就是欢迎大家和我一起完善这个项目,一起进步。
291 |
292 | [戳此][2]看原文,来自[MageekChiu][3]
293 |
294 | [1]: https://github.com/MageekChiu/CHKV
295 | [2]: http://mageek.cn/archives/96/
296 | [3]: http://mageek.cn/
297 | [4]: https://github.com/MageekChiu/CHKV/blob/master/Client/src/test/java/cn/mageek/client/ConnectionTest.java
298 | [5]: https://zh.wikipedia.org/wiki/%E4%B8%80%E8%87%B4%E5%93%88%E5%B8%8C
299 | [6]: https://github.com/MageekChiu/CHKV/blob/master/Common/src/main/java/cn/mageek/common/ha/HAThirdParty.java
300 | [7]: http://mageek.cn/archives/97/
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | 4.0.0
6 |
7 | cn.mageek
8 | CHKV
9 | 1.0-SNAPSHOT
10 |
11 | DataNode
12 | NameNode
13 | Common
14 | Client
15 |
16 | pom
17 |
18 |
19 | UTF-8
20 | mageekchiu
21 |
22 |
23 | Consistent Hashing based Key-Value Memory Storage
24 |
25 |
26 |
27 |
28 |
29 | io.netty
30 | netty-all
31 | 4.1.22.Final
32 |
33 |
34 |
35 | org.slf4j
36 | slf4j-api
37 | 1.7.25
38 | compile
39 |
40 |
41 | org.slf4j
42 | slf4j-log4j12
43 | 1.7.25
44 | runtime
45 |
46 |
47 |
48 | junit
49 | junit
50 | 4.10
51 | test
52 |
53 |
54 |
55 |
56 | com.google.protobuf
57 | protobuf-java
58 | 3.5.1
59 |
60 |
61 |
62 |
63 | com.alibaba
64 | fastjson
65 | 1.2.46
66 |
67 |
68 | org.reflections
69 | reflections
70 | 0.9.11
71 |
72 |
73 |
74 | org.apache.curator
75 | curator-framework
76 | 3.3.0
77 |
78 |
79 | org.apache.curator
80 | curator-recipes
81 | 3.3.0
82 |
83 |
84 | org.apache.curator
85 | curator-client
86 | 3.3.0
87 |
88 |
89 |
90 |
91 |
92 |
105 |
106 |
107 | org.apache.maven.plugins
108 | maven-jar-plugin
109 |
110 |
111 | org.apache.maven.plugins
112 | maven-assembly-plugin
113 |
114 |
115 |
116 |
117 |
118 |
119 | org.apache.maven.plugins
120 | maven-assembly-plugin
121 | 3.1.0
122 |
123 |
124 | jar-with-dependencies
125 |
126 |
127 |
128 | cn.mageek.CHKV.Main
129 |
130 |
131 |
132 | src/assembly/bin.xml
133 |
134 |
135 |
136 |
137 | make-assembly
138 | package
139 |
140 | single
141 |
142 |
143 |
144 |
145 |
146 |
147 |
148 |
149 |
150 |
--------------------------------------------------------------------------------
/src/assembly/bin.xml:
--------------------------------------------------------------------------------
1 |
4 | jar
5 |
6 | jar
7 |
8 | false
9 |
10 |
11 |
12 |
13 | true
14 |
15 |
16 |
17 | cn.mageek.datanode
18 | cn.mageek.common
19 | cn.mageek.namenode
20 | cn.mageek.client
21 |
22 |
23 | modules/maven-assembly-plugin
24 | false
25 |
26 |
27 |
28 |
--------------------------------------------------------------------------------
/src/main/java/cn/mageek/CHKV/Main.java:
--------------------------------------------------------------------------------
package cn.mageek.CHKV;

/**
 * Placeholder entry point for the CHKV aggregator module.
 *
 * <p>This class is referenced as the {@code mainClass} in the
 * maven-assembly-plugin manifest configuration of the root POM. The actual
 * runnable services live in the DataNode / NameNode / Client sub-modules;
 * this {@code main} is intentionally empty.
 *
 * @author Mageek Chiu
 * @date 2018/5/11 0011:15:33
 */
public class Main {
    // Intentionally a no-op: kept so the jar-with-dependencies assembly has a
    // valid Main-Class manifest entry. Run the sub-module mains instead.
    public static void main (String... args){

    }
}
--------------------------------------------------------------------------------