├── .gitignore ├── README.md ├── build.gradle ├── c-code ├── Makefile ├── echo_client.c ├── echo_server.c └── epoll_example.c ├── etcd-code ├── .gitignore ├── README.md ├── client.go ├── cluster │ └── Procfile ├── doc.go ├── etcd_code_test.go ├── key.go ├── kv_test.go ├── lease_test.go ├── mutex.go ├── mutex_test.go ├── queue.go ├── queue_test.go ├── watch.go └── watch_test.go ├── scripts ├── executor.sh ├── init-quorum.sh ├── seq.sh └── snapshotFormatter.sh ├── slides ├── 第一章:基础篇.pdf ├── 第三章:运维篇.pdf ├── 第二章:开发篇.pdf ├── 第五章:对比Chubby、etcd和ZooKeeper.pdf ├── 第六章:ZooKeeper实现原理和源码解读.pdf └── 第四章:进阶篇.pdf └── src ├── main ├── java │ └── org │ │ └── yao │ │ ├── DigestGenerator.java │ │ ├── netty │ │ ├── discard │ │ │ ├── DiscardServer.java │ │ │ └── DiscardServerHandler.java │ │ └── echo │ │ │ ├── EchoClient.java │ │ │ ├── EchoClientHandler.java │ │ │ ├── EchoServer.java │ │ │ └── EchoServerHandler.java │ │ ├── socket │ │ ├── EchoClient.java │ │ └── EchoServer.java │ │ └── watchclient │ │ ├── DataMonitor.java │ │ ├── Executor.java │ │ └── package.html └── resources │ ├── log4j.properties │ └── quorum │ ├── zoo-quorum-node1.cfg │ ├── zoo-quorum-node2.cfg │ └── zoo-quorum-node3.cfg └── test └── java └── org └── yao ├── CuratorTests.java ├── NodeCacheTests.java ├── PathChildrenCacheTests.java ├── ServiceDiscoveryTests.java ├── WatcherTests.java ├── ZooKeeperTests.java ├── jute └── JuteTests.java ├── net └── InetAddressTests.java ├── netty └── BufTests.java ├── nio └── BufferTests.java └── package.html /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled class file 2 | *.class 3 | 4 | # Log file 5 | *.log 6 | 7 | # BlueJ files 8 | *.ctxt 9 | 10 | # Mobile Tools for Java (J2ME) 11 | .mtj.tmp/ 12 | 13 | # Package Files # 14 | *.jar 15 | *.war 16 | *.nar 17 | *.ear 18 | *.zip 19 | *.tar.gz 20 | *.rar 21 | 22 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml 23 | hs_err_pid* 24 | 25 | 
/data 26 | /build 27 | 28 | *.cfg.dynamic.next 29 | *-data 30 | 31 | a.out 32 | 33 | echo_* 34 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ZooKeeper实战与源代码剖析 2 | 3 | 极客时间课程首页: [ZooKeeper实战与源码剖析](https://time.geekbang.org/course/intro/100034201) 4 | 5 | ## 源码列表 6 | 7 | ### 1.7 ZooKeeper架构 8 | - [在一个机器上配置一个3节点ZooKeeper集群的配置文件](src/main/resources/quorum) 9 | 10 | ### 2.1 ZooKeeper API介绍 11 | - [WatcherTests](src/test/java/org/yao/WatcherTests.java) 12 | 13 | ### 2.2 ZooKeeper API-Watch示例 14 | - [Java Watch Client](src/main/java/org/yao/watchclient) 15 | - [seq.sh](scripts/seq.sh) 16 | - [executor.sh](scripts/executor.sh) 17 | 18 | ### 2.6 使用 Apache Curator 简化 ZooKeeper 开发 19 | - [CuratorTests](src/test/java/org/yao/CuratorTests.java) 20 | 21 | ### 3.4 通过动态配置实现不中断服务的集群成员变更 22 | - [DigestGenerator](src/main/java/org/yao/DigestGenerator.java) 23 | 24 | ### 3.5 ZooKeeper节点是如何存储数据的 25 | - [snapshotFormatter.sh](scripts/snapshotFormatter.sh) 26 | 27 | ### 4.1 使用ZooKeeper实现服务发现(1) 28 | - [ServiceDiscoveryTests](src/test/java/org/yao/ServiceDiscoveryTests.java) 29 | 30 | ### 4.3 使用ZooKeeper实现服务发现(3) 31 | - [A port of curator-x-discovery-server in Spring Boot](https://github.com/yaojingguo/curator-x-discovery-server) 32 | 33 | ### 4.4 Kafka是如何使用ZooKeeper的 34 | - [multi API sample code](src/test/java/org/yao/ZooKeeperTests.java) 35 | 36 | ### 5.5 etcd API (1) 37 | - [sample code for Range, Put and DeleteRange APIs](etcd-code/kv_test.go) 38 | 39 | ### 5.6 etcd API (2) 40 | - [sample code for Txn API](etcd-code/kv_test.go) 41 | - [sample code for Watch API](etcd-code/watch_test.go) 42 | - [sample code for Lease API](etcd-code/lease_test.go) 43 | 44 | ### 5.7 使用etcd实现分布式队列 45 | - [Queue](etcd-code/queue.go) 46 | 47 | ### 5.8 使用etcd实现分布式锁 48 | - [Mutex](etcd-code/mutex.go) 49 | 50 | ### 5.9 如何搭建一个etcd生产环境 51 | - 
[Procfile](etcd-code/cluster/Procfile) 52 | 53 | ### 5.9 如何搭建一个etcd生产环境 54 | 55 | ### 6.5 网络编程基础 56 | - [echo_client.c](c-code/echo_client.c) 57 | - [echo_server.c](c-code/echo_server.c) 58 | - [EchoClient.java](src/main/java/org/yao/socket/EchoClient.java) 59 | - [EchoServer.java](src/main/java/org/yao/socket/EchoServer.java) 60 | 61 | ### 6.6 事件驱动的网络编程 62 | - [epoll_example.c](c-code/epoll_example.c) 63 | 64 | ### 6.7 Java的事件驱动网络编程 65 | - [Netty Echo Example](src/main/java/org/yao/netty/echo) 66 | 67 | ## PPT 68 | 1. [第一章:基础篇](slides/第一章:基础篇.pdf) 69 | 70 | 1. [第二章:开发篇](slides/第二章:开发篇.pdf) 71 | 72 | 1. [第三章:运维篇](slides/第三章:运维篇.pdf) 73 | 74 | 1. [第四章:进阶篇](slides/第四章:进阶篇.pdf) 75 | 76 | 1. [第五章:对比Chubby、etcd和ZooKeeper](slides/第五章:对比Chubby、etcd和ZooKeeper.pdf) 77 | 78 | 1. [第六章:ZooKeeper实现原理和源码解读](slides/第六章:ZooKeeper实现原理和源码解读.pdf) 79 | -------------------------------------------------------------------------------- /build.gradle: -------------------------------------------------------------------------------- 1 | apply plugin: "java" 2 | apply plugin: "idea" 3 | apply plugin: "application" 4 | 5 | repositories { 6 | mavenLocal() 7 | mavenCentral() 8 | } 9 | 10 | dependencies { 11 | implementation "org.apache.zookeeper:zookeeper:3.5.5" 12 | implementation "org.apache.curator:curator-recipes:4.2.0" 13 | implementation "org.apache.curator:curator-x-discovery:4.2.0" 14 | implementation "org.apache.curator:curator-x-discovery-server:4.2.0" 15 | 16 | testImplementation "junit:junit:4.12" 17 | testImplementation "com.google.truth:truth:1.0" 18 | } 19 | 20 | compileJava.options.encoding = "UTF-8" 21 | compileTestJava.options.encoding = "UTF-8" 22 | sourceCompatibility = 1.8 23 | targetCompatibility = 1.8 24 | [compileJava, compileTestJava, javadoc]*.options*.encoding = "UTF-8" 25 | mainClassName = "org.yao.netty.echo.EchoServer" 26 | 27 | test { 28 | testLogging { 29 | showStandardStreams = true 30 | events "passed", "skipped", "failed" 31 | } 32 | } 33 | 
-------------------------------------------------------------------------------- /c-code/Makefile: -------------------------------------------------------------------------------- 1 | PROGS=echo_client echo_server 2 | 3 | all: ${PROGS} 4 | 5 | echo_client: echo_client.o 6 | 7 | echo_server: echo_server.o 8 | 9 | cscope: 10 | rm -fr cscope.* 11 | find -name '*.h' -or -name '*.c' > cscope.files 12 | cscope -b -q 13 | 14 | clean: 15 | rm -f ${PROGS} *.o a.out 16 | -------------------------------------------------------------------------------- /c-code/echo_client.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | int port = 5000; 13 | #define SIZE 1024 14 | 15 | static void print_error_and_exit(const char* api_name) { 16 | perror(api_name); 17 | exit(EXIT_FAILURE); 18 | } 19 | 20 | static void write_all(int sockfd) { 21 | char msg[] = "hello\n"; 22 | int position = 0; 23 | int count = sizeof(msg); 24 | int ret; 25 | while (position < count) { 26 | ret = write(sockfd, msg + position, count - position); 27 | if (ret == -1) { 28 | print_error_and_exit("write"); 29 | } 30 | position += ret; 31 | } 32 | } 33 | 34 | static void read_all(int sockfd) { 35 | int ret; 36 | char buf[SIZE] = {0}; 37 | while ((ret = read(sockfd, buf, SIZE)) > 0) { 38 | write(STDOUT_FILENO, buf, ret); 39 | } 40 | if (ret == -1) { 41 | print_error_and_exit("read"); 42 | } 43 | } 44 | 45 | int main(int argc, char *argv[]) 46 | { 47 | int sockfd = 0; 48 | struct sockaddr_in serv_addr; 49 | 50 | if (argc != 2) { 51 | fprintf(stderr, "Usage: %s \n",argv[0]); 52 | return EXIT_FAILURE; 53 | } 54 | 55 | if ((sockfd = socket(AF_INET, SOCK_STREAM, 0)) == -1) { 56 | print_error_and_exit("socket"); 57 | } 58 | 59 | memset(&serv_addr, 0, sizeof(serv_addr)); 60 | serv_addr.sin_family = AF_INET; 61 | serv_addr.sin_port = htons(port); 62 | if 
(inet_pton(AF_INET, argv[1], &serv_addr.sin_addr) != 1) { 63 | print_error_and_exit("inet_pton"); 64 | } 65 | 66 | if (connect(sockfd, (struct sockaddr *) &serv_addr, sizeof(serv_addr)) == -1) { 67 | print_error_and_exit("connect"); 68 | } 69 | 70 | write_all(sockfd); 71 | read_all(sockfd); 72 | 73 | 74 | if (close(sockfd) == -1) { 75 | print_error_and_exit("close"); 76 | } 77 | return 0; 78 | } 79 | -------------------------------------------------------------------------------- /c-code/echo_server.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | #define BACKLOG 10 11 | #define SIZE 1024 12 | int port = 5000; 13 | 14 | static void print_error_and_exit(const char* api_name) { 15 | perror(api_name); 16 | exit(EXIT_FAILURE); 17 | } 18 | 19 | static void echo(int sockfd) { 20 | char buf[SIZE] = {0}; 21 | while (1) { 22 | printf("reading\n"); 23 | int received = read(sockfd, buf, SIZE); 24 | printf("read %d bytes\n", received); 25 | if (received == 0) { 26 | printf("connection closed\n"); 27 | break; 28 | } 29 | if (received == -1) { 30 | print_error_and_exit("read"); 31 | } 32 | 33 | buf[received] = 0; 34 | printf("read %d byts: %s", received, buf); 35 | 36 | int position = 0; 37 | int ret; 38 | while (position < received) { 39 | int ret = write(sockfd, buf + position, received - position); 40 | if (ret == -1) { 41 | print_error_and_exit("write"); 42 | } 43 | position += ret; 44 | } 45 | } 46 | } 47 | 48 | int main(int argc, char *argv[]) { 49 | int listenfd; 50 | if ((listenfd = socket(AF_INET, SOCK_STREAM, 0)) == -1) { 51 | print_error_and_exit("socket"); 52 | } 53 | 54 | int reuse = 1; 55 | if (setsockopt(listenfd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(reuse)) == -1) { 56 | print_error_and_exit("setsockopt"); 57 | } 58 | 59 | struct sockaddr_in serv_addr = {0}; 60 | serv_addr.sin_family = AF_INET; 61 | 
serv_addr.sin_addr.s_addr = htonl(INADDR_ANY); 62 | serv_addr.sin_port = htons(port); 63 | if (bind(listenfd, (struct sockaddr*)&serv_addr, sizeof(serv_addr)) == -1) { 64 | print_error_and_exit("bind"); 65 | } 66 | printf("bound\n"); 67 | if (listen(listenfd, BACKLOG) == -1) { 68 | print_error_and_exit("listen"); 69 | } 70 | printf("listened\n"); 71 | 72 | int connfd; 73 | printf("accepting\n"); 74 | if ((connfd = accept(listenfd, (struct sockaddr*) NULL, NULL)) == -1) { 75 | print_error_and_exit("accept"); 76 | } 77 | printf("accepted\n"); 78 | 79 | echo(connfd); 80 | 81 | if (close(connfd) == -1) { 82 | print_error_and_exit("close"); 83 | } 84 | if (close(listenfd) == -1) { 85 | print_error_and_exit("close"); 86 | } 87 | 88 | return 0; 89 | } 90 | -------------------------------------------------------------------------------- /c-code/epoll_example.c: -------------------------------------------------------------------------------- 1 | // Taken from https://banu.com/blog/2/how-to-use-epoll-a-complete-example-in-c/ 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | #define MAXEVENTS 64 14 | #define BUF_SIZE 1024 15 | char buf[BUF_SIZE]; 16 | char hbuf[NI_MAXHOST], sbuf[NI_MAXSERV]; 17 | 18 | // Run the server "./a.out 5000". Open several consoles and run "nc 127.0.0.1 19 | // 50000". Type some characters and hit return. Observe the server behaviour. 
20 | 21 | static void print_error_and_exit(const char* api_name) { 22 | perror(api_name); 23 | exit(EXIT_FAILURE); 24 | } 25 | 26 | static void make_socket_non_blocking(int sfd) { 27 | int flags; 28 | 29 | flags = fcntl(sfd, F_GETFL, 0); 30 | if (flags == -1) { 31 | print_error_and_exit("fcntl"); 32 | } 33 | 34 | flags |= O_NONBLOCK; 35 | int ret = fcntl(sfd, F_SETFL, flags); 36 | if (ret == -1) { 37 | print_error_and_exit("fcntl"); 38 | } 39 | } 40 | 41 | static void add_to_epoll_for_read(int efd, int fd) { 42 | struct epoll_event event; 43 | event.data.fd = fd; 44 | event.events = EPOLLIN; 45 | int ret = epoll_ctl(efd, EPOLL_CTL_ADD, fd, &event); 46 | if (ret == -1) { 47 | print_error_and_exit("epoll_ctl"); 48 | } 49 | } 50 | 51 | static int create_and_bind(const char *port) { 52 | struct addrinfo hints; 53 | struct addrinfo *res; 54 | memset(&hints, 0, sizeof(struct addrinfo)); 55 | // IPv4 and IPv6 56 | hints.ai_family = AF_UNSPEC; 57 | // TCP socket 58 | hints.ai_socktype = SOCK_STREAM; 59 | // All interfaces 60 | hints.ai_flags = AI_PASSIVE; 61 | int ret = getaddrinfo(NULL, port, &hints, &res); 62 | if (ret != 0) { 63 | fprintf(stderr, "getaddrinfo: %s\n", gai_strerror(ret)); 64 | exit(EXIT_FAILURE); 65 | } 66 | 67 | // Loop over adding and try to bind 68 | int sfd; 69 | struct addrinfo *rp; 70 | for (rp = res; rp != NULL; rp = rp->ai_next) { 71 | sfd = socket(rp->ai_family, rp->ai_socktype, rp->ai_protocol); 72 | if (sfd == -1) { 73 | continue; 74 | } 75 | ret = bind(sfd, rp->ai_addr, rp->ai_addrlen); 76 | if (ret == 0) { 77 | break; 78 | } 79 | close(sfd); 80 | } 81 | 82 | if (rp == NULL) { 83 | fprintf(stderr, "could not bind\n"); 84 | exit(EXIT_FAILURE); 85 | } 86 | 87 | freeaddrinfo(res); 88 | return sfd; 89 | } 90 | 91 | static int serve(const char* port) { 92 | int sfd = create_and_bind(port); 93 | make_socket_non_blocking(sfd); 94 | int ret = listen(sfd, SOMAXCONN); 95 | if (ret == -1) { 96 | print_error_and_exit("listen"); 97 | } 98 | return sfd; 99 
| } 100 | 101 | // We have data on the fd waiting to be read. Read and display it. We must read 102 | // whatever data is available completely, as we are running in edge-triggered 103 | // mode and won't get a notification again for the same data. 104 | static void read_all(int fd) { 105 | int done = 0; 106 | ssize_t count; 107 | int ret; 108 | 109 | for (;;) { 110 | count = read(fd, buf, sizeof(buf)); 111 | if (count == -1) { 112 | // errno == EAGAIN means we have read all data. So ignore it. 113 | if (errno != EAGAIN) { 114 | perror("read"); 115 | done = 1; 116 | } 117 | break; 118 | } else if (count == 0) { 119 | // End of file. The remote has closed the connection. 120 | done = 1; 121 | break; 122 | } 123 | 124 | buf[count] = 0; 125 | printf("read %ld bytes: %s\n", count, buf); 126 | } 127 | 128 | if (done) { 129 | // Closing the descriptor will make epoll remove it 130 | // from the set of descriptors which are monitored. 131 | close(fd); 132 | printf("file descriptor %d closed\n", fd); 133 | } 134 | } 135 | 136 | static void accept_for_read(int efd, int sfd) { 137 | struct sockaddr in_addr; 138 | socklen_t in_len = sizeof(in_addr); 139 | int infd; 140 | int ret; 141 | 142 | for (;;) { 143 | infd = accept(sfd, &in_addr, &in_len); 144 | if (infd == -1) { 145 | if ((errno == EAGAIN) || (errno == EWOULDBLOCK)) { 146 | // We have processed all incoming connections. 
147 | break; 148 | } else { 149 | perror("accept"); 150 | break; 151 | } 152 | } 153 | ret = getnameinfo(&in_addr, in_len, 154 | hbuf, sizeof hbuf, 155 | sbuf, sizeof sbuf, 156 | NI_NUMERICHOST | NI_NUMERICSERV); 157 | if (ret == 0) { 158 | printf("accepted connection on descriptor %d (host=%s, port=%s)\n", infd, hbuf, sbuf); 159 | } 160 | make_socket_non_blocking(infd);add_to_epoll_for_read(efd, infd); 161 | } 162 | } 163 | 164 | int main(int argc, const char *argv[]) { 165 | if (argc != 2) { 166 | fprintf(stderr, "usage: %s [port]\n", argv[0]); 167 | exit(EXIT_FAILURE); 168 | } 169 | int sfd = serve(argv[1]); 170 | 171 | 172 | int efd = epoll_create1(0); 173 | if (efd == -1) { 174 | print_error_and_exit("epoll_create1"); 175 | } 176 | add_to_epoll_for_read(efd, sfd); 177 | 178 | struct epoll_event* events = calloc(MAXEVENTS, sizeof(struct epoll_event)); 179 | 180 | int nfds; 181 | int fd; 182 | 183 | for (;;) { 184 | nfds = epoll_wait(efd, events, MAXEVENTS, -1); 185 | if (nfds == -1) { 186 | print_error_and_exit("epoll_wait"); 187 | } 188 | for (int i = 0; i < nfds; i++) { 189 | fd = events[i].data.fd; 190 | if (events[i].events & EPOLLERR) { 191 | // An error has occurred on this fd, or the socket is not 192 | // ready for reading (why were we notified then?) 193 | fprintf(stderr, "epoll error\n"); 194 | close(fd); 195 | continue; 196 | } else if (sfd == fd) { 197 | // We have a notification on the listening socket, which 198 | // means one or more incoming connections. 
199 | accept_for_read(efd, sfd); 200 | continue; 201 | } else { 202 | read_all(fd); 203 | } 204 | } 205 | } 206 | 207 | free(events); 208 | close(sfd); 209 | return EXIT_SUCCESS; 210 | } 211 | -------------------------------------------------------------------------------- /etcd-code/.gitignore: -------------------------------------------------------------------------------- 1 | infra*.etcd 2 | -------------------------------------------------------------------------------- /etcd-code/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/geekbang-zk-course/5db0af8ab7b622cbc22eed47d8570fc3b9609b2a/etcd-code/README.md -------------------------------------------------------------------------------- /etcd-code/client.go: -------------------------------------------------------------------------------- 1 | // Code adapted from https://github.com/etcd-io/etcd/blob/master/contrib/recipes/client.go 2 | // Copyright 2016 The etcd Authors 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | package etcd_code 16 | 17 | import ( 18 | "context" 19 | "errors" 20 | 21 | v3 "go.etcd.io/etcd/clientv3" 22 | spb "go.etcd.io/etcd/mvcc/mvccpb" 23 | ) 24 | 25 | var ( 26 | ErrKeyExists = errors.New("key already exists") 27 | ErrWaitMismatch = errors.New("unexpected wait result") 28 | ErrTooManyClients = errors.New("too many clients") 29 | ErrNoWatcher = errors.New("no watcher channel") 30 | ) 31 | 32 | // deleteRevKey deletes a key by revision, returning false if key is missing 33 | func deleteRevKey(kv v3.KV, key string, rev int64) (bool, error) { 34 | cmp := v3.Compare(v3.ModRevision(key), "=", rev) 35 | req := v3.OpDelete(key) 36 | txnresp, err := kv.Txn(context.TODO()).If(cmp).Then(req).Commit() 37 | if err != nil { 38 | return false, err 39 | } else if !txnresp.Succeeded { 40 | return false, nil 41 | } 42 | return true, nil 43 | } 44 | 45 | func claimFirstKey(kv v3.KV, kvs []*spb.KeyValue) (*spb.KeyValue, error) { 46 | for _, k := range kvs { 47 | ok, err := deleteRevKey(kv, string(k.Key), k.ModRevision) 48 | if err != nil { 49 | return nil, err 50 | } else if ok { 51 | return k, nil 52 | } 53 | } 54 | return nil, nil 55 | } 56 | 57 | -------------------------------------------------------------------------------- /etcd-code/cluster/Procfile: -------------------------------------------------------------------------------- 1 | # Adapted https://github.com/etcd-io/etcd/blob/master/Procfile 2 | # Use goreman to run `go get github.com/mattn/goreman` 3 | etcd1: etcd --name infra1 --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://127.0.0.1:2379 --listen-peer-urls http://127.0.0.1:12380 --initial-advertise-peer-urls http://127.0.0.1:12380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof --logger=zap --log-outputs=stderr 4 | etcd2: etcd --name infra2 --listen-client-urls http://127.0.0.1:22379 
--advertise-client-urls http://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 --initial-advertise-peer-urls http://127.0.0.1:22380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof --logger=zap --log-outputs=stderr 5 | etcd3: etcd --name infra3 --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 --initial-advertise-peer-urls http://127.0.0.1:32380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof --logger=zap --log-outputs=stderr 6 | proxy: etcd grpc-proxy start --endpoints=127.0.0.1:2379,127.0.0.1:22379,127.0.0.1:32379 --listen-addr=127.0.0.1:23790 --advertise-client-url=127.0.0.1:23790 --enable-pprof 7 | -------------------------------------------------------------------------------- /etcd-code/doc.go: -------------------------------------------------------------------------------- 1 | // Since I want to make it easy to check the data in the etcd with etcdctl, an external etcd cluster is used for the 2 | // tests 3 | package etcd_code 4 | -------------------------------------------------------------------------------- /etcd-code/etcd_code_test.go: -------------------------------------------------------------------------------- 1 | package etcd_code 2 | 3 | import ( 4 | v3 "go.etcd.io/etcd/clientv3" 5 | "go.etcd.io/etcd/clientv3/concurrency" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | var ( 11 | endPoints = []string{"localhost:2379"} 12 | dialTimeout = 1 * time.Second 13 | ) 14 | 15 | func newClient(t *testing.T) *v3.Client{ 16 | cli, err := v3.New(v3.Config{ 17 | Endpoints: endPoints, 18 | DialTimeout: dialTimeout, 19 | }) 20 | if err != nil { 21 | t.Fatal(err) 22 | } 23 | return cli 24 | } 25 | 26 
| func newSession(t *testing.T) (cli *v3.Client, session *concurrency.Session) { 27 | cli, err := v3.New(v3.Config{ 28 | Endpoints: endPoints, 29 | DialTimeout: dialTimeout, 30 | }) 31 | if err != nil { 32 | t.Fatal(err) 33 | } 34 | session, err = concurrency.NewSession(cli) 35 | if err != nil { 36 | t.Fatal(err) 37 | } 38 | return 39 | } -------------------------------------------------------------------------------- /etcd-code/key.go: -------------------------------------------------------------------------------- 1 | // Adapted from https://github.com/etcd-io/etcd/blob/master/contrib/recipes/key.go 2 | // Copyright 2016 The etcd Authors 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | package etcd_code 16 | 17 | import ( 18 | "context" 19 | "fmt" 20 | "go.etcd.io/etcd/mvcc/mvccpb" 21 | "strings" 22 | "time" 23 | 24 | v3 "go.etcd.io/etcd/clientv3" 25 | "go.etcd.io/etcd/clientv3/concurrency" 26 | pb "go.etcd.io/etcd/etcdserver/etcdserverpb" 27 | ) 28 | 29 | // RemoteKV is a key/revision pair created by the client and stored on etcd 30 | type RemoteKV struct { 31 | // TODO(yaojingguo): remove kv field 32 | kv v3.KV 33 | key string 34 | rev int64 35 | val string 36 | } 37 | 38 | func newKey(kv v3.KV, key string, leaseID v3.LeaseID) (*RemoteKV, error) { 39 | return newKV(kv, key, "", leaseID) 40 | } 41 | 42 | func newKV(kv v3.KV, key, val string, leaseID v3.LeaseID) (*RemoteKV, error) { 43 | rev, err := putNewKV(kv, key, val, leaseID) 44 | if err != nil { 45 | return nil, err 46 | } 47 | return &RemoteKV{kv, key, rev, val}, nil 48 | } 49 | 50 | func newUniqueKV(kv v3.KV, prefix string, val string) (*RemoteKV, error) { 51 | for { 52 | newKey := fmt.Sprintf("%s/%v", prefix, time.Now().UnixNano()) 53 | rev, err := putNewKV(kv, newKey, val, v3.NoLease) 54 | if err == nil { 55 | return &RemoteKV{kv, newKey, rev, val}, nil 56 | } 57 | if err != ErrKeyExists { 58 | return nil, err 59 | } 60 | } 61 | } 62 | 63 | // putNewKV attempts to create the given key, only succeeding if the key did 64 | // not yet exist. 65 | func putNewKV(kv v3.KV, key, val string, leaseID v3.LeaseID) (int64, error) { 66 | cmp := v3.Compare(v3.Version(key), "=", 0) 67 | req := v3.OpPut(key, val, v3.WithLease(leaseID)) 68 | txnresp, err := kv.Txn(context.TODO()).If(cmp).Then(req).Commit() 69 | if err != nil { 70 | return 0, err 71 | } 72 | if !txnresp.Succeeded { 73 | return 0, ErrKeyExists 74 | } 75 | fmt.Printf("key: %s, val: %s, mod rev: %d\n", key, val, txnresp.Header.Revision) 76 | return txnresp.Header.Revision, nil 77 | } 78 | 79 | // newSequentialKV allocates a new sequential key /nnnnn with a given 80 | // prefix and value. Note: a bookkeeping node __ is also allocated. 
81 | func newSequentialKV(kv v3.KV, prefix, val string) (*RemoteKV, error) { 82 | resp, err := kv.Get(context.TODO(), prefix, v3.WithLastKey()...) 83 | if err != nil { 84 | return nil, err 85 | } 86 | 87 | // add 1 to last key, if any 88 | newSeqNum := 0 89 | if len(resp.Kvs) != 0 { 90 | fields := strings.Split(string(resp.Kvs[0].Key), "/") 91 | _, serr := fmt.Sscanf(fields[len(fields)-1], "%d", &newSeqNum) 92 | if serr != nil { 93 | return nil, serr 94 | } 95 | newSeqNum++ 96 | } 97 | newKey := fmt.Sprintf("%s/%016d", prefix, newSeqNum) 98 | 99 | // base prefix key must be current (i.e., <=) with the server update; 100 | // the base key is important to avoid the following: 101 | // N1: LastKey() == 1, start txn. 102 | // N2: new Key 2, new Key 3, Delete Key 2 103 | // N1: txn succeeds allocating key 2 when it shouldn't 104 | baseKey := "__" + prefix 105 | 106 | // current revision might contain modification so +1 107 | cmp := v3.Compare(v3.ModRevision(baseKey), "<", resp.Header.Revision+1) 108 | reqPrefix := v3.OpPut(baseKey, "") 109 | reqnewKey := v3.OpPut(newKey, val) 110 | 111 | txn := kv.Txn(context.TODO()) 112 | txnresp, err := txn.If(cmp).Then(reqPrefix, reqnewKey).Commit() 113 | if err != nil { 114 | return nil, err 115 | } 116 | if !txnresp.Succeeded { 117 | return newSequentialKV(kv, prefix, val) 118 | } 119 | return &RemoteKV{kv, newKey, txnresp.Header.Revision, val}, nil 120 | } 121 | 122 | func (rk *RemoteKV) Key() string { return rk.key } 123 | func (rk *RemoteKV) Revision() int64 { return rk.rev } 124 | func (rk *RemoteKV) Value() string { return rk.val } 125 | 126 | func (rk *RemoteKV) Delete() error { 127 | if rk.kv == nil { 128 | return nil 129 | } 130 | _, err := rk.kv.Delete(context.TODO(), rk.key) 131 | rk.kv = nil 132 | return err 133 | } 134 | 135 | func (rk *RemoteKV) Put(val string) error { 136 | _, err := rk.kv.Put(context.TODO(), rk.key, val) 137 | return err 138 | } 139 | 140 | // EphemeralKV is a new key associated with a session lease 
141 | type EphemeralKV struct{ RemoteKV } 142 | 143 | // newEphemeralKV creates a new key/value pair associated with a session lease 144 | func newEphemeralKV(s *concurrency.Session, key, val string) (*EphemeralKV, error) { 145 | k, err := newKV(s.Client(), key, val, s.Lease()) 146 | if err != nil { 147 | return nil, err 148 | } 149 | return &EphemeralKV{*k}, nil 150 | } 151 | 152 | // newUniqueEphemeralKey creates a new unique valueless key associated with a session lease 153 | func newUniqueEphemeralKey(s *concurrency.Session, prefix string) (*EphemeralKV, error) { 154 | return newUniqueEphemeralKV(s, prefix, "") 155 | } 156 | 157 | // newUniqueEphemeralKV creates a new unique key/value pair associated with a session lease 158 | func newUniqueEphemeralKV(s *concurrency.Session, prefix, val string) (ek *EphemeralKV, err error) { 159 | for { 160 | newKey := fmt.Sprintf("%s/%v", prefix, time.Now().UnixNano()) 161 | ek, err = newEphemeralKV(s, newKey, val) 162 | if err == nil || err != ErrKeyExists { 163 | break 164 | } 165 | } 166 | return ek, err 167 | } 168 | 169 | func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error { 170 | cctx, cancel := context.WithCancel(ctx) 171 | defer cancel() 172 | 173 | var wr v3.WatchResponse 174 | wch := client.Watch(cctx, key, v3.WithRev(rev)) 175 | for wr = range wch { 176 | for _, ev := range wr.Events { 177 | if ev.Type == mvccpb.DELETE { 178 | return nil 179 | } 180 | } 181 | } 182 | if err := wr.Err(); err != nil { 183 | return err 184 | } 185 | if err := ctx.Err(); err != nil { 186 | return err 187 | } 188 | return fmt.Errorf("lost watcher waiting for delete") 189 | } 190 | 191 | 192 | // waitDeletes efficiently waits until all keys matching the prefix and no greater 193 | // than the create revision. 
194 | func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) { 195 | getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev)) 196 | for { 197 | resp, err := client.Get(ctx, pfx, getOpts...) 198 | if err != nil { 199 | return nil, err 200 | } 201 | if len(resp.Kvs) == 0 { 202 | return resp.Header, nil 203 | } 204 | lastKey := string(resp.Kvs[0].Key) 205 | if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil { 206 | return nil, err 207 | } 208 | } 209 | } -------------------------------------------------------------------------------- /etcd-code/kv_test.go: -------------------------------------------------------------------------------- 1 | package etcd_code 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | v3 "go.etcd.io/etcd/clientv3" 7 | "testing" 8 | ) 9 | 10 | func printGetResponse(r *v3.GetResponse) { 11 | fmt.Printf("response header: %q\n", r.Header) 12 | fmt.Printf("kvs: %v\n", r.Kvs) 13 | fmt.Printf("more: %t\n", r.More) 14 | fmt.Printf("count: %d\n", r.Count) 15 | } 16 | 17 | // Run beforehand: 18 | // etcdctl put TestGet 1 19 | func TestGet(t *testing.T) { 20 | cli := newClient(t) 21 | defer cli.Close() 22 | 23 | key := "TestGet" 24 | 25 | response, err := cli.Get(context.Background(), key) 26 | if err != nil { 27 | t.Fatal(err) 28 | } 29 | printGetResponse(response) 30 | } 31 | 32 | // Run beforehand: 33 | // etcdctl put TestGetWithPrefix 1 34 | // etcdctl put TestGetWithPrefix2 2 35 | func TestGetWithPrefix(t *testing.T) { 36 | cli := newClient(t) 37 | defer cli.Close() 38 | 39 | keyPrefix := "TestGetWithPrefix" 40 | response, err := cli.Get(context.Background(), keyPrefix, v3.WithPrefix()) 41 | if err != nil { 42 | t.Fatal(err) 43 | } 44 | printGetResponse(response) 45 | } 46 | 47 | // Run beforehand: 48 | // etcdctl put TestGetWithRange 1 49 | // etcdctl put TestGetWithRange2 2 50 | func TestGetWithRange(t *testing.T) { 51 | cli := newClient(t) 52 | defer 
cli.Close() 53 | 54 | response, err := cli.Get(context.Background(), "TestGetWithRange", v3.WithRange("TestGetWithRange2\x00")) 55 | if err != nil { 56 | t.Fatal(err) 57 | } 58 | printGetResponse(response) 59 | } 60 | 61 | // Run beforehand: 62 | // etcdctl put TestGetWithSerializable 1 63 | func TestGetWithSerializable(t *testing.T) { 64 | cli := newClient(t) 65 | defer cli.Close() 66 | 67 | key := "TestGetWithSerializable" 68 | response, err := cli.Get(context.Background(), key, v3.WithSerializable()) 69 | if err != nil { 70 | t.Fatal(err) 71 | } 72 | printGetResponse(response) 73 | } 74 | 75 | func TestPut(t *testing.T) { 76 | cli := newClient(t) 77 | defer cli.Close() 78 | 79 | key := "TestPut" 80 | response, err := cli.Put(context.Background(), key, "1", v3.WithPrevKV()) 81 | if err != nil { 82 | t.Fatal(err) 83 | } 84 | fmt.Printf("header: %q\n", response.Header) 85 | fmt.Printf("prev_kv: %q\n", response.PrevKv) 86 | } 87 | 88 | // Run beforehand: 89 | // etcdctl put TestDelete 1 90 | func TestDelete(t *testing.T) { 91 | cli := newClient(t) 92 | defer cli.Close() 93 | 94 | key := "TestDelete" 95 | response, err := cli.Delete(context.Background(), key) 96 | if err != nil { 97 | t.Fatal(err) 98 | } 99 | fmt.Printf("header: %v\n", response.Header) 100 | fmt.Printf("deleted: %d\n", response.Deleted) 101 | fmt.Printf("pre_kvs: %v\n", response.PrevKvs) 102 | } 103 | 104 | // Run beforehand: 105 | // etcdctl put TestTxn 1 106 | func TestTxn(t *testing.T) { 107 | cli := newClient(t) 108 | defer cli.Close() 109 | 110 | key := "TestTxn" 111 | thenValue := "then" 112 | 113 | ctx := context.Background() 114 | _, err := cli.Txn(ctx). 115 | If(v3.Compare(v3.Value(key), "<", "2")). 116 | Then(v3.OpPut(key, thenValue)). 117 | Else(v3.OpPut(key, "else")). 
118 | Commit() 119 | if err != nil { 120 | t.Fatal(err) 121 | } 122 | 123 | // Return "then" 124 | gresp, err := cli.Get(ctx, key) 125 | if err != nil { 126 | t.Fatal(err) 127 | } 128 | 129 | if l := len(gresp.Kvs); l != 1 { 130 | t.Errorf("expeected 1 key-value, but got %d key-values\n", l) 131 | } 132 | 133 | if value := string(gresp.Kvs[0].Value); value != "then" { 134 | t.Errorf("expected value %s, but got %s\n", thenValue, value) 135 | } 136 | 137 | for _, ev := range gresp.Kvs { 138 | fmt.Printf("%s: %s\n", ev.Key, ev.Value) 139 | } 140 | //delete(cli, key, t) 141 | } 142 | 143 | func TestTxnOpOrder(t *testing.T) { 144 | cli := newClient(t) 145 | defer cli.Close() 146 | key := "TestTxnOpOrder" 147 | 148 | ctx := context.Background() 149 | // If CreateRevision is 0, it means that the key does not exist. 150 | cmp := v3.Compare(v3.CreateRevision(key), "=", 0) 151 | put := v3.OpPut(key, "1") 152 | get := v3.OpGet(key) 153 | 154 | // Get Put 155 | resp, err := cli.Txn(ctx).If(cmp).Then(get, put).Commit() 156 | if err != nil { 157 | t.Fatal(err) 158 | } 159 | printTxnResponse(resp) 160 | if l := len(resp.Responses[0].GetResponseRange().Kvs); l != 0 { 161 | t.Errorf("expeected 0 key-value, but got %d key-values\n", l) 162 | } 163 | delete(ctx, cli, key, t) 164 | 165 | // Put Get 166 | resp, err = cli.Txn(ctx).If(cmp).Then(put, get).Commit() 167 | if err != nil { 168 | t.Fatal(err) 169 | } 170 | printTxnResponse(resp) 171 | if l := len(resp.Responses[1].GetResponseRange().Kvs); l != 1 { 172 | t.Errorf("expeected 1 key-value, but got %d key-values\n", l) 173 | } 174 | delete(ctx, cli, key, t) 175 | } 176 | 177 | func TestTxnMultis(t *testing.T) { 178 | cli := newClient(t) 179 | defer cli.Close() 180 | ctx := context.Background() 181 | 182 | keyPrefix := "TestTxnMultis" 183 | keys := []string{keyPrefix + "1", keyPrefix + "2"} 184 | var cmps [2]v3.Cmp 185 | var puts [2]v3.Op 186 | var gets [2]v3.Op 187 | for i := 0; i < 2; i++ { 188 | val := string(i + 1) 189 | cmps[i] 
= v3.Compare(v3.CreateRevision(keys[i]), "=", 0)
		puts[i] = v3.OpPut(keys[i], val)
		gets[i] = v3.OpGet(keys[i])
	}

	// Executes two Puts
	tresp, err := cli.Txn(ctx).If(cmps[:]...).Then(puts[:]...).Else(gets[:]...).Commit()
	if err != nil {
		// Was t.Fatal(t): it passed the *testing.T instead of the error.
		t.Fatal(err)
	}
	if !tresp.Succeeded {
		t.Fatal("Txn should succeed, but failed")
	}

	// Executes two Gets: the compares now fail because the keys exist.
	tresp, err = cli.Txn(ctx).If(cmps[:]...).Then(puts[:]...).Else(gets[:]...).Commit()
	if err != nil {
		// Was t.Fatal(t): it passed the *testing.T instead of the error.
		t.Fatal(err)
	}
	if tresp.Succeeded {
		t.Fatal("Txn should fail, but succeeded")
	}
	for i := 0; i < 2; i++ {
		if count := tresp.Responses[i].GetResponseRange().Count; count != 1 {
			// Was keys[0]: report the key actually being checked.
			t.Errorf("expected 1 value for key %s, but got %d value", keys[i], count)
		}
	}

	// Execute two Deletes
	tresp, err = cli.Txn(ctx).Then(v3.OpDelete(keys[0]), v3.OpDelete(keys[1])).Commit()
	if err != nil {
		t.Fatal(err)
	}
	for _, r := range tresp.Responses {
		if deleted := r.GetResponseDeleteRange().Deleted; deleted != 1 {
			t.Fatalf("expected to delete 1 key-value, but deleted %d key-value", deleted)
		}
	}
	gresp, err := cli.Get(ctx, keyPrefix, v3.WithPrefix())
	if err != nil {
		// The error was previously ignored before reading gresp.Count.
		t.Fatal(err)
	}
	if gresp.Count != 0 {
		t.Errorf("expected 0 key-values, but got %d", gresp.Count)
	}
}

// printTxnResponse prints the header and outcome of a Txn response.
func printTxnResponse(resp *v3.TxnResponse) {
	fmt.Printf("response header: %q\n", resp.Header)
	fmt.Printf("succeeded: %t\n", resp.Succeeded)
}

// TestVersionAndRevision demonstrates how the key version and the store
// revision evolve across Put/Get/Delete calls.
func TestVersionAndRevision(t *testing.T) {
	cli := newClient(t)
	defer cli.Close()
	key := "TestVersionAndRevision"
	ctx := context.Background()

	put(ctx, cli, key, t)
	get(ctx, cli, key, t)
	put(ctx, cli, key, t)
	get(ctx, cli, key, t)
	delete(ctx, cli, key, t)

	put(ctx, cli, key, t)
	get(ctx, cli, key, t)
put(ctx, cli, key, t) 254 | get(ctx, cli, key, t) 255 | } 256 | 257 | func put(ctx context.Context, cli *v3.Client, key string, t *testing.T) { 258 | fmt.Println("Put") 259 | response, err := cli.Put(ctx, key, "one", v3.WithPrevKV()) 260 | if err != nil { 261 | t.Fatal(err) 262 | } 263 | fmt.Printf("put revision: %d\n", response.Header.Revision) 264 | fmt.Printf("prev_kv: %v\n\n", response.PrevKv) 265 | } 266 | 267 | func get(ctx context.Context, cli *v3.Client, key string, t *testing.T) { 268 | fmt.Println("Get") 269 | response, err := cli.Get(ctx, key, v3.WithPrevKV()) 270 | if err != nil { 271 | t.Fatal(err) 272 | } 273 | fmt.Printf("get revision: %d\n", response.Header.Revision) 274 | fmt.Printf("kvs: %v\n\n", response.Kvs) 275 | } 276 | 277 | func delete(ctx context.Context, cli *v3.Client, key string, t *testing.T) { 278 | fmt.Println("Delete") 279 | response, err := cli.Delete(ctx, key, v3.WithPrevKV()) 280 | if err != nil { 281 | t.Fatal(err) 282 | } 283 | fmt.Printf("delete revision: %d\n", response.Header.Revision) 284 | fmt.Printf("deleted: %d\n", response.Deleted) 285 | fmt.Printf("prev_kvs: %v\n\n", response.PrevKvs) 286 | } 287 | -------------------------------------------------------------------------------- /etcd-code/lease_test.go: -------------------------------------------------------------------------------- 1 | package etcd_code 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | v3 "go.etcd.io/etcd/clientv3" 7 | "testing" 8 | "time" 9 | ) 10 | 11 | func TestLeaseGrant(t *testing.T) { 12 | cli := newClient(t) 13 | defer cli.Close() 14 | key := "TestLeaseGrant" 15 | 16 | ctx := context.Background() 17 | 18 | // Grant a lease 19 | resp, err := cli.Grant(ctx, 10) 20 | 21 | // Put a key with a lease 22 | _, err = cli.Put(ctx, key, "1", v3.WithLease(resp.ID)) 23 | if err != nil { 24 | t.Fatal(err) 25 | } 26 | // The key will be removed after 5 seconds. Use etcdctl check it. 
27 | time.Sleep(15 * time.Second) 28 | fmt.Println("woke up") 29 | } 30 | 31 | func TestLeaseRevoke(t *testing.T) { 32 | cli := newClient(t) 33 | defer cli.Close() 34 | key := "TestLeaseRevoke" 35 | ctx := context.Background() 36 | 37 | resp, err := cli.Grant(ctx, 5) 38 | if err != nil { 39 | t.Fatal(err) 40 | } 41 | _, err = cli.Put(ctx, key, "1", v3.WithLease(resp.ID)) 42 | if err != nil { 43 | t.Fatal(err) 44 | } 45 | 46 | // Revoking lease expires the key attached to the lease ID 47 | _, err = cli.Revoke(ctx, resp.ID) 48 | if err != nil { 49 | t.Fatal(err) 50 | } 51 | 52 | gresp, err := cli.Get(ctx, key) 53 | if err != nil { 54 | t.Fatal(err) 55 | } 56 | fmt.Println("number of keys:", len(gresp.Kvs)) 57 | if keyCount := len(gresp.Kvs); keyCount != 0 { 58 | t.Errorf("expected %d keys, but got %d keys", 0, keyCount) 59 | } 60 | // Output: number of keys: 0 61 | } 62 | 63 | func TestLeaseWithKeepAlive(t *testing.T) { 64 | cli := newClient(t) 65 | defer cli.Close() 66 | key := "TestLeaseWithKeepAlive" 67 | ctx := context.Background() 68 | 69 | resp, err := cli.Grant(ctx, 5) 70 | if err != nil { 71 | t.Fatal(err) 72 | } 73 | _, err = cli.Put(context.TODO(), key, "1", v3.WithLease(resp.ID)) 74 | if err != nil { 75 | t.Fatal(err) 76 | } 77 | 78 | // Since the lease will be kept alive, the 79 | ch, kaerr := cli.KeepAlive(context.TODO(), resp.ID) 80 | if kaerr != nil { 81 | t.Fatal(kaerr) 82 | } 83 | 84 | ka := <-ch 85 | fmt.Println("ttl:", ka.TTL) 86 | // Output: ttl: 5 87 | seconds := 50 88 | fmt.Printf("sleeping for %d seconds\n", seconds) 89 | // During this time, "etcdctl get TestLeaseWithKeepAlive" always return a value 90 | time.Sleep(20 * time.Second) 91 | fmt.Printf("woke up, the key should be deleted after some time\n") 92 | } 93 | 94 | func TestLeaseWithKeepAliveOnce(t *testing.T) { 95 | cli := newClient(t) 96 | defer cli.Close() 97 | key := "TestLeaseWithKeepAliveOnce" 98 | ctx := context.Background() 99 | 100 | // Put with Lease 101 | resp, err := 
cli.Grant(ctx, 5) 102 | if err != nil { 103 | t.Fatal(err) 104 | } 105 | _, err = cli.Put(ctx, key, "bar", v3.WithLease(resp.ID)) 106 | if err != nil { 107 | t.Fatal(err) 108 | } 109 | 110 | go func() { 111 | time.Sleep(8 * time.Second) 112 | response, err := cli.Get(ctx, key) 113 | if err != nil { 114 | t.Fatal(err) 115 | } 116 | fmt.Printf("kvs: %q\n", response.Kvs) 117 | if response.Count != 1 { 118 | t.Fatalf("expected count %d, but got %d\n", 1, response.Count) 119 | } 120 | }() 121 | 122 | // Renew the lease only once. If the following code block is commented, 123 | // the above response.Count check will fail. 124 | time.Sleep(4 * time.Second) 125 | _, kaerr := cli.KeepAliveOnce(ctx, resp.ID) 126 | if kaerr != nil { 127 | t.Fatal(kaerr) 128 | } 129 | 130 | time.Sleep(12 * time.Second) 131 | } 132 | -------------------------------------------------------------------------------- /etcd-code/mutex.go: -------------------------------------------------------------------------------- 1 | // Adapted from https://github.com/etcd-io/etcd/blob/master/clientv3/concurrency/mutex.go 2 | // Copyright 2016 The etcd Authors 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 
15 | package etcd_code 16 | 17 | import ( 18 | "context" 19 | "fmt" 20 | v3 "go.etcd.io/etcd/clientv3" 21 | "go.etcd.io/etcd/clientv3/concurrency" 22 | pb "go.etcd.io/etcd/etcdserver/etcdserverpb" 23 | ) 24 | 25 | type Mutex struct { 26 | s *concurrency.Session 27 | 28 | pfx string 29 | myKey string 30 | myRev int64 31 | hdr *pb.ResponseHeader 32 | } 33 | 34 | func NewMutex(s *concurrency.Session, pfx string) *Mutex { 35 | m := &Mutex{s, pfx + "/", "", -1, nil} 36 | m.init() 37 | return m 38 | } 39 | 40 | func (m *Mutex) init() { 41 | m.myKey = "\x00" 42 | m.myRev = -1 43 | } 44 | 45 | func (m *Mutex) Lock(ctx context.Context) error { 46 | s := m.s 47 | client := m.s.Client() 48 | 49 | m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease()) 50 | cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0) 51 | put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease())) 52 | get := v3.OpGet(m.myKey) 53 | getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...) 54 | resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit() 55 | if err != nil { 56 | m.hdr = resp.Header 57 | return err 58 | } 59 | m.myRev = resp.Header.Revision 60 | if !resp.Succeeded { 61 | m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision 62 | } 63 | 64 | // if no key on prefix / the minimum rev is key, already hold the lock 65 | ownerKey := resp.Responses[1].GetResponseRange().Kvs 66 | if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev { 67 | return nil 68 | } 69 | 70 | // wait for deletion revisions prior to myKey 71 | hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1) 72 | // release lock key if wait failed 73 | if werr != nil { 74 | m.Unlock(client.Ctx()) 75 | } else { 76 | m.hdr = hdr 77 | } 78 | return werr 79 | } 80 | 81 | func (m *Mutex) Unlock(ctx context.Context) error { 82 | client := m.s.Client() 83 | if _, err := client.Delete(ctx, m.myKey); err != nil { 84 | return err 85 | } 86 | m.init() 87 | return nil 88 | } 89 | 90 | 
-------------------------------------------------------------------------------- /etcd-code/mutex_test.go: -------------------------------------------------------------------------------- 1 | package etcd_code 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "go.etcd.io/etcd/clientv3/concurrency" 7 | "testing" 8 | "time" 9 | ) 10 | 11 | func TestMutexBasics(t *testing.T) { 12 | cli := newClient(t) 13 | defer cli.Close() 14 | 15 | // Create two separate sessions for lock competition 16 | s1, err := concurrency.NewSession(cli) 17 | if err != nil { 18 | t.Fatal(err) 19 | } 20 | defer s1.Close() 21 | pfx := "/TestMutexBasics" 22 | m1 := NewMutex(s1, pfx) 23 | 24 | s2, err := concurrency.NewSession(cli) 25 | if err != nil { 26 | t.Fatal(err) 27 | } 28 | defer s2.Close() 29 | m2 := NewMutex(s2, pfx) 30 | 31 | ctx := context.Background() 32 | 33 | // acquire lock for s1 34 | if err := m1.Lock(ctx); err != nil { 35 | t.Fatal(err) 36 | } 37 | fmt.Printf("acquired lock for s1 with key %s and rev %d\n", m1.myKey, m1.myRev) 38 | 39 | m2Locked := make(chan struct{}) 40 | go func() { 41 | defer close(m2Locked) 42 | if err := m2.Lock(ctx); err != nil { 43 | t.Fatal(err) 44 | } 45 | }() 46 | 47 | var seconds time.Duration = 3 48 | fmt.Printf("sleeping for %d seconds\n", seconds) 49 | time.Sleep(seconds * time.Second) 50 | if err := m1.Unlock(ctx); err != nil { 51 | t.Fatal(err) 52 | } 53 | fmt.Println("released lock for s1") 54 | 55 | <-m2Locked 56 | fmt.Printf("acquired lock for s2 with key %s and rev %d\n", m2.myKey, m2.myRev) 57 | } 58 | -------------------------------------------------------------------------------- /etcd-code/queue.go: -------------------------------------------------------------------------------- 1 | // Adapted from https://github.com/etcd-io/etcd/blob/master/contrib/recipes/queue.go 2 | // Copyright 2016 The etcd Authors 3 | // 4 | // Licensed under the Apache License, Version 2.0 (the "License"); 5 | // you may not use this file except in compliance with the 
License. 6 | // You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | package etcd_code 16 | 17 | import ( 18 | "context" 19 | v3 "go.etcd.io/etcd/clientv3" 20 | "go.etcd.io/etcd/mvcc/mvccpb" 21 | ) 22 | 23 | type Queue struct { 24 | client *v3.Client 25 | ctx context.Context 26 | keyPrefix string 27 | } 28 | 29 | func NewQueue(client *v3.Client, keyPrefix string) *Queue { 30 | return &Queue{client, context.TODO(), keyPrefix} 31 | } 32 | 33 | // Enqueue enqueues an item into the queue. 34 | func (q *Queue) Enqueue(val string) error { 35 | _, err := newUniqueKV(q.client, q.keyPrefix, val) 36 | return err 37 | } 38 | 39 | // Dequeue returns Enqueue()'d elements in FIFO order. If the queue is empty, Dequeue blocks until elements are 40 | // available. It is similar to Java BlockingQueue's take method. 41 | func (q *Queue) Dequeue() (string, error) { 42 | resp, err := q.client.Get(q.ctx, q.keyPrefix, v3.WithFirstRev()...) 
43 | if err != nil { 44 | return "", err 45 | } 46 | 47 | kv, err := claimFirstKey(q.client, resp.Kvs) 48 | if err != nil { 49 | return "", err 50 | } else if kv != nil { 51 | return string(kv.Value), nil 52 | } else if resp.More { 53 | // Missed some items, retry to read in more 54 | return q.Dequeue() 55 | } 56 | 57 | // Nothing yet: wait on elements 58 | ev, err := WaitPrefixEvents(q.client, q.keyPrefix, resp.Header.Revision, []mvccpb.Event_EventType{mvccpb.PUT}) 59 | if err != nil { 60 | return "", err 61 | } 62 | 63 | ok, err := deleteRevKey(q.client, string(ev.Kv.Key), ev.Kv.ModRevision) 64 | if err != nil { 65 | return "", err 66 | } else if !ok { 67 | return q.Dequeue() 68 | } 69 | return string(ev.Kv.Value), err 70 | } 71 | -------------------------------------------------------------------------------- /etcd-code/queue_test.go: -------------------------------------------------------------------------------- 1 | package etcd_code 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestQueueBasics(t *testing.T) { 8 | cli := newClient(t) 9 | defer cli.Close() 10 | 11 | queue := NewQueue(cli, "/queue") 12 | firstKey := "first" 13 | secondKey := "second" 14 | thirdKey := "third" 15 | 16 | enqueue(t, queue, firstKey) 17 | enqueue(t, queue, secondKey) 18 | enqueue(t, queue, thirdKey) 19 | 20 | //time.Sleep(100 * time.Second) 21 | 22 | dequeue(t, queue, firstKey) 23 | dequeue(t, queue, secondKey ) 24 | dequeue(t, queue, thirdKey ) 25 | } 26 | 27 | func enqueue(t *testing.T, queue *Queue, val string) { 28 | if err := queue.Enqueue(val); err != nil { 29 | t.Fatal(err) 30 | } 31 | } 32 | 33 | func dequeue(t *testing.T, queue *Queue, expectedVal string) { 34 | val, err := queue.Dequeue() 35 | if err != nil { 36 | t.Fatal(err) 37 | } 38 | if val != expectedVal { 39 | t.Errorf("expected val %s, but got %s", expectedVal, val) 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /etcd-code/watch.go: 
Adapted from https://github.com/etcd-io/etcd/blob/master/contrib/recipes/watch.go
25 | func WaitEvents(c *clientv3.Client, key string, rev int64, evs []mvccpb.Event_EventType) (*clientv3.Event, error) { 26 | ctx, cancel := context.WithCancel(context.Background()) 27 | defer cancel() 28 | wc := c.Watch(ctx, key, clientv3.WithRev(rev)) 29 | if wc == nil { 30 | return nil, ErrNoWatcher 31 | } 32 | return waitEvents(wc, evs), nil 33 | } 34 | 35 | func WaitPrefixEvents(c *clientv3.Client, prefix string, rev int64, evs []mvccpb.Event_EventType) (*clientv3.Event, error) { 36 | ctx, cancel := context.WithCancel(context.Background()) 37 | defer cancel() 38 | wc := c.Watch(ctx, prefix, clientv3.WithPrefix(), clientv3.WithRev(rev)) 39 | if wc == nil { 40 | return nil, ErrNoWatcher 41 | } 42 | return waitEvents(wc, evs), nil 43 | } 44 | 45 | func waitEvents(wc clientv3.WatchChan, evs []mvccpb.Event_EventType) *clientv3.Event { 46 | i := 0 47 | for wresp := range wc { 48 | for _, ev := range wresp.Events { 49 | if ev.Type == evs[i] { 50 | i++ 51 | if i == len(evs) { 52 | return ev 53 | } 54 | } 55 | } 56 | } 57 | return nil 58 | } 59 | -------------------------------------------------------------------------------- /etcd-code/watch_test.go: -------------------------------------------------------------------------------- 1 | package etcd_code 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | v3 "go.etcd.io/etcd/clientv3" 7 | "testing" 8 | ) 9 | 10 | var end = []byte("end") 11 | 12 | // After start the test, run: 13 | // etcdctl put TestOneWatch 1 14 | func TestOneWatch(t *testing.T) { 15 | cli := newClient(t) 16 | defer cli.Close() 17 | 18 | rch := cli.Watch(context.Background(), "TestOneWatch") 19 | for wresp := range rch { 20 | printEvents(&wresp) 21 | } 22 | } 23 | 24 | // After start the test, run: 25 | // etcdctl put TestTwoWatches1 1 26 | // etcdctl put TestTwoWatches2 2 27 | func TestTwoWatches(t *testing.T) { 28 | cli := newClient(t) 29 | 30 | keys := []string{"TestTwoWatches1", "TestTwoWatches2"} 31 | rch1 := cli.Watch(context.Background(), keys[0]) 32 | 
rch2 := cli.Watch(context.Background(), keys[1]) 33 | 34 | for { 35 | select { 36 | case wresp1 := <-rch1: 37 | printEvents(&wresp1) 38 | case wresp2 := <-rch2: 39 | printEvents(&wresp2) 40 | } 41 | } 42 | } 43 | 44 | func TestWatchFromPast(t *testing.T) { 45 | cli := newClient(t) 46 | defer cli.Close() 47 | 48 | key := "TestWatchFromPast" 49 | ctx := context.Background() 50 | 51 | presp, err := cli.Put(ctx, key, "1") 52 | if err != nil { 53 | t.Fatal(err) 54 | } 55 | rev := presp.Header.Revision 56 | 57 | rch := cli.Watch(ctx, key, v3.WithRev(rev)) 58 | for wresp := range rch { 59 | printEvents(&wresp) 60 | } 61 | // Output: PUT event TestWatchFromPast: 1 62 | } 63 | 64 | func printEvents(resp *v3.WatchResponse) { 65 | for _, ev := range resp.Events { 66 | fmt.Printf("%s event key-value %s: %s\n", ev.Type, ev.Kv.Key, ev.Kv.Value) 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /scripts/executor.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | . 
zkEnv.sh 3 | 4 | export CLASSPATH="build/classes/java/main:$CLASSPATH" 5 | mkdir -p data 6 | java org.yao.watchclient.Executor "$@" 7 | -------------------------------------------------------------------------------- /scripts/init-quorum.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | quorum_dir=/data/zk/quorum 4 | rm -fr $quorum_dir 5 | mkdir -p $quorum_dir 6 | 7 | for no in `seq 3`; do 8 | node_dir="$quorum_dir/node$no" 9 | mkdir $node_dir 10 | echo $no > "$node_dir/myid" 11 | done 12 | 13 | -------------------------------------------------------------------------------- /scripts/seq.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | i=0 3 | for ((;; )) 4 | do 5 | i=$(($i+1)) 6 | echo $i 7 | sleep 1 8 | done 9 | -------------------------------------------------------------------------------- /scripts/snapshotFormatter.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | . 
zkEnv.sh 3 | export CLASSPATH="$CLASSPATH" 4 | java org.apache.zookeeper.server.SnapshotFormatter "$@" -------------------------------------------------------------------------------- /slides/第一章:基础篇.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/geekbang-zk-course/5db0af8ab7b622cbc22eed47d8570fc3b9609b2a/slides/第一章:基础篇.pdf -------------------------------------------------------------------------------- /slides/第三章:运维篇.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/geekbang-zk-course/5db0af8ab7b622cbc22eed47d8570fc3b9609b2a/slides/第三章:运维篇.pdf -------------------------------------------------------------------------------- /slides/第二章:开发篇.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/geekbang-zk-course/5db0af8ab7b622cbc22eed47d8570fc3b9609b2a/slides/第二章:开发篇.pdf -------------------------------------------------------------------------------- /slides/第五章:对比Chubby、etcd和ZooKeeper.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/geekbang-zk-course/5db0af8ab7b622cbc22eed47d8570fc3b9609b2a/slides/第五章:对比Chubby、etcd和ZooKeeper.pdf -------------------------------------------------------------------------------- /slides/第六章:ZooKeeper实现原理和源码解读.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geektime-geekbang/geekbang-zk-course/5db0af8ab7b622cbc22eed47d8570fc3b9609b2a/slides/第六章:ZooKeeper实现原理和源码解读.pdf -------------------------------------------------------------------------------- /slides/第四章:进阶篇.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/geektime-geekbang/geekbang-zk-course/5db0af8ab7b622cbc22eed47d8570fc3b9609b2a/slides/第四章:进阶篇.pdf -------------------------------------------------------------------------------- /src/main/java/org/yao/DigestGenerator.java: -------------------------------------------------------------------------------- 1 | package org.yao; 2 | 3 | import org.apache.zookeeper.server.auth.DigestAuthenticationProvider; 4 | 5 | /** 6 | * Generate digest for ZooKeeper super user authentication. 7 | */ 8 | public class DigestGenerator { 9 | 10 | public static void main(String[] args) throws Exception { 11 | System.out.println(DigestAuthenticationProvider.generateDigest("super:jingguo")); 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /src/main/java/org/yao/netty/discard/DiscardServer.java: -------------------------------------------------------------------------------- 1 | package org.yao.netty.discard; 2 | 3 | import io.netty.bootstrap.ServerBootstrap; 4 | import io.netty.channel.ChannelFuture; 5 | import io.netty.channel.ChannelInitializer; 6 | import io.netty.channel.ChannelOption; 7 | import io.netty.channel.EventLoopGroup; 8 | import io.netty.channel.nio.NioEventLoopGroup; 9 | import io.netty.channel.socket.SocketChannel; 10 | import io.netty.channel.socket.nio.NioServerSocketChannel; 11 | 12 | public class DiscardServer { 13 | private int port; 14 | 15 | public DiscardServer(int port) { 16 | this.port = port; 17 | } 18 | 19 | public void run() throws Exception { 20 | EventLoopGroup bossGroup = new NioEventLoopGroup(); 21 | EventLoopGroup workerGroup = new NioEventLoopGroup(); 22 | 23 | try { 24 | ServerBootstrap b = new ServerBootstrap(); 25 | b.group(bossGroup, workerGroup) 26 | .channel(NioServerSocketChannel.class) 27 | .childHandler(new ChannelInitializer() { 28 | @Override 29 | public void initChannel(SocketChannel ch) { 30 | ch.pipeline().addLast(new DiscardServerHandler()); 31 | } 32 | }) 
33 | .option(ChannelOption.SO_BACKLOG, 128) 34 | .childOption(ChannelOption.SO_KEEPALIVE, true); 35 | 36 | ChannelFuture f = b.bind(port).sync(); 37 | 38 | f.channel().closeFuture().sync(); 39 | } finally { 40 | workerGroup.shutdownGracefully(); 41 | bossGroup.shutdownGracefully(); 42 | } 43 | } 44 | 45 | public static void main(String[] args) throws Exception { 46 | int port = 8080; 47 | new DiscardServer(port).run(); 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /src/main/java/org/yao/netty/discard/DiscardServerHandler.java: -------------------------------------------------------------------------------- 1 | package org.yao.netty.discard; 2 | 3 | import io.netty.buffer.ByteBuf; 4 | import io.netty.channel.ChannelHandlerContext; 5 | import io.netty.channel.ChannelInboundHandlerAdapter; 6 | import io.netty.util.ReferenceCountUtil; 7 | 8 | public class DiscardServerHandler extends ChannelInboundHandlerAdapter { 9 | @Override 10 | public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { 11 | ByteBuf in = (ByteBuf) msg; 12 | try { 13 | while (in.isReadable()) { 14 | System.out.print((char) in.readByte()); 15 | System.out.flush(); 16 | } 17 | } finally { 18 | ReferenceCountUtil.release(msg); 19 | } 20 | } 21 | 22 | @Override 23 | public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { 24 | cause.printStackTrace(); 25 | ctx.close(); 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /src/main/java/org/yao/netty/echo/EchoClient.java: -------------------------------------------------------------------------------- 1 | package org.yao.netty.echo; 2 | 3 | import io.netty.bootstrap.Bootstrap; 4 | import io.netty.channel.ChannelFuture; 5 | import io.netty.channel.ChannelInitializer; 6 | import io.netty.channel.ChannelOption; 7 | import io.netty.channel.ChannelPipeline; 8 | import io.netty.channel.EventLoopGroup; 9 | 
import io.netty.channel.nio.NioEventLoopGroup; 10 | import io.netty.channel.socket.SocketChannel; 11 | import io.netty.channel.socket.nio.NioSocketChannel; 12 | 13 | /** 14 | * Sends one message when a connection is open and echoes back any received 15 | * data to the server. Simply put, the echo client initiates the ping-pong 16 | * traffic between the echo client and server by sending the first message to 17 | * the server. 18 | */ 19 | public final class EchoClient { 20 | static final boolean SSL = System.getProperty("ssl") != null; 21 | static final String HOST = System.getProperty("host", "127.0.0.1"); 22 | static final int PORT = Integer.parseInt(System.getProperty("port", "8007")); 23 | 24 | public static void main(String[] args) throws Exception { 25 | // Configure the client. 26 | EventLoopGroup group = new NioEventLoopGroup(); 27 | try { 28 | Bootstrap b = new Bootstrap(); 29 | b.group(group) 30 | .channel(NioSocketChannel.class) 31 | .option(ChannelOption.TCP_NODELAY, true) 32 | .handler(new ChannelInitializer() { 33 | @Override 34 | public void initChannel(SocketChannel ch) throws Exception { 35 | ChannelPipeline p = ch.pipeline(); 36 | p.addLast(new EchoClientHandler()); 37 | } 38 | }); 39 | 40 | // Start the client. 41 | ChannelFuture f = b.connect(HOST, PORT).sync(); 42 | 43 | // Wait until the connection is closed. 44 | f.channel().closeFuture().sync(); 45 | } finally { 46 | // Shut down the event loop to terminate all threads. 
47 | group.shutdownGracefully(); 48 | } 49 | } 50 | } -------------------------------------------------------------------------------- /src/main/java/org/yao/netty/echo/EchoClientHandler.java: -------------------------------------------------------------------------------- 1 | package org.yao.netty.echo; 2 | 3 | import io.netty.buffer.ByteBuf; 4 | import io.netty.buffer.Unpooled; 5 | import io.netty.channel.ChannelHandlerContext; 6 | import io.netty.channel.ChannelInboundHandlerAdapter; 7 | 8 | /** 9 | * Handler implementation for the echo client. It initiates the ping-pong traffic between the echo 10 | * client and server by sending the first message to the server. 11 | */ 12 | public class EchoClientHandler extends ChannelInboundHandlerAdapter { 13 | 14 | private final ByteBuf firstMessage; 15 | 16 | /** Creates a client-side handler. */ 17 | public EchoClientHandler() { 18 | firstMessage = Unpooled.buffer(11); 19 | for (int i = 0; i < firstMessage.capacity() - 1; i++) { 20 | firstMessage.writeByte((byte) i + '0'); 21 | } 22 | firstMessage.writeByte('\n'); 23 | } 24 | 25 | @Override 26 | public void channelActive(ChannelHandlerContext ctx) { 27 | // System.out.printf("channelActive\n"); 28 | ctx.writeAndFlush(firstMessage); 29 | } 30 | 31 | @Override 32 | public void channelRead(ChannelHandlerContext ctx, Object msg) { 33 | // System.out.printf("channelRead\n"); 34 | ByteBuf in = (ByteBuf) msg; 35 | 36 | while (in.isReadable()) { 37 | System.out.print((char) in.readByte()); 38 | } 39 | System.out.flush(); 40 | 41 | // Equivalent to ByteBuffer's flip 42 | in.readerIndex(0); 43 | ctx.write(in); 44 | } 45 | 46 | @Override 47 | public void channelReadComplete(ChannelHandlerContext ctx) { 48 | // System.out.printf("channelReadComplete\n"); 49 | ctx.flush(); 50 | } 51 | 52 | @Override 53 | public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { 54 | // Close the connection when an exception is raised. 
55 | cause.printStackTrace(); 56 | ctx.close(); 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /src/main/java/org/yao/netty/echo/EchoServer.java: -------------------------------------------------------------------------------- 1 | package org.yao.netty.echo; 2 | 3 | import io.netty.bootstrap.ServerBootstrap; 4 | import io.netty.channel.ChannelFuture; 5 | import io.netty.channel.ChannelInitializer; 6 | import io.netty.channel.ChannelOption; 7 | import io.netty.channel.ChannelPipeline; 8 | import io.netty.channel.EventLoopGroup; 9 | import io.netty.channel.nio.NioEventLoopGroup; 10 | import io.netty.channel.socket.SocketChannel; 11 | import io.netty.channel.socket.nio.NioServerSocketChannel; 12 | 13 | /** 14 | * Echoes back any received data from a client. 15 | */ 16 | public final class EchoServer { 17 | static final int PORT = Integer.parseInt(System.getProperty("port", "8007")); 18 | 19 | public static void main(String[] args) throws Exception { 20 | // Configure the server. 21 | EventLoopGroup bossGroup = new NioEventLoopGroup(1); 22 | EventLoopGroup workerGroup = new NioEventLoopGroup(); 23 | final EchoServerHandler serverHandler = new EchoServerHandler(); 24 | try { 25 | ServerBootstrap b = new ServerBootstrap(); 26 | b.group(bossGroup, workerGroup) 27 | .channel(NioServerSocketChannel.class) 28 | .option(ChannelOption.SO_BACKLOG, 100) 29 | .childHandler(new ChannelInitializer() { 30 | @Override 31 | public void initChannel(SocketChannel ch) throws Exception { 32 | ChannelPipeline p = ch.pipeline(); 33 | p.addLast(serverHandler); 34 | } 35 | }); 36 | 37 | // Start the server. 38 | ChannelFuture f = b.bind(PORT).sync(); 39 | 40 | // Wait until the server socket is closed. 41 | f.channel().closeFuture().sync(); 42 | } finally { 43 | // Shut down all event loops to terminate all threads. 
44 | bossGroup.shutdownGracefully(); 45 | workerGroup.shutdownGracefully(); 46 | } 47 | } 48 | } -------------------------------------------------------------------------------- /src/main/java/org/yao/netty/echo/EchoServerHandler.java: -------------------------------------------------------------------------------- 1 | package org.yao.netty.echo; 2 | 3 | import io.netty.channel.ChannelHandler.Sharable; 4 | import io.netty.channel.ChannelHandlerContext; 5 | import io.netty.channel.ChannelInboundHandlerAdapter; 6 | 7 | /** 8 | * Handler implementation for the echo server. 9 | */ 10 | @Sharable 11 | public class EchoServerHandler extends ChannelInboundHandlerAdapter { 12 | 13 | @Override 14 | public void channelRead(ChannelHandlerContext ctx, Object msg) { 15 | ctx.write(msg); 16 | } 17 | 18 | @Override 19 | public void channelReadComplete(ChannelHandlerContext ctx) { 20 | ctx.flush(); 21 | } 22 | 23 | @Override 24 | public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { 25 | // Close the connection when an exception is raised. 26 | cause.printStackTrace(); 27 | ctx.close(); 28 | } 29 | } -------------------------------------------------------------------------------- /src/main/java/org/yao/socket/EchoClient.java: -------------------------------------------------------------------------------- 1 | package org.yao.socket; 2 | 3 | import java.io.BufferedReader; 4 | import java.io.InputStreamReader; 5 | import java.io.PrintWriter; 6 | import java.net.Socket; 7 | 8 | /** 9 | *
10 |  * java -cp build/classes/java/main org.yao.socket.EchoClient 127.0.0.1 6000
11 |  * 
12 | */ 13 | public class EchoClient { 14 | public static void main(String[] args) throws Exception { 15 | if (args.length != 2) { 16 | System.err.println("Usage: java EchoClient "); 17 | System.exit(1); 18 | } 19 | 20 | String ip = args[0]; 21 | int port = Integer.parseInt(args[1]); 22 | 23 | try (Socket socket = new Socket(ip, port); 24 | PrintWriter out = new PrintWriter(socket.getOutputStream(), true); 25 | BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream())); 26 | BufferedReader stdin = new BufferedReader(new InputStreamReader(System.in))) { 27 | String msg; 28 | while ((msg = stdin.readLine()) != null) { 29 | out.println(msg); 30 | System.out.println("echo: " + in.readLine()); 31 | } 32 | } 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /src/main/java/org/yao/socket/EchoServer.java: -------------------------------------------------------------------------------- 1 | package org.yao.socket; 2 | 3 | import com.google.common.io.Closeables; 4 | 5 | import java.io.BufferedReader; 6 | import java.io.Closeable; 7 | import java.io.IOException; 8 | import java.io.InputStreamReader; 9 | import java.io.PrintWriter; 10 | import java.net.InetSocketAddress; 11 | import java.net.ServerSocket; 12 | import java.net.Socket; 13 | 14 | /** 15 | *
16 |  * java -cp build/classes/java/main org.yao.socket.EchoServer 6000
17 |  * 
18 | */ 19 | public class EchoServer { 20 | public static void main(String[] args) throws IOException { 21 | if (args.length != 1) { 22 | System.err.println("Usage: java EchoServer "); 23 | System.exit(1); 24 | } 25 | int port = Integer.parseInt(args[0]); 26 | ServerSocket serverSocket = null; 27 | Socket socket = null; 28 | PrintWriter out = null; 29 | BufferedReader in = null; 30 | try { 31 | // Method 1 32 | // serverSocket = new ServerSocket(Integer.parseInt(args[0])); 33 | 34 | // Method 2 35 | serverSocket = new ServerSocket(); 36 | serverSocket.setReuseAddress(true); 37 | InetSocketAddress address = new InetSocketAddress(port); 38 | serverSocket.bind(address); 39 | 40 | socket = serverSocket.accept(); 41 | out = new PrintWriter(socket.getOutputStream(), true); 42 | in = new BufferedReader(new InputStreamReader(socket.getInputStream())); 43 | String msg; 44 | while ((msg = in.readLine()) != null) { 45 | out.println(msg); 46 | } 47 | } finally { 48 | close(out); 49 | close(in); 50 | close(socket); 51 | close(serverSocket); 52 | } 53 | } 54 | 55 | private static void close(Closeable closeable) { 56 | if (closeable == null) { 57 | return; 58 | } 59 | try { 60 | closeable.close(); 61 | } catch (Throwable th) { 62 | throw new RuntimeException(th); 63 | } 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /src/main/java/org/yao/watchclient/DataMonitor.java: -------------------------------------------------------------------------------- 1 | package org.yao.watchclient; 2 | 3 | /** 4 | * A simple class that monitors the data and existence of a ZooKeeper node. It uses asynchronous 5 | * ZooKeeper APIs. 
6 | */ 7 | import java.util.Arrays; 8 | 9 | import org.apache.zookeeper.KeeperException; 10 | import org.apache.zookeeper.WatchedEvent; 11 | import org.apache.zookeeper.Watcher; 12 | import org.apache.zookeeper.ZooKeeper; 13 | import org.apache.zookeeper.AsyncCallback.StatCallback; 14 | import org.apache.zookeeper.KeeperException.Code; 15 | import org.apache.zookeeper.data.Stat; 16 | 17 | public class DataMonitor implements StatCallback { 18 | private ZooKeeper zk; 19 | private String znode; 20 | boolean dead; 21 | private DataMonitorListener listener; 22 | private byte prevData[]; 23 | 24 | public DataMonitor(ZooKeeper zk, String znode, DataMonitorListener listener) { 25 | this.zk = zk; 26 | this.znode = znode; 27 | this.listener = listener; 28 | // Get things started by checking if the node exists. We are going 29 | // to be completely event driven 30 | zk.exists(znode, true, this, null); 31 | } 32 | 33 | public void handle(WatchedEvent event) { 34 | String path = event.getPath(); 35 | if (event.getType() == Watcher.Event.EventType.None) { 36 | // We are are being told that the state of the 37 | // connection has changed 38 | switch (event.getState()) { 39 | case SyncConnected: 40 | // In this particular example we don't need to do anything 41 | // here - watches are automatically re-registered with 42 | // server and any watches triggered while the client was 43 | // disconnected will be delivered (in order of course) 44 | break; 45 | case Expired: 46 | // It's all over 47 | dead = true; 48 | listener.closing(KeeperException.Code.SessionExpired); 49 | break; 50 | } 51 | } else { 52 | if (path != null && path.equals(znode)) { 53 | // Something has changed on the node, let's find out 54 | zk.exists(znode, true, this, null); 55 | } 56 | } 57 | } 58 | 59 | // StatCallback 60 | @Override 61 | public void processResult(int rc, String path, Object ctx, Stat stat) { 62 | boolean exists; 63 | switch (rc) { 64 | case Code.Ok: 65 | exists = true; 66 | break; 67 | case 
Code.NoNode: 68 | exists = false; 69 | break; 70 | case Code.SessionExpired: 71 | case Code.NoAuth: 72 | dead = true; 73 | listener.closing(rc); 74 | return; 75 | default: 76 | // Retry errors 77 | zk.exists(znode, true, this, null); 78 | return; 79 | } 80 | 81 | byte b[] = null; 82 | if (exists) { 83 | try { 84 | b = zk.getData(znode, false, null); 85 | } catch (KeeperException e) { 86 | // We don't need to worry about recovering now. The watch 87 | // callbacks will kick off any exception handling 88 | e.printStackTrace(); 89 | } catch (InterruptedException e) { 90 | return; 91 | } 92 | } 93 | if ((b == null && b != prevData) || (b != null && !Arrays.equals(prevData, b))) { 94 | listener.exists(b); 95 | prevData = b; 96 | } 97 | } 98 | 99 | /** Other classes use the DataMonitor by implementing this method */ 100 | public interface DataMonitorListener { 101 | /** The existence status of the node has changed. */ 102 | void exists(byte data[]); 103 | 104 | /** 105 | * The ZooKeeper session is no longer valid. 106 | * 107 | * @param rc the ZooKeeper reason code 108 | */ 109 | void closing(int rc); 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /src/main/java/org/yao/watchclient/Executor.java: -------------------------------------------------------------------------------- 1 | package org.yao.watchclient; 2 | 3 | /** 4 | * A simple example program to use DataMonitor to start and stop executables based on a znode. The 5 | * program watches the specified znode and saves the data that corresponds to the znode in the 6 | * filesystem. It also starts the specified program with the specified arguments when the znode 7 | * exists and kills the program if the znode goes away. 
8 | */ 9 | import java.io.File; 10 | import java.io.FileOutputStream; 11 | import java.io.IOException; 12 | import java.io.InputStream; 13 | import java.io.OutputStream; 14 | 15 | import org.apache.zookeeper.KeeperException; 16 | import org.apache.zookeeper.WatchedEvent; 17 | import org.apache.zookeeper.Watcher; 18 | import org.apache.zookeeper.ZooKeeper; 19 | 20 | public class Executor implements Watcher, Runnable, DataMonitor.DataMonitorListener { 21 | private String znode; 22 | private DataMonitor dm; 23 | private ZooKeeper zk; 24 | private String pathname; 25 | private String exec[]; 26 | private Process child; 27 | 28 | public Executor(String hostPort, String znode, String filename, String exec[]) 29 | throws KeeperException, IOException { 30 | this.pathname = filename; 31 | this.exec = exec; 32 | zk = new ZooKeeper(hostPort, 3000, this); 33 | dm = new DataMonitor(zk, znode, this); 34 | } 35 | 36 | /** @param args */ 37 | public static void main(String[] args) { 38 | if (args.length < 4) { 39 | System.err.println("USAGE: Executor hostPort znode pathname program [args ...]"); 40 | System.exit(2); 41 | } 42 | String hostPort = args[0]; 43 | String znode = args[1]; 44 | String filename = args[2]; 45 | String exec[] = new String[args.length - 3]; 46 | System.arraycopy(args, 3, exec, 0, exec.length); 47 | try { 48 | new Executor(hostPort, znode, filename, exec).run(); 49 | } catch (Exception e) { 50 | e.printStackTrace(); 51 | } 52 | } 53 | 54 | // Watcher 55 | @Override 56 | public void process(WatchedEvent event) { 57 | dm.handle(event); 58 | } 59 | 60 | // Runnable 61 | @Override 62 | public void run() { 63 | try { 64 | synchronized (this) { 65 | while (!dm.dead) { 66 | wait(); 67 | } 68 | } 69 | } catch (InterruptedException e) { 70 | } 71 | } 72 | 73 | // DataMonitor.DataMonitorListener 74 | @Override 75 | public void closing(int rc) { 76 | synchronized (this) { 77 | notifyAll(); 78 | } 79 | } 80 | 81 | // DataMonitor.DataMonitorListener 82 | @Override 83 | 
public void exists(byte[] data) { 84 | if (data == null) { 85 | if (child != null) { 86 | System.out.println("Killing handle"); 87 | child.destroy(); 88 | try { 89 | child.waitFor(); 90 | } catch (InterruptedException e) { 91 | } 92 | } 93 | child = null; 94 | } else { 95 | if (child != null) { 96 | System.out.println("Stopping child"); 97 | child.destroy(); 98 | try { 99 | child.waitFor(); 100 | } catch (InterruptedException e) { 101 | e.printStackTrace(); 102 | } 103 | } 104 | try { 105 | FileOutputStream fos = new FileOutputStream(new File(pathname)); 106 | fos.write(data); 107 | fos.close(); 108 | } catch (IOException e) { 109 | e.printStackTrace(); 110 | } 111 | try { 112 | System.out.println("Starting child"); 113 | child = Runtime.getRuntime().exec(exec); 114 | new StreamWriter(child.getInputStream(), System.out); 115 | new StreamWriter(child.getErrorStream(), System.err); 116 | } catch (IOException e) { 117 | e.printStackTrace(); 118 | } 119 | } 120 | } 121 | 122 | static class StreamWriter extends Thread { 123 | OutputStream os; 124 | 125 | InputStream is; 126 | 127 | StreamWriter(InputStream is, OutputStream os) { 128 | this.is = is; 129 | this.os = os; 130 | start(); 131 | } 132 | 133 | public void run() { 134 | byte b[] = new byte[80]; 135 | int rc; 136 | try { 137 | while ((rc = is.read(b)) > 0) { 138 | os.write(b, 0, rc); 139 | } 140 | } catch (IOException e) { 141 | } 142 | } 143 | } 144 | } 145 | -------------------------------------------------------------------------------- /src/main/java/org/yao/watchclient/package.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | The code in this package is adapted from 4 | ZooKeeper Java Example. The 5 | orignal version is very convoluted. So I have made some changes to make readers' life easier. 
6 | 7 | -------------------------------------------------------------------------------- /src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 | log4j.rootLogger=ERROR, stdout 2 | 3 | root.log.pattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n 4 | 5 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 6 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 7 | log4j.appender.stdout.layout.ConversionPattern=${root.log.pattern} 8 | -------------------------------------------------------------------------------- /src/main/resources/quorum/zoo-quorum-node1.cfg: -------------------------------------------------------------------------------- 1 | # The number of milliseconds of each tick 2 | tickTime=2000 3 | # The number of ticks that the initial 4 | # synchronization phase can take 5 | initLimit=10 6 | # The number of ticks that can pass between 7 | # sending a request and getting an acknowledgement 8 | syncLimit=5 9 | # the directory where the snapshot is stored. 10 | # do not use /tmp for storage, /tmp here is just 11 | # example sakes. 12 | dataDir=/data/zk/quorum/node1 13 | # the port at which the clients will connect 14 | clientPort=2181 15 | # the maximum number of client connections. 16 | # increase this if you need to handle more clients 17 | #maxClientCnxns=60 18 | # 19 | # Be sure to read the maintenance section of the 20 | # administrator guide before turning on autopurge. 
21 | # 22 | # http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance 23 | # 24 | # The number of snapshots to retain in dataDir 25 | #autopurge.snapRetainCount=3 26 | # Purge task interval in hours 27 | # Set to "0" to disable auto purge feature 28 | #autopurge.purgeInterval=1 29 | 30 | server.1=127.0.0.1:3333:3334 31 | server.2=127.0.0.1:4444:4445 32 | server.3=127.0.0.1:5555:5556 33 | -------------------------------------------------------------------------------- /src/main/resources/quorum/zoo-quorum-node2.cfg: -------------------------------------------------------------------------------- 1 | # The number of milliseconds of each tick 2 | tickTime=2000 3 | # The number of ticks that the initial 4 | # synchronization phase can take 5 | initLimit=10 6 | # The number of ticks that can pass between 7 | # sending a request and getting an acknowledgement 8 | syncLimit=5 9 | # the directory where the snapshot is stored. 10 | # do not use /tmp for storage, /tmp here is just 11 | # example sakes. 12 | dataDir=/data/zk/quorum/node2 13 | # the port at which the clients will connect 14 | clientPort=2182 15 | # the maximum number of client connections. 16 | # increase this if you need to handle more clients 17 | #maxClientCnxns=60 18 | # 19 | # Be sure to read the maintenance section of the 20 | # administrator guide before turning on autopurge. 
21 | # 22 | # http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance 23 | # 24 | # The number of snapshots to retain in dataDir 25 | #autopurge.snapRetainCount=3 26 | # Purge task interval in hours 27 | # Set to "0" to disable auto purge feature 28 | #autopurge.purgeInterval=1 29 | 30 | server.1=127.0.0.1:3333:3334 31 | server.2=127.0.0.1:4444:4445 32 | server.3=127.0.0.1:5555:5556 33 | -------------------------------------------------------------------------------- /src/main/resources/quorum/zoo-quorum-node3.cfg: -------------------------------------------------------------------------------- 1 | # The number of milliseconds of each tick 2 | tickTime=2000 3 | # The number of ticks that the initial 4 | # synchronization phase can take 5 | initLimit=10 6 | # The number of ticks that can pass between 7 | # sending a request and getting an acknowledgement 8 | syncLimit=5 9 | # the directory where the snapshot is stored. 10 | # do not use /tmp for storage, /tmp here is just 11 | # example sakes. 12 | dataDir=/data/zk/quorum/node3 13 | # the port at which the clients will connect 14 | clientPort=2183 15 | # the maximum number of client connections. 16 | # increase this if you need to handle more clients 17 | #maxClientCnxns=60 18 | # 19 | # Be sure to read the maintenance section of the 20 | # administrator guide before turning on autopurge. 
21 | # 22 | # http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance 23 | # 24 | # The number of snapshots to retain in dataDir 25 | #autopurge.snapRetainCount=3 26 | # Purge task interval in hours 27 | # Set to "0" to disable auto purge feature 28 | #autopurge.purgeInterval=1 29 | 30 | server.1=127.0.0.1:3333:3334 31 | server.2=127.0.0.1:4444:4445 32 | server.3=127.0.0.1:5555:5556 33 | -------------------------------------------------------------------------------- /src/test/java/org/yao/CuratorTests.java: -------------------------------------------------------------------------------- 1 | package org.yao; 2 | 3 | import org.apache.curator.RetryPolicy; 4 | import org.apache.curator.framework.CuratorFramework; 5 | import org.apache.curator.framework.CuratorFrameworkFactory; 6 | import org.apache.curator.framework.api.CuratorEvent; 7 | import org.apache.curator.retry.ExponentialBackoffRetry; 8 | import org.apache.zookeeper.CreateMode; 9 | import org.apache.zookeeper.WatchedEvent; 10 | import org.apache.zookeeper.Watcher; 11 | import org.junit.After; 12 | import org.junit.Before; 13 | import org.junit.Test; 14 | 15 | import java.util.concurrent.CountDownLatch; 16 | 17 | import static com.google.common.truth.Truth.assertThat; 18 | 19 | /** 20 | * Example code to demonstrate the usage of Curator client and framework. 
21 | */ 22 | public class CuratorTests { 23 | private CuratorFramework client; 24 | private String connectString = "localhost:2181"; 25 | private RetryPolicy retryPolicy; 26 | 27 | @Before 28 | public void setUp() { 29 | retryPolicy = new ExponentialBackoffRetry(1000, 3); 30 | client = CuratorFrameworkFactory.newClient(connectString, retryPolicy); 31 | 32 | /* 33 | // Fluent style 34 | client = 35 | CuratorFrameworkFactory.builder() 36 | .connectString(connectString) 37 | .retryPolicy(retryPolicy) 38 | .build(); 39 | */ 40 | 41 | // Start client 42 | client.start(); 43 | } 44 | 45 | @After 46 | public void tearDown() { 47 | client.close(); 48 | } 49 | 50 | // create -> getData -> delete in synchronous mode 51 | @Test 52 | public void testSyncOp() throws Exception { 53 | String path = "/one"; 54 | byte[] data = {'1'}; 55 | client.create().withMode(CreateMode.PERSISTENT).forPath(path, data); 56 | 57 | byte[] actualData = client.getData().forPath(path); 58 | assertThat(data).isEqualTo(actualData); 59 | 60 | client.delete().forPath(path); 61 | 62 | client.close(); 63 | } 64 | 65 | 66 | // create -> getData -> delete in asynchronous mode 67 | @Test 68 | public void testAsyncOp() throws Exception { 69 | String path = "/two"; 70 | final byte[] data = {'2'}; 71 | final CountDownLatch latch = new CountDownLatch(1); 72 | 73 | // Use listener only for callbacks 74 | client 75 | .getCuratorListenable() 76 | .addListener( 77 | (CuratorFramework c, CuratorEvent event) -> { 78 | switch (event.getType()) { 79 | case CREATE: 80 | System.out.printf("znode '%s' created\n", event.getPath()); 81 | // 2. getData 82 | c.getData().inBackground().forPath(event.getPath()); 83 | break; 84 | case GET_DATA: 85 | System.out.printf("got the data of znode '%s'\n", event.getPath()); 86 | assertThat(event.getData()).isEqualTo(data); 87 | // 3. 
Delete 88 | c.delete().inBackground().forPath(path); 89 | break; 90 | case DELETE: 91 | System.out.printf("znode '%s' deleted\n", event.getPath()); 92 | latch.countDown(); 93 | break; 94 | } 95 | }); 96 | 97 | // 1. create 98 | client.create().withMode(CreateMode.PERSISTENT).inBackground().forPath(path, data); 99 | 100 | latch.await(); 101 | 102 | client.close(); 103 | } 104 | 105 | @Test 106 | public void testWatch() throws Exception { 107 | String path = "/three"; 108 | byte[] data = {'3'}; 109 | byte[] newData = {'4'}; 110 | CountDownLatch latch = new CountDownLatch(1); 111 | 112 | // Use listener only for watches 113 | client 114 | .getCuratorListenable() 115 | .addListener( 116 | (CuratorFramework c, CuratorEvent event) -> { 117 | switch (event.getType()) { 118 | case WATCHED: 119 | WatchedEvent we = event.getWatchedEvent(); 120 | System.out.println("watched event: " + we); 121 | if (we.getType() == Watcher.Event.EventType.NodeDataChanged 122 | && we.getPath().equals(path)) { 123 | // 4. watch triggered 124 | System.out.printf("got the event for the triggered watch\n"); 125 | byte[] actualData = c.getData().forPath(path); 126 | assertThat(actualData).isEqualTo(newData); 127 | } 128 | latch.countDown(); 129 | break; 130 | } 131 | }); 132 | 133 | // 1. create 134 | client.create().withMode(CreateMode.PERSISTENT).forPath(path, data); 135 | // 2. getData and register a watch 136 | byte[] actualData = client.getData().watched().forPath(path); 137 | assertThat(actualData).isEqualTo(data); 138 | 139 | // 3. setData 140 | client.setData().forPath(path, newData); 141 | latch.await(); 142 | 143 | // 5. 
delete 144 | client.delete().forPath(path); 145 | } 146 | 147 | @Test 148 | public void testCallbackAndWatch() throws Exception { 149 | String path = "/four"; 150 | byte[] data = {'4'}; 151 | byte[] newData = {'5'}; 152 | CountDownLatch latch = new CountDownLatch(2); 153 | 154 | // Use listener for both callbacks and watches 155 | client 156 | .getCuratorListenable() 157 | .addListener( 158 | (CuratorFramework c, CuratorEvent event) -> { 159 | switch (event.getType()) { 160 | case CREATE: 161 | // 2. callback for create 162 | System.out.printf("znode '%s' created\n", event.getPath()); 163 | // 3. getData and register a watch 164 | assertThat(client.getData().watched().forPath(path)).isEqualTo(data); 165 | // 4. setData 166 | client.setData().forPath(path, newData); 167 | latch.countDown(); 168 | break; 169 | case WATCHED: 170 | WatchedEvent we = event.getWatchedEvent(); 171 | System.out.println("watched event: " + we); 172 | if (we.getType() == Watcher.Event.EventType.NodeDataChanged 173 | && we.getPath().equals(path)) { 174 | // 5. watch triggered 175 | System.out.printf("got the event for the triggered watch\n"); 176 | assertThat(c.getData().forPath(path)).isEqualTo(newData); 177 | } 178 | latch.countDown(); 179 | break; 180 | } 181 | }); 182 | 183 | // 1. create 184 | client.create().withMode(CreateMode.PERSISTENT).inBackground().forPath(path, data); 185 | 186 | latch.await(); 187 | 188 | // 6. 
delete 189 | client.delete().forPath(path); 190 | } 191 | } 192 | -------------------------------------------------------------------------------- /src/test/java/org/yao/NodeCacheTests.java: -------------------------------------------------------------------------------- 1 | package org.yao; 2 | 3 | import org.apache.curator.framework.CuratorFramework; 4 | import org.apache.curator.framework.CuratorFrameworkFactory; 5 | import org.apache.curator.framework.recipes.cache.NodeCache; 6 | import org.apache.curator.retry.RetryOneTime; 7 | import org.apache.curator.utils.CloseableUtils; 8 | import org.junit.Test; 9 | 10 | import java.util.concurrent.Semaphore; 11 | import java.util.concurrent.TimeUnit; 12 | 13 | import static com.google.common.truth.Truth.assertThat; 14 | 15 | public class NodeCacheTests { 16 | private String connectString = "localhost:2181"; 17 | 18 | @Test 19 | public void testBasics() throws Exception { 20 | NodeCache cache = null; 21 | CuratorFramework client = CuratorFrameworkFactory.newClient(connectString, new RetryOneTime(1)); 22 | client.start(); 23 | 24 | try { 25 | String basePath = "/test"; 26 | client.create().forPath(basePath); 27 | 28 | String nodePath = basePath + "/node"; 29 | cache = new NodeCache(client, nodePath); 30 | cache.start(true); 31 | 32 | final Semaphore semaphore = new Semaphore(0); 33 | 34 | cache.getListenable().addListener(() -> semaphore.release()); 35 | 36 | assertThat(cache.getCurrentData()).isNull(); 37 | 38 | String version0Data = "a"; 39 | client.create().forPath(nodePath, version0Data.getBytes()); 40 | assertThat(semaphore.tryAcquire(1, TimeUnit.SECONDS)).isTrue(); 41 | assertThat(cache.getCurrentData().getData()).isEqualTo(version0Data.getBytes()); 42 | 43 | String version1Data = "b"; 44 | client.setData().forPath(nodePath, version1Data.getBytes()); 45 | assertThat(semaphore.tryAcquire(1, TimeUnit.SECONDS)).isTrue(); 46 | assertThat(cache.getCurrentData().getData()).isEqualTo(version1Data.getBytes()); 47 | 48 | 
client.delete().forPath(nodePath);
      assertThat(semaphore.tryAcquire(1, TimeUnit.SECONDS)).isTrue();
      assertThat(cache.getCurrentData()).isNull();

      client.delete().forPath(basePath);
    } finally {
      CloseableUtils.closeQuietly(cache);
      CloseableUtils.closeQuietly(client);
    }
  }
}
--------------------------------------------------------------------------------
/src/test/java/org/yao/PathChildrenCacheTests.java:
--------------------------------------------------------------------------------
package org.yao;

import com.google.common.io.Closeables;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.cache.PathChildrenCache;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;
import org.apache.curator.retry.RetryOneTime;
import org.apache.curator.utils.CloseableUtils;
import org.junit.Test;

import static com.google.common.truth.Truth.assertThat;

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

/**
 * Demonstrates PathChildrenCache: a locally cached, eventually-consistent view
 * of a znode's children, with listener events for add/update/remove.
 */
public class PathChildrenCacheTests {
  private String connectString = "localhost:2181";

  @Test
  public void testBasics() throws Exception {
    {
      CuratorFramework client =
          CuratorFrameworkFactory.newClient(connectString, new RetryOneTime(1));
      PathChildrenCache cache = null;
      client.start();
      String basePath = "/path-children-cache-tests";
      String child1Path = basePath + "/one";
      String child2Path = basePath + "/two";

      try {
        client.create().forPath(basePath);

        // Type parameter restored (lost in the dump): the queue carries cache
        // event types so each assertion below can poll for the expected one.
        final BlockingQueue<PathChildrenCacheEvent.Type> events = new LinkedBlockingQueue<>();

        cache = new PathChildrenCache(client, basePath, true);
        cache
            .getListenable()
            .addListener(
                (c, event) -> {
                  if (event.getData().getPath().startsWith(basePath)) {
                    events.offer(event.getType());
                  }
                });
        cache.start();

        String child1Version0Data = "hey there";
        client.create().forPath(child1Path, child1Version0Data.getBytes());
        assertThat(events.poll(1, TimeUnit.SECONDS))
            .isEqualTo(PathChildrenCacheEvent.Type.CHILD_ADDED);

        String child1Version1Data = "sup!";
        client.setData().forPath(child1Path, child1Version1Data.getBytes());
        assertThat(events.poll(1, TimeUnit.SECONDS))
            .isEqualTo(PathChildrenCacheEvent.Type.CHILD_UPDATED);
        assertThat(cache.getCurrentData(child1Path).getData())
            .isEqualTo(child1Version1Data.getBytes());

        String child2Version0Data = "foo";
        client.create().forPath(child2Path, child2Version0Data.getBytes());
        assertThat(events.poll(1, TimeUnit.SECONDS))
            .isEqualTo(PathChildrenCacheEvent.Type.CHILD_ADDED);
        assertThat(cache.getCurrentData(child2Path).getData())
            .isEqualTo(child2Version0Data.getBytes());

        assertThat(cache.getCurrentData().size()).isEqualTo(2);

        client.delete().forPath(child1Path);
        assertThat(events.poll(1, TimeUnit.SECONDS))
            .isEqualTo(PathChildrenCacheEvent.Type.CHILD_REMOVED);

        client.delete().deletingChildrenIfNeeded().forPath(basePath);

      } finally {
        CloseableUtils.closeQuietly(cache);
        CloseableUtils.closeQuietly(client);
      }
    }
  }
}
--------------------------------------------------------------------------------
/src/test/java/org/yao/ServiceDiscoveryTests.java:
--------------------------------------------------------------------------------
package org.yao;

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.RetryOneTime;
import org.apache.curator.utils.CloseableUtils;
import org.apache.curator.x.discovery.ServiceDiscovery;
import
org.apache.curator.x.discovery.ServiceDiscoveryBuilder; 9 | import org.apache.curator.x.discovery.ServiceInstance; 10 | import org.apache.curator.x.discovery.ServiceProvider; 11 | import org.junit.Test; 12 | 13 | import static com.google.common.truth.Truth.assertThat; 14 | 15 | public class ServiceDiscoveryTests { 16 | private String connectString = "localhost:2181"; 17 | 18 | /** Shows the basic usage for curator-x-discovery. */ 19 | @Test 20 | public void testBasics() throws Exception { 21 | CuratorFramework client = null; 22 | ServiceDiscovery discovery = null; 23 | ServiceProvider provider = null; 24 | String serviceName = "test"; 25 | String basePath = "/services"; 26 | 27 | try { 28 | client = CuratorFrameworkFactory.newClient(connectString, new RetryOneTime(1)); 29 | client.start(); 30 | 31 | ServiceInstance instance1 = 32 | ServiceInstance.builder().payload("plant").name(serviceName).port(10064).build(); 33 | ServiceInstance instance2 = 34 | ServiceInstance.builder().payload("animal").name(serviceName).port(10065).build(); 35 | 36 | System.out.printf("instance1 id: %s\n", instance1.getId()); 37 | System.out.printf("instance2 id: %s\n", instance2.getId()); 38 | 39 | discovery = 40 | ServiceDiscoveryBuilder.builder(String.class) 41 | .basePath(basePath) 42 | .client(client) 43 | .thisInstance(instance1) 44 | .build(); 45 | discovery.start(); 46 | discovery.registerService(instance2); 47 | 48 | provider = discovery.serviceProviderBuilder().serviceName(serviceName).build(); 49 | provider.start(); 50 | 51 | assertThat(provider.getInstance().getId()).isNotEmpty(); 52 | assertThat(provider.getAllInstances()).containsExactly(instance1, instance2); 53 | 54 | client.delete().deletingChildrenIfNeeded().forPath(basePath); 55 | } finally { 56 | CloseableUtils.closeQuietly(provider); 57 | CloseableUtils.closeQuietly(discovery); 58 | CloseableUtils.closeQuietly(client); 59 | } 60 | } 61 | } 62 | 
package org.yao;

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;
import org.junit.Before;
import org.junit.Test;

import java.io.IOException;
import java.util.List;
import java.util.concurrent.CountDownLatch;

/**
 * Example code to show how ZooKeeper watchers work. These test cases should run separately. And
 * they need to be driven by zkCli.sh. Check the test case comment for details.
 *
 * <p>Watches registered by {@code exists} and {@code getData} only monitor changes related to the
 * znode itself. Watches registered by {@code getChildren} only monitor changes related to the
 * znode's children (not descendants).
 */
public class WatcherTests {
  private ZooKeeper zk;
  // volatile: the latch is replaced per test and counted down from the watcher callback thread.
  private volatile CountDownLatch closeLatch;

  @Before
  public void setUp() throws IOException, InterruptedException {
    DefaultWatcher gw = new DefaultWatcher(this);
    // NOTE(review): the second argument is the session timeout in ms, not a port; the connect
    // string "localhost" falls back to the default port 2181. A 2181 ms session timeout looks
    // accidental — confirm whether "localhost:2181" plus a real timeout was intended.
    zk = new ZooKeeper("localhost", 2181, gw);
    // Wait for the connection establishment
    gw.await();
  }

  /** Arms the latch that each test awaits before deleting its znodes and closing the client. */
  public void setCloseLatch(int count) {
    closeLatch = new CountDownLatch(count);
  }

  /** Called by watchers when the event they are waiting for has arrived. */
  public void countDownCloseLatch() {
    closeLatch.countDown();
  }

  /** Watches /father's children; driven by creating a child of /father in zkCli.sh. */
  @Test
  public void testWatchChildren() throws Exception {
    String fatherPath = "/father";
    setCloseLatch(1);
    // true: register the default watcher for child changes.
    List<String> paths = zk.getChildren(fatherPath, true);
    System.out.printf("child paths: %s\n", paths);
    closeLatch.await();
    zk.delete(fatherPath, -1);
    System.out.println("closing ZooKeeper...");
    zk.close();
  }

  /** Shows both the default watcher and an explicit watcher firing on znode creation. */
  @Test
  public void testWatchers() throws Exception {
    String onePath = "/one";
    String twoPath = "/two";
    setCloseLatch(2);

    // Set the default watcher
    Stat oneStat = zk.exists(onePath, true);
    System.out.printf("%s stat: %s\n", onePath, oneStat);

    // Set a specific watcher
    Stat twoStat = zk.exists(twoPath, new ExistsWatcher(this));
    System.out.printf("%s stat: %s\n", twoPath, twoStat);

    // After creating znode /one in zkCli.sh, the default watcher should print a message.
    // After creating znode /two in zkCli.sh, the default watcher should print a message.
    closeLatch.await();
    zk.delete(onePath, -1);
    zk.delete(twoPath, -1);
    System.out.println("closing ZooKeeper...");
    zk.close();
  }

  /** Two default watchers are set, but at most one gets triggered. */
  @Test
  public void testGlobalWatcherAtMostTriggerOnce() throws Exception {
    String path = "/three";
    setCloseLatch(1);

    Stat oneStat;
    oneStat = zk.exists(path, true);
    System.out.printf("%s stat for the first exists: %s\n", path, oneStat);
    oneStat = zk.exists(path, true);
    System.out.printf("%s stat for the second exists: %s\n", path, oneStat);
    // After seeing the above output, issue create /three in zkCli. Then one message should be
    // printed by the default watcher.

    closeLatch.await();
    zk.delete(path, -1);
    System.out.println("closing ZooKeeper...");
    zk.close();
  }

  /** Two explicit watchers are set, but at most one gets triggered. */
  @Test
  public void testExplicitWatcherAtMostTriggerOnce() throws Exception {
    String path = "/four";
    setCloseLatch(1);

    Stat twoStat;
    twoStat = zk.exists(path, new ExistsWatcher(this));
    System.out.printf("%s stat for the first exists: %s\n", path, twoStat);
    twoStat = zk.exists(path, new ExistsWatcher(this));
    System.out.printf("%s stat for the second exists: %s\n", path, twoStat);
    // After seeing the above output, issue create /two in zkCli.
    // NOTE(review): the path mentioned here ("/two") does not match the path used ("/four");
    // presumably the zkCli command should be "create /four".

    closeLatch.await();
    zk.delete(path, -1);
    System.out.println("closing ZooKeeper...");
    zk.close();
  }

  /** Removing a watch delivers a DataWatchRemoved event to the watcher being removed. */
  @Test
  public void testRemoveWatch() throws Exception {
    String path = "/five";
    setCloseLatch(1);

    Stat fileStat =
        zk.exists(
            path,
            (event) -> {
              System.out.printf("event in exists watcher: %s\n", event);
              if (event.getType() == Watcher.Event.EventType.DataWatchRemoved) {
                countDownCloseLatch();
                return;
              }
              // Any other event means the watch was triggered before it was removed.
              throw new IllegalStateException();
            });
    System.out.printf("%s stat : %s\n", path, fileStat);

    zk.removeAllWatches(path, Watcher.WatcherType.Any, false);
    closeLatch.await();

    System.out.println("closing ZooKeeper...");
    zk.close();
  }
}

/**
 * Default watcher. Releases the start latch on connection establishment and counts down the test's
 * close latch when a znode is created.
 */
class DefaultWatcher implements Watcher {
  private CountDownLatch startLatch = new CountDownLatch(1);
  private WatcherTests tests;

  public DefaultWatcher(WatcherTests tests) {
    this.tests = tests;
  }

  @Override
  public void process(WatchedEvent event) {
    System.out.printf("event in default watcher: %s\n", event);
    if (event.getType() == Event.EventType.None
        && event.getState() == Event.KeeperState.SyncConnected) {
      // Session established; unblock the constructor-side await().
      startLatch.countDown();
      return;
    } else if (event.getType() == Event.EventType.NodeCreated) {
      tests.countDownCloseLatch();
      return;
    }
  }

  /** Blocks until the ZooKeeper session is connected. */
  public void await() throws InterruptedException {
    startLatch.await();
  }
}

/** Watcher for exists method. Counts down the close latch on node creation or child change. */
class ExistsWatcher implements Watcher {
  private WatcherTests tests;

  public ExistsWatcher(WatcherTests tests) {
    this.tests = tests;
  }

  @Override
  public void process(WatchedEvent event) {
    System.out.printf("event in exists watch: %s\n", event);
    Event.EventType eventType = event.getType();
    if (eventType == Event.EventType.NodeCreated
        || eventType == Event.EventType.NodeChildrenChanged) {
      tests.countDownCloseLatch();
      return;
    }
  }
}
| import org.apache.zookeeper.ZooDefs; 13 | import org.apache.zookeeper.ZooKeeper; 14 | import org.junit.After; 15 | import org.junit.Before; 16 | import org.junit.Test; 17 | 18 | import java.util.List; 19 | import java.util.concurrent.CountDownLatch; 20 | import java.util.concurrent.ThreadLocalRandom; 21 | 22 | import static com.google.common.truth.Truth.assertThat; 23 | 24 | public class ZooKeeperTests { 25 | private String pathPrefix = "/multi"; 26 | private ZooKeeper zk; 27 | private CountDownLatch startLatch; 28 | private CountDownLatch closeLatch; 29 | private AsyncCallback.MultiCallback callback; 30 | 31 | private String path1 = pathPrefix + "1"; 32 | private String path2 = pathPrefix + "2"; 33 | private byte[] data1 = {0x1}; 34 | private byte[] data2 = {0x2}; 35 | 36 | @Before 37 | public void setUp() throws Exception { 38 | startLatch = new CountDownLatch(1); 39 | callback = 40 | (int rc, String path, Object ctx, List opResults) -> { 41 | assertThat(rc).isEqualTo(KeeperException.Code.OK.intValue()); 42 | System.out.printf("delete multi executed"); 43 | closeLatch.countDown(); 44 | }; 45 | zk = new ZooKeeper("localhost", 2181, new DefaultWatcher()); 46 | startLatch.await(); 47 | } 48 | 49 | @After 50 | public void tearDown() throws Exception { 51 | closeLatch.await(); 52 | zk.close(); 53 | } 54 | 55 | @Test 56 | public void testMulti() throws Exception { 57 | closeLatch = new CountDownLatch(1); 58 | 59 | // Create two znodes 60 | Op createOp1 = Op.create(path1, data1, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); 61 | Op createOp2 = Op.create(path2, data2, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); 62 | 63 | // Synchronous API 64 | zk.multi(ImmutableList.of(createOp1, createOp2)); 65 | System.out.println("create multi executed"); 66 | 67 | assertThat(zk.getData(path1, false, null)).isEqualTo(data1); 68 | assertThat(zk.getData(path2, false, null)).isEqualTo(data2); 69 | 70 | // Delete two znodes 71 | Op deleteOp1 = Op.delete(path1, -1); 72 | 
Op deleteOp2 = Op.delete(path2, -1); 73 | 74 | // Asynchronous API 75 | zk.multi(ImmutableList.of(deleteOp1, deleteOp2), callback, null); 76 | } 77 | 78 | @Test 79 | public void testTransaction() throws Exception { 80 | closeLatch = new CountDownLatch(1); 81 | 82 | // Create two znodes 83 | Transaction tx = zk.transaction(); 84 | tx.create(path1, data1, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); 85 | tx.create(path2, data2, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); 86 | 87 | // Synchronous API 88 | tx.commit(); 89 | System.out.println("transaction committed"); 90 | 91 | assertThat(zk.getData(path1, false, null)).isEqualTo(data1); 92 | assertThat(zk.getData(path2, false, null)).isEqualTo(data2); 93 | 94 | // Delete two znodes 95 | tx = zk.transaction(); 96 | tx.delete(path1, -1); 97 | tx.delete(path2, -1); 98 | 99 | // Asynchronous API 100 | tx.commit(callback, null); 101 | } 102 | 103 | @Test 104 | public void testTransactionWithCheck() throws Exception { 105 | closeLatch = new CountDownLatch(0); 106 | 107 | { 108 | Transaction tx = zk.transaction(); 109 | tx.create(path1, data1, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); 110 | tx.create(path2, data2, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); 111 | tx.check(path1, 0); 112 | tx.check(path2, 0); 113 | tx.commit(); 114 | } 115 | 116 | { 117 | Transaction tx = zk.transaction(); 118 | tx.check(path1, 0); 119 | tx.check(path2, 0); 120 | tx.delete(path1, 0); 121 | tx.delete(path2, 0); 122 | tx.commit(); 123 | } 124 | } 125 | 126 | 127 | /** 128 | * getChildren does not list descendants recursively. 
package org.yao.jute;

import com.google.common.base.MoreObjects;
import org.apache.jute.BinaryInputArchive;
import org.apache.jute.BinaryOutputArchive;
import org.apache.jute.Index;
import org.apache.jute.InputArchive;
import org.apache.jute.OutputArchive;
import org.apache.jute.Record;
import org.junit.Test;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Map;
import java.util.TreeMap;

/**
 * Round-trip serialization examples for ZooKeeper's Jute format. Note that BinaryOutputArchive and
 * BinaryInputArchive ignore the tag argument in all methods, so reads must happen in exactly the
 * order of the writes.
 */
public class JuteTests {
  private String pathname = "jute-data";

  /** Serializes primitives, a record and a map to a file, then reads them back in order. */
  @Test
  public void testSerDe() throws Exception {
    Files.deleteIfExists(Paths.get(pathname));
    serialize();
    deserialize();
  }

  /** Writes a boolean, an int, a string, a Student record and a string-to-int map. */
  private void serialize() throws Exception {
    try (OutputStream os = new FileOutputStream(pathname)) {
      BinaryOutputArchive oa = BinaryOutputArchive.getArchive(os);

      // Primitive types
      oa.writeBool(true, "boolean");
      oa.writeInt(1024, "int");
      oa.writeString("yao", "string");

      // Records
      Student xiaoMing = new Student(2, "xiaoMing");
      oa.writeRecord(xiaoMing, "xiaoMing");

      // TreeMap: iteration order is sorted by key, so deserialization sees a stable order.
      TreeMap<String, Integer> map = new TreeMap<>();
      map.put("one", 1);
      map.put("two", 2);
      oa.startMap(map, "map");
      int i = 1;
      for (Map.Entry<String, Integer> entry : map.entrySet()) {
        String tag = i + "";
        oa.writeString(entry.getKey(), tag);
        oa.writeInt(entry.getValue(), tag);
        i++;
      }
      oa.endMap(map, "map");
    }
  }

  /** Reads back everything written by {@link #serialize()} in the same order. */
  private void deserialize() throws Exception {
    try (FileInputStream is = new FileInputStream(pathname)) {
      BinaryInputArchive ia = BinaryInputArchive.getArchive(is);
      System.out.printf("boolean: %b\n", ia.readBool("boolean"));
      System.out.printf("int: %d\n", ia.readInt("int"));
      System.out.printf("string: %s\n", ia.readString("string"));

      Student xiaoMing = new Student();
      ia.readRecord(xiaoMing, "xiaoMing");
      System.out.printf("xiaoMing: %s\n", xiaoMing);

      // startMap returns an Index over the serialized entry count.
      Index index = ia.startMap("map");
      int i = 1;
      while (!index.done()) {
        String tag = i + "";
        System.out.printf("key: %s, value: %d\n", ia.readString(tag), ia.readInt(tag));
        index.incr();
        i++;
      }
    }
  }
}

/** A minimal Jute {@link Record}: field reads/writes must mirror each other exactly. */
class Student implements Record {
  private int grade;
  private String name;

  public Student() {}

  public Student(int grade, String name) {
    this.grade = grade;
    this.name = name;
  }

  @Override
  public void serialize(OutputArchive oa, String tag) throws IOException {
    oa.startRecord(this, tag);
    oa.writeInt(grade, "grade");
    oa.writeString(name, "name");
    oa.endRecord(this, tag);
  }

  @Override
  public void deserialize(InputArchive ia, String tag) throws IOException {
    ia.startRecord(tag);
    grade = ia.readInt("grade");
    name = ia.readString("name");
    ia.endRecord(tag);
  }

  @Override
  public String toString() {
    return MoreObjects.toStringHelper(this).add("grade", grade).add("name", name).toString();
  }
}
package org.yao.nio;

import org.junit.Before;
import org.junit.Test;

import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Paths;

import static com.google.common.truth.Truth.assertThat;

/**
 * Exercises the {@link ByteBuffer} position/limit state machine: put, flip, get, rewind, clear,
 * wrap, duplicate, and channel read/write.
 */
public class BufferTests {
  // Named as a constant, so make it one: static final instead of a mutable instance field.
  private static final int CAP = 10;
  private ByteBuffer buf;
  private final byte[] chars = {'h', 'e', 'l', 'l', 'o'};
  private final String pathname = "channel-data";

  /** Allocates a CAP-byte buffer and puts "hello" into it, leaving position = 5, limit = 10. */
  @Before
  public void setUp() {
    buf = ByteBuffer.allocate(CAP);
    verifyBufferState(buf, 0, 10);

    // Put
    for (byte ch : chars) {
      buf.put(ch);
    }
    verifyBufferState(buf, 5, 10);
  }

  /** flip() prepares for reading; rewind() re-reads; clear() resets position and limit. */
  @Test
  public void testBasics() {
    // Get
    buf.flip();
    verifyGet(buf);

    // Get again
    buf.rewind();
    verifyGet(buf);

    buf.clear();
    verifyBufferState(buf, 0, 10);
  }

  /** wrap() creates a buffer backed by the array with position 0 and limit = array length. */
  @Test
  public void testWrap() {
    byte[] world = {'w', 'o', 'r', 'l', 'd'};
    ByteBuffer buf = ByteBuffer.wrap(world);
    verifyBufferState(buf, 0, 5);
  }

  /** duplicate() shares content but keeps independent position/limit from the original. */
  @Test
  public void testDuplicate() {
    buf.flip();
    ByteBuffer clone = buf.duplicate();
    verifyGet(buf);
    // Consuming the original did not move the duplicate's position.
    verifyBufferState(clone, 0, 5);
    verifyGet(clone);
  }

  /** Channel write consumes the buffer like gets; channel read fills it like puts. */
  @Test
  public void testWithChannel() throws Exception {
    Files.deleteIfExists(Paths.get(pathname));
    try (FileChannel channel = new FileOutputStream(pathname).getChannel()) {
      buf.flip();
      verifyBufferState(buf, 0, 5);
      // Channel write is equivalent to a series of gets.
      channel.write(buf);
      verifyBufferState(buf, 5, 5);
    }

    ByteBuffer buf2 = ByteBuffer.allocate(CAP);
    try (FileChannel channel = new FileInputStream(pathname).getChannel()) {
      // Channel read is equivalent to a series of puts.
      channel.read(buf2);
      verifyBufferState(buf2, 5, 10);
      buf2.flip();
      verifyBufferState(buf2, 0, 5);
      verifyGet(buf2);
    }
  }

  /** Reads "hello" from the buffer, asserting each byte and the before/after state. */
  private void verifyGet(ByteBuffer buffer) {
    int i = 0;
    verifyBufferState(buffer, 0, 5);
    while (buffer.hasRemaining()) {
      byte ch = buffer.get();
      assertThat(ch).isEqualTo(chars[i++]);
    }
    verifyBufferState(buffer, 5, 5);
  }

  /** Asserts the buffer's position and limit. */
  private void verifyBufferState(ByteBuffer buf, int position, int limit) {
    assertThat(buf.position()).isEqualTo(position);
    assertThat(buf.limit()).isEqualTo(limit);
  }
}