/.gitignore:
--------------------------------------------------------------------------------
1 | HELP.md
2 | target/
3 | !.mvn/wrapper/maven-wrapper.jar
4 | !**/src/main/**
5 | !**/src/test/**
6 | 
7 | ### STS ###
8 | .apt_generated
9 | .classpath
10 | .factorypath
11 | .project
12 | .settings
13 | .springBeans
14 | .sts4-cache
15 | 
16 | ### IntelliJ IDEA ###
17 | .idea
18 | *.iws
19 | *.iml
20 | *.ipr
21 | 
22 | ### NetBeans ###
23 | /nbproject/private/
24 | /nbbuild/
25 | /dist/
26 | /nbdist/
27 | /.nb-gradle/
28 | build/
29 | 
30 | ### VS Code ###
31 | .vscode/
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # distributed-lock-examples
2 | 
3 | The most complete collection of distributed lock examples (we don't reinvent the wheel, we just use it well!)
4 | 
5 | ```
6 | distributed-lock-examples
7 | ├── README.md
8 | ├── Screenshots
9 | │   ├── hazelcast_management_center.png
10 | │   ├── hazelcast_management_center_idgenerator.png
11 | │   └── hazelcast_management_center_map.png
12 | ├── distributed-consul-lock-example
13 | │   ├── distributed-consul-lock-example.iml
14 | │   ├── pom.xml
15 | │   └── src
16 | │       └── main
17 | │           ├── java
18 | │           │   └── net
19 | │           │       └── ameizi
20 | │           │           └── distributed
21 | │           │               └── consul
22 | │           │                   └── example
23 | │           │                       └── DistributedConsulLockExampleApplication.java
24 | │           └── resources
25 | │               └── application.properties
26 | ├── distributed-hazelcast-lock-example
27 | │   ├── README.md
28 | │   ├── distributed-hazelcast-lock-example.iml
29 | │   ├── docker-compose.yaml
30 | │   ├── hazelcast.xml
31 | │   ├── pom.xml
32 | │   └── src
33 | │       ├── main
34 | │       │   ├── java
35 | │       │   │   └── net
36 | │       │   │       └── ameizi
37 | │       │   │           └── distributed
38 | │       │   │               └── hazelcast
39 | │       │   │                   └── example
40 | │       │   │                       ├── DistributedHazelcastLockExampleApplication.java
41 | │       │   │                       ├── LockController.java
42 | │       │   │                       └── config
43 | │       │   │                           └── HazelcastConfiguration.java
44 | │       │   └── resources
45 | │       │       └── application.properties
46 | │       └── test
47 | │           ├── java
48 | │           │   └── net
49 | │           │       └── ameizi
50 | │           │           └── distributed
51 | │           │               └── hazelcast
52 | │           │                   └── example
53 | │           │                       ├── FlakeIdGeneratorSample.java
54 | │           │                       └── HazelcastClientTest.java
55 | │           └── resources
56 | ├── distributed-redis-lock-example
57 | │   ├── distributed-redis-lock-example.iml
58 | │   ├── pom.xml
59 | │   └── src
60 | │       └── main
61 | │           ├── java
62 | │           │   └── net
63 | │           │       └── ameizi
64 | │           │           └── distributed
65 | │           │               └── lock
66 | │           │                   └── redis
67 | │           │                       └── example
68 | │           │                           └── DistributedRedisLockExampleApplication.java
69 | │           └── resources
70 | │               ├── application.properties
71 | │               ├── redisson-cluster.yaml
72 | │               ├── redisson-master-slave.yaml
73 | │               ├── redisson-sentinel.yaml
74 | │               └── redisson-single.yaml
75 | ├── distributed-zookeeper-lock-example
76 | │   ├── distributed-zookeeper-lock-example.iml
77 | │   ├── pom.xml
78 | │   └── src
79 | │       ├── main
80 | │       │   ├── java
81 | │       │   │   └── net
82 | │       │   │       └── ameizi
83 | │       │   │           └── distributed
84 | │       │   │               └── lock
85 | │       │   │                   └── zookeeper
86 | │       │   │                       └── example
87 | │       │   │                           ├── DistributedZookeeperLockExampleApplication.java
88 | │       │   │                           └── config
89 | │       │   │                               ├── ZookeeperConfig.java
90 | │       │   │                               └── ZookeeperProperties.java
91 | │       │   └── resources
92 | │       │       ├── META-INF
93 | │       │       └── application.properties
94 | │       └── test
95 | │           ├── java
96 | │           │   └── net
97 | │           │       └── ameizi
98 | │           │           ├── distributed
99 | │           │           │   └── lock
100 | │           │           │       └── zookeeper
101 | │           │           │           └── example
102 | │           │           │               ├── BlockingLockTest.java
103 | │           │           │               ├── DistributedLockTest.java
104 | │           │           │               └── NonBlockingLockTest.java
105 | │           └── resources
106 | ├── pom.xml
107 | └── spring-integration-distributed-lock-examples
108 |     ├── pom.xml
109 |     ├── spring-integration-distributed-lock-examples.iml
110 |     └── src
111 |         └── main
112 |             ├── java
113 |             │   └── spring
114 |             │       └── integration
115 |             │           └── distributed
116 |             │               └── lock
117 |             │                   └── examples
118 |             │                       ├── DistributedLockRegistryApplication.java
119 |             │                       └── config
120 |             │                           ├── JdbcConfiguration.java
121 |             │                           ├── RedisLockConfiguration.java
122 |             │                           └── ZookeeperLockConfiguration.java
123 |             └── resources
124 |                 └── application.properties
125 | ```
126 | 
127 | Note: to make local experimentation easy, the sample code uses embedded, in-process zookeeper, redis, jdbc and hazelcast instances; real applications should use independently deployed services.
128 | 
129 | * distributed-hazelcast-lock-example (distributed locks and distributed IDs with hazelcast)
130 | 
131 | Update the host IP address in `docker-compose.yaml` and `hazelcast.xml`, then run `docker-compose up -d` to start a three-node hazelcast cluster.
132 | 
133 | Open `http://localhost:8080/hazelcast-mancenter` in a browser to reach the `hazelcast` management console.
134 | 
135 | Information on the three member nodes
136 | 
137 | ![](Screenshots/hazelcast_management_center.png)
138 | 
139 | Distributed IDs
140 | 
141 | ![](Screenshots/hazelcast_management_center_idgenerator.png)
142 | 
143 | map
144 | 
145 | ![](Screenshots/hazelcast_management_center_map.png)
146 | 
147 | * distributed-redis-lock-example (distributed locks with redis)
148 | 
149 | * distributed-zookeeper-lock-example (distributed locks with zookeeper)
150 | 
151 | * spring-integration-distributed-lock-examples (distributed locks with spring-integration), implementing the lock with ZookeeperLockRegistry, RedisLockRegistry and JdbcLockRegistry respectively; a hedged usage sketch follows below.
152 | 
153 | 
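As a taste of the spring-integration module, here is a minimal sketch of how a `RedisLockRegistry` bean is typically declared and used. This is not the module's actual code: the class name, the bean wiring and the `"locks"` registry key are illustrative assumptions.

```java
import java.util.concurrent.locks.Lock;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.integration.redis.util.RedisLockRegistry;

@Configuration
public class RedisLockRegistrySketch {

    @Bean
    public RedisLockRegistry redisLockRegistry(RedisConnectionFactory connectionFactory) {
        // every lock obtained from this registry lives under the "locks" key prefix in Redis
        return new RedisLockRegistry(connectionFactory, "locks");
    }

    // typical call site: obtain() hands back a plain java.util.concurrent.locks.Lock
    public void runGuarded(RedisLockRegistry registry) {
        Lock lock = registry.obtain("order-42");
        if (lock.tryLock()) {
            try {
                // critical section, mutually exclusive across all application instances
            } finally {
                lock.unlock();
            }
        }
    }
}
```

ZookeeperLockRegistry and JdbcLockRegistry expose the same `LockRegistry#obtain` contract, so the call site stays identical when the backing store changes.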
--------------------------------------------------------------------------------
/Screenshots/hazelcast_management_center.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/v5tech/distributed-lock-examples/c514db77447a5abb85784a2329464ab84cf0b4bc/Screenshots/hazelcast_management_center.png
--------------------------------------------------------------------------------
/Screenshots/hazelcast_management_center_idgenerator.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/v5tech/distributed-lock-examples/c514db77447a5abb85784a2329464ab84cf0b4bc/Screenshots/hazelcast_management_center_idgenerator.png
--------------------------------------------------------------------------------
/Screenshots/hazelcast_management_center_map.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/v5tech/distributed-lock-examples/c514db77447a5abb85784a2329464ab84cf0b4bc/Screenshots/hazelcast_management_center_map.png
--------------------------------------------------------------------------------
/distributed-hazelcast-lock-example/README.md:
--------------------------------------------------------------------------------
1 | # Hazelcast
2 | 
3 | Deploying Hazelcast with Docker
4 | 
5 | ```bash
6 | $ docker run -e JAVA_OPTS="-Xms512M -Xmx1024M" -p 5701:5701 hazelcast/hazelcast:3.12.7
7 | $ docker run -e JAVA_OPTS="-Xms512M -Xmx1024M" -p 5702:5701 hazelcast/hazelcast:3.12.7
8 | $ docker run -e JAVA_OPTS="-Xms512M -Xmx1024M" -p 5703:5701 hazelcast/hazelcast:3.12.7
9 | ```
10 | 
11 | The proper way to do it, however, looks like this:
12 | 
13 | ```bash
14 | $ docker run -d -e JAVA_OPTS="-Dhazelcast.local.publicAddress=172.24.202.128:5701 -Dhazelcast.rest.enabled=true -Xms128M -Xmx256M" -e MANCENTER_URL="http://172.24.202.128:8080/hazelcast-mancenter" -p 5701:5701 hazelcast/hazelcast:3.12.7
15 | $ docker run -d -e JAVA_OPTS="-Dhazelcast.local.publicAddress=172.24.202.128:5702 -Dhazelcast.rest.enabled=true -Xms128M -Xmx256M" -e MANCENTER_URL="http://172.24.202.128:8080/hazelcast-mancenter" -p 5702:5701 hazelcast/hazelcast:3.12.7
16 | $ docker run -d -e JAVA_OPTS="-Dhazelcast.local.publicAddress=172.24.202.128:5703 -Dhazelcast.rest.enabled=true -Xms128M -Xmx256M" -e MANCENTER_URL="http://172.24.202.128:8080/hazelcast-mancenter" -p 5703:5701 hazelcast/hazelcast:3.12.7
17 | ```
18 | 
19 | Hazelcast management-center
20 | 
21 | Install and configure the management node to monitor the cluster and inspect caches in real time
22 | 
23 | ```bash
24 | $ docker run \
25 |   -m 512m \
26 |   -p 8080:8080 \
27 |   --rm \
28 |   hazelcast/management-center:3.12.9
29 | ```
30 | 
31 | ### Single-node deployment of the Hazelcast image
32 | 
33 | ```bash
34 | docker run -d -e JAVA_OPTS="-Dhazelcast.local.publicAddress=172.24.202.128:5701 -Dhazelcast.rest.enabled=true -Xms128M -Xmx256M" -p 5701:5701 hazelcast/hazelcast:3.12.7
35 | ```
36 | 
37 | Notes:
38 | 
39 | * hazelcast.rest.enabled=true must be set, otherwise the management node cannot connect
40 | * docker must start the service in the background with -d; the container-internal port is 5701
41 | * it is best to set publicAddress explicitly, and the JVM heap sizes must be set
42 | 
43 | ### Multi-node multicast cluster deployment of the Hazelcast image
44 | 
45 | Node 1:
46 | 
47 | ```bash
48 | docker run -d -e JAVA_OPTS="-Dhazelcast.local.publicAddress=172.24.202.128:5701 -Dhazelcast.rest.enabled=true -Xms128M -Xmx256M" -e MANCENTER_URL="http://127.0.0.1:8080/hazelcast-mancenter" -p 5701:5701 hazelcast/hazelcast:3.12.7
49 | ```
50 | 
51 | Node 2:
52 | ```bash
53 | docker run -d -e JAVA_OPTS="-Dhazelcast.local.publicAddress=172.24.202.129:5701 -Dhazelcast.rest.enabled=true -Xms128M -Xmx256M" -e MANCENTER_URL="http://127.0.0.1:8080/hazelcast-mancenter" -p 5701:5701 hazelcast/hazelcast:3.12.7
54 | ```
55 | 
56 | Notes:
57 | 
58 | * point MANCENTER_URL at the management node address
59 | * multicast members must live on the same host, because under docker the broadcast only reaches containers on the local machine
60 | 
61 | ### Multi-node TCP-IP cluster deployment of the Hazelcast image
62 | 
63 | hazelcast.xml configuration:
64 | 
65 | ```xml
66 | <hazelcast xmlns="http://www.hazelcast.com/schema/config"
67 |            xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
68 |            xsi:schemaLocation="http://www.hazelcast.com/schema/config
69 |            http://www.hazelcast.com/schema/config/hazelcast-config-3.12.xsd">
70 |     <management-center enabled="true">http://127.0.0.1:8080/hazelcast-mancenter</management-center>
71 |     <network>
72 |         <port auto-increment="true" port-count="10">5701</port>
73 |         <join>
74 |             <multicast enabled="false">
75 |                 <multicast-group>224.2.2.3</multicast-group>
76 |                 <multicast-port>54327</multicast-port>
77 |             </multicast>
78 |             <tcp-ip enabled="true">
79 |                 <interface>172.24.202.120-129</interface>
80 |                 <member-list>
81 |                     <member>172.24.202.128</member>
82 |                     <member>172.24.202.129</member>
83 |                 </member-list>
84 |             </tcp-ip>
85 |         </join>
86 |     </network>
87 | </hazelcast>
88 | ```
89 | 
90 | Node 1
91 | 
92 | ```bash
93 | docker run -d -e JAVA_OPTS="-Dhazelcast.config=/opt/hazelcast/config_ext/hazelcast.xml -Dhazelcast.local.publicAddress=172.24.202.128:5701 -Dhazelcast.rest.enabled=true -Xms128M -Xmx512M" -v ./config:/opt/hazelcast/config_ext -p 5701:5701 hazelcast/hazelcast:3.12.7
94 | ```
95 | 
96 | Node 2
97 | 
98 | ```bash
99 | docker run -d -e JAVA_OPTS="-Dhazelcast.config=/opt/hazelcast/config_ext/hazelcast.xml -Dhazelcast.local.publicAddress=172.24.202.129:5701 -Dhazelcast.rest.enabled=true -Xms128M -Xmx512M" -v ./config:/opt/hazelcast/config_ext -p 5701:5701 hazelcast/hazelcast:3.12.7
100 | ```
101 | 
102 | Output
103 | 
104 | ```
105 | Members {size:2, ver:2} [
106 | 	Member [172.24.202.128]:5701 - cd40d155-d993-46a5-b07c-19f001c71f3c
107 | 	Member [172.24.202.129]:5701 - 34d0798c-37d0-42e8-88f0-1268eab9a90a this
108 | ]
109 | ```
110 | 
111 | Notes:
112 | 
113 | * port auto-increment is capped at 10, i.e. each machine is limited to ports 5701-5711, which speeds up member discovery
114 | * multicast is disabled, tcp-ip is enabled
115 | * restrict interface to the given IP range
116 | * list the cluster members explicitly via member-list
117 | * point the configuration at the file mounted from the host
118 | 
119 | 
120 | Quickly start a three-node hazelcast cluster locally
121 | 
122 | ```bash
123 | docker run \
124 |   -d \
125 |   -e \
126 |   JAVA_OPTS="-Dhazelcast.config=/opt/hazelcast/config_ext/hazelcast.xml -Dhazelcast.local.publicAddress=172.24.202.128:5701 -Dhazelcast.rest.enabled=true -Xms128M -Xmx512M" \
127 |   -v $(pwd)/hazelcast.xml:/opt/hazelcast/config_ext/hazelcast.xml \
128 |   -p 5701:5701 \
129 |   hazelcast/hazelcast:3.12.7
130 | ```
131 | 
132 | 
133 | ```bash
134 | docker run \
135 |   -d \
136 |   -e \
137 |   JAVA_OPTS="-Dhazelcast.config=/opt/hazelcast/config_ext/hazelcast.xml -Dhazelcast.local.publicAddress=172.24.202.128:5702 -Dhazelcast.rest.enabled=true -Xms128M -Xmx512M" \
138 |   -v $(pwd)/hazelcast.xml:/opt/hazelcast/config_ext/hazelcast.xml \
139 |   -p 5702:5701 \
140 |   hazelcast/hazelcast:3.12.7
141 | ```
142 | 
143 | 
144 | ```bash
145 | docker run \
146 |   -d \
147 |   -e \
148 |   JAVA_OPTS="-Dhazelcast.config=/opt/hazelcast/config_ext/hazelcast.xml -Dhazelcast.local.publicAddress=172.24.202.128:5703 -Dhazelcast.rest.enabled=true -Xms128M -Xmx512M" \
149 |   -v $(pwd)/hazelcast.xml:/opt/hazelcast/config_ext/hazelcast.xml \
150 |   -p 5703:5701 \
151 |   hazelcast/hazelcast:3.12.7
152 | ```
153 | 
154 | 
155 | Configuration
156 | 
157 | Log in to the management-center
158 | 
159 | Open: http://127.0.0.1:8080/hazelcast-mancenter
160 | 
161 | On first use, set up the initial account and password (the password has format requirements)
162 | 
163 | Add the member nodes, i.e. Change URL
164 | 
165 | Enter the Cluster Name and Password, i.e. the cluster name and cluster password
166 | 
167 | Enter the Server URL, i.e. the Management Center URL (the address of the management console), e.g. http://127.0.0.1:8080/hazelcast-mancenter
168 | 
169 | Test the connection
170 | 
171 | Add the dependencies to pom.xml
172 | 
173 | ```xml
174 | <dependency>
175 |     <groupId>com.hazelcast</groupId>
176 |     <artifactId>hazelcast</artifactId>
177 |     <version>3.12.7</version>
178 | </dependency>
179 | <dependency>
180 |     <groupId>com.hazelcast</groupId>
181 |     <artifactId>hazelcast-spring</artifactId>
182 |     <version>3.12.7</version>
183 | </dependency>
184 | <dependency>
185 |     <groupId>com.hazelcast</groupId>
186 |     <artifactId>hazelcast-client</artifactId>
187 |     <version>3.12.7</version>
188 | </dependency>
189 | ```
190 | 
191 | Client code
192 | 
193 | ```java
194 | @Slf4j
195 | public class HazelcastTest {
196 |     public static void main(String[] args) {
197 |         ClientConfig clientConfig = new ClientConfig();
198 |         // cluster group name
199 |         clientConfig.getGroupConfig().setName("dev");
200 |         // member addresses
201 |         clientConfig.getNetworkConfig().addAddress("172.24.202.128:5701", "172.24.202.129:5701");
202 |         // create the client
203 |         HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);
204 |     }
205 | }
206 | ```
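Going one step beyond the connection test above, a minimal `FencedLock` round-trip against the same cluster could look like the following sketch (the lock name `demo-lock` is an arbitrary choice):

```java
import com.hazelcast.client.HazelcastClient;
import com.hazelcast.client.config.ClientConfig;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.cp.lock.FencedLock;

public class FencedLockClientSketch {
    public static void main(String[] args) {
        ClientConfig clientConfig = new ClientConfig();
        // must match the group name in hazelcast.xml
        clientConfig.getGroupConfig().setName("dev");
        clientConfig.getNetworkConfig().addAddress("172.24.202.128:5701", "172.24.202.129:5701");
        HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);

        // FencedLock lives in the CP subsystem
        FencedLock lock = client.getCPSubsystem().getLock("demo-lock");
        lock.lock();
        try {
            // critical section: only one client in the cluster runs this at a time
        } finally {
            lock.unlock();
        }
        client.shutdown();
    }
}
```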
--------------------------------------------------------------------------------
/distributed-hazelcast-lock-example/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: "3.6"
2 | 
3 | services:
4 |   management-center:
5 |     container_name: management-center
6 |     image: hazelcast/management-center:3.12.9
7 |     ports:
8 |       - 8080:8080
9 | 
10 |   hazelcast1:
11 |     container_name: hazelcast1
12 |     image: hazelcast/hazelcast:3.12.7
13 |     depends_on:
14 |       - management-center
15 |     ports:
16 |       - 5701:5701
17 |     volumes:
18 |       - ./hazelcast.xml:/opt/hazelcast/config_ext/hazelcast.xml
19 |     environment:
20 |       - JAVA_OPTS=-Dhazelcast.config=/opt/hazelcast/config_ext/hazelcast.xml -Dhazelcast.local.publicAddress=172.24.202.121:5701 -Dhazelcast.rest.enabled=true -Xms128M -Xmx512M
21 | 
22 |   hazelcast2:
23 |     container_name: hazelcast2
24 |     image: hazelcast/hazelcast:3.12.7
25 |     depends_on:
26 |       - management-center
27 |     ports:
28 |       - 5702:5701
29 |     volumes:
30 |       - ./hazelcast.xml:/opt/hazelcast/config_ext/hazelcast.xml
31 |     environment:
32 |       - JAVA_OPTS=-Dhazelcast.config=/opt/hazelcast/config_ext/hazelcast.xml -Dhazelcast.local.publicAddress=172.24.202.121:5702 -Dhazelcast.rest.enabled=true -Xms128M -Xmx512M
33 | 
34 |   hazelcast3:
35 |     container_name: hazelcast3
36 |     image: hazelcast/hazelcast:3.12.7
37 |     depends_on:
38 |       - management-center
39 |     ports:
40 |       - 5703:5701
41 |     volumes:
42 |       - ./hazelcast.xml:/opt/hazelcast/config_ext/hazelcast.xml
43 |     environment:
44 |       - JAVA_OPTS=-Dhazelcast.config=/opt/hazelcast/config_ext/hazelcast.xml -Dhazelcast.local.publicAddress=172.24.202.121:5703 -Dhazelcast.rest.enabled=true -Xms128M -Xmx512M
45 | 
46 | 
--------------------------------------------------------------------------------
/distributed-hazelcast-lock-example/hazelcast.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <hazelcast xmlns="http://www.hazelcast.com/schema/config"
3 |            xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 |            xsi:schemaLocation="http://www.hazelcast.com/schema/config
5 |            http://www.hazelcast.com/schema/config/hazelcast-config-3.12.xsd">
6 | 
7 |     <group>
8 |         <name>dev</name>
9 |         <password>dev-pass</password>
10 |     </group>
11 | 
12 |     <management-center enabled="true">
13 |         http://172.24.202.121:8080/hazelcast-mancenter
14 |     </management-center>
15 | 
16 |     <cp-subsystem>
17 |         <cp-member-count>3</cp-member-count>
18 |     </cp-subsystem>
19 | 
20 | </hazelcast>
21 | 
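If you prefer to start an embedded member from the same `hazelcast.xml` instead of the Docker image, a minimal sketch might look like this (the relative file path is an assumption):

```java
import com.hazelcast.config.Config;
import com.hazelcast.config.XmlConfigBuilder;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;

import java.io.FileInputStream;
import java.io.InputStream;

public class EmbeddedMemberSketch {
    public static void main(String[] args) throws Exception {
        // load the same XML the Docker containers mount under /opt/hazelcast/config_ext
        try (InputStream in = new FileInputStream("hazelcast.xml")) {
            Config config = new XmlConfigBuilder(in).build();
            HazelcastInstance member = Hazelcast.newHazelcastInstance(config);
            System.out.println("cluster size: " + member.getCluster().getMembers().size());
        }
    }
}
```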
--------------------------------------------------------------------------------
/distributed-hazelcast-lock-example/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
3 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
4 |     <modelVersion>4.0.0</modelVersion>
5 |     <parent>
6 |         <groupId>net.ameizi</groupId>
7 |         <artifactId>distributed-lock-examples</artifactId>
8 |         <version>1.0</version>
9 |     </parent>
10 |     <artifactId>distributed-hazelcast-lock-example</artifactId>
11 |     <packaging>jar</packaging>
12 |     <name>distributed-hazelcast-lock-example</name>
13 |     <description>Demo project for Spring Boot</description>
14 |     <properties>
15 |         <java.version>1.8</java.version>
16 |     </properties>
17 |     <dependencies>
18 |         <dependency>
19 |             <groupId>org.projectlombok</groupId>
20 |             <artifactId>lombok</artifactId>
21 |             <optional>true</optional>
22 |         </dependency>
23 |         <dependency>
24 |             <groupId>org.springframework.boot</groupId>
25 |             <artifactId>spring-boot-starter-web</artifactId>
26 |         </dependency>
27 |         <dependency>
28 |             <groupId>com.hazelcast</groupId>
29 |             <artifactId>hazelcast</artifactId>
30 |         </dependency>
31 |         <dependency>
32 |             <groupId>com.hazelcast</groupId>
33 |             <artifactId>hazelcast-client</artifactId>
34 |         </dependency>
35 |         <dependency>
36 |             <groupId>com.hazelcast</groupId>
37 |             <artifactId>hazelcast-spring</artifactId>
38 |         </dependency>
39 |         <dependency>
40 |             <groupId>junit</groupId>
41 |             <artifactId>junit</artifactId>
42 |             <version>4.12</version>
43 |         </dependency>
44 |     </dependencies>
45 | 
46 |     <build>
47 |         <plugins>
48 |             <plugin>
49 |                 <groupId>org.apache.maven.plugins</groupId>
50 |                 <artifactId>maven-compiler-plugin</artifactId>
51 |                 <configuration>
52 |                     <source>1.8</source>
53 |                     <target>1.8</target>
54 |                     <encoding>UTF-8</encoding>
55 |                 </configuration>
56 |             </plugin>
57 |             <plugin>
58 |                 <groupId>org.springframework.boot</groupId>
59 |                 <artifactId>spring-boot-maven-plugin</artifactId>
60 |             </plugin>
61 |         </plugins>
62 |     </build>
63 | </project>
--------------------------------------------------------------------------------
/distributed-hazelcast-lock-example/src/main/java/net/ameizi/distributed/hazelcast/example/DistributedHazelcastLockExampleApplication.java:
--------------------------------------------------------------------------------
1 | package net.ameizi.distributed.hazelcast.example;
2 | 
3 | import com.hazelcast.core.HazelcastInstance;
4 | import com.hazelcast.cp.lock.FencedLock;
5 | import lombok.extern.slf4j.Slf4j;
6 | import org.junit.Assert;
7 | import org.springframework.boot.SpringApplication;
8 | import org.springframework.boot.autoconfigure.SpringBootApplication;
9 | import org.springframework.web.bind.annotation.GetMapping;
10 | import org.springframework.web.bind.annotation.RestController;
11 | 
12 | import javax.annotation.Resource;
13 | 
14 | @Slf4j
15 | @RestController
16 | @SpringBootApplication
17 | public class DistributedHazelcastLockExampleApplication {
18 | 
19 |     @Resource(name="hazelcastInstance1")
20 |     private HazelcastInstance hz1;
21 |     @Resource(name="hazelcastInstance2")
22 |     private HazelcastInstance hz2;
23 |     @Resource(name="hazelcastInstance3")
24 |     private HazelcastInstance hz3;
25 | 
26 |     public static void main(String[] args) {
27 |         SpringApplication.run(DistributedHazelcastLockExampleApplication.class, args);
28 |     }
29 | 
30 |     @GetMapping("/reentrant-lock")
31 |     public void reentrantlock() {
32 |         FencedLock hz1Lock = hz1.getCPSubsystem().getLock("LOCK");
33 |         FencedLock hz2Lock = hz2.getCPSubsystem().getLock("LOCK");
34 | 
35 |         hz1Lock.lock();
36 |         hz1Lock.lock();
37 | 
38 |         boolean b = hz2Lock.tryLock();
39 |         Assert.assertFalse(b);
40 | 
41 |         hz1Lock.unlock();
42 |         hz1Lock.unlock();
43 | 
44 |         b = hz2Lock.tryLock();
45 |         Assert.assertTrue(b);
46 |         hz2Lock.unlock();
47 |     }
48 | 
49 |     @GetMapping("/lock")
50 |     public void lock() {
51 |         // FencedLock is reentrant by default; FencedLockConfig can make it non-reentrant
52 |         FencedLock hz1Lock = hz1.getCPSubsystem().getLock("LOCK");
53 |         FencedLock hz2Lock = hz2.getCPSubsystem().getLock("LOCK");
54 | 
55 |         hz1Lock.lock();
56 |         // locking a second time would throw com.hazelcast.cp.lock.exception.LockAcquireLimitReachedException: Lock[LOCK] reentrant lock limit is already reached!
57 | // hz1Lock.lock(); 58 | 59 | boolean b = hz2Lock.tryLock(); 60 | Assert.assertFalse(b); 61 | 62 | hz1Lock.unlock(); 63 | 64 | b = hz2Lock.tryLock(); 65 | Assert.assertTrue(b); 66 | hz2Lock.unlock(); 67 | } 68 | 69 | } 70 | -------------------------------------------------------------------------------- /distributed-hazelcast-lock-example/src/main/java/net/ameizi/distributed/hazelcast/example/LockController.java: -------------------------------------------------------------------------------- 1 | package net.ameizi.distributed.hazelcast.example; 2 | 3 | import com.hazelcast.core.HazelcastInstance; 4 | import com.hazelcast.cp.lock.FencedLock; 5 | import com.hazelcast.flakeidgen.FlakeIdGenerator; 6 | import lombok.extern.slf4j.Slf4j; 7 | import org.junit.Assert; 8 | import org.springframework.boot.SpringApplication; 9 | import org.springframework.web.bind.annotation.GetMapping; 10 | import org.springframework.web.bind.annotation.RestController; 11 | 12 | import javax.annotation.Resource; 13 | 14 | 15 | @Slf4j 16 | @RestController 17 | public class LockController { 18 | 19 | public static final String ID_GENERATOR = "idGenerator"; 20 | 21 | @Resource(name="hazelcastInstance1") 22 | private HazelcastInstance hazelcastInstance; 23 | 24 | public static void main(String[] args) { 25 | SpringApplication.run(DistributedHazelcastLockExampleApplication.class, args); 26 | } 27 | 28 | /** 29 | * 分布式锁 30 | */ 31 | @GetMapping("/loc") 32 | public void lock() { 33 | FencedLock loc = hazelcastInstance.getCPSubsystem().getLock("loc"); 34 | boolean b = loc.tryLock(); 35 | Assert.assertTrue(b); 36 | loc.unlock(); 37 | } 38 | 39 | /** 40 | * 分布式 id 41 | * @return 42 | */ 43 | @GetMapping("/getid") 44 | public long getid(){ 45 | FlakeIdGenerator flakeIdGenerator = hazelcastInstance.getFlakeIdGenerator(ID_GENERATOR); 46 | return flakeIdGenerator.newId(); 47 | } 48 | 49 | } 50 | -------------------------------------------------------------------------------- /distributed-hazelcast-lock-example/src/main/java/net/ameizi/distributed/hazelcast/example/config/HazelcastConfiguration.java: -------------------------------------------------------------------------------- 1 | package net.ameizi.distributed.hazelcast.example.config; 2 | 3 | import com.hazelcast.client.HazelcastClient; 4 | import com.hazelcast.client.config.ClientConfig; 5 | import com.hazelcast.client.config.ClientFlakeIdGeneratorConfig; 6 | import com.hazelcast.config.Config; 7 | import com.hazelcast.config.FlakeIdGeneratorConfig; 8 | import com.hazelcast.config.ManagementCenterConfig; 9 | import com.hazelcast.config.cp.FencedLockConfig; 10 | import com.hazelcast.core.HazelcastInstance; 11 | import org.springframework.context.annotation.Bean; 12 | import org.springframework.context.annotation.Configuration; 13 | 14 | import static java.util.concurrent.TimeUnit.MINUTES; 15 | 16 | /** 17 | * hazelcast集群 server 注册,程序启动后启动三个hazelcast节点并自动发现注册为集群 18 | */ 19 | @Configuration 20 | public class HazelcastConfiguration { 21 | 22 | public static final String ID_GENERATOR = "idGenerator"; 23 | 24 | @Bean 25 | public FlakeIdGeneratorConfig flakeIdGeneratorConfig(){ 26 | FlakeIdGeneratorConfig idGeneratorConfig = new FlakeIdGeneratorConfig(ID_GENERATOR); 27 | idGeneratorConfig.setPrefetchCount(10) 28 | .setPrefetchValidityMillis(MINUTES.toMillis(10)); 29 | return idGeneratorConfig; 30 | } 31 | 32 | @Bean 33 | public ClientFlakeIdGeneratorConfig clientFlakeIdGeneratorConfig(){ 34 | ClientFlakeIdGeneratorConfig idGeneratorConfig = new 
ClientFlakeIdGeneratorConfig(ID_GENERATOR); 35 | idGeneratorConfig.setPrefetchCount(10) 36 | .setPrefetchValidityMillis(MINUTES.toMillis(10)); 37 | return idGeneratorConfig; 38 | } 39 | 40 | /** 41 | * 本地启动嵌入式hazelcast集群配置,会在本地启动hazelcast服务器并组好集群 42 | * @return 43 | */ 44 | @Bean 45 | public Config hazelCastConfig() { 46 | // 设置集群管理中心 47 | ManagementCenterConfig centerConfig = new ManagementCenterConfig(); 48 | centerConfig.setUrl("http://localhost:8080/hazelcast-mancenter"); 49 | centerConfig.setEnabled(true); 50 | 51 | FencedLockConfig fencedLockConfig = new FencedLockConfig(); 52 | // 不可重入 53 | fencedLockConfig.disableReentrancy(); 54 | 55 | Config config = new Config(); 56 | config.getCPSubsystemConfig() 57 | .setCPMemberCount(3); 58 | // 设置为不可重入锁 59 | // .addLockConfig(fencedLockConfig); 60 | 61 | config.setManagementCenterConfig(centerConfig); 62 | config.addFlakeIdGeneratorConfig(flakeIdGeneratorConfig()); 63 | 64 | return config; 65 | } 66 | 67 | /** 68 | * 客户端配置,连接远程hazelcast服务器集群 69 | * @return 70 | */ 71 | @Bean 72 | public ClientConfig clientConfig(){ 73 | ClientConfig clientConfig = new ClientConfig(); 74 | //集群组名称 75 | clientConfig.getGroupConfig().setName("dev"); 76 | //节点地址 77 | clientConfig.getNetworkConfig().addAddress("127.0.0.1:5701", "127.0.0.1:5702", "127.0.0.1:5703"); 78 | clientConfig.addFlakeIdGeneratorConfig(clientFlakeIdGeneratorConfig()); 79 | return clientConfig; 80 | } 81 | 82 | 83 | @Bean 84 | public HazelcastInstance hazelcastInstance1(){ 85 | // return Hazelcast.newHazelcastInstance(hazelCastConfig()); // 本地启动hazelcast服务器 86 | return HazelcastClient.newHazelcastClient(clientConfig()); // 连接远程hazelcast服务器 87 | } 88 | 89 | @Bean 90 | public HazelcastInstance hazelcastInstance2(){ 91 | // return Hazelcast.newHazelcastInstance(hazelCastConfig()); // 本地启动hazelcast服务器 92 | return HazelcastClient.newHazelcastClient(clientConfig()); // 连接远程hazelcast服务器 93 | } 94 | 95 | @Bean 96 | public HazelcastInstance hazelcastInstance3(){ 97 | // return Hazelcast.newHazelcastInstance(hazelCastConfig()); // 本地启动hazelcast服务器 98 | return HazelcastClient.newHazelcastClient(clientConfig()); // 连接远程hazelcast服务器 99 | } 100 | 101 | } 102 | -------------------------------------------------------------------------------- /distributed-hazelcast-lock-example/src/main/resources/application.properties: -------------------------------------------------------------------------------- 1 | # 应用服务web访问端口 2 | server.port=8081 3 | spring.application.name=distributed-hazelcast-lock-example 4 | -------------------------------------------------------------------------------- /distributed-hazelcast-lock-example/src/test/java/net/ameizi/distributed/hazelcast/example/FlakeIdGeneratorSample.java: -------------------------------------------------------------------------------- 1 | package net.ameizi.distributed.hazelcast.example; 2 | 3 | import com.hazelcast.client.HazelcastClient; 4 | import com.hazelcast.client.config.ClientConfig; 5 | import com.hazelcast.client.config.ClientFlakeIdGeneratorConfig; 6 | import com.hazelcast.core.HazelcastInstance; 7 | import com.hazelcast.flakeidgen.FlakeIdGenerator; 8 | 9 | import static java.util.concurrent.TimeUnit.MINUTES; 10 | 11 | public class FlakeIdGeneratorSample { 12 | 13 | public static void main(String[] args) { 14 | ClientConfig clientConfig = new ClientConfig() 15 | .addFlakeIdGeneratorConfig(new ClientFlakeIdGeneratorConfig("idGenerator") 16 | .setPrefetchCount(10) 17 | .setPrefetchValidityMillis(MINUTES.toMillis(10))); 18 | //集群组名称 19 | 
clientConfig.getGroupConfig().setName("dev"); 20 | //节点地址 21 | clientConfig.getNetworkConfig().addAddress("127.0.0.1:5701", "127.0.0.1:5702", "127.0.0.1:5703"); 22 | //客户端 23 | HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig); 24 | 25 | FlakeIdGenerator idGenerator = client.getFlakeIdGenerator("idGenerator"); 26 | for (int i = 0; i < 10000; i++) { 27 | System.out.printf("Id: %s\n", idGenerator.newId()); 28 | } 29 | 30 | client.getLifecycleService().shutdown(); 31 | } 32 | } -------------------------------------------------------------------------------- /distributed-hazelcast-lock-example/src/test/java/net/ameizi/distributed/hazelcast/example/HazelcastClientTest.java: -------------------------------------------------------------------------------- 1 | package net.ameizi.distributed.hazelcast.example; 2 | 3 | import com.hazelcast.client.HazelcastClient; 4 | import com.hazelcast.client.config.ClientConfig; 5 | import com.hazelcast.core.HazelcastInstance; 6 | import com.hazelcast.core.IMap; 7 | import lombok.extern.slf4j.Slf4j; 8 | 9 | import java.util.concurrent.ExecutorService; 10 | import java.util.concurrent.Executors; 11 | 12 | 13 | @Slf4j 14 | public class HazelcastClientTest { 15 | 16 | public static void main(String[] args) { 17 | 18 | ClientConfig clientConfig = new ClientConfig(); 19 | //集群组名称 20 | clientConfig.getGroupConfig().setName("dev"); 21 | //节点地址 22 | clientConfig.getNetworkConfig().addAddress("127.0.0.1:5701", "127.0.0.1:5702", "127.0.0.1:5703"); 23 | //客户端 24 | HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig); 25 | 26 | IMap instruments = client.getMap("instruments"); 27 | 28 | //并发测试 29 | Runnable runnable = () -> { 30 | long total = 10000; 31 | for (int i = 0; i < total; i++) { 32 | //插入缓存 33 | instruments.put(i, "user"+i); 34 | } 35 | }; 36 | 37 | ExecutorService executorService = Executors.newFixedThreadPool(10); 38 | int threadNum = 10; 39 | for (int i = 0; i < threadNum; i++) { 40 | executorService.submit(runnable); 41 | } 42 | 43 | // client.getLifecycleService().shutdown(); 44 | // executorService.shutdown(); 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /distributed-redis-lock-example/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4.0.0 4 | 5 | net.ameizi 6 | distributed-lock-examples 7 | 1.0 8 | 9 | distributed-redis-lock-example 10 | jar 11 | distributed-redis-lock-example 12 | Demo project for Spring Boot 13 | 14 | 1.8 15 | 16 | 17 | 18 | org.projectlombok 19 | lombok 20 | true 21 | 22 | 23 | org.springframework.boot 24 | spring-boot-starter-web 25 | 26 | 27 | org.redisson 28 | redisson-spring-boot-starter 29 | 3.12.1 30 | 31 | 32 | 33 | 34 | 35 | org.apache.maven.plugins 36 | maven-compiler-plugin 37 | 38 | 1.8 39 | 1.8 40 | UTF-8 41 | 42 | 43 | 44 | org.springframework.boot 45 | spring-boot-maven-plugin 46 | 47 | 48 | 49 | 50 | -------------------------------------------------------------------------------- /distributed-redis-lock-example/src/main/java/net/ameizi/distributed/lock/redis/example/DistributedRedisLockExampleApplication.java: -------------------------------------------------------------------------------- 1 | package net.ameizi.distributed.lock.redis.example; 2 | 3 | import lombok.extern.slf4j.Slf4j; 4 | import org.redisson.api.RLock; 5 | import org.redisson.api.RedissonClient; 6 | import org.springframework.beans.factory.annotation.Autowired; 7 | import 
org.springframework.boot.SpringApplication;
8 | import org.springframework.boot.autoconfigure.SpringBootApplication;
9 | import org.springframework.web.bind.annotation.GetMapping;
10 | import org.springframework.web.bind.annotation.RestController;
11 | 
12 | import java.util.concurrent.ExecutorService;
13 | import java.util.concurrent.Executors;
14 | import java.util.concurrent.TimeUnit;
15 | 
16 | @Slf4j
17 | @RestController
18 | @SpringBootApplication
19 | public class DistributedRedisLockExampleApplication {
20 | 
21 |     private static final String LOCK = "lock";
22 | 
23 |     @Autowired
24 |     private RedissonClient redissonClient;
25 | 
26 |     private ExecutorService executor = Executors.newFixedThreadPool(10);
27 | 
28 |     public static void main(String[] args) {
29 |         SpringApplication.run(DistributedRedisLockExampleApplication.class, args);
30 |     }
31 | 
32 |     @GetMapping("/lock")
33 |     public void lock(){
34 |         for (int i = 0; i < 100; i++) {
35 |             executor.submit(()->{
36 |                 // obtain a fair lock
37 |                 // RLock lock = redissonClient.getFairLock(LOCK);
38 |                 // obtain a non-fair lock
39 |                 RLock lock = redissonClient.getLock(LOCK);
40 |                 try{
41 |                     // try to acquire the lock: wait at most 100 seconds, auto-release after 10 seconds
42 |                     boolean b = lock.tryLock(100, 10, TimeUnit.SECONDS);
43 |                     if(b){
44 |                         try {
45 |                             // lock acquired, run the guarded business logic
46 |                             log.info("获取到分布式锁,执行业务逻辑");
47 |                         } finally {
48 |                             // release only when this thread actually holds the lock
49 |                             lock.unlock();
50 |                         }
51 |                     }
52 |                 }catch (Exception e){
53 |                     e.printStackTrace();
54 |                 }
55 |             });
56 |         }
57 |     }
58 | 
59 | }
--------------------------------------------------------------------------------
/distributed-redis-lock-example/src/main/resources/application.properties:
--------------------------------------------------------------------------------
1 | spring.application.name=distributed-redis-lock-example
2 | # web port of the application service
3 | server.port=8080
4 | spring.redis.host=localhost
5 | spring.redis.port=6379
6 | spring.redis.database=0
7 | spring.redis.password=
8 | spring.redis.redisson.config=classpath:redisson-single.yaml
9 | 
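The `redisson-*.yaml` files below are the ones referenced by `spring.redis.redisson.config` above. Outside of Spring Boot they can also be loaded directly; a minimal sketch, assuming `redisson-single.yaml` is on the classpath and its fields match the Redisson version in use:

```java
import org.redisson.Redisson;
import org.redisson.api.RLock;
import org.redisson.api.RedissonClient;
import org.redisson.config.Config;

public class RedissonFromYamlSketch {
    public static void main(String[] args) throws Exception {
        // load one of the YAML files below (single-server mode here)
        Config config = Config.fromYAML(
                RedissonFromYamlSketch.class.getResourceAsStream("/redisson-single.yaml"));
        RedissonClient redisson = Redisson.create(config);

        RLock lock = redisson.getLock("demo");
        lock.lock();
        try {
            // critical section
        } finally {
            lock.unlock();
        }
        redisson.shutdown();
    }
}
```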
--------------------------------------------------------------------------------
/distributed-redis-lock-example/src/main/resources/redisson-cluster.yaml:
--------------------------------------------------------------------------------
1 | clusterServersConfig:
2 |   idleConnectionTimeout: 10000
3 |   pingTimeout: 1000
4 |   connectTimeout: 10000
5 |   timeout: 3000
6 |   retryAttempts: 3
7 |   retryInterval: 1500
8 |   reconnectionTimeout: 3000
9 |   failedAttempts: 3
10 |   password: null
11 |   subscriptionsPerConnection: 5
12 |   clientName: null
13 |   loadBalancer: !<org.redisson.connection.balancer.RoundRobinLoadBalancer> {}
14 |   slaveSubscriptionConnectionMinimumIdleSize: 1
15 |   slaveSubscriptionConnectionPoolSize: 50
16 |   slaveConnectionMinimumIdleSize: 32
17 |   slaveConnectionPoolSize: 64
18 |   masterConnectionMinimumIdleSize: 32
19 |   masterConnectionPoolSize: 64
20 |   readMode: "SLAVE"
21 |   nodeAddresses:
22 |     - "redis://127.0.0.1:7001"
23 |     - "redis://127.0.0.1:7002"
24 |     - "redis://127.0.0.1:7003"
25 |   scanInterval: 1000
26 | threads: 0
27 | nettyThreads: 0
28 | codec: !<org.redisson.codec.JsonJacksonCodec> {}
29 | transportMode: "NIO"
--------------------------------------------------------------------------------
/distributed-redis-lock-example/src/main/resources/redisson-master-slave.yaml:
--------------------------------------------------------------------------------
1 | masterSlaveServersConfig:
2 |   idleConnectionTimeout: 10000
3 |   pingTimeout: 1000
4 |   connectTimeout: 10000
5 |   timeout: 3000
6 |   retryAttempts: 3
7 |   retryInterval: 1500
8 |   reconnectionTimeout: 3000
9 |   failedAttempts: 3
10 |   password: null
11 |   subscriptionsPerConnection: 5
12 |   clientName: null
13 |   loadBalancer: !<org.redisson.connection.balancer.RoundRobinLoadBalancer> {}
14 |   slaveSubscriptionConnectionMinimumIdleSize: 1
15 |   slaveSubscriptionConnectionPoolSize: 50
16 |   slaveConnectionMinimumIdleSize: 32
17 |   slaveConnectionPoolSize: 64
18 |   masterConnectionMinimumIdleSize: 32
19 |   masterConnectionPoolSize: 64
20 |   readMode: "SLAVE"
21 |   slaveAddresses:
22 |     - "redis://127.0.0.1:6381"
23 |     - "redis://127.0.0.1:6380"
24 |   masterAddress: "redis://127.0.0.1:6379"
25 |   database: 0
26 | threads: 0
27 | nettyThreads: 0
28 | codec: !<org.redisson.codec.JsonJacksonCodec> {}
29 | transportMode: "NIO"
--------------------------------------------------------------------------------
/distributed-redis-lock-example/src/main/resources/redisson-sentinel.yaml:
--------------------------------------------------------------------------------
1 | sentinelServersConfig:
2 |   idleConnectionTimeout: 10000
3 |   pingTimeout: 1000
4 |   connectTimeout: 10000
5 |   timeout: 3000
6 |   retryAttempts: 3
7 |   retryInterval: 1500
8 |   reconnectionTimeout: 3000
9 |   failedAttempts: 3
10 |   password: null
11 |   subscriptionsPerConnection: 5
12 |   clientName: null
13 |   loadBalancer: !<org.redisson.connection.balancer.RoundRobinLoadBalancer> {}
14 |   slaveSubscriptionConnectionMinimumIdleSize: 1
15 |   slaveSubscriptionConnectionPoolSize: 50
16 |   slaveConnectionMinimumIdleSize: 32
17 |   slaveConnectionPoolSize: 64
18 |   masterConnectionMinimumIdleSize: 32
19 |   masterConnectionPoolSize: 64
20 |   readMode: "SLAVE"
21 |   sentinelAddresses:
22 |     - "redis://127.0.0.1:7001"
23 |     - "redis://127.0.0.1:7002"
24 |   masterName: "master"
25 |   database: 0
26 | threads: 0
27 | nettyThreads: 0
28 | codec: !<org.redisson.codec.JsonJacksonCodec> {}
29 | transportMode: "NIO"
--------------------------------------------------------------------------------
/distributed-redis-lock-example/src/main/resources/redisson-single.yaml:
--------------------------------------------------------------------------------
1 | # single-server mode
2 | singleServerConfig:
3 |   # connection idle timeout, in milliseconds
4 |   idleConnectionTimeout: 10000
5 |   # ping interval
6 |   pingTimeout: 1000
7 |   # connect timeout, in milliseconds
8 |   connectTimeout: 10000
9 |   # command wait timeout, in milliseconds
10 |   timeout: 3000
11 |   # retry count for failed commands; if a command still cannot be sent to a node after retryAttempts tries, an error is raised.
12 |   # once a send succeeds within this limit, the timeout (command wait timeout) clock starts.
13 |   retryAttempts: 3
14 |   # interval between command retries, in milliseconds
15 |   retryInterval: 1000
16 |   # reconnection interval, in milliseconds
17 |   reconnectionTimeout: 3000
18 |   # maximum number of failed executions
19 |   failedAttempts: 3
20 |   # password
21 |   password: null
22 |   # maximum number of subscriptions per connection
23 |   subscriptionsPerConnection: 5
24 |   # client name
25 |   clientName: null
26 |   # node address
27 |   address: redis://127.0.0.1:6379
28 |   # minimum idle connections for pub/sub
29 |   subscriptionConnectionMinimumIdleSize: 1
30 |   # pub/sub connection pool size
31 |   subscriptionConnectionPoolSize: 50
32 |   # minimum number of idle connections
33 |   connectionMinimumIdleSize: 32
34 |   # connection pool size
35 |   connectionPoolSize: 64
36 |   # database index
37 |   database: 0
38 |   # DNS monitoring interval, in milliseconds
39 |   dnsMonitoringInterval: 5000
40 | # thread pool size, default: number of cores * 2
41 | #threads: 0
42 | # Netty thread pool size, default: number of cores * 2
43 | #nettyThreads: 0
44 | # codec
45 | codec: !<org.redisson.codec.JsonJacksonCodec> {}
46 | # transport mode
47 | transportMode: "NIO"
--------------------------------------------------------------------------------
/distributed-zookeeper-lock-example/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
3 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
4 |     <modelVersion>4.0.0</modelVersion>
5 |     <parent>
6 |         <groupId>net.ameizi</groupId>
7 |         <artifactId>distributed-lock-examples</artifactId>
8 |         <version>1.0</version>
9 |     </parent>
10 |     <artifactId>distributed-zookeeper-lock-example</artifactId>
11 |     <packaging>jar</packaging>
12 |     <name>distributed-zookeeper-lock-example</name>
13 |     <description>Demo project for Spring Boot</description>
14 |     <properties>
15 |         <java.version>1.8</java.version>
16 |     </properties>
17 |     <dependencies>
18 |         <dependency>
19 |             <groupId>org.projectlombok</groupId>
20 |             <artifactId>lombok</artifactId>
21 |             <optional>true</optional>
22 |         </dependency>
23 |         <dependency>
24 |             <groupId>org.springframework.boot</groupId>
25 |             <artifactId>spring-boot-starter-web</artifactId>
26 |         </dependency>
27 |         <dependency>
28 |             <groupId>org.apache.curator</groupId>
29 |             <artifactId>curator-recipes</artifactId>
30 |             <version>4.2.0</version>
31 |         </dependency>
32 |         <dependency>
33 |             <groupId>org.apache.curator</groupId>
34 |             <artifactId>curator-test</artifactId>
35 |             <version>4.2.0</version>
36 |         </dependency>
37 |         <dependency>
38 |             <groupId>junit</groupId>
39 |             <artifactId>junit</artifactId>
40 |             <version>4.12</version>
41 |         </dependency>
42 |     </dependencies>
43 | 
44 |     <build>
45 |         <plugins>
46 |             <plugin>
47 |                 <groupId>org.apache.maven.plugins</groupId>
48 |                 <artifactId>maven-compiler-plugin</artifactId>
49 |                 <configuration>
50 |                     <source>1.8</source>
51 |                     <target>1.8</target>
52 |                     <encoding>UTF-8</encoding>
53 |                 </configuration>
54 |             </plugin>
55 |             <plugin>
56 |                 <groupId>org.springframework.boot</groupId>
57 |                 <artifactId>spring-boot-maven-plugin</artifactId>
58 |             </plugin>
59 |         </plugins>
60 |     </build>
61 | </project>
--------------------------------------------------------------------------------
/distributed-zookeeper-lock-example/src/main/java/net/ameizi/distributed/lock/zookeeper/example/DistributedZookeeperLockExampleApplication.java:
--------------------------------------------------------------------------------
1 | package net.ameizi.distributed.lock.zookeeper.example;
2 | 
3 | import lombok.extern.slf4j.Slf4j;
4 | import org.apache.curator.framework.CuratorFramework;
5 | import org.apache.curator.framework.recipes.locks.InterProcessMutex;
6 | import org.springframework.beans.factory.annotation.Autowired;
7 | import org.springframework.boot.SpringApplication;
8 | import org.springframework.boot.autoconfigure.SpringBootApplication;
9 | import org.springframework.web.bind.annotation.GetMapping;
10 | import org.springframework.web.bind.annotation.RestController;
11 | 
12 | import java.util.concurrent.ExecutorService;
13 | import java.util.concurrent.Executors;
14 | import java.util.concurrent.TimeUnit;
15 | 
16 | @Slf4j
17 | @RestController
18 | @SpringBootApplication
19 | public class DistributedZookeeperLockExampleApplication {
20 | 
21 |     @Autowired
22 |     private CuratorFramework curatorFramework;
23 | 
24 |     /** thread pool */
25 |     private ExecutorService executor = Executors.newFixedThreadPool(5);
26 | 
27 |     public static void main(String[] args) {
28 |         SpringApplication.run(DistributedZookeeperLockExampleApplication.class, args);
29 |     }
30 | 
31 |     @GetMapping("/lock")
32 |     public void lock() throws Exception{
33 | //        for (int i = 0; i < 10; i++) {
34 | //            executor.submit(() -> {
35 | //                // create the lock object
36 | //                InterProcessMutex lock = new InterProcessMutex(curatorFramework, "/lock");
37 | //                try{
38 | //                    // try to acquire the lock
39 | //                    if (lock.acquire(3, TimeUnit.SECONDS)) {
40 | //                        // if the lock was acquired, run the guarded logic
41 | //                        log.info("获取分布式锁,执行逻辑");
42 | //                    }
43 | //                }catch (Exception e){
44 | //                    e.printStackTrace();
45 | //                }finally {
46 | //                    try {
47 | //                        lock.release();
48 | //                    } catch (Exception e) {
49 | //                        e.printStackTrace();
50 | //                    }
51 | //                }
52 | //            });
53 | //        }
54 | 
55 |         Thread thread1 = new Thread(() -> {
56 |             InterProcessMutex lock = new InterProcessMutex(curatorFramework, "/lock");
57 |             try{
58 |                 lock.acquire();
59 |                 log.info("{}获取分布式锁,执行逻辑",Thread.currentThread().getName());
60 |                 TimeUnit.SECONDS.sleep(3);
61 |             }catch (Exception e){
62 |                 e.printStackTrace();
63 |             }finally {
64 |                 try {
65 |                     lock.release();
66 |                 } catch (Exception e) {
67 |                     e.printStackTrace();
68 |                 }
69 |             }
70 |         });
71 |         thread1.start();
72 |         Thread thread2 = new Thread(() -> {
73 |             InterProcessMutex lock
= new InterProcessMutex(curatorFramework, "/lock"); 74 | try{ 75 | lock.acquire(); 76 | log.info("{}获取分布式锁,执行逻辑",Thread.currentThread().getName()); 77 | }catch (Exception e){ 78 | e.printStackTrace(); 79 | }finally { 80 | try { 81 | lock.release(); 82 | } catch (Exception e) { 83 | e.printStackTrace(); 84 | } 85 | } 86 | }); 87 | thread2.start(); 88 | } 89 | 90 | } 91 | -------------------------------------------------------------------------------- /distributed-zookeeper-lock-example/src/main/java/net/ameizi/distributed/lock/zookeeper/example/config/ZookeeperConfig.java: -------------------------------------------------------------------------------- 1 | package net.ameizi.distributed.lock.zookeeper.example.config; 2 | 3 | import org.apache.curator.RetryPolicy; 4 | import org.apache.curator.framework.CuratorFramework; 5 | import org.apache.curator.framework.CuratorFrameworkFactory; 6 | import org.apache.curator.retry.ExponentialBackoffRetry; 7 | import org.apache.curator.retry.RetryNTimes; 8 | import org.apache.curator.test.TestingServer; 9 | import org.springframework.context.annotation.Bean; 10 | import org.springframework.context.annotation.Configuration; 11 | 12 | /** 13 | * 初始化 Zookeeper Curator 客户端 14 | */ 15 | @Configuration 16 | public class ZookeeperConfig { 17 | 18 | /** 19 | * 创建 CuratorFramework 对象并连接 Zookeeper 20 | * 21 | * @param zookeeperProperties 从 Spring 容器载入 ZookeeperProperties Bean 对象,读取连接 ZK 的参数 22 | * @return CuratorFramework 23 | */ 24 | @Bean(initMethod = "start") 25 | public CuratorFramework curatorFramework(ZookeeperProperties zookeeperProperties) { 26 | // return CuratorFrameworkFactory.newClient( 27 | // zookeeperProperties.getAddress(), 28 | // zookeeperProperties.getSessionTimeoutMs(), 29 | // zookeeperProperties.getConnectionTimeoutMs(), 30 | // new RetryNTimes(zookeeperProperties.getRetryCount(), 31 | // zookeeperProperties.getElapsedTimeMs())); 32 | String connectString = null; 33 | try { 34 | connectString = new TestingServer().getConnectString(); 35 | RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3); 36 | return CuratorFrameworkFactory.newClient(connectString, retryPolicy); 37 | } catch (Exception e) { 38 | e.printStackTrace(); 39 | } 40 | return null; 41 | } 42 | 43 | } 44 | -------------------------------------------------------------------------------- /distributed-zookeeper-lock-example/src/main/java/net/ameizi/distributed/lock/zookeeper/example/config/ZookeeperProperties.java: -------------------------------------------------------------------------------- 1 | package net.ameizi.distributed.lock.zookeeper.example.config; 2 | 3 | import lombok.Data; 4 | import org.springframework.boot.context.properties.ConfigurationProperties; 5 | import org.springframework.context.annotation.Configuration; 6 | 7 | /** 8 | * 从配置文件中读取 Zookeeper Server 连接参数 9 | */ 10 | @Data 11 | @Configuration 12 | @ConfigurationProperties(prefix = "zookeeper") 13 | public class ZookeeperProperties { 14 | 15 | /** 重试次数 */ 16 | private int retryCount; 17 | 18 | /** 重试间隔时间 */ 19 | private int elapsedTimeMs; 20 | 21 | /**连接地址 */ 22 | private String address; 23 | 24 | /**Session过期时间 */ 25 | private int sessionTimeoutMs; 26 | 27 | /**连接超时时间 */ 28 | private int connectionTimeoutMs; 29 | 30 | } -------------------------------------------------------------------------------- /distributed-zookeeper-lock-example/src/main/resources/application.properties: -------------------------------------------------------------------------------- 1 | 
spring.application.name=distributed-zookeeper-lock-example 2 | # 应用服务web访问端口 3 | server.port=8080 4 | 5 | #Zookeeper Server地址,如果有多个使用,分隔 6 | zookeeper.address=127.0.0.1:2181 7 | #重试次数 8 | zookeeper.retryCount=5 9 | #重试间隔时间 10 | zookeeper.elapsedTimeMs=5000 11 | #session超时时间 12 | zookeeper.sessionTimeoutMs=30000 13 | #连接超时时间 14 | zookeeper.connectionTimeoutMs=10000 -------------------------------------------------------------------------------- /distributed-zookeeper-lock-example/src/test/java/net/ameizi/distributed/lock/zookeeper/example/BlockingLockTest.java: -------------------------------------------------------------------------------- 1 | package net.ameizi.distributed.lock.zookeeper.example; 2 | 3 | import java.util.Date; 4 | import java.util.UUID; 5 | import java.util.concurrent.TimeUnit; 6 | import java.util.concurrent.atomic.AtomicBoolean; 7 | import java.util.logging.Level; 8 | import java.util.logging.Logger; 9 | 10 | import org.apache.curator.RetryPolicy; 11 | import org.apache.curator.framework.CuratorFramework; 12 | import org.apache.curator.framework.CuratorFrameworkFactory; 13 | import org.apache.curator.framework.recipes.locks.InterProcessMutex; 14 | import org.apache.curator.retry.ExponentialBackoffRetry; 15 | import org.apache.curator.test.TestingServer; 16 | import org.apache.curator.utils.CloseableUtils; 17 | 18 | /** 19 | * 阻塞式锁 20 | */ 21 | public class BlockingLockTest { 22 | 23 | public static void main(String[] args) throws Exception { 24 | // 创建一个测试 Zookeeper 服务器 25 | TestingServer testingServer = new TestingServer(); 26 | // 获取该服务器的链接地址 27 | String connectString = testingServer.getConnectString(); 28 | final String lockPath = "/lock"; 29 | 30 | RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3); 31 | CuratorFramework curatorClient = CuratorFrameworkFactory.newClient(connectString, retryPolicy); 32 | curatorClient.start(); 33 | // 全局可重入锁 34 | final InterProcessMutex lock = new InterProcessMutex(curatorClient, lockPath); 35 | 36 | Runnable task = () -> { 37 | System.out.println("In BlockingLockTest"); 38 | AtomicBoolean acquired = null; 39 | for (int i = 1; i <= 2; i++) { 40 | try { 41 | System.out.println("Process " + Thread.currentThread().getName() + " TRYING lock at "+ new Date()); 42 | acquired = new AtomicBoolean(false); 43 | lock.acquire(); 44 | acquired.set(true); 45 | System.out.println("Process " + Thread.currentThread().getName() + " ACQUIRED lock. 
Iteration " + i + " at "+ new Date()); 46 | System.out.println("Process " + Thread.currentThread().getName() + " WORK-IN-PROGRESS"); 47 | Thread.sleep(1000); 48 | 49 | } catch (Exception ex) { 50 | Logger.getLogger(BlockingLockTest.class.getName()).log(Level.SEVERE, null, ex); 51 | } finally { 52 | try { 53 | if (acquired.get()) { 54 | lock.release(); 55 | System.out.println("Process " + Thread.currentThread().getName() + " RELEASED lock at "+ new Date()); 56 | } 57 | 58 | } catch (Exception ex) { 59 | Logger.getLogger(BlockingLockTest.class.getName()).log(Level.SEVERE, null, ex); 60 | } 61 | } 62 | } 63 | }; 64 | 65 | new Thread(task, UUID.randomUUID().toString()).start(); 66 | 67 | TimeUnit.SECONDS.sleep(3); 68 | 69 | CloseableUtils.closeQuietly(curatorClient); 70 | CloseableUtils.closeQuietly(testingServer); 71 | 72 | } 73 | 74 | } 75 | -------------------------------------------------------------------------------- /distributed-zookeeper-lock-example/src/test/java/net/ameizi/distributed/lock/zookeeper/example/DistributedLockTest.java: -------------------------------------------------------------------------------- 1 | package net.ameizi.distributed.lock.zookeeper.example; 2 | 3 | import org.apache.curator.RetryPolicy; 4 | import org.apache.curator.framework.CuratorFramework; 5 | import org.apache.curator.framework.CuratorFrameworkFactory; 6 | import org.apache.curator.framework.recipes.locks.*; 7 | import org.apache.curator.retry.ExponentialBackoffRetry; 8 | import org.apache.curator.test.TestingCluster; 9 | import org.apache.curator.test.TestingServer; 10 | import org.apache.curator.utils.CloseableUtils; 11 | import org.junit.After; 12 | import org.junit.Assert; 13 | import org.junit.Before; 14 | import org.junit.Test; 15 | 16 | import java.util.Arrays; 17 | import java.util.Collection; 18 | import java.util.HashSet; 19 | import java.util.Set; 20 | import java.util.concurrent.TimeUnit; 21 | 22 | public class DistributedLockTest { 23 | 24 | // Zookeeper 锁节点路径,分布式锁的相关操作都是在这个节点上进行 25 | private final String lockPath = "/distributed-lock"; 26 | private String connectString; 27 | // Curator 客户端重试策略 28 | private RetryPolicy retryPolicy; 29 | // Curator 客户端对象 30 | private CuratorFramework client1; 31 | // Curator 客户端对象 用户模拟其他客户端 32 | private CuratorFramework client2; 33 | 34 | private TestingServer testingServer; 35 | private TestingCluster testingCluster; 36 | 37 | /** 38 | * 可重入锁 39 | * 全局可重入的锁。 Shared意味着锁是全局可见的, 客户端都可以请求锁。 Reentrant和JDK的ReentrantLock类似, 意味着同一个客户端在拥有锁的同时,可以多次获取,不会被阻塞。 40 | * 它是由类InterProcessMutex来实现,该实例可重用 41 | */ 42 | @Test 43 | public void sharedReentrantLock() throws Exception { 44 | // 创建可重入锁 45 | InterProcessLock lock1 = new InterProcessMutex(client1, lockPath); 46 | // 模拟另一个客户端 47 | InterProcessLock lock2 = new InterProcessMutex(client2, lockPath); 48 | // lock1 获取锁 49 | lock1.acquire(); 50 | try { 51 | // lock1 第二次获取锁 52 | lock1.acquire(); 53 | try { 54 | // lock2 超时获取锁,因为锁已经被 lock1 客户端占用,所以获取失败,需要等 lock1 释放 55 | Assert.assertFalse(lock2.acquire(2, TimeUnit.SECONDS)); 56 | } finally { 57 | lock1.release(); 58 | } 59 | } finally { 60 | // 重入锁获取与释放需要一一对应,如果获取2次,释放1次,那么该锁依然是被占用,如果将下面这行代码注释,那么会发现下面的 lock2 获取锁失败 61 | lock1.release(); 62 | } 63 | // 在 lock1 释放后,lock2 能够获取锁 64 | Assert.assertTrue(lock2.acquire(2, TimeUnit.SECONDS)); 65 | lock2.release(); 66 | } 67 | 68 | /** 69 | * 不可重入锁Shared Lock 70 | * 这个锁和上面的相比,就是少了Reentrant的功能,也就意味着它不能在同一个线程中重入。 71 | * 这个类是InterProcessSemaphoreMutex。 72 | * 73 | * 注意需要调用release两次。这和JDK的ReentrantLock用法一致。如果少调用一次release,则此线程依然拥有锁。 74 | * 
@throws Exception 75 | */ 76 | @Test 77 | public void sharedLock() throws Exception { 78 | // 创建共享锁 79 | InterProcessLock lock1 = new InterProcessSemaphoreMutex(client1, lockPath); 80 | // 模拟另一个客户端 81 | InterProcessLock lock2 = new InterProcessSemaphoreMutex(client2, lockPath); 82 | // 获取锁对象 83 | lock1.acquire(); 84 | // 打开此方法会发现线程被阻塞在该方法上,也就是说该锁不是可重入的 85 | // lock1.acquire(); 86 | 87 | // 测试是否可以重入 88 | // 超时获取锁对象(第一个参数为时间,第二个参数为时间单位),因为锁已经被获取,所以返回 false 89 | Assert.assertFalse(lock1.acquire(2, TimeUnit.SECONDS)); 90 | Assert.assertFalse(lock2.acquire(2, TimeUnit.SECONDS)); 91 | // 释放锁 92 | lock1.release(); 93 | // lock2 尝试获取锁成功,因为锁已经被释放 94 | lock2.acquire(); 95 | Assert.assertFalse(lock2.acquire(2, TimeUnit.SECONDS)); 96 | Assert.assertFalse(lock1.acquire(2, TimeUnit.SECONDS)); 97 | lock2.release(); 98 | } 99 | 100 | /** 101 | * 可重入读写锁 102 | * 103 | * 类似JDK的ReentrantReadWriteLock. 104 | * 一个读写锁管理一对相关的锁。 一个负责读操作,另外一个负责写操作。 读操作在写锁没被使用时可同时由多个进程使用,而写锁使用时不允许读 (阻塞)。 105 | * 此锁是可重入的。一个拥有写锁的线程可重入读锁,但是读锁却不能进入写锁。 106 | * 这也意味着写锁可以降级成读锁, 比如请求写锁 --->读锁 ---->释放写锁。 从读锁升级成写锁是不成的。 107 | * 108 | * 主要由两个类实现: 109 | * 110 | * InterProcessReadWriteLock 111 | * InterProcessLock 112 | * 113 | * @throws Exception 114 | */ 115 | @Test 116 | public void sharedReentrantReadWriteLock() throws Exception { 117 | // 创建读写锁对象,因 curator 的实现原理,该锁是公平的 118 | InterProcessReadWriteLock lock1 = new InterProcessReadWriteLock(client1, lockPath); 119 | // lock2 用于模拟其他客户端 120 | InterProcessReadWriteLock lock2 = new InterProcessReadWriteLock(client2, lockPath); 121 | // 使用 lock1 模拟读操作 122 | // 使用 lock2 模拟写操作 123 | // 获取读锁(使用InterProcessMutex实现,所以是可以重入的) 124 | InterProcessLock readLock = lock1.readLock(); 125 | // 获取写锁(使用InterProcessMutex实现,所以是可以重入的) 126 | InterProcessLock writeLock = lock2.writeLock(); 127 | 128 | /** 129 | * 读写锁测试对象 130 | */ 131 | class ReadWriteLockTest { 132 | // 测试数据变更字段 133 | private Integer testData = 0; 134 | private Set threadSet = new HashSet<>(); 135 | 136 | // 写入数据 137 | private void write() throws Exception { 138 | writeLock.acquire(); 139 | try { 140 | Thread.sleep(10); 141 | testData++; 142 | System.out.println("写入数据\t" + testData); 143 | } finally { 144 | writeLock.release(); 145 | } 146 | } 147 | 148 | // 读取数据 149 | private void read() throws Exception { 150 | readLock.acquire(); 151 | try { 152 | Thread.sleep(10); 153 | System.out.println("读取数据\t" + testData); 154 | } finally { 155 | readLock.release(); 156 | } 157 | } 158 | 159 | // 等待线程结束,防止test方法调用完成后,当前线程直接退出,导致控制台无法输出信息 160 | public void waitThread() throws InterruptedException { 161 | for (Thread thread : threadSet) { 162 | thread.join(); 163 | } 164 | } 165 | 166 | // 创建线程方法 167 | private void createThread(int type) { 168 | Thread thread = new Thread(new Runnable() { 169 | @Override 170 | public void run() { 171 | try { 172 | if (type == 1) { 173 | write(); 174 | } else { 175 | read(); 176 | } 177 | } catch (Exception e) { 178 | e.printStackTrace(); 179 | } 180 | } 181 | }); 182 | threadSet.add(thread); 183 | thread.start(); 184 | } 185 | 186 | // 测试方法 187 | public void test() { 188 | for (int i = 0; i < 5; i++) { 189 | createThread(1); 190 | } 191 | for (int i = 0; i < 5; i++) { 192 | createThread(2); 193 | } 194 | } 195 | } 196 | 197 | ReadWriteLockTest readWriteLockTest = new ReadWriteLockTest(); 198 | readWriteLockTest.test(); 199 | readWriteLockTest.waitThread(); 200 | } 201 | 202 | 203 | /** 204 | * 信号量 205 | * 206 | * 一个计数的信号量类似JDK的Semaphore。 JDK中Semaphore维护的一组许可(permits),而Cubator中称之为租约(Lease)。 207 | * 
有两种方式可以决定semaphore的最大租约数。第一种方式是有用户给定的path决定。第二种方式使用SharedCountReader类。 208 | * 如果不使用SharedCountReader, 没有内部代码检查进程是否假定有10个租约而进程B假定有20个租约。 所以所有的实例必须使用相同的numberOfLeases值. 209 | * 210 | * 这次调用acquire会返回一个租约对象。 客户端必须在finally中close这些租约对象,否则这些租约会丢失掉。 但是,如果客户端session由于某种原因比如crash丢掉,那么这些客户端持有的租约会自动close,这样其它客户端可以继续使用这些租约。 211 | * 212 | * 注意一次你可以请求多个租约,如果Semaphore当前的租约不够,则请求线程会被阻塞。 同时还提供了超时的重载方法。 213 | * 214 | * @throws Exception 215 | */ 216 | @Test 217 | public void semaphore() throws Exception { 218 | // 创建一个信号量 219 | InterProcessSemaphoreV2 semaphore1 = new InterProcessSemaphoreV2(client1, lockPath, 6); 220 | // 模拟其他客户端 221 | InterProcessSemaphoreV2 semaphore2 = new InterProcessSemaphoreV2(client2, lockPath, 6); 222 | 223 | // 获取一个许可 224 | Lease lease1 = semaphore1.acquire(); 225 | Assert.assertNotNull(lease1); 226 | // semaphore.getParticipantNodes() 会返回当前参与信号量的节点列表,俩个客户端所获取的信息相同 227 | Assert.assertEquals(semaphore1.getParticipantNodes(), semaphore2.getParticipantNodes()); 228 | 229 | // 超时获取一个许可 230 | Lease lease2 = semaphore2.acquire(2, TimeUnit.SECONDS); 231 | Assert.assertNotNull(lease2); 232 | Assert.assertEquals(semaphore1.getParticipantNodes(), semaphore2.getParticipantNodes()); 233 | 234 | // 获取多个许可,参数为许可数量 235 | Collection leases1 = semaphore1.acquire(2); 236 | Assert.assertEquals(2, leases1.size()); 237 | Assert.assertEquals(semaphore1.getParticipantNodes(), semaphore2.getParticipantNodes()); 238 | 239 | // 超时获取多个许可,第一个参数为许可数量 240 | Collection leases2 = semaphore2.acquire(2, 2, TimeUnit.SECONDS); 241 | Assert.assertEquals(2, leases2.size()); 242 | Assert.assertEquals(semaphore1.getParticipantNodes(), semaphore2.getParticipantNodes()); 243 | 244 | // 目前 semaphore1 已经获取 3 个许可,semaphore2 也获取 3 个许可,加起来为 6 个,所以他们无法在进行许可获取 245 | Assert.assertNull(semaphore1.acquire(2, TimeUnit.SECONDS)); 246 | Assert.assertNull(semaphore2.acquire(2, TimeUnit.SECONDS)); 247 | 248 | semaphore1.returnLease(lease1); 249 | semaphore2.returnLease(lease2); 250 | semaphore1.returnAll(leases1); 251 | semaphore2.returnAll(leases2); 252 | } 253 | 254 | /** 255 | * 多重锁 256 | * 257 | * Multi Shared Lock是一个锁的容器。 当调用acquire, 所有的锁都会被acquire,如果请求失败,所有的锁都会被release。 同样调用release时所有的锁都被release(失败被忽略)。 258 | * 基本上,它就是组锁的代表,在它上面的请求释放操作都会传递给它包含的所有的锁。 259 | * 260 | * 主要涉及两个类: 261 | * 262 | * InterProcessMultiLock 263 | * InterProcessLock 264 | * 265 | * 新建一个InterProcessMultiLock, 包含一个重入锁和一个非重入锁。 266 | * 调用acquire后可以看到线程同时拥有了这两个锁。 267 | * 调用release看到这两个锁都被释放了。 268 | * 269 | * @throws Exception 270 | */ 271 | @Test 272 | public void multiLock() throws Exception { 273 | // 可重入锁 274 | InterProcessLock interProcessLock1 = new InterProcessMutex(client1, lockPath); 275 | // 不可重入锁 276 | InterProcessLock interProcessLock2 = new InterProcessSemaphoreMutex(client2, lockPath); 277 | // 创建多重锁对象 278 | InterProcessLock lock = new InterProcessMultiLock(Arrays.asList(interProcessLock1, interProcessLock2)); 279 | // 获取参数集合中的所有锁 280 | lock.acquire(); 281 | 282 | // 因为存在一个不可重入锁,所以整个 InterProcessMultiLock 不可重入 283 | Assert.assertFalse(lock.acquire(2, TimeUnit.SECONDS)); 284 | // interProcessLock1 是可重入锁,所以可以继续获取锁 285 | Assert.assertTrue(interProcessLock1.acquire(2, TimeUnit.SECONDS)); 286 | // interProcessLock2 是不可重入锁,所以获取锁失败 287 | Assert.assertFalse(interProcessLock2.acquire(2, TimeUnit.SECONDS)); 288 | 289 | // 释放参数集合中的所有锁 290 | lock.release(); 291 | 292 | // interProcessLock2 中的所已经释放,所以可以获取 293 | Assert.assertTrue(interProcessLock2.acquire(2, TimeUnit.SECONDS)); 294 | 295 | } 296 | 297 | @Before 298 | public void init() throws Exception { 299 | 300 | 
// 创建一个测试 Zookeeper 服务器 301 | testingServer = new TestingServer(); 302 | // 获取该服务器的链接地址 303 | connectString = testingServer.getConnectString(); 304 | 305 | // 创建一个集群测试 zookeeper 服务器 306 | // testingCluster = new TestingCluster(3); 307 | // 获取该服务器的链接地址 308 | // connectString = testingCluster.getConnectString(); 309 | 310 | // 重试策略 311 | // 初始休眠时间为 1000ms,最大重试次数为3 312 | retryPolicy = new ExponentialBackoffRetry(1000, 3); 313 | client1 = CuratorFrameworkFactory.newClient(connectString, retryPolicy); 314 | client2 = CuratorFrameworkFactory.newClient(connectString, retryPolicy); 315 | // 创建会话 316 | client1.start(); 317 | client2.start(); 318 | } 319 | 320 | // 释放资源 321 | @After 322 | public void close() { 323 | CloseableUtils.closeQuietly(client1); 324 | CloseableUtils.closeQuietly(client2); 325 | CloseableUtils.closeQuietly(testingServer); 326 | CloseableUtils.closeQuietly(testingCluster); 327 | } 328 | } 329 | -------------------------------------------------------------------------------- /distributed-zookeeper-lock-example/src/test/java/net/ameizi/distributed/lock/zookeeper/example/NonBlockingLockTest.java: -------------------------------------------------------------------------------- 1 | package net.ameizi.distributed.lock.zookeeper.example; 2 | 3 | import org.apache.curator.RetryPolicy; 4 | import org.apache.curator.framework.CuratorFramework; 5 | import org.apache.curator.framework.CuratorFrameworkFactory; 6 | import org.apache.curator.framework.recipes.locks.InterProcessMutex; 7 | import org.apache.curator.retry.ExponentialBackoffRetry; 8 | import org.apache.curator.test.TestingServer; 9 | 10 | import java.util.Date; 11 | import java.util.UUID; 12 | import java.util.concurrent.TimeUnit; 13 | import java.util.concurrent.atomic.AtomicBoolean; 14 | import java.util.logging.Level; 15 | import java.util.logging.Logger; 16 | 17 | /** 18 | * 非阻塞式锁 19 | */ 20 | public class NonBlockingLockTest { 21 | 22 | public static void main(String[] args) throws Exception { 23 | // 创建一个测试 Zookeeper 服务器 24 | TestingServer testingServer = new TestingServer(); 25 | // 获取该服务器的链接地址 26 | String connectString = testingServer.getConnectString(); 27 | final String lockPath = "/lock"; 28 | 29 | RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3); 30 | CuratorFramework curatorClient = CuratorFrameworkFactory.newClient(connectString, retryPolicy); 31 | curatorClient.start(); 32 | // 全局可重入锁 33 | final InterProcessMutex lock = new InterProcessMutex(curatorClient, lockPath); 34 | 35 | Runnable task = new Runnable() { 36 | @Override 37 | public void run() { 38 | System.out.println("In NonBlockingLockTest"); 39 | AtomicBoolean acquired = null; 40 | for (int i = 1; i <= 2; i++) { 41 | try { 42 | System.out.println("Process " + Thread.currentThread().getName() + " TRYING lock at "+ new Date()); 43 | acquired = new AtomicBoolean(false); 44 | if (lock.acquire(2, TimeUnit.SECONDS)) { 45 | acquired.set(true); 46 | System.out.println("Process " + Thread.currentThread().getName() + " ACQUIRED lock. 
Iteration " + i + " at "+ new Date()); 47 | System.out.println("Process " + Thread.currentThread().getName() + " WORK-IN-PROGRESS"); 48 | Thread.sleep(3000); //simulating some work 49 | } 50 | 51 | } catch (Exception ex) { 52 | Logger.getLogger(NonBlockingLockTest.class.getName()).log(Level.SEVERE, null, ex); 53 | } finally { 54 | try { 55 | if (acquired.get()) { 56 | lock.release(); 57 | System.out.println("Process " + Thread.currentThread().getName() + " RELEASED lock at "+ new Date()); 58 | } 59 | 60 | } catch (Exception ex) { 61 | Logger.getLogger(NonBlockingLockTest.class.getName()).log(Level.SEVERE, null, ex); 62 | } 63 | } 64 | } 65 | } 66 | }; 67 | 68 | new Thread(task, UUID.randomUUID().toString()).start(); 69 | 70 | TimeUnit.SECONDS.sleep(5); 71 | } 72 | 73 | } -------------------------------------------------------------------------------- /distributed-zookeeper-lock-example/src/test/java/net/ameizi/zookeeper/leader/examples/LeaderLatchTest.java: -------------------------------------------------------------------------------- 1 | package net.ameizi.zookeeper.leader.examples; 2 | 3 | import com.google.common.collect.Lists; 4 | import org.apache.curator.framework.CuratorFramework; 5 | import org.apache.curator.framework.CuratorFrameworkFactory; 6 | import org.apache.curator.framework.recipes.leader.LeaderLatch; 7 | import org.apache.curator.framework.recipes.queue.BlockingQueueConsumer; 8 | import org.apache.curator.framework.recipes.queue.DistributedDelayQueue; 9 | import org.apache.curator.framework.recipes.queue.QueueBuilder; 10 | import org.apache.curator.framework.recipes.queue.QueueSerializer; 11 | import org.apache.curator.framework.state.ConnectionState; 12 | import org.apache.curator.framework.state.ConnectionStateListener; 13 | import org.apache.curator.retry.ExponentialBackoffRetry; 14 | import org.apache.curator.test.TestingServer; 15 | import org.apache.curator.test.Timing; 16 | import org.apache.curator.utils.CloseableUtils; 17 | import org.junit.Assert; 18 | 19 | import java.util.List; 20 | import java.util.Random; 21 | import java.util.concurrent.TimeUnit; 22 | 23 | /** 24 | * LeaderLatch方式Zookeeper选主 25 | */ 26 | public class LeaderLatchTest { 27 | 28 | private static final int CLIENT_QTY = 10; 29 | private static final String PATH = "/leader"; 30 | 31 | List clients; 32 | List latches; 33 | TestingServer server; 34 | 35 | public static void main(String[] args) throws Exception{ 36 | LeaderLatchTest test = new LeaderLatchTest(); 37 | test.setup(); 38 | test.testSelect(); 39 | test.close(); 40 | } 41 | 42 | public void setup() throws Exception { 43 | clients = Lists.newArrayList(); 44 | latches = Lists.newArrayList(); 45 | server = new TestingServer(); 46 | // 模拟 10 个客户端 47 | for (int i = 0; i < CLIENT_QTY; i++) { 48 | CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new ExponentialBackoffRetry(1000, 3)); 49 | clients.add(client); 50 | LeaderLatch leaderLatch = new LeaderLatch(client, PATH, "Client #" + i); 51 | latches.add(leaderLatch); 52 | client.start(); 53 | // 启动该实例,并参与选举 54 | leaderLatch.start(); 55 | } 56 | } 57 | 58 | public void testSelect() throws Exception { 59 | // 延迟执行,确保zk已顺利选主 60 | TimeUnit.SECONDS.sleep(3); 61 | // 获取Leader 62 | LeaderLatch currentLeader = latches.stream().filter(LeaderLatch::hasLeadership).findFirst().get(); 63 | System.out.println("current leader is " + currentLeader.getId()); 64 | System.out.println("release the leader " + currentLeader.getId()); 65 | // 释放Leader 66 | currentLeader.close(); 
    public void close() {
        for (LeaderLatch leaderLatch : latches) {
            if (!LeaderLatch.State.CLOSED.equals(leaderLatch.getState())) {
                CloseableUtils.closeQuietly(leaderLatch);
            }
        }
        for (CuratorFramework client : clients) {
            CloseableUtils.closeQuietly(client);
        }
        CloseableUtils.closeQuietly(server);
    }

    public void queue() throws Exception {
        final int QTY = 10;
        Random random = new Random();
        Timing timing = new Timing();
        TestingServer server = new TestingServer();
        CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new ExponentialBackoffRetry(1000, 3));
        client.start();
        // Consumer that hands incoming items to an internal blocking queue
        BlockingQueueConsumer<Integer> blockingQueueConsumer = new BlockingQueueConsumer<>(new ConnectionStateListener() {
            @Override
            public void stateChanged(CuratorFramework client, ConnectionState newState) {
            }
        });
        DistributedDelayQueue<Integer> delayQueue = QueueBuilder.builder(client, blockingQueueConsumer, new QueueSerializer<Integer>() {
            @Override
            public byte[] serialize(Integer item) {
                return Integer.toString(item).getBytes();
            }

            @Override
            public Integer deserialize(byte[] bytes) {
                return Integer.parseInt(new String(bytes));
            }
        }, "/delay_queue").buildDelayQueue();
        delayQueue.start();
        try {
            for (int i = 0; i < QTY; i++) {
                // An item becomes visible to consumers once its epoch-millis delay time has passed
                long delay = System.currentTimeMillis() + random.nextInt(100);
                delayQueue.put(i, delay);
            }
            long lastValue = -1;
            for (int i = 0; i < QTY; i++) {
                Integer value = blockingQueueConsumer.take(timing.forWaiting().seconds(), TimeUnit.SECONDS);
                Assert.assertNotNull(value);
                Assert.assertTrue(value >= lastValue);
                lastValue = value;
                System.out.println(value);
            }
        } finally {
            CloseableUtils.closeQuietly(delayQueue);
            CloseableUtils.closeQuietly(client);
            CloseableUtils.closeQuietly(server);
        }
    }

}
-------------------------------------------------------------------------------- /distributed-zookeeper-lock-example/src/test/java/net/ameizi/zookeeper/leader/examples/LeaderSelectorListener.java: --------------------------------------------------------------------------------
package net.ameizi.zookeeper.leader.examples;

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.leader.LeaderSelector;
import org.apache.curator.framework.recipes.leader.LeaderSelectorListenerAdapter;

import java.io.Closeable;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Leader election with LeaderSelector
 */
public class LeaderSelectorListener extends LeaderSelectorListenerAdapter implements Closeable {

    private final String name;
    private final LeaderSelector leaderSelector;
    private final AtomicInteger leaderCount = new AtomicInteger();

    public LeaderSelectorListener(CuratorFramework client, String path, String name) {
        this.name = name;
        this.leaderSelector = new LeaderSelector(client, path, this);
        // Re-enter the election automatically after relinquishing leadership
        leaderSelector.autoRequeue();
    }

    public void start() {
        leaderSelector.start();
    }

    @Override
    public void close() {
        leaderSelector.close();
    }

    @Override
    public void takeLeadership(CuratorFramework client) throws Exception {
        // Leadership lasts only as long as this method runs; returning relinquishes it
        final int waitSeconds = (int) (3 * Math.random()) + 1;
        System.out.println(name + " is now the leader. Waiting " + waitSeconds + " seconds...");
        System.out.println(name + " has been leader " + leaderCount.getAndIncrement() + " time(s) before.");
        TimeUnit.SECONDS.sleep(waitSeconds);
        System.out.println(name + " relinquishing leadership.\n");
    }
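    // Note, a sketch rather than original code: LeaderSelectorListenerAdapter already implements
    // stateChanged() so that SUSPENDED and LOST connection states throw CancelLeadershipException,
    // which interrupts takeLeadership(). Written out by hand it would look roughly like this:
    //
    // @Override
    // public void stateChanged(CuratorFramework client, ConnectionState newState) {
    //     if (newState == ConnectionState.SUSPENDED || newState == ConnectionState.LOST) {
    //         throw new CancelLeadershipException();
    //     }
    // }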
}
-------------------------------------------------------------------------------- /distributed-zookeeper-lock-example/src/test/java/net/ameizi/zookeeper/leader/examples/LeaderSelectorTest.java: --------------------------------------------------------------------------------
package net.ameizi.zookeeper.leader.examples;

import com.google.common.collect.Lists;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.curator.test.TestingServer;
import org.apache.curator.utils.CloseableUtils;

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.List;

/**
 * With LeaderSelector the listener controls leadership and can release it at the appropriate moment,
 * so every node gets a chance to become leader. A LeaderLatch cannot do that: it holds leadership
 * until close() is called explicitly.
 */
public class LeaderSelectorTest {

    private static final int CLIENT_QTY = 10;
    private static final String PATH = "/leader";

    public static void main(String[] args) throws Exception {
        List<CuratorFramework> clients = Lists.newArrayList();
        List<LeaderSelectorListener> listeners = Lists.newArrayList();
        TestingServer server = new TestingServer();
        try {
            for (int i = 0; i < CLIENT_QTY; ++i) {
                CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new ExponentialBackoffRetry(1000, 3));
                clients.add(client);
                LeaderSelectorListener listener = new LeaderSelectorListener(client, PATH, "Client #" + i);
                listeners.add(listener);
                client.start();
                listener.start();
            }
            System.out.println("Press enter/return to quit\n");
            new BufferedReader(new InputStreamReader(System.in)).readLine();
        } finally {
            System.out.println("Shutting down...");
            for (LeaderSelectorListener leaderSelectorListener : listeners) {
                CloseableUtils.closeQuietly(leaderSelectorListener);
            }
            for (CuratorFramework client : clients) {
                CloseableUtils.closeQuietly(client);
            }
            CloseableUtils.closeQuietly(server);
        }
    }

}
-------------------------------------------------------------------------------- /pom.xml: --------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.2.7.RELEASE</version>
    </parent>
    <groupId>net.ameizi</groupId>
    <artifactId>distributed-lock-examples</artifactId>
    <version>1.0</version>
    <packaging>pom</packaging>
    <name>distributed-lock-examples</name>
    <description>Demo project for Spring Boot</description>
    <modules>
        <module>distributed-redis-lock-example</module>
        <module>distributed-zookeeper-lock-example</module>
        <module>distributed-hazelcast-lock-example</module>
        <module>spring-integration-distributed-lock-examples</module>
    </modules>
    <properties>
        <java.version>1.8</java.version>
    </properties>
    <build>
        <plugins>
            <plugin>
                <groupId>com.github.ekryd.sortpom</groupId>
                <artifactId>sortpom-maven-plugin</artifactId>
                <version>2.11.0</version>
                <executions>
                    <execution>
                        <phase>verify</phase>
                        <goals>
                            <goal>sort</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
-------------------------------------------------------------------------------- /spring-integration-distributed-lock-examples/pom.xml: --------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>net.ameizi</groupId>
        <artifactId>distributed-lock-examples</artifactId>
        <version>1.0</version>
    </parent>
    <artifactId>spring-integration-distributed-lock-examples</artifactId>
    <packaging>jar</packaging>
    <name>spring-integration-distributed-lock-examples</name>
    <description>Demo project for Spring Boot</description>
    <properties>
        <java.version>1.8</java.version>
    </properties>
    <dependencies>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>it.ozimov</groupId>
            <artifactId>embedded-redis</artifactId>
            <version>0.7.2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.curator</groupId>
            <artifactId>curator-recipes</artifactId>
            <version>4.2.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.curator</groupId>
            <artifactId>curator-test</artifactId>
            <version>4.2.0</version>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-integration</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.integration</groupId>
            <artifactId>spring-integration-redis</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.integration</groupId>
            <artifactId>spring-integration-zookeeper</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.integration</groupId>
            <artifactId>spring-integration-jdbc</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-jdbc</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-data-redis</artifactId>
        </dependency>
        <dependency>
            <groupId>com.h2database</groupId>
            <artifactId>h2</artifactId>
            <scope>runtime</scope>
        </dependency>
    </dependencies>
    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                    <encoding>UTF-8</encoding>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-maven-plugin</artifactId>
            </plugin>
        </plugins>
    </build>
</project>
-------------------------------------------------------------------------------- /spring-integration-distributed-lock-examples/src/main/java/spring/integration/distributed/lock/examples/DistributedLockRegistryApplication.java: --------------------------------------------------------------------------------
package spring.integration.distributed.lock.examples;

import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.integration.jdbc.lock.JdbcLockRegistry;
import org.springframework.integration.redis.util.RedisLockRegistry;
import org.springframework.integration.zookeeper.lock.ZookeeperLockRegistry;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;

@Slf4j
@RestController
@SpringBootApplication
public class DistributedLockRegistryApplication {

    @Autowired
    private RedisLockRegistry redisLockRegistry;

    @Autowired
    private ZookeeperLockRegistry zookeeperLockRegistry;

    @Autowired
    private JdbcLockRegistry jdbcLockRegistry;

    public static void main(String[] args) {
        SpringApplication.run(DistributedLockRegistryApplication.class, args);
    }

    @GetMapping("/lock")
    public void lock() throws InterruptedException {
        // Lock lock = redisLockRegistry.obtain("lock");
        // Lock lock = zookeeperLockRegistry.obtain("lock");
        Lock lock = jdbcLockRegistry.obtain("lock");
        try {
            boolean b1 = lock.tryLock(3, TimeUnit.SECONDS);
            log.info("b1 is : {}", b1);
            // TimeUnit.SECONDS.sleep(5);
            boolean b2 = lock.tryLock(3, TimeUnit.SECONDS);
            log.info("b2 is : {}", b2);
        } finally {
            // Remember to unlock: the lock is reentrant, so release it as many times as it was acquired
            lock.unlock();
            lock.unlock();
        }
    }
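    // A minimal sketch of the safer idiom: one successful tryLock paired with exactly one unlock.
    // The "/safeLock" endpoint is illustrative and not part of the original application.
    @GetMapping("/safeLock")
    public String safeLock() throws InterruptedException {
        Lock lock = jdbcLockRegistry.obtain("lock");
        if (!lock.tryLock(3, TimeUnit.SECONDS)) {
            return "could not acquire lock";
        }
        try {
            // Critical section goes here
            return "work done under lock";
        } finally {
            lock.unlock();
        }
    }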
}
-------------------------------------------------------------------------------- /spring-integration-distributed-lock-examples/src/main/java/spring/integration/distributed/lock/examples/config/JdbcConfiguration.java: --------------------------------------------------------------------------------
package spring.integration.distributed.lock.examples.config;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.integration.jdbc.lock.DefaultLockRepository;
import org.springframework.integration.jdbc.lock.JdbcLockRegistry;
import org.springframework.integration.jdbc.lock.LockRepository;

import javax.sql.DataSource;

@Configuration
public class JdbcConfiguration {

    @Bean
    public DefaultLockRepository lockRepository(DataSource dataSource) {
        return new DefaultLockRepository(dataSource);
    }

    @Bean
    public JdbcLockRegistry jdbcLockRegistry(LockRepository lockRepository) {
        return new JdbcLockRegistry(lockRepository);
    }
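    // A minimal sketch, not in the original configuration: DefaultLockRepository expects the
    // INT_LOCK table defined by the Spring Integration JDBC schema. One way to create it in the
    // embedded H2 database is to run the script bundled with spring-integration-jdbc at startup.
    // Fully qualified names are used here to keep the sketch self-contained.
    @Bean
    public org.springframework.jdbc.datasource.init.DataSourceInitializer lockTableInitializer(DataSource dataSource) {
        org.springframework.jdbc.datasource.init.DataSourceInitializer initializer =
                new org.springframework.jdbc.datasource.init.DataSourceInitializer();
        initializer.setDataSource(dataSource);
        // schema-h2.sql ships inside the spring-integration-jdbc jar
        initializer.setDatabasePopulator(new org.springframework.jdbc.datasource.init.ResourceDatabasePopulator(
                new org.springframework.core.io.ClassPathResource("org/springframework/integration/jdbc/schema-h2.sql")));
        return initializer;
    }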
}
-------------------------------------------------------------------------------- /spring-integration-distributed-lock-examples/src/main/java/spring/integration/distributed/lock/examples/config/RedisLockConfiguration.java: --------------------------------------------------------------------------------
package spring.integration.distributed.lock.examples.config;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.connection.lettuce.LettuceConnectionFactory;
import org.springframework.integration.redis.util.RedisLockRegistry;
import redis.embedded.RedisServer;

import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;

/**
 * Embedded RedisServer (for local experiments only)
 */
@Configuration
public class RedisLockConfiguration {

    private RedisServer redisServer = new RedisServer(6379);

    @PostConstruct
    public void postConstruct() {
        redisServer.start();
    }

    @PreDestroy
    public void preDestroy() {
        redisServer.stop();
    }

    @Bean
    public LettuceConnectionFactory redisConnectionFactory() {
        return new LettuceConnectionFactory("127.0.0.1", 6379);
    }

    @Bean
    public RedisLockRegistry redisLockRegistry(RedisConnectionFactory redisConnectionFactory) {
        return new RedisLockRegistry(redisConnectionFactory, "redis-lock");
    }

}
-------------------------------------------------------------------------------- /spring-integration-distributed-lock-examples/src/main/java/spring/integration/distributed/lock/examples/config/ZookeeperLockConfiguration.java: --------------------------------------------------------------------------------
package spring.integration.distributed.lock.examples.config;

import org.apache.curator.RetryPolicy;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.curator.test.TestingServer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.integration.zookeeper.lock.ZookeeperLockRegistry;

/**
 * Embedded Zookeeper (for local experiments only)
 */
@Configuration
public class ZookeeperLockConfiguration {

    @Bean
    public CuratorFramework curatorFramework() {
        CuratorFramework curatorFramework = null;
        try {
            TestingServer testingServer = new TestingServer();
            // Retry policy: initial sleep 1000 ms, at most 3 retries
            RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3);
            // Create the Curator client via the factory
            curatorFramework = CuratorFrameworkFactory.newClient(testingServer.getConnectString(), retryPolicy);
            curatorFramework.start();
        } catch (Exception e) {
            e.printStackTrace();
        }
        return curatorFramework;
    }

    @Bean
    public ZookeeperLockRegistry zookeeperLockRegistry(CuratorFramework curatorFramework) {
        return new ZookeeperLockRegistry(curatorFramework);
    }

}
-------------------------------------------------------------------------------- /spring-integration-distributed-lock-examples/src/main/resources/application.properties: --------------------------------------------------------------------------------
spring.application.name=spring-integration-distributed-lock-examples
# Web server port
server.port=8086

spring.datasource.url=jdbc:h2:file:./h2lock
spring.datasource.driverClassName=org.h2.Driver
spring.datasource.username=sa
spring.datasource.password=

spring.h2.console.enabled=true
spring.h2.console.settings.trace=true
spring.h2.console.settings.web-allow-others=true
spring.h2.console.path=/h2-console
--------------------------------------------------------------------------------