├── .dockerignore ├── .github └── workflows │ └── ci.yaml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── Makefile ├── README.md ├── chaostest ├── USAGE.md ├── config.py ├── gen_hosts.py ├── monitor_all_redis.py ├── random_test.py ├── render_compose.py ├── requirements.txt ├── test_stack_mem_broker.yml.j2 └── utils.py ├── clienttest ├── golang │ ├── go.mod │ ├── go.sum │ └── pkg │ │ ├── goredis_test.go │ │ └── utils.go ├── java │ ├── .gitattributes │ ├── .gitignore │ ├── app │ │ ├── build.gradle.kts │ │ └── src │ │ │ ├── main │ │ │ └── java │ │ │ │ └── clienttest │ │ │ │ └── App.java │ │ │ └── test │ │ │ └── java │ │ │ └── clienttest │ │ │ ├── JedisTest.java │ │ │ ├── LettuceTest.java │ │ │ ├── RedissionTest.java │ │ │ └── Utils.java │ ├── gradle │ │ └── wrapper │ │ │ └── gradle-wrapper.properties │ ├── gradlew │ ├── gradlew.bat │ └── settings.gradle.kts └── python │ ├── redis_py_cluster_test.py │ └── requirements.txt ├── conf ├── coordinator.toml ├── mem-broker.toml └── server-proxy.toml ├── docs ├── active_redirection.md ├── architecture.svg ├── best_practice.md ├── broker_external_storage.md ├── broker_http_api.md ├── chunk.md ├── chunk.svg ├── chunk_allocation.txt ├── command_table.json ├── command_table.md ├── development.md ├── docker_compose_example.md ├── generate_command_table.py ├── mem_broker_replica.md ├── memory_broker_api.md ├── meta_command.md ├── migration_benchmark.md ├── migration_local_test.md ├── performance.md ├── performance │ ├── max_latency_connection_number.svg │ ├── max_latency_pipeline_number.svg │ ├── throughput_connection_number.svg │ └── throughput_pipeline_number.svg ├── redis_cluster_protocol.md ├── redis_cluster_protocol.svg ├── redis_cluster_proxy.svg ├── set_up_manually.md ├── slots_migration.md ├── undermoon-logo-raw.svg ├── undermoon-logo.svg └── undermoon_server_proxy.svg ├── examples ├── Dockerfile-undermoon-release ├── Dockerfile-undermoon-test ├── broker_external_http_storage.py ├── docker-compose-mem-broker-example.yml ├── docker-compose-mem-broker.yml ├── mem-broker │ ├── coordinator1.toml │ ├── coordinator2.toml │ ├── init.sh │ ├── mem-broker.toml │ ├── server_proxy1.toml │ ├── server_proxy2.toml │ ├── server_proxy3.toml │ ├── server_proxy4.toml │ ├── server_proxy5.toml │ └── server_proxy6.toml ├── run_broker.sh ├── run_coordinator.sh └── run_proxy.sh ├── local_tests ├── 1 │ └── redis.conf ├── 2 │ └── redis.conf └── redis_cluster │ ├── 1 │ └── redis.conf │ └── 2 │ └── redis.conf ├── rust-toolchain ├── rustfmt.toml ├── scripts ├── dkclean.sh ├── dkkill.sh ├── dkrmi.sh ├── dksh.sh ├── init_single_server_proxy.sh ├── loop_migration_test.sh ├── mem_store_v1_to_v2.py ├── readme_test.sh ├── run_redis_cluster.sh └── run_two_shards.sh ├── src ├── bin │ ├── coordinator.rs │ ├── mem_broker.rs │ └── server_proxy.rs ├── broker │ ├── epoch.rs │ ├── external.rs │ ├── migrate.rs │ ├── mod.rs │ ├── ordered_proxy.rs │ ├── persistence.rs │ ├── query.rs │ ├── replication.rs │ ├── resource.rs │ ├── service.rs │ ├── storage.rs │ ├── store.rs │ ├── update.rs │ └── utils.rs ├── common │ ├── atomic_lock.rs │ ├── batch.rs │ ├── biatomic.rs │ ├── cluster.rs │ ├── config.rs │ ├── future_group.rs │ ├── mod.rs │ ├── proto.rs │ ├── resp_execution.rs │ ├── response.rs │ ├── slot_lock.rs │ ├── track.rs │ ├── try_chunks.rs │ ├── utils.rs │ ├── version.rs │ └── yield_now.rs ├── coordinator │ ├── api.rs │ ├── broker.rs │ ├── core.rs │ ├── detector.rs │ ├── http_mani_broker.rs │ ├── http_meta_broker.rs │ ├── migration.rs │ ├── mod.rs │ ├── recover.rs │ ├── service.rs │ 
└── sync.rs ├── lib.rs ├── migration │ ├── manager.rs │ ├── mod.rs │ ├── scan_migration.rs │ ├── scan_task.rs │ ├── stats.rs │ └── task.rs ├── protocol │ ├── client.rs │ ├── codec.rs │ ├── decoder.rs │ ├── encoder.rs │ ├── fp.rs │ ├── mod.rs │ ├── packet.rs │ ├── resp.rs │ └── stateless.rs ├── proxy │ ├── backend.rs │ ├── blocking.rs │ ├── cluster.rs │ ├── command.rs │ ├── compress.rs │ ├── executor.rs │ ├── manager.rs │ ├── migration_backend.rs │ ├── mod.rs │ ├── reply.rs │ ├── sender.rs │ ├── service.rs │ ├── session.rs │ ├── slot.rs │ ├── slowlog.rs │ └── table.rs └── replication │ ├── manager.rs │ ├── mod.rs │ ├── redis_replicator.rs │ └── replicator.rs └── tests ├── connection.rs ├── proxy_manager_test.rs └── redis_client.rs /.dockerignore: -------------------------------------------------------------------------------- 1 | target/ 2 | chaostest/__pycache__/ 3 | 4 | -------------------------------------------------------------------------------- /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: 4 | - master 5 | pull_request: {} 6 | name: Continuous Integration 7 | jobs: 8 | check: 9 | name: Check 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Checkout sources 13 | uses: actions/checkout@v1 14 | - name: Install stable toolchain 15 | uses: actions-rs/toolchain@v1 16 | with: 17 | toolchain: stable 18 | override: true 19 | - name: Run cargo check 20 | uses: actions-rs/cargo@v1 21 | with: 22 | command: check 23 | test: 24 | name: Test Suite 25 | runs-on: ubuntu-latest 26 | steps: 27 | - name: Checkout sources 28 | uses: actions/checkout@v1 29 | - name: Install stable toolchain 30 | uses: actions-rs/toolchain@v1 31 | with: 32 | toolchain: stable 33 | override: true 34 | - name: Run cargo test 35 | uses: actions-rs/cargo@v1 36 | with: 37 | command: test 38 | fmt: 39 | name: Rustfmt 40 | runs-on: ubuntu-latest 41 | steps: 42 | - name: Checkout sources 43 | uses: actions/checkout@v1 44 | - name: Install stable toolchain 45 | uses: actions-rs/toolchain@v1 46 | with: 47 | toolchain: stable 48 | override: true 49 | components: rustfmt 50 | - name: Run cargo fmt 51 | uses: actions-rs/cargo@v1 52 | with: 53 | command: fmt 54 | args: --all -- --check 55 | clippy: 56 | name: Clippy 57 | runs-on: ubuntu-latest 58 | steps: 59 | - name: Checkout sources 60 | uses: actions/checkout@v1 61 | - name: Install stable toolchain 62 | uses: actions-rs/toolchain@v1 63 | with: 64 | toolchain: stable 65 | override: true 66 | components: clippy 67 | - name: Run cargo clippy 68 | uses: actions-rs/cargo@v1 69 | with: 70 | command: clippy 71 | clienttest: 72 | name: ClientTest 73 | runs-on: ubuntu-latest 74 | steps: 75 | - name: Checkout sources 76 | uses: actions/checkout@v1 77 | - name: Install stable toolchain 78 | uses: actions-rs/toolchain@v1 79 | with: 80 | toolchain: stable 81 | override: true 82 | - name: Build all binaries 83 | uses: actions-rs/cargo@v1 84 | with: 85 | command: build 86 | - name: Run server proxy in background 87 | run: target/debug/server_proxy conf/server-proxy.toml & 88 | env: 89 | RUST_LOG: undermoon=debug,server_proxy=debug 90 | UNDERMOON_ANNOUNCE_ADDRESS: 127.0.0.1:5299 91 | - name: Install redis and start redis server 92 | uses: shogo82148/actions-setup-redis@v1 93 | with: 94 | redis-version: '5.x' 95 | - name: Init server proxy 96 | run: redis-cli -h localhost -p 5299 UMCTL SETCLUSTER v2 2 NOFLAGS mydb 127.0.0.1:6379 1 0-16383 97 | - name: Test server proxy 98 | run: '[ "$(redis-cli -h 
localhost -p 5299 EXISTS somekey)" = "0" ]' 99 | # Run golang tests 100 | - name: Setup go 101 | uses: actions/setup-go@v1 102 | with: 103 | go-version: '1.15' 104 | - name: Run Golang client tests 105 | run: go test -v ./... 106 | working-directory: ./clienttest/golang 107 | env: 108 | CLIENT_TEST_NODE_HOST: 127.0.0.1 109 | CLIENT_TEST_NODE_PORT: 5299 110 | # Run java tests 111 | - name: Install Java 112 | uses: actions/setup-java@v1 113 | with: 114 | java-version: 11 115 | - name: Regenerate Gradle Wrapper 116 | run: gradle wrapper 117 | working-directory: ./clienttest/java 118 | - name: Run Java client tests 119 | run: ./gradlew clean test 120 | working-directory: ./clienttest/java 121 | env: 122 | CLIENT_TEST_NODE_HOST: 127.0.0.1 123 | CLIENT_TEST_NODE_PORT: 5299 124 | # Run python tests 125 | - name: Install Python 126 | uses: actions/setup-python@v2 127 | with: 128 | python-version: '3.8' 129 | - name: Install dependencies 130 | run: pip install -r requirements.txt 131 | working-directory: ./clienttest/python 132 | - name: Run Python client tests 133 | run: python redis_py_cluster_test.py 134 | working-directory: ./clienttest/python 135 | env: 136 | CLIENT_TEST_NODE_HOST: 127.0.0.1 137 | CLIENT_TEST_NODE_PORT: 5299 138 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | chaostest/__pycache__/ 4 | chaostest/chaos-docker-compose.yml 5 | *.pyc 6 | metadata 7 | .idea/ 8 | local_tests/**/*log 9 | local_tests/**/dump.rdb 10 | local_tests/**/nodes.conf 11 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "undermoon" 3 | version = "0.6.2" 4 | authors = ["doyoubi"] 5 | edition = "2018" 6 | 7 | [[bin]] 8 | name="server_proxy" 9 | path="src/bin/server_proxy.rs" 10 | 11 | [[bin]] 12 | name="coordinator" 13 | path="src/bin/coordinator.rs" 14 | 15 | [[bin]] 16 | name="mem_broker" 17 | path="src/bin/mem_broker.rs" 18 | 19 | [dependencies] 20 | bytes = "1" 21 | tokio = { version = "1", features = ["full"] } 22 | tokio-util = { version = "0.7", features = ["codec"] } 23 | tokio-stream = { version = "0.1", features = ["net"] } 24 | warp = { version = "0.3", features = ["compression"] } 25 | futures = "0.3" 26 | crc16 = "0.4" 27 | crc64 = "2" 28 | caseless = "0.2" 29 | arc-swap = "1" 30 | reqwest = { version = "0.11", features = ["json", "gzip"] } 31 | serde = "1" 32 | serde_derive = "1" 33 | serde_json = "1" 34 | log = "0.4" 35 | env_logger = "0.9" 36 | scopeguard = "1" 37 | itertools = "0.10" 38 | futures-batch = "0.6.0" 39 | config = { version = "0.13", features = ["toml"], default_features = false } 40 | btoi = "0.4" 41 | crossbeam = "0.8" 42 | crossbeam-channel = "0.5" 43 | chrono = "0.4" 44 | atoi = "1" 45 | zstd = "0.11" 46 | memchr = "2" 47 | pin-project = "1" 48 | string-error = "0.1.0" 49 | dashmap = "5" 50 | coarsetime = "0.1" 51 | arrayvec = "0.5" # Need this specific version to make sure the size_of:: is correct. 
52 | either = "1" 53 | mockall = "0.11" 54 | backtrace = "0.3" 55 | jemallocator = "0.5" 56 | async-trait = "0.1" 57 | derivative = "2" 58 | flate2 = "1" 59 | base64 = "0.13" 60 | parking_lot = "0.12" 61 | lazy_static = "1" 62 | 63 | [profile.release] 64 | debug = true 65 | lto = true 66 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | build: 2 | cargo build 3 | 4 | test: 5 | RUST_BACKTRACE=full cargo test -- --nocapture 6 | 7 | install-linters: 8 | rustup update 9 | rustup component add clippy 10 | rustup component add rustfmt 11 | 12 | lint: 13 | cargo fmt --all 14 | cargo clippy 15 | 16 | release: 17 | cargo build --release 18 | 19 | server: 20 | RUST_LOG=undermoon=debug,server_proxy=debug target/debug/server_proxy conf/server-proxy.toml 21 | 22 | server-release: 23 | RUST_LOG=undermoon=info,server_proxy=info target/release/server_proxy conf/server-proxy.toml 24 | 25 | coord: 26 | RUST_LOG=undermoon=debug,coordinator=debug target/debug/coordinator conf/coordinator.toml 27 | 28 | broker: 29 | RUST_LOG=warp=debug,undermoon=debug,mem_broker=debug target/debug/mem_broker conf/mem-broker.toml 30 | 31 | broker1: 32 | RUST_LOG=warp=debug,undermoon=debug,mem_broker=debug UNDERMOON_REPLICA_ADDRESSES=127.0.0.1:8899 target/debug/mem_broker conf/mem-broker.toml 33 | 34 | broker2: 35 | RUST_LOG=warp=debug,undermoon=debug,mem_broker=debug UNDERMOON_ADDRESS=127.0.0.1:8899 UNDERMOON_META_FILENAME=metadata2 target/debug/mem_broker conf/mem-broker.toml 36 | 37 | flame: 38 | sudo flamegraph -o $(name).svg target/release/server_proxy conf/server-proxy.toml 39 | 40 | # Image for testing undermoon-operator 41 | docker-build-test-image: 42 | docker image build -f examples/Dockerfile-undermoon-test -t undermoon_test . 43 | 44 | docker-build-release: 45 | docker image build -f examples/Dockerfile-undermoon-release -t undermoon . 
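# Note: the chaostest compose template (chaostest/test_stack_mem_broker.yml.j2) expects a local image named `undermoon`.
# `docker-build-release` produces that tag directly, while the test image built by `docker-build-test-image`
# must be retagged with `docker tag undermoon_test undermoon` (see chaostest/USAGE.md).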
46 | 47 | docker-mem-broker: 48 | docker-compose -f examples/docker-compose-mem-broker.yml up 49 | 50 | docker-mem-broker-example: 51 | docker-compose -f examples/docker-compose-mem-broker-example.yml up 52 | 53 | start-func-test: 54 | python chaostest/render_compose.py -t mem_broker 55 | docker-compose -f chaostest/chaos-docker-compose.yml up 56 | 57 | start-func-test-active: 58 | python chaostest/render_compose.py -t mem_broker -a 59 | docker-compose -f chaostest/chaos-docker-compose.yml up 60 | 61 | start-chaos: 62 | python chaostest/render_compose.py -t mem_broker -f 63 | docker-compose -f chaostest/chaos-docker-compose.yml up 64 | 65 | start-chaos-active: 66 | python chaostest/render_compose.py -t mem_broker -f -a 67 | docker-compose -f chaostest/chaos-docker-compose.yml up 68 | 69 | chaos-test: 70 | python chaostest/random_test.py 71 | 72 | func-test: 73 | python chaostest/random_test.py exit-on-error 74 | 75 | .PHONY: build test install-linters lint release server server-release coord broker broker1 broker2 flame docker-build-test-image docker-build-release \ 76 | docker-mem-broker docker-mem-broker-example start-func-test start-func-test-active start-chaos start-chaos-active chaos-test func-test 77 | 78 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![undermoon logo](docs/undermoon-logo.svg) 2 | 3 | # Undermoon ![Continuous Integration](https://github.com/doyoubi/undermoon/workflows/Continuous%20Integration/badge.svg?event=push) 4 | `Undermoon` is a self-managed Redis clustering system based on the **Redis Cluster Protocol**, supporting: 5 | 6 | - Horizontal scalability and high availability 7 | - Cluster management through an HTTP API 8 | - Automatic failover for both masters and replicas 9 | - Fast scaling 10 | 11 | Any storage system implementing the Redis protocol could also work with undermoon, 12 | such as [KeyDB](https://github.com/JohnSully/KeyDB). 13 | 14 | For a more in-depth explanation of the Redis Cluster Protocol and how Undermoon implements it, 15 | please refer to [Redis Cluster Protocol](./docs/redis_cluster_protocol.md). 16 | 17 | ## Architecture 18 | ![architecture](docs/architecture.svg) 19 | ##### Metadata Storage 20 | The metadata storage stores all the metadata of the whole `undermoon` cluster, 21 | including existing Redis instances, proxies, and exposed Redis clusters. 22 | Currently it's an in-memory storage server called `Memory Broker`. 23 | When using [undermoon-operator](https://github.com/doyoubi/undermoon-operator), 24 | this `Memory Broker` switches to using a `ConfigMap` to store the data. 25 | 26 | ##### Coordinator 27 | The coordinator synchronizes the metadata between the broker and the server proxies. 28 | It also actively checks the liveness of the server proxies and initiates failover. 29 | 30 | ##### Storage Cluster 31 | The storage cluster consists of server proxies and Redis instances. 32 | To applications it serves just like the official Redis Cluster. 33 | A Redis Cluster Proxy could be added between it and the applications 34 | so that applications don't need to upgrade their Redis clients to smart clients. 35 | 36 | ###### Chunk 37 | A chunk is the smallest building block of every single exposed Redis Cluster. 38 | Each chunk consists of 4 Redis instances and 2 server proxies evenly distributed across two different physical machines. 39 | So the number of nodes in each Redis cluster is a multiple of 4, with half masters and half replicas.
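For example, a cluster created from 2 chunks (as in the `cluster.chunkNumber=2` helm example below) has 2 × 4 = 8 Redis nodes (4 masters and 4 replicas) behind 2 × 2 = 4 server proxies.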
40 | 41 | The chunk design makes it very easy to build a cluster with a good topology for **workload balancing**. 42 | 43 | ## Getting Started 44 | ### Run Undermoon in Kubernetes 45 | Using [undermoon-operator](https://github.com/doyoubi/undermoon-operator) 46 | is the easiest way to create Redis clusters if you have Kubernetes. 47 | 48 | ``` 49 | helm install my-undermoon-operator undermoon-operator-<version>.tgz 50 | 51 | helm install \ 52 | --set 'cluster.clusterName=my-cluster-name' \ 53 | --set 'cluster.chunkNumber=2' \ 54 | --set 'cluster.maxMemory=2048' \ 55 | --set 'cluster.port=5299' \ 56 | my-cluster \ 57 | -n my-namespace \ 58 | undermoon-cluster-<version>.tgz 59 | ``` 60 | 61 | See the `README.md` of [undermoon-operator](https://github.com/doyoubi/undermoon-operator) 62 | for how to use it. 63 | 64 | ### Run Undermoon Using Docker Compose 65 | See [docker compose example](./docs/docker_compose_example.md). 66 | 67 | ### Set Up Undermoon Manually 68 | Or you can set everything up without Docker by following this doc: [setting up undermoon manually](docs/set_up_manually.md). 69 | 70 | ## Development 71 | `undermoon` tries to avoid `unsafe` and calls that could crash, such as `unwrap`. 72 | 73 | Run the following commands before committing your code: 74 | ``` 75 | $ make lint 76 | $ make test 77 | ``` 78 | 79 | See more in the [development guide](./docs/development.md). 80 | 81 | ## Documentation 82 | - [Redis Cluster Protocol and Server Proxy](./docs/redis_cluster_protocol.md) 83 | - [Chunk](./docs/chunk.md) 84 | - [Slot Migration](./docs/slots_migration.md) 85 | - [Memory Broker Replica](./docs/mem_broker_replica.md) 86 | - [Configure to support non-cluster-mode clients](./docs/active_redirection.md) 87 | - [Command Table](./docs/command_table.md) 88 | - [Performance](./docs/performance.md) 89 | - [Best Practice](./docs/best_practice.md) 90 | - [Broker External Storage](./docs/broker_external_storage.md) 91 | 92 | ## API 93 | - [Proxy UMCTL command](./docs/meta_command.md) 94 | - [HTTP Broker API](./docs/broker_http_api.md) 95 | - [Memory Broker API](./docs/memory_broker_api.md) 96 | -------------------------------------------------------------------------------- /chaostest/USAGE.md: -------------------------------------------------------------------------------- 1 | # Chaos Testing 2 | 3 | ## Set Up 4 | 5 | - Install Python 3 6 | - Install [Docker Desktop](https://www.docker.com/products/docker-desktop) 7 | 8 | #### (1) Install Python Dependencies 9 | ``` 10 | pip install -r chaostest/requirements.txt 11 | ``` 12 | 13 | #### (2) Generate docker-compose.yaml 14 | ``` 15 | python chaostest/render_compose.py -t mem_broker [-f] [-a] 16 | ``` 17 | 18 | #### (3) Build Docker Images 19 | 20 | Build `undermoon`: 21 | ``` 22 | make docker-build-release 23 | # or 24 | make docker-build-test-image 25 | docker tag undermoon_test undermoon 26 | ``` 27 | 28 | Configure `/etc/hosts`: 29 | ``` 30 | python chaostest/gen_hosts.py >> /etc/hosts 31 | 32 | # You might need sudo 33 | sudo sh -c 'python chaostest/gen_hosts.py >> /etc/hosts' 34 | ``` 35 | 36 | ## Run Test 37 | 38 | Deploy the `chaos` test stack: 39 | ``` 40 | make start-func-test 41 | ``` 42 | 43 | Run the test script. It will randomly create clusters, remove clusters, and start migrations. 44 | ``` 45 | make func-test 46 | ``` 47 | There is no fault injection in this mode, so any error log indicates a bug in the code! 48 | Upon error the script stops immediately.
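(Under the hood, `make func-test` simply runs `python chaostest/random_test.py exit-on-error`, while `make chaos-test` below runs the same script without the `exit-on-error` flag; see the Makefile above.)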
49 | 50 | ## Run Test with Fault Injection 51 | Alternatively, deploy the `chaos` test stack with fault injection: 52 | ``` 53 | make start-chaos 54 | ``` 55 | In this case, the system should be able to recover from any error. 56 | 57 | After running the command above, you can see some services get killed occasionally. 58 | 59 | Run the test script. It will randomly create clusters, remove clusters, and start migrations. 60 | ``` 61 | make chaos-test 62 | ``` 63 | It will not stop upon error, but the system should recover soon. 64 | 65 | ## Debugging 66 | 67 | You can use `monitor_all_redis.py` to see what commands are running on all the Redis instances. 68 | ``` 69 | python chaostest/monitor_all_redis.py 70 | ``` 71 | -------------------------------------------------------------------------------- /chaostest/config.py: -------------------------------------------------------------------------------- 1 | SERVER_PROXY_NUM = 12 2 | SERVER_PROXY_RANGE_START = 6000 3 | SERVER_PROXY_RANGE_END = SERVER_PROXY_RANGE_START + SERVER_PROXY_NUM 4 | 5 | REDIS_NUM = SERVER_PROXY_NUM * 2 6 | REDIS_PORT_RANGE_START = 7000 7 | REDIS_PORT_RANGE_END = REDIS_PORT_RANGE_START + REDIS_NUM 8 | 9 | COORDINATOR_RANGE_START = 8000 10 | COORDINATOR_NUM = 3 11 | 12 | 13 | DOCKER_COMPOSE_CONFIG = { 14 | 'redis_maxmemory': '100MB', 15 | 'server_proxy_num': SERVER_PROXY_NUM, 16 | 'coordinator_port_start': COORDINATOR_RANGE_START, 17 | 'redis_port_start': REDIS_PORT_RANGE_START, 18 | 'server_proxy_port_start': SERVER_PROXY_RANGE_START, 19 | 'redis_ports': list(range(REDIS_PORT_RANGE_START, REDIS_PORT_RANGE_END)), 20 | 'server_proxy_ports': list(range(SERVER_PROXY_RANGE_START, SERVER_PROXY_RANGE_END)), 21 | 'redis_addresses': ['server_proxy{}:{}'.format(i // 2, REDIS_PORT_RANGE_START + i) for i in range(REDIS_NUM)], 22 | 'server_proxy_addresses': ['server_proxy{}:{}'.format(i, SERVER_PROXY_RANGE_START + i) for i in range(SERVER_PROXY_NUM)], 23 | 'coordinator_num': COORDINATOR_NUM, 24 | 'broker_port': 7799, 25 | 'broker_address': 'broker:7799', 26 | 'etcd_port': 2379, 27 | 'active_redirection': False, 28 | 'pumba_commands': { 29 | 'kill': "--random --interval 60s kill 're2:(server_proxy|coordinator).*'", 30 | 'delay': "--random --interval 20s netem --duration 5s delay 're2:(server_proxy|coordinator).*'", 31 | 'loss': "--random --interval 20s netem --duration 5s loss 're2:(server_proxy|coordinator).*'", 32 | 'rate': "--random --interval 20s netem --duration 5s rate 're2:(server_proxy|coordinator).*'", 33 | 'duplicate': "--random --interval 20s netem --duration 5s duplicate 're2:(server_proxy|coordinator).*'", 34 | 'corrupt': "--random --interval 20s netem --duration 5s corrupt 're2:(server_proxy|coordinator).*'", 35 | }, 36 | } 37 | -------------------------------------------------------------------------------- /chaostest/gen_hosts.py: -------------------------------------------------------------------------------- 1 | import config 2 | 3 | def print_hosts(): 4 | server_proxy_num = config.DOCKER_COMPOSE_CONFIG['server_proxy_num'] 5 | redis_addresses = ['redis{}'.format(p) for p in range(server_proxy_num * 2)] 6 | server_proxy_addresses = ['server_proxy{}'.format(p) for p in range(server_proxy_num)] 7 | 8 | for addr in redis_addresses + server_proxy_addresses: 9 | print("127.0.0.1 {}".format(addr)) 10 | 11 | if __name__ == '__main__': 12 | print("# Put this in your /etc/hosts") 13 | print_hosts() 14 | -------------------------------------------------------------------------------- /chaostest/monitor_all_redis.py:
-------------------------------------------------------------------------------- 1 | import datetime 2 | import threading 3 | 4 | import redis 5 | 6 | import config 7 | 8 | class Monitor(): 9 | def __init__(self, connection_pool): 10 | self.connection_pool = connection_pool 11 | self.connection = None 12 | 13 | def __del__(self): 14 | try: 15 | self.reset() 16 | except: 17 | pass 18 | 19 | def reset(self): 20 | if self.connection: 21 | self.connection_pool.release(self.connection) 22 | self.connection = None 23 | 24 | def monitor(self): 25 | if self.connection is None: 26 | self.connection = self.connection_pool.get_connection( 27 | 'monitor', None) 28 | self.connection.send_command("monitor") 29 | return self.listen() 30 | 31 | def parse_response(self): 32 | return self.connection.read_response() 33 | 34 | def listen(self): 35 | while True: 36 | yield self.parse_response() 37 | 38 | 39 | def run_monitor(address): 40 | host, port = address.split(':') 41 | pool = redis.ConnectionPool(host=host, port=port) 42 | monitor = Monitor(pool) 43 | commands = monitor.monitor() 44 | for c in commands: 45 | print(address, datetime.datetime.now(), c) 46 | 47 | # The hostnames used here come from config.py; put them in your /etc/hosts first (see gen_hosts.py): 48 | # 127.0.0.1 server_proxy0 49 | # 127.0.0.1 server_proxy1 50 | # ... 51 | 52 | if __name__ == '__main__': 53 | redis_addresses = config.DOCKER_COMPOSE_CONFIG['redis_addresses'] 54 | for addr in redis_addresses: 55 | # Pass the address as an argument: a lambda would capture the loop variable `addr` by reference, so every thread could end up monitoring the last address. 56 | threading.Thread(target=run_monitor, args=(addr,)).start() 57 | -------------------------------------------------------------------------------- /chaostest/render_compose.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import argparse 3 | 4 | from jinja2 import Environment, FileSystemLoader 5 | 6 | import config 7 | 8 | 9 | def render_docker_compose(docker_compose_yml): 10 | env = Environment(loader=FileSystemLoader('./')) 11 | template = env.get_template('chaostest/test_stack_mem_broker.yml.j2') 12 | output = template.render(config.DOCKER_COMPOSE_CONFIG) 13 | with open(docker_compose_yml, 'w') as f: 14 | f.write(output) 15 | 16 | 17 | if __name__ == '__main__': 18 | # Usage: 19 | # python chaostest/render_compose.py -t mem_broker [-f] [-a] 20 | parser = argparse.ArgumentParser(description='Render docker-compose file for chaos testing') 21 | 22 | parser.add_argument('-t', action='store', dest='test_type', default='mem_broker') 23 | parser.add_argument('-f', action='store_true', dest='enable_failure_injection', default=False) 24 | parser.add_argument('-a', action='store_true', dest='active_redirection', default=False) 25 | 26 | results = parser.parse_args() 27 | 28 | is_mem_broker = results.test_type == 'mem_broker' 29 | enable_failure = results.enable_failure_injection 30 | active_redirection = results.active_redirection 31 | 32 | if not enable_failure: 33 | print("Disable fault injection") 34 | config.DOCKER_COMPOSE_CONFIG['pumba_commands'] = {} 35 | else: 36 | print("Enable fault injection") 37 | 38 | if active_redirection: 39 | config.DOCKER_COMPOSE_CONFIG['active_redirection'] = True 40 | print("Enable active redirection") 41 | else: 42 | config.DOCKER_COMPOSE_CONFIG['active_redirection'] = False 43 | print("Disable active redirection") 44 | 45 | render_docker_compose('chaostest/chaos-docker-compose.yml') 46 | -------------------------------------------------------------------------------- /chaostest/requirements.txt:
-------------------------------------------------------------------------------- 1 | Jinja2==2.11.3 2 | loguru==0.4.1 3 | redis==2.10.6 4 | requests==2.22.0 5 | requests-unixsocket==0.2.0 6 | -------------------------------------------------------------------------------- /chaostest/test_stack_mem_broker.yml.j2: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | broker: 4 | image: "undermoon" 5 | deploy: 6 | restart_policy: 7 | condition: any 8 | delay: 30s 9 | command: mem_broker 10 | ports: 11 | - "{{ broker_port }}:{{ broker_port }}" 12 | environment: 13 | - RUST_LOG=undermoon=debug,mem_broker=debug 14 | - RUST_BACKTRACE=full 15 | - UNDERMOON_ADDRESS=0.0.0.0:{{ broker_port }} 16 | - UNDERMOON_FAILURE_QUORUM=2 17 | - UNDERMOON_MIGRATION_LIMIT=2 18 | - UNDERMOON_STORAGE_TYPE=memory 19 | - UNDERMOON_DEBUG=true 20 | 21 | {% filter indent(width=2) %} 22 | {% for coordinator_id in range(coordinator_num) %} 23 | {% set coordinator_port = coordinator_port_start + coordinator_id -%} 24 | coordinator{{ coordinator_id }}: 25 | image: "undermoon" 26 | deploy: 27 | restart_policy: 28 | condition: any 29 | delay: 30s 30 | command: coordinator 31 | environment: 32 | - RUST_LOG=undermoon=debug,coordinator=debug 33 | - RUST_BACKTRACE=full 34 | - UNDERMOON_ADDRESS=coordinator{{ coordinator_id }}:{{ coordinator_port }} 35 | - UNDERMOON_BROKER_ADDRESS={{ broker_address }} 36 | - UNDERMOON_REPORTER_ID=coordinator{{ coordinator_id }} 37 | ports: 38 | - "{{ coordinator_port }}:{{ coordinator_port }}" 39 | {% endfor %} 40 | {% endfilter %} 41 | 42 | {% filter indent(width=2) %} 43 | {% for redis_id in range(server_proxy_num * 2) %} 44 | {% set redis_port = redis_port_start + redis_id -%} 45 | {% set server_proxy_id = redis_id // 2 -%} 46 | redis{{ redis_id }}: 47 | image: "redis" 48 | deploy: 49 | restart_policy: 50 | condition: any 51 | delay: 30s 52 | command: redis-server --port {{ redis_port }} --slave-announce-ip server_proxy{{ server_proxy_id }} --slave-announce-port {{ redis_port }} --maxmemory {{ redis_maxmemory }} 53 | network_mode: service:server_proxy{{ server_proxy_id }} 54 | {% endfor %} 55 | {% endfilter %} 56 | 57 | {% filter indent(width=2) %} 58 | {% for proxy_id in range(server_proxy_num) %} 59 | {% set proxy_port = server_proxy_port_start + proxy_id -%} 60 | {% set redis_port1 = redis_port_start + 2 * proxy_id -%} 61 | {% set redis_port2 = redis_port_start + 2 * proxy_id + 1 -%} 62 | server_proxy{{ proxy_id }}: 63 | image: "undermoon" 64 | deploy: 65 | restart_policy: 66 | condition: any 67 | delay: 30s 68 | command: server_proxy 69 | ports: 70 | - "{{ proxy_port }}:{{ proxy_port }}" 71 | - "{{ redis_port1 }}:{{ redis_port1 }}" 72 | - "{{ redis_port2 }}:{{ redis_port2 }}" 73 | environment: 74 | - RUST_LOG=undermoon=debug,server_proxy=debug 75 | - RUST_BACKTRACE=full 76 | - UNDERMOON_ADDRESS=0.0.0.0:{{ proxy_port }} 77 | - UNDERMOON_ANNOUNCE_ADDRESS=server_proxy{{ proxy_id }}:{{ proxy_port }} 78 | - UNDERMOON_SLOWLOG_LEN=1024 79 | - UNDERMOON_SLOWLOG_LOG_SLOWER_THAN=50000 80 | - UNDERMOON_THREAD_NUMBER=1 81 | - UNDERMOON_BACKEND_CONN_NUM=4 82 | - UNDERMOON_ACTIVE_REDIRECTION={{ active_redirection }} 83 | {% endfor %} 84 | {% endfilter %} 85 | 86 | {% filter indent(width=2) %} 87 | {% for name, cmd in pumba_commands.items() %} 88 | pumba_{{ name }}: 89 | image: "gaiaadm/pumba" 90 | volumes: 91 | - /var/run/docker.sock:/var/run/docker.sock 92 | command: {{ cmd }} 93 | {% endfor %} 94 | {% endfilter %} 95 | 
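As a quick sanity check of the port layout this template produces, here is a small standalone sketch that mirrors the arithmetic in `config.py` above (an illustration only, not part of the test stack):

```python
# Illustration only: mirrors config.py and test_stack_mem_broker.yml.j2.
# Each server proxy serves two co-located Redis instances.
SERVER_PROXY_RANGE_START = 6000
REDIS_PORT_RANGE_START = 7000

for proxy_id in range(3):  # first three of the 12 proxies, for brevity
    proxy_port = SERVER_PROXY_RANGE_START + proxy_id
    redis_port1 = REDIS_PORT_RANGE_START + 2 * proxy_id
    redis_port2 = redis_port1 + 1
    print(f"server_proxy{proxy_id}:{proxy_port} -> redis ports {redis_port1}, {redis_port2}")

# Output:
# server_proxy0:6000 -> redis ports 7000, 7001
# server_proxy1:6001 -> redis ports 7002, 7003
# server_proxy2:6002 -> redis ports 7004, 7005
```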
-------------------------------------------------------------------------------- /clienttest/golang/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/doyoubi/undermoon/clienttest/golang 2 | 3 | go 1.15 4 | 5 | require ( 6 | github.com/go-redis/redis/v8 v8.4.11 7 | github.com/stretchr/testify v1.7.0 8 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f 9 | ) 10 | -------------------------------------------------------------------------------- /clienttest/golang/pkg/goredis_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "testing" 8 | "time" 9 | 10 | "github.com/go-redis/redis/v8" 11 | "github.com/stretchr/testify/assert" 12 | "golang.org/x/sync/errgroup" 13 | ) 14 | 15 | var clusterClient *redis.ClusterClient 16 | var ctx context.Context = context.Background() 17 | 18 | const expireTime = time.Minute 19 | 20 | func genKey(testcase, key string) string { 21 | return fmt.Sprintf("goredis:%d:%s:%s", time.Now().UnixNano(), testcase, key) 22 | } 23 | 24 | func TestMain(m *testing.M) { 25 | host, port := getNodeAddress() 26 | clusterClient = redis.NewClusterClient(&redis.ClusterOptions{ 27 | Addrs: []string{fmt.Sprintf("%s:%s", host, port)}, 28 | }) 29 | os.Exit(m.Run()) 30 | } 31 | 32 | func TestSingleKeyCommand(t *testing.T) { 33 | assert := assert.New(t) 34 | 35 | key := genKey("singlekey", "key") 36 | const value = "singlevalue" 37 | 38 | _, err := clusterClient.Set(ctx, key, value, expireTime).Result() 39 | assert.NoError(err) 40 | 41 | v, err := clusterClient.Get(ctx, key).Result() 42 | assert.NoError(err) 43 | assert.Equal(value, v) 44 | } 45 | 46 | func TestMultiKeyCommand(t *testing.T) { 47 | assert := assert.New(t) 48 | 49 | key1 := genKey("multikey", "key1:{hashtag}") 50 | key2 := genKey("multikey", "key2:{hashtag}") 51 | const value1 = "value1" 52 | const value2 = "value2" 53 | 54 | _, err := clusterClient.MSet(ctx, key1, value1, key2, value2).Result() 55 | assert.NoError(err) 56 | 57 | values, err := clusterClient.MGet(ctx, key1, key2).Result() 58 | assert.NoError(err) 59 | assert.Equal(value1, values[0]) 60 | assert.Equal(value2, values[1]) 61 | 62 | count, err := clusterClient.Del(ctx, key1, key2).Result() 63 | assert.NoError(err) 64 | assert.Equal(int64(2), count) 65 | 66 | count, err = clusterClient.Exists(ctx, key1, key2).Result() 67 | assert.NoError(err) 68 | assert.Equal(int64(0), count) 69 | } 70 | 71 | func TestBrpoplpush(t *testing.T) { 72 | assert := assert.New(t) 73 | 74 | key1 := genKey("blocking", "brpoplpush_key1:{hashtag}") 75 | key2 := genKey("blocking", "brpoplpush_key2:{hashtag}") 76 | const value = "listvalue" 77 | 78 | group, ctx := errgroup.WithContext(ctx) 79 | group.Go(func() error { 80 | res, err := clusterClient.BRPopLPush(ctx, key1, key2, time.Minute).Result() 81 | assert.NoError(err) 82 | assert.Equal(value, res) 83 | return nil 84 | }) 85 | 86 | time.Sleep(time.Second) 87 | l, err := clusterClient.RPush(ctx, key1, value).Result() 88 | assert.NoError(err) 89 | assert.Equal(int64(1), l) 90 | 91 | group.Wait() 92 | } 93 | 94 | func TestBlpop(t *testing.T) { 95 | testListBlockingCommandHelper(t, func(ctx context.Context, key1, key2 string) ([]string, error) { 96 | return clusterClient.BLPop(ctx, time.Minute, key1, key2).Result() 97 | }) 98 | } 99 | 100 | func TestBrpop(t *testing.T) { 101 | testListBlockingCommandHelper(t, func(ctx context.Context, key1, key2 string) ([]string, error) { 
102 | return clusterClient.BRPop(ctx, time.Minute, key1, key2).Result() 103 | }) 104 | } 105 | 106 | type ListCmdFunc func(context.Context, string, string) ([]string, error) 107 | 108 | func testListBlockingCommandHelper(t *testing.T, cmdFunc ListCmdFunc) { 109 | assert := assert.New(t) 110 | 111 | key1 := genKey("blocking", "listkey1:{hashtag}") 112 | key2 := genKey("blocking", "listkey2:{hashtag}") 113 | const value = "listvalue" 114 | 115 | group, ctx := errgroup.WithContext(ctx) 116 | group.Go(func() error { 117 | res, err := cmdFunc(ctx, key1, key2) 118 | assert.NoError(err) 119 | assert.Equal(2, len(res)) 120 | assert.Equal(key2, res[0]) 121 | assert.Equal(value, res[1]) 122 | return nil 123 | }) 124 | 125 | time.Sleep(time.Second) 126 | l, err := clusterClient.RPush(ctx, key2, value).Result() 127 | assert.NoError(err) 128 | assert.Equal(int64(1), l) 129 | 130 | group.Wait() 131 | } 132 | 133 | func TestBzpopmin(t *testing.T) { 134 | testZsetBlockingCommandHelper(t, func(ctx context.Context, key1, key2 string) (*redis.ZWithKey, error) { 135 | return clusterClient.BZPopMin(ctx, time.Minute, key1, key2).Result() 136 | }) 137 | } 138 | 139 | func TestBzpopmax(t *testing.T) { 140 | testZsetBlockingCommandHelper(t, func(ctx context.Context, key1, key2 string) (*redis.ZWithKey, error) { 141 | return clusterClient.BZPopMax(ctx, time.Minute, key1, key2).Result() 142 | }) 143 | } 144 | 145 | type ZsetCmdFunc func(context.Context, string, string) (*redis.ZWithKey, error) 146 | 147 | func testZsetBlockingCommandHelper(t *testing.T, cmdFunc ZsetCmdFunc) { 148 | assert := assert.New(t) 149 | 150 | key1 := genKey("blocking", "zsetkey1:{hashtag}") 151 | key2 := genKey("blocking", "zsetkey2:{hashtag}") 152 | const member = "zsetmember" 153 | const score = 0.0 154 | 155 | group, ctx := errgroup.WithContext(ctx) 156 | group.Go(func() error { 157 | res, err := cmdFunc(ctx, key1, key2) 158 | assert.NoError(err) 159 | assert.NotNil(res) 160 | assert.Equal(key2, res.Key) 161 | assert.Equal(member, res.Member) 162 | assert.Equal(score, res.Score) 163 | return nil 164 | }) 165 | 166 | time.Sleep(time.Second) 167 | l, err := clusterClient.ZAdd(ctx, key2, &redis.Z{ 168 | Score: score, 169 | Member: member, 170 | }).Result() 171 | assert.NoError(err) 172 | assert.Equal(int64(1), l) 173 | 174 | group.Wait() 175 | } 176 | 177 | func TestSingleKeyEval(t *testing.T) { 178 | assert := assert.New(t) 179 | 180 | key := genKey("singlekey_eval", "key") 181 | const arg = "arg" 182 | 183 | const script = "return {KEYS[1],ARGV[1]}" 184 | vals, err := clusterClient.Eval(ctx, script, []string{key}, arg).Result() 185 | assert.NoError(err) 186 | 187 | strs := vals.([]interface{}) 188 | assert.Equal(2, len(strs)) 189 | assert.Equal(key, strs[0].(string)) 190 | assert.Equal(arg, strs[1].(string)) 191 | } 192 | 193 | func TestMultiKeyEval(t *testing.T) { 194 | assert := assert.New(t) 195 | 196 | key1 := genKey("multikey-eval", "key1:{hashtag}") 197 | key2 := genKey("multikey-eval", "key2:{hashtag}") 198 | const arg1 = "arg1" 199 | const arg2 = "arg2" 200 | 201 | const script = "return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}" 202 | vals, err := clusterClient.Eval(ctx, script, []string{key1, key2}, arg1, arg2).Result() 203 | assert.NoError(err) 204 | 205 | strs := vals.([]interface{}) 206 | assert.Equal(4, len(strs)) 207 | assert.Equal(key1, strs[0].(string)) 208 | assert.Equal(key2, strs[1].(string)) 209 | assert.Equal(arg1, strs[2].(string)) 210 | assert.Equal(arg2, strs[3].(string)) 211 | } 212 |
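These Go tests read the proxy address from the `CLIENT_TEST_NODE_HOST` / `CLIENT_TEST_NODE_PORT` environment variables, defaulting to `localhost:5299` (see `utils.go` below); the CI workflow above runs them with `go test -v ./...` from `clienttest/golang` against a server proxy initialized via `UMCTL SETCLUSTER`.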
-------------------------------------------------------------------------------- /clienttest/golang/pkg/utils.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "os" 5 | ) 6 | 7 | const nodeHostEnvName = "CLIENT_TEST_NODE_HOST" 8 | const nodePortEnvName = "CLIENT_TEST_NODE_PORT" 9 | 10 | func getNodeAddress() (string, string) { 11 | host := os.Getenv(nodeHostEnvName) 12 | if host == "" { 13 | host = "localhost" 14 | } 15 | 16 | port := os.Getenv(nodePortEnvName) 17 | if port == "" { 18 | return host, "5299" 19 | } 20 | return host, port 21 | } 22 | -------------------------------------------------------------------------------- /clienttest/java/.gitattributes: -------------------------------------------------------------------------------- 1 | # 2 | # https://help.github.com/articles/dealing-with-line-endings/ 3 | # 4 | # These are explicitly windows files and should use crlf 5 | *.bat text eol=crlf 6 | 7 | -------------------------------------------------------------------------------- /clienttest/java/.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore Gradle project-specific cache directory 2 | .gradle 3 | # Do not put gradle wrapper in git 4 | gradle/wrapper/gradle-wrapper.jar 5 | 6 | # Ignore Gradle build output directory 7 | build 8 | 9 | # IDE 10 | .idea/ 11 | 12 | -------------------------------------------------------------------------------- /clienttest/java/app/build.gradle.kts: -------------------------------------------------------------------------------- 1 | /* 2 | * This file was generated by the Gradle 'init' task. 3 | * 4 | * This generated file contains a sample Java application project to get you started. 5 | * For more details take a look at the 'Building Java & JVM projects' chapter in the Gradle 6 | * User Manual available at https://docs.gradle.org/6.7.1/userguide/building_java_projects.html 7 | */ 8 | 9 | plugins { 10 | // Apply the application plugin to add support for building a CLI application in Java. 11 | application 12 | 13 | id("com.adarshr.test-logger") version "2.1.1" 14 | } 15 | 16 | repositories { 17 | // Use JCenter for resolving dependencies. 18 | jcenter() 19 | } 20 | 21 | dependencies { 22 | // Use TestNG framework, also requires calling test.useTestNG() below 23 | testImplementation("org.testng:testng:7.2.0") 24 | testImplementation("redis.clients:jedis:3.5.1") 25 | testImplementation("io.lettuce:lettuce-core:6.0.3.RELEASE") 26 | testImplementation("org.redisson:redisson:3.15.1") 27 | testImplementation("org.slf4j:slf4j-nop:1.7.25") 28 | 29 | // This dependency is used by the application. 30 | implementation("com.google.guava:guava:29.0-jre") 31 | } 32 | 33 | application { 34 | // Define the main class for the application. 35 | mainClass.set("clienttest.App") 36 | } 37 | 38 | tasks.test { 39 | // Use TestNG for unit tests. 40 | useTestNG() 41 | } 42 | 43 | java { 44 | toolchain { 45 | languageVersion.set(JavaLanguageVersion.of(11)) 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /clienttest/java/app/src/main/java/clienttest/App.java: -------------------------------------------------------------------------------- 1 | /* 2 | * This Java source file was generated by the Gradle 'init' task. 
3 | */ 4 | package clienttest; 5 | 6 | public class App { 7 | public String getGreeting() { 8 | return "This project is only used for running unit tests!"; 9 | } 10 | 11 | public static void main(String[] args) { 12 | System.out.println(new App().getGreeting()); 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /clienttest/java/app/src/test/java/clienttest/JedisTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * This Java source file was generated by the Gradle 'init' task. 3 | */ 4 | package clienttest; 5 | 6 | import org.testng.annotations.BeforeClass; 7 | import org.testng.annotations.Test; 8 | import redis.clients.jedis.HostAndPort; 9 | import redis.clients.jedis.JedisCluster; 10 | 11 | import java.util.List; 12 | import java.util.function.BiFunction; 13 | 14 | import static org.testng.Assert.assertEquals; 15 | 16 | public class JedisTest { 17 | JedisCluster jc; 18 | 19 | @BeforeClass 20 | public void setUp() { 21 | var node = new HostAndPort(Utils.getNodeHost(), Utils.getNodePort()); 22 | this.jc = new JedisCluster(node); 23 | } 24 | 25 | String genKey(String testCase, String key) { 26 | final var now = new java.util.Date(); 27 | return String.format("jedis:%d:%s:%s", now.toInstant().toEpochMilli(), testCase, key); 28 | } 29 | 30 | @Test 31 | public void singleKeyCommand() { 32 | final var key = this.genKey("singleKey", "somekey"); 33 | final var value = "value"; 34 | this.jc.setex(key, 60, value); 35 | final var v = this.jc.get(key); 36 | assertEquals(value, v); 37 | } 38 | 39 | @Test 40 | public void multiKeyCommand() { 41 | // for hashtag, refers to https://redis.io/topics/cluster-spec 42 | final var key1 = this.genKey("multikey", "key1:{hashtag}"); 43 | final var key2 = this.genKey("multikey", "key2:{hashtag}"); 44 | final var value1 = "value1"; 45 | final var value2 = "value2"; 46 | 47 | this.jc.mset(key1, value1, key2, value2); 48 | final var values = this.jc.mget(key1, key2); 49 | assertEquals(values.size(), 2); 50 | assertEquals(values.get(0), value1); 51 | assertEquals(values.get(1), value2); 52 | } 53 | 54 | @Test 55 | public void multiKeyNXCommand() { 56 | // for hashtag, refers to https://redis.io/topics/cluster-spec 57 | final var key1 = this.genKey("multikey_nx", "key1:{hashtag}"); 58 | final var key2 = this.genKey("multikey_nx", "key2:{hashtag}"); 59 | final var key3 = this.genKey("multikey_nx", "key3:{hashtag}"); 60 | final var value1 = "value1"; 61 | final var value2 = "value2"; 62 | final var value3 = "value3"; 63 | 64 | this.jc.mset(key1, value1, key2, value2); 65 | final var values = this.jc.mget(key1, key2); 66 | assertEquals(values.size(), 2); 67 | assertEquals(values.get(0), value1); 68 | assertEquals(values.get(1), value2); 69 | 70 | long nSet = this.jc.msetnx(key3, value3, key1, value1); 71 | assertEquals(nSet, 0L); 72 | } 73 | 74 | @Test 75 | public void brpoplpush() throws InterruptedException { 76 | final var key1 = this.genKey("blocking", "brpoplpush_key1:{hashtag}"); 77 | final var key2 = this.genKey("blocking", "brpoplpush_key2:{hashtag}"); 78 | final var value = "listvalue"; 79 | 80 | Runnable blockingCmd = () -> { 81 | final var res = this.jc.brpoplpush(key1, key2, 60); 82 | assertEquals(res, value); 83 | }; 84 | var thread = new Thread(blockingCmd); 85 | thread.start(); 86 | 87 | Thread.sleep(1000); 88 | var listLen = this.jc.rpush(key1, value); 89 | assertEquals(listLen, Long.valueOf(1)); 90 | 91 | thread.join(); 92 | } 93 | 94 | @Test 95 | void 
blpop() throws InterruptedException { 96 | listBlockingCommandHelper((key1, key2) -> this.jc.blpop(60, key1, key2)); 97 | } 98 | 99 | @Test 100 | void brpop() throws InterruptedException { 101 | listBlockingCommandHelper((key1, key2) -> this.jc.brpop(60, key1, key2)); 102 | } 103 | 104 | void listBlockingCommandHelper(BiFunction<String, String, List<String>> cmdFunc) throws InterruptedException { 105 | final var key1 = this.genKey("blocking", "key1:{hashtag}"); 106 | final var key2 = this.genKey("blocking", "key2:{hashtag}"); 107 | final var value = "listvalue"; 108 | 109 | Runnable blockingCmd = () -> { 110 | final var res = cmdFunc.apply(key1, key2); 111 | assertEquals(res.size(), 2); 112 | assertEquals(res.get(0), key2); 113 | assertEquals(res.get(1), value); 114 | }; 115 | var thread = new Thread(blockingCmd); 116 | thread.start(); 117 | 118 | Thread.sleep(1000); 119 | var listLen = this.jc.rpush(key2, value); 120 | assertEquals(listLen, Long.valueOf(1)); 121 | 122 | thread.join(); 123 | } 124 | 125 | // Jedis still does not support BZPOPMIN, BZPOPMAX 126 | // https://github.com/redis/jedis/issues/2177 127 | } 128 | -------------------------------------------------------------------------------- /clienttest/java/app/src/test/java/clienttest/LettuceTest.java: -------------------------------------------------------------------------------- 1 | package clienttest; 2 | 3 | import io.lettuce.core.KeyValue; 4 | import io.lettuce.core.cluster.RedisClusterClient; 5 | import org.testng.annotations.BeforeClass; 6 | import org.testng.annotations.Test; 7 | 8 | import java.util.HashMap; 9 | import java.util.function.BiFunction; 10 | 11 | import static org.testng.Assert.assertEquals; 12 | 13 | public class LettuceTest { 14 | RedisClusterClient lc; 15 | 16 | @BeforeClass 17 | public void setUp() { 18 | var address = String.format("redis://%s:%d", Utils.getNodeHost(), Utils.getNodePort()); 19 | this.lc = RedisClusterClient.create(address); 20 | } 21 | 22 | String genKey(String testCase, String key) { 23 | final var now = new java.util.Date(); 24 | return String.format("lettuce:%d:%s:%s", now.toInstant().toEpochMilli(), testCase, key); 25 | } 26 | 27 | @Test 28 | public void singleKeyCommand() { 29 | final var key = this.genKey("singleKey", "somekey"); 30 | final var value = "value"; 31 | final var conn = this.lc.connect(); 32 | final var syncCmd = conn.sync(); 33 | syncCmd.setex(key, 60, value); 34 | final var v = syncCmd.get(key); 35 | assertEquals(value, v); 36 | } 37 | 38 | @Test 39 | public void multiKeyCommand() { 40 | // for hashtag, refers to https://redis.io/topics/cluster-spec 41 | final var key1 = this.genKey("multikey", "key1:{hashtag}"); 42 | final var key2 = this.genKey("multikey", "key2:{hashtag}"); 43 | final var value1 = "value1"; 44 | final var value2 = "value2"; 45 | 46 | final var conn = this.lc.connect(); 47 | final var syncCmd = conn.sync(); 48 | 49 | var map = new HashMap<String, String>(); 50 | map.put(key1, value1); 51 | map.put(key2, value2); 52 | syncCmd.mset(map); 53 | final var values = syncCmd.mget(key1, key2); 54 | assertEquals(values.size(), 2); 55 | assertEquals(values.get(0).getValue(), value1); 56 | assertEquals(values.get(1).getValue(), value2); 57 | } 58 | 59 | @Test 60 | public void brpoplpush() throws InterruptedException { 61 | final var key1 = this.genKey("blocking", "brpoplpush_key1:{hashtag}"); 62 | final var key2 = this.genKey("blocking", "brpoplpush_key2:{hashtag}"); 63 | final var value = "listvalue"; 64 | 65 | Runnable blockingCmd = () -> { 66 | final var res = this.lc.connect().sync().brpoplpush(60,
key1, key2); 67 | assertEquals(res, value); 68 | }; 69 | var thread = new Thread(blockingCmd); 70 | thread.start(); 71 | 72 | Thread.sleep(1000); 73 | var listLen = this.lc.connect().sync().rpush(key1, value); 74 | assertEquals(listLen, Long.valueOf(1)); 75 | 76 | thread.join(); 77 | } 78 | 79 | @Test 80 | void blpop() throws InterruptedException { 81 | listBlockingCommandHelper((key1, key2) -> this.lc.connect().sync().blpop(60, key1, key2)); 82 | } 83 | 84 | @Test 85 | void brpop() throws InterruptedException { 86 | listBlockingCommandHelper((key1, key2) -> this.lc.connect().sync().brpop(60, key1, key2)); 87 | } 88 | 89 | void listBlockingCommandHelper(BiFunction<String, String, KeyValue<String, String>> cmdFunc) throws InterruptedException { 90 | final var key1 = this.genKey("blocking", "key1:{hashtag}"); 91 | final var key2 = this.genKey("blocking", "key2:{hashtag}"); 92 | final var value = "listvalue"; 93 | 94 | Runnable blockingCmd = () -> { 95 | final var res = cmdFunc.apply(key1, key2); 96 | assertEquals(res.getKey(), key2); 97 | assertEquals(res.getValue(), value); 98 | }; 99 | var thread = new Thread(blockingCmd); 100 | thread.start(); 101 | 102 | Thread.sleep(1000); 103 | var listLen = this.lc.connect().sync().rpush(key2, value); 104 | assertEquals(listLen, Long.valueOf(1)); 105 | 106 | thread.join(); 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /clienttest/java/app/src/test/java/clienttest/RedissionTest.java: -------------------------------------------------------------------------------- 1 | package clienttest; 2 | 3 | import org.redisson.Redisson; 4 | import org.redisson.api.RBucket; 5 | import org.redisson.api.RBuckets; 6 | import org.redisson.api.RedissonClient; 7 | import org.redisson.config.Config; 8 | import org.redisson.config.ReadMode; 9 | import org.testng.annotations.BeforeClass; 10 | import org.testng.annotations.Test; 11 | 12 | import java.util.HashMap; 13 | import java.util.Map; 14 | import java.util.concurrent.TimeUnit; 15 | 16 | import static org.testng.Assert.assertEquals; 17 | 18 | public class RedissionTest { 19 | RedissonClient rd; 20 | 21 | @BeforeClass 22 | public void setUp() { 23 | var address = String.format("redis://%s:%d", Utils.getNodeHost(), Utils.getNodePort()); 24 | 25 | Config config = new Config(); 26 | config.useClusterServers() 27 | .setScanInterval(2000) 28 | .setReadMode(ReadMode.MASTER) 29 | .addNodeAddress(address); 30 | this.rd = Redisson.create(config); 31 | } 32 | 33 | String genKey(String testCase, String key) { 34 | final var now = new java.util.Date(); 35 | return String.format("redission:%d:%s:%s", now.toInstant().toEpochMilli(), testCase, key); 36 | } 37 | 38 | @Test 39 | public void singleKeyCommand() { 40 | final var key = this.genKey("singleKey", "somekey"); 41 | final var value = "value"; 42 | RBucket<String> bucket = this.rd.getBucket(key); 43 | bucket.set(value, 60, TimeUnit.SECONDS); 44 | final var v = bucket.get(); 45 | assertEquals(value, v); 46 | } 47 | 48 | @Test 49 | public void multiKeyCommand() { 50 | // for hashtag, refers to https://redis.io/topics/cluster-spec 51 | final var key1 = this.genKey("multikey", "key1:{hashtag}"); 52 | final var key2 = this.genKey("multikey", "key2:{hashtag}"); 53 | final var value1 = "value1"; 54 | final var value2 = "value2"; 55 | 56 | RBuckets buckets = this.rd.getBuckets(); 57 | 58 | var map = new HashMap<String, String>(); 59 | map.put(key1, value1); 60 | map.put(key2, value2); 61 | buckets.set(map); 62 | 63 | Map<String, String> loadedBuckets = buckets.get(key1, key2); 64 |
assertEquals(loadedBuckets.size(), 2); 65 | assertEquals(loadedBuckets.get(key1), value1); 66 | assertEquals(loadedBuckets.get(key2), value2); 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /clienttest/java/app/src/test/java/clienttest/Utils.java: -------------------------------------------------------------------------------- 1 | package clienttest; 2 | 3 | public class Utils { 4 | static final String NODE_HOST = "CLIENT_TEST_NODE_HOST"; 5 | static final String NODE_PORT = "CLIENT_TEST_NODE_PORT"; 6 | 7 | public static String getNodeHost() { 8 | var host = System.getenv(NODE_HOST); 9 | if (host == null || host.isEmpty()) { 10 | return "localhost"; 11 | } 12 | return host; 13 | } 14 | 15 | public static int getNodePort() { 16 | var port = System.getenv(NODE_PORT); 17 | if (port == null || port.isEmpty()) { 18 | return 5299; 19 | } 20 | return Integer.parseInt(port); 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /clienttest/java/gradle/wrapper/gradle-wrapper.properties: -------------------------------------------------------------------------------- 1 | distributionBase=GRADLE_USER_HOME 2 | distributionPath=wrapper/dists 3 | distributionUrl=https\://services.gradle.org/distributions/gradle-6.7.1-bin.zip 4 | zipStoreBase=GRADLE_USER_HOME 5 | zipStorePath=wrapper/dists 6 | -------------------------------------------------------------------------------- /clienttest/java/gradlew: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | # 4 | # Copyright 2015 the original author or authors. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # https://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | # 18 | 19 | ############################################################################## 20 | ## 21 | ## Gradle start up script for UN*X 22 | ## 23 | ############################################################################## 24 | 25 | # Attempt to set APP_HOME 26 | # Resolve links: $0 may be a link 27 | PRG="$0" 28 | # Need this for relative symlinks. 29 | while [ -h "$PRG" ] ; do 30 | ls=`ls -ld "$PRG"` 31 | link=`expr "$ls" : '.*-> \(.*\)$'` 32 | if expr "$link" : '/.*' > /dev/null; then 33 | PRG="$link" 34 | else 35 | PRG=`dirname "$PRG"`"/$link" 36 | fi 37 | done 38 | SAVED="`pwd`" 39 | cd "`dirname \"$PRG\"`/" >/dev/null 40 | APP_HOME="`pwd -P`" 41 | cd "$SAVED" >/dev/null 42 | 43 | APP_NAME="Gradle" 44 | APP_BASE_NAME=`basename "$0"` 45 | 46 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 47 | DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' 48 | 49 | # Use the maximum available, or set MAX_FD != -1 to use that value. 50 | MAX_FD="maximum" 51 | 52 | warn () { 53 | echo "$*" 54 | } 55 | 56 | die () { 57 | echo 58 | echo "$*" 59 | echo 60 | exit 1 61 | } 62 | 63 | # OS specific support (must be 'true' or 'false'). 
64 | cygwin=false 65 | msys=false 66 | darwin=false 67 | nonstop=false 68 | case "`uname`" in 69 | CYGWIN* ) 70 | cygwin=true 71 | ;; 72 | Darwin* ) 73 | darwin=true 74 | ;; 75 | MINGW* ) 76 | msys=true 77 | ;; 78 | NONSTOP* ) 79 | nonstop=true 80 | ;; 81 | esac 82 | 83 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar 84 | 85 | 86 | # Determine the Java command to use to start the JVM. 87 | if [ -n "$JAVA_HOME" ] ; then 88 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then 89 | # IBM's JDK on AIX uses strange locations for the executables 90 | JAVACMD="$JAVA_HOME/jre/sh/java" 91 | else 92 | JAVACMD="$JAVA_HOME/bin/java" 93 | fi 94 | if [ ! -x "$JAVACMD" ] ; then 95 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME 96 | 97 | Please set the JAVA_HOME variable in your environment to match the 98 | location of your Java installation." 99 | fi 100 | else 101 | JAVACMD="java" 102 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 103 | 104 | Please set the JAVA_HOME variable in your environment to match the 105 | location of your Java installation." 106 | fi 107 | 108 | # Increase the maximum file descriptors if we can. 109 | if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then 110 | MAX_FD_LIMIT=`ulimit -H -n` 111 | if [ $? -eq 0 ] ; then 112 | if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then 113 | MAX_FD="$MAX_FD_LIMIT" 114 | fi 115 | ulimit -n $MAX_FD 116 | if [ $? -ne 0 ] ; then 117 | warn "Could not set maximum file descriptor limit: $MAX_FD" 118 | fi 119 | else 120 | warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" 121 | fi 122 | fi 123 | 124 | # For Darwin, add options to specify how the application appears in the dock 125 | if $darwin; then 126 | GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" 127 | fi 128 | 129 | # For Cygwin or MSYS, switch paths to Windows format before running java 130 | if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then 131 | APP_HOME=`cygpath --path --mixed "$APP_HOME"` 132 | CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` 133 | 134 | JAVACMD=`cygpath --unix "$JAVACMD"` 135 | 136 | # We build the pattern for arguments to be converted via cygpath 137 | ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` 138 | SEP="" 139 | for dir in $ROOTDIRSRAW ; do 140 | ROOTDIRS="$ROOTDIRS$SEP$dir" 141 | SEP="|" 142 | done 143 | OURCYGPATTERN="(^($ROOTDIRS))" 144 | # Add a user-defined pattern to the cygpath arguments 145 | if [ "$GRADLE_CYGPATTERN" != "" ] ; then 146 | OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" 147 | fi 148 | # Now convert the arguments - kludge to limit ourselves to /bin/sh 149 | i=0 150 | for arg in "$@" ; do 151 | CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` 152 | CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option 153 | 154 | if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition 155 | eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` 156 | else 157 | eval `echo args$i`="\"$arg\"" 158 | fi 159 | i=`expr $i + 1` 160 | done 161 | case $i in 162 | 0) set -- ;; 163 | 1) set -- "$args0" ;; 164 | 2) set -- "$args0" "$args1" ;; 165 | 3) set -- "$args0" "$args1" "$args2" ;; 166 | 4) set -- "$args0" "$args1" "$args2" "$args3" ;; 167 | 5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; 168 | 6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; 169 | 7) set -- "$args0" "$args1" "$args2" "$args3" 
"$args4" "$args5" "$args6" ;; 170 | 8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; 171 | 9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; 172 | esac 173 | fi 174 | 175 | # Escape application args 176 | save () { 177 | for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done 178 | echo " " 179 | } 180 | APP_ARGS=`save "$@"` 181 | 182 | # Collect all arguments for the java command, following the shell quoting and substitution rules 183 | eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" 184 | 185 | exec "$JAVACMD" "$@" 186 | -------------------------------------------------------------------------------- /clienttest/java/gradlew.bat: -------------------------------------------------------------------------------- 1 | @rem 2 | @rem Copyright 2015 the original author or authors. 3 | @rem 4 | @rem Licensed under the Apache License, Version 2.0 (the "License"); 5 | @rem you may not use this file except in compliance with the License. 6 | @rem You may obtain a copy of the License at 7 | @rem 8 | @rem https://www.apache.org/licenses/LICENSE-2.0 9 | @rem 10 | @rem Unless required by applicable law or agreed to in writing, software 11 | @rem distributed under the License is distributed on an "AS IS" BASIS, 12 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | @rem See the License for the specific language governing permissions and 14 | @rem limitations under the License. 15 | @rem 16 | 17 | @if "%DEBUG%" == "" @echo off 18 | @rem ########################################################################## 19 | @rem 20 | @rem Gradle startup script for Windows 21 | @rem 22 | @rem ########################################################################## 23 | 24 | @rem Set local scope for the variables with windows NT shell 25 | if "%OS%"=="Windows_NT" setlocal 26 | 27 | set DIRNAME=%~dp0 28 | if "%DIRNAME%" == "" set DIRNAME=. 29 | set APP_BASE_NAME=%~n0 30 | set APP_HOME=%DIRNAME% 31 | 32 | @rem Resolve any "." and ".." in APP_HOME to make it shorter. 33 | for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi 34 | 35 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 36 | set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" 37 | 38 | @rem Find java.exe 39 | if defined JAVA_HOME goto findJavaFromJavaHome 40 | 41 | set JAVA_EXE=java.exe 42 | %JAVA_EXE% -version >NUL 2>&1 43 | if "%ERRORLEVEL%" == "0" goto execute 44 | 45 | echo. 46 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 47 | echo. 48 | echo Please set the JAVA_HOME variable in your environment to match the 49 | echo location of your Java installation. 50 | 51 | goto fail 52 | 53 | :findJavaFromJavaHome 54 | set JAVA_HOME=%JAVA_HOME:"=% 55 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe 56 | 57 | if exist "%JAVA_EXE%" goto execute 58 | 59 | echo. 60 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 61 | echo. 62 | echo Please set the JAVA_HOME variable in your environment to match the 63 | echo location of your Java installation. 
64 | 65 | goto fail 66 | 67 | :execute 68 | @rem Setup the command line 69 | 70 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar 71 | 72 | 73 | @rem Execute Gradle 74 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* 75 | 76 | :end 77 | @rem End local scope for the variables with windows NT shell 78 | if "%ERRORLEVEL%"=="0" goto mainEnd 79 | 80 | :fail 81 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of 82 | rem the _cmd.exe /c_ return code! 83 | if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 84 | exit /b 1 85 | 86 | :mainEnd 87 | if "%OS%"=="Windows_NT" endlocal 88 | 89 | :omega 90 | -------------------------------------------------------------------------------- /clienttest/java/settings.gradle.kts: -------------------------------------------------------------------------------- 1 | /* 2 | * This file was generated by the Gradle 'init' task. 3 | * 4 | * The settings file is used to specify which projects to include in your build. 5 | * 6 | * Detailed information about configuring a multi-project build in Gradle can be found 7 | * in the user manual at https://docs.gradle.org/6.7.1/userguide/multi_project_builds.html 8 | */ 9 | 10 | rootProject.name = "clienttest" 11 | include("app") 12 | -------------------------------------------------------------------------------- /clienttest/python/redis_py_cluster_test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | 4 | from rediscluster import RedisCluster 5 | 6 | 7 | def create_client(host, port): 8 | startup_nodes = [{"host": host, "port": port}] 9 | return RedisCluster( 10 | startup_nodes=startup_nodes, 11 | decode_responses=True, 12 | skip_full_coverage_check=True, 13 | ) 14 | 15 | 16 | def gen_key(testcase, key): 17 | return 'goredis:{}:{}:{}'.format(time.time(), testcase, key) 18 | 19 | 20 | def test_single_key_command(client): 21 | key = gen_key('singlekey', 'key') 22 | value = 'singlevalue' 23 | 24 | client.setex(key, 60, value) 25 | v = client.get(key) 26 | assert v == value 27 | 28 | 29 | def test_multi_key_command(client): 30 | key1 = gen_key('multikey', 'key1:{hashtag}') 31 | key2 = gen_key('multikey', 'key2:{hashtag}') 32 | value1 = 'value1' 33 | value2 = 'value2' 34 | 35 | client.mset({ 36 | key1: value1, 37 | key2: value2, 38 | }) 39 | count = client.delete(key1, key2) 40 | assert count == 2 41 | 42 | values = client.mget(key1, key2) 43 | assert len(values) == 2 44 | assert values[0] is None 45 | assert values[1] is None 46 | 47 | 48 | if __name__ == '__main__': 49 | host = os.environ.get('CLIENT_TEST_NODE_HOST') or '127.0.0.1' 50 | port = os.environ.get('CLIENT_TEST_NODE_PORT') or '5299' 51 | client = create_client(host, port) 52 | 53 | test_single_key_command(client) 54 | test_multi_key_command(client) 55 | -------------------------------------------------------------------------------- /clienttest/python/requirements.txt: -------------------------------------------------------------------------------- 1 | redis-py-cluster==2.1.0 2 | -------------------------------------------------------------------------------- /conf/coordinator.toml: -------------------------------------------------------------------------------- 1 | address="127.0.0.1:6699" 2 | # broker_address = ["127.0.0.1:7799", "127.0.0.1:17799"] 3 | broker_address = "127.0.0.1:7799" 4 | reporter_id = "127.0.0.1:6699" 5 | thread_number = 2 6 | # 
Set this to true for large cluster 7 | enable_compression = false 8 | -------------------------------------------------------------------------------- /conf/mem-broker.toml: -------------------------------------------------------------------------------- 1 | address = "127.0.0.1:7799" 2 | failure_ttl = 60 3 | failure_quorum = 1 4 | migration_limit = 2 5 | 6 | # The HTTP API address of other memory brokers. 7 | replica_addresses = [] 8 | # replica_addresses = ["192.168.0.123:7799", "192.168.0.123:8899"] 9 | # replica_addresses = "192.168.0.123:7799,192.168.0.123:8899" 10 | 11 | # Periodically synchronize metadata to replicas. 12 | # This is in seconds. 13 | # Use zero to disable it. 14 | sync_meta_interval = 10 15 | 16 | # Set it to `true` for kubernetes StatefulSet 17 | # to disable the chunk allocation algorithm 18 | # and only use the index of server proxy to allocate chunks. 19 | # If this is true, 20 | # (1) when adding proxies, the "index" field is required and should be the index of the StatefulSet index. 21 | # (2) failover will only change the role and will not replace proxy. 22 | # (3) the whole undermoon cluster can only create one Redis cluster. 23 | enable_ordered_proxy = false 24 | 25 | storage_type = "memory" 26 | recover_from_meta_file = true 27 | meta_filename = "metadata" 28 | # Refresh meta file on each update 29 | auto_update_meta_file = true 30 | # Periodically update meta file. 31 | # This is in seconds. 32 | # Use zero to disable it. 33 | update_meta_file_interval = 10 34 | 35 | # Use the following config to enable external http storage. 36 | # 37 | # This is used for external system to differentiate different undermoon clusters 38 | # storage_type = "http" 39 | # storage_name = "my_storage_name" 40 | # storage_password = "somepassword" 41 | # http_storage_address = "localhost:9999" 42 | # refresh_interval = 30 43 | 44 | debug = false 45 | 46 | # Cluster Config 47 | # Cluster config can vary between clusters. 48 | # The config below is the default cluster config 49 | # and could be modified dynamically in the memory broker. 50 | 51 | # Enable string compression. 52 | # Since commands like INCR won't work when the actual stored string 53 | # is compressed, when string compression is enabled, 54 | # client can only use commands like SET, GET. 55 | # 56 | # Could only be "disabled", "set_get_only" 57 | compression_strategy = "disabled" 58 | # In seconds 59 | migration_max_migration_time = 3600 60 | # In milliseconds 61 | migration_max_blocking_time = 10000 62 | # In microseconds 63 | migration_scan_interval = 500 64 | migration_scan_count = 16 65 | -------------------------------------------------------------------------------- /conf/server-proxy.toml: -------------------------------------------------------------------------------- 1 | address = "127.0.0.1:5299" 2 | # announce_address must be the same as the address registered in the broker. 3 | announce_address = "127.0.0.1:5299" 4 | 5 | slowlog_len = 1024 6 | 7 | # In microseconds like redis. 8 | slowlog_log_slower_than = 20000 9 | # Execute `CONFIG SET slowlog_sample_rate 1` at runtime to record all commands. 10 | slowlog_sample_rate = 1000 11 | 12 | thread_number = 2 13 | 14 | backend_conn_num = 2 15 | # "disabled" should be good enough for most cases. 16 | # For larger throughput with pipline enabled, 17 | # we can try "fixed" or "dynamic" for higher throughput. 
18 | backend_batch_strategy = "disabled" 19 | # In bytes 20 | backend_flush_size = 1024 21 | # In nanoseconds 22 | backend_low_flush_interval = 200000 23 | backend_high_flush_interval = 600000 24 | 25 | # In milliseconds. Set 0 to disable session timeout. 26 | session_timeout = 0 27 | # In milliseconds 28 | backend_timeout = 3000 29 | 30 | # Password for the AUTH command 31 | # password = "yourpwd" 32 | 33 | # Active Redirection Mode 34 | # When active_redirection is enabled, 35 | # all the server proxies will handle the redirection internally. 36 | # Clients don't need to be a Redis Cluster Client. 37 | # NOTICE: This is an experimental feature. Don't use it in production. 38 | active_redirection = false 39 | # This is only useful when active_redirection is true. 40 | # Use 0 to disable the limitation. 41 | # Otherwise, it should be at least 4. 42 | max_redirections = 4 43 | 44 | # This should almost only be used in undermoon-operator in Kubernetes. 45 | # When scaling down, the kubernetes service may not be able to remove 46 | # the pods already deleted from the cluster, 47 | # which results in a `CLUSTER_NOT_FOUND` error. 48 | # This default address will redirect the clients to the service 49 | # to try again. 50 | # Leave it empty to disable it. 51 | default_redirection_address = "" 52 | 53 | # In v1, the format of a node address is just `<ip>:<port>`. 54 | # In v2, the format of a node address is `<ip>:<port>@<cport>`, 55 | # where `cport` is used in the gossip protocol in the official Redis Cluster. 56 | # In undermoon, `cport` will always be 5299. 57 | cluster_nodes_version = "v2" 58 | -------------------------------------------------------------------------------- /docs/active_redirection.md: -------------------------------------------------------------------------------- 1 | # Active Redirection Mode 2 | When server proxies run in `active redirection` mode, 3 | they expose themselves through the ordinary single-instance Redis protocol. 4 | Clients don't need to support the [Redis Cluster Protocol](./redis_cluster_protocol.md). 5 | 6 | ## Enable Active Redirection 7 | Inside the server proxy config file `server-proxy.toml`, 8 | set `active_redirection` to `true`, 9 | or use the environment variable `UNDERMOON_ACTIVE_REDIRECTION=true`. 10 | 11 | ## How does it work? 12 | When `active redirection` mode is enabled, 13 | a server proxy will automatically redirect requests to other server proxies. 14 | Then, if needed, the other server proxies will keep redirecting the requests 15 | until they find the owner or exceed the maximum redirection limit 16 | set by `max_redirections` in the server proxy config file. 17 | -------------------------------------------------------------------------------- /docs/best_practice.md: -------------------------------------------------------------------------------- 1 | # Best Practice 2 | 3 | #### Consider Multi-threaded Redis First 4 | Now both Redis 6 and [KeyDB](https://github.com/JohnSully/KeyDB) support multi-threading. 5 | They should be your first choice before looking into a distributed Redis solution, 6 | because any existing cluster solution is non-trivial to maintain, or expensive 7 | if you use an enterprise cloud. 8 | 9 | If your best machine still can't store all the data or sustain your throughput, 10 | then you can take your time on a cluster solution. 11 | 12 | #### Use Small Redis 13 | Each Redis instance should be given a `max_memory` no larger than 8G. 14 | `2G` is a good `max_memory` for each instance as it will not have a great impact on the network during replication.
15 | It is also much easier to scale with small Redis instances. 16 | 17 | `Undermoon` is designed to help you maintain clusters with hundreds of nodes. 18 | 19 | #### Trigger Migration with Caution 20 | Running a migration could decrease the max throughput and increase the latency. 21 | Try to employ a good capacity planning strategy and only trigger migration when the throughput is low. 22 | 23 | #### Prefer Pipeline to Multi-key Commands 24 | Multi-key commands are much harder to optimize for the proxy. Use pipelines instead of multi-key commands for better performance. 25 | -------------------------------------------------------------------------------- /docs/broker_external_storage.md: -------------------------------------------------------------------------------- 1 | # Broker External Storage 2 | The `mem_broker` supports external storage through an HTTP API. 3 | The external storage may support multiple `undermoon` clusters at the same time, 4 | so there's a `<name>` argument in the path to differentiate these clusters. 5 | 6 | Query data: 7 | ``` 8 | GET /api/v1/store/<name> 9 | 10 | Response: 11 | HTTP 200: ExternalStore json 12 | HTTP 404: not found 13 | ``` 14 | 15 | Update data: 16 | ``` 17 | PUT /api/v1/store/<name> 18 | Basic Auth: <storage_name>:<storage_password> 19 | 20 | Request: ExternalStore json 21 | Response: 22 | HTTP 200 for success 23 | HTTP 404: not found 24 | HTTP 409 for version conflict 25 | ``` 26 | 27 | The structure of `ExternalStore` is: 28 | ``` 29 | { 30 | "version": <version string> or null, 31 | "store": <store json> 32 | } 33 | ``` 34 | 35 | In the [undermoon-operator](https://github.com/doyoubi/undermoon-operator), 36 | this `version` is the `ResourceVersion` of the kubernetes object. 37 | The HTTP service should check the `version` before updating the data. 38 | -------------------------------------------------------------------------------- /docs/broker_http_api.md: -------------------------------------------------------------------------------- 1 | # Broker HTTP API 2 | 3 | All the payloads of requests and responses should be in JSON format, 4 | and the HTTP status code is used to indicate success or failure. 5 | 6 | An HTTP broker should implement at least the following APIs to work with the Coordinator: 7 | 8 | ##### (1) GET /api/v3/clusters/names?offset=<offset>&limit=<limit> 9 | Get all the cluster names. 10 | `offset` starts from zero. 11 | ``` 12 | Response: 13 | { 14 | "names": ["cluster_name1", ...], 15 | } 16 | ``` 17 | 18 | ##### (2) GET /api/v3/clusters/meta/<cluster_name> 19 | Get the meta data of `<cluster_name>`. 20 | ``` 21 | Response: 22 | If the cluster exists: 23 | { 24 | "cluster": { 25 | "name": "cluster_name1", 26 | "epoch": 1, 27 | "nodes": [{ 28 | "address": "redis9:6379", 29 | "proxy_address": "server_proxy5:6005", 30 | "cluster_name": "mycluster", 31 | "slots": [{ 32 | "range_list": [[0, 8191]], 33 | "tag": "None" 34 | }], 35 | "repl": { 36 | "role": "master", 37 | "peers": [{ 38 | "node_address": "redis2:6379", 39 | "proxy_address": "server_proxy1:6001" 40 | }] 41 | } 42 | }, ...], 43 | "config": { 44 | "compression_strategy": "disabled" 45 | } 46 | } 47 | } 48 | 49 | If not: 50 | { "cluster": null } 51 | ``` 52 | 53 | ##### (3) GET /api/v3/proxies/addresses?offset=<offset>&limit=<limit> 54 | Get all the server-side proxy addresses. 55 | `offset` starts from zero.
56 | ``` 57 | Response: 58 | { 59 | "addresses": ["server_proxy_address1", ...], 60 | } 61 | ``` 62 | 63 | ##### (4) GET /api/v3/proxies/meta/<server_proxy_address> 64 | Get the meta data of `<server_proxy_address>`. 65 | ``` 66 | Response: 67 | If the proxy exists: 68 | { 69 | "proxy": { 70 | "address": "server_proxy_address1", 71 | "epoch": 1, 72 | "nodes": [{ 73 | "address": "127.0.0.1:7001", 74 | "proxy_address": "127.0.0.1:6001", 75 | "cluster_name": "cluster_name1", 76 | "repl": { 77 | "role": "master", 78 | "peers": [{ 79 | "node_address": "127.0.0.1:7002", 80 | "proxy_address": "127.0.0.1:7003", 81 | }...] 82 | }, 83 | "slots": [{ 84 | "range_list": [[0, 5000]], 85 | "tag": "None" 86 | }, ...] 87 | }, ...], 88 | "free_nodes": ["127.0.0.1:7004"], // For free proxies 89 | "peers": [{ 90 | "proxy_address": "127.0.0.1:6001", 91 | "cluster_name": "cluster_name1", 92 | "slots": [{ 93 | "range_list": [[0, 5000]], 94 | "tag": "None" 95 | }, ...] 96 | }, ...], 97 | "clusters_config": { 98 | "cluster_name1": { 99 | "compression_strategy": "disabled" 100 | } 101 | } 102 | } 103 | } 104 | If not: 105 | { "proxy": null } 106 | ``` 107 | 108 | ##### (5) POST /api/v3/failures/<server_proxy_address>/<reporter_id> 109 | Report a suspected failure and tag it with a unique `<reporter_id>` for every Coordinator. 110 | ``` 111 | Response: 112 | empty payload 113 | ``` 114 | 115 | ##### (6) GET /api/v3/failures 116 | Get all the failures reported by coordinators but not yet committed as failed. 117 | It's used by the coordinator, and you probably need to use (9) instead. 118 | ``` 119 | Response: 120 | { 121 | "addresses": ["server_proxy_address1", ...], 122 | } 123 | ``` 124 | 125 | ##### (7) POST /api/v3/proxies/failover/<server_proxy_address> 126 | Try to do the failover for the specified proxy. 127 | In the memory broker implementation, if `enable_ordered_proxy` is on, 128 | this API will only change the role and will not replace the failed server proxy. 129 | ``` 130 | Request: 131 | empty payload 132 | 133 | Response: 134 | If success: 135 | Proxy is being used by a cluster: 136 | { 137 | "proxy": { 138 | "address": "server_proxy_address1", 139 | "epoch": 1, 140 | "nodes": [{ 141 | "address": "127.0.0.1:7001", 142 | "proxy_address": "127.0.0.1:6001", 143 | "cluster_name": "cluster_name1", 144 | "repl": { 145 | "role": "master", 146 | "peers": [{ 147 | "node_address": "127.0.0.1:7002", 148 | "proxy_address": "127.0.0.1:7003", 149 | }...] 150 | }, 151 | "slots": [{ 152 | "range_list": [[0, 5000]], 153 | "tag": "None" 154 | }, ...] 155 | }, ...] 156 | } 157 | } 158 | 159 | Proxy is not in use: 160 | { 161 | "proxy": null 162 | } 163 | 164 | If not: 165 | HTTP 409 166 | ``` 167 | 168 | ##### (8) PUT /api/v3/clusters/migrations 169 | Try to commit the migration. 170 | ``` 171 | Request: 172 | { 173 | "cluster_name": "mydb", 174 | "slot_range": { 175 | "range_list": [[0, 5000]], 176 | "tag": { 177 | "Migrating": { 178 | "epoch": 233, 179 | "src_proxy_address": "127.0.0.1:7000", 180 | "src_node_address": "127.0.0.1:7001", 181 | "dst_proxy_address": "127.0.0.2:7000", 182 | "dst_node_address": "127.0.0.2:7001" 183 | } 184 | } 185 | } 186 | } 187 | 188 | Response: 189 | { 190 | "addresses": ["server_proxy_address1", ...], 191 | } 192 | ``` 193 | 194 | ##### (9) GET /api/v3/proxies/failed/addresses 195 | Get all the failed proxies.
196 | ``` 197 | Response: 198 | { 199 | "addresses": ["server_proxy_address1", ...], 200 | } 201 | ``` 202 | -------------------------------------------------------------------------------- /docs/chunk.md: -------------------------------------------------------------------------------- 1 | # Chunk 2 | ![Chunk](./chunk.svg) 3 | 4 | A chunk is the basic building block of a cluster, providing the created cluster 5 | with a good topology for workload balancing. 6 | It consists of 2 proxies and 4 Redis nodes evenly distributed across two machines. 7 | 8 | Normally, the first half has 1 master and 1 replica, and their peers are located in the second half. 9 | 10 | After the second half fails, all the Redis nodes in the first half will become masters. 11 | 12 | ## Chunk Allocation 13 | Instead of complex scheduling strategies and techniques, 14 | `undermoon` employs a simple Redis node allocation algorithm 15 | to achieve **workload balancing**: 16 | - The masters should be evenly distributed across all machines. 17 | - After failover happens, in each cluster, 18 | the load of the failed masters should be evenly distributed across all machines. 19 | 20 | The algorithm details and the proof of algorithm termination are documented in 21 | [chunk allocation](./chunk_allocation.txt). 22 | -------------------------------------------------------------------------------- /docs/command_table.md: -------------------------------------------------------------------------------- 1 | | COMMAND | SUPPORTED | DESCRIPTION | 2 | |---|---|---| 3 | | acl | False | | 4 | | append | True | | 5 | | asking | True | This is a no-op. It only returns OK. | 6 | | auth | False | This command is reserved for future use. | 7 | | bgrewriteaof | False | | 8 | | bgsave | False | | 9 | | bitcount | True | | 10 | | bitfield | True | | 11 | | bitfield_ro | False | | 12 | | bitop | False | | 13 | | bitpos | True | | 14 | | blpop | True | User MUST specify the timeout. | 15 | | brpop | True | User MUST specify the timeout. | 16 | | brpoplpush | True | User MUST specify the timeout. | 17 | | bzpopmax | True | User MUST specify the timeout. | 18 | | bzpopmin | True | User MUST specify the timeout. | 19 | | client | False | | 20 | | cluster | True | Only supports the following subcommands: NODES, SLOTS, KEYSLOT. | 21 | | command | True | Will filter out the unsupported commands. | 22 | | config | True | | 23 | | dbsize | False | | 24 | | debug | False | | 25 | | decr | True | | 26 | | decrby | True | | 27 | | del | True | | 28 | | discard | False | | 29 | | dump | True | | 30 | | echo | True | | 31 | | eval | True | All the keys should be in the same slot.
| 32 | | evalsha | False | | 33 | | exec | False | | 34 | | exists | True | | 35 | | expire | True | | 36 | | expireat | True | | 37 | | flushall | False | | 38 | | flushdb | False | | 39 | | geoadd | True | | 40 | | geodist | True | | 41 | | geohash | True | | 42 | | geopos | True | | 43 | | georadius | True | | 44 | | georadius_ro | True | | 45 | | georadiusbymember | True | | 46 | | georadiusbymember_ro | True | | 47 | | get | True | | 48 | | getbit | True | | 49 | | getrange | True | | 50 | | getset | True | | 51 | | hdel | True | | 52 | | hello | False | | 53 | | hexists | True | | 54 | | hget | True | | 55 | | hgetall | True | | 56 | | hincrby | True | | 57 | | hincrbyfloat | True | | 58 | | hkeys | True | | 59 | | hlen | True | | 60 | | hmget | True | | 61 | | hmset | True | | 62 | | host: | False | | 63 | | hscan | True | | 64 | | hset | True | | 65 | | hsetnx | True | | 66 | | hstrlen | True | | 67 | | hvals | True | | 68 | | incr | True | | 69 | | incrby | True | | 70 | | incrbyfloat | True | | 71 | | info | True | | 72 | | keys | False | | 73 | | lastsave | False | | 74 | | latency | False | | 75 | | lindex | True | | 76 | | linsert | True | | 77 | | llen | True | | 78 | | lolwut | False | | 79 | | lpop | True | | 80 | | lpos | False | | 81 | | lpush | True | | 82 | | lpushx | True | | 83 | | lrange | True | | 84 | | lrem | True | | 85 | | lset | True | | 86 | | ltrim | True | | 87 | | memory | False | | 88 | | mget | True | | 89 | | migrate | False | | 90 | | module | False | | 91 | | monitor | False | | 92 | | move | False | | 93 | | mset | True | | 94 | | msetnx | False | | 95 | | multi | False | | 96 | | object | False | | 97 | | persist | True | | 98 | | pexpire | True | | 99 | | pexpireat | True | | 100 | | pfadd | True | | 101 | | pfcount | True | All the keys should be in the same slot. | 102 | | pfdebug | False | | 103 | | pfmerge | True | All the keys should be in the same slot. | 104 | | pfselftest | False | | 105 | | ping | True | | 106 | | post | False | | 107 | | psetex | True | | 108 | | psubscribe | False | | 109 | | psync | False | | 110 | | pttl | True | | 111 | | publish | False | | 112 | | pubsub | False | | 113 | | punsubscribe | False | | 114 | | randomkey | False | | 115 | | readonly | False | | 116 | | readwrite | False | | 117 | | rename | True | All the keys should be in the same slot. | 118 | | renamenx | False | All the keys should be in the same slot. | 119 | | replconf | False | | 120 | | replicaof | False | | 121 | | restore | True | | 122 | | restore-asking | False | | 123 | | role | False | | 124 | | rpop | True | | 125 | | rpoplpush | True | All the keys should be in the same slot. | 126 | | rpush | True | | 127 | | rpushx | True | | 128 | | sadd | True | | 129 | | save | False | | 130 | | scan | False | | 131 | | scard | True | | 132 | | script | False | | 133 | | sdiff | True | All the keys should be in the same slot. | 134 | | sdiffstore | True | All the keys should be in the same slot. | 135 | | select | False | | 136 | | set | True | | 137 | | setbit | True | | 138 | | setex | True | | 139 | | setnx | True | | 140 | | setrange | True | | 141 | | shutdown | False | | 142 | | sinter | True | All the keys should be in the same slot. | 143 | | sinterstore | True | All the keys should be in the same slot. | 144 | | sismember | True | | 145 | | slaveof | False | | 146 | | slowlog | False | | 147 | | smembers | True | | 148 | | smove | True | All the keys should be in the same slot. 
| 149 | sort | True | | 150 | spop | True | | 151 | srandmember | True | | 152 | srem | True | | 153 | sscan | True | | 154 | stralgo | False | | 155 | strlen | True | | 156 | subscribe | False | | 157 | substr | False | | 158 | sunion | True | All the keys should be in the same slot. | 159 | sunionstore | False | All the keys should be in the same slot. | 160 | swapdb | False | | 161 | sync | False | | 162 | time | False | | 163 | touch | True | All the keys should be in the same slot. | 164 | ttl | True | | 165 | type | True | | 166 | unlink | True | All the keys should be in the same slot. | 167 | unsubscribe | False | | 168 | unwatch | False | | 169 | wait | False | | 170 | watch | False | | 171 | xack | True | | 172 | xadd | True | | 173 | xclaim | True | | 174 | xdel | True | | 175 | xgroup | False | | 176 | xinfo | False | | 177 | xlen | True | | 178 | xpending | True | | 179 | xrange | True | | 180 | xread | False | | 181 | xreadgroup | False | | 182 | xrevrange | True | | 183 | xsetid | False | | 184 | xtrim | True | | 185 | zadd | True | | 186 | zcard | True | | 187 | zcount | True | | 188 | zincrby | True | | 189 | zinterstore | True | All the keys should be in the same slot. | 190 | zlexcount | True | | 191 | zpopmax | True | | 192 | zpopmin | True | | 193 | zrange | True | | 194 | zrangebylex | True | | 195 | zrangebyscore | True | | 196 | zrank | True | | 197 | zrem | True | | 198 | zremrangebylex | True | | 199 | zremrangebyrank | True | | 200 | zremrangebyscore | True | | 201 | zrevrange | True | | 202 | zrevrangebylex | True | | 203 | zrevrangebyscore | True | | 204 | zrevrank | True | | 205 | zscan | True | | 206 | zscore | True | | 207 | zunionstore | True | All the keys should be in the same slot. | -------------------------------------------------------------------------------- /docs/development.md: -------------------------------------------------------------------------------- 1 | # Development Guide 2 | 3 | ## Run linter and tests 4 | 5 | Run the following commands before committing your code: 6 | ``` 7 | $ make lint 8 | $ make test 9 | ``` 10 | 11 | ## Coding Style 12 | ### Safe Code 13 | Avoid using `unsafe` and calls that could crash like `unwrap`, `unsafe_pinned`. 14 | Use `pin-project` instead of `pin-utils`. 15 | 16 | But you can still use `expect` in some cases, only when you have to: 17 | 18 | - (1) Data manipulation in memory broker 19 | - ~~(2) Locks~~ (Use poison-free locks in parking_lot) 20 | 21 | ### Employ dependency injection for better unit tests 22 | Dependency injection not only makes it much easier to write unit tests 23 | but also makes the code more modular. 24 | 25 | Now the whole server proxy can run purely in memory, 26 | so tests can be run against the proxy without creating a real connection. 27 | -------------------------------------------------------------------------------- /docs/docker_compose_example.md: -------------------------------------------------------------------------------- 1 | # Run Undermoon using Docker Compose 2 | The following examples use docker to create an `undermoon` cluster.
3 | 4 | Requirements: 5 | 6 | - docker-compose 7 | - redis-cli 8 | - [jq](https://stedolan.github.io/jq/) 9 | 10 | #### Run the cluster in docker-compose 11 | Download and run the cluster directly: 12 | ```bash 13 | $ make docker-mem-broker-example 14 | ``` 15 | 16 | Or build it yourself and run the `undermoon` docker image: 17 | ```bash 18 | $ make docker-build-test-image 19 | $ make docker-mem-broker 20 | ``` 21 | 22 | #### Register Proxies 23 | After everything is up, run the initialization script to register the storage resources through the HTTP API: 24 | ```bash 25 | $ ./examples/mem-broker/init.sh 26 | ``` 27 | 28 | We have 6 available proxies. 29 | ```bash 30 | $ curl http://localhost:7799/api/v3/proxies/addresses 31 | ``` 32 | 33 | #### Create Cluster 34 | Since every proxy has 2 corresponding Redis nodes, we have 12 nodes in total. 35 | Note that the node number of a cluster can only be a multiple of 4. 36 | Let's create a cluster with 4 nodes. 37 | ```bash 38 | $ curl -XPOST -H 'Content-Type: application/json' \ 39 | http://localhost:7799/api/v3/clusters/meta/mycluster -d '{"node_number": 4}' 40 | ``` 41 | 42 | Before connecting to the cluster, you need to add these hosts to your `/etc/hosts`: 43 | ``` 44 | # /etc/hosts 45 | 127.0.0.1 server_proxy1 46 | 127.0.0.1 server_proxy2 47 | 127.0.0.1 server_proxy3 48 | 127.0.0.1 server_proxy4 49 | 127.0.0.1 server_proxy5 50 | 127.0.0.1 server_proxy6 51 | ``` 52 | 53 | Let's check out our cluster. It's created by some randomly chosen proxies. 54 | We need to find them out first. 55 | Note that you need to install the `jq` command to easily parse the json in the commands below. 56 | 57 | ```bash 58 | # List the proxies of our "mycluster": 59 | $ curl -s http://localhost:7799/api/v3/clusters/meta/mycluster | jq '.cluster.nodes[].proxy_address' | uniq 60 | "server_proxy5:6005" 61 | "server_proxy6:6006" 62 | ``` 63 | 64 | Pick one of the proxy addresses above (in my case it's `server_proxy5:6005`) for the cluster `mycluster` and connect to it. 65 | 66 | ```bash 67 | # Add `-c` to enable cluster mode: 68 | $ redis-cli -h server_proxy5 -p 6005 -c 69 | # List the proxies: 70 | server_proxy5:6005> cluster nodes 71 | mycluster___________d71bc00fbdddf89_____ server_proxy5:6005 myself,master - 0 0 7 connected 0-8191 72 | mycluster___________8de73f9146386295____ server_proxy6:6006 master - 0 0 7 connected 8192-16383 73 | # Send out some requests: 74 | server_proxy5:6005> get a 75 | -> Redirected to slot [15495] located at server_proxy6:6006 76 | (nil) 77 | server_proxy6:6006> get b 78 | -> Redirected to slot [3300] located at server_proxy5:6005 79 | (nil) 80 | ``` 81 | Great! We can use our created cluster just like the official Redis Cluster. 82 | 83 | #### Scale Up 84 | It actually has 4 Redis nodes under the hood. 85 | ```bash 86 | # List the nodes of our "mycluster": 87 | $ curl -s http://localhost:7799/api/v3/clusters/meta/mycluster | jq '.cluster.nodes[].address' 88 | "redis9:6379" 89 | "redis10:6379" 90 | "redis11:6379" 91 | "redis12:6379" 92 | ``` 93 | Two of them are masters and the other two are replicas.
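If you prefer scripting these checks over `curl` + `jq`, the same information can be pulled from the broker HTTP API. Below is a minimal Python sketch (assuming the `requests` package and the broker at `localhost:7799`; `show_roles` is a hypothetical helper, not a script shipped with this repo):

```python
# A hypothetical helper: print the role of every node in a cluster
# using the memory broker HTTP API (GET /api/v3/clusters/meta/<cluster_name>).
import requests

BROKER = "http://localhost:7799"

def show_roles(cluster_name):
    resp = requests.get("{}/api/v3/clusters/meta/{}".format(BROKER, cluster_name))
    resp.raise_for_status()
    cluster = resp.json()["cluster"]
    if cluster is None:
        print("cluster not found")
        return
    for node in cluster["nodes"]:
        # Each node exposes its Redis address, its server proxy,
        # and its replication role in the "repl" section.
        print(node["address"], node["proxy_address"], node["repl"]["role"])

show_roles("mycluster")
```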
94 | 95 | Let's scale up to 8 nodes: 96 | ```bash 97 | # Add 4 nodes 98 | $ curl -XPATCH -H 'Content-Type: application/json' \ 99 | http://localhost:7799/api/v3/clusters/nodes/mycluster -d '{"node_number": 4}' 100 | # Start migrating the data 101 | $ curl -XPOST http://localhost:7799/api/v3/clusters/migrations/expand/mycluster 102 | ``` 103 | 104 | Now we have 4 server proxies: 105 | ```bash 106 | $ redis-cli -h server_proxy5 -p 6005 -c 107 | server_proxy5:6005> cluster nodes 108 | mycluster___________d71bc00fbdddf89_____ server_proxy5:6005 myself,master - 0 0 12 connected 0-4095 109 | mycluster___________8de73f9146386295____ server_proxy6:6006 master - 0 0 12 connected 8192-12287 110 | mycluster___________be40fe317baf2cf7____ server_proxy2:6002 master - 0 0 12 connected 4096-8191 111 | mycluster___________9434df4158f3c5a4____ server_proxy4:6004 master - 0 0 12 connected 12288-16383 112 | ``` 113 | 114 | and 8 nodes: 115 | ```bash 116 | # List the nodes of our "mycluster": 117 | $ curl -s http://localhost:7799/api/v3/clusters/meta/mycluster | jq '.cluster.nodes[].address' 118 | "redis9:6379" 119 | "redis10:6379" 120 | "redis11:6379" 121 | "redis12:6379" 122 | "redis3:6379" 123 | "redis4:6379" 124 | "redis7:6379" 125 | "redis8:6379" 126 | ``` 127 | 128 | #### Failover 129 | If you shut down any proxy, the replica will be promoted to master. 130 | And as long as the whole `undermoon` cluster has remaining free proxies, 131 | it can automatically replace the failed proxy. 132 | 133 | ```bash 134 | # List the proxies of our "mycluster": 135 | $ curl -s http://localhost:7799/api/v3/clusters/meta/mycluster | jq '.cluster.nodes[].proxy_address' | uniq 136 | "server_proxy5:6005" 137 | "server_proxy6:6006" 138 | "server_proxy2:6002" 139 | "server_proxy4:6004" 140 | ``` 141 | 142 | Let's shut down one of the proxies, like `server_proxy5:6005` here. 143 | ```bash 144 | $ docker ps | grep server_proxy5 | awk '{print $1}' | xargs docker kill 145 | ``` 146 | 147 | The `undermoon` will detect the failure, replace the failed proxy, promote a new master, and add a new replica to the new master. 148 | ```bash 149 | # server_proxy5 is replaced by server_proxy3 150 | $ curl -s http://localhost:7799/api/v3/clusters/meta/mycluster | jq '.cluster.nodes[].proxy_address' | uniq 151 | "server_proxy3:6003" 152 | "server_proxy6:6006" 153 | "server_proxy2:6002" 154 | "server_proxy4:6004" 155 | ``` 156 | 157 | And now we can remove the failed server_proxy5 from the `undermoon` cluster.
158 | ```bash 159 | $ curl -XDELETE http://localhost:7799/api/v3/proxies/meta/server_proxy5:6005 160 | ``` 161 | -------------------------------------------------------------------------------- /docs/generate_command_table.py: -------------------------------------------------------------------------------- 1 | import json 2 | import redis 3 | 4 | 5 | COMMAND_TABLE_FILE = './docs/command_table.json' 6 | MARKDOWN_TABLE_FILE = './docs/command_table.md' 7 | 8 | 9 | def get_existing_command_table(): 10 | try: 11 | with open(COMMAND_TABLE_FILE, 'r') as f: 12 | content = f.read() 13 | return json.loads(content) 14 | except IOError: 15 | return {} 16 | 17 | 18 | def get_commands_from_server_proxy(): 19 | client = redis.StrictRedis(port=5299) 20 | commands = client.execute_command("COMMAND") 21 | return [cmd[0].decode("utf-8") for cmd in commands] 22 | 23 | 24 | def get_commands_from_redis(): 25 | ''' Use COMMAND to get all the commands from Redis 26 | ''' 27 | client = redis.StrictRedis() 28 | commands = client.execute_command("COMMAND") 29 | return [cmd[0].decode("utf-8") for cmd in commands] 30 | 31 | 32 | def generate_markdown(table): 33 | headers = [] 34 | headers.append('| COMMAND | SUPPORTED | DESCRIPTION |') 35 | headers.append('|---|---|---|') 36 | 37 | lines = [] 38 | for cmd, fields in table.items(): 39 | lines.append('| {} | {} | {} |'.format(cmd, fields['supported'], fields['desc'])) 40 | lines.sort() 41 | 42 | return '\n'.join(headers + lines) 43 | 44 | 45 | # Need to run a Redis locally to retrieve the commands. 46 | if __name__ == '__main__': 47 | supported_cmds = set(get_commands_from_server_proxy()) 48 | table = get_existing_command_table() 49 | for cmd in get_commands_from_redis(): 50 | if cmd in table: 51 | continue 52 | table[cmd] = { 53 | 'supported': cmd in supported_cmds, 54 | 'desc': '' 55 | } 56 | 57 | content = json.dumps(table, indent=4, sort_keys=True) 58 | with open(COMMAND_TABLE_FILE, 'w') as f: 59 | f.write(content) 60 | 61 | with open(MARKDOWN_TABLE_FILE, 'w') as f: 62 | f.write(generate_markdown(table)) 63 | -------------------------------------------------------------------------------- /docs/mem_broker_replica.md: -------------------------------------------------------------------------------- 1 | # Setting Up Backup for Memory Broker 2 | 3 | ## Setting Up Replica for Memory Broker 4 | Build the binaries: 5 | ``` 6 | $ cargo build 7 | ``` 8 | 9 | Run the replica: 10 | ``` 11 | $ RUST_LOG=warp=info,undermoon=info,mem_broker=info UNDERMOON_ADDRESS=127.0.0.1:8899 UNDERMOON_META_FILENAME=metadata2 target/debug/mem_broker 12 | ``` 13 | 14 | Run the master Memory Broker: 15 | ``` 16 | $ RUST_LOG=warp=info,undermoon=info,mem_broker=info UNDERMOON_REPLICA_ADDRESSES=127.0.0.1:8899 UNDERMOON_SYNC_META_INTERVAL=3 target/debug/mem_broker 17 | ``` 18 | 19 | ``` 20 | # Put some data to the master: 21 | $ ./examples/mem-broker/init.sh 22 | 23 | # Verify that on the master: 24 | curl localhost:7799/api/v3/metadata 25 | ... 26 | 27 | # Verify that on the replica after 3 seconds: 28 | curl localhost:8899/api/v3/metadata 29 | ... 30 | # The replica should have the same data as the master. 31 | ``` 32 | 33 | Note that when the master fails, 34 | the whole system will **NOT** automatically fail over to the replica. 35 | You need to do it yourself by reconfiguring the coordinator. 36 | During this time, the server proxies will still be able to process requests, 37 | but the whole system can't scale or fail over server proxies 38 | until the `Memory Broker` endpoint of the coordinator is switched to the replica.
39 | 40 | Let's say you have already run a coordinator: 41 | ``` 42 | $ RUST_LOG=undermoon=info,coordinator=info target/debug/coordinator conf/coordinator.toml 43 | ``` 44 | 45 | Then you can switch from the master to the replica by connecting to the coordinator with the Redis protocol 46 | and changing the config. 47 | ``` 48 | # 6699 is the port of the coordinator. 49 | $ redis-cli -p 6699 CONFIG SET brokers 127.0.0.1:8899 50 | ``` 51 | 52 | The newest metadata of the master memory broker 53 | may not have been replicated to the replica memory broker when the master fails. 54 | We can't recover the lost data, but we can bump the metadata epoch 55 | by collecting the epochs from all the recorded proxies 56 | to recover the service. 57 | 58 | So we also need to call this API after reconfiguring the coordinator: 59 | ``` 60 | $ curl -XPUT localhost:7799/api/v3/epoch/recovery 61 | ``` 62 | Now the system should be able to work again. 63 | -------------------------------------------------------------------------------- /docs/meta_command.md: -------------------------------------------------------------------------------- 1 | # UMCTL Command 2 | ## UMCTL SETCLUSTER 3 | UMCTL SETCLUSTER 4 | - version (v2 for current version) 5 | - epoch 6 | - flags 7 | - dbname 8 | - [ip:port slot_range] 9 | - [PEER [ip:port slot_range...]] 10 | - [CONFIG [field value...]] 11 | 12 | Sets the mapping relationship between the server-side proxy and the redis instances behind it. 13 | 14 | - `version` is the api version of `UMCTL SETCLUSTER`. 15 | - `epoch` is the logical time of the configuration this command is sending, used to decide which configuration is more up-to-date. 16 | Every running server-side proxy will store its epoch and will reject all the `UMCTL [SETCLUSTER|SETREPL]` requests which don't have a higher epoch. 17 | - `flags` Currently it may be NOFLAG or a combination of FORCE and COMPRESS ("FORCE,COMPRESS"). 18 | When it contains `FORCE`, the server-side proxy will ignore the epoch rule above and will always accept the configuration. 19 | When it contains `COMPRESS`, the rest of the command only contains one element with gzip-compressed and base64-encoded data. 20 | - `slot_range` can be like 21 | - 1 0-1000 22 | - 2 0-1000 2000-3000 23 | - migrating 1 0-1000 epoch src_proxy_address src_node_address dst_proxy_address dst_node_address 24 | - importing 1 0-1000 epoch src_proxy_address src_node_address dst_proxy_address dst_node_address 25 | - `ip:port` should be the addresses of redis instances, or of other proxies for the `PEER` part. 26 | 27 | Note that these two commands set all the `local` or `peer` meta data of the proxy at once. 28 | For example, you can't add multiple backend redis instances one by one by sending multiple `UMCTL SETCLUSTER` commands. 29 | You should batch them in just one `UMCTL SETCLUSTER`. 30 | 31 | ## UMCTL SETREPL 32 | UMCTL SETREPL 33 | - epoch 34 | - flags 35 | - [[master|replica] dbname1 node_ip:node_port peer_num [peer_node_ip:peer_node_port peer_proxy_ip:peer_proxy_port]...] ... 36 | 37 | Sets the replication metadata to server-side proxies. This API supports multiple replicas for a master and also multiple masters for a replica. 38 | 39 | - For a master, `node_ip:node_port` is the master node. For a replica, it's the replica node. 40 | - `peer_node_ip:peer_node_port` is the node port of the corresponding master if we're sending this to a replica, and vice versa. 41 | - `peer_proxy_ip:peer_proxy_port` is similar.
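Since `UMCTL` commands travel over the ordinary Redis protocol, they can be issued from any Redis client. Below is a minimal Python sketch (assuming `redis-py` and a server proxy on `127.0.0.1:5299`; the epoch and slot layout are just example values, mirroring the sample command in the docs):

```python
# A minimal sketch: send UMCTL SETCLUSTER through redis-py.
# The addresses, epoch, and slot ranges below are example values.
import redis

client = redis.StrictRedis(host="127.0.0.1", port=5299)

# Epoch 1, no flags, cluster "mydb": the local node owns slots 0-8000
# and a peer proxy owns slots 8001-16383.
client.execute_command(
    "UMCTL", "SETCLUSTER",
    "v2", "1", "NOFLAGS", "mydb",
    "127.0.0.1:6379", "1", "0-8000",
    "PEER", "127.0.0.1:7000", "1", "8001-16383",
)
```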
-------------------------------------------------------------------------------- /docs/migration_benchmark.md: -------------------------------------------------------------------------------- 1 | # Migration Benchmarking compared to Redis 2 | 3 | ## Redis Cluster Migration 4 | Run a redis cluster with 2 nodes 5 | ```bash 6 | ./scripts/run_redis_cluster.sh 7 | ``` 8 | 9 | At first all slots are on the migrating redis node 7001. 10 | ```bash 11 | redis-cli -p 7001 cluster nodes 12 | migrating_redis_________________________ 127.0.0.1:7001@17001 myself,master - 0 0 1 connected 0-16383 13 | importing_redis_________________________ 127.0.0.1:7002@17002 master - 0 1634309831944 2 connected 14 | ``` 15 | 16 | Insert 1GB data (512 * 2097152) to 7001 17 | ```bash 18 | redis-benchmark -h 127.0.0.1 -p 7001 -c 20 -P 32 -t set -d 512 -n 2097152 -r 100000000 19 | ``` 20 | 21 | Start to reshard: 22 | ``` 23 | start_time=$(date) 24 | redis-cli --cluster reshard 127.0.0.1:7001 \ 25 | --cluster-from migrating_redis_________________________ \ 26 | --cluster-to importing_redis_________________________ \ 27 | --cluster-slots 8192 \ 28 | --cluster-yes \ 29 | --cluster-pipeline 16 30 | echo 'Start time:' ${start_time} 31 | echo 'End time:' "$(date)" 32 | ``` 33 | 34 | ## Undermoon Cluster Migration 35 | Run a undermoon redis cluster with 2 nodes 36 | ```bash 37 | ./scripts/run_two_shards.sh 38 | ``` 39 | 40 | Insert 1GB data (512 * 2097152) to 7001 just same as redis cluster above 41 | ```bash 42 | redis-benchmark -h 127.0.0.1 -p 7001 -c 20 -P 32 -t set -d 512 -n 2097152 -r 100000000 43 | ``` 44 | 45 | Start migration: 46 | ```bash 47 | start_time=$(date) 48 | ./scripts/loop_migration_test.sh one_shot 49 | echo 'Start time:' ${start_time} 50 | echo 'End time:' "$(date)" 51 | ``` 52 | -------------------------------------------------------------------------------- /docs/migration_local_test.md: -------------------------------------------------------------------------------- 1 | # Test Migration Locally 2 | We can set up two redis servers and server proxies to test migration locally. 3 | 4 | ## Simple Tests 5 | ### Run them all 6 | ``` 7 | ./scripts/run_two_shards.sh 8 | ``` 9 | 10 | The logs will be inside `./local_tests`. 11 | 12 | ### Write Some Data to Source Redis 13 | Here we use redis 7001 and server proxy 6001 as the source part(the part migrating out the slots). 14 | 15 | ``` 16 | for i in {0..50}; do redis-cli -p 7001 set $i $i; done 17 | ``` 18 | 19 | ### Start Migration 20 | ``` 21 | redis-cli -p 6001 UMCTL SETCLUSTER v2 2 NOFLAGS mydb \ 22 | 127.0.0.1:7001 1 0-8000 \ 23 | 127.0.0.1:7001 migrating 1 8001-16383 2 127.0.0.1:6001 127.0.0.1:7001 127.0.0.1:6002 127.0.0.1:7002 \ 24 | PEER 127.0.0.1:6002 importing 1 8001-16383 2 127.0.0.1:6001 127.0.0.1:7001 127.0.0.1:6002 127.0.0.1:7002 25 | redis-cli -p 6002 UMCTL SETCLUSTER v2 2 NOFLAGS mydb \ 26 | 127.0.0.1:7002 importing 1 8001-16383 2 127.0.0.1:6001 127.0.0.1:7001 127.0.0.1:6002 127.0.0.1:7002 \ 27 | PEER 127.0.0.1:6001 1 0-8000 \ 28 | 127.0.0.1:6001 migrating 1 8001-16383 2 127.0.0.1:6001 127.0.0.1:7001 127.0.0.1:6002 127.0.0.1:7002 29 | ``` 30 | 31 | Read the logs to see whether there's any unexpected error. 
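Besides reading the logs, the key distribution can also be compared programmatically; a rough sketch (assuming `redis-py` is installed, using the same local ports as above):

```python
# A rough sketch: compare the key sets of the two Redis instances
# while the migration is running.
import redis

src = redis.StrictRedis(port=7001)
dst = redis.StrictRedis(port=7002)

# SCAN-based iteration, the same primitive the migration itself relies on.
src_keys = set(src.scan_iter())
dst_keys = set(dst.scan_iter())

print("source keys:", len(src_keys))
print("destination keys:", len(dst_keys))
# During migration a key may temporarily exist on both sides;
# after the commit, the two sets should be disjoint.
print("overlap:", len(src_keys & dst_keys))
```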
32 | 33 | Check the data of both redis instances: 34 | ``` 35 | redis-cli -p 7001 keys '*' 36 | echo '=====================' 37 | redis-cli -p 7002 keys '*' 38 | ``` 39 | 40 | ### Commit Migration 41 | ``` 42 | redis-cli -p 6001 umctl setcluster v2 3 noflags mydb \ 43 | 127.0.0.1:7001 1 0-8000 PEER 127.0.0.1:6002 1 8001-16383 44 | redis-cli -p 6002 umctl setcluster v2 3 noflags mydb \ 45 | 127.0.0.1:7002 1 8001-16383 PEER 127.0.0.1:6001 1 0-8000 46 | ``` 47 | 48 | Check the logs again. 49 | 50 | Check the data of both redis instances again: 51 | ``` 52 | redis-cli -p 7001 keys '*' 53 | echo '=====================' 54 | redis-cli -p 7002 keys '*' 55 | ``` 56 | 57 | ## Keep Running Migration 58 | ``` 59 | ./scripts/run_two_shards.sh 60 | ``` 61 | 62 | Run this in another terminal: 63 | ``` 64 | ./scripts/loop_migration_test.sh 65 | ``` 66 | 67 | The second script will keep moving the slots between the 2 server proxies. 68 | 69 | Then we can use [checker](https://github.com/doyoubi/undermoon-operator/tree/master/checker) 70 | to see whether there's any data consistency problem. 71 | -------------------------------------------------------------------------------- /docs/performance.md: -------------------------------------------------------------------------------- 1 | # Performance 2 | 3 | ### Throughput and Max Latency as Pipeline Number changes 4 | ![throughput_pipeline_number](./performance/throughput_pipeline_number.svg) 5 | ![max_latency_pipeline_number](./performance/max_latency_pipeline_number.svg) 6 | 7 | ### Throughput and Max Latency as Connection Number changes 8 | ![throughput_connection_number](./performance/throughput_connection_number.svg) 9 | ![max_latency_connection_number](./performance/max_latency_connection_number.svg) 10 | -------------------------------------------------------------------------------- /docs/redis_cluster_protocol.md: -------------------------------------------------------------------------------- 1 | # Redis Cluster Protocol and Server Proxy 2 | [Redis Cluster](https://redis.io/topics/cluster-tutorial) is the official Redis distributed solution supporting sharding and failover. 3 | Compared to using a single-instance Redis, clients connecting to `Redis Cluster` need to implement the `Redis Cluster Client Protocol`. 4 | What it basically does is: 5 | - Redirecting the requests if we are not sending commands to the right node. 6 | - Caching the cluster routing table from one of the two commands, `CLUSTER NODES` and `CLUSTER SLOTS`. 7 | 8 | This kind of client is called the **Smart Client**. 9 | 10 | ![Redis Cluster Protocol](./redis_cluster_protocol.svg) 11 | 12 | To be compatible with existing Redis clients, 13 | there are also some `Redis Cluster Proxies` 14 | like [redis-cluster-proxy(official)](https://github.com/RedisLabs/redis-cluster-proxy), 15 | [aster](https://github.com/wayslog/aster), 16 | [corvus](https://github.com/eleme/corvus), 17 | and [samaritan](https://github.com/samaritan-proxy/samaritan), 18 | to adapt the cluster protocol to the widely supported single-instance protocol. 19 | 20 | ![Redis Cluster Proxy](./redis_cluster_proxy.svg) 21 | 22 | #### How does Undermoon Implement the "Redis Cluster Protocol"? 23 | Undermoon implements the Redis Cluster Protocol based on a server-side proxy, or Server Proxy. 24 | The Server Proxy acts just like an official Redis Cluster node and returns redirection responses if needed. 25 | 26 | ![Server-side Proxy](./undermoon_server_proxy.svg) 27 | 28 | #### Why another "Redis Cluster Protocol" implementation?
29 | This implementation not only supports horizontal scalability and high availability, 30 | but also enables you to build a self-managed distributed Redis supporting: 31 | - Redis resource pool management 32 | - Serving multiple clusters for different users 33 | - Spreading the load evenly to all the physical machines 34 | - Scaling fast 35 | - Much easier operation and kubernetes integration. 36 | 37 | #### Why server-side proxy? 38 | Redis and most redis proxies such as redis-cluster-proxy, corvus, aster, and codis are deployed on separate machines, 39 | as the proxies normally need to spread the requests to different Redis instances. 40 | 41 | Instead of routing requests, a server-side proxy serves a different role from these proxies 42 | and is analogous to the cluster module of Redis, which makes it able to migrate the data and **scale fast** 43 | by using some customized migration protocols. 44 | 45 | #### Server-side Proxy 46 | ##### A small taste of the server-side proxy and the Redis Cluster Protocol 47 | 48 | ```bash 49 | # Run a redis-server first 50 | $ redis-server 51 | ``` 52 | 53 | ```bash 54 | # Build and run the server_proxy 55 | > cargo build 56 | > make server # runs on port 5299 and will forward commands to 127.0.0.1:6379 57 | ``` 58 | 59 | ```bash 60 | > redis-cli -p 5299 61 | # Initialize the proxy by `UMCTL` commands. 62 | 127.0.0.1:5299> UMCTL SETCLUSTER v2 1 NOFLAGS mydb 127.0.0.1:6379 1 0-8000 PEER 127.0.0.1:7000 1 8001-16383 63 | 64 | # Done! We can use it like a Redis Cluster! 65 | # Unlike the official Redis Cluster, it only displays master nodes here 66 | # instead of showing both masters and replicas. 67 | 127.0.0.1:5299> CLUSTER NODES 68 | mydb________________9f8fca2805923328____ 127.0.0.1:5299 myself,master - 0 0 1 connected 0-8000 69 | mydb________________d458dd9b55cc9ad9____ 127.0.0.1:7000 master - 0 0 1 connected 8001-16383 70 | 71 | # As we initialized it using UMCTL SETCLUSTER, 72 | # slots 8001-16383 belong to another server proxy 127.0.0.1:7000, 73 | # so we get a redirection response. 74 | # 75 | # This is the key difference between a normal Redis client and a Redis Cluster client, 76 | # as we need to deal with the redirection. 77 | 127.0.0.1:5299> get a 78 | (error) MOVED 15495 127.0.0.1:7000 79 | 80 | # Key 'b' is what this proxy is responsible for, so we process the request. 81 | 127.0.0.1:5299> set b 1 82 | OK 83 | ``` -------------------------------------------------------------------------------- /docs/set_up_manually.md: -------------------------------------------------------------------------------- 1 | # Set Up Cluster By Hand 2 | This tutorial will walk you through the process of setting up an `undermoon` cluster by hand 3 | to better understand how `undermoon` works. 4 | 5 | ## Architecture 6 | We will deploy all the following parts on one machine: 7 | - mem_broker 8 | - coordinator 9 | - two proxies with four nodes. 10 | 11 | ![architecture](./architecture.svg) 12 | 13 | ## Build the Binaries 14 | ```bash 15 | $ cargo build 16 | ``` 17 | Note that you also need to install Redis. 18 | 19 | ## Deploy Memory Broker 20 | ```bash 21 | $ RUST_LOG=undermoon=debug,mem_broker=debug UNDERMOON_ADDRESS=127.0.0.1:7799 target/debug/mem_broker 22 | ``` 23 | 24 | ## Deploy Coordinator 25 | Run the coordinator and specify the memory broker address.
26 | ```bash 27 | $ RUST_LOG=undermoon=debug,coordinator=debug UNDERMOON_BROKER_ADDRESS=127.0.0.1:7799 target/debug/coordinator 28 | ``` 29 | 30 | ## Deploy Server Proxy and Redis 31 | 32 | #### Chunk 33 | Refer to the [chunk docs](./chunk.md) for a detailed explanation. 34 | 35 | ![Chunk](./chunk.svg) 36 | 37 | #### Run Server Proxy and Redis 38 | Run 2 server proxies and 4 Redis nodes: 39 | ```bash 40 | # You need to run each line in different terminals 41 | 42 | # The first half 43 | $ redis-server --port 7001 44 | $ redis-server --port 7002 45 | $ RUST_LOG=undermoon=debug,server_proxy=debug UNDERMOON_ADDRESS=127.0.0.1:6001 target/debug/server_proxy 46 | 47 | # The second half 48 | $ redis-server --port 7003 49 | $ redis-server --port 7004 50 | $ RUST_LOG=undermoon=debug,server_proxy=debug UNDERMOON_ADDRESS=127.0.0.1:6002 target/debug/server_proxy 51 | ``` 52 | 53 | ## Register Server Proxy and Redis to Memory Broker 54 | A Redis cluster should never be created on a single machine. 55 | The memory broker won't be able to create a cluster even if we have enough nodes, 56 | because all of them seem to be in the same host `127.0.0.1`. 57 | 58 | But since we are deploying the whole `undermoon` cluster on one machine, 59 | we need to explicitly tell the memory broker that they are in different hosts 60 | by setting the `host` field to `localhost1` and `localhost2` in the posted json. 61 | ```bash 62 | curl -XPOST -H 'Content-Type: application/json' "http://localhost:7799/api/v3/proxies/meta" -d '{"proxy_address": "127.0.0.1:6001", "nodes": ["127.0.0.1:7001", "127.0.0.1:7002"], "host": "localhost1"}' 63 | curl -XPOST -H 'Content-Type: application/json' "http://localhost:7799/api/v3/proxies/meta" -d '{"proxy_address": "127.0.0.1:6002", "nodes": ["127.0.0.1:7003", "127.0.0.1:7004"], "host": "localhost2"}' 64 | ``` 65 | 66 | Now we have 2 free server proxies with 4 nodes. 67 | ```bash 68 | $ curl http://localhost:7799/api/v3/proxies/addresses 69 | {"addresses":["127.0.0.1:6001","127.0.0.1:6002"]} 70 | 71 | $ curl http://localhost:7799/api/v3/proxies/meta/127.0.0.1:6001 72 | {"proxy":{"address":"127.0.0.1:6001","epoch":2,"nodes":[],"free_nodes":["127.0.0.1:7001","127.0.0.1:7002"],"peers":[],"clusters_config":{}}} 73 | 74 | $ curl http://localhost:7799/api/v3/proxies/meta/127.0.0.1:6002 75 | {"proxy":{"address":"127.0.0.1:6002","epoch":2,"nodes":[],"free_nodes":["127.0.0.1:7003","127.0.0.1:7004"],"peers":[],"clusters_config":{}}} 76 | ``` 77 | 78 | ## Create Cluster 79 | Create a cluster named `mycluster` with 4 Redis nodes.
80 | ```bash 81 | $ curl -XPOST -H 'Content-Type: application/json' http://localhost:7799/api/v3/clusters/meta/mycluster -d '{"node_number": 4}' 82 | ``` 83 | 84 | Now we can connect to the cluster: 85 | ```bash 86 | $ redis-cli -h 127.0.0.1 -p 6001 -c 87 | 127.0.0.1:6001> cluster nodes 88 | mycluster___________2261c530e98070a6____ 127.0.0.1:6001 myself,master - 0 0 3 connected 8192-16383 89 | mycluster___________ad095468b9deeb2d____ 127.0.0.1:6002 master - 0 0 3 connected 0-8191 90 | 127.0.0.1:6001> get a 91 | (nil) 92 | 127.0.0.1:6001> get b 93 | -> Redirected to slot [3300] located at 127.0.0.1:6002 94 | "1" 95 | ``` 96 | -------------------------------------------------------------------------------- /docs/slots_migration.md: -------------------------------------------------------------------------------- 1 | # Slots Migration 2 | 3 | Goals: 4 | - Simple 5 | - Fast 6 | 7 | The migration process is based on the following Redis commands: 8 | - SCAN 9 | - DUMP 10 | - PTTL 11 | - RESTORE 12 | - DEL 13 | 14 | The SCAN command has a great property that 15 | it can guarantee that all the keys set before the first SCAN command will finally be returned, 16 | though sometimes multiple times. We can perform a 3-stage migration to mimic replication. 17 | 18 | - Wait for all the commands to be finished by Redis. 19 | - Redirect all the read and write operations to the destination Redis. 20 | The destination Redis will need to dump the data of the key from the source Redis 21 | before processing the commands if the key does not exist. 22 | - Start the scanning and forward the data to the peer Redis. 23 | 24 | ## Detailed Steps 25 | - The migrating proxy checks whether the importing proxy 26 | has also received the migration task by a `PreCheck` command. 27 | - The migrating proxy blocks all the newly arrived commands in a `Queue`, 28 | and waits for the existing commands to be finished. 29 | - The migrating proxy sends a `TmpSwitch` command to the importing proxy. 30 | Upon receiving this command, 31 | the importing proxy starts to process the keys inside the importing slot ranges. 32 | When the command returns, the migrating proxy releases all the commands inside the `Queue` and redirects them to the importing proxy. 33 | - The migrating proxy uses `SCAN`, `PTTL`, `DUMP`, `RESTORE`, `DEL` 34 | to forward all the data inside the migrating slot ranges to the peer importing Redis. 35 | The `RESTORE` does not set the `REPLACE` flag. 36 | - When the importing proxy processes commands, no matter whether read or write operations, it will first 37 | - If the command will not delete the key, get the `key lock`, 38 | - Send `EXISTS` and the processed command to the local importing Redis; 39 | if `EXISTS` returns true, forward the command to the local importing Redis. 40 | - If `EXISTS` returns false, 41 | send `DUMP` and `PTTL` to the migrating Redis to get the data, 42 | and `RESTORE` the data and forward the command to the local Redis. 43 | Then finally forward the command to the local importing Redis. 44 | - If the command can possibly delete the key, 45 | get the `key lock` and 46 | send `UMSYNC` to the migrating proxy to let the migrating proxy 47 | use `DUMP`, `PTTL`, `RESTORE`, `DEL` to transfer the key to the importing proxy. 48 | Then finally forward the command to the local importing Redis. 49 | - When the migrating proxy finishes the scanning, 50 | it proposes the `CommitSwitch` to the importing proxy. 51 | Then the importing proxy will only need to process the commands in the local Redis.
54 | ## Why It's Designed This Way
55 | The overall migration process is based on the commands `SCAN`, `PTTL`, `DUMP`, `RESTORE`, and `DEL`.
56 | Only the `RESTORE` command is sent to the importing server proxy, so for better performance,
57 | the scanning and transferring should be executed on the migrating server proxy.
58 |
59 | Since the scanning and transferring occupy a lot of CPU on both the server proxy and Redis,
60 | the rest of the workload is better processed on the importing proxy.
61 | So at the very beginning we transfer all the slots directly to the importing proxy.
62 |
63 | At this point, the importing proxy still only has a small part of the data.
64 | When it needs to process commands on the newly added slots,
65 | it needs to use `PTTL`, `DUMP`, `RESTORE` to pull the data from the migrating server proxy
66 | before processing the requests.
67 | It also needs to send `DEL` to remove the key.
68 |
69 | Note that for any command that won't delete the key,
70 | it is still correct to `RESTORE` the same key multiple times, since a `RESTORE` without the `REPLACE` flag is rejected when the key already exists.
71 | So just letting the importing proxy pull the data won't cause any inconsistency.
72 |
73 | But for those commands that could possibly remove keys, such as `DEL`, `EXPIRE`, `LPOP`,
74 | just letting the importing proxy pull the data could result in the following sequence:
75 | - The key gets deleted.
76 | - Another `RESTORE` command recovers the key (demonstrated in the sketch below).
77 |
78 | So the pulling of the data needs to be isolated from:
79 | - Other `RESTORE` commands in the importing proxy.
80 | - `SCAN` and `RESTORE` in the migrating proxy.
81 |
82 | Thus we need a key lock in the importing proxy,
83 | and we need the migrating proxy to help send the data instead of having the importing proxy pull it,
84 | so that the operations on each key can only be processed sequentially.
85 |
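The lost-deletion race above can be reproduced by hand against any single local Redis, for example the one on port 7001 started earlier; this snippet (again using the third-party `redis` package) plays both sides of the race:

```python
import redis

r = redis.Redis(port=7001)

r.set('k', 'v')
dumped = r.dump('k')       # the migration scan captures the value
r.delete('k')              # a client deletes the key through the importing proxy
r.restore('k', 0, dumped)  # a late RESTORE from the scan silently resurrects it
assert r.get('k') == b'v'  # the deletion is lost
```

The key lock plus having the migrating side push the key via `UMSYNC` serializes these two operations, so a committed deletion can no longer be undone.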
86 | ## The Performance
87 | As a result, during the migration, the workload of the migrating and importing proxies is quite balanced.
88 | The migrating proxy uses about 130% of a CPU core and the importing proxy about 80%.
89 |
90 | And it only took **less than a minute** to migrate 1GB of data.
91 |
92 | In our test, while benchmarking during the migration,
93 | the throughput dropped from 50k to 28k and then gradually increased to 40k.
94 | This is because `SCAN`, `DUMP`, and `RESTORE` consume a lot of Redis throughput
95 | on both the migrating and the importing side.
96 | But once a key has been migrated to the importing server proxy,
97 | the proxy only needs to send one additional `EXISTS` command before each request.
98 |
99 | After the migration is committed, the throughput doubles.
100 |
-------------------------------------------------------------------------------- /docs/undermoon-logo-raw.svg: --------------------------------------------------------------------------------
1 |
2 |
-------------------------------------------------------------------------------- /examples/Dockerfile-undermoon-release: --------------------------------------------------------------------------------
1 | # Use https://crates.io/crates/cargo-chef to cache dependencies.
2 |
3 | FROM rust:1.64-buster as planner
4 | WORKDIR /undermoon
5 | RUN cargo install cargo-chef
6 | COPY src /undermoon/src
7 | COPY Cargo.toml Cargo.lock /undermoon/
8 | RUN cargo chef prepare --recipe-path recipe.json
9 |
10 | FROM rust:1.64-buster as cacher
11 | WORKDIR /undermoon
12 | RUN cargo install cargo-chef
13 | COPY --from=planner /undermoon/recipe.json recipe.json
14 | RUN cargo chef cook --release --recipe-path recipe.json
15 |
16 | FROM rust:1.64-buster as builder
17 | WORKDIR /undermoon
18 | COPY src /undermoon/src
19 | COPY Cargo.toml Cargo.lock /undermoon/
20 | # Copy over the cached dependencies
21 | COPY --from=cacher /undermoon/target target
22 | COPY --from=cacher $CARGO_HOME $CARGO_HOME
23 | RUN cargo build --release
24 |
25 | FROM buildpack-deps:buster-curl as undermoon
26 | RUN set -ex; \
27 |     apt-get update; \
28 |     apt-get install -y --no-install-recommends \
29 |     libssl-dev
30 | WORKDIR /undermoon
31 | COPY --from=builder /undermoon/target/release/server_proxy /bin
32 | COPY --from=builder /undermoon/target/release/coordinator /bin
33 | COPY --from=builder /undermoon/target/release/mem_broker /bin
34 |
-------------------------------------------------------------------------------- /examples/Dockerfile-undermoon-test: --------------------------------------------------------------------------------
1 | # Use https://crates.io/crates/cargo-chef to cache dependencies.
2 |
3 | FROM rust:1.64-buster as planner
4 | WORKDIR /undermoon
5 | RUN cargo install cargo-chef
6 | COPY src /undermoon/src
7 | COPY Cargo.toml Cargo.lock /undermoon/
8 | RUN cargo chef prepare --recipe-path recipe.json
9 |
10 | FROM rust:1.64-buster as cacher
11 | WORKDIR /undermoon
12 | RUN cargo install cargo-chef
13 | COPY --from=planner /undermoon/recipe.json recipe.json
14 | RUN cargo chef cook --recipe-path recipe.json
15 |
16 | FROM rust:1.64-buster as builder
17 | WORKDIR /undermoon
18 | COPY src /undermoon/src
19 | COPY Cargo.toml Cargo.lock /undermoon/
20 | # Copy over the cached dependencies
21 | COPY --from=cacher /undermoon/target target
22 | COPY --from=cacher $CARGO_HOME $CARGO_HOME
23 | RUN cargo build
24 |
25 | FROM buildpack-deps:buster-curl as undermoon
26 | RUN set -ex; \
27 |     apt-get update; \
28 |     apt-get install -y --no-install-recommends \
29 |     libssl-dev
30 | WORKDIR /undermoon
31 | COPY --from=builder /undermoon/target/debug/server_proxy /bin
32 | COPY --from=builder /undermoon/target/debug/coordinator /bin
33 | COPY --from=builder /undermoon/target/debug/mem_broker /bin
34 |
-------------------------------------------------------------------------------- /examples/broker_external_http_storage.py: --------------------------------------------------------------------------------
1 | # FLASK_APP=examples/broker_external_http_storage.py FLASK_RUN_PORT=9999 flask run
2 |
3 | import json
4 |
5 | from flask import Flask, request, jsonify, abort
6 |
7 |
8 | app = Flask(__name__)
9 |
10 | STORE_PATH = '/api/v1/store/'
11 |
12 | version = 0
13 | store = None
14 |
15 |
16 | # This is configured in mem-broker.toml
17 | storage_name = 'my_storage_name'
18 | storage_password = 'somepassword'
19 |
20 |
21 | def check_auth(username, password):
22 |     print('user:pass', username, password)
23 |     if username != storage_name or password != storage_password:
24 |         abort(401)
25 |
26 |
27 | @app.route(STORE_PATH + '<name>', methods=['GET'])
28 | def get(name):
29 |     response = {
30 |         'version': str(version) if version is not None else None,
31 |         'store': store,
32 |     }
33 |     return jsonify(response)
34 |
35 |
36 | @app.route(STORE_PATH + '<name>', methods=['PUT'])
37 | def update(name):
38 |     auth = request.authorization
39 |     check_auth(auth.username, auth.password)
40 |
41 |     content = request.get_json()
42 |     global version, store
43 |     v = content['version']
44 |     s = content['store']
45 |
46 |     if store is not None:
47 |         if v is None:
48 |             abort(400, 'version is empty')
49 |         v = int(v)
50 |         if v < version:
51 |             abort(409, 'version conflict')
52 |         if store == s:
53 |             return ''
54 |
55 |     if v is None:
56 |         v = 0
57 |     version = int(v) + 1
58 |     store = s
59 |     return ''
60 |
-------------------------------------------------------------------------------- /examples/mem-broker/coordinator1.toml: --------------------------------------------------------------------------------
1 | broker_address = "mem_broker:7799"
2 | address = "127.0.0.1:8001"
3 | reporter_id = "127.0.0.1:8001"
4 |
-------------------------------------------------------------------------------- /examples/mem-broker/coordinator2.toml: --------------------------------------------------------------------------------
1 | broker_address = "mem_broker:7799"
2 | address = "127.0.0.1:8002"
3 | reporter_id = "127.0.0.1:8002"
4 |
-------------------------------------------------------------------------------- /examples/mem-broker/init.sh: --------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | API_VERSION='v3'
4 |
5 | # Add proxy to mem-broker
6 | curl -XPOST -H 'Content-Type: application/json' "http://localhost:7799/api/${API_VERSION}/proxies/meta" -d '{"proxy_address": "server_proxy1:6001", "nodes": ["server_proxy1:7001", "server_proxy1:7002"]}'
7 | curl -XPOST -H 'Content-Type: application/json' "http://localhost:7799/api/${API_VERSION}/proxies/meta" -d '{"proxy_address": "server_proxy2:6002", "nodes": ["server_proxy2:7003", "server_proxy2:7004"]}'
8 | curl -XPOST -H 'Content-Type: application/json' "http://localhost:7799/api/${API_VERSION}/proxies/meta" -d '{"proxy_address": "server_proxy3:6003", "nodes": ["server_proxy3:7005", "server_proxy3:7006"]}'
9 | curl -XPOST -H 'Content-Type: application/json' "http://localhost:7799/api/${API_VERSION}/proxies/meta" -d '{"proxy_address": "server_proxy4:6004", "nodes": ["server_proxy4:7007", "server_proxy4:7008"]}'
10 | curl -XPOST -H 'Content-Type: application/json' "http://localhost:7799/api/${API_VERSION}/proxies/meta" -d '{"proxy_address": "server_proxy5:6005", "nodes": ["server_proxy5:7009", "server_proxy5:7010"]}'
11 | curl -XPOST -H 'Content-Type: application/json' "http://localhost:7799/api/${API_VERSION}/proxies/meta" -d '{"proxy_address": "server_proxy6:6006", "nodes": ["server_proxy6:7011", "server_proxy6:7012"]}'
12 |
-------------------------------------------------------------------------------- /examples/mem-broker/mem-broker.toml: --------------------------------------------------------------------------------
1 | address = "mem_broker:7799"
2 | failure_ttl = 60
3 | storage_type = "memory"
4 |
-------------------------------------------------------------------------------- /examples/mem-broker/server_proxy1.toml: --------------------------------------------------------------------------------
1 | address = "0.0.0.0:6001"
2 | announce_address = "server_proxy1:6001"
3 |
-------------------------------------------------------------------------------- /examples/mem-broker/server_proxy2.toml: --------------------------------------------------------------------------------
1 | address = "0.0.0.0:6002"
2 | announce_address = "server_proxy2:6002"
3 |
-------------------------------------------------------------------------------- /examples/mem-broker/server_proxy3.toml: -------------------------------------------------------------------------------- 1 | address = "0.0.0.0:6003" 2 | announce_address = "server_proxy3:6003" -------------------------------------------------------------------------------- /examples/mem-broker/server_proxy4.toml: -------------------------------------------------------------------------------- 1 | address = "0.0.0.0:6004" 2 | announce_address = "server_proxy4:6004" -------------------------------------------------------------------------------- /examples/mem-broker/server_proxy5.toml: -------------------------------------------------------------------------------- 1 | address = "0.0.0.0:6005" 2 | announce_address = "server_proxy5:6005" -------------------------------------------------------------------------------- /examples/mem-broker/server_proxy6.toml: -------------------------------------------------------------------------------- 1 | address = "0.0.0.0:6006" 2 | announce_address = "server_proxy6:6006" -------------------------------------------------------------------------------- /examples/run_broker.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Run it manually and rerun docker-compose. 3 | # cargo build 4 | mem_broker config/mem-broker.toml 5 | -------------------------------------------------------------------------------- /examples/run_coordinator.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Run it manually and rerun docker-compose. 3 | # cargo build 4 | coordinator config/coordinator.toml 5 | -------------------------------------------------------------------------------- /examples/run_proxy.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Run it manually and rerun docker-compose. 
3 | # cargo build 4 | server_proxy config/server_proxy.toml 5 | -------------------------------------------------------------------------------- /local_tests/1/redis.conf: -------------------------------------------------------------------------------- 1 | bind 127.0.0.1 2 | maxmemory-policy allkeys-lru 3 | maxmemory 2147483648 4 | -------------------------------------------------------------------------------- /local_tests/2/redis.conf: -------------------------------------------------------------------------------- 1 | bind 127.0.0.1 2 | maxmemory-policy allkeys-lru 3 | maxmemory 2147483648 4 | -------------------------------------------------------------------------------- /local_tests/redis_cluster/1/redis.conf: -------------------------------------------------------------------------------- 1 | bind 127.0.0.1 2 | port 7001 3 | maxmemory-policy allkeys-lru 4 | maxmemory 2147483648 5 | cluster-enabled yes 6 | cluster-announce-ip 127.0.0.1 7 | cluster-announce-port 7001 8 | cluster-announce-bus-port 17001 9 | -------------------------------------------------------------------------------- /local_tests/redis_cluster/2/redis.conf: -------------------------------------------------------------------------------- 1 | bind 127.0.0.1 2 | port 7002 3 | maxmemory-policy allkeys-lru 4 | maxmemory 2147483648 5 | cluster-enabled yes 6 | cluster-announce-ip 127.0.0.1 7 | cluster-announce-port 7002 8 | cluster-announce-bus-port 17002 -------------------------------------------------------------------------------- /rust-toolchain: -------------------------------------------------------------------------------- 1 | stable 2 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | edition = "2018" 2 | 3 | newline_style = "Unix" 4 | use_field_init_shorthand = true 5 | use_try_shorthand = true 6 | -------------------------------------------------------------------------------- /scripts/dkclean.sh: -------------------------------------------------------------------------------- 1 | docker images | grep none | awk '{print $3}' | xargs docker rmi 2 | 3 | -------------------------------------------------------------------------------- /scripts/dkkill.sh: -------------------------------------------------------------------------------- 1 | docker ps | grep -v 'CONTAINER ID' | awk '{print $1}' | xargs docker kill 2 | -------------------------------------------------------------------------------- /scripts/dkrmi.sh: -------------------------------------------------------------------------------- 1 | docker images -a | grep none | awk '{print $3}' | xargs docker rmi 2 | -------------------------------------------------------------------------------- /scripts/dksh.sh: -------------------------------------------------------------------------------- 1 | did=$(docker ps | grep $1 | awk '{print $1}' | head -n1) 2 | echo $did 3 | docker exec -it $did /bin/bash 4 | -------------------------------------------------------------------------------- /scripts/init_single_server_proxy.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | redis-cli -h localhost -p 5299 UMCTL SETCLUSTER v2 2 NOFLAGS mydb 127.0.0.1:6379 1 0-16383 4 | 5 | -------------------------------------------------------------------------------- /scripts/loop_migration_test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env 
bash 2 | 3 | one_shot="$1" 4 | if [ "${one_shot}" == 'one_shot' ]; then 5 | echo 'just run once' 6 | fi 7 | 8 | migration_scan_interval=0 9 | 10 | function wait_for_migration() { 11 | while true; do 12 | sleep 1 13 | 14 | local r 15 | r=$(redis-cli -p 6001 UMCTL INFOMGR) 16 | if [ "${r}" == '' ]; then 17 | continue 18 | fi 19 | echo "Migration: ${r}" 20 | 21 | r=$(redis-cli -p 6002 UMCTL INFOMGR) 22 | if [ "${r}" == '' ]; then 23 | continue 24 | fi 25 | echo "Migration: ${r}" 26 | 27 | return 28 | done 29 | } 30 | 31 | function get_epoch() { 32 | local epoch 33 | epoch=$(redis-cli -p 6001 UMCTL GETEPOCH) 34 | local epoch2 35 | epoch2=$(redis-cli -p 6002 UMCTL GETEPOCH) 36 | if [ "${epoch2}" -lt "${epoch}" ]; then 37 | epoch="${epoch2}" 38 | fi 39 | echo "${epoch}" 40 | } 41 | 42 | expand=true 43 | 44 | while true; do 45 | epoch=$(get_epoch) 46 | epoch=$((epoch+1)) 47 | echo "Start to scale. Epoch: ${epoch}" 48 | date 49 | 50 | if [ "${expand}" = true ]; then 51 | echo 'start scaling out' 52 | expand=false 53 | 54 | redis-cli -p 6001 UMCTL SETCLUSTER v2 "${epoch}" NOFLAGS mydb \ 55 | 127.0.0.1:7001 1 0-8000 \ 56 | 127.0.0.1:7001 migrating 1 8001-16383 "${epoch}" 127.0.0.1:6001 127.0.0.1:7001 127.0.0.1:6002 127.0.0.1:7002 \ 57 | PEER 127.0.0.1:6002 importing 1 8001-16383 "${epoch}" 127.0.0.1:6001 127.0.0.1:7001 127.0.0.1:6002 127.0.0.1:7002 \ 58 | config migration_scan_interval ${migration_scan_interval} 59 | redis-cli -p 6002 UMCTL SETCLUSTER v2 "${epoch}" NOFLAGS mydb \ 60 | 127.0.0.1:7002 importing 1 8001-16383 "${epoch}" 127.0.0.1:6001 127.0.0.1:7001 127.0.0.1:6002 127.0.0.1:7002 \ 61 | PEER 127.0.0.1:6001 1 0-8000 \ 62 | 127.0.0.1:6001 migrating 1 8001-16383 "${epoch}" 127.0.0.1:6001 127.0.0.1:7001 127.0.0.1:6002 127.0.0.1:7002 \ 63 | config migration_scan_interval ${migration_scan_interval} 64 | 65 | wait_for_migration 66 | epoch=$(get_epoch) 67 | epoch=$((epoch+1)) 68 | echo "Start to commit. Epoch: ${epoch}" 69 | 70 | redis-cli -p 6001 UMCTL SETCLUSTER v2 "${epoch}" noflags mydb \ 71 | 127.0.0.1:7001 1 0-8000 \ 72 | PEER 127.0.0.1:6002 1 8001-16383 73 | redis-cli -p 6002 UMCTL SETCLUSTER v2 "${epoch}" noflags mydb \ 74 | 127.0.0.1:7002 1 8001-16383 \ 75 | PEER 127.0.0.1:6001 1 0-8000 76 | else 77 | echo 'start scaling down' 78 | expand=true 79 | 80 | redis-cli -p 6001 UMCTL SETCLUSTER v2 "${epoch}" NOFLAGS mydb \ 81 | 127.0.0.1:7001 1 0-8000 \ 82 | 127.0.0.1:7001 importing 1 8001-16383 "${epoch}" 127.0.0.1:6002 127.0.0.1:7002 127.0.0.1:6001 127.0.0.1:7001 \ 83 | PEER 127.0.0.1:6002 migrating 1 8001-16383 "${epoch}" 127.0.0.1:6002 127.0.0.1:7002 127.0.0.1:6001 127.0.0.1:7001 \ 84 | config migration_scan_interval ${migration_scan_interval} 85 | redis-cli -p 6002 UMCTL SETCLUSTER v2 "${epoch}" NOFLAGS mydb \ 86 | 127.0.0.1:7002 migrating 1 8001-16383 "${epoch}" 127.0.0.1:6002 127.0.0.1:7002 127.0.0.1:6001 127.0.0.1:7001 \ 87 | PEER 127.0.0.1:6001 1 0-8000 \ 88 | 127.0.0.1:6001 importing 1 8001-16383 "${epoch}" 127.0.0.1:6002 127.0.0.1:7002 127.0.0.1:6001 127.0.0.1:7001 \ 89 | config migration_scan_interval ${migration_scan_interval} 90 | 91 | wait_for_migration 92 | epoch=$(get_epoch) 93 | epoch=$((epoch+1)) 94 | echo "Start to commit. 
Epoch: ${epoch}" 95 | 96 | redis-cli -p 6001 UMCTL SETCLUSTER v2 "${epoch}" noflags mydb \ 97 | 127.0.0.1:7001 1 0-16383 98 | redis-cli -p 6002 UMCTL SETCLUSTER v2 "${epoch}" noflags mydb \ 99 | PEER 127.0.0.1:6001 1 0-16383 100 | fi 101 | 102 | date 103 | if [ "${one_shot}" == 'one_shot' ]; then 104 | break 105 | fi 106 | 107 | sleep 3 108 | done 109 | -------------------------------------------------------------------------------- /scripts/mem_store_v1_to_v2.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import json 3 | 4 | 5 | def upgrade_store_to_v2(v1_json): 6 | if v1_json['version'] != 'mem-broker-0.1': 7 | raise Exception('unexpected version: {}'.format(v1_json['version'])) 8 | new_json = v1_json 9 | new_json['version'] = 'mem-broker-0.2' 10 | new_json['enable_ordered_proxy'] = False 11 | for proxy_resource in new_json['all_proxies'].values(): 12 | proxy_resource['index'] = 0 13 | return new_json 14 | 15 | 16 | def main(): 17 | if len(sys.argv) != 3: 18 | print('Usage: python mem_store_v1_to_v2.py src_meta_file dst_meta_file') 19 | return 20 | 21 | src_file = sys.argv[1] 22 | dst_file = sys.argv[2] 23 | 24 | with open(src_file, 'r') as src: 25 | content = src.read() 26 | src_json = json.loads(content) 27 | 28 | with open(dst_file, 'w') as dst: 29 | dst.write(json.dumps(upgrade_store_to_v2(src_json))) 30 | 31 | 32 | if __name__ == '__main__': 33 | main() 34 | -------------------------------------------------------------------------------- /scripts/readme_test.sh: -------------------------------------------------------------------------------- 1 | ./examples/mem-broker/init.sh 2 | 3 | curl -XPOST -H 'Content-Type: application/json' http://localhost:7799/api/v3/clusters/meta/mycluster -d '{"node_number": 4}' 4 | curl -XPATCH -H 'Content-Type: application/json' http://localhost:7799/api/v3/clusters/nodes/mycluster -d '{"node_number": 4}' 5 | curl -XPOST http://localhost:7799/api/v3/clusters/migrations/expand/mycluster 6 | 7 | echo '\n###### Before failover' 8 | curl -s http://localhost:7799/api/v3/clusters/meta/mycluster | jq '.cluster.nodes[].proxy_address' | uniq 9 | echo '#######' 10 | 11 | docker ps | grep server_proxy5 | awk '{print $1}' | xargs docker kill 12 | sleep 3 13 | 14 | echo '\n###### After failover' 15 | curl -s http://localhost:7799/api/v3/clusters/meta/mycluster | jq '.cluster.nodes[].proxy_address' | uniq 16 | echo '#######' 17 | 18 | -------------------------------------------------------------------------------- /scripts/run_redis_cluster.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | rm -f local_tests/redis_cluster/1/dump.rdb 4 | rm -f local_tests/redis_cluster/2/dump.rdb 5 | 6 | trap "exit" INT TERM 7 | trap "kill 0" EXIT 8 | 9 | migrating_nodes_conf="migrating_redis_________________________ 127.0.0.1:7001@17001 myself,master - 0 0 1 connected 0-16383 10 | importing_redis_________________________ 127.0.0.1:7002@17002 master - 0 0 1 connected 11 | vars currentEpoch 1 lastVoteEpoch 1" 12 | importing_nodes_conf="migrating_redis_________________________ 127.0.0.1:7001@17001 master - 0 0 1 connected 0-16383 13 | importing_redis_________________________ 127.0.0.1:7002@17002 myself,master - 0 0 1 connected 14 | vars currentEpoch 1 lastVoteEpoch 1" 15 | 16 | echo "${migrating_nodes_conf}" > local_tests/redis_cluster/1/nodes.conf 17 | echo "${importing_nodes_conf}" > local_tests/redis_cluster/2/nodes.conf 18 | 19 | cd local_tests/redis_cluster/1 && 
redis-server redis.conf --port 7001 &> redis-1.log &
20 | cd local_tests/redis_cluster/2 && redis-server redis.conf --port 7002 &> redis-2.log &
21 |
22 | while true; do
23 |     sleep 1
24 |
25 |     redis-cli -p 7001 ping
26 |     if [ "$?" == '1' ]; then
27 |         continue
28 |     fi
29 |
30 |     redis-cli -p 7002 ping
31 |     if [ "$?" == '1' ]; then
32 |         continue
33 |     fi
34 |
35 |     break
36 | done
37 |
38 | echo 'redis cluster is ready'
39 |
40 | wait
41 |
-------------------------------------------------------------------------------- /scripts/run_two_shards.sh: --------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | mkdir -p local_tests/1
4 | mkdir -p local_tests/2
5 |
6 | trap "exit" INT TERM
7 | trap "kill 0" EXIT
8 |
9 | cd local_tests/1 && redis-server redis.conf --port 7001 &> redis-1.log &
10 | cd local_tests/2 && redis-server redis.conf --port 7002 &> redis-2.log &
11 |
12 | proxy="${PWD}/target/release/server_proxy"
13 | conf="${PWD}/conf/server-proxy.toml"
14 |
15 | export RUST_LOG=undermoon=info,server_proxy=info
16 | export UNDERMOON_ADDRESS=127.0.0.1:6001 UNDERMOON_ANNOUNCE_ADDRESS=127.0.0.1:6001
17 | cd local_tests/1 && "${proxy}" "${conf}" &> proxy-1.log &
18 | export UNDERMOON_ADDRESS=127.0.0.1:6002 UNDERMOON_ANNOUNCE_ADDRESS=127.0.0.1:6002
19 | cd local_tests/2 && "${proxy}" "${conf}" &> proxy-2.log &
20 |
21 | while true; do
22 |     sleep 1
23 |
24 |     redis-cli -p 6001 ping
25 |     if [ "$?" == '1' ]; then
26 |         continue
27 |     fi
28 |
29 |     redis-cli -p 6002 ping
30 |     if [ "$?" == '1' ]; then
31 |         continue
32 |     fi
33 |
34 |     break
35 | done
36 |
37 | redis-cli -p 6001 UMCTL SETCLUSTER v2 2 NOFLAGS mydb \
38 |     127.0.0.1:7001 1 0-16383
39 | redis-cli -p 6002 UMCTL SETCLUSTER v2 2 NOFLAGS mydb \
40 |     PEER 127.0.0.1:6001 1 0-16383
41 |
42 | wait
43 |
-------------------------------------------------------------------------------- /src/bin/coordinator.rs: --------------------------------------------------------------------------------
1 | extern crate futures;
2 | extern crate tokio;
3 | extern crate undermoon;
4 | #[macro_use]
5 | extern crate log;
6 | extern crate config;
7 | extern crate env_logger;
8 |
9 | use arc_swap::ArcSwap;
10 | use std::cmp::max;
11 | use std::env;
12 | use std::error::Error;
13 | use std::sync::Arc;
14 | use std::time::Duration;
15 | use undermoon::coordinator::http_mani_broker::HttpMetaManipulationBroker;
16 | use undermoon::coordinator::http_meta_broker::HttpMetaBroker;
17 | use undermoon::coordinator::service::{CoordinatorConfig, CoordinatorService};
18 | use undermoon::protocol::PooledRedisClientFactory;
19 |
20 | #[global_allocator]
21 | static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
22 |
23 | fn gen_conf() -> Result<CoordinatorConfig, Box<dyn Error>> {
24 |     let mut config_builder = config::Config::builder();
25 |     // If config file is specified, load it.
26 |     if let Some(conf_file_path) = env::args().nth(1) {
27 |         config_builder = config_builder.add_source(config::File::with_name(&conf_file_path));
28 |     }
29 |     // e.g. UNDERMOON_ADDRESS_LIST='127.0.0.1:5299'
30 |     let s = config_builder
31 |         .add_source(config::Environment::with_prefix("undermoon"))
32 |         .build()
33 |         .map_err(|e| {
34 |             warn!("failed to read config from file or env vars {:?}", e);
35 |             "failed to read config"
36 |         })?;
37 |
38 |     let address = s
39 |         .get::<String>("address")
40 |         .unwrap_or_else(|_| "127.0.0.1:6699".to_string());
41 |
42 |     let mut broker_address_list = vec![];
43 |
44 |     if let Ok(list) = s.get::<Vec<String>>("broker_address") {
45 |         info!("load multiple broker addresses {:?}", list);
46 |         broker_address_list = list;
47 |     } else {
48 |         broker_address_list.push(
49 |             s.get::<String>("broker_address")
50 |                 .unwrap_or_else(|_| "127.0.0.1:7799".to_string()),
51 |         )
52 |     }
53 |
54 |     let reporter_id = s
55 |         .get::<String>("reporter_id")
56 |         .unwrap_or_else(|_| address.clone());
57 |
58 |     let thread_number = s.get::<usize>("thread_number").unwrap_or(4);
59 |     let thread_number = max(1, thread_number);
60 |
61 |     let proxy_timeout = s.get::<u64>("proxy_timeout").unwrap_or(2);
62 |
63 |     let enable_compression = s.get::<bool>("enable_compression").unwrap_or(false);
64 |     let disable_failover = s.get::<bool>("disable_failover").unwrap_or(false);
65 |
66 |     let config = CoordinatorConfig {
67 |         address,
68 |         broker_addresses: Arc::new(ArcSwap::new(Arc::new(broker_address_list))),
69 |         reporter_id,
70 |         thread_number,
71 |         proxy_timeout,
72 |         enable_compression,
73 |         disable_failover,
74 |     };
75 |     Ok(config)
76 | }
77 |
78 | fn gen_service(
79 |     config: CoordinatorConfig,
80 | ) -> CoordinatorService<HttpMetaBroker, HttpMetaManipulationBroker, PooledRedisClientFactory> {
81 |     let http_client = reqwest::Client::new();
82 |     let data_broker = Arc::new(HttpMetaBroker::new(
83 |         config.broker_addresses.clone(),
84 |         http_client.clone(),
85 |         config.enable_compression,
86 |     ));
87 |     let mani_broker = Arc::new(HttpMetaManipulationBroker::new(
88 |         config.broker_addresses.clone(),
89 |         http_client,
90 |     ));
91 |
92 |     let timeout = Duration::new(config.proxy_timeout as u64, 0);
93 |     let pool_size = 2;
94 |     let client_factory = PooledRedisClientFactory::new(pool_size, timeout);
95 |
96 |     CoordinatorService::new(config, data_broker, mani_broker, client_factory)
97 | }
98 |
99 | fn main() -> Result<(), Box<dyn Error>> {
100 |     env_logger::init();
101 |
102 |     let config = gen_conf()?;
103 |     let thread_number = config.thread_number;
104 |
105 |     let service = gen_service(config);
106 |     let fut = async move {
107 |         if let Err(err) = service.run().await {
108 |             error!("coordinator error {:?}", err);
109 |         }
110 |     };
111 |
112 |     let runtime = tokio::runtime::Builder::new_multi_thread()
113 |         .worker_threads(thread_number)
114 |         .enable_all()
115 |         .build()?;
116 |     runtime.block_on(fut);
117 |     Ok(())
118 | }
119 |
-------------------------------------------------------------------------------- /src/broker/epoch.rs: --------------------------------------------------------------------------------
1 | use crate::protocol::{PooledRedisClientFactory, RedisClient, RedisClientFactory, Resp};
2 | use futures::future;
3 | use std::cmp::max;
4 | use std::time::Duration;
5 |
6 | pub struct EpochFetchResult {
7 |     pub max_epoch: u64,
8 |     pub failed_addresses: Vec<String>,
9 | }
10 |
11 | pub async fn fetch_max_epoch(proxy_addresses: Vec<String>) -> EpochFetchResult {
12 |     let timeout = Duration::from_secs(1);
13 |     let client_factory = PooledRedisClientFactory::new(1, timeout);
14 |
15 |     let futs: Vec<_> = proxy_addresses
16 |         .into_iter()
17 |         .map(|address| fetch_proxy_epoch(address, &client_factory))
18 |         .collect();
19 |     let results = future::join_all(futs).await;
20 |
21 |     let mut failed_addresses = vec![];
22 |     let mut max_epoch = 0;
23 |     for res in results.into_iter() {
24 |         match res {
25 |             Ok(epoch) => {
26 |                 max_epoch = max(max_epoch, epoch);
27 |             }
28 |             Err(address) => {
29 |                 failed_addresses.push(address);
30 |             }
31 |         }
32 |     }
33 |     EpochFetchResult {
34 |         max_epoch,
35 |         failed_addresses,
36 |     }
37 | }
38 |
39 | const MAX_RETRY_TIMES: usize = 30;
40 | const RETRY_INTERVAL: u64 = 1;
41 |
42 | pub async fn wait_for_proxy_epoch(proxy_addresses: Vec<String>, epoch: u64) -> Result<(), String> {
43 |     let timeout = Duration::from_secs(1);
44 |     let client_factory = PooledRedisClientFactory::new(1, timeout);
45 |
46 |     let mut i = 0;
47 |     loop {
48 |         tokio::time::sleep(Duration::from_secs(RETRY_INTERVAL)).await;
49 |         let min_epoch = match fetch_min_epoch(proxy_addresses.clone(), &client_factory).await {
50 |             Ok(min_epoch) => min_epoch,
51 |             Err(failed_address) => {
52 |                 if i >= MAX_RETRY_TIMES {
53 |                     return Err(failed_address);
54 |                 }
55 |                 i += 1;
56 |                 continue;
57 |             }
58 |         };
59 |         if min_epoch >= epoch {
60 |             return Ok(());
61 |         }
62 |         info!("waiting for proxy epoch");
63 |     }
64 | }
65 |
66 | async fn fetch_min_epoch(
67 |     proxy_addresses: Vec<String>,
68 |     client_factory: &PooledRedisClientFactory,
69 | ) -> Result<u64, String> {
70 |     let futs: Vec<_> = proxy_addresses
71 |         .into_iter()
72 |         .map(|address| fetch_proxy_epoch(address, client_factory))
73 |         .collect();
74 |     let results = future::join_all(futs).await;
75 |
76 |     let mut epoch_list = vec![];
77 |     for res in results.into_iter() {
78 |         epoch_list.push(res?);
79 |     }
80 |     Ok(epoch_list.into_iter().min().unwrap_or(0))
81 | }
82 |
83 | async fn fetch_proxy_epoch(
84 |     address: String,
85 |     client_factory: &PooledRedisClientFactory,
86 | ) -> Result<u64, String> {
87 |     let mut client = client_factory
88 |         .create_client(address.clone())
89 |         .await
90 |         .map_err(|err| {
91 |             error!(
92 |                 "Failed to create client for broker recovery: {} {}",
93 |                 address, err
94 |             );
95 |             address.clone()
96 |         })?;
97 |
98 |     let cmd = vec![b"UMCTL".to_vec(), b"GETEPOCH".to_vec()];
99 |     let resp = client.execute_single(cmd).await.map_err(|err| {
100 |         error!("Failed to send UMCTL GETEPOCH: {} {}", address, err);
101 |         address.clone()
102 |     })?;
103 |
104 |     match resp {
105 |         Resp::Integer(int_bytes) => match btoi::btoi::<u64>(&int_bytes) {
106 |             Ok(epoch) => Ok(epoch),
107 |             Err(_) => {
108 |                 error!(
109 |                     "Invalid UMCTL GETEPOCH int reply: {} {:?}",
110 |                     address, int_bytes
111 |                 );
112 |                 Err(address.clone())
113 |             }
114 |         },
115 |         other => {
116 |             error!("Invalid UMCTL GETEPOCH reply: {} {:?}", address, other);
117 |             Err(address.clone())
118 |         }
119 |     }
120 | }
121 |
-------------------------------------------------------------------------------- /src/broker/mod.rs: --------------------------------------------------------------------------------
1 | mod epoch;
2 | mod external;
3 | mod migrate;
4 | mod persistence;
5 | mod query;
6 | mod replication;
7 | mod resource;
8 | mod service;
9 | mod storage;
10 | mod store;
11 | mod update;
12 |
13 | mod ordered_proxy;
14 | mod utils;
15 |
16 | pub use self::persistence::{JsonFileStorage, MetaPersistence, MetaSyncError};
17 | pub use self::replication::{JsonMetaReplicator, MetaReplicator};
18 | pub use self::service::{
19 |     run_server, MemBrokerConfig, MemBrokerService, ReplicaAddresses, StorageConfig,
20 |     MEM_BROKER_API_VERSION,
21 | };
22 | pub use self::store::MetaStoreError;
23 |
-------------------------------------------------------------------------------- /src/broker/persistence.rs:
--------------------------------------------------------------------------------
1 | use super::store::MetaStore;
2 | use chrono::Utc;
3 | use futures::Future;
4 | use std::error::Error;
5 | use std::fmt;
6 | use std::io;
7 | use std::path::Path;
8 | use std::pin::Pin;
9 | use std::str;
10 | use tokio::fs::{rename, File};
11 | use tokio::io::{AsyncReadExt, AsyncWriteExt};
12 | use tokio::sync::Mutex;
13 |
14 | pub trait MetaPersistence {
15 |     fn store<'s>(
16 |         &'s self,
17 |         store: MetaStore,
18 |     ) -> Pin<Box<dyn Future<Output = Result<(), MetaSyncError>> + Send + 's>>;
19 |     fn load<'s>(
20 |         &'s self,
21 |     ) -> Pin<Box<dyn Future<Output = Result<Option<MetaStore>, MetaSyncError>> + Send + 's>>;
22 | }
23 |
24 | pub struct JsonFileStorage {
25 |     json_file: JsonFile,
26 |     lock: Mutex<()>,
27 | }
28 |
29 | impl JsonFileStorage {
30 |     pub fn new(filename: String) -> Self {
31 |         Self {
32 |             json_file: JsonFile::new(filename),
33 |             lock: Mutex::new(()),
34 |         }
35 |     }
36 |
37 |     async fn store_impl(&self, store: MetaStore) -> Result<(), MetaSyncError> {
38 |         let _guard = self.lock.lock().await;
39 |         self.json_file.store(store).await
40 |     }
41 |
42 |     async fn load_impl(&self) -> Result<Option<MetaStore>, MetaSyncError> {
43 |         let _guard = self.lock.lock().await;
44 |         self.json_file.load().await
45 |     }
46 | }
47 |
48 | impl MetaPersistence for JsonFileStorage {
49 |     fn store<'s>(
50 |         &'s self,
51 |         store: MetaStore,
52 |     ) -> Pin<Box<dyn Future<Output = Result<(), MetaSyncError>> + Send + 's>> {
53 |         Box::pin(self.store_impl(store))
54 |     }
55 |
56 |     fn load<'s>(
57 |         &'s self,
58 |     ) -> Pin<Box<dyn Future<Output = Result<Option<MetaStore>, MetaSyncError>> + Send + 's>> {
59 |         Box::pin(self.load_impl())
60 |     }
61 | }
62 |
63 | struct JsonFile {
64 |     filename: String,
65 | }
66 |
67 | impl JsonFile {
68 |     fn new(filename: String) -> Self {
69 |         Self { filename }
70 |     }
71 |
72 |     async fn store(&self, store: MetaStore) -> Result<(), MetaSyncError> {
73 |         let json_str = {
74 |             serde_json::to_string(&store).map_err(|err| {
75 |                 error!("failed to convert MetaStore to json {}", err);
76 |                 MetaSyncError::Json
77 |             })?
78 |         };
79 |
80 |         let data = json_str.into_bytes();
81 |
82 |         let now = Utc::now().timestamp_nanos();
83 |         let tmp_filename = format!("{}-{}", self.filename, now);
84 |         let mut tmp_file = File::create(tmp_filename.as_str())
85 |             .await
86 |             .map_err(MetaSyncError::Io)?;
87 |         tmp_file
88 |             .write_all(data.as_slice())
89 |             .await
90 |             .map_err(MetaSyncError::Io)?;
91 |
92 |         rename(tmp_filename.as_str(), self.filename.as_str())
93 |             .await
94 |             .map_err(MetaSyncError::Io)?;
95 |         Ok(())
96 |     }
97 |
98 |     async fn load(&self) -> Result<Option<MetaStore>, MetaSyncError> {
99 |         if !Path::new(self.filename.as_str()).exists() {
100 |             return Ok(None);
101 |         }
102 |
103 |         let mut file = File::open(self.filename.as_str())
104 |             .await
105 |             .map_err(MetaSyncError::Io)?;
106 |         let mut contents = vec![];
107 |         file.read_to_end(&mut contents)
108 |             .await
109 |             .map_err(MetaSyncError::Io)?;
110 |
111 |         let json_str = str::from_utf8(&contents).map_err(|err| {
112 |             error!("invalid json utf8 data {}", err);
113 |             MetaSyncError::Json
114 |         })?;
115 |
116 |         let store = serde_json::from_str(json_str).map_err(|err| {
117 |             error!("invalid json data {}", err);
118 |             MetaSyncError::Json
119 |         })?;
120 |
121 |         Ok(store)
122 |     }
123 | }
124 |
125 | #[derive(Debug)]
126 | pub enum MetaSyncError {
127 |     Io(io::Error),
128 |     Replication,
129 |     Json,
130 |     Lock,
131 | }
132 |
133 | impl MetaSyncError {
134 |     pub fn to_code(&self) -> &str {
135 |         match self {
136 |             Self::Io(_) => "PERSISTENCE_IO_ERROR",
137 |             Self::Replication => "REPLICATION_ERROR",
138 |             Self::Json => "PERSISTENCE_JSON_ERROR",
139 |             Self::Lock => "PERSISTENCE_LOCK_ERROR",
140 |         }
141 |     }
142 | }
143 |
144 | impl fmt::Display for MetaSyncError {
145 |     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
146 |         write!(f, "{}", self.to_code())
147 |     }
148 | }
149 |
150 | impl Error for MetaSyncError {
151 |     fn cause(&self) -> Option<&dyn Error> {
152 |         match self {
153 |             Self::Io(io_error) => Some(io_error),
154 |             _ => None,
155 |         }
156 |     }
157 | }
158 |
159 | impl PartialEq for MetaSyncError {
160 |     fn eq(&self, other: &Self) -> bool {
161 |         self.to_code() == other.to_code()
162 |     }
163 | }
164 |
-------------------------------------------------------------------------------- /src/broker/replication.rs: --------------------------------------------------------------------------------
1 | use super::persistence::MetaSyncError;
2 | use super::service::ReplicaAddresses;
3 | use super::store::MetaStore;
4 | use crate::broker::MEM_BROKER_API_VERSION;
5 | use futures::{future, Future};
6 | use std::pin::Pin;
7 | use std::sync::Arc;
8 |
9 | pub trait MetaReplicator {
10 |     fn sync_meta<'s>(
11 |         &'s self,
12 |         store: Arc<MetaStore>,
13 |     ) -> Pin<Box<dyn Future<Output = Result<(), MetaSyncError>> + Send + 's>>;
14 | }
15 |
16 | pub struct JsonMetaReplicator {
17 |     replica_addresses: ReplicaAddresses,
18 |     client: reqwest::Client,
19 | }
20 |
21 | impl JsonMetaReplicator {
22 |     pub fn new(replica_addresses: ReplicaAddresses, client: reqwest::Client) -> Self {
23 |         Self {
24 |             replica_addresses,
25 |             client,
26 |         }
27 |     }
28 |
29 |     fn gen_url(address: &str) -> String {
30 |         format!("http://{}/api/{}/metadata", address, MEM_BROKER_API_VERSION,)
31 |     }
32 |
33 |     async fn sync_one_replica(
34 |         &self,
35 |         meta_store: Arc<MetaStore>,
36 |         replica_address: &str,
37 |     ) -> Result<(), MetaSyncError> {
38 |         let url = Self::gen_url(replica_address);
39 |
40 |         let response = self
41 |             .client
42 |             .put(&url)
43 |             .json(&(*meta_store))
44 |             .send()
45 |             .await
46 |             .map_err(|e| {
47 |                 error!("Failed to sync meta to replica {} {}", replica_address, e);
48 |                 MetaSyncError::Replication
49 |             })?;
50 |
51 |         let status = response.status();
52 |
53 |         if !status.is_success() {
54 |             error!("Failed to sync meta to replica: status code {:?}", status);
55 |             let result = response.text().await;
56 |             match result {
57 |                 Ok(body) => {
58 |                     error!("Failed to sync meta to replica: Error body: {:?}", body);
59 |                 }
60 |                 Err(e) => {
61 |                     error!(
62 |                         "Failed to sync meta to replica: Failed to get body: {:?}",
63 |                         e
64 |                     );
65 |                 }
66 |             }
67 |             return Err(MetaSyncError::Replication);
68 |         }
69 |         Ok(())
70 |     }
71 |
72 |     async fn sync_meta_impl(&self, store: Arc<MetaStore>) -> Result<(), MetaSyncError> {
73 |         let replica_addresses = self.replica_addresses.load();
74 |         let futs = replica_addresses
75 |             .iter()
76 |             .map(|address| self.sync_one_replica(store.clone(), address.as_str()))
77 |             .collect::<Vec<_>>();
78 |         let res_list = future::join_all(futs).await;
79 |         for res in res_list.into_iter() {
80 |             res?;
81 |         }
82 |         Ok(())
83 |     }
84 | }
85 |
86 | impl MetaReplicator for JsonMetaReplicator {
87 |     fn sync_meta<'s>(
88 |         &'s self,
89 |         store: Arc<MetaStore>,
90 |     ) -> Pin<Box<dyn Future<Output = Result<(), MetaSyncError>> + Send + 's>> {
91 |         Box::pin(self.sync_meta_impl(store))
92 |     }
93 | }
94 |
-------------------------------------------------------------------------------- /src/broker/resource.rs: --------------------------------------------------------------------------------
1 | use super::store::{MetaStore, MetaStoreError};
2 | use std::collections::HashMap;
3 |
4 | pub struct ResourceChecker {
5 |     store: MetaStore,
6 | }
7 |
8 | impl ResourceChecker {
9 |     pub fn new(store: MetaStore) -> Self {
10 |         Self { store }
11 |     }
12 |
13 |     // Returns hosts that can't recover because of not having enough resources.
14 |     pub fn check_failure_tolerance(
15 |         &self,
16 |         migration_limit: u64,
17 |     ) -> Result<Vec<String>, MetaStoreError> {
18 |         // host => proxy addresses
19 |         let mut proxy_map = HashMap::new();
20 |         for (proxy_address, proxy_resource) in self.store.all_proxies.iter() {
21 |             proxy_map
22 |                 .entry(proxy_resource.host.clone())
23 |                 .or_insert_with(Vec::new)
24 |                 .push(proxy_address.clone());
25 |         }
26 |
27 |         let mut hosts = vec![];
28 |         for (host, proxy_addresses) in proxy_map.iter() {
29 |             let enough_resource =
30 |                 self.check_failure_tolerance_for_one_host(proxy_addresses, migration_limit)?;
31 |             if !enough_resource {
32 |                 hosts.push(host.clone());
33 |             }
34 |         }
35 |
36 |         Ok(hosts)
37 |     }
38 |
39 |     fn check_failure_tolerance_for_one_host(
40 |         &self,
41 |         proxy_addresses: &[String],
42 |         migration_limit: u64,
43 |     ) -> Result<bool, MetaStoreError> {
44 |         let mut store = self.store.clone();
45 |         for proxy_address in proxy_addresses.iter() {
46 |             match store.replace_failed_proxy(proxy_address.clone(), migration_limit) {
47 |                 Ok(_) => (),
48 |                 Err(MetaStoreError::NoAvailableResource) => return Ok(false),
49 |                 Err(err) => {
50 |                     error!("ResourceChecker failed to replace failed proxy: {}", err);
51 |                     return Err(err);
52 |                 }
53 |             }
54 |         }
55 |         Ok(true)
56 |     }
57 | }
58 |
59 | #[cfg(test)]
60 | mod tests {
61 |     use super::super::utils::tests::add_testing_proxies;
62 |     use super::*;
63 |     use crate::common::config::ClusterConfig;
64 |
65 |     #[test]
66 |     fn test_no_cluster() {
67 |         let mut store = MetaStore::new(false);
68 |         add_testing_proxies(&mut store, 4, 2);
69 |
70 |         let checker = ResourceChecker::new(store);
71 |         let res = checker.check_failure_tolerance(2);
72 |         let proxies = res.unwrap();
73 |         assert!(proxies.is_empty());
74 |     }
75 |
76 |     #[test]
77 |     fn test_enough_resources() {
78 |         let mut store = MetaStore::new(false);
79 |         add_testing_proxies(&mut store, 4, 2);
80 | store 81 | .add_cluster("test_cluster".to_string(), 12, ClusterConfig::default()) 82 | .unwrap(); 83 | 84 | let checker = ResourceChecker::new(store); 85 | let res = checker.check_failure_tolerance(2); 86 | let proxies = res.unwrap(); 87 | assert!(proxies.is_empty()); 88 | } 89 | 90 | #[test] 91 | fn test_no_enough_resource() { 92 | let mut store = MetaStore::new(false); 93 | add_testing_proxies(&mut store, 4, 2); 94 | store 95 | .add_cluster("test_cluster".to_string(), 16, ClusterConfig::default()) 96 | .unwrap(); 97 | 98 | let checker = ResourceChecker::new(store); 99 | let res = checker.check_failure_tolerance(2); 100 | let proxies = res.unwrap(); 101 | assert!(!proxies.is_empty()); 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /src/broker/utils.rs: -------------------------------------------------------------------------------- 1 | #[cfg(test)] 2 | pub mod tests { 3 | use super::super::store::MetaStore; 4 | use crate::common::cluster::{Cluster, Role, SlotRangeTag}; 5 | use crate::common::utils::SLOT_NUM; 6 | 7 | pub fn add_testing_proxies(store: &mut MetaStore, host_num: usize, proxy_per_host: usize) { 8 | for host_index in 1..=host_num { 9 | for i in 1..=proxy_per_host { 10 | let proxy_address = format!("127.0.0.{}:70{:02}", host_index, i); 11 | let node_addresses = [ 12 | format!("127.0.0.{}:60{:02}", host_index, i * 2), 13 | format!("127.0.0.{}:60{:02}", host_index, i * 2 + 1), 14 | ]; 15 | let index = (host_index - 1) * proxy_per_host + (i - 1); 16 | store 17 | .add_proxy(proxy_address, node_addresses, None, Some(index)) 18 | .unwrap(); 19 | } 20 | } 21 | } 22 | 23 | pub fn check_cluster_and_proxy(store: &MetaStore) { 24 | for cluster in store.clusters.values() { 25 | for chunk in cluster.chunks.iter() { 26 | for proxy_address in chunk.proxy_addresses.iter() { 27 | let proxy = store.all_proxies.get(proxy_address).unwrap(); 28 | assert_eq!(proxy.cluster.as_ref().unwrap(), &cluster.name); 29 | } 30 | } 31 | } 32 | for proxy in store.all_proxies.values() { 33 | let cluster_name = match proxy.cluster.as_ref() { 34 | Some(name) => name, 35 | None => continue, 36 | }; 37 | let cluster = store.clusters.get(cluster_name).unwrap(); 38 | assert!(cluster.chunks.iter().any(|chunk| chunk 39 | .proxy_addresses 40 | .iter() 41 | .any(|addr| addr == &proxy.proxy_address))); 42 | } 43 | 44 | assert!(store.check().is_ok()); 45 | } 46 | 47 | pub fn check_cluster_slots(cluster: Cluster, node_num: usize) { 48 | assert_eq!(cluster.get_nodes().len(), node_num); 49 | let master_num = cluster.get_nodes().len() / 2; 50 | let average_slots_num = SLOT_NUM / master_num; 51 | 52 | let mut visited = Vec::with_capacity(SLOT_NUM); 53 | for _ in 0..SLOT_NUM { 54 | visited.push(false); 55 | } 56 | 57 | for node in cluster.get_nodes() { 58 | let slots = node.get_slots(); 59 | if node.get_role() == Role::Master { 60 | assert_eq!(slots.len(), 1); 61 | assert_eq!(slots[0].tag, SlotRangeTag::None); 62 | let slots_num = slots[0].get_range_list().get_slots_num(); 63 | let delta = slots_num.checked_sub(average_slots_num).unwrap(); 64 | assert!(delta <= 1); 65 | 66 | for range in slots[0].get_range_list().get_ranges().iter() { 67 | for i in range.start()..=range.end() { 68 | assert!(!visited.get(i).unwrap()); 69 | *visited.get_mut(i).unwrap() = true; 70 | } 71 | } 72 | 73 | let mut sorted_range_list = slots[0].get_range_list().clone(); 74 | sorted_range_list.compact(); 75 | assert_eq!(&sorted_range_list, slots[0].get_range_list()); 76 | } else { 77 | 
assert!(slots.is_empty());
78 |             }
79 |         }
80 |         for v in visited.iter() {
81 |             assert!(*v);
82 |         }
83 |
84 |         let mut last_node_slot_num = usize::max_value();
85 |         for node in cluster.get_nodes() {
86 |             if node.get_role() == Role::Replica {
87 |                 continue;
88 |             }
89 |             let curr_num = node
90 |                 .get_slots()
91 |                 .iter()
92 |                 .map(|slots| slots.get_range_list().get_slots_num())
93 |                 .sum();
94 |             assert!(last_node_slot_num >= curr_num);
95 |             last_node_slot_num = curr_num;
96 |         }
97 |     }
98 | }
99 |
-------------------------------------------------------------------------------- /src/common/atomic_lock.rs: --------------------------------------------------------------------------------
1 | use std::sync::atomic::{AtomicBool, Ordering};
2 |
3 | pub struct AtomicLock {
4 |     flag: AtomicBool,
5 | }
6 |
7 | impl Default for AtomicLock {
8 |     fn default() -> Self {
9 |         Self {
10 |             flag: AtomicBool::new(false),
11 |         }
12 |     }
13 | }
14 |
15 | impl AtomicLock {
16 |     pub fn new() -> Self {
17 |         Self {
18 |             flag: AtomicBool::new(false),
19 |         }
20 |     }
21 |
22 |     pub fn lock(&self) -> Option<AtomicLockGuard> {
23 |         if self
24 |             .flag
25 |             .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
26 |             .is_err()
27 |         {
28 |             return None;
29 |         }
30 |         Some(AtomicLockGuard::new(self))
31 |     }
32 | }
33 |
34 | pub struct AtomicLockGuard<'a> {
35 |     flag: &'a AtomicBool,
36 | }
37 |
38 | impl<'a> AtomicLockGuard<'a> {
39 |     fn new(lock: &'a AtomicLock) -> Self {
40 |         Self { flag: &lock.flag }
41 |     }
42 | }
43 |
44 | impl<'a> Drop for AtomicLockGuard<'a> {
45 |     fn drop(&mut self) {
46 |         self.flag.store(false, Ordering::SeqCst);
47 |     }
48 | }
49 |
50 | #[cfg(test)]
51 | mod tests {
52 |     use super::*;
53 |
54 |     #[test]
55 |     fn test_atomic_lock() {
56 |         let lock = AtomicLock::default();
57 |         {
58 |             let guard = lock.lock();
59 |             assert!(guard.is_some());
60 |
61 |             assert!(lock.lock().is_none());
62 |             assert!(lock.lock().is_none());
63 |         }
64 |         assert!(lock.lock().is_some());
65 |     }
66 | }
67 |
-------------------------------------------------------------------------------- /src/common/biatomic.rs: --------------------------------------------------------------------------------
1 | use std::sync::atomic::{AtomicU64, Ordering};
2 |
3 | pub struct BiAtomicU32 {
4 |     inner: AtomicU64,
5 | }
6 |
7 | impl BiAtomicU32 {
8 |     pub fn new(num1: u32, num2: u32) -> Self {
9 |         Self {
10 |             inner: AtomicU64::new(Self::combine_two_num(num1, num2)),
11 |         }
12 |     }
13 |
14 |     pub fn load(&self) -> (u32, u32) {
15 |         let num = self.inner.load(Ordering::SeqCst);
16 |         Self::split_to_two_num(num)
17 |     }
18 |
19 |     pub fn compare_and_apply<F1, F2>(&self, num1_func: F1, num2_func: F2) -> (u32, u32)
20 |     where
21 |         F1: Fn(u32) -> u32,
22 |         F2: Fn(u32) -> u32,
23 |     {
24 |         loop {
25 |             let old_num = self.inner.load(Ordering::SeqCst);
26 |             let (old_num1, old_num2) = Self::split_to_two_num(old_num);
27 |             let new_num1 = num1_func(old_num1);
28 |             let new_num2 = num2_func(old_num2);
29 |             let new_num = Self::combine_two_num(new_num1, new_num2);
30 |             let success = self
31 |                 .inner
32 |                 .compare_exchange(old_num, new_num, Ordering::SeqCst, Ordering::SeqCst)
33 |                 .is_ok();
34 |             if success {
35 |                 return (old_num1, old_num2);
36 |             }
37 |         }
38 |     }
39 |
40 |     fn combine_two_num(num1: u32, num2: u32) -> u64 {
41 |         let n1 = (num1 as u64) << 32;
42 |         let n2 = num2 as u64;
43 |         n1 + n2
44 |     }
45 |
46 |     fn split_to_two_num(num: u64) -> (u32, u32) {
47 |         let part2_mask = (!0u32) as u64;
48 |         let part1_mask = part2_mask << 32;
49 |         let part1 = (num & part1_mask) >> 32;
50 |         let part2 = num & part2_mask;
51 |         (part1 as u32, part2 as u32)
52 |     }
53 | }
54 |
55 | #[cfg(test)]
56 | mod tests {
57 |     use super::*;
58 |
59 |     #[test]
60 |     fn test_num_conversion() {
61 |         let bi = BiAtomicU32::new(233, 666);
62 |         assert_eq!(bi.load(), (233, 666));
63 |         let old = bi.compare_and_apply(|n| n + 1, |n| n + 1);
64 |         assert_eq!(old, (233, 666));
65 |         assert_eq!(bi.load(), (234, 667));
66 |         let old = bi.compare_and_apply(|n| n - 1, |n| n + 1);
67 |         assert_eq!(old, (234, 667));
68 |         assert_eq!(bi.load(), (233, 668));
69 |     }
70 |
71 |     #[test]
72 |     fn test_bounder() {
73 |         const M: u32 = u32::MAX;
74 |         {
75 |             let bi = BiAtomicU32::new(0, M);
76 |             assert_eq!(bi.load(), (0, M));
77 |             let old = bi.compare_and_apply(|n| n + 1, |n| n - 1);
78 |             assert_eq!(old, (0, M));
79 |             assert_eq!(bi.load(), (1, M - 1));
80 |             let old = bi.compare_and_apply(|n| n.overflowing_sub(2).0, |n| n.overflowing_add(2).0);
81 |             assert_eq!(old, (1, M - 1));
82 |             assert_eq!(bi.load(), (M, 0));
83 |         }
84 |         {
85 |             let bi = BiAtomicU32::new(M, 0);
86 |             assert_eq!(bi.load(), (M, 0));
87 |             let old = bi.compare_and_apply(|n| n - 1, |n| n + 1);
88 |             assert_eq!(old, (M, 0));
89 |             assert_eq!(bi.load(), (M - 1, 1));
90 |             let old = bi.compare_and_apply(|n| n.overflowing_add(2).0, |n| n.overflowing_sub(2).0);
91 |             assert_eq!(old, (M - 1, 1));
92 |             assert_eq!(bi.load(), (0, M));
93 |         }
94 |     }
95 | }
96 |
-------------------------------------------------------------------------------- /src/common/future_group.rs: --------------------------------------------------------------------------------
1 | use futures::channel::oneshot;
2 | use futures::task::{Context, Poll};
3 | use futures::Future;
4 | use pin_project::{pin_project, pinned_drop};
5 | use std::pin::Pin;
6 |
7 | #[allow(dead_code)]
8 | pub fn new_future_group<FA: Future, FB: Future>(
9 |     future1: FA,
10 |     future2: FB,
11 | ) -> (FutureGroupHandle<FA>, FutureGroupHandle<FB>) {
12 |     let (s1, r1) = oneshot::channel();
13 |     let (s2, r2) = oneshot::channel();
14 |     let handle1 = FutureGroupHandle {
15 |         inner: future1,
16 |         signal_sender: Some(s1),
17 |         signal_receiver: r2,
18 |     };
19 |     let handle2 = FutureGroupHandle {
20 |         inner: future2,
21 |         signal_sender: Some(s2),
22 |         signal_receiver: r1,
23 |     };
24 |     (handle1, handle2)
25 | }
26 |
27 | #[pin_project(PinnedDrop)]
28 | pub struct FutureGroupHandle<F: Future> {
29 |     #[pin]
30 |     inner: F,
31 |     #[pin]
32 |     signal_receiver: oneshot::Receiver<()>,
33 |     signal_sender: Option<oneshot::Sender<()>>,
34 | }
35 |
36 | impl<F: Future> Future for FutureGroupHandle<F> {
37 |     type Output = Option<F::Output>;
38 |
39 |     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
40 |         let this = self.project();
41 |
42 |         match this.inner.poll(cx) {
43 |             Poll::Pending => (),
44 |             Poll::Ready(output) => {
45 |                 if let Some(sender) = this.signal_sender.take() {
46 |                     if let Err(()) = sender.send(()) {
47 |                         debug!("failed to signal");
48 |                     }
49 |                 }
50 |                 return Poll::Ready(Some(output));
51 |             }
52 |         }
53 |
54 |         this.signal_receiver.poll(cx).map(|_| None)
55 |     }
56 | }
57 |
58 | #[pinned_drop]
59 | impl<F: Future> PinnedDrop for FutureGroupHandle<F> {
60 |     fn drop(mut self: Pin<&mut Self>) {
61 |         self.project()
62 |             .signal_sender
63 |             .take()
64 |             .and_then(|sender| sender.send(()).ok())
65 |             .unwrap_or_else(|| debug!("FutureGroupHandle already closed"))
66 |     }
67 | }
68 |
69 | pub fn new_auto_drop_future<F: Future>(future: F) -> (FutureAutoStop<F>, FutureAutoStopHandle) {
70 |     let (s, r) = oneshot::channel();
71 |     let handle = FutureAutoStopHandle {
72 |         signal_sender: Some(s),
73 |     };
74 |     let fut = FutureAutoStop {
75 |         inner: future,
76 |         signal_receiver: r,
77 |     };
78 |     (fut, handle)
79 | }
80 |
81 | #[pin_project]
82 | pub struct FutureAutoStop<F: Future> {
83 |     #[pin]
84 |     inner: F,
85 |     #[pin]
86 |     signal_receiver: oneshot::Receiver<()>,
87 | }
88 |
89 | pub struct FutureAutoStopHandle {
90 |     signal_sender: Option<oneshot::Sender<()>>,
91 | }
92 |
93 | impl Drop for FutureAutoStopHandle {
94 |     fn drop(&mut self) {
95 |         self.signal_sender
96 |             .take()
97 |             .and_then(|sender| sender.send(()).ok())
98 |             .unwrap_or_else(|| debug!("FutureAutoStopHandle already closed"))
99 |     }
100 | }
101 |
102 | impl<F: Future> Future for FutureAutoStop<F> {
103 |     type Output = Option<F::Output>;
104 |
105 |     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
106 |         let this = self.project();
107 |
108 |         match this.inner.poll(cx) {
109 |             Poll::Pending => (),
110 |             Poll::Ready(output) => return Poll::Ready(Some(output)),
111 |         }
112 |
113 |         this.signal_receiver.poll(cx).map(|_| None)
114 |     }
115 | }
116 |
-------------------------------------------------------------------------------- /src/common/mod.rs: --------------------------------------------------------------------------------
1 | pub mod atomic_lock;
2 | pub mod batch;
3 | pub mod biatomic;
4 | pub mod cluster;
5 | pub mod config;
6 | pub mod future_group;
7 | pub mod proto;
8 | pub mod resp_execution;
9 | pub mod response;
10 | pub mod slot_lock;
11 | pub mod track;
12 | pub mod try_chunks;
13 | pub mod utils;
14 | pub mod version;
15 | pub mod yield_now;
16 |
-------------------------------------------------------------------------------- /src/common/response.rs: --------------------------------------------------------------------------------
1 | pub const OK_REPLY: &str = "OK";
2 | pub const OLD_EPOCH_REPLY: &str = "OLD_EPOCH";
3 | pub const TRY_AGAIN_REPLY: &str = "TRY_AGAIN";
4 | pub const NOT_READY_FOR_SWITCHING_REPLY: &str = "NOT_READY_FOR_SWITCHING";
5 | pub const TASK_NOT_FOUND: &str = "TASK_NOT_FOUND";
6 | pub const ERR_NOT_THE_SAME_SLOT: &str = "ERR_MULTI_SLOTS slots of the keys are not the same";
7 | pub const ERR_CLUSTER_NOT_FOUND: &str = "ERR_CLUSTER_NOT_FOUND";
8 | pub const ERR_BACKEND_CONNECTION: &str = "ERR_BACKEND_CONNECTION";
9 | pub const ERR_MOVED: &str = "MOVED";
10 | pub const CMD_NOT_SUPPORTED: &str = "ERR_COMMAND_NOT_SUPPORTED";
11 | pub const ERR_TOO_MANY_REDIRECTIONS: &str = "ERR_TOO_MANY_REDIRECTIONS";
12 | pub const MIGRATING_FINISHED: &str = "MIGRATING_FINISHED";
13 | pub const MIGRATION_TASK_NOT_FOUND: &str = "MIGRATION_TASK_NOT_FOUND";
14 | pub const ERR_MULTI_KEY_PARTIAL_ERROR: &str = "ERR_MULTI_KEY_PARTIAL_ERROR";
15 | pub const ERR_NOT_MY_META: &str = "ERR_NOT_MY_META";
16 |
-------------------------------------------------------------------------------- /src/common/slot_lock.rs: --------------------------------------------------------------------------------
1 | use super::utils::SLOT_NUM;
2 | use std::sync::atomic::{AtomicBool, Ordering};
3 |
4 | pub struct SlotMutex {
5 |     slots: Vec<AtomicBool>,
6 | }
7 |
8 | impl Default for SlotMutex {
9 |     fn default() -> Self {
10 |         Self {
11 |             slots: std::iter::repeat_with(|| AtomicBool::new(false))
12 |                 .take(SLOT_NUM)
13 |                 .collect(),
14 |         }
15 |     }
16 | }
17 |
18 | impl SlotMutex {
19 |     pub fn lock(&self, slot: usize) -> Option<SlotMutexGuard> {
20 |         if let Some(s) = self.slots.get(slot) {
21 |             if let Ok(false) = s.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) {
22 |                 return Some(SlotMutexGuard { flag: s });
23 |             }
24 |         } else {
25 |             error!("SlotMutex::lock invalid slot: {}", slot);
26 |         }
27 |         None
28 |     }
29 | }
30 |
31 | pub struct SlotMutexGuard<'a> {
32 |     flag: &'a AtomicBool,
33 | }
34 |
35 | impl<'a> Drop for SlotMutexGuard<'a> {
36 |     fn drop(&mut self) {
37 |         self.flag.store(false, Ordering::SeqCst);
38 |     }
39 | }
40 |
41 | #[cfg(test)]
42 | mod tests {
43 |     use super::*;
44 |
45 |     #[test]
46 |     fn test_slot_mutex() {
47 |         let mutex = SlotMutex::default();
48 |         for slot in [0, 5000, SLOT_NUM - 1].iter() {
49 |             let _guard = mutex.lock(*slot).unwrap();
50 |             assert!(mutex.lock(*slot).is_none());
51 |         }
52 |         assert!(mutex.lock(SLOT_NUM).is_none());
53 |     }
54 | }
55 |
-------------------------------------------------------------------------------- /src/common/track.rs: --------------------------------------------------------------------------------
1 | use chrono::{DateTime, Local};
2 | use dashmap::mapref::entry::Entry;
3 | use dashmap::DashMap;
4 | use futures::task::{Context, Poll};
5 | use futures::Future;
6 | use pin_project::{pin_project, pinned_drop};
7 | use std::fmt;
8 | use std::pin::Pin;
9 | use std::sync::atomic::{AtomicU64, Ordering};
10 | use std::sync::Arc;
11 |
12 | pub struct FutureDescription {
13 |     future_id: u64,
14 |     desc: String,
15 |     start_time: DateTime<Local>,
16 | }
17 |
18 | impl FutureDescription {
19 |     pub fn get_start_time(&self) -> DateTime<Local> {
20 |         self.start_time
21 |     }
22 | }
23 |
24 | impl fmt::Display for FutureDescription {
25 |     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
26 |         write!(
27 |             f,
28 |             "{} {} {}",
29 |             self.future_id,
30 |             self.start_time.format("%Y-%m-%d %H:%M:%S"),
31 |             self.desc
32 |         )
33 |     }
34 | }
35 |
36 | pub struct TrackedFutureRegistry {
37 |     curr_future_id: AtomicU64,
38 |     future_map: DashMap<u64, Arc<FutureDescription>>,
39 | }
40 |
41 | impl Default for TrackedFutureRegistry {
42 |     fn default() -> Self {
43 |         Self {
44 |             curr_future_id: AtomicU64::new(0),
45 |             future_map: DashMap::new(),
46 |         }
47 |     }
48 | }
49 |
50 | impl TrackedFutureRegistry {
51 |     pub fn wrap<F: Future>(
52 |         registry: Arc<TrackedFutureRegistry>,
53 |         fut: F,
54 |         desc: String,
55 |     ) -> TrackedFuture<F> {
56 |         TrackedFuture::new(fut, registry, desc)
57 |     }
58 |
59 |     pub fn register(&self, desc: String) -> u64 {
60 |         let future_id = self.curr_future_id.fetch_add(1, Ordering::Relaxed);
61 |         let future_desc = Arc::new(FutureDescription {
62 |             future_id,
63 |             desc,
64 |             start_time: Local::now(),
65 |         });
66 |         match self.future_map.entry(future_id) {
67 |             Entry::Occupied(entry) => {
68 |                 error!(
69 |                     "TrackedFutureRegistry found duplicated future id: {}, will replace it",
70 |                     *entry.get()
71 |                 );
72 |                 entry.replace_entry(future_desc);
73 |             }
74 |             Entry::Vacant(entry) => {
75 |                 entry.insert(future_desc);
76 |             }
77 |         }
78 |         future_id
79 |     }
80 |
81 |     pub fn deregister(&self, future_id: u64) {
82 |         self.future_map.remove(&future_id);
83 |     }
84 |
85 |     pub fn get_all_futures(&self) -> Vec<Arc<FutureDescription>> {
86 |         self.future_map
87 |             .iter()
88 |             .map(|item| item.value().clone())
89 |             .collect()
90 |     }
91 | }
92 |
93 | #[pin_project(PinnedDrop)]
94 | pub struct TrackedFuture<F: Future> {
95 |     #[pin]
96 |     inner: F,
97 |     registry: Arc<TrackedFutureRegistry>,
98 |     future_id: u64,
99 | }
100 |
101 | impl<F: Future> TrackedFuture<F> {
102 |     pub fn new(inner: F, registry: Arc<TrackedFutureRegistry>, desc: String) -> Self {
103 |         let future_id = registry.register(desc);
104 |         Self {
105 |             inner,
106 |             registry,
107 |             future_id,
108 |         }
109 |     }
110 | }
111 |
112 | impl<F: Future> Future for TrackedFuture<F> {
113 |     type Output = F::Output;
114 |
115 |     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
116 |         let this = self.project();
117 |
118 |         match this.inner.poll(cx) {
119 |             Poll::Pending => Poll::Pending,
120 |             Poll::Ready(output) => {
121 |                 this.registry.deregister(*this.future_id);
122 |                 Poll::Ready(output)
                Poll::Ready(output)
            }
        }
    }
}

#[pinned_drop]
impl<F: Future> PinnedDrop for TrackedFuture<F> {
    fn drop(mut self: Pin<&mut Self>) {
        let this = self.project();
        let future_id = *this.future_id;
        this.registry.deregister(future_id);
    }
}
--------------------------------------------------------------------------------
/src/common/try_chunks.rs:
--------------------------------------------------------------------------------
use core::mem;
use futures::stream::{Fuse, FusedStream};
use futures::task::{Context, Poll};
use futures::{Stream, StreamExt};
use pin_project::pin_project;
use std::num::NonZeroUsize;
use std::pin::Pin;

// This is copied and modified from the `Chunks` stream combinator from futures-rs 0.3

pub trait TryChunksStreamExt: Stream {
    fn try_chunks(self, capacity: NonZeroUsize) -> TryChunks<Self>
    where
        Self: Sized,
    {
        TryChunks::new(self, capacity)
    }
}

impl<T> TryChunksStreamExt for T where T: Stream {}

#[pin_project]
#[derive(Debug)]
pub struct TryChunks<St: Stream> {
    #[pin]
    stream: Fuse<St>,
    items: Vec<St::Item>,
    cap: NonZeroUsize, // https://github.com/rust-lang/futures-rs/issues/1475
}

impl<St> TryChunks<St>
where
    St: Stream,
{
    pub fn new(stream: St, cap: NonZeroUsize) -> Self {
        Self {
            stream: stream.fuse(),
            items: Vec::with_capacity(cap.get()),
            cap,
        }
    }

    fn take(mut self: Pin<&mut Self>) -> Vec<St::Item> {
        let this = self.as_mut().project();
        let cap = this.cap.get();
        mem::replace(this.items, Vec::with_capacity(cap))
    }
}

impl<St: Stream> Stream for TryChunks<St> {
    type Item = Vec<St::Item>;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        loop {
            let item_opt = match self.as_mut().project().stream.poll_next(cx) {
                Poll::Ready(item_opt) => item_opt,
                Poll::Pending => {
                    return if self.items.is_empty() {
                        Poll::Pending
                    } else {
                        Poll::Ready(Some(self.take()))
                    };
                }
            };
            match item_opt {
                // Push the item into the buffer and check whether it is full.
                // If so, replace our buffer with a new and empty one and return
                // the full one.
                Some(item) => {
                    self.as_mut().project().items.push(item);
                    if self.items.len() >= self.cap.get() {
                        return Poll::Ready(Some(self.take()));
                    }
                }

                // Since the underlying stream ran out of values, return what we
                // have buffered, if we have anything.
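                // (`mem::take` is used below instead of `take()`: the stream
                // is finished, so there is no point pre-allocating a
                // replacement buffer with capacity.)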
                None => {
                    let last = if self.items.is_empty() {
                        None
                    } else {
                        let full_buf = mem::take(self.as_mut().project().items);
                        Some(full_buf)
                    };

                    return Poll::Ready(last);
                }
            }
        }
    }
}

impl<St: Stream> FusedStream for TryChunks<St> {
    fn is_terminated(&self) -> bool {
        self.stream.is_terminated() && self.items.is_empty()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use futures::{stream, StreamExt};

    #[tokio::test]
    async fn test_one_item() {
        let iter = vec![0].into_iter();
        let mut stream = stream::iter(iter).try_chunks(NonZeroUsize::new(5).unwrap());
        let item = stream.next().await.unwrap();
        assert_eq!(item, vec![0]);
    }

    #[tokio::test]
    async fn test_chunks() {
        let iter = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9].into_iter();
        let mut stream = stream::iter(iter).try_chunks(NonZeroUsize::new(3).unwrap());
        let item = stream.next().await.unwrap();
        assert_eq!(item, vec![0, 1, 2]);
    }
}
--------------------------------------------------------------------------------
/src/common/version.rs:
--------------------------------------------------------------------------------
pub const UNDERMOON_VERSION: &str = env!("CARGO_PKG_VERSION");
pub const UNDERMOON_MIGRATION_VERSION: &str = "mgr-0.2";
pub const UNDERMOON_MEM_BROKER_META_VERSION: &str = "mem-broker-0.2";
--------------------------------------------------------------------------------
/src/common/yield_now.rs:
--------------------------------------------------------------------------------
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

// Copied from tokio::task::yield_now so that this future could be `Pin`.
#[derive(Default)]
pub struct YieldNow {
    yielded: bool,
}

impl Future for YieldNow {
    type Output = ();

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        if self.yielded {
            return Poll::Ready(());
        }

        self.yielded = true;
        cx.waker().wake_by_ref();
        Poll::Pending
    }
}
--------------------------------------------------------------------------------
/src/coordinator/broker.rs:
--------------------------------------------------------------------------------
use crate::common::cluster::{Cluster, ClusterName, MigrationTaskMeta, Proxy};
use crate::common::utils::ThreadSafe;
use futures::{Future, Stream};
use mockall::automock;
use std::error::Error;
use std::fmt;
use std::io;
use std::pin::Pin;

// Suppress errors in [automock]
#[allow(
    clippy::indexing_slicing,
    clippy::panic,
    clippy::panic_in_result_fn,
    clippy::unreachable
)]
mod trait_mod {
    use super::*;

    // To support large result sets, some APIs here return a Stream.
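    // `#[automock]` below generates `MockMetaDataBroker` and
    // `MockMetaManipulationBroker`; both are re-exported from this module and
    // exercised by the coordinator tests (e.g. `src/coordinator/recover.rs`).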
    #[automock]
    pub trait MetaDataBroker: ThreadSafe {
        fn get_cluster_names<'s>(
            &'s self,
        ) -> Pin<Box<dyn Stream<Item = Result<ClusterName, MetaDataBrokerError>> + Send + 's>>;

        fn get_cluster<'s>(
            &'s self,
            name: ClusterName,
        ) -> Pin<Box<dyn Future<Output = Result<Option<Cluster>, MetaDataBrokerError>> + Send + 's>>;

        fn get_proxy_addresses<'s>(
            &'s self,
        ) -> Pin<Box<dyn Stream<Item = Result<String, MetaDataBrokerError>> + Send + 's>>;

        fn get_proxy<'s>(
            &'s self,
            address: String,
        ) -> Pin<Box<dyn Future<Output = Result<Option<Proxy>, MetaDataBrokerError>> + Send + 's>>;

        fn add_failure<'s>(
            &'s self,
            address: String,
            reporter_id: String,
        ) -> Pin<Box<dyn Future<Output = Result<(), MetaDataBrokerError>> + Send + 's>>;

        fn get_failures<'s>(
            &'s self,
        ) -> Pin<Box<dyn Stream<Item = Result<String, MetaDataBrokerError>> + Send + 's>>;

        fn get_failed_proxies<'s>(
            &'s self,
        ) -> Pin<Box<dyn Stream<Item = Result<String, MetaDataBrokerError>> + Send + 's>>;
    }

    // We may want to support other databases that speak the Redis protocol.
    // For them, we may need to trigger other actions such as migrating data.
    #[automock]
    pub trait MetaManipulationBroker: ThreadSafe {
        fn replace_proxy<'s>(
            &'s self,
            failed_proxy_address: String,
        ) -> Pin<
            Box<
                dyn Future<Output = Result<Option<Proxy>, MetaManipulationBrokerError>> + Send + 's,
            >,
        >;

        fn commit_migration<'s>(
            &'s self,
            meta: MigrationTaskMeta,
        ) -> Pin<Box<dyn Future<Output = Result<(), MetaManipulationBrokerError>> + Send + 's>>;
    }
}

pub use self::trait_mod::{
    MetaDataBroker, MetaManipulationBroker, MockMetaDataBroker, MockMetaManipulationBroker,
};

#[derive(Debug)]
pub enum MetaDataBrokerError {
    Io(io::Error),
    RequestFailed,
    InvalidReply,
    NoBroker,
}

impl fmt::Display for MetaDataBrokerError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:?}", self)
    }
}

impl Error for MetaDataBrokerError {
    fn description(&self) -> &str {
        "broker error"
    }

    fn cause(&self) -> Option<&dyn Error> {
        match self {
            MetaDataBrokerError::Io(err) => Some(err),
            _ => None,
        }
    }
}

#[derive(Debug)]
pub enum MetaManipulationBrokerError {
    Io(io::Error),
    RequestFailed,
    ResourceNotAvailable,
    InvalidReply,
    NoBroker,
    Retry,
}

impl fmt::Display for MetaManipulationBrokerError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:?}", self)
    }
}

impl Error for MetaManipulationBrokerError {
    fn description(&self) -> &str {
        "broker error"
    }

    fn cause(&self) -> Option<&dyn Error> {
        match self {
            MetaManipulationBrokerError::Io(err) => Some(err),
            _ => None,
        }
    }
}
--------------------------------------------------------------------------------
/src/coordinator/http_mani_broker.rs:
--------------------------------------------------------------------------------
use super::broker::{MetaManipulationBroker, MetaManipulationBrokerError};
use super::service::BrokerAddresses;
use crate::broker::MEM_BROKER_API_VERSION;
use crate::common::cluster::{MigrationTaskMeta, Proxy};
use futures::Future;
use std::pin::Pin;
use std::sync::atomic::{AtomicUsize, Ordering};

pub struct HttpMetaManipulationBroker {
    broker_addresses: BrokerAddresses,
    broker_index: AtomicUsize,
    client: reqwest::Client,
}

impl HttpMetaManipulationBroker {
    pub fn new(broker_addresses: BrokerAddresses, client: reqwest::Client) -> Self {
        HttpMetaManipulationBroker {
            broker_addresses,
            broker_index: AtomicUsize::new(0),
            client,
        }
    }
}
impl HttpMetaManipulationBroker {
    fn gen_url(&self, path: &str) -> Option<String> {
        let broker_addresses = self.broker_addresses.load();
        let num = broker_addresses.len();
        // Guard against an empty address list; `%` by zero would panic.
        if num == 0 {
            return None;
        }
        let curr_index = self.broker_index.fetch_add(1, Ordering::Relaxed);
        let broker = broker_addresses.get(curr_index % num)?;
        let url = format!("http://{}/api/{}{}", broker, MEM_BROKER_API_VERSION, path);
        Some(url)
    }

    async fn replace_proxy_impl(
        &self,
        failed_proxy_address: String,
    ) -> Result<Option<Proxy>, MetaManipulationBrokerError> {
        let url = self
            .gen_url(&format!("/proxies/failover/{}", failed_proxy_address))
            .ok_or(MetaManipulationBrokerError::NoBroker)?;
        let response = self.client.post(&url).send().await.map_err(|e| {
            error!("Failed to replace proxy {:?}", e);
            MetaManipulationBrokerError::RequestFailed
        })?;

        let status = response.status();

        if status.is_success() {
            let ReplaceProxyResponse { proxy } = response.json().await.map_err(|e| {
                error!("Failed to get json payload {:?}", e);
                MetaManipulationBrokerError::InvalidReply
            })?;
            Ok(proxy)
        } else {
            if status == reqwest::StatusCode::CONFLICT {
                return Err(MetaManipulationBrokerError::Retry);
            }

            error!(
                "replace_proxy: Failed to replace node: status code {:?}",
                status
            );
            let result = response.text().await;
            match result {
                Ok(body) => {
                    error!("replace_proxy: Error body: {:?}", body);
                    Err(MetaManipulationBrokerError::InvalidReply)
                }
                Err(e) => {
                    error!("replace_proxy: Failed to get body: {:?}", e);
                    Err(MetaManipulationBrokerError::InvalidReply)
                }
            }
        }
    }

    async fn commit_migration_impl(
        &self,
        meta: MigrationTaskMeta,
    ) -> Result<(), MetaManipulationBrokerError> {
        let url = self
            .gen_url("/clusters/migrations")
            .ok_or(MetaManipulationBrokerError::NoBroker)?;

        let response = self
            .client
            .put(&url)
            .json(&meta)
            .send()
            .await
            .map_err(|e| {
                error!("Failed to commit migration {:?}", e);
                MetaManipulationBrokerError::RequestFailed
            })?;

        let status = response.status();

        if status.is_success() || status.as_u16() == 404 {
            Ok(())
        } else {
            if status == reqwest::StatusCode::CONFLICT {
                return Err(MetaManipulationBrokerError::Retry);
            }

            error!("Failed to commit migration status code {:?}", status);
            let result = response.text().await;
            match result {
                Ok(body) => {
                    error!(
                        "HttpMetaManipulationBroker::commit_migration Error body: {:?}",
                        body
                    );
                    Err(MetaManipulationBrokerError::InvalidReply)
                }
                Err(e) => {
                    error!(
                        "HttpMetaManipulationBroker::commit_migration Failed to get body: {:?}",
                        e
                    );
                    Err(MetaManipulationBrokerError::InvalidReply)
                }
            }
        }
    }
}

impl MetaManipulationBroker for HttpMetaManipulationBroker {
    fn replace_proxy<'s>(
        &'s self,
        failed_proxy_address: String,
    ) -> Pin<Box<dyn Future<Output = Result<Option<Proxy>, MetaManipulationBrokerError>> + Send + 's>>
    {
        Box::pin(self.replace_proxy_impl(failed_proxy_address))
    }

    fn commit_migration<'s>(
        &'s self,
        meta: MigrationTaskMeta,
    ) -> Pin<Box<dyn Future<Output = Result<(), MetaManipulationBrokerError>> + Send + 's>> {
        Box::pin(self.commit_migration_impl(meta))
    }
}
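
// An illustrative serde round-trip for the failover payload defined just
// below. This test is an addition, and it assumes `serde_json` is available
// to the crate as it is elsewhere in the broker code.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_replace_proxy_response_deserialization() {
        let resp: ReplaceProxyResponse =
            serde_json::from_str(r#"{"proxy":null}"#).expect("valid payload");
        assert!(resp.proxy.is_none());
    }
}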

#[derive(Deserialize, Serialize)]
pub struct ReplaceProxyResponse {
    pub proxy: Option<Proxy>,
}
--------------------------------------------------------------------------------
/src/coordinator/mod.rs:
--------------------------------------------------------------------------------
// Suppress warning from automock.
mod api;
#[allow(clippy::ptr_arg)]
pub mod broker;
mod core;
mod detector;
pub mod http_mani_broker;
pub mod http_meta_broker;
mod migration;
mod recover;
pub mod service;
mod sync;
--------------------------------------------------------------------------------
/src/coordinator/recover.rs:
--------------------------------------------------------------------------------
use super::broker::{MetaDataBroker, MetaManipulationBroker};
use super::core::{CoordinateError, ProxyFailure, ProxyFailureHandler, ProxyFailureRetriever};
use futures::{Future, Stream, TryFutureExt, TryStreamExt};
use std::pin::Pin;
use std::sync::Arc;

pub struct BrokerProxyFailureRetriever<B: MetaDataBroker> {
    broker: Arc<B>,
}

impl<B: MetaDataBroker> BrokerProxyFailureRetriever<B> {
    pub fn new(broker: Arc<B>) -> Self {
        Self { broker }
    }
}

impl<B: MetaDataBroker> ProxyFailureRetriever for BrokerProxyFailureRetriever<B> {
    fn retrieve_proxy_failures<'s>(
        &'s self,
    ) -> Pin<Box<dyn Stream<Item = Result<ProxyFailure, CoordinateError>> + Send + 's>> {
        Box::pin(
            self.broker
                .get_failures()
                .map_err(CoordinateError::MetaData),
        )
    }
}

pub struct ReplaceNodeHandler<B: MetaManipulationBroker> {
    mani_broker: Arc<B>,
}

impl<B: MetaManipulationBroker> ReplaceNodeHandler<B> {
    pub fn new(mani_broker: Arc<B>) -> Self {
        Self { mani_broker }
    }
}

impl<B: MetaManipulationBroker> ProxyFailureHandler for ReplaceNodeHandler<B> {
    fn handle_proxy_failure<'s>(
        &'s self,
        proxy_failure: ProxyFailure,
    ) -> Pin<Box<dyn Future<Output = Result<(), CoordinateError>> + Send + 's>> {
        let proxy_failure2 = proxy_failure.clone();
        Box::pin(
            self.mani_broker
                .replace_proxy(proxy_failure.clone())
                .map_err(move |e| {
                    error!("failed to replace proxy {} {:?}", proxy_failure2, e);
                    CoordinateError::MetaMani(e)
                })
                .map_ok(move |new_proxy| {
                    info!(
                        "successfully replace {} with new proxy {:?}",
                        proxy_failure, new_proxy
                    );
                }),
        )
    }
}

#[cfg(test)]
mod tests {
    use super::super::broker::{MockMetaDataBroker, MockMetaManipulationBroker};
    use super::super::core::ParFailureHandler;
    use super::*;
    use crate::common::cluster::Proxy;
    use crate::coordinator::core::FailureHandler;
    use futures::{stream, StreamExt};
    use tokio;

    fn gen_testing_dummy_proxy() -> Proxy {
        Proxy::new(
            None,
            "127.0.0.1:6000".to_string(),
            7799,
            vec![],
            vec![],
            None,
        )
    }

    #[tokio::test]
    async fn test_failure_retriever() {
        let mut mock_broker = MockMetaDataBroker::new();
        mock_broker
            .expect_get_failures()
            .times(1)
            .returning(move || Box::pin(stream::iter(vec![Ok("127.0.0.1:6000".to_string())])));
        let mock_broker = Arc::new(mock_broker);

        let retriever = BrokerProxyFailureRetriever::new(mock_broker);
        let s: Vec<_> = retriever.retrieve_proxy_failures().collect().await;
        assert_eq!(s.len(), 1);
    }

    #[tokio::test]
    async fn test_handler() {
        let mut mock_broker = MockMetaManipulationBroker::new();
        let failure = "127.0.0.1:6000";
        let failure2 = failure;
        mock_broker
            .expect_replace_proxy()
            .withf(move |f| f == failure2)
            .times(1)
            .returning(move |_| Box::pin(async { Ok(Some(gen_testing_dummy_proxy())) }));
        let mock_broker = Arc::new(mock_broker);

        let handler = ReplaceNodeHandler::new(mock_broker);
        let res = handler.handle_proxy_failure(failure.to_string()).await;
        assert!(res.is_ok());
    }

    #[tokio::test]
    async fn test_failure_handler() {
        let mut mock_data_broker = MockMetaDataBroker::new();
        mock_data_broker
            .expect_get_failures()
            .times(1)
            .returning(move || Box::pin(stream::iter(vec![Ok("127.0.0.1:6000".to_string())])));
        let mock_data_broker = Arc::new(mock_data_broker);

        let mut mock_mani_broker = MockMetaManipulationBroker::new();
        let failure = "127.0.0.1:6000";
        let failure2 = failure;
        mock_mani_broker
            .expect_replace_proxy()
            .withf(move |f| f == failure2)
            .times(1)
            .returning(move |_| Box::pin(async { Ok(Some(gen_testing_dummy_proxy())) }));
        let mock_mani_broker = Arc::new(mock_mani_broker);

        let retriever = BrokerProxyFailureRetriever::new(mock_data_broker);
        let handler = ReplaceNodeHandler::new(mock_mani_broker);
        let failure_handler = ParFailureHandler::new(retriever, handler);
        let res: Vec<_> = failure_handler.run().collect().await;
        assert_eq!(res.len(), 1);
        assert!(res[0].is_ok());
    }
}
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
#![forbid(unsafe_code)]
#![deny(
    clippy::panic,
    clippy::panic_in_result_fn,
    clippy::unreachable,
    clippy::unwrap_used,
    // clippy::expect_used,
    clippy::indexing_slicing,
)]

#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate log;
#[macro_use]
extern crate derivative;
#[macro_use(defer)]
extern crate scopeguard;
#[macro_use]
extern crate lazy_static;

pub mod broker;
pub mod common;
pub mod coordinator;
pub mod migration;
pub mod protocol;
pub mod proxy;
pub mod replication;

pub use self::migration::MAX_REDIRECTIONS;
--------------------------------------------------------------------------------
/src/migration/mod.rs:
--------------------------------------------------------------------------------
pub mod manager;
pub mod scan_migration;
mod scan_task;
pub mod stats;
pub mod task;

pub use self::scan_task::MAX_REDIRECTIONS;
--------------------------------------------------------------------------------
/src/migration/stats.rs:
--------------------------------------------------------------------------------
use std::sync::atomic::{AtomicUsize, Ordering};

macro_rules! atomic_usize_stats {
    (pub struct $struct_name:ident {
        $(pub $field_name:ident: AtomicUsize,)*
    }) => {
        pub struct $struct_name {
            $(pub $field_name: AtomicUsize,)*
        }

        impl Default for $struct_name {
            fn default() -> Self {
                Self {
                    $($field_name: AtomicUsize::new(0),)*
                }
            }
        }

        impl $struct_name {
            pub fn to_lines_str(&self) -> Vec<(String, usize)> {
                vec![
                    $((stringify!($field_name).to_string(), self.$field_name.load(Ordering::Relaxed))),*
                ]
            }
        }
    }
}
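
// A sketch of what `atomic_usize_stats!` expands to for a single-field struct
// (abbreviated, for illustration only):
//
//     pub struct Stats {
//         pub ops: AtomicUsize,
//     }
//     impl Stats {
//         pub fn to_lines_str(&self) -> Vec<(String, usize)> {
//             vec![("ops".to_string(), self.ops.load(Ordering::Relaxed))]
//         }
//     }
//
// plus a `Default` impl that starts every counter at zero.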
atomic_usize_stats! {
    pub struct MigrationStats {
        pub migrating_src_redis_conn: AtomicUsize,
        pub migrating_dst_redis_conn: AtomicUsize,
        pub migrating_active_sync_lock_success: AtomicUsize,
        pub migrating_active_sync_lock_failed: AtomicUsize,
        pub migrating_scan_lock_success: AtomicUsize,
        pub migrating_scan_lock_failed: AtomicUsize,
        pub importing_blocking_migration_commands: AtomicUsize,
        pub importing_non_blocking_migration_commands: AtomicUsize,
        pub importing_umsync_lock_success: AtomicUsize,
        pub importing_umsync_lock_failed: AtomicUsize,
        pub importing_umsync_lock_failed_again: AtomicUsize,
        pub importing_umsync_failed: AtomicUsize,
        pub importing_dst_key_existed: AtomicUsize,
        pub importing_dst_key_not_existed: AtomicUsize,
        pub importing_lock_success: AtomicUsize,
        pub importing_lock_failed: AtomicUsize,
        pub importing_resend_exists: AtomicUsize,
        pub importing_resend_exists_failed: AtomicUsize,
        pub importing_lock_loop_retry: AtomicUsize,
        pub importing_src_key_existed: AtomicUsize,
        pub importing_src_key_not_existed: AtomicUsize,
        pub importing_src_failed: AtomicUsize,
    }
}
--------------------------------------------------------------------------------
/src/protocol/codec.rs:
--------------------------------------------------------------------------------
use super::decoder::DecodeError;
use super::encoder::EncodeError;
use crate::protocol::packet::{PacketDecoder, PacketEncoder};
use bytes::BytesMut;
use tokio_util::codec::{Decoder, Encoder};

pub struct RespCodec<E, D> {
    encoder: E,
    decoder: D,
}

impl<E, D> RespCodec<E, D> {
    pub fn new(encoder: E, decoder: D) -> Self {
        Self { encoder, decoder }
    }
}

impl<E, D: PacketDecoder> Decoder for RespCodec<E, D> {
    type Item = D::Pkt;
    type Error = DecodeError;

    fn decode(&mut self, buf: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        self.decoder.decode(buf)
    }
}

impl<E: PacketEncoder, D> Encoder<E::Pkt> for RespCodec<E, D> {
    type Error = EncodeError<E::Pkt>;

    fn encode(&mut self, item: E::Pkt, buf: &mut BytesMut) -> Result<(), Self::Error> {
        self.encoder
            .encode(item, |data| buf.extend_from_slice(data))?;
        Ok(())
    }
}
--------------------------------------------------------------------------------
/src/protocol/decoder.rs:
--------------------------------------------------------------------------------
use std::error::Error;
use std::fmt;
use std::io;

#[derive(Debug)]
pub enum DecodeError {
    InvalidProtocol,
    Io(io::Error),
}

impl fmt::Display for DecodeError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:?}", self)
    }
}

impl Error for DecodeError {
    fn description(&self) -> &str {
        "decode error"
    }

    fn cause(&self) -> Option<&dyn Error> {
        match self {
            DecodeError::Io(err) => Some(err),
            _ => None,
        }
    }
}

impl From<io::Error> for DecodeError {
    fn from(e: io::Error) -> Self {
        DecodeError::Io(e)
    }
}

pub const LF: u8 = b'\n';
--------------------------------------------------------------------------------
/src/protocol/encoder.rs:
--------------------------------------------------------------------------------
use super::resp::{Array, BinSafeStr, BulkStr, Resp, RespVec};
use std::error::Error;
use std::fmt;
use std::io;
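
// RESP wire-format reminder for the encoders below: a command such as
// ["GET", "key"] is written as an array header followed by one bulk string
// per argument:
//
//     *2\r\n$3\r\nGET\r\n$3\r\nkey\r\n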
pub fn command_to_buf(buf: &mut Vec<u8>, command: Vec<BinSafeStr>) -> io::Result<usize> {
    let arr: Vec<RespVec> = command
        .into_iter()
        .map(|s| Resp::Bulk(BulkStr::Str(s)))
        .collect();
    let resp = Resp::Arr(Array::Arr(arr));
    resp_to_buf(buf, &resp)
}

pub fn resp_to_buf(buf: &mut Vec<u8>, resp: &RespVec) -> io::Result<usize> {
    encode_resp(buf, resp)
}

pub fn encode_resp<W, T: AsRef<[u8]>>(writer: &mut W, resp: &Resp<T>) -> io::Result<usize>
where
    W: io::Write,
{
    match resp {
        Resp::Error(s) => encode_simple_element(writer, b"-", s),
        Resp::Simple(s) => encode_simple_element(writer, b"+", s),
        Resp::Integer(s) => encode_simple_element(writer, b":", s),
        Resp::Bulk(bulk) => encode_bulk_str(writer, bulk),
        Resp::Arr(array) => encode_array(writer, array),
    }
}

fn encode_array<W, T: AsRef<[u8]>>(writer: &mut W, array: &Array<T>) -> io::Result<usize>
where
    W: io::Write,
{
    match *array {
        Array::Nil => writer.write(b"*-1\r\n"),
        Array::Arr(ref arr) => {
            let mut l = encode_simple_element(writer, b"*", &arr.len().to_string().into_bytes())?;
            for element in arr {
                l += encode_resp(writer, element)?;
            }
            Ok(l)
        }
    }
}

fn encode_bulk_str<W, T: AsRef<[u8]>>(writer: &mut W, bulk_str: &BulkStr<T>) -> io::Result<usize>
where
    W: io::Write,
{
    match *bulk_str {
        BulkStr::Nil => writer.write(b"$-1\r\n"),
        BulkStr::Str(ref s) => Ok(encode_simple_element(
            writer,
            b"$",
            &s.as_ref().len().to_string().into_bytes(),
        )? + writer.write(s.as_ref())?
            + writer.write(b"\r\n")?),
    }
}

fn encode_simple_element<W, T: AsRef<[u8]>>(
    writer: &mut W,
    prefix: &[u8],
    b: T,
) -> io::Result<usize>
where
    W: io::Write,
{
    Ok(writer.write(prefix)? + writer.write(b.as_ref())? + writer.write(b"\r\n")?)
}

#[derive(Debug)]
pub enum EncodeError<T> {
    NotReady(T),
    Io(io::Error),
}

impl<T> fmt::Display for EncodeError<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let s = match self {
            Self::NotReady(_) => "EncodeError::NotReady".to_string(),
            Self::Io(err) => format!("EncodeError::Io({})", err),
        };
        write!(f, "{}", s)
    }
}

impl<T: fmt::Debug> Error for EncodeError<T> {
    fn description(&self) -> &str {
        "encode error"
    }

    fn cause(&self) -> Option<&dyn Error> {
        match self {
            EncodeError::Io(err) => Some(err),
            _ => None,
        }
    }
}

impl<T> From<io::Error> for EncodeError<T> {
    fn from(e: io::Error) -> Self {
        EncodeError::Io(e)
    }
}

pub fn get_resp_size_hint<T: AsRef<[u8]>>(resp: &Resp<T>) -> io::Result<usize> {
    let mut writer = SizeHintWriter {};
    encode_resp(&mut writer, resp)
}

struct SizeHintWriter {}

impl io::Write for SizeHintWriter {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        Ok(buf.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
--------------------------------------------------------------------------------
/src/protocol/fp.rs:
--------------------------------------------------------------------------------
pub trait Functor {
    type A; // Concrete inner type of Wrap<A>
    type Wrap<B>: Functor; // In the form of Wrap<B>

    fn map<B, F>(self, f: F) -> Self::Wrap<B>
    where
        F: Fn(Self::A) -> B + Copy;
    fn as_ref(&self) -> Self::Wrap<&Self::A>;
    fn as_mut(&mut self) -> Self::Wrap<&mut Self::A>;
    fn map_in_place<F>(&mut self, f: F)
    where
        F: Fn(&mut Self::A) + Copy;
}
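
#[cfg(test)]
mod tests {
    use super::*;

    // Illustrative only: `Ident` is a hypothetical single-value container,
    // added here to sketch how the `Functor` contract can be implemented.
    // It is not part of the original module.
    struct Ident<T>(T);

    impl<T> Functor for Ident<T> {
        type A = T;
        type Wrap<B> = Ident<B>;

        fn map<B, F>(self, f: F) -> Ident<B>
        where
            F: Fn(T) -> B + Copy,
        {
            Ident(f(self.0))
        }

        fn as_ref(&self) -> Ident<&T> {
            Ident(&self.0)
        }

        fn as_mut(&mut self) -> Ident<&mut T> {
            Ident(&mut self.0)
        }

        fn map_in_place<F>(&mut self, f: F)
        where
            F: Fn(&mut T) + Copy,
        {
            f(&mut self.0)
        }
    }

    #[test]
    fn test_map() {
        let doubled = Ident(21).map(|x| x * 2);
        assert_eq!(doubled.0, 42);
    }
}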
--------------------------------------------------------------------------------
/src/protocol/mod.rs:
--------------------------------------------------------------------------------
mod client;
mod codec;
mod decoder;
mod encoder;
mod fp;
mod packet;
mod resp;
mod stateless;

pub use self::client::{
    DummyRedisClientFactory, MockRedisClient, MockRedisClientFactory, Pool, PooledRedisClient,
    PooledRedisClientFactory, PreCheckRedisClientFactory, RedisClient, RedisClientError,
    RedisClientFactory, SimpleRedisClient, SimpleRedisClientFactory,
};
pub use self::codec::RespCodec;
pub use self::decoder::DecodeError;
pub use self::encoder::{encode_resp, resp_to_buf, EncodeError};
pub use self::fp::Functor;
pub use self::packet::{
    new_optional_multi_packet_codec, new_simple_packet_codec, DecodedPacket, EncodedPacket,
    FromResp, MonoPacket, OptionalMulti, OptionalMultiPacketDecoder, OptionalMultiPacketEncoder,
    Packet, PacketDecoder, PacketEncoder, PacketSizeHint, RespPacket, SimplePacketDecoder,
    SimplePacketEncoder,
};
pub use self::resp::{
    Array, ArrayBytes, ArrayIndex, ArraySlice, ArrayVec, BinSafeStr, BulkStr, BulkStrBytes,
    BulkStrIndex, BulkStrSlice, BulkStrVec, IndexedResp, Resp, RespBytes, RespIndex, RespSlice,
    RespVec,
};
--------------------------------------------------------------------------------
/src/proxy/mod.rs:
--------------------------------------------------------------------------------
pub mod backend;
pub mod blocking;
pub mod cluster;
pub mod command;
mod compress;
pub mod executor;
pub mod manager;
pub mod migration_backend;
pub mod reply;
pub mod sender;
pub mod service;
pub mod session;
mod slot;
pub mod slowlog;
mod table;
--------------------------------------------------------------------------------
/src/proxy/reply.rs:
--------------------------------------------------------------------------------
use super::backend::{
    BackendResult, CmdTask, CmdTaskResultHandler, CmdTaskResultHandlerFactory, ConnFactory,
};
use super::compress::{CmdReplyDecompressor, CompressionError, CompressionStrategyMetaMapConfig};
use super::manager::SharedMetaMap;
use super::session::CmdCtx;
use crate::common::utils::Wrapper;
use crate::protocol::{BulkStr, Resp, RespPacket};
use std::marker::PhantomData;

pub struct DecompressCommitHandlerFactory<
    T: CmdTask<Pkt = RespPacket> + Into<Wrapper<CmdCtx>>,
    C: ConnFactory<Pkt = RespPacket>,
> {
    meta_map: SharedMetaMap<C>,
    phanthom: PhantomData<T>,
}

impl<T, C> DecompressCommitHandlerFactory<T, C>
where
    T: CmdTask<Pkt = RespPacket> + Into<Wrapper<CmdCtx>>,
    C: ConnFactory<Pkt = RespPacket>,
{
    pub fn new(meta_map: SharedMetaMap<C>) -> Self {
        Self {
            meta_map,
            phanthom: PhantomData,
        }
    }
}

impl<T, C> CmdTaskResultHandlerFactory for DecompressCommitHandlerFactory<T, C>
where
    T: CmdTask<Pkt = RespPacket> + Into<Wrapper<CmdCtx>>,
    C: ConnFactory<Pkt = RespPacket>,
{
    type Handler = DecompressCommitHandler<T, C>;

    fn create(&self) -> Self::Handler {
        DecompressCommitHandler {
            decompressor: CmdReplyDecompressor::new(CompressionStrategyMetaMapConfig::new(
                self.meta_map.clone(),
            )),
            phanthom: PhantomData,
        }
    }
}

pub struct DecompressCommitHandler<
    T: CmdTask<Pkt = RespPacket> + Into<Wrapper<CmdCtx>>,
    C: ConnFactory<Pkt = RespPacket>,
> {
    decompressor: CmdReplyDecompressor<CompressionStrategyMetaMapConfig<C>>,
    phanthom: PhantomData<T>,
}

impl<T, C> CmdTaskResultHandler for DecompressCommitHandler<T, C>
where
    T: CmdTask<Pkt = RespPacket> + Into<Wrapper<CmdCtx>>,
    C: ConnFactory<Pkt = RespPacket>,
{
    type Task = T;

    fn handle_task(
        &self,
        cmd_task: Self::Task,
        result: BackendResult<<Self::Task as CmdTask>::Pkt>,
    ) {
        let cmd_ctx = cmd_task.into().into_inner();

        let mut packet = match result {
            Ok(pkt) => pkt,
            Err(err) => {
                return cmd_ctx.set_resp_result(Ok(Resp::Error(
                    format!("backend failed to handle task: {:?}", err).into_bytes(),
                )));
            }
        };

        match self.decompressor.decompress(&cmd_ctx, &mut packet) {
            Ok(())
            | Err(CompressionError::UnsupportedCmdType)
            | Err(CompressionError::Disabled) => (),
            Err(err) => {
                warn!(
                    "failed to decompress: {:?}. Force to return nil bulk string",
                    err
                );
                return cmd_ctx.set_resp_result(Ok(Resp::Bulk(BulkStr::Nil)));
            }
        }

        cmd_ctx.set_result(Ok(Box::new(packet)))
    }
}

pub struct ReplyCommitHandlerFactory;

impl Default for ReplyCommitHandlerFactory {
    fn default() -> Self {
        Self
    }
}

impl CmdTaskResultHandlerFactory for ReplyCommitHandlerFactory {
    type Handler = ReplyCommitHandler;

    fn create(&self) -> Self::Handler {
        ReplyCommitHandler
    }
}

pub struct ReplyCommitHandler;

impl CmdTaskResultHandler for ReplyCommitHandler {
    type Task = CmdCtx;

    fn handle_task(
        &self,
        cmd_ctx: Self::Task,
        result: BackendResult<<Self::Task as CmdTask>::Pkt>,
    ) {
        let packet = match result {
            Ok(pkt) => pkt,
            Err(err) => {
                return cmd_ctx.set_resp_result(Ok(Resp::Error(
                    format!("backend failed to handle task: {:?}", err).into_bytes(),
                )));
            }
        };
        cmd_ctx.set_result(Ok(Box::new(packet)))
    }
}
--------------------------------------------------------------------------------
/src/proxy/slot.rs:
--------------------------------------------------------------------------------
use crate::common::cluster::SlotRange;
use crate::common::utils::SLOT_NUM;
use std::collections::HashMap;

pub struct SlotMap {
    data: SlotMapData,
}

impl SlotMap {
    pub fn from_ranges(slot_map: HashMap<String, Vec<SlotRange>>) -> Self {
        let mut map = HashMap::new();
        for (addr, slot_ranges) in slot_map {
            let mut slots = Vec::new();
            for slot_range in slot_ranges {
                for range in slot_range.get_range_list().get_ranges() {
                    slots.push((range.start(), range.end()));
                }
            }
            map.insert(addr, slots);
        }
        SlotMap {
            data: SlotMapData::new(map),
        }
    }

    pub fn get(&self, slot: usize) -> Option<&str> {
        self.data.get(slot)
    }
}

pub struct SlotMapData {
    slot_arr: Vec<Option<usize>>,
    addrs: Vec<String>,
}

impl SlotMapData {
    pub fn new(slot_map: HashMap<String, Vec<(usize, usize)>>) -> SlotMapData {
        let mut slot_arr = Vec::with_capacity(SLOT_NUM);
        let mut addrs = Vec::with_capacity(slot_map.len());
        for _ in 0..SLOT_NUM {
            slot_arr.push(None);
        }
        for (addr, slots) in slot_map.into_iter() {
            addrs.push(addr);
            for range in slots {
                let (start, end) = range;
                if start > end {
                    continue;
                }
                for s in start..=end {
                    if s >= SLOT_NUM {
                        break;
                    }
                    if let Some(opt) = slot_arr.get_mut(s) {
                        *opt = Some(addrs.len() - 1);
                    }
                }
            }
        }
        SlotMapData { slot_arr, addrs }
    }

    pub fn get(&self, slot: usize) -> Option<&str> {
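        // Two-step lookup: `slot` indexes the dense per-slot table to get an
        // index into `addrs`; storing indices rather than owned strings keeps
        // the SLOT_NUM-sized table small.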
        let addr_index = self.slot_arr.get(slot).and_then(|opt| *opt)?;
        self.addrs.get(addr_index).map(|s| s.as_str())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::common::cluster::{RangeList, SlotRange, SlotRangeTag};
    use std::convert::TryFrom;

    #[test]
    fn test_slot_map() {
        let mut range_map = HashMap::new();
        let backend = "127.0.0.1:6379".to_string();
        range_map.insert(
            backend.clone(),
            vec![SlotRange {
                range_list: RangeList::try_from(format!("1 0-{}", SLOT_NUM - 1).as_str()).unwrap(),
                tag: SlotRangeTag::None,
            }],
        );

        let slot_map = SlotMap::from_ranges(range_map);
        for slot in 0..SLOT_NUM {
            let node = slot_map.get(slot).unwrap();
            assert_eq!(node, backend);
        }
    }
}
--------------------------------------------------------------------------------
/src/proxy/table.rs:
--------------------------------------------------------------------------------
use std::collections::HashSet;

pub struct CommandTable {
    supported_commands: HashSet<&'static [u8]>,
}

impl Default for CommandTable {
    fn default() -> Self {
        Self {
            supported_commands: SUPPORTED_COMMANDS.iter().cloned().collect(),
        }
    }
}

impl CommandTable {
    pub fn is_supported(&self, cmd: &[u8]) -> bool {
        self.supported_commands.contains(cmd)
    }
}

const SUPPORTED_COMMANDS: [&[u8]; 137] = [
    b"restore",
    b"zrangebylex",
    b"zcount",
    b"ttl",
    b"touch",
    b"rpoplpush",
    b"decr",
    b"pttl",
    b"zrevrank",
    b"georadiusbymember_ro",
    b"persist",
    b"zrem",
    b"zinterstore",
    b"hincrbyfloat",
    b"bitcount",
    b"get",
    b"sscan",
    b"incrby",
    b"xpending",
    b"xclaim",
    b"hexists",
    b"xlen",
    b"hlen",
    b"pfadd",
    b"smembers",
    b"geodist",
    b"lset",
    b"geohash",
    b"pfmerge",
    b"mget",
    b"set",
    b"dump",
    b"setex",
    b"zremrangebyrank",
    b"blpop",
    b"zscan",
    b"append",
    b"srem",
    b"pexpire",
    b"hstrlen",
    b"zremrangebyscore",
    b"xack",
    b"hsetnx",
    b"lpushx",
    b"georadius",
    b"spop",
    b"asking",
    b"srandmember",
    b"sismember",
    b"zlexcount",
    b"zscore",
    b"zpopmin",
    b"expire",
    b"sdiffstore",
    b"setbit",
    b"rpop",
    b"lpop",
    b"hgetall",
    b"lpush",
    b"hmget",
    b"xrevrange",
    b"hdel",
    b"lrem",
    b"rename",
    b"pfcount",
    b"sunion",
    b"hincrby",
    b"geopos",
    b"echo",
    b"georadiusbymember",
    b"pexpireat",
    b"getbit",
    b"xadd",
    b"rpush",
    b"zpopmax",
    b"type",
    b"exists",
    b"georadius_ro",
    b"zrevrangebylex",
    b"zrevrangebyscore",
    b"linsert",
    b"sort",
    b"zrevrange",
    b"setnx",
    b"lindex",
    b"hget",
    b"info",
    b"zadd",
    b"sinter",
    b"expireat",
    b"hvals",
    b"hset",
    b"xdel",
    b"hkeys",
    b"del",
    b"decrby",
    b"setrange",
    b"zcard",
    b"xrange",
    b"sdiff",
    b"brpoplpush",
    b"bitfield",
    b"cluster",
    b"geoadd",
    b"bzpopmin",
    b"mset",
    b"strlen",
    b"incr",
    b"getrange",
    b"bzpopmax",
    b"ping",
    b"smove",
    b"incrbyfloat",
    b"bitpos",
    b"rpushx",
    b"zunionstore",
    b"hmset",
    b"getset",
    b"zrange",
    b"config",
    b"zrank",
b"psetex", 144 | b"zremrangebylex", 145 | b"sadd", 146 | b"ltrim", 147 | b"sinterstore", 148 | b"brpop", 149 | b"lrange", 150 | b"eval", 151 | b"zincrby", 152 | b"unlink", 153 | b"hscan", 154 | b"zrangebyscore", 155 | b"scard", 156 | b"xtrim", 157 | b"llen", 158 | b"command", 159 | ]; 160 | -------------------------------------------------------------------------------- /src/replication/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod manager; 2 | pub mod redis_replicator; 3 | pub mod replicator; 4 | -------------------------------------------------------------------------------- /tests/connection.rs: -------------------------------------------------------------------------------- 1 | extern crate undermoon; 2 | 3 | use futures::channel::mpsc; 4 | use futures::{Future, SinkExt, StreamExt, TryStreamExt}; 5 | use std::net::SocketAddr; 6 | use std::str; 7 | use std::sync::Arc; 8 | use tokio::macros::support::Pin; 9 | use undermoon::protocol::{Array, BulkStr, Resp, RespPacket, RespVec}; 10 | use undermoon::proxy::backend::{ 11 | BackendError, ConnFactory, ConnSink, ConnStream, CreateConnResult, 12 | }; 13 | 14 | pub struct DummyOkConnFactory { 15 | handle_func: Arc) -> RespVec + Send + Sync + 'static>, 16 | } 17 | 18 | impl DummyOkConnFactory { 19 | pub fn new(handle_func: Arc) -> RespVec + Send + Sync + 'static>) -> Self { 20 | Self { handle_func } 21 | } 22 | } 23 | 24 | impl ConnFactory for DummyOkConnFactory { 25 | type Pkt = RespPacket; 26 | 27 | fn create_conn( 28 | &self, 29 | _addr: SocketAddr, 30 | ) -> Pin> + Send>> { 31 | let (sender, receiver) = mpsc::unbounded(); 32 | let handle_func = self.handle_func.clone(); 33 | let receiver = receiver.map(move |packet: RespPacket| { 34 | let cmd: Vec = match packet.to_resp_vec() { 35 | Resp::Arr(Array::Arr(resps)) => resps 36 | .iter() 37 | .map(|resp| match resp { 38 | Resp::Bulk(BulkStr::Str(s)) => str::from_utf8(s).unwrap().to_string(), 39 | _ => panic!(), 40 | }) 41 | .collect(), 42 | _ => panic!(), 43 | }; 44 | let resp = handle_func(cmd); 45 | Ok::<_, ()>(RespPacket::Data(resp)) 46 | }); 47 | let sink: ConnSink = Box::pin(sender.sink_map_err(|_| BackendError::Canceled)); 48 | let stream: ConnStream = Box::pin(receiver.map_err(|_| BackendError::Canceled)); 49 | Box::pin(async { Ok((sink, stream)) }) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /tests/redis_client.rs: -------------------------------------------------------------------------------- 1 | extern crate undermoon; 2 | 3 | use futures::{future, Future}; 4 | use std::pin::Pin; 5 | use std::str; 6 | use std::sync::Arc; 7 | use undermoon::protocol::{ 8 | BinSafeStr, OptionalMulti, RedisClient, RedisClientError, RedisClientFactory, RespVec, 9 | }; 10 | 11 | pub struct DummyRedisClient { 12 | handle_func: Arc) -> RespVec + Send + Sync + 'static>, 13 | } 14 | 15 | impl DummyRedisClient { 16 | fn gen_reply(&self, cmd: Vec) -> RespVec { 17 | let cmd = cmd 18 | .into_iter() 19 | .map(|s| str::from_utf8(s.as_slice()).unwrap().to_string()) 20 | .collect(); 21 | (self.handle_func)(cmd) 22 | } 23 | } 24 | 25 | impl RedisClient for DummyRedisClient { 26 | fn execute<'s>( 27 | &'s mut self, 28 | command: OptionalMulti>, 29 | ) -> Pin, RedisClientError>> + Send + 's>> 30 | { 31 | let res = command.map(|cmd| self.gen_reply(cmd)); 32 | Box::pin(async { Ok(res) }) 33 | } 34 | } 35 | 36 | pub struct DummyClientFactory { 37 | handle_func: Arc) -> RespVec + Send + Sync + 'static>, 38 | } 39 | 40 
impl DummyClientFactory {
    pub fn new(handle_func: Arc<dyn Fn(Vec<String>) -> RespVec + Send + Sync + 'static>) -> Self {
        Self { handle_func }
    }
}

impl RedisClientFactory for DummyClientFactory {
    type Client = DummyRedisClient;

    fn create_client(
        &self,
        _address: String,
    ) -> Pin<Box<dyn Future<Output = Result<Self::Client, RedisClientError>> + Send>> {
        let handle_func = self.handle_func.clone();
        Box::pin(future::ok(DummyRedisClient { handle_func }))
    }
}
--------------------------------------------------------------------------------