├── .github ├── stale.yml └── workflows │ └── ci.yml ├── .gitignore ├── Cargo.toml ├── LICENSE ├── Makefile ├── README.md ├── _config.yml ├── build.rs ├── examples ├── README.md ├── kv.rs ├── tls.rs └── watch.rs ├── hack ├── .gitignore ├── cfssl │ ├── ca-config.json │ └── ca-csr.json ├── download_etcd.sh ├── generate_etcd_certificate.sh ├── generate_etcd_cluster.sh └── openssl.cnf ├── integration-tests ├── .gitignore ├── Cargo.toml └── src │ ├── failover.rs │ ├── kv.rs │ ├── lib.rs │ ├── support.rs │ ├── tls.rs │ └── watch.rs ├── justfile ├── proto ├── auth.proto ├── kv.proto ├── rpc.proto ├── v3election.proto └── v3lock.proto ├── rustfmt.toml └── src ├── auth ├── authenticate.rs └── mod.rs ├── client.rs ├── cluster ├── member_add.rs ├── member_list.rs ├── member_remove.rs ├── member_update.rs └── mod.rs ├── error.rs ├── kv ├── compact.rs ├── delete.rs ├── mod.rs ├── put.rs ├── range.rs └── txn.rs ├── lease ├── grant.rs ├── keep_alive.rs ├── mod.rs ├── revoke.rs └── time_to_live.rs ├── lib.rs ├── lock └── mod.rs ├── proto.rs ├── response_header.rs └── watch ├── mod.rs └── watch.rs /.github/stale.yml: -------------------------------------------------------------------------------- 1 | daysUntilStale: 60 2 | daysUntilClose: 14 3 | staleLabel: wontfix 4 | 5 | markComment: > 6 | This issue has been automatically marked as stale because it has not had 7 | recent activity. It will be closed in 14 days if no further activity occurs. Thank you 8 | for your contributions. 9 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: [ "master" ] 6 | pull_request: 7 | branches: [ "master" ] 8 | schedule: [cron: "0 */6 * * *"] 9 | 10 | env: 11 | CARGO_TERM_COLOR: always 12 | 13 | jobs: 14 | integration-test: 15 | name: Integration test 16 | runs-on: ubuntu-latest 17 | timeout-minutes: 30 18 | steps: 19 | - name: Checkout Code 20 | uses: actions/checkout@v3 21 | - name: Install rust toolchain 22 | uses: actions-rs/toolchain@v1 23 | with: 24 | toolchain: stable 25 | components: clippy 26 | override: true 27 | - name: Install Protoc 28 | uses: arduino/setup-protoc@v1 29 | with: 30 | repo-token: ${{ secrets.GITHUB_TOKEN }} 31 | - run: | 32 | wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl_1.6.1_linux_amd64 -O cfssl 33 | wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssljson_1.6.1_linux_amd64 -O cfssljson 34 | chmod +x cfssl 35 | chmod +x cfssljson 36 | sudo mv cfssl /usr/local/bin 37 | sudo mv cfssljson /usr/local/bin 38 | - name: Install cargo-nextest 39 | uses: baptiste0928/cargo-install@v1 40 | with: 41 | crate: cargo-nextest 42 | locked: true 43 | - name: Install just 44 | uses: extractions/setup-just@v1 45 | - name: Run tests 46 | run: | 47 | just integration-test 48 | 49 | lint: 50 | name: Coding style check 51 | runs-on: ubuntu-latest 52 | timeout-minutes: 10 53 | steps: 54 | - name: Checkout Code 55 | uses: actions/checkout@v3 56 | - name: Install rust toolchain 57 | uses: actions-rs/toolchain@v1 58 | with: 59 | toolchain: stable 60 | components: clippy 61 | override: true 62 | - name: Install Protoc 63 | uses: arduino/setup-protoc@v1 64 | with: 65 | repo-token: ${{ secrets.GITHUB_TOKEN }} 66 | - name: Install just 67 | uses: extractions/setup-just@v1 68 | - name: Run lint 69 | run: | 70 | just lint 71 | 72 | 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | Cargo.lock 4 | .idea/ 5 | etcd-docker-compose.yaml 6 | etcd 7 | *.tar.gz -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "etcd-rs" 3 | version = "1.0.1" 4 | authors = ["lodrem "] 5 | edition = "2021" 6 | keywords = ["etcd", "future", "async"] 7 | repository = "https://github.com/lodrem/etcd-rs" 8 | homepage = "https://github.com/lodrem/etcd-rs" 9 | description = "etcd client for rust" 10 | documentation = "https://docs.rs/etcd-rs" 11 | license = "MIT" 12 | 13 | [features] 14 | default = ["tls"] 15 | tls = ["tonic/tls", "tokio/fs"] 16 | 17 | [dependencies] 18 | tonic = "0.9" 19 | prost = "0.11" 20 | tokio = "1.27" 21 | tokio-stream = "0.1" 22 | async-trait = "0.1" 23 | futures = "0.3" 24 | thiserror = "1.0" 25 | http = "0.2" 26 | 27 | [dev-dependencies] 28 | tokio = { version = "1.27", features = ["full"] } 29 | 30 | [build-dependencies] 31 | tonic-build = "0.9" 32 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 luncj 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | ETCD_CLUSTER_DOCKER_COMPOSE ?= etcd-docker-compose.yaml 2 | ETCD_CLUSTER_WITH_TLS ?= false 3 | ETCD_NODE ?= etcd-1 4 | ETCD_VERSION ?= v3.5.2 5 | 6 | TEST_CASE ?= test_basic 7 | 8 | .PHONY: build 9 | build: 10 | cargo build 11 | 12 | .PHONY: test 13 | test: 14 | cargo nextest run --test-threads=1 --retries 5 15 | cargo check --no-default-features 16 | 17 | .PHONY: test-one 18 | test-one: 19 | cargo nextest run --test ${TEST_CASE} --test-threads=1 20 | 21 | .PHONY: publish 22 | publish: 23 | cargo package && cargo publish 24 | 25 | .PHONY: setup-etcd-cluster 26 | setup-etcd-cluster: teardown-etcd-cluster 27 | ifneq ("${ETCD_CLUSTER_WITH_TLS}", "false") 28 | ./hack/generate_etcd_certificate.sh 29 | endif 30 | ./hack/generate_etcd_cluster.sh ${ETCD_CLUSTER_DOCKER_COMPOSE} ${ETCD_VERSION} ${ETCD_CLUSTER_WITH_TLS} 31 | docker-compose -f ${ETCD_CLUSTER_DOCKER_COMPOSE} up -d 32 | 33 | .PHONY: start-etcd-node 34 | start-etcd-node: 35 | ifneq ("$(wildcard ${ETCD_CLUSTER_DOCKER_COMPOSE})","") 36 | docker-compose -f ${ETCD_CLUSTER_DOCKER_COMPOSE} start ${ETCD_NODE} 37 | endif 38 | 39 | .PHONY: stop-etcd-node 40 | stop-etcd-node: 41 | ifneq ("$(wildcard ${ETCD_CLUSTER_DOCKER_COMPOSE})","") 42 | docker-compose -f ${ETCD_CLUSTER_DOCKER_COMPOSE} stop ${ETCD_NODE} 43 | endif 44 | 45 | .PHONY: teardown-etcd-cluster 46 | teardown-etcd-cluster: 47 | ifneq ("$(wildcard ${ETCD_CLUSTER_DOCKER_COMPOSE})","") 48 | docker-compose -f ${ETCD_CLUSTER_DOCKER_COMPOSE} down 49 | rm ${ETCD_CLUSTER_DOCKER_COMPOSE} 50 | endif 51 | 52 | etcd/etcdctl: 53 | ./hack/download_etcd.sh 54 | 55 | .PHONY: etcd-cluster-status 56 | etcd-cluster-status: etcd/etcdctl 57 | ifneq ("$(wildcard ${ETCD_CLUSTER_DOCKER_COMPOSE})","") 58 | docker-compose -f ${ETCD_CLUSTER_DOCKER_COMPOSE} ps; 59 | etcd/etcdctl endpoint status --endpoints=127.0.0.1:12379,127.0.0.1:22379,127.0.0.1:32379 -w table; 60 | endif 61 | 62 | .PHONY: clean 63 | clean: teardown-etcd-cluster 64 | cargo clean 65 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | etcd client for Rust 2 | ==== 3 | 4 | [github](https://github.com/lodrem/etcd-rs) 5 | [crates.io](https://crates.io/crates/etcd-rs) 6 | [docs.rs](https://docs.rs/etcd-rs) 7 | [build status](https://github.com/luncj/etcd-rs/actions?query%3Amaster) 8 | [dependency status](https://deps.rs/repo/github/lodrem/etcd-rs) 9 | 10 | An [etcd](https://github.com/etcd-io/etcd) (API v3) client for Rust backed by [tokio](https://github.com/tokio-rs/tokio) and [tonic](https://github.com/hyperium/tonic). 
11 | 
12 | Supported APIs
13 | ----
14 | 
15 | - KV
16 |   - [x] Put
17 |   - [x] Range
18 |   - [x] Delete
19 |   - [x] Transaction
20 |   - [x] Compact
21 | - Lease
22 |   - [x] Grant
23 |   - [x] Revoke
24 |   - [x] KeepAlive
25 |   - [x] TimeToLive
26 | - Watch
27 |   - [x] WatchCreate
28 |   - [x] WatchCancel
29 | - Auth
30 |   - [x] Authenticate
31 |   - [ ] RoleAdd
32 |   - [ ] RoleGrantPermission
33 |   - [ ] UserAdd
34 |   - [ ] UserGrantRole
35 |   - [ ] AuthEnable
36 |   - [ ] AuthDisable
37 | - Cluster
38 |   - [x] MemberAdd
39 |   - [x] MemberRemove
40 |   - [x] MemberUpdate
41 |   - [x] MemberList
42 | - Maintenance
43 |   - [ ] Alarm
44 |   - [ ] Status
45 |   - [ ] Defragment
46 |   - [ ] Hash
47 |   - [ ] Snapshot
48 |   - [ ] MoveLeader
49 | 
50 | Usage
51 | ----
52 | 
53 | Add the following dependency to your project's `Cargo.toml`:
54 | 
55 | ```toml
56 | [dependencies]
57 | etcd-rs = "1.0"
58 | ```
59 | 
60 | ```rust
61 | use etcd_rs::{Client, ClientConfig, KeyValueOp};
62 | 
63 | #[tokio::main]
64 | async fn main() {
65 |     let cli = Client::connect(ClientConfig::new([
66 |         "http://127.0.0.1:12379".into(),
67 |         "http://127.0.0.1:22379".into(),
68 |         "http://127.0.0.1:32379".into(),
69 |     ]))
70 |     .await
71 |     .expect("connect to etcd cluster");
72 | 
73 |     cli.put(("foo", "bar")).await.expect("put kv");
74 | 
75 |     let resp = cli.get("foo").await.expect("get kv");
76 |     assert_eq!(resp.kvs.len(), 1);
77 | }
78 | ```
79 | 
80 | More complete examples live in [`examples/`](examples); a watch sketch is shown at the end of this README.
81 | 
82 | Development
83 | ----
84 | 
85 | Requirements:
86 | - make
87 | - docker
88 | - docker-compose
89 | 
90 | ### Start local etcd cluster
91 | 
92 | ```shell
93 | make setup-etcd-cluster
94 | ```
95 | 
96 | Stop the cluster:
97 | ```shell
98 | make teardown-etcd-cluster
99 | ```
100 | 
101 | ### Run tests
102 | 
103 | ```shell
104 | make test
105 | ```
106 | 
107 | To run a specific case:
108 | ```shell
109 | TEST_CASE=test_put_error make test-one
110 | ```
111 | 
112 | License
113 | ----
114 | 
115 | This project is licensed under the [MIT license](LICENSE).
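116 | 
117 | Watch example
118 | ----
119 | 
120 | A minimal watch sketch, adapted from [`examples/watch.rs`](examples/watch.rs); the single endpoint and the keys here are illustrative only:
121 | 
122 | ```rust
123 | use etcd_rs::{Client, ClientConfig, KeyRange, KeyValueOp, Result, WatchInbound, WatchOp};
124 | 
125 | #[tokio::main]
126 | async fn main() -> Result<()> {
127 |     let cli = Client::connect(ClientConfig::new(["http://127.0.0.1:12379".into()])).await?;
128 | 
129 |     // Subscribe to events for every key prefixed with "foo".
130 |     let (mut stream, cancel) = cli.watch(KeyRange::prefix("foo")).await?;
131 | 
132 |     tokio::spawn(async move {
133 |         cli.put(("foo1", "bar")).await.expect("put kv");
134 |         cancel.cancel().await.expect("cancel watch");
135 |     });
136 | 
137 |     loop {
138 |         match stream.inbound().await {
139 |             WatchInbound::Ready(resp) => println!("receive event: {:?}", resp),
140 |             WatchInbound::Interrupted(e) => eprintln!("encounter error: {:?}", e),
141 |             WatchInbound::Closed => break,
142 |         }
143 |     }
144 | 
145 |     Ok(())
146 | }
147 | ```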
148 | 
--------------------------------------------------------------------------------
/_config.yml:
--------------------------------------------------------------------------------
1 | theme: jekyll-theme-hacker
--------------------------------------------------------------------------------
/build.rs:
--------------------------------------------------------------------------------
1 | fn main() -> Result<(), Box<dyn std::error::Error>> {
2 |     tonic_build::configure().build_server(false).compile(
3 |         &[
4 |             "proto/auth.proto",
5 |             "proto/kv.proto",
6 |             "proto/rpc.proto",
7 |             "proto/v3lock.proto",
8 |             "proto/v3election.proto",
9 |         ],
10 |         &["proto"],
11 |     )?;
12 | 
13 |     Ok(())
14 | }
15 | 
--------------------------------------------------------------------------------
/examples/README.md:
--------------------------------------------------------------------------------
1 | Examples
2 | ====
3 | 
4 | ## Without TLS
5 | 
6 | ```shell
7 | cd ./etcd-rs
8 | make setup-etcd-cluster
9 | 
10 | cargo run --example kv
11 | cargo run --example watch
12 | ```
13 | 
14 | ## With TLS
15 | 
16 | ```shell
17 | cd ./etcd-rs
18 | ETCD_CLUSTER_WITH_TLS=true make setup-etcd-cluster
19 | 
20 | cargo run --example tls
21 | ```
22 | 
--------------------------------------------------------------------------------
/examples/kv.rs:
--------------------------------------------------------------------------------
1 | use std::time::Duration;
2 | 
3 | use etcd_rs::{Client, ClientConfig, KeyRange, KeyValueOp, LeaseOp, PutRequest, Result};
4 | 
5 | async fn put(cli: &Client) -> Result<()> {
6 |     cli.put(("foo", "bar")).await.expect("put kv");
7 |     let resp = cli.get("foo").await.expect("get kv");
8 | 
9 |     assert_eq!(resp.kvs.len(), 1);
10 |     assert_eq!(resp.kvs[0].key_str(), "foo");
11 |     assert_eq!(resp.kvs[0].value_str(), "bar");
12 | 
13 |     Ok(())
14 | }
15 | 
16 | async fn put_with_lease(cli: &Client) -> Result<()> {
17 |     let lease = cli
18 |         .grant_lease(Duration::from_secs(10))
19 |         .await
20 |         .expect("grant lease");
21 |     cli.put(PutRequest::new("foo", "bar").lease(lease.id))
22 |         .await
23 |         .expect("put kv with lease");
24 | 
25 |     Ok(())
26 | }
27 | 
28 | async fn get(cli: &Client) -> Result<()> {
29 |     cli.get(KeyRange::range("start", "end"))
30 |         .await
31 |         .expect("get range kvs");
32 |     cli.get_range("start", "end").await.expect("get range kvs");
33 | 
34 |     cli.get(KeyRange::all()).await.expect("get all kvs");
35 |     cli.get_all().await.expect("get all kvs");
36 | 
37 |     cli.get(KeyRange::prefix("foo"))
38 |         .await
39 |         .expect("get by prefix");
40 |     cli.get_by_prefix("foo").await.expect("get by prefix");
41 | 
42 |     Ok(())
43 | }
44 | 
45 | #[tokio::main]
46 | async fn main() -> Result<()> {
47 |     let cli = Client::connect(ClientConfig::new([
48 |         "http://127.0.0.1:12379".into(),
49 |         "http://127.0.0.1:22379".into(),
50 |         "http://127.0.0.1:32379".into(),
51 |     ]))
52 |     .await?;
53 | 
54 |     put(&cli).await?;
55 |     put_with_lease(&cli).await?;
56 |     get(&cli).await?;
57 | 
58 |     Ok(())
59 | }
60 | 
--------------------------------------------------------------------------------
/examples/tls.rs:
--------------------------------------------------------------------------------
1 | use etcd_rs::{Client, ClientConfig, Endpoint, KeyValueOp, Result};
2 | 
3 | #[tokio::main]
4 | async fn main() -> Result<()> {
5 |     let cli = Client::connect(ClientConfig::new([
6 |         Endpoint::from("http://127.0.0.1:12379")
7 |             .tls(
8 |                 "etcd-1",
9 |                 "./hack/certs/ca.pem",
10 |                 "./hack/certs/etcd-1.pem",
11 |                 "./hack/certs/etcd-1-key.pem",
12 |             )
13 |             .await?,
14 | 
Endpoint::from("http://127.0.0.1:22379") 15 | .tls( 16 | "etcd-2", 17 | "./hack/certs/ca.pem", 18 | "./hack/certs/etcd-2.pem", 19 | "./hack/certs/etcd-2-key.pem", 20 | ) 21 | .await?, 22 | Endpoint::from("http://127.0.0.1:32379") 23 | .tls( 24 | "etcd-3", 25 | "./hack/certs/ca.pem", 26 | "./hack/certs/etcd-3.pem", 27 | "./hack/certs/etcd-3-key.pem", 28 | ) 29 | .await?, 30 | ])) 31 | .await?; 32 | 33 | cli.put(("foo", "bar")).await.expect("put kv"); 34 | let resp = cli.get("foo").await.expect("get kv"); 35 | 36 | assert_eq!(resp.kvs.len(), 1); 37 | assert_eq!(resp.kvs[0].key_str(), "foo"); 38 | assert_eq!(resp.kvs[0].value_str(), "bar"); 39 | 40 | Ok(()) 41 | } 42 | -------------------------------------------------------------------------------- /examples/watch.rs: -------------------------------------------------------------------------------- 1 | use etcd_rs::{Client, ClientConfig, KeyRange, KeyValueOp, Result, WatchInbound, WatchOp}; 2 | 3 | #[tokio::main] 4 | async fn main() -> Result<()> { 5 | let cli = Client::connect(ClientConfig::new([ 6 | "http://127.0.0.1:12379".into(), 7 | "http://127.0.0.1:22379".into(), 8 | "http://127.0.0.1:32379".into(), 9 | ])) 10 | .await?; 11 | 12 | let (mut stream, cancel) = cli 13 | .watch(KeyRange::prefix("foo")) 14 | .await 15 | .expect("watch by prefix"); 16 | 17 | tokio::spawn(async move { 18 | cli.put(("foo1", "1")).await.expect("put kv"); 19 | cli.put(("bar", "2")).await.expect("put kv"); 20 | cli.put(("foo2", "3")).await.expect("put kv"); 21 | cli.put(("bar", "4")).await.expect("put kv"); 22 | cli.put(("foo2", "5")).await.expect("put kv"); 23 | cli.delete("foo1").await.expect("delete kv"); 24 | cli.delete("bar").await.expect("delete kv"); 25 | 26 | cancel.cancel().await.expect("cancel watch"); 27 | }); 28 | 29 | loop { 30 | match stream.inbound().await { 31 | WatchInbound::Ready(resp) => { 32 | println!("receive event: {:?}", resp); 33 | } 34 | WatchInbound::Interrupted(e) => { 35 | eprintln!("encounter error: {:?}", e); 36 | } 37 | WatchInbound::Closed => { 38 | println!("watch stream closed"); 39 | break; 40 | } 41 | } 42 | } 43 | 44 | Ok(()) 45 | } 46 | -------------------------------------------------------------------------------- /hack/.gitignore: -------------------------------------------------------------------------------- 1 | certs -------------------------------------------------------------------------------- /hack/cfssl/ca-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "signing": { 3 | "default": { 4 | "expiry": "168h" 5 | }, 6 | "profiles": { 7 | "server": { 8 | "expiry": "87600h", 9 | "usages": [ 10 | "signing", 11 | "key encipherment", 12 | "server auth" 13 | ] 14 | }, 15 | "client": { 16 | "expiry": "87600h", 17 | "usages": [ 18 | "signing", 19 | "key encipherment", 20 | "client auth" 21 | ] 22 | }, 23 | "peer": { 24 | "expiry": "87600h", 25 | "usages": [ 26 | "signing", 27 | "key encipherment", 28 | "server auth", 29 | "client auth" 30 | ] 31 | } 32 | } 33 | } 34 | } 35 | 36 | -------------------------------------------------------------------------------- /hack/cfssl/ca-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "ETCD_RS", 3 | "key": { 4 | "algo": "rsa", 5 | "size": 2048 6 | }, 7 | "names": [ 8 | { 9 | "C": "US", 10 | "L": "CA", 11 | "O": "ETCD_RS INC.", 12 | "ST": "Los Angeles", 13 | "OU": "E11" 14 | } 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- 
/hack/download_etcd.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | ETCD_VERSION=$1
4 | ETCD_VERSION=${ETCD_VERSION:-v3.5.2}
5 | 
6 | ARCH=$2
7 | ARCH=${ARCH:-linux-amd64}
8 | 
9 | wget https://github.com/etcd-io/etcd/releases/download/${ETCD_VERSION}/etcd-${ETCD_VERSION}-${ARCH}.tar.gz
10 | 
11 | tar -xf etcd-${ETCD_VERSION}-${ARCH}.tar.gz
12 | 
13 | mv etcd-${ETCD_VERSION}-${ARCH} etcd
14 | 
15 | echo "etcd ${ETCD_VERSION} ${ARCH} downloaded"
--------------------------------------------------------------------------------
/hack/generate_etcd_certificate.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | CUR_DIR=$(dirname "$0")
4 | 
5 | echo ${CUR_DIR}
6 | 
7 | cfssl gencert -initca ${CUR_DIR}/cfssl/ca-csr.json | cfssljson -bare ca -
8 | 
9 | rm -rf ${CUR_DIR}/certs
10 | mkdir ${CUR_DIR}/certs
11 | mv ca.csr ${CUR_DIR}/certs
12 | mv ca.pem ${CUR_DIR}/certs
13 | mv ca-key.pem ${CUR_DIR}/certs
14 | 
15 | create_certificate() {
16 |     NODE=$1
17 |     CN="lodrem-etcd-cluster-${NODE}"
18 | 
19 |     echo "{\"CN\": \"${CN}\", \"hosts\": [\"\"], \"key\":{\"algo\": \"rsa\", \"size\": 2048}}" | \
20 |     cfssl gencert \
21 |         -ca="${CUR_DIR}/certs/ca.pem" \
22 |         -ca-key="${CUR_DIR}/certs/ca-key.pem" \
23 |         -config="${CUR_DIR}/cfssl/ca-config.json" \
24 |         -profile=server \
25 |         -hostname="127.0.0.1,localhost,${NODE}" - | cfssljson -bare "${NODE}"
26 | 
27 |     echo "{\"CN\": \"${CN}\", \"hosts\": [\"\"], \"key\":{\"algo\": \"rsa\", \"size\": 2048}}" | \
28 |     cfssl gencert \
29 |         -ca="${CUR_DIR}/certs/ca.pem" \
30 |         -ca-key="${CUR_DIR}/certs/ca-key.pem" \
31 |         -config="${CUR_DIR}/cfssl/ca-config.json" \
32 |         -profile=peer \
33 |         -hostname="127.0.0.1,localhost,${NODE}" - | cfssljson -bare "${NODE}"
34 | 
35 |     mv ${NODE}.csr ${CUR_DIR}/certs/
36 |     mv ${NODE}.pem ${CUR_DIR}/certs/
37 |     mv ${NODE}-key.pem ${CUR_DIR}/certs/
38 | }
39 | 
40 | NODES=(etcd-1 etcd-2 etcd-3)
41 | 
42 | for NODE in ${NODES[@]}; do
43 |     create_certificate ${NODE}
44 | done
--------------------------------------------------------------------------------
/hack/generate_etcd_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | CUR_DIR=$(dirname "$0")
4 | 
5 | DOCKER_COMPOSE_FILE=$1
6 | DOCKER_COMPOSE_FILE=${DOCKER_COMPOSE_FILE:-docker-compose.yaml}
7 | 
8 | VERSION=$2
9 | VERSION=${VERSION:-v3.5.2}
10 | 
11 | WITH_TLS=$3
12 | WITH_TLS=${WITH_TLS:-false}
13 | 
14 | IMAGE=quay.io/coreos/etcd:${VERSION}
15 | 
16 | SCHEME="http"
17 | ETCD_1_TLS_OPTIONS=""
18 | ETCD_2_TLS_OPTIONS=""
19 | ETCD_3_TLS_OPTIONS=""
20 | if [[ ${WITH_TLS} != 'false' ]]; then
21 |     SCHEME="https"
22 | 
23 |     ETCD_1_TLS_OPTIONS=$(cat <<-EOF
24 |     --client-cert-auth
25 |     --trusted-ca-file /opt/certs/ca.pem
26 |     --cert-file /opt/certs/etcd-1.pem
27 |     --key-file /opt/certs/etcd-1-key.pem
28 |     --peer-client-cert-auth
29 |     --peer-trusted-ca-file /opt/certs/ca.pem
30 |     --peer-cert-file /opt/certs/etcd-1.pem
31 |     --peer-key-file /opt/certs/etcd-1-key.pem
32 | EOF
33 | )
34 | 
35 |     ETCD_2_TLS_OPTIONS=$(cat <<-EOF
36 |     --client-cert-auth
37 |     --trusted-ca-file /opt/certs/ca.pem
38 |     --cert-file /opt/certs/etcd-2.pem
39 |     --key-file /opt/certs/etcd-2-key.pem
40 |     --peer-client-cert-auth
41 |     --peer-trusted-ca-file /opt/certs/ca.pem
42 |     --peer-cert-file /opt/certs/etcd-2.pem
43 |     --peer-key-file /opt/certs/etcd-2-key.pem
44 | EOF
45 | )
46 | 
47 |     ETCD_3_TLS_OPTIONS=$(cat <<-EOF
48 |     --client-cert-auth
49 |     --trusted-ca-file /opt/certs/ca.pem
50 |     --cert-file /opt/certs/etcd-3.pem
51 |     --key-file /opt/certs/etcd-3-key.pem
52 |     --peer-client-cert-auth
53 |     --peer-trusted-ca-file /opt/certs/ca.pem
54 |     --peer-cert-file /opt/certs/etcd-3.pem
55 |     --peer-key-file /opt/certs/etcd-3-key.pem
56 | EOF
57 | )
58 | 
59 | fi
60 | 
61 | 
62 | cat > "${DOCKER_COMPOSE_FILE}" <<EOF
63 | version: "3"
64 | services:
65 | 
66 |   etcd-1:
67 |     image: ${IMAGE}
68 |     container_name: etcd-1
69 |     hostname: etcd-1
70 |     command: >-
71 |       /usr/local/bin/etcd
72 |       -name etcd-1
73 |       -advertise-client-urls ${SCHEME}://etcd-1:12379 -listen-client-urls ${SCHEME}://0.0.0.0:12379
74 |       -initial-advertise-peer-urls ${SCHEME}://etcd-1:12380 -listen-peer-urls ${SCHEME}://0.0.0.0:12380
75 |       -initial-cluster-token etcd-cluster
76 |       -initial-cluster etcd-1=${SCHEME}://etcd-1:12380,etcd-2=${SCHEME}://etcd-2:22380,etcd-3=${SCHEME}://etcd-3:32380
77 |       -initial-cluster-state new
78 |       ${ETCD_1_TLS_OPTIONS}
79 |     volumes:
80 |       - ${CUR_DIR}/certs:/opt/certs
81 |     ports:
82 |       - "12379:12379"
83 |       - "12380:12380"
84 | 
85 |   etcd-2:
86 |     image: ${IMAGE}
87 |     container_name: etcd-2
88 |     hostname: etcd-2
89 |     command: >-
90 |       /usr/local/bin/etcd
91 |       -name etcd-2
92 |       -advertise-client-urls ${SCHEME}://etcd-2:22379 -listen-client-urls ${SCHEME}://0.0.0.0:22379
93 |       -initial-advertise-peer-urls ${SCHEME}://etcd-2:22380 -listen-peer-urls ${SCHEME}://0.0.0.0:22380
94 |       -initial-cluster-token etcd-cluster
95 |       -initial-cluster etcd-1=${SCHEME}://etcd-1:12380,etcd-2=${SCHEME}://etcd-2:22380,etcd-3=${SCHEME}://etcd-3:32380
96 |       -initial-cluster-state new
97 |       ${ETCD_2_TLS_OPTIONS}
98 |     volumes:
99 |       - ${CUR_DIR}/certs:/opt/certs
100 |     ports:
101 |       - "22379:22379"
102 |       - "22380:22380"
103 | 
104 |   etcd-3:
105 |     image: ${IMAGE}
106 |     container_name: etcd-3
107 |     hostname: etcd-3
108 |     command: >-
109 |       /usr/local/bin/etcd
110 |       -name etcd-3
111 |       -advertise-client-urls ${SCHEME}://etcd-3:32379 -listen-client-urls ${SCHEME}://0.0.0.0:32379
112 |       -initial-advertise-peer-urls ${SCHEME}://etcd-3:32380 -listen-peer-urls ${SCHEME}://0.0.0.0:32380
113 |       -initial-cluster-token etcd-cluster
114 |       -initial-cluster etcd-1=${SCHEME}://etcd-1:12380,etcd-2=${SCHEME}://etcd-2:22380,etcd-3=${SCHEME}://etcd-3:32380
115 |       -initial-cluster-state new
116 |       ${ETCD_3_TLS_OPTIONS}
117 |     volumes:
118 |       - ${CUR_DIR}/certs:/opt/certs
119 |     ports:
120 |       - "32379:32379"
121 |       - "32380:32380"
122 | EOF
--------------------------------------------------------------------------------
/hack/openssl.cnf:
--------------------------------------------------------------------------------
1 | # etcd OpenSSL configuration file.
2 | SAN = "IP:127.0.0.1"
3 | dir = ${ENV::CA_BASE_DIR}
4 | 
5 | [ ca ]
6 | default_ca = etcd_ca
7 | 
8 | [ etcd_ca ]
9 | certs = $dir/certs
10 | certificate = $dir/certs/etcd-ca.crt
11 | crl = $dir/crl.pem
12 | crl_dir = $dir/crl
13 | crlnumber = $dir/crlnumber
14 | database = $dir/index.txt
15 | email_in_dn = no
16 | new_certs_dir = $dir/newcerts
17 | private_key = $dir/private/etcd-ca.key
18 | serial = $dir/serial
19 | RANDFILE = $dir/private/.rand
20 | name_opt = ca_default
21 | cert_opt = ca_default
22 | default_days = 3650
23 | default_crl_days = 30
24 | default_md = sha512
25 | preserve = no
26 | policy = policy_etcd
27 | 
28 | [ policy_etcd ]
29 | organizationName = optional
30 | commonName = supplied
31 | 
32 | [ req ]
33 | default_bits = 4096
34 | default_keyfile = privkey.pem
35 | distinguished_name = req_distinguished_name
36 | attributes = req_attributes
37 | x509_extensions = v3_ca
38 | string_mask = utf8only
39 | req_extensions = etcd_client
40 | 
41 | [ req_distinguished_name ]
42 | countryName = Country Name (2 letter code)
43 | countryName_default = US
44 | countryName_min = 2
45 | countryName_max = 2
46 | commonName = Common Name (FQDN)
47 | 0.organizationName = Organization Name (eg, company)
48 | 0.organizationName_default = etcd-ca
49 | 
50 | [ req_attributes ]
51 | 
52 | [ v3_ca ]
53 | basicConstraints = CA:true
54 | keyUsage = keyCertSign,cRLSign
55 | subjectKeyIdentifier = hash
56 | 
57 | [ etcd_client ]
58 | basicConstraints = CA:FALSE
59 | extendedKeyUsage = clientAuth
60 | keyUsage = digitalSignature, keyEncipherment
61 | 
62 | [ etcd_peer ]
63 | basicConstraints = CA:FALSE
64 | extendedKeyUsage = clientAuth, serverAuth
65 | keyUsage = digitalSignature, keyEncipherment
66 | subjectAltName = ${ENV::SAN}
67 | 
68 | [ etcd_server ]
69 | basicConstraints = CA:FALSE
70 | extendedKeyUsage = clientAuth, serverAuth
71 | keyUsage = digitalSignature, keyEncipherment
72 | subjectAltName = ${ENV::SAN}
--------------------------------------------------------------------------------
/integration-tests/.gitignore:
--------------------------------------------------------------------------------
1 | /target
--------------------------------------------------------------------------------
/integration-tests/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "etcd-rs-integration-tests"
3 | version = "0.1.0"
4 | edition = "2021"
5 | publish = false
6 | 
7 | [dependencies]
8 | etcd-rs = { path = "../", features = ["tls"] }
9 | tokio = { version = "1.27", features = ["full"] }
10 | tracing-subscriber = { version = "0.3", features = ["env-filter"] }
11 | tracing = "0.1"
12 | rand = "0.8"
13 | tonic = "0.9"
--------------------------------------------------------------------------------
/integration-tests/src/failover.rs:
--------------------------------------------------------------------------------
1 | use std::time::Duration;
2 | 
3 | use rand::Rng;
4 | use tokio::time::timeout;
5 | 
6 | use etcd_rs::*;
7 | 
8 | use crate::support::{Context, KVOp};
9 | 
10 | async fn put_and_get(cli: &Client, retry: usize) {
11 |     for _ in 0..=retry {
12 |         tokio::time::sleep(Duration::from_secs(3)).await;
13 | 
14 |         let k = format!("key-{}", rand::thread_rng().gen::<u64>());
15 |         let v = rand::thread_rng().gen::<u64>().to_string();
16 |         let r = cli.put(PutRequest::new(k.clone(), v.clone())).await;
17 |         if let Some(e) = r.err() {
18 |             eprintln!("failed to put kv (will retry): {:?}", e);
19 |             continue;
20 |         }
21 | 
22 |         let r = cli.get(k).await;
23 | 
24 |         match r {
25 | Ok(resp) => { 26 | assert_eq!(1, resp.count); 27 | assert_eq!(&v, resp.kvs[0].value_str()); 28 | 29 | return; 30 | } 31 | Err(e) => { 32 | eprintln!("failed to range kv (will retry): {:?}", e); 33 | } 34 | } 35 | } 36 | 37 | unreachable!(); 38 | } 39 | 40 | async fn expect_timeout(cli: &Client) { 41 | for _ in 0..3 { 42 | tokio::time::sleep(Duration::from_millis(100)).await; 43 | 44 | let res = cli.put(PutRequest::new("foo", "bar")).await; // FIXME check specified error 45 | assert!(res.is_err(), "resp = {:?}", res); 46 | } 47 | } 48 | 49 | #[tokio::test] 50 | async fn test_kv_when_node_stopped() { 51 | let ctx = Context::new(false); 52 | 53 | let cli = ctx.connect_to_cluster().await; 54 | 55 | put_and_get(&cli, 0).await; 56 | 57 | ctx.etcd_cluster.stop_node(1); 58 | 59 | put_and_get(&cli, 5).await; 60 | 61 | ctx.etcd_cluster.stop_node(2); 62 | 63 | ctx.etcd_cluster.print_status(); 64 | expect_timeout(&cli).await; 65 | 66 | ctx.etcd_cluster.start_node(1); 67 | 68 | put_and_get(&cli, 5).await; 69 | 70 | ctx.etcd_cluster.stop_node(3); 71 | 72 | expect_timeout(&cli).await; 73 | 74 | ctx.etcd_cluster.start_node(2); 75 | 76 | put_and_get(&cli, 5).await; 77 | } 78 | 79 | #[tokio::test] 80 | async fn test_kv_when_cluster_down() { 81 | let ctx = Context::new(false); 82 | let cli = ctx.connect_to_cluster().await; 83 | 84 | put_and_get(&cli, 0).await; 85 | 86 | ctx.etcd_cluster.stop_node(1); 87 | ctx.etcd_cluster.stop_node(2); 88 | ctx.etcd_cluster.stop_node(3); 89 | 90 | ctx.etcd_cluster.print_status(); 91 | expect_timeout(&cli).await; 92 | 93 | ctx.etcd_cluster.start_node(1); 94 | ctx.etcd_cluster.start_node(2); 95 | ctx.etcd_cluster.start_node(3); 96 | 97 | put_and_get(&cli, 5).await; 98 | put_and_get(&cli, 0).await; 99 | put_and_get(&cli, 0).await; 100 | } 101 | 102 | #[tokio::test] 103 | async fn test_watch_when_cluster_down() { 104 | let ctx = Context::new(false); 105 | let cli = ctx.connect_to_cluster().await; 106 | 107 | const PREFIX: &str = "prefix-"; 108 | 109 | let (mut stream, _cancel) = cli 110 | .watch(KeyRange::prefix(PREFIX)) 111 | .await 112 | .expect("watch created"); 113 | 114 | ctx.etcd_cluster.stop_node(1); 115 | ctx.etcd_cluster.stop_node(2); 116 | ctx.etcd_cluster.stop_node(3); 117 | 118 | { 119 | let mut interrupted = false; 120 | 121 | for _ in 0..10 { 122 | let x = timeout(Duration::from_secs(1), stream.inbound()).await; 123 | match x { 124 | Ok(etcd_rs::WatchInbound::Interrupted(_)) => { 125 | interrupted = true; 126 | break; 127 | } 128 | Ok(etcd_rs::WatchInbound::Closed) => { 129 | panic!("should not close watch stream"); 130 | } 131 | Err(e) => { 132 | println!("timeout: {:?}", e); 133 | } 134 | Ok(v) => { 135 | panic!("should not reach here: {:?}", v) 136 | } 137 | } 138 | } 139 | 140 | assert!(interrupted); 141 | } 142 | 143 | expect_timeout(&cli).await; 144 | 145 | ctx.etcd_cluster.start_node(1); 146 | ctx.etcd_cluster.start_node(2); 147 | ctx.etcd_cluster.start_node(3); 148 | 149 | tokio::time::sleep(Duration::from_secs(2)).await; 150 | 151 | put_and_get(&cli, 5).await; // re-connect to cluster 152 | put_and_get(&cli, 0).await; 153 | put_and_get(&cli, 0).await; 154 | 155 | let (mut stream, cancel) = cli 156 | .watch(KeyRange::prefix(PREFIX)) 157 | .await 158 | .expect("watch created"); 159 | 160 | let ops: Vec<_> = vec![ 161 | KVOp::Put("foo1".to_owned(), "bar1".to_owned()), 162 | KVOp::Put("foo2".to_owned(), "bar2".to_owned()), 163 | KVOp::Put("foo3".to_owned(), "bar3".to_owned()), 164 | KVOp::Delete("foo1".to_owned()), 165 | KVOp::Delete("foo2".to_owned()), 
166 | ] 167 | .into_iter() 168 | .map(|op| match op { 169 | KVOp::Put(k, v) => KVOp::Put(format!("{}-{}", PREFIX, k), v), 170 | KVOp::Delete(k) => KVOp::Delete(format!("{}-{}", PREFIX, k)), 171 | }) 172 | .collect(); 173 | 174 | apply_kv_ops!(cli, ops); 175 | 176 | cancel.cancel().await.expect("watch canceled"); 177 | 178 | assert_ops_events!(ops, stream); 179 | } 180 | -------------------------------------------------------------------------------- /integration-tests/src/kv.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use tonic::Code; 4 | 5 | use etcd_rs::*; 6 | 7 | use crate::support::Context; 8 | 9 | #[tokio::test] 10 | async fn test_put_error() { 11 | let ctx = Context::new(false); 12 | let cli = ctx.connect_to_cluster().await; 13 | 14 | let resp = cli.put(("", "bar")).await; 15 | match resp { 16 | Err(Error::Response(status)) => { 17 | assert_eq!(status.code(), Code::InvalidArgument); 18 | assert_eq!(status.message(), "etcdserver: key is not provided") 19 | } 20 | _ => unreachable!(), 21 | } 22 | 23 | // hard code max in server-side 24 | let resp = cli 25 | .put(("foo", "x".repeat((1.5 * (1024 * 1024) as f64) as usize))) 26 | .await; 27 | match resp { 28 | Err(Error::Response(status)) => { 29 | assert_eq!(status.code(), Code::InvalidArgument); 30 | assert_eq!(status.message(), "etcdserver: request is too large") 31 | } 32 | _ => unreachable!(), 33 | } 34 | } 35 | 36 | #[tokio::test] 37 | async fn test_put_with_lease() { 38 | let ctx = Context::new(false); 39 | let cli = ctx.connect_to_cluster().await; 40 | 41 | let (key, value) = ("foo", "bar"); 42 | 43 | let lease = cli 44 | .grant_lease(Duration::from_secs(10)) 45 | .await 46 | .expect("grant lease"); 47 | 48 | cli.put(PutRequest::from((key, value)).lease(lease.id)) 49 | .await 50 | .expect("put kv with lease"); 51 | 52 | let resp = cli.get(key).await.expect("get kv"); 53 | assert_eq!(resp.kvs.len(), 1); 54 | assert_eq!(key, resp.kvs[0].key_str()); 55 | assert_eq!(value, resp.kvs[0].value_str()); 56 | assert_eq!(lease.id, resp.kvs[0].lease) 57 | } 58 | 59 | #[tokio::test] 60 | async fn test_put_with_ignore_value() { 61 | let ctx = Context::new(false); 62 | let cli = ctx.connect_to_cluster().await; 63 | 64 | let (key, value) = ("foo", "bar"); 65 | 66 | match cli.put(PutRequest::from((key, "")).ignore_value()).await { 67 | Err(Error::Response(status)) => { 68 | assert_eq!(status.code(), Code::InvalidArgument); 69 | assert_eq!(status.message(), "etcdserver: key not found") 70 | } 71 | _ => unreachable!(), 72 | } 73 | 74 | cli.put((key, value)).await.expect("put kv"); 75 | 76 | cli.put(PutRequest::from((key, "")).ignore_value()) 77 | .await 78 | .expect("put kv with ignore value"); 79 | 80 | let resp = cli.get(key).await.expect("get kv"); 81 | assert_eq!(resp.kvs.len(), 1); 82 | assert_eq!(key, resp.kvs[0].key_str()); 83 | assert_eq!(value, resp.kvs[0].value_str()); 84 | } 85 | 86 | #[tokio::test] 87 | async fn test_put_with_ignore_lease() { 88 | let ctx = Context::new(false); 89 | let cli = ctx.connect_to_cluster().await; 90 | 91 | let (key, value) = ("foo", "bar"); 92 | 93 | match cli.put(PutRequest::from((key, "")).ignore_lease()).await { 94 | Err(Error::Response(status)) => { 95 | assert_eq!(status.code(), Code::InvalidArgument); 96 | assert_eq!(status.message(), "etcdserver: key not found") 97 | } 98 | _ => unreachable!(), 99 | } 100 | 101 | let lease = cli 102 | .grant_lease(Duration::from_secs(10)) 103 | .await 104 | .expect("grant lease"); 105 | 106 | 
cli.put(PutRequest::from((key, value)).lease(lease.id)) 107 | .await 108 | .expect("put kv with lease"); 109 | 110 | cli.put(PutRequest::from((key, "bar1")).ignore_lease()) 111 | .await 112 | .expect("put kv with ignore lease"); 113 | 114 | let resp = cli.get(key).await.expect("get kv"); 115 | assert_eq!(resp.kvs.len(), 1); 116 | assert_eq!(key, resp.kvs[0].key_str()); 117 | assert_eq!("bar1", resp.kvs[0].value_str()); 118 | assert_eq!(lease.id, resp.kvs[0].lease) 119 | } 120 | 121 | #[tokio::test] 122 | async fn test_get_all() { 123 | let ctx = Context::new(false); 124 | let cli = ctx.connect_to_cluster().await; 125 | 126 | let kvs = vec![ 127 | ("a", "a1"), 128 | ("b", "b1"), 129 | ("c", "c1"), 130 | ("c", "c2"), 131 | ("c", "c3"), 132 | ("foo", "foo1"), 133 | ("foo/abc", "foo/abc1"), 134 | ("fop", "fop1"), 135 | ]; 136 | 137 | for (k, v) in kvs { 138 | cli.put((k, v)) 139 | .await 140 | .expect(&format!("put kv: ({}, {})", k, v)); 141 | } 142 | 143 | let resp = cli.get_all().await.expect("get all key-value"); 144 | 145 | assert_eq!(resp.count, 6); 146 | 147 | assert_eq!( 148 | resp.kvs, 149 | vec![ 150 | KeyValue { 151 | key: "a".into(), 152 | value: "a1".into(), 153 | create_revision: 2, 154 | mod_revision: 2, 155 | version: 1, 156 | lease: 0 157 | }, 158 | KeyValue { 159 | key: "b".into(), 160 | value: "b1".into(), 161 | create_revision: 3, 162 | mod_revision: 3, 163 | version: 1, 164 | lease: 0 165 | }, 166 | KeyValue { 167 | key: "c".into(), 168 | value: "c3".into(), 169 | create_revision: 4, 170 | mod_revision: 6, 171 | version: 3, 172 | lease: 0 173 | }, 174 | KeyValue { 175 | key: "foo".into(), 176 | value: "foo1".into(), 177 | create_revision: 7, 178 | mod_revision: 7, 179 | version: 1, 180 | lease: 0 181 | }, 182 | KeyValue { 183 | key: "foo/abc".into(), 184 | value: "foo/abc1".into(), 185 | create_revision: 8, 186 | mod_revision: 8, 187 | version: 1, 188 | lease: 0 189 | }, 190 | KeyValue { 191 | key: "fop".into(), 192 | value: "fop1".into(), 193 | create_revision: 9, 194 | mod_revision: 9, 195 | version: 1, 196 | lease: 0 197 | } 198 | ] 199 | ); 200 | } 201 | 202 | #[tokio::test] 203 | async fn test_delete_all() { 204 | let ctx = Context::new(false); 205 | let cli = ctx.connect_to_cluster().await; 206 | 207 | let kvs = vec![ 208 | ("a", "a1"), 209 | ("b", "b1"), 210 | ("c", "c1"), 211 | ("c", "c2"), 212 | ("c", "c3"), 213 | ("foo", "foo1"), 214 | ("foo/abc", "foo/abc1"), 215 | ("fop", "fop1"), 216 | ]; 217 | 218 | for (k, v) in kvs { 219 | cli.put((k, v)) 220 | .await 221 | .expect(&format!("put kv: ({}, {})", k, v)); 222 | } 223 | 224 | cli.delete_all().await.expect("delete all key-value"); 225 | 226 | let resp = cli.get_all().await.expect("get all key-value"); 227 | assert_eq!(resp.count, 0); 228 | assert!(resp.kvs.is_empty()); 229 | } 230 | 231 | #[tokio::test] 232 | async fn test_compact_error() { 233 | let ctx = Context::new(false); 234 | let cli = ctx.connect_to_cluster().await; 235 | 236 | for _ in 0..5 { 237 | cli.put(("foo", "bar")).await.expect("put key-value"); 238 | } 239 | 240 | cli.compact(6).await.expect("compact with current revision"); 241 | cli.compact(6) 242 | .await 243 | .expect_err("compact with compacted revision"); 244 | cli.compact(42) 245 | .await 246 | .expect_err("compact with future revision"); 247 | } 248 | -------------------------------------------------------------------------------- /integration-tests/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow(unused)] 2 | 3 | #[macro_use] 4 | 
mod support;
5 | mod failover;
6 | mod kv;
7 | mod tls;
8 | mod watch;
9 | 
--------------------------------------------------------------------------------
/integration-tests/src/support.rs:
--------------------------------------------------------------------------------
1 | #![allow(dead_code)]
2 | #![allow(unused_macros)]
3 | 
4 | use etcd_rs::Endpoint;
5 | use std::collections::HashMap;
6 | use std::process::Command;
7 | 
8 | pub struct EtcdCluster {
9 |     nodes: HashMap<String, Endpoint>,
10 | }
11 | 
12 | impl EtcdCluster {
13 |     pub fn new(with_tls: bool) -> Self {
14 |         println!("etcd cluster starting");
15 |         {
16 |             let output = Command::new("just")
17 |                 .arg("setup-etcd-cluster")
18 |                 .arg(with_tls.to_string())
19 |                 .output()
20 |                 .expect("setup etcd cluster");
21 |             assert!(
22 |                 output.status.success(),
23 |                 "stdout: {} \nstderr: {}",
24 |                 String::from_utf8_lossy(&output.stdout),
25 |                 String::from_utf8_lossy(&output.stderr)
26 |             );
27 |         }
28 |         println!("etcd cluster started");
29 | 
30 |         let nodes: HashMap<_, _> = (1..=3)
31 |             .map(|i| {
32 |                 let node = format!("etcd-{}", i);
33 |                 if with_tls {
34 |                     use std::fs::read;
35 | 
36 |                     let ca_cert = read("../hack/certs/ca.pem").expect("read ca cert");
37 |                     let client_cert =
38 |                         read(format!("../hack/certs/{}.pem", node)).expect("read client cert");
39 |                     let client_key =
40 |                         read(format!("../hack/certs/{}-key.pem", node)).expect("read client key");
41 |                     (
42 |                         node.clone(),
43 |                         Endpoint::from(format!("https://127.0.0.1:{}2379", i)).tls_raw(
44 |                             node,
45 |                             ca_cert,
46 |                             client_cert,
47 |                             client_key,
48 |                         ),
49 |                     )
50 |                 } else {
51 |                     (node, format!("http://127.0.0.1:{}2379", i).into())
52 |                 }
53 |             })
54 |             .collect();
55 | 
56 |         Self { nodes }
57 |     }
58 | 
59 |     pub fn print_status(&self) {
60 |         let output = Command::new("just")
61 |             .arg("etcd-cluster-status")
62 |             .output()
63 |             .expect("fetch etcd cluster status");
64 | 
65 |         println!("stdout: {}", String::from_utf8_lossy(&output.stdout));
66 |         println!("stderr: {}", String::from_utf8_lossy(&output.stderr));
67 |     }
68 | 
69 |     pub fn endpoints(&self) -> Vec<Endpoint> {
70 |         self.nodes.values().cloned().collect()
71 |     }
72 | 
73 |     #[track_caller]
74 |     pub fn start_node(&self, i: u64) {
75 |         let caller = std::panic::Location::caller();
76 |         println!(
77 |             "[{}:{}] => etcd node {} starting",
78 |             caller.file(),
79 |             caller.line(),
80 |             i
81 |         );
82 |         assert!(Command::new("just")
83 |             .arg("start-etcd-node")
84 |             .arg(format!("etcd-{}", i))
85 |             .output()
86 |             .expect("start etcd node")
87 |             .status
88 |             .success());
89 |         println!(
90 |             "[{}:{}] => etcd node {} started",
91 |             caller.file(),
92 |             caller.line(),
93 |             i
94 |         );
95 |     }
96 | 
97 |     #[track_caller]
98 |     pub fn stop_node(&self, i: u64) {
99 |         let caller = std::panic::Location::caller();
100 |         println!(
101 |             "[{}:{}] => etcd node {} stopping",
102 |             caller.file(),
103 |             caller.line(),
104 |             i
105 |         );
106 |         assert!(Command::new("just")
107 |             .arg("stop-etcd-node")
108 |             .arg(format!("etcd-{}", i))
109 |             .output()
110 |             .expect("stop etcd node")
111 |             .status
112 |             .success());
113 |         println!(
114 |             "[{}:{}] => etcd node {} stopped",
115 |             caller.file(),
116 |             caller.line(),
117 |             i
118 |         );
119 |     }
120 | }
121 | 
122 | impl Drop for EtcdCluster {
123 |     fn drop(&mut self) {
124 |         println!("etcd cluster stopping");
125 |         assert!(Command::new("just")
126 |             .arg("teardown-etcd-cluster")
127 |             .output()
128 |             .expect("teardown etcd cluster")
129 |             .status
130 |             .success());
131 |         println!("etcd cluster stopped");
132 |     }
133 | }
134 | 
135 | pub struct Context {
136 |     pub
etcd_cluster: EtcdCluster, 137 | auth: Option<(String, String)>, 138 | } 139 | 140 | impl Context { 141 | pub fn new(with_tls: bool) -> Self { 142 | Self { 143 | etcd_cluster: EtcdCluster::new(with_tls), 144 | auth: None, 145 | } 146 | } 147 | 148 | pub fn set_auth(mut self, user: String, pwd: String) -> Self { 149 | self.auth = Some((user, pwd)); 150 | self 151 | } 152 | 153 | pub async fn connect_to_cluster(&self) -> etcd_rs::Client { 154 | use etcd_rs::*; 155 | 156 | Client::connect(ClientConfig::new(self.etcd_cluster.endpoints())) 157 | .await 158 | .expect("connect to etcd cluster") 159 | } 160 | } 161 | 162 | #[derive(Debug, Clone, PartialEq)] 163 | pub enum KVOp { 164 | Put(String, String), 165 | Delete(String), 166 | } 167 | 168 | macro_rules! apply_kv_ops { 169 | ($cli:expr, $ops:expr) => { 170 | for op in $ops.iter() { 171 | match op { 172 | KVOp::Put(k, v) => { 173 | let resp = $cli.put(PutRequest::new(k.clone(), v.clone())).await; 174 | assert!(resp.is_ok()); 175 | } 176 | KVOp::Delete(k) => { 177 | let resp = $cli 178 | .delete(DeleteRequest::new(KeyRange::key(k.clone()))) 179 | .await; 180 | assert!(resp.is_ok()); 181 | } 182 | } 183 | } 184 | }; 185 | } 186 | 187 | macro_rules! assert_ops_events { 188 | ($ops:expr, $stream:expr) => { 189 | let events = { 190 | let mut events = vec![]; 191 | 192 | loop { 193 | match tokio::time::timeout(std::time::Duration::from_secs(1), $stream.inbound()) 194 | .await 195 | { 196 | Ok(etcd_rs::WatchInbound::Ready(resp)) => { 197 | for e in resp.events { 198 | events.push(match e.event_type { 199 | EventType::Put => KVOp::Put( 200 | e.kv.key_str().to_owned(), 201 | e.kv.value_str().to_owned(), 202 | ), 203 | EventType::Delete => KVOp::Delete(e.kv.key_str().to_owned()), 204 | }); 205 | } 206 | } 207 | Ok(etcd_rs::WatchInbound::Closed) => break, 208 | others => panic!("should not reach here but got: {:?}", others), 209 | } 210 | } 211 | events 212 | }; 213 | 214 | assert_eq!(&$ops[..events.len()], events); 215 | }; 216 | } 217 | -------------------------------------------------------------------------------- /integration-tests/src/tls.rs: -------------------------------------------------------------------------------- 1 | use etcd_rs::*; 2 | 3 | use crate::support::Context; 4 | 5 | #[tokio::test] 6 | async fn test_put() { 7 | let ctx = Context::new(true); 8 | let cli = ctx.connect_to_cluster().await; 9 | 10 | cli.put(("foo", "bar")).await.expect("put kv"); 11 | 12 | let resp = cli.get("foo").await.expect("get kv"); 13 | assert_eq!(resp.kvs.len(), 1); 14 | assert_eq!(resp.kvs[0].key_str(), "foo"); 15 | assert_eq!(resp.kvs[0].value_str(), "bar"); 16 | } 17 | -------------------------------------------------------------------------------- /integration-tests/src/watch.rs: -------------------------------------------------------------------------------- 1 | use etcd_rs::*; 2 | 3 | use crate::support::{Context, KVOp}; 4 | 5 | #[tokio::test] 6 | async fn test_watch() { 7 | let ctx = Context::new(false); 8 | let cli = ctx.connect_to_cluster().await; 9 | 10 | const PREFIX: &str = "prefix-test-watch"; 11 | 12 | let (mut stream, cancel) = cli 13 | .watch(KeyRange::prefix(PREFIX)) 14 | .await 15 | .expect("watch created"); 16 | 17 | let ops: Vec<_> = vec![ 18 | KVOp::Put("foo1".to_owned(), "bar1".to_owned()), 19 | KVOp::Put("foo2".to_owned(), "bar2".to_owned()), 20 | KVOp::Put("foo3".to_owned(), "bar3".to_owned()), 21 | KVOp::Delete("foo1".to_owned()), 22 | KVOp::Delete("foo2".to_owned()), 23 | ] 24 | .into_iter() 25 | .map(|op| match op { 26 | KVOp::Put(k, 
v) => KVOp::Put(format!("{}-{}", PREFIX, k), v),
27 |         KVOp::Delete(k) => KVOp::Delete(format!("{}-{}", PREFIX, k)),
28 |     })
29 |     .collect();
30 | 
31 |     apply_kv_ops!(cli, ops);
32 | 
33 |     cancel.cancel().await.expect("watch canceled");
34 | 
35 |     assert_ops_events!(ops, stream);
36 | }
37 | 
38 | #[tokio::test]
39 | async fn test_watch_multi() {
40 |     let ctx = Context::new(false);
41 |     let cli = ctx.connect_to_cluster().await;
42 | 
43 |     const PREFIX1: &str = "prefix-test-watch-multi1";
44 |     const PREFIX2: &str = "prefix-test-watch-multi2";
45 | 
46 |     let (mut stream1, cancel1) = cli
47 |         .watch(KeyRange::prefix(PREFIX1))
48 |         .await
49 |         .expect("watch created");
50 |     let (mut stream2, cancel2) = cli
51 |         .watch(KeyRange::prefix(PREFIX2))
52 |         .await
53 |         .expect("watch created");
54 | 
55 |     let ops_1: Vec<_> = vec![
56 |         KVOp::Put("foo1".to_owned(), "bar1".to_owned()),
57 |         KVOp::Put("foo2".to_owned(), "bar2".to_owned()),
58 |         KVOp::Put("foo1".to_owned(), "bar3".to_owned()),
59 |         KVOp::Delete("foo1".to_owned()),
60 |         KVOp::Delete("foo2".to_owned()),
61 |     ]
62 |     .into_iter()
63 |     .map(|op| match op {
64 |         KVOp::Put(k, v) => KVOp::Put(format!("{}-{}", PREFIX1, k), v),
65 |         KVOp::Delete(k) => KVOp::Delete(format!("{}-{}", PREFIX1, k)),
66 |     })
67 |     .collect();
68 | 
69 |     let ops_2: Vec<_> = vec![
70 |         KVOp::Put("foo1".to_owned(), "bar1".to_owned()),
71 |         KVOp::Put("foo2".to_owned(), "bar2".to_owned()),
72 |         KVOp::Put("foo3".to_owned(), "bar3".to_owned()),
73 |         KVOp::Put("foo4".to_owned(), "bar3".to_owned()),
74 |         KVOp::Delete("foo1".to_owned()),
75 |         KVOp::Delete("foo2".to_owned()),
76 |     ]
77 |     .into_iter()
78 |     .map(|op| match op {
79 |         KVOp::Put(k, v) => KVOp::Put(format!("{}-{}", PREFIX2, k), v),
80 |         KVOp::Delete(k) => KVOp::Delete(format!("{}-{}", PREFIX2, k)),
81 |     })
82 |     .collect();
83 | 
84 |     apply_kv_ops!(cli, ops_1);
85 |     apply_kv_ops!(cli, ops_2);
86 | 
87 |     cancel1.cancel().await.expect("watch canceled");
88 |     cancel2.cancel().await.expect("watch canceled");
89 | 
90 |     assert_ops_events!(ops_1, stream1);
91 |     assert_ops_events!(ops_2, stream2);
92 | }
--------------------------------------------------------------------------------
/justfile:
--------------------------------------------------------------------------------
1 | DOCKER_COMPOSE_SPEC := "etcd-docker-compose.yaml"
2 | 
3 | default:
4 |     just --list
5 | 
6 | # Build the project
7 | build:
8 |     cargo build
9 | 
10 | # Format code with rustfmt
11 | fmt:
12 |     cargo fmt
13 | 
14 | # Lint code with clippy
15 | lint:
16 |     cargo fmt --all -- --check
17 |     cargo clippy --all-targets --all-features
18 | 
19 | # Run unit tests
20 | unit-test:
21 |     cargo nextest run
22 |     cargo test --doc
23 | 
24 | # Run integration tests
25 | integration-test:
26 |     #!/usr/bin/env bash
27 |     set -e
28 |     pushd integration-tests
29 |     cargo nextest run --test-threads=1 --retries 5
30 |     popd
31 | 
32 | # Download etcdctl
33 | etcdctl:
34 |     ./hack/download_etcd.sh
35 | 
36 | setup-etcd-cluster tls="false" version="v3.5.2": teardown-etcd-cluster
37 |     {{ if tls != "false" { "./hack/generate_etcd_certificate.sh" } else { "" } }}
38 |     ./hack/generate_etcd_cluster.sh {{ DOCKER_COMPOSE_SPEC }} {{ version }} {{ tls }}
39 |     docker-compose -f {{ DOCKER_COMPOSE_SPEC }} up -d
40 | 
41 | teardown-etcd-cluster:
42 |     docker-compose -f {{ DOCKER_COMPOSE_SPEC }} down || true
43 | 
44 | start-etcd-node node:
45 |     docker-compose -f {{ DOCKER_COMPOSE_SPEC }} start {{ node }}
46 | 
47 | stop-etcd-node node:
48 |     docker-compose -f {{ DOCKER_COMPOSE_SPEC }} stop {{ node }}
49 | 
50 |
etcd-cluster-status: 51 | docker-compose -f {{ DOCKER_COMPOSE_SPEC }} ps 52 | etcd/etcdctl endpoint status --endpoints=127.0.0.1:12379,127.0.0.1:22379,127.0.0.1:32379 -w table -------------------------------------------------------------------------------- /proto/auth.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package authpb; 3 | 4 | message UserAddOptions { 5 | bool no_password = 1; 6 | }; 7 | 8 | // User is a single entry in the bucket authUsers 9 | message User { 10 | bytes name = 1; 11 | bytes password = 2; 12 | repeated string roles = 3; 13 | UserAddOptions options = 4; 14 | } 15 | 16 | // Permission is a single entity 17 | message Permission { 18 | enum Type { 19 | READ = 0; 20 | WRITE = 1; 21 | READWRITE = 2; 22 | } 23 | Type permType = 1; 24 | 25 | bytes key = 2; 26 | bytes range_end = 3; 27 | } 28 | 29 | // Role is a single entry in the bucket authRoles 30 | message Role { 31 | bytes name = 1; 32 | 33 | repeated Permission keyPermission = 2; 34 | } 35 | -------------------------------------------------------------------------------- /proto/kv.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package mvccpb; 3 | 4 | message KeyValue { 5 | // key is the key in bytes. An empty key is not allowed. 6 | bytes key = 1; 7 | // create_revision is the revision of last creation on this key. 8 | int64 create_revision = 2; 9 | // mod_revision is the revision of last modification on this key. 10 | int64 mod_revision = 3; 11 | // version is the version of the key. A deletion resets 12 | // the version to zero and any modification of the key 13 | // increases its version. 14 | int64 version = 4; 15 | // value is the value held by the key, in bytes. 16 | bytes value = 5; 17 | // lease is the ID of the lease that attached to key. 18 | // When the attached lease expires, the key will be deleted. 19 | // If lease is 0, then no lease is attached to the key. 20 | int64 lease = 6; 21 | } 22 | 23 | message Event { 24 | enum EventType { 25 | PUT = 0; 26 | DELETE = 1; 27 | } 28 | // type is the kind of event. If type is a PUT, it indicates 29 | // new data has been stored to the key. If type is a DELETE, 30 | // it indicates the key was deleted. 31 | EventType type = 1; 32 | // kv holds the KeyValue for the event. 33 | // A PUT event contains current kv pair. 34 | // A PUT event with kv.Version=1 indicates the creation of a key. 35 | // A DELETE/EXPIRE event contains the deleted key with 36 | // its modification revision set to the revision of deletion. 37 | KeyValue kv = 2; 38 | 39 | // prev_kv holds the key-value pair before the event happens. 40 | KeyValue prev_kv = 3; 41 | } 42 | -------------------------------------------------------------------------------- /proto/rpc.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package etcdserverpb; 3 | 4 | import "kv.proto"; 5 | import "auth.proto"; 6 | 7 | service KV { 8 | // Range gets the keys in the range from the key-value store. 9 | rpc Range(RangeRequest) returns (RangeResponse) {} 10 | 11 | // Put puts the given key into the key-value store. 12 | // A put request increments the revision of the key-value store 13 | // and generates one event in the event history. 14 | rpc Put(PutRequest) returns (PutResponse) {} 15 | 16 | // DeleteRange deletes the given range from the key-value store. 
17 | // A delete request increments the revision of the key-value store 18 | // and generates a delete event in the event history for every deleted key. 19 | rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) {} 20 | 21 | // Txn processes multiple requests in a single transaction. 22 | // A txn request increments the revision of the key-value store 23 | // and generates events with the same revision for every completed request. 24 | // It is not allowed to modify the same key several times within one txn. 25 | rpc Txn(TxnRequest) returns (TxnResponse) {} 26 | 27 | // Compact compacts the event history in the etcd key-value store. The key-value 28 | // store should be periodically compacted or the event history will continue to grow 29 | // indefinitely. 30 | rpc Compact(CompactionRequest) returns (CompactionResponse) {} 31 | } 32 | 33 | service Watch { 34 | // Watch watches for events happening or that have happened. Both input and output 35 | // are streams; the input stream is for creating and canceling watchers and the output 36 | // stream sends events. One watch RPC can watch on multiple key ranges, streaming events 37 | // for several watches at once. The entire event history can be watched starting from the 38 | // last compaction revision. 39 | rpc Watch(stream WatchRequest) returns (stream WatchResponse) {} 40 | } 41 | 42 | service Lease { 43 | // LeaseGrant creates a lease which expires if the server does not receive a keepAlive 44 | // within a given time to live period. All keys attached to the lease will be expired and 45 | // deleted if the lease expires. Each expired key generates a delete event in the event history. 46 | rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) {} 47 | 48 | // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. 49 | rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) {} 50 | 51 | // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client 52 | // to the server and streaming keep alive responses from the server to the client. 53 | rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) {} 54 | 55 | // LeaseTimeToLive retrieves lease information. 56 | rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) {} 57 | 58 | // LeaseLeases lists all existing leases. 59 | rpc LeaseLeases(LeaseLeasesRequest) returns (LeaseLeasesResponse) {} 60 | } 61 | 62 | service Cluster { 63 | // MemberAdd adds a member into the cluster. 64 | rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) {} 65 | 66 | // MemberRemove removes an existing member from the cluster. 67 | rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) {} 68 | 69 | // MemberUpdate updates the member configuration. 70 | rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) {} 71 | 72 | // MemberList lists all the members in the cluster. 73 | rpc MemberList(MemberListRequest) returns (MemberListResponse) {} 74 | 75 | // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. 76 | rpc MemberPromote(MemberPromoteRequest) returns (MemberPromoteResponse) {} 77 | } 78 | 79 | service Maintenance { 80 | // Alarm activates, deactivates, and queries alarms regarding cluster health. 81 | rpc Alarm(AlarmRequest) returns (AlarmResponse) {} 82 | 83 | // Status gets the status of the member. 
84 | rpc Status(StatusRequest) returns (StatusResponse) {} 85 | 86 | // Defragment defragments a member's backend database to recover storage space. 87 | rpc Defragment(DefragmentRequest) returns (DefragmentResponse) {} 88 | 89 | // Hash computes the hash of whole backend keyspace, 90 | // including key, lease, and other buckets in storage. 91 | // This is designed for testing ONLY! 92 | // Do not rely on this in production with ongoing transactions, 93 | // since Hash operation does not hold MVCC locks. 94 | // Use "HashKV" API instead for "key" bucket consistency checks. 95 | rpc Hash(HashRequest) returns (HashResponse) {} 96 | 97 | // HashKV computes the hash of all MVCC keys up to a given revision. 98 | // It only iterates "key" bucket in backend storage. 99 | rpc HashKV(HashKVRequest) returns (HashKVResponse) {} 100 | 101 | // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. 102 | rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) {} 103 | 104 | // MoveLeader requests current leader node to transfer its leadership to transferee. 105 | rpc MoveLeader(MoveLeaderRequest) returns (MoveLeaderResponse) {} 106 | 107 | // Downgrade requests downgrades, verifies feasibility or cancels downgrade 108 | // on the cluster version. 109 | // Supported since etcd 3.5. 110 | rpc Downgrade(DowngradeRequest) returns (DowngradeResponse) {} 111 | } 112 | 113 | service Auth { 114 | // AuthEnable enables authentication. 115 | rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) {} 116 | 117 | // AuthDisable disables authentication. 118 | rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) {} 119 | 120 | // AuthStatus displays authentication status. 121 | rpc AuthStatus(AuthStatusRequest) returns (AuthStatusResponse) {} 122 | 123 | // Authenticate processes an authenticate request. 124 | rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) {} 125 | 126 | // UserAdd adds a new user. User name cannot be empty. 127 | rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) {} 128 | 129 | // UserGet gets detailed user information. 130 | rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) {} 131 | 132 | // UserList gets a list of all users. 133 | rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) {} 134 | 135 | // UserDelete deletes a specified user. 136 | rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) {} 137 | 138 | // UserChangePassword changes the password of a specified user. 139 | rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) {} 140 | 141 | // UserGrant grants a role to a specified user. 142 | rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) {} 143 | 144 | // UserRevokeRole revokes a role of specified user. 145 | rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) {} 146 | 147 | // RoleAdd adds a new role. Role name cannot be empty. 148 | rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) {} 149 | 150 | // RoleGet gets detailed role information. 151 | rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) {} 152 | 153 | // RoleList gets lists of all roles. 154 | rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) {} 155 | 156 | // RoleDelete deletes a specified role. 
157 | rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) {}
158 |
159 | // RoleGrantPermission grants a permission of a specified key or range to a specified role.
160 | rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) {}
161 |
162 | // RoleRevokePermission revokes a key or range permission of a specified role.
163 | rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) {}
164 | }
165 |
166 | message ResponseHeader {
167 | // cluster_id is the ID of the cluster which sent the response.
168 | uint64 cluster_id = 1;
169 | // member_id is the ID of the member which sent the response.
170 | uint64 member_id = 2;
171 | // revision is the key-value store revision when the request was applied.
172 | // For watch progress responses, the header.revision indicates progress. All future events
173 | // received in this stream are guaranteed to have a higher revision number than the
174 | // header.revision number.
175 | int64 revision = 3;
176 | // raft_term is the raft term when the request was applied.
177 | uint64 raft_term = 4;
178 | }
179 |
180 | message RangeRequest {
181 | enum SortOrder {
182 | NONE = 0; // default, no sorting
183 | ASCEND = 1; // lowest target value first
184 | DESCEND = 2; // highest target value first
185 | }
186 | enum SortTarget {
187 | KEY = 0;
188 | VERSION = 1;
189 | CREATE = 2;
190 | MOD = 3;
191 | VALUE = 4;
192 | }
193 |
194 | // key is the first key for the range. If range_end is not given, the request only looks up key.
195 | bytes key = 1;
196 | // range_end is the upper bound on the requested range [key, range_end).
197 | // If range_end is '\0', the range is all keys >= key.
198 | // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
199 | // then the range request gets all keys prefixed with key.
200 | // If both key and range_end are '\0', then the range request returns all keys.
201 | bytes range_end = 2;
202 | // limit is a limit on the number of keys returned for the request. When limit is set to 0,
203 | // it is treated as no limit.
204 | int64 limit = 3;
205 | // revision is the point-in-time of the key-value store to use for the range.
206 | // If revision is less than or equal to zero, the range is over the newest key-value store.
207 | // If the revision has been compacted, ErrCompacted is returned as a response.
208 | int64 revision = 4;
209 |
210 | // sort_order is the order for returned sorted results.
211 | SortOrder sort_order = 5;
212 |
213 | // sort_target is the key-value field to use for sorting.
214 | SortTarget sort_target = 6;
215 |
216 | // serializable sets the range request to use serializable member-local reads.
217 | // Range requests are linearizable by default; linearizable requests have higher
218 | // latency and lower throughput than serializable requests but reflect the current
219 | // consensus of the cluster. For better performance, in exchange for possible stale reads,
220 | // a serializable range request is served locally without needing to reach consensus
221 | // with other nodes in the cluster.
222 | bool serializable = 7;
223 |
224 | // keys_only when set returns only the keys and not the values.
225 | bool keys_only = 8;
226 |
227 | // count_only when set returns only the count of the keys in the range.
228 | bool count_only = 9;
229 |
230 | // min_mod_revision is the lower bound for returned key mod revisions; all keys with
231 | // lesser mod revisions will be filtered away.
232 | int64 min_mod_revision = 10; 233 | 234 | // max_mod_revision is the upper bound for returned key mod revisions; all keys with 235 | // greater mod revisions will be filtered away. 236 | int64 max_mod_revision = 11; 237 | 238 | // min_create_revision is the lower bound for returned key create revisions; all keys with 239 | // lesser create revisions will be filtered away. 240 | int64 min_create_revision = 12; 241 | 242 | // max_create_revision is the upper bound for returned key create revisions; all keys with 243 | // greater create revisions will be filtered away. 244 | int64 max_create_revision = 13; 245 | } 246 | 247 | message RangeResponse { 248 | ResponseHeader header = 1; 249 | // kvs is the list of key-value pairs matched by the range request. 250 | // kvs is empty when count is requested. 251 | repeated mvccpb.KeyValue kvs = 2; 252 | // more indicates if there are more keys to return in the requested range. 253 | bool more = 3; 254 | // count is set to the number of keys within the range when requested. 255 | int64 count = 4; 256 | } 257 | 258 | message PutRequest { 259 | // key is the key, in bytes, to put into the key-value store. 260 | bytes key = 1; 261 | // value is the value, in bytes, to associate with the key in the key-value store. 262 | bytes value = 2; 263 | // lease is the lease ID to associate with the key in the key-value store. A lease 264 | // value of 0 indicates no lease. 265 | int64 lease = 3; 266 | 267 | // If prev_kv is set, etcd gets the previous key-value pair before changing it. 268 | // The previous key-value pair will be returned in the put response. 269 | bool prev_kv = 4; 270 | 271 | // If ignore_value is set, etcd updates the key using its current value. 272 | // Returns an error if the key does not exist. 273 | bool ignore_value = 5; 274 | 275 | // If ignore_lease is set, etcd updates the key using its current lease. 276 | // Returns an error if the key does not exist. 277 | bool ignore_lease = 6; 278 | } 279 | 280 | message PutResponse { 281 | ResponseHeader header = 1; 282 | // if prev_kv is set in the request, the previous key-value pair will be returned. 283 | mvccpb.KeyValue prev_kv = 2; 284 | } 285 | 286 | message DeleteRangeRequest { 287 | // key is the first key to delete in the range. 288 | bytes key = 1; 289 | // range_end is the key following the last key to delete for the range [key, range_end). 290 | // If range_end is not given, the range is defined to contain only the key argument. 291 | // If range_end is one bit larger than the given key, then the range is all the keys 292 | // with the prefix (the given key). 293 | // If range_end is '\0', the range is all keys greater than or equal to the key argument. 294 | bytes range_end = 2; 295 | 296 | // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. 297 | // The previous key-value pairs will be returned in the delete response. 298 | bool prev_kv = 3; 299 | } 300 | 301 | message DeleteRangeResponse { 302 | ResponseHeader header = 1; 303 | // deleted is the number of keys deleted by the delete range request. 304 | int64 deleted = 2; 305 | // if prev_kv is set in the request, the previous key-value pairs will be returned. 306 | repeated mvccpb.KeyValue prev_kvs = 3; 307 | } 308 | 309 | message RequestOp { 310 | // request is a union of request types accepted by a transaction. 
311 | oneof request { 312 | RangeRequest request_range = 1; 313 | PutRequest request_put = 2; 314 | DeleteRangeRequest request_delete_range = 3; 315 | TxnRequest request_txn = 4; 316 | } 317 | } 318 | 319 | message ResponseOp { 320 | // response is a union of response types returned by a transaction. 321 | oneof response { 322 | RangeResponse response_range = 1; 323 | PutResponse response_put = 2; 324 | DeleteRangeResponse response_delete_range = 3; 325 | TxnResponse response_txn = 4; 326 | } 327 | } 328 | 329 | message Compare { 330 | enum CompareResult { 331 | EQUAL = 0; 332 | GREATER = 1; 333 | LESS = 2; 334 | NOT_EQUAL = 3; 335 | } 336 | enum CompareTarget { 337 | VERSION = 0; 338 | CREATE = 1; 339 | MOD = 2; 340 | VALUE = 3; 341 | LEASE = 4; 342 | } 343 | // result is logical comparison operation for this comparison. 344 | CompareResult result = 1; 345 | // target is the key-value field to inspect for the comparison. 346 | CompareTarget target = 2; 347 | // key is the subject key for the comparison operation. 348 | bytes key = 3; 349 | oneof target_union { 350 | // version is the version of the given key 351 | int64 version = 4; 352 | // create_revision is the creation revision of the given key 353 | int64 create_revision = 5; 354 | // mod_revision is the last modified revision of the given key. 355 | int64 mod_revision = 6; 356 | // value is the value of the given key, in bytes. 357 | bytes value = 7; 358 | // lease is the lease id of the given key. 359 | int64 lease = 8; 360 | // leave room for more target_union field tags, jump to 64 361 | } 362 | 363 | // range_end compares the given target to all keys in the range [key, range_end). 364 | // See RangeRequest for more details on key ranges. 365 | bytes range_end = 64; 366 | // TODO: fill out with most of the rest of RangeRequest fields when needed. 367 | } 368 | 369 | // From google paxosdb paper: 370 | // Our implementation hinges around a powerful primitive which we call MultiOp. All other database 371 | // operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically 372 | // and consists of three components: 373 | // 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check 374 | // for the absence or presence of a value, or compare with a given value. Two different tests in the guard 375 | // may apply to the same or different entries in the database. All tests in the guard are applied and 376 | // MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise 377 | // it executes f op (see item 3 below). 378 | // 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or 379 | // lookup operation, and applies to a single database entry. Two different operations in the list may apply 380 | // to the same or different entries in the database. These operations are executed 381 | // if guard evaluates to 382 | // true. 383 | // 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false. 384 | message TxnRequest { 385 | // compare is a list of predicates representing a conjunction of terms. 386 | // If the comparisons succeed, then the success requests will be processed in order, 387 | // and the response will contain their respective responses in order. 388 | // If the comparisons fail, then the failure requests will be processed in order, 389 | // and the response will contain their respective responses in order. 
390 | repeated Compare compare = 1;
391 | // success is a list of requests which will be applied when compare evaluates to true.
392 | repeated RequestOp success = 2;
393 | // failure is a list of requests which will be applied when compare evaluates to false.
394 | repeated RequestOp failure = 3;
395 | }
396 |
397 | message TxnResponse {
398 | ResponseHeader header = 1;
399 | // succeeded is set to true if the compare evaluated to true, and false otherwise.
400 | bool succeeded = 2;
401 | // responses is a list of responses corresponding to the results from applying
402 | // success if succeeded is true or failure if succeeded is false.
403 | repeated ResponseOp responses = 3;
404 | }
405 |
406 | // CompactionRequest compacts the key-value store up to a given revision. All superseded keys
407 | // with a revision less than the compaction revision will be removed.
408 | message CompactionRequest {
409 | // revision is the key-value store revision for the compaction operation.
410 | int64 revision = 1;
411 | // physical is set so the RPC will wait until the compaction is physically
412 | // applied to the local database such that compacted entries are totally
413 | // removed from the backend database.
414 | bool physical = 2;
415 | }
416 |
417 | message CompactionResponse {
418 | ResponseHeader header = 1;
419 | }
420 |
421 | message HashRequest {
422 | }
423 |
424 | message HashKVRequest {
425 | // revision is the key-value store revision for the hash operation.
426 | int64 revision = 1;
427 | }
428 |
429 | message HashKVResponse {
430 | ResponseHeader header = 1;
431 | // hash is the hash value computed from the responding member's MVCC keys up to a given revision.
432 | uint32 hash = 2;
433 | // compact_revision is the compacted revision of the key-value store when the hash begins.
434 | int64 compact_revision = 3;
435 | }
436 |
437 | message HashResponse {
438 | ResponseHeader header = 1;
439 | // hash is the hash value computed from the responding member's KV's backend.
440 | uint32 hash = 2;
441 | }
442 |
443 | message SnapshotRequest {
444 | }
445 |
446 | message SnapshotResponse {
447 | // header has the current key-value store information. The first header in the snapshot
448 | // stream indicates the point in time of the snapshot.
449 | ResponseHeader header = 1;
450 |
451 | // remaining_bytes is the number of blob bytes to be sent after this message
452 | uint64 remaining_bytes = 2;
453 |
454 | // blob contains the next chunk of the snapshot in the snapshot stream.
455 | bytes blob = 3;
456 | }
457 |
458 | message WatchRequest {
459 | // request_union is a request to either create a new watcher or cancel an existing watcher.
460 | oneof request_union {
461 | WatchCreateRequest create_request = 1;
462 | WatchCancelRequest cancel_request = 2;
463 | WatchProgressRequest progress_request = 3;
464 | }
465 | }
466 |
467 | message WatchCreateRequest {
468 | // key is the key to register for watching.
469 | bytes key = 1;
470 |
471 | // range_end is the end of the range [key, range_end) to watch. If range_end is not given,
472 | // only the key argument is watched. If range_end is equal to '\0', all keys greater than
473 | // or equal to the key argument are watched.
474 | // If the range_end is one bit larger than the given key,
475 | // then all keys with the prefix (the given key) will be watched.
476 | bytes range_end = 2;
477 |
478 | // start_revision is an optional revision to watch from (inclusive). No start_revision means "now".
479 | int64 start_revision = 3;
480 |
481 | // progress_notify is set so that the etcd server will periodically send a WatchResponse with
482 | // no events to the new watcher if there are no recent events. It is useful when clients
483 | // wish to recover a disconnected watcher starting from a recent known revision.
484 | // The etcd server may decide how often it will send notifications based on current load.
485 | bool progress_notify = 4;
486 |
487 | enum FilterType {
488 | // filter out put event.
489 | NOPUT = 0;
490 | // filter out delete event.
491 | NODELETE = 1;
492 | }
493 |
494 | // filters filter the events on the server side before they are sent back to the watcher.
495 | repeated FilterType filters = 5;
496 |
497 | // If prev_kv is set, created watcher gets the previous KV before the event happens.
498 | // If the previous KV is already compacted, nothing will be returned.
499 | bool prev_kv = 6;
500 |
501 | // If watch_id is provided and non-zero, it will be assigned to this watcher.
502 | // Since creating a watcher in etcd is not a synchronous operation,
503 | // this can be used to ensure that ordering is correct when creating multiple
504 | // watchers on the same stream. Creating a watcher with an ID already in
505 | // use on the stream will cause an error to be returned.
506 | int64 watch_id = 7;
507 |
508 | // fragment enables splitting large revisions into multiple watch responses.
509 | bool fragment = 8;
510 | }
511 |
512 | message WatchCancelRequest {
513 | // watch_id is the watcher id to cancel so that no more events are transmitted.
514 | int64 watch_id = 1;
515 | }
516 |
517 | // Requests that a watch stream progress status be sent in the watch response stream as soon as
518 | // possible.
519 | message WatchProgressRequest {
520 | }
521 |
522 | message WatchResponse {
523 | ResponseHeader header = 1;
524 | // watch_id is the ID of the watcher that corresponds to the response.
525 | int64 watch_id = 2;
526 |
527 | // created is set to true if the response is for a create watch request.
528 | // The client should record the watch_id and expect to receive events for
529 | // the created watcher from the same stream.
530 | // All events sent to the created watcher will attach with the same watch_id.
531 | bool created = 3;
532 |
533 | // canceled is set to true if the response is for a cancel watch request.
534 | // No further events will be sent to the canceled watcher.
535 | bool canceled = 4;
536 |
537 | // compact_revision is set to the minimum index if a watcher tries to watch
538 | // at a compacted index.
539 | //
540 | // This happens when creating a watcher at a compacted revision or the watcher cannot
541 | // catch up with the progress of the key-value store.
542 | //
543 | // The client should treat the watcher as canceled and should not try to create any
544 | // watcher with the same start_revision again.
545 | int64 compact_revision = 5;
546 |
547 | // cancel_reason indicates the reason for canceling the watcher.
548 | string cancel_reason = 6;
549 |
550 | // fragment is true if a large watch response was split over multiple responses.
551 | bool fragment = 7;
552 |
553 | repeated mvccpb.Event events = 11;
554 | }
555 |
556 | message LeaseGrantRequest {
557 | // TTL is the advisory time-to-live in seconds. An expired lease will return -1.
558 | int64 TTL = 1;
559 | // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
560 | int64 ID = 2; 561 | } 562 | 563 | message LeaseGrantResponse { 564 | ResponseHeader header = 1; 565 | // ID is the lease ID for the granted lease. 566 | int64 ID = 2; 567 | // TTL is the server chosen lease time-to-live in seconds. 568 | int64 TTL = 3; 569 | string error = 4; 570 | } 571 | 572 | message LeaseRevokeRequest { 573 | // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. 574 | int64 ID = 1; 575 | } 576 | 577 | message LeaseRevokeResponse { 578 | ResponseHeader header = 1; 579 | } 580 | 581 | message LeaseCheckpoint { 582 | // ID is the lease ID to checkpoint. 583 | int64 ID = 1; 584 | 585 | // Remaining_TTL is the remaining time until expiry of the lease. 586 | int64 remaining_TTL = 2; 587 | } 588 | 589 | message LeaseCheckpointRequest { 590 | repeated LeaseCheckpoint checkpoints = 1; 591 | } 592 | 593 | message LeaseCheckpointResponse { 594 | ResponseHeader header = 1; 595 | } 596 | 597 | message LeaseKeepAliveRequest { 598 | // ID is the lease ID for the lease to keep alive. 599 | int64 ID = 1; 600 | } 601 | 602 | message LeaseKeepAliveResponse { 603 | ResponseHeader header = 1; 604 | // ID is the lease ID from the keep alive request. 605 | int64 ID = 2; 606 | // TTL is the new time-to-live for the lease. 607 | int64 TTL = 3; 608 | } 609 | 610 | message LeaseTimeToLiveRequest { 611 | // ID is the lease ID for the lease. 612 | int64 ID = 1; 613 | // keys is true to query all the keys attached to this lease. 614 | bool keys = 2; 615 | } 616 | 617 | message LeaseTimeToLiveResponse { 618 | ResponseHeader header = 1; 619 | // ID is the lease ID from the keep alive request. 620 | int64 ID = 2; 621 | // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. 622 | int64 TTL = 3; 623 | // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. 624 | int64 grantedTTL = 4; 625 | // Keys is the list of keys attached to this lease. 626 | repeated bytes keys = 5; 627 | } 628 | 629 | message LeaseLeasesRequest { 630 | } 631 | 632 | message LeaseStatus { 633 | int64 ID = 1; 634 | // TODO: int64 TTL = 2; 635 | } 636 | 637 | message LeaseLeasesResponse { 638 | ResponseHeader header = 1; 639 | repeated LeaseStatus leases = 2; 640 | } 641 | 642 | message Member { 643 | // ID is the member ID for this member. 644 | uint64 ID = 1; 645 | // name is the human-readable name of the member. If the member is not started, the name will be an empty string. 646 | string name = 2; 647 | // peerURLs is the list of URLs the member exposes to the cluster for communication. 648 | repeated string peerURLs = 3; 649 | // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. 650 | repeated string clientURLs = 4; 651 | // isLearner indicates if the member is raft learner. 652 | bool isLearner = 5; 653 | } 654 | 655 | message MemberAddRequest { 656 | // peerURLs is the list of URLs the added member will use to communicate with the cluster. 657 | repeated string peerURLs = 1; 658 | // isLearner indicates if the added member is raft learner. 659 | bool isLearner = 2; 660 | } 661 | 662 | message MemberAddResponse { 663 | ResponseHeader header = 1; 664 | // member is the member information for the added member. 665 | Member member = 2; 666 | // members is a list of all members after adding the new member. 
667 | repeated Member members = 3; 668 | } 669 | 670 | message MemberRemoveRequest { 671 | // ID is the member ID of the member to remove. 672 | uint64 ID = 1; 673 | } 674 | 675 | message MemberRemoveResponse { 676 | ResponseHeader header = 1; 677 | // members is a list of all members after removing the member. 678 | repeated Member members = 2; 679 | } 680 | 681 | message MemberUpdateRequest { 682 | // ID is the member ID of the member to update. 683 | uint64 ID = 1; 684 | // peerURLs is the new list of URLs the member will use to communicate with the cluster. 685 | repeated string peerURLs = 2; 686 | } 687 | 688 | message MemberUpdateResponse{ 689 | ResponseHeader header = 1; 690 | // members is a list of all members after updating the member. 691 | repeated Member members = 2; 692 | } 693 | 694 | message MemberListRequest { 695 | bool linearizable = 1; 696 | } 697 | 698 | message MemberListResponse { 699 | ResponseHeader header = 1; 700 | // members is a list of all members associated with the cluster. 701 | repeated Member members = 2; 702 | } 703 | 704 | message MemberPromoteRequest { 705 | // ID is the member ID of the member to promote. 706 | uint64 ID = 1; 707 | } 708 | 709 | message MemberPromoteResponse { 710 | ResponseHeader header = 1; 711 | // members is a list of all members after promoting the member. 712 | repeated Member members = 2; 713 | } 714 | 715 | message DefragmentRequest { 716 | } 717 | 718 | message DefragmentResponse { 719 | ResponseHeader header = 1; 720 | } 721 | 722 | message MoveLeaderRequest { 723 | // targetID is the node ID for the new leader. 724 | uint64 targetID = 1; 725 | } 726 | 727 | message MoveLeaderResponse { 728 | ResponseHeader header = 1; 729 | } 730 | 731 | enum AlarmType { 732 | NONE = 0; // default, used to query if any alarm is active 733 | NOSPACE = 1; // space quota is exhausted 734 | CORRUPT = 2; // kv store corruption detected 735 | } 736 | 737 | message AlarmRequest { 738 | enum AlarmAction { 739 | GET = 0; 740 | ACTIVATE = 1; 741 | DEACTIVATE = 2; 742 | } 743 | // action is the kind of alarm request to issue. The action 744 | // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a 745 | // raised alarm. 746 | AlarmAction action = 1; 747 | // memberID is the ID of the member associated with the alarm. If memberID is 0, the 748 | // alarm request covers all members. 749 | uint64 memberID = 2; 750 | // alarm is the type of alarm to consider for this request. 751 | AlarmType alarm = 3; 752 | } 753 | 754 | message AlarmMember { 755 | // memberID is the ID of the member associated with the raised alarm. 756 | uint64 memberID = 1; 757 | // alarm is the type of alarm which has been raised. 758 | AlarmType alarm = 2; 759 | } 760 | 761 | message AlarmResponse { 762 | ResponseHeader header = 1; 763 | // alarms is a list of alarms associated with the alarm request. 764 | repeated AlarmMember alarms = 2; 765 | } 766 | 767 | message DowngradeRequest { 768 | enum DowngradeAction { 769 | VALIDATE = 0; 770 | ENABLE = 1; 771 | CANCEL = 2; 772 | } 773 | 774 | // action is the kind of downgrade request to issue. The action may 775 | // VALIDATE the target version, DOWNGRADE the cluster version, 776 | // or CANCEL the current downgrading job. 777 | DowngradeAction action = 1; 778 | // version is the target version to downgrade. 779 | string version = 2; 780 | } 781 | 782 | message DowngradeResponse { 783 | ResponseHeader header = 1; 784 | // version is the current cluster version. 
785 | string version = 2; 786 | } 787 | 788 | message StatusRequest { 789 | } 790 | 791 | message StatusResponse { 792 | ResponseHeader header = 1; 793 | // version is the cluster protocol version used by the responding member. 794 | string version = 2; 795 | // dbSize is the size of the backend database physically allocated, in bytes, of the responding member. 796 | int64 dbSize = 3; 797 | // leader is the member ID which the responding member believes is the current leader. 798 | uint64 leader = 4; 799 | // raftIndex is the current raft committed index of the responding member. 800 | uint64 raftIndex = 5; 801 | // raftTerm is the current raft term of the responding member. 802 | uint64 raftTerm = 6; 803 | // raftAppliedIndex is the current raft applied index of the responding member. 804 | uint64 raftAppliedIndex = 7; 805 | // errors contains alarm/health information and status. 806 | repeated string errors = 8; 807 | // dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member. 808 | int64 dbSizeInUse = 9; 809 | // isLearner indicates if the member is raft learner. 810 | bool isLearner = 10; 811 | } 812 | 813 | message AuthEnableRequest { 814 | } 815 | 816 | message AuthDisableRequest { 817 | } 818 | 819 | message AuthStatusRequest { 820 | } 821 | 822 | message AuthenticateRequest { 823 | string name = 1; 824 | string password = 2; 825 | } 826 | 827 | message AuthUserAddRequest { 828 | string name = 1; 829 | string password = 2; 830 | authpb.UserAddOptions options = 3; 831 | string hashedPassword = 4; 832 | } 833 | 834 | message AuthUserGetRequest { 835 | string name = 1; 836 | } 837 | 838 | message AuthUserDeleteRequest { 839 | // name is the name of the user to delete. 840 | string name = 1; 841 | } 842 | 843 | message AuthUserChangePasswordRequest { 844 | // name is the name of the user whose password is being changed. 845 | string name = 1; 846 | // password is the new password for the user. Note that this field will be removed in the API layer. 847 | string password = 2; 848 | // hashedPassword is the new password for the user. Note that this field will be initialized in the API layer. 849 | string hashedPassword = 3; 850 | } 851 | 852 | message AuthUserGrantRoleRequest { 853 | // user is the name of the user which should be granted a given role. 854 | string user = 1; 855 | // role is the name of the role to grant to the user. 856 | string role = 2; 857 | } 858 | 859 | message AuthUserRevokeRoleRequest { 860 | string name = 1; 861 | string role = 2; 862 | } 863 | 864 | message AuthRoleAddRequest { 865 | // name is the name of the role to add to the authentication system. 866 | string name = 1; 867 | } 868 | 869 | message AuthRoleGetRequest { 870 | string role = 1; 871 | } 872 | 873 | message AuthUserListRequest { 874 | } 875 | 876 | message AuthRoleListRequest { 877 | } 878 | 879 | message AuthRoleDeleteRequest { 880 | string role = 1; 881 | } 882 | 883 | message AuthRoleGrantPermissionRequest { 884 | // name is the name of the role which will be granted the permission. 885 | string name = 1; 886 | // perm is the permission to grant to the role. 
887 | authpb.Permission perm = 2; 888 | } 889 | 890 | message AuthRoleRevokePermissionRequest { 891 | string role = 1; 892 | bytes key = 2; 893 | bytes range_end = 3; 894 | } 895 | 896 | message AuthEnableResponse { 897 | ResponseHeader header = 1; 898 | } 899 | 900 | message AuthDisableResponse { 901 | ResponseHeader header = 1; 902 | } 903 | 904 | message AuthStatusResponse { 905 | ResponseHeader header = 1; 906 | bool enabled = 2; 907 | // authRevision is the current revision of auth store 908 | uint64 authRevision = 3; 909 | } 910 | 911 | message AuthenticateResponse { 912 | ResponseHeader header = 1; 913 | // token is an authorized token that can be used in succeeding RPCs 914 | string token = 2; 915 | } 916 | 917 | message AuthUserAddResponse { 918 | ResponseHeader header = 1; 919 | } 920 | 921 | message AuthUserGetResponse { 922 | ResponseHeader header = 1; 923 | 924 | repeated string roles = 2; 925 | } 926 | 927 | message AuthUserDeleteResponse { 928 | ResponseHeader header = 1; 929 | } 930 | 931 | message AuthUserChangePasswordResponse { 932 | ResponseHeader header = 1; 933 | } 934 | 935 | message AuthUserGrantRoleResponse { 936 | ResponseHeader header = 1; 937 | } 938 | 939 | message AuthUserRevokeRoleResponse { 940 | ResponseHeader header = 1; 941 | } 942 | 943 | message AuthRoleAddResponse { 944 | ResponseHeader header = 1; 945 | } 946 | 947 | message AuthRoleGetResponse { 948 | ResponseHeader header = 1; 949 | 950 | repeated authpb.Permission perm = 2; 951 | } 952 | 953 | message AuthRoleListResponse { 954 | ResponseHeader header = 1; 955 | 956 | repeated string roles = 2; 957 | } 958 | 959 | message AuthUserListResponse { 960 | ResponseHeader header = 1; 961 | 962 | repeated string users = 2; 963 | } 964 | 965 | message AuthRoleDeleteResponse { 966 | ResponseHeader header = 1; 967 | } 968 | 969 | message AuthRoleGrantPermissionResponse { 970 | ResponseHeader header = 1; 971 | } 972 | 973 | message AuthRoleRevokePermissionResponse { 974 | ResponseHeader header = 1; 975 | } 976 | -------------------------------------------------------------------------------- /proto/v3election.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package v3electionpb; 3 | 4 | import "rpc.proto"; 5 | import "kv.proto"; 6 | 7 | 8 | // The election service exposes client-side election facilities as a gRPC interface. 9 | service Election { 10 | // Campaign waits to acquire leadership in an election, returning a LeaderKey 11 | // representing the leadership if successful. The LeaderKey can then be used 12 | // to issue new values on the election, transactionally guard API requests on 13 | // leadership still being held, and resign from the election. 14 | rpc Campaign(CampaignRequest) returns (CampaignResponse) {} 15 | // Proclaim updates the leader's posted value with a new value. 16 | rpc Proclaim(ProclaimRequest) returns (ProclaimResponse) {} 17 | // Leader returns the current election proclamation, if any. 18 | rpc Leader(LeaderRequest) returns (LeaderResponse) {} 19 | // Observe streams election proclamations in-order as made by the election's 20 | // elected leaders. 21 | rpc Observe(LeaderRequest) returns (stream LeaderResponse) {} 22 | // Resign releases election leadership so other campaigners may acquire 23 | // leadership on the election. 24 | rpc Resign(ResignRequest) returns (ResignResponse) {} 25 | } 26 | 27 | message CampaignRequest { 28 | // name is the election's identifier for the campaign. 
29 | bytes name = 1;
30 | // lease is the ID of the lease attached to leadership of the election. If the
31 | // lease expires or is revoked before resigning leadership, then the
32 | // leadership is transferred to the next campaigner, if any.
33 | int64 lease = 2;
34 | // value is the initial proclaimed value set when the campaigner wins the
35 | // election.
36 | bytes value = 3;
37 | }
38 |
39 | message CampaignResponse {
40 | etcdserverpb.ResponseHeader header = 1;
41 | // leader describes the resources used for holding leadership of the election.
42 | LeaderKey leader = 2;
43 | }
44 |
45 | message LeaderKey {
46 | // name is the election identifier that corresponds to the leadership key.
47 | bytes name = 1;
48 | // key is an opaque key representing the ownership of the election. If the key
49 | // is deleted, then leadership is lost.
50 | bytes key = 2;
51 | // rev is the creation revision of the key. It can be used to test for ownership
52 | // of an election during transactions by testing the key's creation revision
53 | // matches rev.
54 | int64 rev = 3;
55 | // lease is the lease ID of the election leader.
56 | int64 lease = 4;
57 | }
58 |
59 | message LeaderRequest {
60 | // name is the election identifier for the leadership information.
61 | bytes name = 1;
62 | }
63 |
64 | message LeaderResponse {
65 | etcdserverpb.ResponseHeader header = 1;
66 | // kv is the key-value pair representing the latest leader update.
67 | mvccpb.KeyValue kv = 2;
68 | }
69 |
70 | message ResignRequest {
71 | // leader is the leadership to relinquish by resignation.
72 | LeaderKey leader = 1;
73 | }
74 |
75 | message ResignResponse {
76 | etcdserverpb.ResponseHeader header = 1;
77 | }
78 |
79 | message ProclaimRequest {
80 | // leader is the leadership hold on the election.
81 | LeaderKey leader = 1;
82 | // value is an update meant to overwrite the leader's current value.
83 | bytes value = 2;
84 | }
85 |
86 | message ProclaimResponse {
87 | etcdserverpb.ResponseHeader header = 1;
88 | }
--------------------------------------------------------------------------------
/proto/v3lock.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 | package v3lockpb;
3 |
4 | import "rpc.proto";
5 |
6 | // The lock service exposes client-side locking facilities as a gRPC interface.
7 | service Lock {
8 | // Lock acquires a distributed shared lock on a given named lock.
9 | // On success, it will return a unique key that exists so long as the
10 | // lock is held by the caller. This key can be used in conjunction with
11 | // transactions to safely ensure updates to etcd only occur while holding
12 | // lock ownership. The lock is held until Unlock is called on the key or the
13 | // lease associated with the owner expires.
14 | rpc Lock(LockRequest) returns (LockResponse) {}
15 |
16 | // Unlock takes a key returned by Lock and releases the hold on the lock. The
17 | // next Lock caller waiting for the lock will then be woken up and given
18 | // ownership of the lock.
19 | rpc Unlock(UnlockRequest) returns (UnlockResponse) {}
20 | }
21 |
22 | message LockRequest {
23 | // name is the identifier for the distributed shared lock to be acquired.
24 | bytes name = 1;
25 | // lease is the ID of the lease that will be attached to ownership of the
26 | // lock. If the lease expires or is revoked while it holds the lock,
27 | // the lock is automatically released. Calls to Lock with the same lease will
28 | // be treated as a single acquisition; locking twice with the same lease is a
29 | // no-op.
30 | int64 lease = 2;
31 | }
32 |
33 | message LockResponse {
34 | etcdserverpb.ResponseHeader header = 1;
35 | // key is a key that will exist on etcd for the duration that the Lock caller
36 | // owns the lock. Users should not modify this key or the lock may exhibit
37 | // undefined behavior.
38 | bytes key = 2;
39 | }
40 |
41 | message UnlockRequest {
42 | // key is the lock ownership key granted by Lock.
43 | bytes key = 1;
44 | }
45 |
46 | message UnlockResponse {
47 | etcdserverpb.ResponseHeader header = 1;
48 | }
49 |
--------------------------------------------------------------------------------
/rustfmt.toml:
--------------------------------------------------------------------------------
1 | edition = "2021"
2 |
--------------------------------------------------------------------------------
/src/auth/authenticate.rs:
--------------------------------------------------------------------------------
1 | use crate::proto::etcdserverpb;
2 | use crate::ResponseHeader;
3 |
4 | #[derive(Debug, Clone)]
5 | pub struct AuthenticateRequest {
6 | proto: crate::proto::etcdserverpb::AuthenticateRequest,
7 | }
8 |
9 | impl AuthenticateRequest {
10 | pub fn new<N, P>(name: N, password: P) -> Self
11 | where
12 | N: Into<String>,
13 | P: Into<String>,
14 | {
15 | let proto = etcdserverpb::AuthenticateRequest {
16 | name: name.into(),
17 | password: password.into(),
18 | };
19 | Self { proto }
20 | }
21 | }
22 |
23 | impl From<etcdserverpb::AuthenticateRequest> for AuthenticateRequest {
24 | fn from(proto: etcdserverpb::AuthenticateRequest) -> Self {
25 | Self { proto }
26 | }
27 | }
28 |
29 | impl From<AuthenticateRequest> for etcdserverpb::AuthenticateRequest {
30 | fn from(value: AuthenticateRequest) -> Self {
31 | value.proto
32 | }
33 | }
34 |
35 | impl<N, P> From<(N, P)> for AuthenticateRequest
36 | where
37 | N: Into<String>,
38 | P: Into<String>,
39 | {
40 | fn from((user, password): (N, P)) -> Self {
41 | Self::new(user, password)
42 | }
43 | }
44 |
45 | #[derive(Debug, Clone)]
46 | pub struct AuthenticateResponse {
47 | pub header: ResponseHeader,
48 | pub token: String,
49 | }
50 |
51 | impl From<etcdserverpb::AuthenticateResponse> for AuthenticateResponse {
52 | fn from(proto: etcdserverpb::AuthenticateResponse) -> Self {
53 | Self {
54 | header: From::from(proto.header.expect("must fetch header")),
55 | token: proto.token,
56 | }
57 | }
58 | }
59 |
--------------------------------------------------------------------------------
/src/auth/mod.rs:
--------------------------------------------------------------------------------
1 | mod authenticate;
2 |
3 | pub use authenticate::{AuthenticateRequest, AuthenticateResponse};
4 |
5 | use async_trait::async_trait;
6 |
7 | use crate::Result;
8 |
9 | #[async_trait]
10 | pub trait AuthOp {
11 | async fn authenticate<R>(&self, req: R) -> Result<AuthenticateResponse>
12 | where
13 | R: Into<AuthenticateRequest> + Send;
14 | }
15 |
--------------------------------------------------------------------------------
/src/client.rs:
--------------------------------------------------------------------------------
1 | use std::time::Duration;
2 |
3 | use async_trait::async_trait;
4 | use tokio::sync::mpsc::channel;
5 | use tokio_stream::wrappers::ReceiverStream;
6 | use tonic::{
7 | codegen::InterceptedService,
8 | metadata::{Ascii, MetadataValue},
9 | service::Interceptor,
10 | transport::Channel,
11 | Request, Status,
12 | };
13 |
14 | use crate::auth::{AuthOp, AuthenticateRequest, AuthenticateResponse};
15 | use crate::cluster::{
16 | ClusterOp, MemberAddRequest, MemberAddResponse, MemberListRequest, MemberListResponse,
17 | MemberRemoveRequest, MemberRemoveResponse, MemberUpdateRequest, MemberUpdateResponse,
18 | };
19 | use crate::kv::{
20 | CompactRequest, CompactResponse, DeleteRequest, DeleteResponse, KeyRange, KeyValueOp,
21 | PutRequest, PutResponse, RangeRequest, RangeResponse, TxnRequest, TxnResponse,
22 | };
23 | use crate::lease::{
24 | LeaseGrantRequest, LeaseGrantResponse, LeaseId, LeaseKeepAlive, LeaseOp, LeaseRevokeRequest,
25 | LeaseRevokeResponse, LeaseTimeToLiveRequest, LeaseTimeToLiveResponse,
26 | };
27 | use crate::proto::etcdserverpb;
28 | use crate::proto::etcdserverpb::cluster_client::ClusterClient;
29 | use crate::proto::etcdserverpb::LeaseKeepAliveRequest;
30 | use crate::proto::etcdserverpb::{
31 | auth_client::AuthClient, kv_client::KvClient, lease_client::LeaseClient,
32 | watch_client::WatchClient,
33 | };
34 | use crate::watch::{WatchCanceler, WatchCreateRequest, WatchOp, WatchStream};
35 | use crate::{Error, Result};
36 |
37 | #[derive(Clone)]
38 | pub struct TokenInterceptor {
39 | token: Option<MetadataValue<Ascii>>,
40 | }
41 |
42 | impl TokenInterceptor {
43 | fn new(token: Option<String>) -> Self {
44 | Self {
45 | token: token.map(|token: String| MetadataValue::try_from(&token).unwrap()),
46 | }
47 | }
48 | }
49 |
50 | impl Interceptor for TokenInterceptor {
51 | fn call(&mut self, mut req: tonic::Request<()>) -> std::result::Result<tonic::Request<()>, Status> {
52 | match &self.token {
53 | Some(token) => {
54 | req.metadata_mut().insert("authorization", token.clone());
55 | Ok(req)
56 | }
57 | None => Ok(req),
58 | }
59 | }
60 | }
61 |
62 | #[cfg(feature = "tls")]
63 | #[derive(Debug, Clone)]
64 | enum TlsOption {
65 | None,
66 | WithConfig(tonic::transport::ClientTlsConfig),
67 | }
68 |
69 | #[cfg(not(feature = "tls"))]
70 | #[derive(Debug, Clone)]
71 | enum TlsOption {
72 | None,
73 | }
74 |
75 | #[derive(Debug, Clone)]
76 | pub struct Endpoint {
77 | url: String,
78 |
79 | tls_opt: TlsOption,
80 | }
81 |
82 | impl Endpoint {
83 | pub fn new(url: impl Into<String>) -> Self {
84 | Self {
85 | url: url.into(),
86 | tls_opt: TlsOption::None,
87 | }
88 | }
89 |
90 | #[cfg(feature = "tls")]
91 | pub fn tls_raw(
92 | mut self,
93 | domain_name: impl Into<String>,
94 | ca_cert: impl AsRef<[u8]>,
95 | client_cert: impl AsRef<[u8]>,
96 | client_key: impl AsRef<[u8]>,
97 | ) -> Self {
98 | use tonic::transport::{Certificate, ClientTlsConfig, Identity};
99 |
100 | let certificate = Certificate::from_pem(ca_cert);
101 | let identity = Identity::from_pem(client_cert, client_key);
102 |
103 | self.tls_opt = TlsOption::WithConfig(
104 | ClientTlsConfig::new()
105 | .domain_name(domain_name)
106 | .ca_certificate(certificate)
107 | .identity(identity),
108 | );
109 |
110 | self
111 | }
112 |
113 | #[cfg(feature = "tls")]
114 | pub async fn tls(
115 | self,
116 | domain_name: impl Into<String>,
117 | ca_cert_path: impl AsRef<std::path::Path>,
118 | client_cert_path: impl AsRef<std::path::Path>,
119 | client_key_path: impl AsRef<std::path::Path>,
120 | ) -> Result<Self> {
121 | use tokio::fs::read;
122 |
123 | let ca_cert = read(ca_cert_path).await?;
124 |
125 | let client_cert = read(client_cert_path).await?;
126 | let client_key = read(client_key_path).await?;
127 |
128 | Ok(self.tls_raw(domain_name, ca_cert, client_cert, client_key))
129 | }
130 | }
131 |
132 | impl<T> From<T> for Endpoint
133 | where
134 | T: Into<String>,
135 | {
136 | fn from(url: T) -> Self {
137 | Self {
138 | url: url.into(),
139 | tls_opt: TlsOption::None,
140 | }
141 | }
142 | }
143 |
144 | /// Config for establishing an etcd client.
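///
/// A minimal construction sketch (illustrative only: the endpoint URL is a
/// placeholder, and this assumes `ClientConfig` and `Endpoint` are re-exported
/// from the crate root as in the crate's examples):
///
/// ```no_run
/// use std::time::Duration;
/// use etcd_rs::ClientConfig;
///
/// let cfg = ClientConfig::new(vec!["http://127.0.0.1:2379".into()])
///     .auth("user", "password")
///     .connect_timeout(Duration::from_secs(5));
/// ```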
145 | #[derive(Clone, Debug)]
146 | pub struct ClientConfig {
147 | pub endpoints: Vec<Endpoint>,
148 | pub auth: Option<(String, String)>,
149 | pub connect_timeout: Duration,
150 | pub http2_keep_alive_interval: Duration,
151 | }
152 |
153 | impl ClientConfig {
154 | pub fn new(endpoints: impl Into<Vec<Endpoint>>) -> Self {
155 | Self {
156 | endpoints: endpoints.into(),
157 | auth: None,
158 | connect_timeout: Duration::from_secs(30),
159 | http2_keep_alive_interval: Duration::from_secs(5),
160 | }
161 | }
162 |
163 | pub fn auth(mut self, name: impl Into<String>, password: impl Into<String>) -> Self {
164 | self.auth = Some((name.into(), password.into()));
165 | self
166 | }
167 |
168 | pub fn connect_timeout(mut self, timeout: Duration) -> Self {
169 | self.connect_timeout = timeout;
170 | self
171 | }
172 |
173 | pub fn http2_keep_alive_interval(mut self, interval: Duration) -> Self {
174 | self.http2_keep_alive_interval = interval;
175 | self
176 | }
177 | }
178 |
179 | /// Client is an abstraction for grouping etcd operations and managing underlying network communications.
180 | #[derive(Clone)]
181 | pub struct Client {
182 | auth_client: AuthClient<InterceptedService<Channel, TokenInterceptor>>,
183 | kv_client: KvClient<InterceptedService<Channel, TokenInterceptor>>,
184 | watch_client: WatchClient<InterceptedService<Channel, TokenInterceptor>>,
185 | cluster_client: ClusterClient<InterceptedService<Channel, TokenInterceptor>>,
186 | lease_client: LeaseClient<InterceptedService<Channel, TokenInterceptor>>,
187 | }
188 |
189 | impl Client {
190 | /// Build clients from tonic [`Channel`] directly.
191 | ///
192 | /// For advanced users, it provides the ability to control more details about the connection.
193 | pub fn with_channel(channel: Channel, token: Option<String>) -> Self {
194 | let auth_interceptor = TokenInterceptor::new(token);
195 |
196 | let auth_client = AuthClient::with_interceptor(channel.clone(), auth_interceptor.clone());
197 | let kv_client = KvClient::with_interceptor(channel.clone(), auth_interceptor.clone());
198 | let watch_client = WatchClient::with_interceptor(channel.clone(), auth_interceptor.clone());
199 | let cluster_client =
200 | ClusterClient::with_interceptor(channel.clone(), auth_interceptor.clone());
201 | let lease_client = LeaseClient::with_interceptor(channel, auth_interceptor);
202 |
203 | Self {
204 | auth_client,
205 | kv_client,
206 | watch_client,
207 | cluster_client,
208 | lease_client,
209 | }
210 | }
211 |
212 | pub async fn connect_with_token(cfg: &ClientConfig, token: Option<String>) -> Result<Self> {
213 | let channel = {
214 | let mut endpoints = Vec::with_capacity(cfg.endpoints.len());
215 | for e in cfg.endpoints.iter() {
216 | let mut c = Channel::from_shared(e.url.clone())?
217 | .connect_timeout(cfg.connect_timeout)
218 | .http2_keep_alive_interval(cfg.http2_keep_alive_interval);
219 |
220 | #[cfg(feature = "tls")]
221 | {
222 | if let TlsOption::WithConfig(tls) = e.tls_opt.clone() {
223 | c = c.tls_config(tls)?;
224 | }
225 | }
226 |
227 | endpoints.push(c);
228 | }
229 |
230 | Channel::balance_list(endpoints.into_iter())
231 | };
232 |
233 | Ok(Self::with_channel(channel, token))
234 | }
235 |
236 | /// Connects to the etcd cluster and returns a client.
237 | ///
238 | /// # Errors
239 | /// Returns `Err` if the given endpoints cannot be contacted or if authentication fails.
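///
/// A hypothetical connection sketch (the endpoint URL is a placeholder and a
/// real caller would live inside its own async context):
///
/// ```no_run
/// # use etcd_rs::{Client, ClientConfig};
/// # async fn connect_example() -> etcd_rs::Result<()> {
/// let client = Client::connect(ClientConfig::new(vec!["http://127.0.0.1:2379".into()])).await?;
/// # Ok(())
/// # }
/// ```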
240 | pub async fn connect(mut cfg: ClientConfig) -> Result<Self> {
241 | let cli = Self::connect_with_token(&cfg, None).await?;
242 |
243 | match cfg.auth.take() {
244 | Some((name, password)) => {
245 | let token = cli.authenticate((name, password)).await?.token;
246 |
247 | Self::connect_with_token(&cfg, Some(token)).await
248 | }
249 | None => Ok(cli),
250 | }
251 | }
252 | }
253 |
254 | #[async_trait]
255 | impl AuthOp for Client {
256 | async fn authenticate<R>(&self, req: R) -> Result<AuthenticateResponse>
257 | where
258 | R: Into<AuthenticateRequest> + Send,
259 | {
260 | let req = tonic::Request::new(req.into().into());
261 | let resp = self.auth_client.clone().authenticate(req).await?;
262 |
263 | Ok(resp.into_inner().into())
264 | }
265 | }
266 |
267 | #[async_trait]
268 | impl KeyValueOp for Client {
269 | async fn put<R>(&self, req: R) -> Result<PutResponse>
270 | where
271 | R: Into<PutRequest> + Send,
272 | {
273 | let req = tonic::Request::new(req.into().into());
274 | let resp = self.kv_client.clone().put(req).await?;
275 |
276 | Ok(resp.into_inner().into())
277 | }
278 |
279 | async fn get<R>(&self, req: R) -> Result<RangeResponse>
280 | where
281 | R: Into<RangeRequest> + Send,
282 | {
283 | let req = tonic::Request::new(req.into().into());
284 | let resp = self.kv_client.clone().range(req).await?;
285 |
286 | Ok(resp.into_inner().into())
287 | }
288 |
289 | async fn get_all(&self) -> Result<RangeResponse> {
290 | self.get(KeyRange::all()).await
291 | }
292 |
293 | async fn get_by_prefix<K>(&self, p: K) -> Result<RangeResponse>
294 | where
295 | K: Into<Vec<u8>> + Send,
296 | {
297 | self.get(KeyRange::prefix(p)).await
298 | }
299 |
300 | async fn get_range<F, E>(&self, from: F, end: E) -> Result<RangeResponse>
301 | where
302 | F: Into<Vec<u8>> + Send,
303 | E: Into<Vec<u8>> + Send,
304 | {
305 | self.get(KeyRange::range(from, end)).await
306 | }
307 |
308 | async fn delete<R>(&self, req: R) -> Result<DeleteResponse>
309 | where
310 | R: Into<DeleteRequest> + Send,
311 | {
312 | let req = tonic::Request::new(req.into().into());
313 | let resp = self.kv_client.clone().delete_range(req).await?;
314 |
315 | Ok(resp.into_inner().into())
316 | }
317 |
318 | async fn delete_all(&self) -> Result<DeleteResponse> {
319 | self.delete(KeyRange::all()).await
320 | }
321 |
322 | async fn delete_by_prefix<K>(&self, p: K) -> Result<DeleteResponse>
323 | where
324 | K: Into<Vec<u8>> + Send,
325 | {
326 | self.delete(KeyRange::prefix(p)).await
327 | }
328 |
329 | async fn delete_range<F, E>(&self, from: F, end: E) -> Result<DeleteResponse>
330 | where
331 | F: Into<Vec<u8>> + Send,
332 | E: Into<Vec<u8>> + Send,
333 | {
334 | self.delete(KeyRange::range(from, end)).await
335 | }
336 |
337 | async fn txn<R>(&self, req: R) -> Result<TxnResponse>
338 | where
339 | R: Into<TxnRequest> + Send,
340 | {
341 | let req = tonic::Request::new(req.into().into());
342 | let resp = self.kv_client.clone().txn(req).await?;
343 |
344 | Ok(resp.into_inner().into())
345 | }
346 |
347 | async fn compact<R>(&self, req: R) -> Result<CompactResponse>
348 | where
349 | R: Into<CompactRequest> + Send,
350 | {
351 | let req = tonic::Request::new(req.into().into());
352 | let resp = self.kv_client.clone().compact(req).await?;
353 |
354 | Ok(resp.into_inner().into())
355 | }
356 | }
357 |
358 | #[async_trait]
359 | impl WatchOp for Client {
360 | async fn watch<R>(&self, req: R) -> Result<(WatchStream, WatchCanceler)>
361 | where
362 | R: Into<WatchCreateRequest> + Send,
363 | {
364 | let (tx, rx) = channel::<etcdserverpb::WatchRequest>(128);
365 |
366 | tx.send(req.into().into()).await?;
367 |
368 | let mut req = tonic::Request::new(ReceiverStream::new(rx));
369 |
370 | req.metadata_mut()
371 | .insert("hasleader", "true".try_into().unwrap());
372 |
373 | let resp = self.watch_client.clone().watch(req).await?;
374 |
375 | let mut inbound = resp.into_inner();
376 |
377 | let watch_id = match inbound.message().await? {
378 | Some(resp) => {
379 | if !resp.created {
380 | return Err(Error::WatchEvent(
381 | "should receive created event at first".to_owned(),
382 | ));
383 | }
384 | assert!(resp.events.is_empty(), "received created event {:?}", resp);
385 | resp.watch_id
386 | }
387 |
388 | None => return Err(Error::CreateWatch),
389 | };
390 |
391 | Ok((WatchStream::new(inbound), WatchCanceler::new(watch_id, tx)))
392 | }
393 | }
394 |
395 | #[async_trait]
396 | impl LeaseOp for Client {
397 | async fn grant_lease<R>(&self, req: R) -> Result<LeaseGrantResponse>
398 | where
399 | R: Into<LeaseGrantRequest> + Send,
400 | {
401 | let req = tonic::Request::new(req.into().into());
402 | let resp = self.lease_client.clone().lease_grant(req).await?;
403 | Ok(resp.into_inner().into())
404 | }
405 |
406 | async fn revoke<R>(&self, req: R) -> Result<LeaseRevokeResponse>
407 | where
408 | R: Into<LeaseRevokeRequest> + Send,
409 | {
410 | let req = tonic::Request::new(req.into().into());
411 | let resp = self.lease_client.clone().lease_revoke(req).await?;
412 | Ok(resp.into_inner().into())
413 | }
414 |
415 | async fn keep_alive_for(&self, lease_id: LeaseId) -> Result<LeaseKeepAlive> {
416 | let (req_tx, req_rx) = channel(1024);
417 |
418 | let req_rx = ReceiverStream::new(req_rx);
419 |
420 | let initial_req = LeaseKeepAliveRequest { id: lease_id };
421 |
422 | req_tx
423 | .send(initial_req)
424 | .await
425 | .map_err(|_| Error::ChannelClosed)?;
426 |
427 | let mut resp_rx = self
428 | .lease_client
429 | .clone()
430 | .lease_keep_alive(req_rx)
431 | .await?
432 | .into_inner();
433 |
434 | let lease_id = match resp_rx.message().await? {
435 | Some(resp) => resp.id,
436 | None => {
437 | return Err(Error::CreateWatch);
438 | }
439 | };
440 |
441 | Ok(LeaseKeepAlive::new(lease_id, req_tx, resp_rx))
442 | }
443 |
444 | async fn time_to_live<R>(&self, req: R) -> Result<LeaseTimeToLiveResponse>
445 | where
446 | R: Into<LeaseTimeToLiveRequest> + Send,
447 | {
448 | let req = tonic::Request::new(req.into().into());
449 | let resp = self.lease_client.clone().lease_time_to_live(req).await?;
450 | Ok(resp.into_inner().into())
451 | }
452 | }
453 |
454 | #[async_trait]
455 | impl ClusterOp for Client {
456 | async fn member_add<R>(&self, req: R) -> Result<MemberAddResponse>
457 | where
458 | R: Into<MemberAddRequest> + Send,
459 | {
460 | let req = tonic::Request::new(req.into().into());
461 | let resp = self.cluster_client.clone().member_add(req).await?;
462 |
463 | Ok(resp.into_inner().into())
464 | }
465 |
466 | async fn member_remove<R>(&self, req: R) -> Result<MemberRemoveResponse>
467 | where
468 | R: Into<MemberRemoveRequest> + Send,
469 | {
470 | let req = tonic::Request::new(req.into().into());
471 | let resp = self.cluster_client.clone().member_remove(req).await?;
472 |
473 | Ok(resp.into_inner().into())
474 | }
475 |
476 | async fn member_update<R>(&self, req: R) -> Result<MemberUpdateResponse>
477 | where
478 | R: Into<MemberUpdateRequest> + Send,
479 | {
480 | let req = tonic::Request::new(req.into().into());
481 | let resp = self.cluster_client.clone().member_update(req).await?;
482 |
483 | Ok(resp.into_inner().into())
484 | }
485 |
486 | async fn member_list(&self) -> Result<MemberListResponse> {
487 | let req = tonic::Request::new(MemberListRequest::new().into());
488 | let resp = self.cluster_client.clone().member_list(req).await?;
489 |
490 | Ok(resp.into_inner().into())
491 | }
492 | }
--------------------------------------------------------------------------------
/src/cluster/member_add.rs:
--------------------------------------------------------------------------------
1 | use crate::proto::etcdserverpb;
2 | use crate::{Member, ResponseHeader};
3 |
4 | #[derive(Debug, Clone)]
5 | pub struct MemberAddRequest {
6 | proto: etcdserverpb::MemberAddRequest,
7 | }
8 |
9 | impl MemberAddRequest {
10 | pub fn new(peer_urls: impl Into<Vec<String>>, is_learner: bool) -> Self {
11 | Self {
12 | proto: etcdserverpb::MemberAddRequest {
13 | peer_ur_ls: peer_urls.into(),
14 | is_learner,
15 | },
16 | }
17 | }
18 | }
19 |
20 | impl<I> From<I> for MemberAddRequest
21 | where
22 | I: Into<Vec<String>>,
23 | {
24 | fn from(peer_urls: I) -> Self {
25 | Self::new(peer_urls, false)
26 | }
27 | }
28 |
29 | impl From<MemberAddRequest> for etcdserverpb::MemberAddRequest {
30 | fn from(req: MemberAddRequest) -> Self {
31 | req.proto
32 | }
33 | }
34 |
35 | #[derive(Debug, Clone)]
36 | pub struct MemberAddResponse {
37 | pub header: ResponseHeader,
38 | pub member: Member,
39 | pub members: Vec<Member>,
40 | }
41 |
42 | impl From<etcdserverpb::MemberAddResponse> for MemberAddResponse {
43 | fn from(proto: etcdserverpb::MemberAddResponse) -> Self {
44 | Self {
45 | header: From::from(proto.header.expect("must fetch header")),
46 | member: From::from(proto.member.expect("must get a new member")),
47 | members: proto.members.into_iter().map(From::from).collect(),
48 | }
49 | }
50 | }
51 |
--------------------------------------------------------------------------------
/src/cluster/member_list.rs:
--------------------------------------------------------------------------------
1 | use crate::proto::etcdserverpb;
2 | use crate::{Member, ResponseHeader};
3 |
4 | #[derive(Debug, Clone)]
5 | pub struct MemberListRequest {
6 | proto: etcdserverpb::MemberListRequest,
7 | }
8 |
9 | impl MemberListRequest {
10 | pub fn new() -> Self {
11 | Self {
12 | proto: etcdserverpb::MemberListRequest {
13 | // default true
14 | // https://github.com/etcd-io/etcd/blob/v3.5.2/client/v3/cluster.go#L127
15 | linearizable: true,
16 | },
17 | }
18 | }
19 | }
20 |
21 | impl From<MemberListRequest> for etcdserverpb::MemberListRequest {
22 | fn from(req: MemberListRequest) -> Self {
23 | req.proto
24 | }
25 | }
26 |
27 | impl Default for MemberListRequest {
28 | fn default() -> Self {
29 | Self::new()
30 | }
31 | }
32 |
33 | #[derive(Debug, Clone)]
34 | pub struct MemberListResponse {
35 | pub header: ResponseHeader,
36 | pub members: Vec<Member>,
37 | }
38 |
39 | impl From<etcdserverpb::MemberListResponse> for MemberListResponse {
40 | fn from(proto: etcdserverpb::MemberListResponse) -> Self {
41 | Self {
42 | header: From::from(proto.header.expect("must fetch header")),
43 | members: proto.members.into_iter().map(From::from).collect(),
44 | }
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
/src/cluster/member_remove.rs:
--------------------------------------------------------------------------------
1 | use crate::proto::etcdserverpb;
2 | use crate::{Member, ResponseHeader};
3 |
4 | #[derive(Debug, Clone)]
5 | pub struct MemberRemoveRequest {
6 | proto: etcdserverpb::MemberRemoveRequest,
7 | }
8 |
9 | impl MemberRemoveRequest {
10 | pub fn new(member_id: u64) -> Self {
11 | Self {
12 | proto: etcdserverpb::MemberRemoveRequest { id: member_id },
13 | }
14 | }
15 | }
16 |
17 | impl From<MemberRemoveRequest> for etcdserverpb::MemberRemoveRequest {
18 | fn from(req: MemberRemoveRequest) -> Self {
19 | req.proto
20 | }
21 | }
22 |
23 | impl From<u64> for MemberRemoveRequest {
24 | fn from(id: u64) -> Self {
25 | Self::new(id)
26 | }
27 | }
28 |
29 | #[derive(Debug, Clone)]
30 | pub struct MemberRemoveResponse {
31 | pub header: ResponseHeader,
32 | pub members: Vec<Member>,
33 | }
34 |
35 | impl From<etcdserverpb::MemberRemoveResponse> for MemberRemoveResponse {
36 | fn from(proto: etcdserverpb::MemberRemoveResponse) -> Self {
37 | Self {
38 | header: From::from(proto.header.expect("must fetch header")),
39 | members: proto.members.into_iter().map(From::from).collect(),
40 | }
41 | }
42 | }
43 |
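Taken together, the `From` impls above let the cluster operations accept plain Rust values directly. A hypothetical usage sketch (assuming a connected `client`, placeholder peer URLs, and that `ClusterOp` and the request types are re-exported from the crate root):

use etcd_rs::{Client, ClusterOp, MemberAddRequest, Result};

async fn rotate_member(client: &Client, old_id: u64) -> Result<()> {
    // Add the replacement as a raft learner first, via the explicit constructor.
    let added = client
        .member_add(MemberAddRequest::new(vec!["http://10.0.0.9:2380".to_owned()], true))
        .await?;
    println!("added member {}", added.member.id);

    // A bare u64 converts into a MemberRemoveRequest through its From impl.
    client.member_remove(old_id).await?;
    Ok(())
}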
-------------------------------------------------------------------------------- /src/cluster/member_update.rs: -------------------------------------------------------------------------------- 1 | use crate::proto::etcdserverpb; 2 | use crate::{Member, ResponseHeader}; 3 | 4 | #[derive(Debug, Clone)] 5 | pub struct MemberUpdateRequest { 6 | proto: etcdserverpb::MemberUpdateRequest, 7 | } 8 | 9 | impl MemberUpdateRequest { 10 | pub fn new(member_id: u64, peer_urls: impl Into>) -> Self { 11 | Self { 12 | proto: etcdserverpb::MemberUpdateRequest { 13 | id: member_id, 14 | peer_ur_ls: peer_urls.into(), 15 | }, 16 | } 17 | } 18 | } 19 | 20 | impl From<(u64, I)> for MemberUpdateRequest 21 | where 22 | I: Into>, 23 | { 24 | fn from((id, peer_urls): (u64, I)) -> Self { 25 | Self::new(id, peer_urls) 26 | } 27 | } 28 | 29 | impl From for etcdserverpb::MemberUpdateRequest { 30 | fn from(req: MemberUpdateRequest) -> Self { 31 | req.proto 32 | } 33 | } 34 | 35 | #[derive(Debug, Clone)] 36 | pub struct MemberUpdateResponse { 37 | pub header: ResponseHeader, 38 | pub members: Vec, 39 | } 40 | 41 | impl From for MemberUpdateResponse { 42 | fn from(proto: etcdserverpb::MemberUpdateResponse) -> Self { 43 | Self { 44 | header: From::from(proto.header.expect("must fetch header")), 45 | members: proto.members.into_iter().map(From::from).collect(), 46 | } 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /src/cluster/mod.rs: -------------------------------------------------------------------------------- 1 | mod member_add; 2 | mod member_list; 3 | mod member_remove; 4 | mod member_update; 5 | 6 | pub use member_add::{MemberAddRequest, MemberAddResponse}; 7 | pub use member_list::{MemberListRequest, MemberListResponse}; 8 | pub use member_remove::{MemberRemoveRequest, MemberRemoveResponse}; 9 | pub use member_update::{MemberUpdateRequest, MemberUpdateResponse}; 10 | 11 | use async_trait::async_trait; 12 | 13 | use crate::proto::etcdserverpb; 14 | use crate::Result; 15 | 16 | #[async_trait] 17 | pub trait ClusterOp { 18 | async fn member_add(&self, req: R) -> Result 19 | where 20 | R: Into + Send; 21 | 22 | async fn member_remove(&self, req: R) -> Result 23 | where 24 | R: Into + Send; 25 | 26 | async fn member_update(&self, req: R) -> Result 27 | where 28 | R: Into + Send; 29 | 30 | async fn member_list(&self) -> Result; 31 | } 32 | 33 | #[derive(Debug, Clone)] 34 | pub struct Member { 35 | pub id: u64, 36 | pub name: String, 37 | pub peer_urls: Vec, 38 | pub client_urls: Vec, 39 | pub is_learner: bool, 40 | } 41 | 42 | impl From for Member { 43 | fn from(proto: etcdserverpb::Member) -> Self { 44 | Self { 45 | id: proto.id, 46 | name: proto.name, 47 | peer_urls: proto.peer_ur_ls, 48 | client_urls: proto.client_ur_ls, 49 | is_learner: proto.is_learner, 50 | } 51 | } 52 | } 53 | 54 | impl From for etcdserverpb::Member { 55 | fn from(value: Member) -> Self { 56 | etcdserverpb::Member { 57 | id: value.id, 58 | name: value.name, 59 | peer_ur_ls: value.peer_urls, 60 | client_ur_ls: value.client_urls, 61 | is_learner: value.is_learner, 62 | } 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | use crate::proto::etcdserverpb; 2 | 3 | #[derive(thiserror::Error, Debug)] 4 | pub enum Error { 5 | #[error("io error")] 6 | IOError(#[from] std::io::Error), 7 | #[error("invalid URI")] 8 | InvalidURI(#[from] http::uri::InvalidUri), 9 
| #[error("gRPC transport error")] 10 | Transport(#[from] tonic::transport::Error), 11 | #[error("response failed")] 12 | Response(#[from] tonic::Status), 13 | #[error("channel closed")] 14 | ChannelClosed, 15 | #[error("failed to create watch")] 16 | CreateWatch, 17 | #[error("unexpected watch event")] 18 | WatchEvent(String), 19 | #[error("failed to keep alive lease")] 20 | KeepAliveLease, 21 | #[error("watch channel send error")] 22 | WatchChannelSend(#[from] tokio::sync::mpsc::error::SendError), 23 | #[error("watch event exhausted")] 24 | WatchEventExhausted, 25 | } 26 | -------------------------------------------------------------------------------- /src/kv/compact.rs: -------------------------------------------------------------------------------- 1 | use crate::proto::etcdserverpb; 2 | use crate::ResponseHeader; 3 | 4 | #[derive(Debug, Clone)] 5 | pub struct CompactRequest { 6 | proto: etcdserverpb::CompactionRequest, 7 | } 8 | 9 | impl CompactRequest { 10 | pub fn new(revision: i64) -> Self { 11 | Self { 12 | proto: etcdserverpb::CompactionRequest { 13 | revision, 14 | physical: false, 15 | }, 16 | } 17 | } 18 | 19 | pub fn physical(mut self) -> Self { 20 | self.proto.physical = true; 21 | self 22 | } 23 | } 24 | 25 | impl From for etcdserverpb::CompactionRequest { 26 | fn from(req: CompactRequest) -> Self { 27 | req.proto 28 | } 29 | } 30 | 31 | impl From for CompactRequest { 32 | fn from(revision: i64) -> Self { 33 | Self::new(revision) 34 | } 35 | } 36 | 37 | #[derive(Debug, Clone)] 38 | pub struct CompactResponse { 39 | pub header: ResponseHeader, 40 | } 41 | 42 | impl From for CompactResponse { 43 | fn from(proto: etcdserverpb::CompactionResponse) -> Self { 44 | Self { 45 | header: From::from(proto.header.expect("must fetch header")), 46 | } 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /src/kv/delete.rs: -------------------------------------------------------------------------------- 1 | use super::{KeyRange, KeyValue}; 2 | use crate::proto::etcdserverpb; 3 | use crate::ResponseHeader; 4 | 5 | #[derive(Debug)] 6 | pub struct DeleteRequest { 7 | proto: etcdserverpb::DeleteRangeRequest, 8 | } 9 | 10 | impl DeleteRequest { 11 | /// Creates a new DeleteRequest for the specified key range. 12 | pub fn new(key_range: T) -> Self 13 | where 14 | T: Into, 15 | { 16 | let key_range = key_range.into(); 17 | Self { 18 | proto: etcdserverpb::DeleteRangeRequest { 19 | key: key_range.key, 20 | range_end: key_range.range_end, 21 | prev_kv: false, 22 | }, 23 | } 24 | } 25 | 26 | /// When set, responds with the key-value pair data before the update from this Delete request. 
--------------------------------------------------------------------------------
/src/kv/delete.rs:
--------------------------------------------------------------------------------
use super::{KeyRange, KeyValue};
use crate::proto::etcdserverpb;
use crate::ResponseHeader;

#[derive(Debug)]
pub struct DeleteRequest {
    proto: etcdserverpb::DeleteRangeRequest,
}

impl DeleteRequest {
    /// Creates a new DeleteRequest for the specified key range.
    pub fn new<T>(key_range: T) -> Self
    where
        T: Into<KeyRange>,
    {
        let key_range = key_range.into();
        Self {
            proto: etcdserverpb::DeleteRangeRequest {
                key: key_range.key,
                range_end: key_range.range_end,
                prev_kv: false,
            },
        }
    }

    /// When set, the response includes the key-value pairs deleted by this request.
    pub fn prev_kv(mut self, prev_kv: bool) -> Self {
        self.proto.prev_kv = prev_kv;
        self
    }
}

impl<T> From<T> for DeleteRequest
where
    T: Into<KeyRange>,
{
    fn from(key_range: T) -> Self {
        Self::new(key_range)
    }
}

impl From<DeleteRequest> for etcdserverpb::DeleteRangeRequest {
    fn from(value: DeleteRequest) -> Self {
        value.proto
    }
}

#[derive(Debug, Clone)]
pub struct DeleteResponse {
    pub header: ResponseHeader,
    pub deleted: u64,
    pub prev_kvs: Vec<KeyValue>,
}

impl From<etcdserverpb::DeleteRangeResponse> for DeleteResponse {
    fn from(proto: etcdserverpb::DeleteRangeResponse) -> Self {
        Self {
            header: From::from(proto.header.expect("must fetch header")),
            deleted: proto.deleted as u64,
            prev_kvs: proto.prev_kvs.into_iter().map(From::from).collect(),
        }
    }
}
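
A sketch of the delete API above, assuming a connected `Client` implementing `KeyValueOp`; key names are illustrative:

```rust
use etcd_rs::{Client, DeleteRequest, KeyRange, KeyValueOp, Result};

// Delete every key under a prefix and report what was removed.
async fn delete_prefix(client: &Client) -> Result<()> {
    let req = DeleteRequest::new(KeyRange::prefix("jobs/")).prev_kv(true);
    let resp = client.delete(req).await?;
    println!("deleted {} keys", resp.deleted);
    for kv in resp.prev_kvs {
        println!("removed {} = {}", kv.key_str(), kv.value_str());
    }
    Ok(())
}
```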
--------------------------------------------------------------------------------
/src/kv/mod.rs:
--------------------------------------------------------------------------------
mod compact;
mod delete;
mod put;
mod range;
mod txn;

pub use compact::{CompactRequest, CompactResponse};
pub use delete::{DeleteRequest, DeleteResponse};
pub use put::{PutRequest, PutResponse};
pub use range::{RangeRequest, RangeResponse};
pub use txn::{TxnCmp, TxnOp, TxnOpResponse, TxnRequest, TxnResponse};

use std::ops::Range;

use async_trait::async_trait;

use crate::lease::LeaseId;
use crate::proto::mvccpb;
use crate::Result;

#[async_trait]
pub trait KeyValueOp {
    async fn put<R>(&self, req: R) -> Result<PutResponse>
    where
        R: Into<PutRequest> + Send;

    async fn get<R>(&self, req: R) -> Result<RangeResponse>
    where
        R: Into<RangeRequest> + Send;
    async fn get_all(&self) -> Result<RangeResponse>;
    async fn get_by_prefix<K>(&self, p: K) -> Result<RangeResponse>
    where
        K: Into<Vec<u8>> + Send;
    async fn get_range<F, E>(&self, from: F, end: E) -> Result<RangeResponse>
    where
        F: Into<Vec<u8>> + Send,
        E: Into<Vec<u8>> + Send;

    async fn delete<R>(&self, req: R) -> Result<DeleteResponse>
    where
        R: Into<DeleteRequest> + Send;
    async fn delete_all(&self) -> Result<DeleteResponse>;
    async fn delete_by_prefix<K>(&self, p: K) -> Result<DeleteResponse>
    where
        K: Into<Vec<u8>> + Send;
    async fn delete_range<F, E>(&self, from: F, end: E) -> Result<DeleteResponse>
    where
        F: Into<Vec<u8>> + Send,
        E: Into<Vec<u8>> + Send;

    async fn txn<R>(&self, req: R) -> Result<TxnResponse>
    where
        R: Into<TxnRequest> + Send;

    async fn compact<R>(&self, req: R) -> Result<CompactResponse>
    where
        R: Into<CompactRequest> + Send;
}

/// Key-Value pair.
#[derive(Clone, PartialEq, Default, Debug)]
pub struct KeyValue {
    pub key: Vec<u8>,
    pub value: Vec<u8>,
    pub create_revision: i64,
    pub mod_revision: i64,
    pub version: i64,
    pub lease: LeaseId,
}

impl KeyValue {
    /// Views the key bytes `&[u8]` as a `&str`.
    /// Leaves the original `&[u8]` in place; the returned string slice borrows the entire content.
    pub fn key_str(&self) -> &str {
        std::str::from_utf8(&self.key).expect("convert bytes to string")
    }

    /// Views the value bytes `&[u8]` as a `&str`.
    /// Leaves the original `&[u8]` in place; the returned string slice borrows the entire content.
    pub fn value_str(&self) -> &str {
        std::str::from_utf8(&self.value).expect("convert bytes to string")
    }
}

impl From<mvccpb::KeyValue> for KeyValue {
    fn from(proto: mvccpb::KeyValue) -> Self {
        Self {
            key: proto.key,
            value: proto.value,
            create_revision: proto.create_revision,
            mod_revision: proto.mod_revision,
            version: proto.version,
            lease: proto.lease,
        }
    }
}

/// KeyRange is an abstraction for describing etcd keys of various types.
#[derive(Clone, Hash, PartialEq, Eq)]
pub struct KeyRange {
    pub key: Vec<u8>,
    pub range_end: Vec<u8>,
}

impl KeyRange {
    /// Creates a new KeyRange for describing a range of multiple keys.
    pub fn range<K, R>(key: K, range_end: R) -> Self
    where
        K: Into<Vec<u8>>,
        R: Into<Vec<u8>>,
    {
        Self {
            key: key.into(),
            range_end: range_end.into(),
        }
    }

    /// Creates a new KeyRange for describing a specified key.
    pub fn key<K>(key: K) -> Self
    where
        K: Into<Vec<u8>>,
    {
        Self {
            key: key.into(),
            range_end: vec![],
        }
    }

    /// Creates a new KeyRange for describing all keys.
    pub fn all() -> Self {
        Self {
            key: vec![0],
            range_end: vec![0],
        }
    }

    /// Creates a new KeyRange for describing keys prefixed with the specified value.
    pub fn prefix<K>(prefix: K) -> Self
    where
        K: Into<Vec<u8>>,
    {
        let key = prefix.into();
        if key.is_empty() {
            // An empty Vec<u8> results in an invalid KeyRange.
            // Assume that an empty value passed to this method implies no prefix (i.e., all keys).
            return KeyRange::all();
        }

        let range_end = {
            let mut end = key.clone();

            for i in (0..end.len()).rev() {
                if end[i] < 0xff {
                    end[i] += 1;
                    end.truncate(i + 1);
                    break;
                }
            }
            end
        };
        Self { key, range_end }
    }
}

impl<T> From<Range<T>> for KeyRange
where
    T: Into<Vec<u8>>,
{
    fn from(range: Range<T>) -> Self {
        Self::range(range.start, range.end)
    }
}

impl From<&str> for KeyRange {
    fn from(k: &str) -> Self {
        Self::key(k)
    }
}

impl From<String> for KeyRange {
    fn from(k: String) -> Self {
        Self::key(k)
    }
}
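
The prefix-to-range computation above increments the last byte that is not `0xff`, so `range_end` becomes the smallest key greater than every prefixed key. A few concrete cases, runnable as a plain program:

```rust
use etcd_rs::KeyRange;

fn main() {
    let r = KeyRange::prefix("foo");
    assert_eq!(r.key, b"foo".to_vec());
    assert_eq!(r.range_end, b"fop".to_vec()); // 'o' + 1 == 'p'

    // A trailing 0xff cannot be incremented, so the previous byte is bumped
    // and the tail is truncated: prefix [0x61, 0xff] yields range end [0x62].
    let r = KeyRange::prefix(vec![0x61, 0xff]);
    assert_eq!(r.range_end, vec![0x62]);

    // An empty prefix degenerates to all keys: key and range_end are both [0].
    let empty: Vec<u8> = vec![];
    let r = KeyRange::prefix(empty);
    assert_eq!((r.key, r.range_end), (vec![0], vec![0]));
}
```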
--------------------------------------------------------------------------------
/src/kv/put.rs:
--------------------------------------------------------------------------------
use super::KeyValue;
use crate::lease::LeaseId;
use crate::proto::etcdserverpb;
use crate::ResponseHeader;

#[derive(Debug)]
pub struct PutRequest {
    proto: etcdserverpb::PutRequest,
}

impl PutRequest {
    /// Creates a new PutRequest for saving the specified key-value.
    pub fn new<K, V>(key: K, value: V) -> Self
    where
        K: Into<Vec<u8>>,
        V: Into<Vec<u8>>,
    {
        Self {
            proto: etcdserverpb::PutRequest {
                key: key.into(),
                value: value.into(),
                lease: 0,
                prev_kv: false,
                ignore_value: false,
                ignore_lease: false,
            },
        }
    }

    /// Sets the lease ID to associate with the key in the key-value store.
    /// A lease value of 0 indicates no lease.
    pub fn lease(mut self, lease: LeaseId) -> Self {
        self.proto.lease = lease;
        self
    }

    /// When set, the response includes the key-value pair as it was before this Put request.
    pub fn prev_kv(mut self, prev_kv: bool) -> Self {
        self.proto.prev_kv = prev_kv;
        self
    }

    /// When set, update the key without changing its current value. Returns an error if the key does not exist.
    pub fn ignore_value(mut self) -> Self {
        self.proto.ignore_value = true;
        self
    }

    /// When set, update the key without changing its current lease. Returns an error if the key does not exist.
    pub fn ignore_lease(mut self) -> Self {
        self.proto.ignore_lease = true;
        self
    }
}

impl From<PutRequest> for etcdserverpb::PutRequest {
    fn from(x: PutRequest) -> Self {
        x.proto
    }
}

impl<K, V> From<(K, V)> for PutRequest
where
    K: Into<Vec<u8>>,
    V: Into<Vec<u8>>,
{
    fn from(kv: (K, V)) -> Self {
        Self::new(kv.0, kv.1)
    }
}

#[derive(Debug, Clone)]
pub struct PutResponse {
    pub header: ResponseHeader,
    pub prev_kv: KeyValue,
}

impl From<etcdserverpb::PutResponse> for PutResponse {
    fn from(proto: etcdserverpb::PutResponse) -> Self {
        Self {
            header: From::from(proto.header.expect("must fetch header")),
            prev_kv: From::from(proto.prev_kv.unwrap_or_default()),
        }
    }
}
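
A sketch of the put builder above, assuming a connected `Client` and an existing lease ID; key and value are illustrative:

```rust
use etcd_rs::{Client, KeyValueOp, LeaseId, PutRequest, Result};

// Write a key bound to a lease and read back the previous value.
async fn put_with_lease(client: &Client, lease: LeaseId) -> Result<()> {
    let req = PutRequest::new("config/mode", "active")
        .lease(lease)
        .prev_kv(true);
    let resp = client.put(req).await?;
    // `prev_kv` is a default-constructed KeyValue when the key was new.
    println!("previous value: {:?}", resp.prev_kv.value_str());
    Ok(())
}
```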
--------------------------------------------------------------------------------
/src/kv/range.rs:
--------------------------------------------------------------------------------
use super::{KeyRange, KeyValue};
use crate::proto::etcdserverpb;
use crate::ResponseHeader;

#[derive(Debug)]
pub struct RangeRequest {
    proto: etcdserverpb::RangeRequest,
}

impl RangeRequest {
    /// Creates a new RangeRequest for the specified key range.
    pub fn new(key_range: KeyRange) -> Self {
        Self {
            proto: etcdserverpb::RangeRequest {
                key: key_range.key,
                range_end: key_range.range_end,
                limit: 0,
                revision: 0,
                sort_order: 0,
                sort_target: 0,
                serializable: false,
                keys_only: false,
                count_only: false,
                min_mod_revision: 0,
                max_mod_revision: 0,
                min_create_revision: 0,
                max_create_revision: 0,
            },
        }
    }

    /// Sets the maximum number of keys returned for the request.
    /// When limit is set to 0, it is treated as no limit.
    pub fn limit(mut self, limit: u64) -> Self {
        self.proto.limit = limit as i64;
        self
    }

    pub fn revision(mut self, revision: i64) -> Self {
        self.proto.revision = revision;
        self
    }

    pub fn sort_by_key(mut self, order: SortOrder) -> Self {
        self.proto.sort_target = etcdserverpb::range_request::SortTarget::Key as i32;
        self.proto.sort_order = order.into();
        self
    }

    pub fn sort_by_version(mut self, order: SortOrder) -> Self {
        self.proto.sort_target = etcdserverpb::range_request::SortTarget::Version as i32;
        self.proto.sort_order = order.into();
        self
    }
}

impl<T> From<T> for RangeRequest
where
    T: Into<KeyRange>,
{
    fn from(key_range: T) -> Self {
        Self::new(key_range.into())
    }
}

impl From<RangeRequest> for etcdserverpb::RangeRequest {
    fn from(x: RangeRequest) -> Self {
        x.proto
    }
}

#[derive(Debug, Clone)]
pub enum SortOrder {
    Ascending,
    Descending,
}

impl From<SortOrder> for etcdserverpb::range_request::SortOrder {
    fn from(value: SortOrder) -> Self {
        match value {
            SortOrder::Ascending => etcdserverpb::range_request::SortOrder::Ascend,
            SortOrder::Descending => etcdserverpb::range_request::SortOrder::Descend,
        }
    }
}

impl From<SortOrder> for i32 {
    fn from(value: SortOrder) -> Self {
        let order: etcdserverpb::range_request::SortOrder = value.into();
        order as i32
    }
}

#[derive(Debug, Clone)]
pub struct RangeResponse {
    pub header: ResponseHeader,
    pub kvs: Vec<KeyValue>,
    pub has_more: bool,
    pub count: u64,
}

impl From<etcdserverpb::RangeResponse> for RangeResponse {
    fn from(proto: etcdserverpb::RangeResponse) -> Self {
        Self {
            header: From::from(proto.header.expect("must fetch header")),
            kvs: proto.kvs.into_iter().map(From::from).collect(),
            has_more: proto.more,
            count: proto.count as u64,
        }
    }
}
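
A sketch of a limited range read using the builder above, assuming a connected `Client`; the prefix is illustrative:

```rust
use etcd_rs::{Client, KeyRange, KeyValueOp, RangeRequest, Result};

// Fetch at most ten keys under a prefix and report whether more remain.
async fn first_ten(client: &Client) -> Result<()> {
    let req = RangeRequest::new(KeyRange::prefix("users/")).limit(10);
    let resp = client.get(req).await?;
    println!(
        "{} of {} keys returned (more: {})",
        resp.kvs.len(),
        resp.count,
        resp.has_more
    );
    for kv in resp.kvs {
        println!("{}", kv.key_str());
    }
    Ok(())
}
```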
--------------------------------------------------------------------------------
/src/kv/txn.rs:
--------------------------------------------------------------------------------
use super::{
    DeleteRequest, DeleteResponse, KeyRange, PutRequest, PutResponse, RangeRequest, RangeResponse,
};
use crate::proto::etcdserverpb;
use crate::ResponseHeader;
use etcdserverpb::compare::{CompareResult, CompareTarget, TargetUnion};
use etcdserverpb::Compare;

#[derive(Debug)]
pub struct TxnRequest {
    proto: etcdserverpb::TxnRequest,
}

impl TxnRequest {
    /// Creates a new TxnRequest.
    pub fn new() -> Self {
        Self {
            proto: etcdserverpb::TxnRequest {
                compare: vec![],
                success: vec![],
                failure: vec![],
            },
        }
    }

    /// Adds a version compare.
    pub fn when_version(mut self, key_range: KeyRange, cmp: TxnCmp, version: usize) -> Self {
        let result: CompareResult = cmp.into();
        self.proto.compare.push(Compare {
            result: result as i32,
            target: CompareTarget::Version as i32,
            key: key_range.key,
            range_end: key_range.range_end,
            target_union: Some(TargetUnion::Version(version as i64)),
        });
        self
    }

    /// Adds a create revision compare.
    pub fn when_create_revision(
        mut self,
        key_range: KeyRange,
        cmp: TxnCmp,
        revision: usize,
    ) -> Self {
        let result: CompareResult = cmp.into();
        self.proto.compare.push(Compare {
            result: result as i32,
            target: CompareTarget::Create as i32,
            key: key_range.key,
            range_end: key_range.range_end,
            target_union: Some(TargetUnion::CreateRevision(revision as i64)),
        });
        self
    }

    /// Adds a mod revision compare.
    pub fn when_mod_revision(mut self, key_range: KeyRange, cmp: TxnCmp, revision: usize) -> Self {
        let result: CompareResult = cmp.into();
        self.proto.compare.push(Compare {
            result: result as i32,
            target: CompareTarget::Mod as i32,
            key: key_range.key,
            range_end: key_range.range_end,
            target_union: Some(TargetUnion::ModRevision(revision as i64)),
        });
        self
    }

    /// Adds a value compare.
    pub fn when_value<V>(mut self, key_range: KeyRange, cmp: TxnCmp, value: V) -> Self
    where
        V: Into<Vec<u8>>,
    {
        let result: CompareResult = cmp.into();
        self.proto.compare.push(Compare {
            result: result as i32,
            target: CompareTarget::Value as i32,
            key: key_range.key,
            range_end: key_range.range_end,
            target_union: Some(TargetUnion::Value(value.into())),
        });
        self
    }

    /// If the comparisons succeed, executes the specified operation.
    pub fn and_then<O>(mut self, op: O) -> Self
    where
        O: Into<TxnOp>,
    {
        self.proto.success.push(op.into().into());
        self
    }

    /// If the comparisons fail, executes the specified operation.
    pub fn or_else<O>(mut self, op: O) -> Self
    where
        O: Into<TxnOp>,
    {
        self.proto.failure.push(op.into().into());
        self
    }
}

impl Default for TxnRequest {
    fn default() -> Self {
        Self::new()
    }
}

impl From<TxnRequest> for crate::proto::etcdserverpb::TxnRequest {
    fn from(x: TxnRequest) -> Self {
        x.proto
    }
}

/// Transaction operation.
pub enum TxnOp {
    Range(RangeRequest),
    Put(PutRequest),
    Delete(DeleteRequest),
    Txn(TxnRequest),
}

impl From<TxnOp> for etcdserverpb::RequestOp {
    fn from(x: TxnOp) -> etcdserverpb::RequestOp {
        use etcdserverpb::request_op::Request;

        let req = match x {
            TxnOp::Range(req) => Request::RequestRange(req.into()),
            TxnOp::Put(req) => Request::RequestPut(req.into()),
            TxnOp::Delete(req) => Request::RequestDeleteRange(req.into()),
            TxnOp::Txn(req) => Request::RequestTxn(req.into()),
        };

        etcdserverpb::RequestOp { request: Some(req) }
    }
}

impl From<RangeRequest> for TxnOp {
    fn from(req: RangeRequest) -> Self {
        Self::Range(req)
    }
}

impl From<PutRequest> for TxnOp {
    fn from(req: PutRequest) -> Self {
        Self::Put(req)
    }
}

impl From<DeleteRequest> for TxnOp {
    fn from(req: DeleteRequest) -> Self {
        Self::Delete(req)
    }
}

impl From<TxnRequest> for TxnOp {
    fn from(req: TxnRequest) -> Self {
        Self::Txn(req)
    }
}

/// Transaction comparison.
pub enum TxnCmp {
    Equal,
    NotEqual,
    Greater,
    Less,
}

impl From<TxnCmp> for CompareResult {
    fn from(x: TxnCmp) -> CompareResult {
        match x {
            TxnCmp::Equal => CompareResult::Equal,
            TxnCmp::NotEqual => CompareResult::NotEqual,
            TxnCmp::Greater => CompareResult::Greater,
            TxnCmp::Less => CompareResult::Less,
        }
    }
}

/// Response for a transaction operation.
#[derive(Debug, Clone)]
pub enum TxnOpResponse {
    Range(RangeResponse),
    Put(PutResponse),
    Delete(DeleteResponse),
    Txn(TxnResponse),
}

impl From<etcdserverpb::ResponseOp> for TxnOpResponse {
    fn from(mut resp: etcdserverpb::ResponseOp) -> Self {
        use etcdserverpb::response_op::Response;
        match resp.response.take().unwrap() {
            Response::ResponseRange(r) => Self::Range(From::from(r)),
            Response::ResponsePut(r) => Self::Put(From::from(r)),
            Response::ResponseTxn(r) => Self::Txn(From::from(r)),
            Response::ResponseDeleteRange(r) => Self::Delete(From::from(r)),
        }
    }
}

#[derive(Debug, Clone)]
pub struct TxnResponse {
    pub header: ResponseHeader,
    pub succeeded: bool,
    pub responses: Vec<TxnOpResponse>,
}

impl From<etcdserverpb::TxnResponse> for TxnResponse {
    fn from(proto: etcdserverpb::TxnResponse) -> Self {
        Self {
            header: From::from(proto.header.expect("must fetch header")),
            succeeded: proto.succeeded,
            responses: proto.responses.into_iter().map(From::from).collect(),
        }
    }
}

--------------------------------------------------------------------------------
/src/lease/grant.rs:
--------------------------------------------------------------------------------
use std::time::Duration;

use crate::lease::LeaseId;
use crate::proto::etcdserverpb;
use crate::ResponseHeader;

#[derive(Debug)]
pub struct LeaseGrantRequest {
    proto: crate::proto::etcdserverpb::LeaseGrantRequest,
}

impl LeaseGrantRequest {
    /// Creates a new LeaseGrantRequest with the specified TTL.
    pub fn new(ttl: Duration) -> Self {
        Self {
            proto: etcdserverpb::LeaseGrantRequest {
                ttl: ttl.as_secs() as i64,
                id: 0,
            },
        }
    }

    /// Sets a custom lease ID (otherwise the server assigns one).
    pub fn with_id(mut self, id: LeaseId) -> Self {
        self.proto.id = id;
        self
    }
}

impl From<LeaseGrantRequest> for crate::proto::etcdserverpb::LeaseGrantRequest {
    fn from(x: LeaseGrantRequest) -> Self {
        x.proto
    }
}

impl From<Duration> for LeaseGrantRequest {
    fn from(ttl: Duration) -> Self {
        Self::new(ttl)
    }
}

#[derive(Debug, Clone)]
pub struct LeaseGrantResponse {
    pub header: ResponseHeader,
    pub id: LeaseId,
    pub ttl: i64,
}

impl From<crate::proto::etcdserverpb::LeaseGrantResponse> for LeaseGrantResponse {
    fn from(proto: crate::proto::etcdserverpb::LeaseGrantResponse) -> Self {
        Self {
            header: From::from(proto.header.expect("must fetch header")),
            id: proto.id,
            ttl: proto.ttl,
        }
    }
}

--------------------------------------------------------------------------------
/src/lease/keep_alive.rs:
--------------------------------------------------------------------------------
use crate::lease::LeaseId;
use crate::proto::etcdserverpb;
use crate::ResponseHeader;

#[derive(Debug)]
pub struct LeaseKeepAliveRequest {
    proto: crate::proto::etcdserverpb::LeaseKeepAliveRequest,
}

impl LeaseKeepAliveRequest {
    /// Creates a new LeaseKeepAliveRequest which will refresh the specified lease.
    pub fn new(id: LeaseId) -> Self {
        Self {
            proto: etcdserverpb::LeaseKeepAliveRequest { id },
        }
    }
}

impl From<LeaseKeepAliveRequest> for crate::proto::etcdserverpb::LeaseKeepAliveRequest {
    fn from(x: LeaseKeepAliveRequest) -> Self {
        x.proto
    }
}

#[derive(Debug)]
pub struct LeaseKeepAliveResponse {
    pub header: ResponseHeader,
    pub id: LeaseId,
    pub ttl: i64,
}

impl From<crate::proto::etcdserverpb::LeaseKeepAliveResponse> for LeaseKeepAliveResponse {
    fn from(proto: crate::proto::etcdserverpb::LeaseKeepAliveResponse) -> Self {
        Self {
            header: From::from(proto.header.expect("must fetch header")),
            id: proto.id,
            ttl: proto.ttl,
        }
    }
}
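
A sketch of a compare-and-swap built from the transaction types above, assuming a connected `Client`; the transaction succeeds only if `flag` still holds the expected value:

```rust
use etcd_rs::{Client, KeyRange, KeyValueOp, PutRequest, RangeRequest, Result, TxnCmp, TxnRequest};

// Atomically flip "flag" from "off" to "on"; otherwise read its current value.
async fn compare_and_swap(client: &Client) -> Result<bool> {
    let txn = TxnRequest::new()
        .when_value(KeyRange::key("flag"), TxnCmp::Equal, "off")
        .and_then(PutRequest::new("flag", "on"))
        .or_else(RangeRequest::new(KeyRange::key("flag")));
    let resp = client.txn(txn).await?;
    Ok(resp.succeeded)
}
```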
--------------------------------------------------------------------------------
/src/lease/mod.rs:
--------------------------------------------------------------------------------
//! Leases are a mechanism for detecting client liveness. The cluster grants leases with a
//! time-to-live. A lease expires if the etcd cluster does not receive a keepAlive within a
//! given TTL period.
//!
//! # Examples
//!
//! Grant lease and keep lease alive (see the sketch following this module).

mod grant;
mod keep_alive;
mod revoke;
mod time_to_live;

pub use grant::{LeaseGrantRequest, LeaseGrantResponse};
pub use keep_alive::{LeaseKeepAliveRequest, LeaseKeepAliveResponse};
pub use revoke::{LeaseRevokeRequest, LeaseRevokeResponse};
pub use time_to_live::{LeaseTimeToLiveRequest, LeaseTimeToLiveResponse};

use async_trait::async_trait;
use tokio::sync::mpsc::Sender;
use tonic::Streaming;

use crate::proto::etcdserverpb;
use crate::{Error, Result};

pub type LeaseId = i64;

#[async_trait]
pub trait LeaseOp {
    async fn grant_lease<R>(&self, req: R) -> Result<LeaseGrantResponse>
    where
        R: Into<LeaseGrantRequest> + Send;

    async fn revoke<R>(&self, req: R) -> Result<LeaseRevokeResponse>
    where
        R: Into<LeaseRevokeRequest> + Send;

    async fn keep_alive_for(&self, lease_id: LeaseId) -> Result<LeaseKeepAlive>;

    async fn time_to_live<R>(&self, req: R) -> Result<LeaseTimeToLiveResponse>
    where
        R: Into<LeaseTimeToLiveRequest> + Send;
}

pub struct LeaseKeepAlive {
    id: LeaseId,
    req_tx: Sender<etcdserverpb::LeaseKeepAliveRequest>,
    resp_rx: Streaming<etcdserverpb::LeaseKeepAliveResponse>,
}

impl LeaseKeepAlive {
    pub(crate) fn new(
        id: LeaseId,
        req_tx: Sender<etcdserverpb::LeaseKeepAliveRequest>,
        resp_rx: Streaming<etcdserverpb::LeaseKeepAliveResponse>,
    ) -> Self {
        Self {
            id,
            req_tx,
            resp_rx,
        }
    }

    #[inline]
    pub fn lease_id(&mut self) -> LeaseId {
        self.id
    }

    pub async fn keep_alive(&mut self) -> Result<Option<LeaseKeepAliveResponse>> {
        let req = LeaseKeepAliveRequest::new(self.lease_id());

        self.req_tx
            .send(req.into())
            .await
            .map_err(|_| Error::ChannelClosed)?;

        Ok(self.resp_rx.message().await?.map(|resp| resp.into()))
    }
}
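
The module doc above calls for a grant-and-keep-alive example. A minimal sketch, assuming a connected `Client` implementing `LeaseOp` and a Tokio runtime:

```rust
use std::time::Duration;

use etcd_rs::{Client, LeaseGrantRequest, LeaseOp, Result};

// Grant a 10-second lease and refresh it a few times.
async fn hold_lease(client: &Client) -> Result<()> {
    let lease = client
        .grant_lease(LeaseGrantRequest::new(Duration::from_secs(10)))
        .await?;

    // Each `keep_alive` call round-trips one refresh request on the stream.
    let mut keeper = client.keep_alive_for(lease.id).await?;
    for _ in 0..3 {
        tokio::time::sleep(Duration::from_secs(3)).await;
        if keeper.keep_alive().await?.is_none() {
            break; // server closed the keep-alive stream
        }
    }
    Ok(())
}
```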
--------------------------------------------------------------------------------
/src/lease/revoke.rs:
--------------------------------------------------------------------------------
use crate::lease::LeaseId;
use crate::proto::etcdserverpb;
use crate::ResponseHeader;

#[derive(Debug)]
pub struct LeaseRevokeRequest {
    proto: crate::proto::etcdserverpb::LeaseRevokeRequest,
}

impl LeaseRevokeRequest {
    /// Creates a new LeaseRevokeRequest which will revoke the specified lease.
    pub fn new(id: LeaseId) -> Self {
        Self {
            proto: etcdserverpb::LeaseRevokeRequest { id },
        }
    }
}

impl From<LeaseRevokeRequest> for crate::proto::etcdserverpb::LeaseRevokeRequest {
    fn from(x: LeaseRevokeRequest) -> Self {
        x.proto
    }
}

#[derive(Debug, Clone)]
pub struct LeaseRevokeResponse {
    pub header: ResponseHeader,
}

impl From<crate::proto::etcdserverpb::LeaseRevokeResponse> for LeaseRevokeResponse {
    fn from(proto: crate::proto::etcdserverpb::LeaseRevokeResponse) -> Self {
        Self {
            header: From::from(proto.header.expect("must fetch header")),
        }
    }
}

--------------------------------------------------------------------------------
/src/lease/time_to_live.rs:
--------------------------------------------------------------------------------
use crate::proto::etcdserverpb;
use crate::{LeaseId, ResponseHeader};

#[derive(Debug)]
pub struct LeaseTimeToLiveRequest {
    proto: crate::proto::etcdserverpb::LeaseTimeToLiveRequest,
}

impl LeaseTimeToLiveRequest {
    /// Creates a new LeaseTimeToLiveRequest with the specified lease ID.
    pub fn new(id: LeaseId) -> Self {
        Self {
            proto: etcdserverpb::LeaseTimeToLiveRequest { id, keys: false },
        }
    }

    /// Sets a custom lease ID, overriding the one given to `new`.
    pub fn with_id(mut self, id: LeaseId) -> Self {
        self.proto.id = id;
        self
    }

    pub fn with_keys(mut self, keys: bool) -> Self {
        self.proto.keys = keys;
        self
    }
}

impl From<LeaseTimeToLiveRequest> for crate::proto::etcdserverpb::LeaseTimeToLiveRequest {
    fn from(x: LeaseTimeToLiveRequest) -> Self {
        x.proto
    }
}

impl From<LeaseId> for LeaseTimeToLiveRequest {
    fn from(lease_id: LeaseId) -> Self {
        Self::new(lease_id)
    }
}

#[derive(Debug)]
pub struct LeaseTimeToLiveResponse {
    pub header: ResponseHeader,
    pub id: LeaseId,
    pub ttl: i64,
}

impl From<crate::proto::etcdserverpb::LeaseTimeToLiveResponse> for LeaseTimeToLiveResponse {
    fn from(proto: crate::proto::etcdserverpb::LeaseTimeToLiveResponse) -> Self {
        Self {
            header: From::from(proto.header.expect("must fetch header")),
            id: proto.id,
            ttl: proto.ttl,
        }
    }
}
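
A sketch of the time-to-live query, assuming a connected `Client`. Note that this binding surfaces only `header`, `id`, and `ttl` from the response, so `with_keys` has no visible effect here; the `From<LeaseId>` impl lets the lease ID be passed directly:

```rust
use etcd_rs::{Client, LeaseId, LeaseOp, Result};

// Return the remaining TTL of a lease in seconds.
async fn remaining_ttl(client: &Client, id: LeaseId) -> Result<i64> {
    let resp = client.time_to_live(id).await?;
    Ok(resp.ttl)
}
```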
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
#![allow(
    clippy::suspicious_op_assign_impl,
    clippy::suspicious_arithmetic_impl,
    clippy::module_inception
)]
#![deny(
    clippy::clone_on_ref_ptr,
    clippy::dbg_macro,
    clippy::enum_glob_use,
    clippy::get_unwrap,
    clippy::macro_use_imports
)]

//! An asynchronous etcd client for Rust.
//!
//! etcd-rs supports the etcd v3 API and async/await syntax.

pub use auth::{AuthOp, AuthenticateRequest, AuthenticateResponse};
pub use cluster::{
    ClusterOp, Member, MemberAddRequest, MemberAddResponse, MemberListRequest, MemberListResponse,
    MemberRemoveRequest, MemberRemoveResponse, MemberUpdateRequest, MemberUpdateResponse,
};
pub use kv::{
    CompactRequest, CompactResponse, DeleteRequest, DeleteResponse, KeyRange, KeyValue, KeyValueOp,
    PutRequest, PutResponse, RangeRequest, RangeResponse, TxnCmp, TxnOp, TxnOpResponse, TxnRequest,
    TxnResponse,
};
pub use lease::{
    LeaseGrantRequest, LeaseGrantResponse, LeaseId, LeaseKeepAlive, LeaseKeepAliveRequest,
    LeaseKeepAliveResponse, LeaseOp, LeaseRevokeRequest, LeaseRevokeResponse,
    LeaseTimeToLiveRequest, LeaseTimeToLiveResponse,
};
pub use response_header::ResponseHeader;
pub use watch::{
    Event, EventType, WatchCancelRequest, WatchCanceler, WatchCreateRequest, WatchInbound, WatchOp,
    WatchResponse, WatchStream,
};

pub use client::{Client, ClientConfig, Endpoint};
pub use error::Error;

mod auth;
mod client;
mod cluster;
mod error;
mod kv;
mod lease;
mod lock;
mod proto;
mod response_header;
mod watch;

pub type Result<T> = std::result::Result<T, Error>;

--------------------------------------------------------------------------------
/src/lock/mod.rs:
--------------------------------------------------------------------------------
use async_trait::async_trait;

#[async_trait]
pub trait LockOp {
    async fn lock(&self);
}
--------------------------------------------------------------------------------
/src/proto.rs:
--------------------------------------------------------------------------------
#[allow(warnings)]
pub mod mvccpb {
    tonic::include_proto!("mvccpb");
}

#[allow(warnings)]
pub mod authpb {
    tonic::include_proto!("authpb");
}

#[allow(warnings)]
pub mod etcdserverpb {
    tonic::include_proto!("etcdserverpb");
}

#[allow(warnings)]
pub mod v3lockpb {
    tonic::include_proto!("v3lockpb");
}

#[allow(warnings)]
pub mod v3electionpb {
    tonic::include_proto!("v3electionpb");
}

--------------------------------------------------------------------------------
/src/response_header.rs:
--------------------------------------------------------------------------------
use crate::proto::etcdserverpb;

#[derive(Debug, Clone)]
pub struct ResponseHeader {
    proto: crate::proto::etcdserverpb::ResponseHeader,
}

impl ResponseHeader {
    /// Get the ID of the cluster which sent the response.
    pub fn cluster_id(&self) -> u64 {
        self.proto.cluster_id
    }

    /// Get the ID of the member which sent the response.
    pub fn member_id(&self) -> u64 {
        self.proto.member_id
    }

    /// Get the key-value store revision when the request was applied.
    pub fn revision(&self) -> i64 {
        self.proto.revision
    }

    /// Get the raft term when the request was applied.
    pub fn raft_term(&self) -> u64 {
        self.proto.raft_term
    }
}

impl From<etcdserverpb::ResponseHeader> for ResponseHeader {
    fn from(proto: etcdserverpb::ResponseHeader) -> Self {
        Self { proto }
    }
}
--------------------------------------------------------------------------------
/src/watch/mod.rs:
--------------------------------------------------------------------------------
//! The Watch API provides an event-based interface for asynchronously monitoring changes to keys.

mod watch;

pub use watch::{WatchCancelRequest, WatchCreateRequest, WatchResponse};

use std::pin::Pin;
use std::task::{Context, Poll};

use async_trait::async_trait;
use futures::Stream;
use tokio::sync::mpsc::Sender;
use tonic::Streaming;

use crate::proto::etcdserverpb;
use crate::proto::mvccpb;
use crate::{Error, KeyValue, Result};

#[async_trait]
pub trait WatchOp {
    async fn watch<R>(&self, req: R) -> Result<(WatchStream, WatchCanceler)>
    where
        R: Into<WatchCreateRequest> + Send;

    // async fn cancel_watch<R>(&self, req: R) -> Result<()>
    // where
    //     R: Into<WatchCancelRequest> + Send;
}

#[derive(Debug)]
pub enum WatchInbound {
    Ready(WatchResponse),
    Interrupted(Error),
    Closed,
}

pub struct WatchStream {
    stream: Streaming<etcdserverpb::WatchResponse>,
    is_closed: bool,
}

impl WatchStream {
    pub(crate) fn new(stream: Streaming<etcdserverpb::WatchResponse>) -> Self {
        Self {
            stream,
            is_closed: false,
        }
    }

    pub async fn inbound(&mut self) -> WatchInbound {
        if self.is_closed {
            return WatchInbound::Closed;
        }

        match self.stream.message().await {
            Ok(Some(resp)) => {
                if resp.canceled {
                    self.is_closed = true;
                }

                if resp.canceled && resp.events.is_empty() {
                    WatchInbound::Closed
                } else {
                    WatchInbound::Ready(resp.into())
                }
            }
            Ok(None) => WatchInbound::Interrupted(Error::WatchEventExhausted),
            Err(e) => WatchInbound::Interrupted(e.into()),
        }
    }
}

impl Stream for WatchStream {
    type Item = WatchInbound;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        Pin::new(&mut self.get_mut().stream)
            .poll_next(cx)
            .map(|e| match e {
                Some(Ok(resp)) => Some(WatchInbound::Ready(resp.into())),
                Some(Err(e)) => Some(WatchInbound::Interrupted(e.into())),
                None => Some(WatchInbound::Closed),
            })
    }
}

pub struct WatchCanceler {
    watch_id: i64,
    tx: Sender<etcdserverpb::WatchRequest>,
}

impl WatchCanceler {
    pub(crate) fn new(watch_id: i64, tx: Sender<etcdserverpb::WatchRequest>) -> Self {
        Self { watch_id, tx }
    }

    pub async fn cancel(self) -> Result<()> {
        self.tx
            .send(WatchCancelRequest::new(self.watch_id).into())
            .await
            .map_err(Error::WatchChannelSend)
    }
}

/// The kind of event.
#[derive(Debug, PartialEq, Clone)]
pub enum EventType {
    Put,
    Delete,
}

impl From<mvccpb::event::EventType> for EventType {
    fn from(event_type: mvccpb::event::EventType) -> Self {
        use mvccpb::event::EventType;
        match event_type {
            EventType::Put => Self::Put,
            EventType::Delete => Self::Delete,
        }
    }
}

/// Every change to every key is represented with Event messages.
#[derive(Debug, Clone)]
pub struct Event {
    pub event_type: EventType,
    pub kv: KeyValue,
    pub prev_kv: Option<KeyValue>,
}

impl From<mvccpb::Event> for Event {
    fn from(proto: mvccpb::Event) -> Self {
        Self {
            event_type: match proto.r#type {
                0 => EventType::Put,
                _ => EventType::Delete, // FIXME: assert valid event type
            },
            kv: From::from(proto.kv.expect("must fetch kv")),
            prev_kv: proto.prev_kv.map(KeyValue::from),
        }
    }
}
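
To tie the watch types together, a sketch that subscribes to a prefix and drains the stream, assuming a connected `Client` implementing `WatchOp`; the prefix is illustrative:

```rust
use etcd_rs::{Client, EventType, KeyRange, Result, WatchInbound, WatchOp};

// Print put/delete events under a prefix until the watch closes.
async fn watch_jobs(client: &Client) -> Result<()> {
    let (mut stream, _canceler) = client.watch(KeyRange::prefix("jobs/")).await?;

    loop {
        match stream.inbound().await {
            WatchInbound::Ready(resp) => {
                for ev in resp.events {
                    let op = match ev.event_type {
                        EventType::Put => "put",
                        EventType::Delete => "delete",
                    };
                    println!("{} {}", op, ev.kv.key_str());
                }
            }
            WatchInbound::Interrupted(e) => return Err(e),
            // `_canceler.cancel().await?` could end the watch early instead.
            WatchInbound::Closed => break,
        }
    }
    Ok(())
}
```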
--------------------------------------------------------------------------------
/src/watch/watch.rs:
--------------------------------------------------------------------------------
use crate::proto::etcdserverpb;
use crate::proto::etcdserverpb::watch_request::RequestUnion;
use crate::{Event, KeyRange, ResponseHeader};

#[derive(Debug)]
pub struct WatchCreateRequest {
    proto: crate::proto::etcdserverpb::WatchCreateRequest,
}

impl WatchCreateRequest {
    /// Creates a new WatchRequest which will subscribe to events of the specified key range.
    pub fn create(key_range: KeyRange) -> Self {
        Self {
            proto: etcdserverpb::WatchCreateRequest {
                key: key_range.key,
                range_end: key_range.range_end,
                start_revision: 0,
                progress_notify: false,
                filters: vec![], // TODO support filters
                prev_kv: false,
                fragment: false, // TODO support fragment
                watch_id: 0,     // TODO support watch_id
            },
        }
    }

    /// Sets the revision to watch from (inclusive). When unset, watching starts from "now".
    pub fn start_revision(mut self, revision: i64) -> Self {
        self.proto.start_revision = revision;
        self
    }

    pub fn progress_notify(mut self) -> Self {
        self.proto.progress_notify = true;
        self
    }

    /// When set, watch events include the previous key-value pair.
    pub fn prev_kv(mut self) -> Self {
        self.proto.prev_kv = true;
        self
    }
}

impl From<WatchCreateRequest> for etcdserverpb::WatchCreateRequest {
    fn from(value: WatchCreateRequest) -> Self {
        value.proto
    }
}

impl From<WatchCreateRequest> for etcdserverpb::WatchRequest {
    fn from(value: WatchCreateRequest) -> Self {
        etcdserverpb::WatchRequest {
            request_union: Some(RequestUnion::CreateRequest(value.into())),
        }
    }
}

impl From<KeyRange> for WatchCreateRequest {
    fn from(key_range: KeyRange) -> Self {
        Self::create(key_range)
    }
}

#[derive(Debug, Clone)]
pub struct WatchCancelRequest {
    proto: etcdserverpb::WatchCancelRequest,
}

impl WatchCancelRequest {
    /// Creates a new WatchCancelRequest which will unsubscribe the specified watch.
    pub fn new(watch_id: i64) -> Self {
        Self {
            proto: etcdserverpb::WatchCancelRequest { watch_id },
        }
    }
}

impl From<i64> for WatchCancelRequest {
    fn from(watch_id: i64) -> Self {
        Self::new(watch_id)
    }
}

impl From<WatchCancelRequest> for etcdserverpb::WatchCancelRequest {
    fn from(value: WatchCancelRequest) -> Self {
        value.proto
    }
}

impl From<WatchCancelRequest> for etcdserverpb::WatchRequest {
    fn from(value: WatchCancelRequest) -> Self {
        etcdserverpb::WatchRequest {
            request_union: Some(RequestUnion::CancelRequest(value.into())),
        }
    }
}

#[derive(Debug, Clone)]
pub struct WatchResponse {
    pub header: ResponseHeader,
    pub watch_id: i64,
    pub created: bool,
    pub canceled: bool,
    pub events: Vec<Event>,
}

impl From<etcdserverpb::WatchResponse> for WatchResponse {
    fn from(proto: etcdserverpb::WatchResponse) -> Self {
        Self {
            header: From::from(proto.header.expect("must fetch header")),
            watch_id: proto.watch_id,
            created: proto.created,
            canceled: proto.canceled,
            events: proto.events.into_iter().map(From::from).collect(),
        }
    }
}
--------------------------------------------------------------------------------
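
Finally, a small sketch of the request builder above: a watch that replays history from a known revision and carries previous values; the prefix is illustrative:

```rust
use etcd_rs::{KeyRange, WatchCreateRequest};

// Build a watch request that starts at `from_revision` and reports prior values.
fn historical_watch(from_revision: i64) -> WatchCreateRequest {
    WatchCreateRequest::create(KeyRange::prefix("config/"))
        .start_revision(from_revision)
        .prev_kv()
}
```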