├── .github └── workflows │ └── ci.yml ├── .gitignore ├── Cargo.toml ├── LICENSE ├── README.md ├── etcd-client ├── Cargo.toml ├── build.rs └── src │ ├── auth │ ├── authenticate.rs │ └── mod.rs │ ├── client.rs │ ├── error.rs │ ├── kv │ ├── cache.rs │ ├── delete.rs │ ├── get.rs │ ├── mod.rs │ ├── put.rs │ ├── range.rs │ └── txn.rs │ ├── lazy.rs │ ├── lease │ ├── grant.rs │ ├── keep_alive.rs │ ├── mod.rs │ └── revoke.rs │ ├── lib.rs │ ├── lock │ ├── mod.rs │ ├── release.rs │ └── require.rs │ ├── protos │ └── mod.rs │ ├── response_header.rs │ └── watch │ ├── mod.rs │ └── watch_impl.rs ├── mock-etcd ├── .gitignore ├── Cargo.toml ├── README.md ├── build.rs └── src │ ├── lib.rs │ └── mock_etcd.rs └── proto ├── auth.proto ├── kv.proto ├── lock.proto └── rpc.proto /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | pull_request: 6 | workflow_dispatch: 7 | schedule: [cron: "0 0 * * *"] 8 | 9 | env: 10 | CI_RUST_TOOLCHAIN: 1.59 11 | 12 | jobs: 13 | test: 14 | name: Rust ${{matrix.rust}} 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Install dependencies 18 | run: sudo apt install -y cmake g++ libprotobuf-dev protobuf-compiler 19 | - name: Prepare docker environment 20 | uses: crazy-max/ghaction-docker-buildx@v3 21 | with: 22 | buildx-version: latest 23 | qemu-version: latest 24 | - uses: actions/checkout@v2 25 | - uses: actions-rs/toolchain@v1 26 | with: 27 | profile: minimal 28 | toolchain: ${{ env.CI_RUST_TOOLCHAIN }} 29 | override: true 30 | - name: Test mock etcd 31 | run: cargo test -p mock-etcd 32 | 33 | - name: Test etcd client 34 | run: | 35 | sudo apt install net-tools 36 | export ETCD_CONTAINER_NAME=etcd 37 | docker run -d --rm --net host --name $ETCD_CONTAINER_NAME gcr.io/google-containers/etcd:3.4.9 /usr/local/bin/etcd 38 | docker ps 39 | docker logs $ETCD_CONTAINER_NAME 40 | sudo netstat -lntp 41 | cargo test -p etcd-client 42 | 43 | fmt: 44 | name: Format 45 | runs-on: ubuntu-latest 46 | steps: 47 | - uses: actions/checkout@v2 48 | - uses: actions-rs/toolchain@v1 49 | with: 50 | profile: minimal 51 | toolchain: ${{ env.CI_RUST_TOOLCHAIN }} 52 | override: true 53 | - run: rustup component add rustfmt 54 | - uses: actions-rs/cargo@v1 55 | with: 56 | command: fmt 57 | args: --all -- --check 58 | 59 | clippy: 60 | name: Clippy 61 | runs-on: ubuntu-latest 62 | steps: 63 | - run: sudo apt install -y cmake g++ libprotobuf-dev protobuf-compiler 64 | - uses: actions/checkout@v2 65 | - uses: actions-rs/toolchain@v1 66 | with: 67 | profile: minimal 68 | toolchain: ${{ env.CI_RUST_TOOLCHAIN }} 69 | override: true 70 | - run: rustup component add clippy 71 | - uses: actions-rs/cargo@v1 72 | with: 73 | command: clippy 74 | args: --all-targets --all-features -- -D warnings 75 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | Cargo.lock 4 | .idea/ 5 | etcd-client/src/protos/* 6 | !etcd-client/src/protos/mod.rs 7 | mock-etcd/src/* 8 | !mock-etcd/lib.rs 9 | !mock-etcd/mock_etcd.rs 10 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | 3 | members = [ 4 | "etcd-client", 5 | "mock-etcd", 6 | ] 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 datenlord 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | etcd client for Rust 2 | ==== 3 | 4 | [![CI Status][ci-badge]][ci-url] 5 | [![License][license-badge]][license-url] 6 | 7 | [ci-badge]: https://img.shields.io/github/workflow/status/datenlord/etcd-client/CI?style=flat-square 8 | [ci-url]: https://github.com/datenlord/etcd-client/actions 9 | [license-badge]: https://img.shields.io/github/license/datenlord/etcd-client.svg?style=flat-square 10 | [license-url]: https://github.com/datenlord/etcd-client/blob/master/LICENSE 11 | 12 | This is an [etcd](https://github.com/etcd-io/etcd)(API v3) client for Rust, which is refactored from [etcd-rs](https://github.com/luncj/etcd-rs) project. 13 | 14 | ## Road Map 15 | - [x] 0.1 Replace the tokio and tonic library with smol and grpc-rs, which are more lightweight libraries. 16 | - [x] 0.2 Refactor etcd api to use grpc-rs async APIs to provide real async functionalities. 17 | - [x] 0.3 Apply the single key-value lock free Cache to etcd client, which is based on the etcd watch mechanism. 18 | - [x] 0.4 Apply the lru replacement policies with lock to etcd Cache. 19 | - [x] 0.5 Add the retry with exponential backoff mechanism to etcd client. 20 | - [ ] 0.6 Support the Cache recovery mechanism. 21 | - [ ] 0.7 Support the lock free lru replacement policies to etcd Cache. 22 | - [ ] 0.8 Support range key-value Cache. 23 | -------------------------------------------------------------------------------- /etcd-client/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "etcd-client" 3 | version = "0.1.0" 4 | authors = ["Qi Tian "] 5 | edition = "2021" 6 | keywords = ["etcd", "future", "async"] 7 | repository = "https://github.com/datenlord/etcd-client" 8 | readme = "README.md" 9 | homepage = "https://github.com/datenlord/etcd-client" 10 | description = "etcd client for datenlord" 11 | license = "MIT" 12 | categories = ["filesystem"] 13 | 14 | [dependencies] 15 | arc-swap = "1.5.0" 16 | async-compat = "0.2.1" 17 | async-stream = "0.2" 18 | async-std = "1.8.0" 19 | async-trait = "0.1" 20 | backoff = { version = "0.3.0", features = ["futures"] } 21 | bytes = "0.5" 22 | clippy-utilities = "0.1.0" 23 | either = "1.6.1" 24 | futures = "0.3.5" 25 | grpcio = { version = "0.9.1", default-features = false, features = [ 26 | "protobuf-codec", 27 | ] } 28 | http = "0.2" 29 | log = "0.4.11" 30 | lockfree-cuckoohash = { git = "https://github.com/datenlord/lockfree-cuckoohash", rev = "27f965b" } 31 | priority-queue = "1.0.5" 32 | protobuf = "2.16.2" 33 | smol = "1.2.4" 34 | thiserror = "1.0" 35 | crossbeam-queue = "0.3.8" 36 | async-broadcast = "0.5.1" 37 | 38 | [dev-dependencies] 39 | env_logger = "0.8.4" 40 | 41 | [build-dependencies] 42 | protoc-grpcio = "2.0.0" 43 | -------------------------------------------------------------------------------- /etcd-client/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | // grpcio depends on cmake, g++ and protoc, 3 | // run the following command to install: 4 | // `sudo apt install cmake g++ libprotobuf-dev protobuf-compiler` 5 | protoc_grpcio::compile_grpc_protos( 6 | &[ 7 | "proto/kv.proto", 8 | "proto/auth.proto", 9 | "proto/rpc.proto", 10 | "proto/lock.proto", 11 | ], // inputs 12 | &[".."], // includes 13 | "src/protos", // output 14 | None, // customizations 15 | ) 16 | .unwrap_or_else(|e| panic!("Failed to compile gRPC definitions!, the error is: {}", e)); 17 | } 18 | -------------------------------------------------------------------------------- /etcd-client/src/auth/authenticate.rs: -------------------------------------------------------------------------------- 1 | use crate::protos::rpc::{AuthenticateRequest, AuthenticateResponse}; 2 | use crate::ResponseHeader; 3 | 4 | /// Request for authenticating. 5 | #[derive(Debug, Clone)] 6 | pub struct EtcdAuthenticateRequest { 7 | /// Etcd authenticate request. 8 | proto: AuthenticateRequest, 9 | } 10 | 11 | impl EtcdAuthenticateRequest { 12 | /// Creates a new `EtcdAuthenticateRequest`. 13 | #[inline] 14 | pub fn new(name: N, password: P) -> Self 15 | where 16 | N: Into, 17 | P: Into, 18 | { 19 | let proto = AuthenticateRequest { 20 | name: name.into(), 21 | password: password.into(), 22 | ..AuthenticateRequest::default() 23 | }; 24 | Self { proto } 25 | } 26 | } 27 | 28 | impl From for AuthenticateRequest { 29 | #[inline] 30 | fn from(e: EtcdAuthenticateRequest) -> Self { 31 | e.proto 32 | } 33 | } 34 | 35 | /// Response for authenticating. 36 | #[derive(Debug)] 37 | pub struct EtcdAuthenticateResponse { 38 | /// Etcd authenticate response. 39 | proto: AuthenticateResponse, 40 | } 41 | 42 | impl EtcdAuthenticateResponse { 43 | /// Takes the header out of response, leaving a `None` in its place. 44 | #[inline] 45 | pub fn take_header(&mut self) -> Option { 46 | self.proto.header.take().map(From::from) 47 | } 48 | 49 | /// Gets an authorized token that can be used in succeeding RPCs. 50 | #[inline] 51 | pub fn token(&self) -> &str { 52 | &self.proto.token 53 | } 54 | } 55 | 56 | impl From for EtcdAuthenticateResponse { 57 | #[inline] 58 | fn from(resp: AuthenticateResponse) -> Self { 59 | Self { proto: resp } 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /etcd-client/src/auth/mod.rs: -------------------------------------------------------------------------------- 1 | /// Authenticate mode for Etcd authentication operations. 2 | mod authenticate; 3 | 4 | pub use authenticate::{EtcdAuthenticateRequest, EtcdAuthenticateResponse}; 5 | 6 | use crate::protos::rpc_grpc::AuthClient; 7 | use crate::retryable; 8 | use crate::Result; 9 | use crate::CURRENT_INTERVAL_ENV_KEY; 10 | use crate::CURRENT_INTERVAL_VALUE; 11 | use crate::INITIAL_INTERVAL_ENV_KEY; 12 | use crate::INITIAL_INTERVAL_VALUE; 13 | use crate::MAX_ELAPSED_TIME_ENV_KEY; 14 | use crate::MAX_ELAPSED_TIME_VALUE; 15 | use backoff::ExponentialBackoff; 16 | use std::time::Duration; 17 | 18 | /// Auth client which provides authenticating operation. 19 | #[derive(Clone)] 20 | pub struct Auth { 21 | /// Etcd Auth client. 22 | client: AuthClient, 23 | } 24 | 25 | impl Auth { 26 | /// Creates a new Auth client. 27 | pub(crate) const fn new(client: AuthClient) -> Self { 28 | Self { client } 29 | } 30 | 31 | /// Performs an authenticating operation. 32 | /// It generates an authentication token based on a given user name and password. 33 | /// # Errors 34 | /// Will returns `Err` if the status of `response` is not `ok` 35 | #[inline] 36 | pub async fn authenticate( 37 | &mut self, 38 | req: EtcdAuthenticateRequest, 39 | ) -> Result { 40 | let authenticate_result = retryable!(|| async { 41 | let resp = self.client.authenticate_async(&req.clone().into())?; 42 | Ok(From::from(resp.await?)) 43 | }); 44 | Ok(authenticate_result) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /etcd-client/src/client.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use std::net::SocketAddr; 4 | 5 | use grpcio::{Channel, ChannelBuilder, EnvBuilder, LbPolicy}; 6 | 7 | use crate::{ 8 | protos::{ 9 | lock_grpc::LockClient, 10 | rpc_grpc::{AuthClient, KvClient, LeaseClient, WatchClient}, 11 | }, 12 | watch::SingleWatchEventReceiver, 13 | }; 14 | use crate::{Auth, KeyRange, Kv, Lease, Lock, Result, Watch}; 15 | 16 | /// Config for establishing etcd client. 17 | #[non_exhaustive] 18 | pub struct ClientConfig { 19 | /// Etcd server end points. 20 | pub endpoints: Vec, 21 | /// Etcd Auth configurations (User ID, password). 22 | pub auth: Option<(String, String)>, 23 | /// Etcd client cache size. 24 | pub cache_size: usize, 25 | /// Enable etcd client cache. 26 | pub cache_enable: bool, 27 | } 28 | 29 | impl ClientConfig { 30 | /// New a client config 31 | #[must_use] 32 | #[inline] 33 | pub fn new( 34 | endpoints: Vec, 35 | auth: Option<(String, String)>, 36 | cache_size: usize, 37 | cache_enable: bool, 38 | ) -> Self { 39 | Self { 40 | endpoints, 41 | auth, 42 | cache_size, 43 | cache_enable, 44 | } 45 | } 46 | } 47 | 48 | /// Client is an abstraction for grouping etcd operations and managing underlying network communications. 49 | #[derive(Clone)] 50 | pub struct Client { 51 | /// Inner struct for etcd client. 52 | inner: Arc, 53 | } 54 | 55 | #[allow(dead_code)] 56 | /// Inner struct 57 | pub struct Inner { 58 | /// A grpc channel used to communicate with Etcd server. 59 | channel: Channel, 60 | /// Auth client for authentication operations. 61 | auth_client: Auth, 62 | /// Key-Value client for key-value operations. 63 | kv_client: Arc, 64 | /// Watch client for watch operations. 65 | watch_client: Watch, 66 | /// Lease client for lease operations. 67 | lease_client: Lease, 68 | /// Lock client for lock operations. 69 | lock_client: Lock, 70 | } 71 | 72 | impl Client { 73 | /// Get grpc channel. 74 | fn get_channel(cfg: &ClientConfig) -> Channel { 75 | assert!(!cfg.endpoints.is_empty(), "Empty etcd endpoints"); 76 | 77 | let mut end_points = cfg.endpoints.join(","); 78 | let env = Arc::new(EnvBuilder::new().build()); 79 | if cfg.endpoints.len() > 1 { 80 | let socket_address: SocketAddr = cfg 81 | .endpoints 82 | .first() 83 | .unwrap_or_else(|| panic!("Fail to get the first endpoint")) 84 | .parse() 85 | .unwrap_or_else(|e| { 86 | panic!( 87 | "Fail to parse enpoint to socket address, the error is {}", 88 | e, 89 | ) 90 | }); 91 | cfg.endpoints.iter().for_each(|endpoint| { 92 | let ip: SocketAddr = endpoint.parse().unwrap_or_else(|e| { 93 | panic!( 94 | "Fail to parse enpoint to socket address, the error is {}", 95 | e, 96 | ) 97 | }); 98 | if !(socket_address.is_ipv4() && ip.is_ipv4() 99 | || socket_address.is_ipv6() && ip.is_ipv6()) 100 | { 101 | panic!("Endpoints have different type of ip address schema"); 102 | } 103 | }); 104 | 105 | if socket_address.is_ipv4() { 106 | end_points = format!("{}:{}", "ipv4", end_points); 107 | } else if socket_address.is_ipv6() { 108 | end_points = format!("{}:{}", "ipv6", end_points); 109 | } else { 110 | panic!("unsupported etcd address: {}", socket_address) 111 | } 112 | } 113 | let ch = ChannelBuilder::new(env) 114 | .load_balancing_policy(LbPolicy::RoundRobin) 115 | .connect(end_points.as_str()); 116 | ch 117 | } 118 | 119 | /// Connects to etcd generate auth token. 120 | /// The client connection used to request the authentication token is typically thrown away; 121 | /// it cannot carry the new token’s credentials. This is because `gRPC` doesn’t provide a way 122 | /// for adding per RPC credential after creation of the connection. 123 | // async fn generate_auth_token(cfg: &ClientConfig) -> Result> { 124 | // use crate::AuthenticateRequest; 125 | 126 | // let channel = Self::get_channel(&cfg)?; 127 | 128 | // let mut auth_client = Auth::new(AuthClient::new(channel)); 129 | 130 | // let token = match cfg.auth.as_ref() { 131 | // Some((name, password)) => auth_client 132 | // .authenticate(AuthenticateRequest::new(name, password)) 133 | // .await 134 | // .map(|r| Some(r.token().to_owned()))?, 135 | // None => None, 136 | // }; 137 | 138 | // Ok(token) 139 | // } 140 | 141 | /// Connects to etcd cluster and returns a client. 142 | /// 143 | /// # Errors 144 | /// Will returns `Err` if failed to contact with given endpoints or authentication failed. 145 | #[inline] 146 | pub async fn connect(cfg: ClientConfig) -> Result { 147 | let channel = Self::get_channel(&cfg); 148 | let etcd_watch_client = WatchClient::new(channel.clone()); 149 | 150 | Ok(Self { 151 | inner: Arc::new(Inner { 152 | channel: channel.clone(), 153 | auth_client: Auth::new(AuthClient::new(channel.clone())), 154 | watch_client: Watch::new(&etcd_watch_client), 155 | kv_client: Kv::new( 156 | KvClient::new(channel.clone()), 157 | etcd_watch_client, 158 | cfg.cache_size, 159 | cfg.cache_enable, 160 | ), 161 | lease_client: Lease::new(LeaseClient::new(channel.clone())), 162 | lock_client: Lock::new(LockClient::new(channel)), 163 | }), 164 | }) 165 | } 166 | 167 | /// Gets an auth client. 168 | #[inline] 169 | #[must_use] 170 | pub fn auth(&self) -> Auth { 171 | self.inner.auth_client.clone() 172 | } 173 | 174 | /// Gets a key-value client. 175 | #[inline] 176 | #[must_use] 177 | pub fn kv(&self) -> Arc { 178 | Arc::::clone(&self.inner.kv_client) 179 | } 180 | 181 | /// Get a lock client. 182 | #[inline] 183 | #[must_use] 184 | pub fn lock(&self) -> Lock { 185 | self.inner.lock_client.clone() 186 | } 187 | 188 | /// Gets a watch client. 189 | #[inline] 190 | #[must_use] 191 | pub fn watch_client(&self) -> Watch { 192 | self.inner.watch_client.clone() 193 | } 194 | 195 | /// Perform a watch operation 196 | /// 197 | /// # Errors 198 | /// 199 | /// etcd error: client closed 200 | #[inline] 201 | pub async fn watch(&self, key_range: KeyRange) -> Result { 202 | let mut client = self.inner.watch_client.clone(); 203 | client.watch(key_range).await 204 | } 205 | 206 | /// Gets a lease client. 207 | #[inline] 208 | #[must_use] 209 | pub fn lease(&self) -> Lease { 210 | self.inner.lease_client.clone() 211 | } 212 | 213 | /// Shut down any running tasks. 214 | /// 215 | /// # Errors 216 | /// 217 | /// Will return `Err` if RPC call is failed. 218 | #[inline] 219 | pub async fn shutdown(&self) -> Result<()> { 220 | let mut watch_client = self.inner.watch_client.clone(); 221 | watch_client.shutdown().await?; 222 | let mut lease_client = self.inner.lease_client.clone(); 223 | lease_client.shutdown().await?; 224 | self.inner.kv_client.shutdown().await; 225 | Ok(()) 226 | } 227 | } 228 | -------------------------------------------------------------------------------- /etcd-client/src/error.rs: -------------------------------------------------------------------------------- 1 | use crate::kv::LocalWatchRequest; 2 | use crate::lease::EtcdLeaseKeepAliveRequest; 3 | use smol::channel::SendError; 4 | 5 | #[non_exhaustive] 6 | #[derive(thiserror::Error, Debug)] 7 | /// Error type for etcd-client 8 | pub enum EtcdError { 9 | /// InvalidURI 10 | #[error("invalid URI: {0}")] 11 | InvalidUri(#[from] http::uri::InvalidUri), 12 | /// Transport 13 | #[error("gRPC transport error: {0}")] 14 | Transport(#[from] grpcio::Error), 15 | /// SendError for () 16 | #[error("send error for (): {0}")] 17 | SendFailed(#[from] SendError<()>), 18 | /// SendError for EtcdLeaseKeepAliveRequest 19 | #[error("send error for EtcdLeaseKeepAliveRequest: {0}")] 20 | SendFailedForLeaseKeepAliveRequest(#[from] SendError), 21 | /// SendError for EtcdWatchRequest 22 | #[error("send error for EtcdWatchRequest: {0}")] 23 | SendFailedForWatchRequest(#[from] SendError), 24 | /// Internal Error 25 | #[error("Internal Error: {0}")] 26 | InternalError(String), 27 | /// waiting for response timeout 28 | #[error("waiting for response timeout: {0}")] 29 | WaitingResponseTimeout(String), 30 | /// Client closed 31 | #[error("etcd client closed: {0}")] 32 | ClientClosed(String), 33 | } 34 | -------------------------------------------------------------------------------- /etcd-client/src/kv/cache.rs: -------------------------------------------------------------------------------- 1 | //! The implementation for Etcd cache. 2 | use super::KeyValue; 3 | use super::LocalWatchRequest; 4 | use super::OverflowArithmetic; 5 | use crate::Result as Res; 6 | use lockfree_cuckoohash::{pin, LockFreeCuckooHash}; 7 | use priority_queue::PriorityQueue; 8 | use smol::channel::Sender; 9 | use smol::lock::Mutex; 10 | use std::time::{SystemTime, UNIX_EPOCH}; 11 | 12 | /// Cache entry 13 | #[derive(Debug, Clone, PartialEq)] 14 | pub struct CacheEntry { 15 | /// current revision of key in cache 16 | revision: i64, 17 | /// key value, None means key has been deleted 18 | /// but watch is not cancelled yet. 19 | kv: Option, 20 | } 21 | 22 | impl CacheEntry { 23 | /// Create a new `CacheEntry`. 24 | pub const fn new(kv: Option, revision: i64) -> Self { 25 | Self { revision, kv } 26 | } 27 | } 28 | /// Cache struct contains a lock-free hashTable. 29 | pub struct Cache { 30 | /// map to store key value 31 | hashtable: LockFreeCuckooHash, CacheEntry>, 32 | /// lru queue of the keys in hashtable. 33 | lru_queue: Mutex, u64>>, 34 | } 35 | 36 | impl Cache { 37 | /// Create a new `Cache` with specified size. 38 | pub fn new(size: usize) -> Self { 39 | Self { 40 | hashtable: LockFreeCuckooHash::with_capacity(size), 41 | lru_queue: Mutex::new(PriorityQueue::new()), 42 | } 43 | } 44 | 45 | /// Searches a `key` from the cache. 46 | pub async fn search(&self, key: &[u8]) -> Option { 47 | let search_result = { 48 | let guard = pin(); 49 | self.hashtable.get(key, &guard).cloned() 50 | }; 51 | match search_result { 52 | Some(entry) => { 53 | if let Some(kv) = entry.kv { 54 | self.lru_queue 55 | .lock() 56 | .await 57 | .change_priority(key, Self::get_priority()); 58 | Some(kv) 59 | } else { 60 | None 61 | } 62 | } 63 | None => None, 64 | } 65 | } 66 | 67 | /// Check if the new `CacheEntry` has higher revision. 68 | const fn higher_revision(old: &CacheEntry, new: &CacheEntry) -> bool { 69 | new.revision > old.revision 70 | } 71 | 72 | /// Helper function to insert or update a `key` to the cache. 73 | /// Return `(bool, bool)` first bool indicates if operation succeed 74 | /// second bool indicates if it is an insert or not. 75 | async fn insert_or_update_helper( 76 | &self, 77 | key: Vec, 78 | value: KeyValue, 79 | mark_delete: bool, 80 | ) -> (bool, bool) { 81 | let revision = value.get_mod_revision(); 82 | let res = { 83 | let guard = &pin(); 84 | let (succeed, old_value) = self.hashtable.insert_or_update_on( 85 | key.clone(), 86 | CacheEntry::new(if mark_delete { None } else { Some(value) }, revision), 87 | Self::higher_revision, 88 | guard, 89 | ); 90 | (succeed, old_value.is_none()) 91 | }; 92 | if res.0 { 93 | self.lru_queue.lock().await.push(key, Self::get_priority()); 94 | } 95 | res 96 | } 97 | 98 | /// Insert or update a `key` to the cache. 99 | pub async fn insert_or_update(&self, key: Vec, value: KeyValue) -> (bool, bool) { 100 | self.insert_or_update_helper(key, value, false).await 101 | } 102 | 103 | /// Remove a `key` from cache totally 104 | pub async fn remove(&self, key: &[u8]) { 105 | // Adopt the following sequence to keep consistency between hashtable and lru_queue 106 | // 107 | // | Sequence | Sequence | Sequence | Sequence | Sequence | Sequence | 108 | // | insert hashtable | insert hashtable | remove lru_queue | insert hashtable | remove lru_queue | remove lru_queue | 109 | // | insert lru_queue | remove lru_queue | insert hashtable | remove lru_queue | insert hashtable | remove hashtable | 110 | // | remove lru_queue | insert lru_queue | insert lru_queue | remove hashtable | remove hashtable | insert hashtable | 111 | // | remove hashtable | remove hashtable | remove hashtable | insert lru_queue | insert lru_queue | insert lru_queue | 112 | // | Good | <---------- key in lru_queue not in hashtable, but it is OK --------> | Good | 113 | self.lru_queue.lock().await.remove(key); 114 | self.hashtable.remove(key); 115 | } 116 | 117 | /// Mark a `key` as delete. 118 | pub async fn mark_delete(&self, key: Vec, value: KeyValue) { 119 | self.insert_or_update_helper(key, value, true).await; 120 | } 121 | 122 | /// Gets the priority of a key in lru queue. 123 | fn get_priority() -> u64 { 124 | let current_time = SystemTime::now(); 125 | let since_the_epoch = current_time 126 | .duration_since(UNIX_EPOCH) 127 | .unwrap_or_else(|e| panic!("Fail to get the time since the epoch, the error is {}", e)); 128 | 129 | u64::MAX.overflow_sub(since_the_epoch.as_secs()) 130 | } 131 | 132 | /// Adjusts cache size if the number of value in cache has exceed the threshold(0.8 * capacity). 133 | /// Adjusts cache to 0.6 * capacity 134 | pub async fn adjust_cache_size(&self, watch_sender: &Sender) -> Res<()> { 135 | if let Some(mut queue) = self.lru_queue.try_lock() { 136 | let upper_bound = self.hashtable.capacity().overflow_mul(8).overflow_div(10); 137 | let lower_bound = self.hashtable.capacity().overflow_mul(6).overflow_div(10); 138 | 139 | if queue.len() > upper_bound { 140 | while queue.len() > lower_bound { 141 | if let Some(pop_value) = queue.pop() { 142 | let key = pop_value.0; 143 | watch_sender.send(LocalWatchRequest::cancel(key)).await?; 144 | } 145 | } 146 | } 147 | } 148 | Ok(()) 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /etcd-client/src/kv/delete.rs: -------------------------------------------------------------------------------- 1 | use super::{EtcdKeyValue, KeyRange}; 2 | use crate::protos::rpc::{DeleteRangeRequest, DeleteRangeResponse}; 3 | use crate::ResponseHeader; 4 | use clippy_utilities::Cast; 5 | use protobuf::RepeatedField; 6 | 7 | /// Request for deleting key-value pairs. 8 | #[derive(Debug, Clone)] 9 | pub struct EtcdDeleteRequest { 10 | /// Etcd delete range key-value pairs request. 11 | proto: DeleteRangeRequest, 12 | } 13 | 14 | impl EtcdDeleteRequest { 15 | /// Creates a new `EtcdDeleteRequest` for the specified key range. 16 | #[inline] 17 | #[must_use] 18 | pub fn new(key_range: KeyRange) -> Self { 19 | let delete_range_request = DeleteRangeRequest { 20 | key: key_range.key, 21 | range_end: key_range.range_end, 22 | prev_kv: false, 23 | ..DeleteRangeRequest::default() 24 | }; 25 | Self { 26 | proto: delete_range_request, 27 | } 28 | } 29 | 30 | /// When set, responds with the key-value pair data before the update from this Delete request. 31 | #[inline] 32 | pub fn set_prev_kv(&mut self, prev_kv: bool) { 33 | self.proto.prev_kv = prev_kv; 34 | } 35 | 36 | /// Get key of request 37 | #[inline] 38 | pub fn get_key(&self) -> &[u8] { 39 | self.proto.get_key() 40 | } 41 | 42 | /// Wether request previous kv or not 43 | #[inline] 44 | pub const fn request_prev_kv(&self) -> bool { 45 | self.proto.prev_kv 46 | } 47 | } 48 | 49 | impl From for DeleteRangeRequest { 50 | #[inline] 51 | fn from(e: EtcdDeleteRequest) -> Self { 52 | e.proto 53 | } 54 | } 55 | 56 | /// Response for `DeleteRequest`. 57 | #[derive(Debug)] 58 | pub struct EtcdDeleteResponse { 59 | /// Etcd delete range key-value pairs response. 60 | proto: DeleteRangeResponse, 61 | } 62 | 63 | impl EtcdDeleteResponse { 64 | /// Takes the header out of response, leaving a `None` in its place. 65 | #[inline] 66 | pub fn take_header(&mut self) -> Option { 67 | self.proto.header.take().map(From::from) 68 | } 69 | 70 | /// Returns the number of keys deleted by the delete range request. 71 | #[inline] 72 | pub fn count_deleted(&self) -> usize { 73 | self.proto.deleted.cast() 74 | } 75 | 76 | /// Takes the previous key-value pairs out of response, leaving an empty vector in its place. 77 | #[inline] 78 | pub fn take_prev_kvs(&mut self) -> Vec { 79 | let kvs = std::mem::replace(&mut self.proto.prev_kvs, RepeatedField::from_vec(vec![])); 80 | 81 | kvs.into_iter().map(From::from).collect() 82 | } 83 | 84 | /// Returns `true` if the previous key-value pairs is not empty, and `false` otherwise. 85 | #[inline] 86 | pub fn has_prev_kvs(&self) -> bool { 87 | !self.proto.prev_kvs.is_empty() 88 | } 89 | 90 | /// Gets the previous kvs from `DeleteRangeResponse`. 91 | #[inline] 92 | pub fn get_prev_kvs(&self) -> Vec { 93 | self.proto 94 | .prev_kvs 95 | .clone() 96 | .into_iter() 97 | .map(From::from) 98 | .collect() 99 | } 100 | 101 | /// Get revision of response 102 | #[inline] 103 | pub fn get_revision(&self) -> i64 { 104 | self.proto.get_header().revision 105 | } 106 | } 107 | 108 | impl From for EtcdDeleteResponse { 109 | #[inline] 110 | fn from(resp: DeleteRangeResponse) -> Self { 111 | Self { proto: resp } 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /etcd-client/src/kv/get.rs: -------------------------------------------------------------------------------- 1 | use super::EtcdKeyValue; 2 | use crate::protos::rpc::{ 3 | RangeRequest, RangeRequest_SortOrder, RangeRequest_SortTarget, RangeResponse, 4 | }; 5 | use crate::ResponseHeader; 6 | use clippy_utilities::Cast; 7 | use protobuf::RepeatedField; 8 | 9 | /// Request for fetching a single key-value pair. 10 | #[derive(Debug, Clone)] 11 | pub struct EtcdGetRequest { 12 | /// Etcd range fetching request. 13 | proto: RangeRequest, 14 | } 15 | 16 | impl EtcdGetRequest { 17 | /// Creates a new `RangeRequest` for a specified key. 18 | #[inline] 19 | #[must_use] 20 | pub fn new(key: K) -> Self 21 | where 22 | K: Into>, 23 | { 24 | let range_request = RangeRequest { 25 | key: key.into(), 26 | range_end: vec![], 27 | limit: 0, 28 | revision: 0, 29 | sort_order: RangeRequest_SortOrder::NONE, 30 | sort_target: RangeRequest_SortTarget::KEY, 31 | serializable: false, 32 | keys_only: false, 33 | count_only: false, 34 | min_mod_revision: 0, 35 | max_mod_revision: 0, 36 | min_create_revision: 0, 37 | max_create_revision: 0, 38 | ..RangeRequest::default() 39 | }; 40 | Self { 41 | proto: range_request, 42 | } 43 | } 44 | 45 | /// Sets the maximum number of keys returned for the request. 46 | /// When limit is set to 0, it is treated as no limit. 47 | #[inline] 48 | pub fn set_limit(&mut self, limit: usize) { 49 | self.proto.limit = limit.cast(); 50 | } 51 | 52 | /// Gets the `key_range` from the `RangeRequest`. 53 | #[inline] 54 | pub fn get_key(&self) -> &[u8] { 55 | self.proto.get_key() 56 | } 57 | } 58 | 59 | impl From for RangeRequest { 60 | #[inline] 61 | fn from(e: EtcdGetRequest) -> Self { 62 | e.proto 63 | } 64 | } 65 | 66 | /// Response for `RangeRequest`. 67 | #[derive(Debug)] 68 | pub struct EtcdGetResponse { 69 | /// Etcd range fetching response. 70 | proto: RangeResponse, 71 | } 72 | 73 | impl EtcdGetResponse { 74 | /// Creates a new `EtcdGetResponse` for a specified key. 75 | #[inline] 76 | pub const fn new(range_response: RangeResponse) -> Self { 77 | Self { 78 | proto: range_response, 79 | } 80 | } 81 | 82 | /// Takes the header out of response, leaving a `None` in its place. 83 | #[inline] 84 | pub fn take_header(&mut self) -> Option { 85 | self.proto.header.take().map(From::from) 86 | } 87 | 88 | /// Takes the key-value pairs out of response, leaving an empty vector in its place. 89 | #[inline] 90 | pub fn take_kvs(&mut self) -> Vec { 91 | let kvs = std::mem::replace(&mut self.proto.kvs, RepeatedField::from_vec(vec![])); 92 | 93 | kvs.into_iter().map(From::from).collect() 94 | } 95 | 96 | /// Returns `true` if there are more keys to return in the requested range, and `false` otherwise. 97 | #[inline] 98 | pub const fn has_more(&self) -> bool { 99 | self.proto.more 100 | } 101 | 102 | /// Returns the number of keys within the range when requested. 103 | #[inline] 104 | pub fn count(&self) -> usize { 105 | self.proto.count.cast() 106 | } 107 | 108 | /// Gets the key-value pairs from the response. 109 | #[inline] 110 | pub fn get_kvs(&self) -> Vec { 111 | self.proto.kvs.clone().into_iter().map(From::from).collect() 112 | } 113 | 114 | /// Consume `EtcdGetRequest` and return inner `RangeResponse` 115 | #[allow(clippy::missing_const_for_fn)] // false alarm 116 | #[inline] 117 | pub fn get_inner(self) -> RangeResponse { 118 | self.proto 119 | } 120 | } 121 | 122 | impl From for EtcdGetResponse { 123 | #[inline] 124 | fn from(resp: RangeResponse) -> Self { 125 | Self { proto: resp } 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /etcd-client/src/kv/mod.rs: -------------------------------------------------------------------------------- 1 | mod cache; 2 | /// Etcd delete mod for delete operations. 3 | mod delete; 4 | /// Etcd get mod for get operations. 5 | mod get; 6 | /// Etcd put mod for put operations. 7 | mod put; 8 | /// Etcd range mod for range fetching operations. 9 | mod range; 10 | /// Etcd txn mod for transaction operations. 11 | mod txn; 12 | 13 | pub use super::watch::{EtcdWatchRequest, EtcdWatchResponse}; 14 | use async_std::channel::Receiver; 15 | pub use cache::Cache; 16 | pub use delete::{EtcdDeleteRequest, EtcdDeleteResponse}; 17 | pub use get::{EtcdGetRequest, EtcdGetResponse}; 18 | pub use put::{EtcdPutRequest, EtcdPutResponse}; 19 | pub use range::{EtcdRangeRequest, EtcdRangeResponse}; 20 | pub use txn::{EtcdTxnRequest, EtcdTxnResponse, TxnCmp, TxnOpResponse}; 21 | 22 | use super::OverflowArithmetic; 23 | use crate::protos::kv::Event_EventType; 24 | use crate::protos::rpc::{RangeResponse, WatchRequest}; 25 | use crate::protos::rpc_grpc::{KvClient, WatchClient}; 26 | use crate::retryable; 27 | use crate::CURRENT_INTERVAL_ENV_KEY; 28 | use crate::CURRENT_INTERVAL_VALUE; 29 | use crate::INITIAL_INTERVAL_ENV_KEY; 30 | use crate::INITIAL_INTERVAL_VALUE; 31 | use crate::MAX_ELAPSED_TIME_ENV_KEY; 32 | use crate::MAX_ELAPSED_TIME_VALUE; 33 | use backoff::ExponentialBackoff; 34 | use either::{Left, Right}; 35 | use futures::future::FutureExt; 36 | use futures::stream::StreamExt; 37 | use grpcio::{Error, StreamingCallSink, WriteFlags}; 38 | use log::warn; 39 | use protobuf::RepeatedField; 40 | use smol::channel::{unbounded, Sender}; 41 | use smol::Timer; 42 | use std::collections::{HashMap, VecDeque}; 43 | use std::fmt; 44 | use std::fmt::{Debug, Display}; 45 | use std::str; 46 | use std::sync::{Arc, Mutex, Weak}; 47 | use std::time::Duration; 48 | 49 | use crate::protos::kv::KeyValue; 50 | use crate::Result as Res; 51 | use arc_swap::ArcSwap; 52 | use clippy_utilities::Cast; 53 | use futures::SinkExt; 54 | 55 | /// Key-Value client. 56 | pub struct Kv { 57 | /// Etcd Key-Value client. 58 | client: KvClient, 59 | /// Watch Client 60 | watch_client: WatchClient, 61 | /// Kv Cache Size 62 | cache_size: usize, 63 | /// Kv Cache if etcd client cache is enabled otherwise None 64 | kvcache: Option>, 65 | /// Lock to restart cache 66 | restart_lock: Mutex<()>, 67 | } 68 | 69 | /// Cache of Kv 70 | pub struct KvCache { 71 | /// Etcd client cache. 72 | cache: Arc, 73 | /// Etcd watch request sender. 74 | watch_sender: Sender, 75 | /// A channel sender to send shutdown request for task. 76 | /// This task is to handle watch request/response. 77 | shutdown_watch_task: Sender<()>, 78 | /// Arc to `Kv` that contains this `KvCache` 79 | kv: Weak, 80 | } 81 | 82 | /// Etcd client cache default size. 83 | const ETCD_CACHE_DEFAULT_SIZE: usize = 256; 84 | impl KvCache { 85 | /// Creates a new `KvClient`. 86 | /// 87 | /// This method should only be called within etcd client. 88 | pub fn new(watch_client: &WatchClient, cache_size: usize, kv: Weak) -> Arc { 89 | let etcd_cache_size = if cache_size == 0 { 90 | ETCD_CACHE_DEFAULT_SIZE 91 | } else { 92 | cache_size 93 | }; 94 | 95 | let cache = Arc::new(Cache::new(etcd_cache_size)); 96 | 97 | let cache_clone = Arc::::clone(&cache); 98 | let (watch_req_sender, watch_req_receiver) = unbounded::(); 99 | let (shutdown_tx, shutdown_rx) = unbounded(); 100 | let this = Arc::new(Self { 101 | cache, 102 | watch_sender: watch_req_sender, 103 | shutdown_watch_task: shutdown_tx, 104 | kv, 105 | }); 106 | 107 | Self::start_watch_task( 108 | Arc::::clone(&this), 109 | cache_clone, 110 | shutdown_rx, 111 | watch_req_receiver, 112 | watch_client, 113 | ); 114 | 115 | this 116 | } 117 | 118 | /// Restart cache 119 | fn restart_cache(&self) { 120 | if let Some(kv) = self.kv.upgrade() { 121 | kv.restart_kvcache(); 122 | } 123 | } 124 | 125 | /// Start async watch task 126 | #[allow(clippy::mut_mut)] 127 | #[allow(clippy::too_many_lines)] 128 | fn start_watch_task( 129 | this_clone: Arc, 130 | cache_clone: Arc, 131 | shutdown_rx: Receiver<()>, 132 | watch_req_receiver: Receiver, 133 | watch_client: &WatchClient, 134 | ) { 135 | let (mut client_req_sender, mut client_resp_receiver) = watch_client 136 | .watch() 137 | .unwrap_or_else(|e| panic!("failed to start watch channel, the error is: {}", e)); 138 | smol::spawn(async move { 139 | let mut watch_map = HashMap::, i64>::new(); 140 | let mut shutdown_rx = shutdown_rx.into_future().fuse(); 141 | let mut watch_request_queue = VecDeque::::new(); 142 | let mut has_pending_watch_request = false; 143 | let mut processing_req = None; 144 | 145 | loop { 146 | let message = futures::select! { 147 | watch_req_opt = watch_req_receiver.recv().fuse() => { 148 | if let Ok(req) = watch_req_opt { 149 | Left(req) 150 | } else { 151 | warn!("Failed to receive watch request"); 152 | this_clone.restart_cache(); 153 | return; 154 | } 155 | 156 | } 157 | watch_resp_opt = client_resp_receiver.next().fuse() => { 158 | if let Some(resp) = watch_resp_opt { 159 | Right(resp) 160 | } else { 161 | warn!("failed to receive watch response from etcd"); 162 | this_clone.restart_cache(); 163 | return; 164 | } 165 | } 166 | _ = shutdown_rx => return 167 | 168 | }; 169 | 170 | match message { 171 | Left(watch_req) => { 172 | let watch_key = &watch_req.key; 173 | // If key is already watched, skip create watch request 174 | // If key is not watched, skip cancel watch request 175 | if (watch_req.is_create && watch_map.contains_key(watch_key)) 176 | || (!watch_req.is_create && !watch_map.contains_key(watch_key)) 177 | { 178 | continue; 179 | } 180 | watch_request_queue.push_back(watch_req); 181 | 182 | if !has_pending_watch_request { 183 | if let Some(req) = watch_request_queue.pop_front() { 184 | processing_req = Some(req); 185 | has_pending_watch_request = true; 186 | let res = Self::send_watch_requset(processing_req.as_ref(), &watch_map, &mut client_req_sender).await; 187 | if let Err(e) = res { 188 | warn!( 189 | "Fail to send watch request, the error is: {}, restart cache", 190 | e 191 | ); 192 | this_clone.restart_cache(); 193 | return; 194 | } 195 | } 196 | } 197 | }, 198 | Right(watch_resp) => { 199 | match watch_resp { 200 | Ok(resp) => { 201 | if resp.get_created() || resp.get_canceled() { 202 | if let Some(req) = processing_req.take() { 203 | let watch_id = resp.get_watch_id(); 204 | let is_create = resp.get_created(); 205 | if is_create != req.is_create { 206 | warn!( 207 | "processing request is_create {} doesn't match response is_create {}, 208 | restart cache", 209 | is_create, req.is_create 210 | ); 211 | this_clone.restart_cache(); 212 | return; 213 | } 214 | let processing_key = req.key.clone(); 215 | if is_create { 216 | watch_map.insert(processing_key, watch_id); 217 | } else { 218 | watch_map.remove(&processing_key); 219 | cache_clone.remove(&processing_key).await; 220 | } 221 | has_pending_watch_request = false; 222 | if let Some(req) = watch_request_queue.pop_front() { 223 | processing_req = Some(req); 224 | has_pending_watch_request = true; 225 | let res = Self::send_watch_requset(processing_req.as_ref(), &watch_map, &mut client_req_sender).await; 226 | if let Err(e) = res { 227 | warn!( 228 | "Fail to send watch request, the error is: {}, restart cache", 229 | e 230 | ); 231 | this_clone.restart_cache(); 232 | return; 233 | } 234 | } 235 | } else { 236 | warn!( 237 | "Receive watch response when no watch request is sent, the watch response is: {:?}, restart cache", 238 | resp 239 | ); 240 | this_clone.restart_cache(); 241 | return; 242 | } 243 | } else { 244 | let events = resp.get_events().to_vec(); 245 | for event in events { 246 | if event.get_field_type() == Event_EventType::PUT { 247 | cache_clone 248 | .insert_or_update( 249 | event.get_kv().get_key().to_vec(), 250 | event.get_kv().clone(), 251 | ).await; 252 | } else { 253 | cache_clone 254 | .mark_delete( 255 | event.get_kv().get_key().to_vec(), 256 | event.get_kv().clone(), 257 | ).await; 258 | { 259 | let res = this_clone 260 | .watch_sender 261 | .send(LocalWatchRequest::cancel( 262 | event.get_kv().get_key().to_vec(), 263 | )) 264 | .await; 265 | if let Err(e) = res { 266 | warn!( 267 | "Fail to send watch request, the error is: {}, restart cache", 268 | e 269 | ); 270 | this_clone.restart_cache(); 271 | return; 272 | } 273 | } 274 | } 275 | } 276 | } 277 | } 278 | Err(e) => { 279 | warn!( 280 | "Watch response contains error, the error is: {}, restart cache", 281 | e 282 | ); 283 | this_clone.restart_cache(); 284 | return; 285 | } 286 | } 287 | }, 288 | } 289 | } 290 | }) 291 | .detach(); 292 | } 293 | 294 | /// Send watch request to etcd server 295 | async fn send_watch_requset( 296 | watch_req: Option<&LocalWatchRequest>, 297 | watch_map: &HashMap, i64>, 298 | client_req_sender: &mut StreamingCallSink, 299 | ) -> Result<(), Error> { 300 | if let Some(req) = watch_req { 301 | let processing_key = req.key.clone(); 302 | let request = if req.is_create { 303 | let mut etcd_req = EtcdWatchRequest::create(KeyRange::key(processing_key)); 304 | etcd_req.set_start_revision(req.revision.cast()); 305 | etcd_req 306 | } else { 307 | let etcd_req = EtcdWatchRequest::cancel( 308 | (*watch_map 309 | .get(&processing_key) 310 | .unwrap_or_else(|| panic!("key {:?} doesn't exist", processing_key))) 311 | .cast(), 312 | ); 313 | etcd_req 314 | }; 315 | 316 | client_req_sender 317 | .send((request.into(), WriteFlags::default())) 318 | .await 319 | } else { 320 | Ok(()) 321 | } 322 | } 323 | 324 | /// Shutdown cache task 325 | #[inline] 326 | async fn shutdown_cache(&self) { 327 | if !self.shutdown_watch_task.is_closed() { 328 | if let Err(e) = self.shutdown_watch_task.send(()).await { 329 | // Shouldn't reach here because we have already check the channel is not closed. 330 | panic!("Shutdown cache error, the error is: {}", e); 331 | } 332 | self.shutdown_watch_task.close(); 333 | } 334 | } 335 | } 336 | impl Drop for KvCache { 337 | fn drop(&mut self) { 338 | smol::block_on(async { 339 | self.shutdown_cache().await; 340 | }); 341 | } 342 | } 343 | impl Kv { 344 | /// Creates a new `KvClient`. 345 | /// 346 | /// This method should only be called within etcd client. 347 | #[allow(unsafe_code)] 348 | #[allow(clippy::as_conversions)] 349 | pub(crate) fn new( 350 | client: KvClient, 351 | watch_client: WatchClient, 352 | cache_size: usize, 353 | cache_enable: bool, 354 | ) -> Arc { 355 | let this = Arc::new(Self { 356 | client, 357 | watch_client, 358 | cache_size, 359 | kvcache: None, 360 | restart_lock: Mutex::<()>::new(()), 361 | }); 362 | 363 | if cache_enable { 364 | let kvcache = Some(ArcSwap::from(KvCache::new( 365 | &this.watch_client, 366 | cache_size, 367 | Arc::::downgrade(&this), 368 | ))); 369 | // SAFETY: it is safe because this is constructor. 370 | // TODO: change to Arc::new_cyclic when 1.60 is released. 371 | unsafe { 372 | (*(Arc::::as_ptr(&this) as *mut Self)).kvcache = kvcache; 373 | } 374 | } 375 | this 376 | } 377 | 378 | /// Restart `KvCache` 379 | fn restart_kvcache(&self) { 380 | if self.restart_lock.try_lock().is_ok() { 381 | if let Some(ref kvcache) = self.kvcache { 382 | let self_weak = Weak::::clone(&kvcache.load().kv); 383 | let new_kvcache = KvCache::new(&self.watch_client, self.cache_size, self_weak); 384 | kvcache.store(new_kvcache); 385 | } 386 | } 387 | } 388 | 389 | /// Performs a key-value saving operation. 390 | /// 391 | /// # Errors 392 | /// 393 | /// Will return `Err` if RPC call is failed. 394 | #[inline] 395 | pub async fn put(&self, req: EtcdPutRequest) -> Res { 396 | let resp: EtcdPutResponse = retryable!(|| async { 397 | let resp = self.client.put_async(&req.clone().into())?; 398 | Ok(From::from(resp.await?)) 399 | }); 400 | // Wait until cache is updated and then return 401 | if let Some(ref kvcache) = self.kvcache { 402 | while let Some(kv) = kvcache.load().cache.search(req.get_key()).await { 403 | if kv.get_mod_revision() >= resp.get_revision() { 404 | break; 405 | } 406 | Timer::after(Duration::from_millis(1)).await; 407 | } 408 | } 409 | Ok(resp) 410 | } 411 | 412 | /// Performs a single key-value fetching operation. 413 | /// 414 | /// # Errors 415 | /// 416 | /// Will return `Err` if RPC call is failed. 417 | #[inline] 418 | pub async fn get(&self, req: EtcdGetRequest) -> Res { 419 | // cache is enabled 420 | if let Some(ref kvcache) = self.kvcache { 421 | if let Some(value) = kvcache.load().cache.search(req.get_key()).await { 422 | let mut response = RangeResponse::new(); 423 | response.set_count(1); 424 | response.set_kvs(RepeatedField::from_vec(vec![value])); 425 | return Ok(EtcdGetResponse::new(response)); 426 | } 427 | } 428 | 429 | let resp = retryable!(|| async { 430 | let resp = self.client.range_async(&req.clone().into())?; 431 | Ok(resp.await?) 432 | }); 433 | 434 | if let Some(ref kvcache_arc) = self.kvcache { 435 | let kvs = resp.get_kvs(); 436 | let kvcache = kvcache_arc.load(); 437 | for kv in kvs { 438 | let (succeed, is_insert) = kvcache 439 | .cache 440 | .insert_or_update(kv.get_key().to_vec(), kv.clone()) 441 | .await; 442 | if succeed && is_insert { 443 | // Creates a new watch request and adds to the send queue. 444 | let watch_request = 445 | LocalWatchRequest::create(kv.get_key().to_vec(), kv.get_mod_revision()); 446 | if let Err(e) = kvcache.watch_sender.send(watch_request).await { 447 | warn!( 448 | "Fail to send watch request, the error is {}, restart cache", 449 | e 450 | ); 451 | self.restart_kvcache(); 452 | return Err(e.into()); 453 | } 454 | 455 | // Adjust cache size 456 | if let Err(e) = kvcache.cache.adjust_cache_size(&kvcache.watch_sender).await { 457 | warn!( 458 | "Fail to send watch request, the error is {}, restart cache", 459 | e 460 | ); 461 | self.restart_kvcache(); 462 | return Err(e); 463 | } 464 | } 465 | } 466 | } 467 | Ok(From::from(resp)) 468 | } 469 | 470 | /// Performs a range key-value fetching operation. 471 | /// 472 | /// # Errors 473 | /// 474 | /// Will return `Err` if RPC call is failed. 475 | #[inline] 476 | pub async fn range(&self, req: EtcdRangeRequest) -> Res { 477 | // If the request is single key, use get() instead 478 | if req.is_single_key() { 479 | let resp = self 480 | .get(EtcdGetRequest::new(req.get_key_range().take_key())) 481 | .await?; 482 | return Ok(resp.get_inner().into()); 483 | } 484 | let resp = retryable!(|| async { 485 | let resp = self.client.range_async(&req.clone().into())?; 486 | Ok(From::from(resp.await?)) 487 | }); 488 | Ok(resp) 489 | } 490 | 491 | /// Performs a key-value deleting operation. 492 | /// 493 | /// # Errors 494 | /// 495 | /// Will return `Err` if RPC call is failed. 496 | #[inline] 497 | pub async fn delete(&self, mut req: EtcdDeleteRequest) -> Res { 498 | let request_prev_kv = req.request_prev_kv(); 499 | if self.kvcache.is_some() { 500 | req.set_prev_kv(true); 501 | }; 502 | let mut resp: EtcdDeleteResponse = retryable!(|| async { 503 | let resp = self.client.delete_range_async(&req.clone().into())?; 504 | Ok(From::from(resp.await?)) 505 | }); 506 | 507 | // Wait until cache is updated and then return 508 | if let Some(ref kvcache) = self.kvcache { 509 | let prev_kv = if request_prev_kv { 510 | resp.get_prev_kvs() 511 | } else { 512 | resp.take_prev_kvs() 513 | }; 514 | for kv in prev_kv { 515 | while let Some(kv) = kvcache.load().cache.search(kv.key()).await { 516 | if kv.get_mod_revision() >= resp.get_revision() { 517 | break; 518 | } 519 | Timer::after(Duration::from_millis(1)).await; 520 | } 521 | } 522 | } 523 | 524 | Ok(resp) 525 | } 526 | 527 | /// Performs a transaction operation. 528 | /// 529 | /// # Errors 530 | /// 531 | /// Will return `Err` if RPC call is failed. 532 | #[inline] 533 | pub async fn txn(&self, req: EtcdTxnRequest) -> Res { 534 | let resp = retryable!(|| async { 535 | let resp = self.client.txn_async(&req.clone().into())?; 536 | Ok(From::from(resp.await?)) 537 | }); 538 | Ok(resp) 539 | } 540 | 541 | /// Shut down the running watch task, if any. 542 | /// This should only be called when there are 543 | /// no other threads accessing the cache. 544 | #[inline] 545 | pub async fn shutdown(&self) { 546 | if let Some(ref kvcache) = self.kvcache { 547 | kvcache.load().shutdown_cache().await; 548 | } 549 | } 550 | } 551 | 552 | /// Watch request struct 553 | #[derive(Debug, Clone)] 554 | pub struct LocalWatchRequest { 555 | /// Request key 556 | key: Vec, 557 | /// Revision to start watch 558 | /// Set to -1 if the request is to cancel 559 | revision: i64, 560 | /// Create watch request or cancel watch request 561 | is_create: bool, 562 | } 563 | 564 | impl LocalWatchRequest { 565 | /// Create watch request 566 | fn create(key: Vec, revision: i64) -> Self { 567 | Self { 568 | key, 569 | revision, 570 | is_create: true, 571 | } 572 | } 573 | 574 | /// Create watch request 575 | fn cancel(key: Vec) -> Self { 576 | Self { 577 | key, 578 | revision: -1, 579 | is_create: false, 580 | } 581 | } 582 | } 583 | 584 | /// Key-Value pair. 585 | #[derive(Clone, PartialEq)] 586 | pub struct EtcdKeyValue { 587 | /// Etcd `KeyValue` pairs struct. 588 | proto: KeyValue, 589 | } 590 | 591 | impl EtcdKeyValue { 592 | /// Gets the key in bytes. An empty key is not allowed. 593 | #[inline] 594 | pub fn key(&self) -> &[u8] { 595 | &self.proto.key 596 | } 597 | 598 | /// Takes the key out of response, leaving an empty vector in its place. 599 | #[inline] 600 | pub fn take_key(&mut self) -> Vec { 601 | std::mem::take(&mut self.proto.key) 602 | } 603 | 604 | /// Converts the key from bytes `&[u8]` to `&str`. 605 | /// Leaves the original `&[u8]` in place, and creates a new string slice containing the entire content. 606 | /// 607 | /// # Panics 608 | /// 609 | /// Will panic if convert fail. 610 | #[inline] 611 | pub fn key_str(&self) -> &str { 612 | std::str::from_utf8(&self.proto.key) 613 | .unwrap_or_else(|e| panic!("Fail to convert bytes to string, the error is: {}", e)) 614 | } 615 | 616 | /// Gets the value held by the key, in bytes. 617 | #[inline] 618 | pub fn value(&self) -> &[u8] { 619 | &self.proto.value 620 | } 621 | 622 | /// Takes the value out of response, leaving an empty vector in its place. 623 | #[inline] 624 | pub fn take_value(&mut self) -> Vec { 625 | std::mem::take(&mut self.proto.value) 626 | } 627 | 628 | /// Converts the value from bytes `&[u8]` to `&str`. 629 | /// Leaves the original `&[u8]` in place, and creates a new string slice containing the entire content. 630 | /// 631 | /// # Panics 632 | /// 633 | /// Will panic if convert fail. 634 | #[inline] 635 | pub fn value_str(&self) -> &str { 636 | std::str::from_utf8(&self.proto.value) 637 | .unwrap_or_else(|e| panic!("Fail to convert bytes to string, the error is {}", e)) 638 | } 639 | 640 | /// Gets the revision of last creation on this key. 641 | #[inline] 642 | pub fn create_revision(&self) -> usize { 643 | self.proto.create_revision.cast() 644 | } 645 | 646 | /// Gets the revision of last modification on this key. 647 | #[inline] 648 | pub fn mod_revision(&self) -> usize { 649 | self.proto.mod_revision.cast() 650 | } 651 | 652 | /// Gets the version of the key. 653 | #[inline] 654 | pub fn version(&self) -> usize { 655 | self.proto.version.cast() 656 | } 657 | 658 | /// Gets the ID of the lease that attached to key. 659 | #[inline] 660 | pub fn lease(&self) -> usize { 661 | self.proto.lease.cast() 662 | } 663 | 664 | /// Returns `true` if this `KeyValue` has a lease attached, and `false` otherwise. 665 | #[inline] 666 | pub const fn has_lease(&self) -> bool { 667 | self.proto.lease != 0 668 | } 669 | } 670 | 671 | impl From for EtcdKeyValue { 672 | #[inline] 673 | fn from(kv: KeyValue) -> Self { 674 | Self { proto: kv } 675 | } 676 | } 677 | 678 | /// `KeyRange` is an abstraction for describing etcd key of various types. 679 | #[derive(PartialEq, Eq, PartialOrd, Ord, Clone)] 680 | pub struct KeyRange { 681 | /// The first key of the range and should be non-empty 682 | key: Vec, 683 | /// The key following the last key of the range 684 | range_end: Vec, 685 | } 686 | 687 | impl Display for KeyRange { 688 | #[inline] 689 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 690 | // Write strictly the first element into the supplied output 691 | // stream: `f`. Returns `fmt::Result` which indicates whether the 692 | // operation succeeded or failed. Note that `write!` uses syntax which 693 | // is very similar to `println!`. 694 | write!( 695 | f, 696 | "keyrange( begin:{}, end:{} )", 697 | str::from_utf8(&self.key).unwrap_or("not utf8"), 698 | str::from_utf8(&self.range_end).unwrap_or("not utf8") 699 | ) 700 | } 701 | } 702 | 703 | impl KeyRange { 704 | /// Creates a new `KeyRange` for describing a range of multiple keys. 705 | #[inline] 706 | pub fn range(key: K, range_end: R) -> Self 707 | where 708 | K: Into>, 709 | R: Into>, 710 | { 711 | Self { 712 | key: key.into(), 713 | range_end: range_end.into(), 714 | } 715 | } 716 | 717 | /// Creates a new `KeyRange` for describing a specified key. 718 | #[inline] 719 | pub fn key(key: K) -> Self 720 | where 721 | K: Into>, 722 | { 723 | Self { 724 | key: key.into(), 725 | range_end: vec![], 726 | } 727 | } 728 | 729 | /// Creates a new `KeyRange` for describing all keys. 730 | #[inline] 731 | #[must_use] 732 | pub fn all() -> Self { 733 | Self { 734 | key: vec![0], 735 | range_end: vec![0], 736 | } 737 | } 738 | 739 | /// Creates a new `KeyRange` for describing keys prefixed with specified value. 740 | #[inline] 741 | pub fn prefix(prefix: K) -> Self 742 | where 743 | K: Into>, 744 | { 745 | let key = prefix.into(); 746 | if key.is_empty() { 747 | // An empty Vec results in an invalid KeyRange. 748 | // Assume that an empty value passed to this method implies no prefix (i.e., all keys). 749 | return Self::all(); 750 | } 751 | 752 | let mut first_value = true; 753 | let mut range_end = key 754 | .iter() 755 | .rev() 756 | .filter_map(|e| { 757 | if *e < 0xff { 758 | if first_value { 759 | first_value = false; 760 | Some(e.overflow_add(1)) 761 | } else { 762 | Some(*e) 763 | } 764 | } else { 765 | None 766 | } 767 | }) 768 | .collect::>(); 769 | range_end.reverse(); 770 | Self { key, range_end } 771 | } 772 | 773 | /// Take key value 774 | #[inline] 775 | pub fn take_key(&mut self) -> Vec { 776 | std::mem::take(&mut self.key) 777 | } 778 | 779 | /// Take `range_end` value 780 | #[inline] 781 | pub fn take_range_end(&mut self) -> Vec { 782 | std::mem::take(&mut self.range_end) 783 | } 784 | } 785 | -------------------------------------------------------------------------------- /etcd-client/src/kv/put.rs: -------------------------------------------------------------------------------- 1 | use crate::protos::rpc::{PutRequest, PutResponse}; 2 | use crate::EtcdKeyValue; 3 | use crate::ResponseHeader; 4 | use clippy_utilities::Cast; 5 | 6 | /// Request for putting key-value. 7 | #[derive(Debug, Clone)] 8 | pub struct EtcdPutRequest { 9 | /// Etcd put key-value pairs request. 10 | proto: PutRequest, 11 | } 12 | 13 | impl EtcdPutRequest { 14 | /// Creates a new `EtcdPutRequest` for saving the specified key-value. 15 | #[inline] 16 | pub fn new(key: K, value: V) -> Self 17 | where 18 | K: Into>, 19 | V: Into>, 20 | { 21 | let put_request = PutRequest { 22 | key: key.into(), 23 | value: value.into(), 24 | lease: 0, 25 | prev_kv: false, 26 | ignore_value: false, 27 | ignore_lease: false, 28 | ..PutRequest::default() 29 | }; 30 | Self { proto: put_request } 31 | } 32 | 33 | /// Sets the lease ID to associate with the key in the key-value store. 34 | /// A lease value of 0 indicates no lease. 35 | #[inline] 36 | pub fn set_lease(&mut self, lease: u64) { 37 | self.proto.lease = lease.cast(); 38 | } 39 | 40 | /// When set, responds with the key-value pair data before the update from this Put request. 41 | #[inline] 42 | pub fn set_prev_kv(&mut self, prev_kv: bool) { 43 | self.proto.prev_kv = prev_kv; 44 | } 45 | 46 | /// When set, update the key without changing its current value. Returns an error if the key does not exist. 47 | #[inline] 48 | pub fn set_ignore_value(&mut self, ignore_value: bool) { 49 | self.proto.ignore_value = ignore_value; 50 | } 51 | 52 | /// When set, update the key without changing its current lease. Returns an error if the key does not exist. 53 | #[inline] 54 | pub fn set_ignore_lease(&mut self, ignore_lease: bool) { 55 | self.proto.ignore_lease = ignore_lease; 56 | } 57 | 58 | /// Gets the key from `PutRequest`. 59 | #[inline] 60 | pub fn get_key(&self) -> &[u8] { 61 | self.proto.get_key() 62 | } 63 | 64 | /// Gets the value from `PutRequest`. 65 | #[inline] 66 | pub fn get_value(&self) -> Vec { 67 | self.proto.get_value().to_vec() 68 | } 69 | } 70 | 71 | impl From for PutRequest { 72 | #[inline] 73 | fn from(e: EtcdPutRequest) -> Self { 74 | e.proto 75 | } 76 | } 77 | 78 | /// Response for putting key-value. 79 | #[derive(Debug)] 80 | pub struct EtcdPutResponse { 81 | /// Etcd put key-value pairs response. 82 | proto: PutResponse, 83 | } 84 | 85 | impl EtcdPutResponse { 86 | /// Takes the header out of response, leaving a `None` in its place. 87 | #[inline] 88 | pub fn take_header(&mut self) -> Option { 89 | self.proto.header.take().map(From::from) 90 | } 91 | 92 | /// Takes the previous key-value pair out of response, leaving a `None` in its place. 93 | #[inline] 94 | pub fn take_prev_kv(&mut self) -> Option { 95 | self.proto.prev_kv.take().map(From::from) 96 | } 97 | 98 | /// Gets the revision of the key-value store when generating the response. 99 | #[inline] 100 | pub fn get_revision(&self) -> i64 { 101 | self.proto.get_header().revision 102 | } 103 | } 104 | 105 | impl From for EtcdPutResponse { 106 | #[inline] 107 | fn from(resp: PutResponse) -> Self { 108 | Self { proto: resp } 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /etcd-client/src/kv/range.rs: -------------------------------------------------------------------------------- 1 | use super::{EtcdKeyValue, KeyRange}; 2 | use crate::protos::rpc::{ 3 | RangeRequest, RangeRequest_SortOrder, RangeRequest_SortTarget, RangeResponse, 4 | }; 5 | use crate::ResponseHeader; 6 | use clippy_utilities::Cast; 7 | use protobuf::RepeatedField; 8 | 9 | /// Request for fetching key-value pairs. 10 | #[derive(Debug, Clone)] 11 | pub struct EtcdRangeRequest { 12 | /// Etcd range fetching request. 13 | proto: RangeRequest, 14 | } 15 | 16 | impl EtcdRangeRequest { 17 | /// Creates a new `RangeRequest` for the specified key range. 18 | #[inline] 19 | #[must_use] 20 | pub fn new(key_range: KeyRange) -> Self { 21 | let range_request = RangeRequest { 22 | key: key_range.key, 23 | range_end: key_range.range_end, 24 | limit: 0, 25 | revision: 0, 26 | sort_order: RangeRequest_SortOrder::NONE, 27 | sort_target: RangeRequest_SortTarget::KEY, 28 | serializable: false, 29 | keys_only: false, 30 | count_only: false, 31 | min_mod_revision: 0, 32 | max_mod_revision: 0, 33 | min_create_revision: 0, 34 | max_create_revision: 0, 35 | ..RangeRequest::default() 36 | }; 37 | Self { 38 | proto: range_request, 39 | } 40 | } 41 | 42 | /// Sets the maximum number of keys returned for the request. 43 | /// When limit is set to 0, it is treated as no limit. 44 | #[inline] 45 | pub fn set_limit(&mut self, limit: usize) { 46 | self.proto.limit = limit.cast(); 47 | } 48 | 49 | /// Gets the `key_range` from the `RangeRequest`. 50 | #[inline] 51 | pub fn get_key_range(&self) -> KeyRange { 52 | KeyRange { 53 | key: self.proto.get_key().to_vec(), 54 | range_end: self.proto.get_range_end().to_vec(), 55 | } 56 | } 57 | 58 | /// Return if the range request is a single key request 59 | #[inline] 60 | pub fn is_single_key(&self) -> bool { 61 | self.proto.get_range_end().is_empty() 62 | } 63 | } 64 | 65 | impl From for RangeRequest { 66 | #[inline] 67 | fn from(e: EtcdRangeRequest) -> Self { 68 | e.proto 69 | } 70 | } 71 | 72 | /// Response for `RangeRequest`. 73 | #[derive(Debug)] 74 | pub struct EtcdRangeResponse { 75 | /// Etcd range fetching response. 76 | proto: RangeResponse, 77 | } 78 | 79 | impl EtcdRangeResponse { 80 | /// Creates a new `EtcdRangeResponse` for the specified key range. 81 | #[inline] 82 | pub const fn new(range_response: RangeResponse) -> Self { 83 | Self { 84 | proto: range_response, 85 | } 86 | } 87 | 88 | /// Takes the header out of response, leaving a `None` in its place. 89 | #[inline] 90 | pub fn take_header(&mut self) -> Option { 91 | self.proto.header.take().map(From::from) 92 | } 93 | 94 | /// Takes the key-value pairs out of response, leaving an empty vector in its place. 95 | #[inline] 96 | pub fn take_kvs(&mut self) -> Vec { 97 | let kvs = std::mem::replace(&mut self.proto.kvs, RepeatedField::from_vec(vec![])); 98 | 99 | kvs.into_iter().map(From::from).collect() 100 | } 101 | 102 | /// Returns `true` if there are more keys to return in the requested range, and `false` otherwise. 103 | #[inline] 104 | pub const fn has_more(&self) -> bool { 105 | self.proto.more 106 | } 107 | 108 | /// Returns the number of keys within the range when requested. 109 | #[inline] 110 | pub fn count(&self) -> usize { 111 | self.proto.count.cast() 112 | } 113 | 114 | /// Gets the key-value pairs from the response. 115 | #[inline] 116 | pub fn get_kvs(&self) -> Vec { 117 | self.proto.kvs.clone().into_iter().map(From::from).collect() 118 | } 119 | } 120 | 121 | impl From for EtcdRangeResponse { 122 | #[inline] 123 | fn from(resp: RangeResponse) -> Self { 124 | Self { proto: resp } 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /etcd-client/src/kv/txn.rs: -------------------------------------------------------------------------------- 1 | use super::{ 2 | EtcdDeleteRequest, EtcdDeleteResponse, EtcdPutRequest, EtcdPutResponse, EtcdRangeRequest, 3 | EtcdRangeResponse, KeyRange, 4 | }; 5 | use crate::protos::rpc::{ 6 | Compare, Compare_CompareResult, Compare_CompareTarget, Compare_oneof_target_union, RequestOp, 7 | ResponseOp, ResponseOp_oneof_response, TxnRequest, TxnResponse, 8 | }; 9 | use crate::ResponseHeader; 10 | use clippy_utilities::Cast; 11 | use protobuf::RepeatedField; 12 | 13 | /// Request for performing transaction operations. 14 | #[derive(Debug, Clone)] 15 | pub struct EtcdTxnRequest { 16 | /// Etcd transaction operations request. 17 | proto: TxnRequest, 18 | } 19 | 20 | impl EtcdTxnRequest { 21 | /// Creates a new `TxnRequest`. 22 | #[inline] 23 | #[must_use] 24 | pub fn new() -> Self { 25 | let txn_request = TxnRequest { 26 | compare: RepeatedField::from_vec(vec![]), 27 | success: RepeatedField::from_vec(vec![]), 28 | failure: RepeatedField::from_vec(vec![]), 29 | ..TxnRequest::default() 30 | }; 31 | Self { proto: txn_request } 32 | } 33 | 34 | /// Adds a version compare. 35 | #[inline] 36 | #[must_use] 37 | pub fn when_version(mut self, key_range: KeyRange, cmp: TxnCmp, version: usize) -> Self { 38 | let compare_result: Compare_CompareResult = cmp.into(); 39 | let compare = Compare { 40 | result: compare_result, 41 | target: Compare_CompareTarget::VERSION, 42 | key: key_range.key, 43 | range_end: key_range.range_end, 44 | target_union: Some(Compare_oneof_target_union::version(version.cast())), 45 | ..Compare::default() 46 | }; 47 | self.proto.compare.push(compare); 48 | self 49 | } 50 | 51 | /// Adds a create revision compare. 52 | #[inline] 53 | #[must_use] 54 | pub fn when_create_revision( 55 | mut self, 56 | key_range: KeyRange, 57 | cmp: TxnCmp, 58 | revision: usize, 59 | ) -> Self { 60 | let compare_result: Compare_CompareResult = cmp.into(); 61 | let compare = Compare { 62 | result: compare_result, 63 | target: Compare_CompareTarget::CREATE, 64 | key: key_range.key, 65 | range_end: key_range.range_end, 66 | target_union: Some(Compare_oneof_target_union::create_revision(revision.cast())), 67 | ..Compare::default() 68 | }; 69 | self.proto.compare.push(compare); 70 | self 71 | } 72 | 73 | /// Adds a mod revision compare. 74 | #[inline] 75 | #[must_use] 76 | pub fn when_mod_revision(mut self, key_range: KeyRange, cmp: TxnCmp, revision: usize) -> Self { 77 | let compare_result: Compare_CompareResult = cmp.into(); 78 | let compare = Compare { 79 | result: compare_result, 80 | target: Compare_CompareTarget::MOD, 81 | key: key_range.key, 82 | range_end: key_range.range_end, 83 | target_union: Some(Compare_oneof_target_union::mod_revision(revision.cast())), 84 | ..Compare::default() 85 | }; 86 | self.proto.compare.push(compare); 87 | self 88 | } 89 | 90 | /// Adds a value compare. 91 | #[inline] 92 | #[must_use] 93 | pub fn when_value(mut self, key_range: KeyRange, cmp: TxnCmp, value: V) -> Self 94 | where 95 | V: Into>, 96 | { 97 | let compare_result: Compare_CompareResult = cmp.into(); 98 | let compare = Compare { 99 | result: compare_result, 100 | target: Compare_CompareTarget::VALUE, 101 | key: key_range.key, 102 | range_end: key_range.range_end, 103 | target_union: Some(Compare_oneof_target_union::value(value.into())), 104 | ..Compare::default() 105 | }; 106 | self.proto.compare.push(compare); 107 | self 108 | } 109 | 110 | /// If compare success, then execute the specified operations. 111 | #[inline] 112 | #[must_use] 113 | pub fn and_then(mut self, op: O) -> Self 114 | where 115 | O: Into, 116 | { 117 | self.proto.success.push(op.into().into()); 118 | self 119 | } 120 | 121 | /// If compare fail, then execute the specified operations. 122 | #[inline] 123 | #[must_use] 124 | pub fn or_else(mut self, op: O) -> Self 125 | where 126 | O: Into, 127 | { 128 | self.proto.failure.push(op.into().into()); 129 | self 130 | } 131 | 132 | /// Get the success operations from `TxnRequest`. 133 | #[inline] 134 | pub fn get_success_operations(&self) -> Vec { 135 | self.proto.success.to_vec() 136 | } 137 | 138 | /// Get the failure operations from `TxnRequest`. 139 | #[inline] 140 | pub fn get_failure_operations(&self) -> Vec { 141 | self.proto.failure.to_vec() 142 | } 143 | } 144 | 145 | impl Default for EtcdTxnRequest { 146 | #[inline] 147 | fn default() -> Self { 148 | Self::new() 149 | } 150 | } 151 | 152 | impl From for TxnRequest { 153 | #[inline] 154 | fn from(e: EtcdTxnRequest) -> Self { 155 | e.proto 156 | } 157 | } 158 | 159 | /// Transaction Operation. 160 | pub enum TxnOp { 161 | /// Range fetching operation. 162 | Range(EtcdRangeRequest), 163 | /// Put operation. 164 | Put(EtcdPutRequest), 165 | /// Delete operation. 166 | Delete(EtcdDeleteRequest), 167 | /// Txn operation. 168 | Txn(EtcdTxnRequest), 169 | } 170 | 171 | impl From for RequestOp { 172 | fn from(e: TxnOp) -> Self { 173 | let mut request_op = Self::new(); 174 | match e { 175 | TxnOp::Range(req) => request_op.set_request_range(req.into()), 176 | TxnOp::Put(req) => request_op.set_request_put(req.into()), 177 | TxnOp::Delete(req) => request_op.set_request_delete_range(req.into()), 178 | TxnOp::Txn(req) => request_op.set_request_txn(req.into()), 179 | } 180 | request_op 181 | } 182 | } 183 | 184 | impl From for TxnOp { 185 | fn from(req: EtcdRangeRequest) -> Self { 186 | Self::Range(req) 187 | } 188 | } 189 | 190 | impl From for TxnOp { 191 | fn from(req: EtcdPutRequest) -> Self { 192 | Self::Put(req) 193 | } 194 | } 195 | 196 | impl From for TxnOp { 197 | fn from(req: EtcdDeleteRequest) -> Self { 198 | Self::Delete(req) 199 | } 200 | } 201 | 202 | impl From for TxnOp { 203 | fn from(req: EtcdTxnRequest) -> Self { 204 | Self::Txn(req) 205 | } 206 | } 207 | 208 | /// Transaction Comparation. 209 | #[non_exhaustive] 210 | #[derive(Clone, Copy)] 211 | pub enum TxnCmp { 212 | /// Equal comparation. 213 | Equal, 214 | /// NotEqual comparation. 215 | NotEqual, 216 | /// Greater comparation. 217 | Greater, 218 | /// Less comparation. 219 | Less, 220 | } 221 | 222 | impl From for Compare_CompareResult { 223 | #[inline] 224 | fn from(e: TxnCmp) -> Self { 225 | match e { 226 | TxnCmp::Equal => Self::EQUAL, 227 | TxnCmp::NotEqual => Self::NOT_EQUAL, 228 | TxnCmp::Greater => Self::GREATER, 229 | TxnCmp::Less => Self::LESS, 230 | } 231 | } 232 | } 233 | 234 | /// Response transaction operation. 235 | #[non_exhaustive] 236 | pub enum TxnOpResponse { 237 | /// Range reponse. 238 | Range(EtcdRangeResponse), 239 | /// Put reponse. 240 | Put(EtcdPutResponse), 241 | /// Delete response. 242 | Delete(EtcdDeleteResponse), 243 | /// Transaction response. 244 | Txn(EtcdTxnResponse), 245 | } 246 | 247 | impl From for TxnOpResponse { 248 | #[inline] 249 | fn from(mut resp: ResponseOp) -> Self { 250 | match resp 251 | .response 252 | .take() 253 | .unwrap_or_else(|| panic!("Fail to get TxnOpResponse")) 254 | { 255 | ResponseOp_oneof_response::response_range(r) => Self::Range(From::from(r)), 256 | ResponseOp_oneof_response::response_put(r) => Self::Put(From::from(r)), 257 | ResponseOp_oneof_response::response_txn(r) => Self::Txn(From::from(r)), 258 | ResponseOp_oneof_response::response_delete_range(r) => Self::Delete(From::from(r)), 259 | } 260 | } 261 | } 262 | 263 | /// Response for transaction. 264 | #[derive(Debug, Clone)] 265 | pub struct EtcdTxnResponse { 266 | /// Etcd transaction operations request. 267 | proto: TxnResponse, 268 | } 269 | 270 | impl EtcdTxnResponse { 271 | /// Takes the header out of response, leaving a `None` in its place. 272 | #[inline] 273 | pub fn take_header(&mut self) -> Option { 274 | self.proto.header.take().map(From::from) 275 | } 276 | 277 | /// Returns `true` if the compare evaluated to true, and `false` otherwise. 278 | #[inline] 279 | pub const fn is_success(&self) -> bool { 280 | self.proto.succeeded 281 | } 282 | 283 | /// Takes the responses corresponding to the results from applying the 284 | /// Success block if succeeded is true or the Failure if succeeded is false. 285 | #[inline] 286 | pub fn take_responses(&mut self) -> Vec { 287 | let responses = std::mem::take(&mut self.proto.responses); 288 | 289 | responses.into_iter().map(From::from).collect() 290 | } 291 | 292 | /// Takes the responses corresponding to the results from applying the 293 | /// Success block if succeeded is true or the Failure if succeeded is false. 294 | #[inline] 295 | pub fn get_responses(&self) -> Vec { 296 | self.proto 297 | .responses 298 | .clone() 299 | .into_iter() 300 | .map(From::from) 301 | .collect() 302 | } 303 | } 304 | 305 | impl From for EtcdTxnResponse { 306 | #[inline] 307 | fn from(resp: TxnResponse) -> Self { 308 | Self { proto: resp } 309 | } 310 | } 311 | -------------------------------------------------------------------------------- /etcd-client/src/lazy.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | use std::ops::{Deref, DerefMut}; 3 | 4 | use async_trait::async_trait; 5 | use smol::lock::{RwLock, RwLockReadGuard, RwLockWriteGuard}; 6 | 7 | use crate::Result; 8 | 9 | /// A lock over lazily-instantiated data. 10 | /// 11 | /// A `Lazy` holding a `T` is created from a closure returning a `T`. 12 | /// This closure is not run until the `.read()` method is called. 13 | pub struct Lazy { 14 | /// The (locked, optional) contents of this Lazy. Initially `None`. 15 | inner: RwLock>, 16 | /// A thunk that will instantiate a `T`. 17 | factory: Box T + Sync + Send>, 18 | } 19 | 20 | impl Lazy { 21 | /// Create a new `Lazy` from a closure. 22 | /// 23 | /// This closure will be called lazily on the first request to access to the 24 | /// `Lazy`'s contents. 25 | pub fn new T + Sync + Send>(factory: F) -> Self { 26 | Self { 27 | inner: RwLock::>::default(), 28 | factory: Box::new(factory), 29 | } 30 | } 31 | 32 | /// Acquire a read lock to the contents of this `Lazy`. 33 | /// 34 | /// If necessary, first instantiates these contents. 35 | pub async fn read(&self) -> LazyReadGuard<'_, T> { 36 | { 37 | let lock = self.inner.read().await; 38 | if lock.is_some() { 39 | return LazyReadGuard::new(lock); 40 | } 41 | } 42 | { 43 | let mut lock = self.inner.write().await; 44 | match *lock { 45 | None => { 46 | let value = (self.factory)(); 47 | lock.replace(value); 48 | } 49 | Some(_) => { 50 | // Someone beat us here! (They noticed that `self.inner` was 51 | // `None` at about the same time.) 52 | // That's fine; we'll use their value. 53 | } 54 | } 55 | } 56 | LazyReadGuard::new(self.inner.read().await) 57 | } 58 | 59 | /// Acquire a write lock to the contents of this `Lazy`. 60 | /// 61 | /// If necessary, first instantiates these contents. 62 | pub async fn write(&self) -> LazyWriteGuard<'_, T> { 63 | let mut lock = self.inner.write().await; 64 | if lock.is_none() { 65 | let value = (self.factory)(); 66 | lock.replace(value); 67 | } 68 | LazyWriteGuard::new(lock) 69 | } 70 | } 71 | 72 | /// Trait for values that can be asynchrously cleaned up. 73 | #[async_trait] 74 | pub trait Shutdown { 75 | /// Clean up `self`. 76 | /// 77 | /// Somewhat analogous to `Drop`, but with a few key differences: 78 | /// - `shutdown()` is async 79 | /// - doesn't happen automatically 80 | async fn shutdown(&mut self) -> Result<()>; 81 | } 82 | 83 | impl Lazy { 84 | /// evict 85 | pub async fn evict(&self) -> Result<()> { 86 | let mut lock = self.inner.write().await; 87 | if let Some(value) = lock.as_mut() { 88 | value.shutdown().await?; 89 | } 90 | lock.take(); 91 | Ok(()) 92 | } 93 | } 94 | 95 | /// The result of `Lazy.read()`: holds a read lock over `T` and derefs to `T`. 96 | /// 97 | /// Much like a `tokio::sync::RwLockReadGuard` (which this type wraps), a 98 | /// `LazyReadGuard` will release a lock permit on `Drop`. 99 | pub struct LazyReadGuard<'a, T> { 100 | /// inner 101 | inner: RwLockReadGuard<'a, Option>, 102 | } 103 | 104 | impl<'a, T> LazyReadGuard<'a, T> { 105 | /// Create a new `LazyReadGuard<'_, T>` from the 106 | /// `tokio::sync::RwLockReadGuard<'_, Option>` where the `Option` is 107 | /// known to be `Some`. 108 | fn new(inner: RwLockReadGuard<'a, Option>) -> Self { 109 | assert!( 110 | inner.is_some(), 111 | "Should only instantiate LazyReadGuard with RwLockReadGuard over Some(_)." 112 | ); 113 | LazyReadGuard { inner } 114 | } 115 | } 116 | 117 | impl Deref for LazyReadGuard<'_, T> { 118 | type Target = T; 119 | 120 | fn deref(&self) -> &Self::Target { 121 | self.inner 122 | .as_ref() 123 | .unwrap_or_else(|| panic!("Impossible case, Lazy inner should not be None")) 124 | } 125 | } 126 | 127 | /// The result of `Lazy.write()`: holds a write lock over `T` and derefs to `T`. 128 | /// 129 | /// Much like a `tokio::sync::RwLockWriteGuard` (which this type wraps), a 130 | /// `LazyWriteGuard` will release a lock permit on `Drop`. 131 | pub struct LazyWriteGuard<'a, T> { 132 | /// inner 133 | inner: RwLockWriteGuard<'a, Option>, 134 | } 135 | 136 | impl<'a, T> LazyWriteGuard<'a, T> { 137 | /// Create a new `LazyWriteGuard<'_, T>` from the 138 | /// `tokio::sync::RwLockWriteGuard<'_, Option>` where the `Option` is 139 | /// known to be `Some`. 140 | fn new(inner: RwLockWriteGuard<'a, Option>) -> Self { 141 | assert!( 142 | inner.is_some(), 143 | "Should only instantiate LazyWriteGuard with RwLockWriteGuard over Some(_)." 144 | ); 145 | LazyWriteGuard { inner } 146 | } 147 | } 148 | 149 | impl Deref for LazyWriteGuard<'_, T> { 150 | type Target = T; 151 | 152 | fn deref(&self) -> &Self::Target { 153 | self.inner 154 | .as_ref() 155 | .unwrap_or_else(|| panic!("Fail to deref on LazyWriteGuard")) 156 | } 157 | } 158 | 159 | impl DerefMut for LazyWriteGuard<'_, T> { 160 | fn deref_mut(&mut self) -> &mut Self::Target { 161 | self.inner 162 | .as_mut() 163 | .unwrap_or_else(|| panic!("Fail to deref_mut on LazyWriteGuard")) 164 | } 165 | } 166 | 167 | #[cfg(test)] 168 | mod tests { 169 | use super::*; 170 | use std::ptr; 171 | use std::sync::atomic::{AtomicUsize, Ordering}; 172 | use std::sync::Arc; 173 | 174 | /// Ordering for atomic loads/stores. 175 | /// 176 | /// It's arbitrary, as we don't run >1 task at a time in these tests. 177 | const ORDER: Ordering = Ordering::SeqCst; 178 | 179 | /// Test getting a read lock from a `Lazy`. 180 | #[test] 181 | fn test_lock_read() { 182 | smol::block_on(async { 183 | let calls = Arc::new(AtomicUsize::default()); 184 | 185 | let lazy = { 186 | let calls = Arc::clone(&calls); 187 | Lazy::new(move || { 188 | calls.fetch_add(1, ORDER); 189 | true 190 | }) 191 | }; 192 | assert_eq!(calls.load(ORDER), 0, "Expected thunk not called."); 193 | 194 | let lock = lazy.read().await; 195 | assert!(*lock, "Expected read() == thunk()."); 196 | assert_eq!(calls.load(ORDER), 1, "Expected thunk called *once*."); 197 | 198 | // Should be able to acquire many read permits at once 199 | let lock2 = lazy.read().await; 200 | assert!( 201 | ptr::eq(&(*lock), &(*lock2)), 202 | "Expected read() to give *same reference*." 203 | ); 204 | assert_eq!(calls.load(ORDER), 1, "Expected thunk *still* called once."); 205 | }); 206 | } 207 | 208 | /// Test getting a write lock from a `Lazy`. 209 | #[test] 210 | fn test_lock_write() { 211 | smol::block_on(async { 212 | let calls = Arc::new(AtomicUsize::default()); 213 | 214 | let lazy = { 215 | let calls = Arc::clone(&calls); 216 | Lazy::new(move || { 217 | calls.fetch_add(1, ORDER); 218 | true 219 | }) 220 | }; 221 | assert_eq!(calls.load(ORDER), 0, "Expected thunk not called."); 222 | 223 | { 224 | // need to let the write lock go out of scope before we can read 225 | let mut lock = lazy.write().await; 226 | assert!(*lock, "Expected write() == thunk()."); 227 | assert_eq!(calls.load(ORDER), 1, "Expected thunk called *once*."); 228 | 229 | *lock = false; 230 | } 231 | 232 | let lock = lazy.read().await; 233 | assert_eq!(calls.load(ORDER), 1, "Expected thunk *still* called once."); 234 | assert!(!(*lock), "Expected read() to have been changed."); 235 | }); 236 | } 237 | 238 | #[test] 239 | fn test_lock_evict() { 240 | struct Test { 241 | shutdown: Arc, 242 | } 243 | 244 | #[async_trait] 245 | impl Shutdown for Test { 246 | async fn shutdown(&mut self) -> Result<()> { 247 | self.shutdown.fetch_add(1, ORDER); 248 | Ok(()) 249 | } 250 | } 251 | 252 | smol::block_on(async { 253 | let init_calls = Arc::new(AtomicUsize::default()); 254 | let shutdown_calls = Arc::new(AtomicUsize::default()); 255 | 256 | let lazy = { 257 | let shutdown_calls = Arc::clone(&shutdown_calls); 258 | let init_calls = Arc::clone(&init_calls); 259 | Lazy::new(move || { 260 | init_calls.fetch_add(1, ORDER); 261 | Test { 262 | shutdown: Arc::clone(&shutdown_calls), 263 | } 264 | }) 265 | }; 266 | assert_eq!(init_calls.load(ORDER), 0, "Expected init not called."); 267 | assert_eq!( 268 | shutdown_calls.load(ORDER), 269 | 0, 270 | "Expected shutdown not called." 271 | ); 272 | 273 | { 274 | let lock = lazy.read().await; 275 | let _: &Test = &*lock; 276 | assert_eq!(init_calls.load(ORDER), 1, "Expected init called once."); 277 | assert_eq!( 278 | shutdown_calls.load(ORDER), 279 | 0, 280 | "Expected shutdown not called." 281 | ); 282 | } 283 | 284 | lazy.evict() 285 | .await 286 | .unwrap_or_else(|e| panic!("eviction should not fail, the error is {}", e)); 287 | 288 | { 289 | let lock = lazy.read().await; 290 | let _: &Test = &*lock; 291 | assert_eq!(init_calls.load(ORDER), 2, "Expected init called twice."); 292 | assert_eq!( 293 | shutdown_calls.load(ORDER), 294 | 1, 295 | "Expected shutdown called once." 296 | ); 297 | } 298 | 299 | // Two evictions in a row 300 | lazy.evict() 301 | .await 302 | .unwrap_or_else(|e| panic!("eviction should fail, the error is {}", e)); 303 | lazy.evict() 304 | .await 305 | .unwrap_or_else(|e| panic!("eviction should not fail, the error is {}", e)); // should be a no-op 306 | assert_eq!( 307 | init_calls.load(ORDER), 308 | 2, 309 | "Expected init *still* called twice." 310 | ); 311 | }); 312 | } 313 | } 314 | -------------------------------------------------------------------------------- /etcd-client/src/lease/grant.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use crate::protos::rpc::{LeaseGrantRequest, LeaseGrantResponse}; 4 | use crate::ResponseHeader; 5 | use clippy_utilities::Cast; 6 | 7 | /// Request for granting lease. 8 | #[derive(Debug, Clone)] 9 | pub struct EtcdLeaseGrantRequest { 10 | /// Etcd lease grant request. 11 | proto: LeaseGrantRequest, 12 | } 13 | 14 | impl EtcdLeaseGrantRequest { 15 | /// Creates a new `LeaseGrantRequest` with the specified TTL. 16 | #[inline] 17 | #[must_use] 18 | pub fn new(ttl: Duration) -> Self { 19 | let proto = LeaseGrantRequest { 20 | TTL: ttl.as_secs().cast(), 21 | ID: 0, 22 | ..LeaseGrantRequest::default() 23 | }; 24 | 25 | Self { proto } 26 | } 27 | 28 | /// Set custom lease ID. 29 | #[inline] 30 | pub fn set_id(&mut self, id: u64) { 31 | self.proto.ID = id.cast(); 32 | } 33 | } 34 | 35 | impl From for LeaseGrantRequest { 36 | #[inline] 37 | fn from(e: EtcdLeaseGrantRequest) -> Self { 38 | e.proto 39 | } 40 | } 41 | 42 | #[derive(Debug)] 43 | /// `LeaseGrant` Response 44 | pub struct EtcdLeaseGrantResponse { 45 | /// Etcd lease grant response. 46 | proto: LeaseGrantResponse, 47 | } 48 | 49 | impl EtcdLeaseGrantResponse { 50 | /// Takes the header out of response, leaving a `None` in its place. 51 | #[inline] 52 | pub fn take_header(&mut self) -> Option { 53 | self.proto.header.take().map(From::from) 54 | } 55 | 56 | /// Gets the lease ID for the granted lease. 57 | #[inline] 58 | pub fn id(&self) -> u64 { 59 | self.proto.ID.cast() 60 | } 61 | 62 | /// Gets the server chosen lease time-to-live in seconds. 63 | #[inline] 64 | pub fn ttl(&self) -> u64 { 65 | self.proto.TTL.cast() 66 | } 67 | } 68 | 69 | impl From for EtcdLeaseGrantResponse { 70 | #[inline] 71 | fn from(resp: LeaseGrantResponse) -> Self { 72 | Self { proto: resp } 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /etcd-client/src/lease/keep_alive.rs: -------------------------------------------------------------------------------- 1 | use crate::protos::rpc::{LeaseKeepAliveRequest, LeaseKeepAliveResponse}; 2 | use crate::ResponseHeader; 3 | use clippy_utilities::Cast; 4 | 5 | /// Request for refreshing lease. 6 | #[derive(Debug)] 7 | pub struct EtcdLeaseKeepAliveRequest { 8 | /// Etcd lease keep alive request. 9 | proto: LeaseKeepAliveRequest, 10 | } 11 | 12 | impl EtcdLeaseKeepAliveRequest { 13 | /// Creates a new `LeaseKeepAliveRequest` which will refresh the specified lease. 14 | #[inline] 15 | #[must_use] 16 | pub fn new(id: u64) -> Self { 17 | let proto = LeaseKeepAliveRequest { 18 | ID: id.cast(), 19 | ..LeaseKeepAliveRequest::default() 20 | }; 21 | 22 | Self { proto } 23 | } 24 | } 25 | 26 | impl From for LeaseKeepAliveRequest { 27 | #[inline] 28 | fn from(e: EtcdLeaseKeepAliveRequest) -> Self { 29 | e.proto 30 | } 31 | } 32 | 33 | #[derive(Debug)] 34 | /// Response for refreshing lease. 35 | pub struct EtcdLeaseKeepAliveResponse { 36 | /// Etcd lease keep alive reponse. 37 | proto: LeaseKeepAliveResponse, 38 | } 39 | 40 | impl EtcdLeaseKeepAliveResponse { 41 | /// Takes the header out of response, leaving a `None` in its place. 42 | #[inline] 43 | pub fn take_header(&mut self) -> Option { 44 | self.proto.header.take().map(From::from) 45 | } 46 | 47 | /// Gets the lease ID for the refreshed lease. 48 | #[inline] 49 | pub fn id(&self) -> u64 { 50 | self.proto.ID.cast() 51 | } 52 | 53 | /// Get the new TTL for the lease. 54 | #[inline] 55 | pub fn ttl(&self) -> u64 { 56 | self.proto.TTL.cast() 57 | } 58 | } 59 | 60 | impl From for EtcdLeaseKeepAliveResponse { 61 | #[inline] 62 | fn from(resp: LeaseKeepAliveResponse) -> Self { 63 | Self { proto: resp } 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /etcd-client/src/lease/mod.rs: -------------------------------------------------------------------------------- 1 | //! Leases are a mechanism for detecting client liveness. 2 | //! The cluster grants leases with a time-to-live. 3 | //! A lease expires if the etcd cluster does not receive a keepAlive within a given TTL period. 4 | //! 5 | //! # Examples 6 | //! 7 | //! Grant lease and keep lease alive 8 | //! 9 | //! ```no_run 10 | //! use std::time::Duration; 11 | //! 12 | //! use etcd_client::*; 13 | //! 14 | //! fn main() -> Result<()> { 15 | //! smol::block_on(async { 16 | //! let config = ClientConfig::new(vec!["http://127.0.0.1:2379".to_owned()], None, 32, true); 17 | //! let client = Client::connect(config).await?; 18 | //! 19 | //! let key = "foo"; 20 | //! 21 | //! // grant lease 22 | //! let lease = client 23 | //! .lease() 24 | //! .grant(EtcdLeaseGrantRequest::new(Duration::from_secs(3))) 25 | //! .await?; 26 | //! 27 | //! let lease_id = lease.id(); 28 | //! 29 | //! // set key with lease 30 | //! client 31 | //! .kv() 32 | //! .put({ 33 | //! let mut req = EtcdPutRequest::new(key, "bar"); 34 | //! req.set_lease(lease_id); 35 | //! 36 | //! req 37 | //! }) 38 | //! .await?; 39 | //! 40 | //! { 41 | //! client 42 | //! .lease() 43 | //! .keep_alive(EtcdLeaseKeepAliveRequest::new(lease_id)) 44 | //! .await; 45 | //! } 46 | //! 47 | //! // not necessary, but will cleanly shut down the long-running tasks 48 | //! // spawned by the client 49 | //! client.shutdown().await; 50 | //! 51 | //! Ok(()) 52 | //! }) 53 | //! } 54 | //! ``` 55 | 56 | use std::sync::Arc; 57 | 58 | use async_trait::async_trait; 59 | use futures::future::FutureExt; 60 | use futures::stream::StreamExt; 61 | 62 | use futures::prelude::*; 63 | 64 | use smol::channel::{unbounded, Receiver, Sender}; 65 | use smol::stream::Stream; 66 | 67 | pub use grant::{EtcdLeaseGrantRequest, EtcdLeaseGrantResponse}; 68 | pub use keep_alive::{EtcdLeaseKeepAliveRequest, EtcdLeaseKeepAliveResponse}; 69 | pub use revoke::{EtcdLeaseRevokeRequest, EtcdLeaseRevokeResponse}; 70 | 71 | use crate::lazy::{Lazy, Shutdown}; 72 | use crate::protos::rpc; 73 | use crate::protos::rpc_grpc::LeaseClient; 74 | use crate::retryable; 75 | use crate::Result; 76 | use crate::CURRENT_INTERVAL_ENV_KEY; 77 | use crate::CURRENT_INTERVAL_VALUE; 78 | use crate::INITIAL_INTERVAL_ENV_KEY; 79 | use crate::INITIAL_INTERVAL_VALUE; 80 | use crate::MAX_ELAPSED_TIME_ENV_KEY; 81 | use crate::MAX_ELAPSED_TIME_VALUE; 82 | use backoff::ExponentialBackoff; 83 | use std::time::Duration; 84 | 85 | use grpcio::WriteFlags; 86 | 87 | /// Grant mod for granting lease operations. 88 | mod grant; 89 | /// Keep alive mod for keeping lease operations. 90 | mod keep_alive; 91 | /// Revoke mod for revoking lease operations. 92 | mod revoke; 93 | 94 | /// `LeaseKeepAliveTunnel` is a reusable connection for `Lease Keep Alive` operation. 95 | /// The underlying `gRPC` method is Bi-directional streaming. 96 | struct LeaseKeepAliveTunnel { 97 | /// A channel sender to send keep alive request. 98 | req_sender: Sender, 99 | /// A channel receiver to receive keep alive response. 100 | resp_receiver: Option>>, 101 | /// A channel sender to send shutdown request. 102 | shutdown: Option>, 103 | } 104 | 105 | impl LeaseKeepAliveTunnel { 106 | /// Creates a new `LeaseClient`. 107 | fn new(client: &LeaseClient) -> Self { 108 | let (req_sender, req_receiver) = unbounded::(); 109 | let (resp_sender, resp_receiver) = unbounded::>(); 110 | 111 | let (shutdown_tx, shutdown_rx) = unbounded(); 112 | let shutdown_reponse = shutdown_rx.clone(); 113 | // Monitor inbound lease response and transfer to the receiver 114 | let (mut client_req_sender, mut client_resp_receiver) = client 115 | .lease_keep_alive() 116 | .unwrap_or_else(|e| panic!("Fail to lease_keep_alive, the error is: {}", e)); 117 | smol::spawn(async move { 118 | let mut shutdown_rx = shutdown_rx.into_future().fuse(); 119 | #[allow(clippy::mut_mut)] 120 | while let Ok(req) = req_receiver.recv().await { 121 | let lease_keep_alive_request: rpc::LeaseKeepAliveRequest = req.into(); 122 | 123 | futures::select! { 124 | res = client_req_sender.send( 125 | (lease_keep_alive_request, WriteFlags::default()) 126 | ).fuse() => res.unwrap_or_else( 127 | |e| panic!("Fail to send request, the error is {}", e) 128 | ), 129 | _ = shutdown_rx => return 130 | }; 131 | } 132 | }) 133 | .detach(); 134 | 135 | smol::spawn(async move { 136 | let mut shutdown_rx = shutdown_reponse.into_future().fuse(); 137 | loop { 138 | #[allow(clippy::mut_mut)] 139 | let resp = futures::select! { 140 | resp_opt = client_resp_receiver.next().fuse() => if let Some(resp)=resp_opt { 141 | resp 142 | } else { 143 | return; 144 | }, 145 | _ = shutdown_rx => return 146 | }; 147 | match resp { 148 | Ok(resp) => { 149 | resp_sender 150 | .send(Ok(From::from(resp))) 151 | .await 152 | .unwrap_or_else(|e| { 153 | panic!("failed to send response, the error is: {}", e) 154 | }); 155 | } 156 | Err(e) => { 157 | resp_sender 158 | .send(Err(From::from(e))) 159 | .await 160 | .unwrap_or_else(|e| { 161 | panic!("failed to send response, the error is: {}", e) 162 | }); 163 | } 164 | }; 165 | } 166 | }) 167 | .detach(); 168 | 169 | Self { 170 | req_sender, 171 | resp_receiver: Some(resp_receiver), 172 | shutdown: Some(shutdown_tx), 173 | } 174 | } 175 | 176 | /// Takes `resp_receiver` 177 | fn take_resp_receiver(&mut self) -> Receiver> { 178 | self.resp_receiver 179 | .take() 180 | .unwrap_or_else(|| panic!("failed fetch LeaseKeepAliveResponse")) 181 | } 182 | } 183 | 184 | #[async_trait] 185 | impl Shutdown for LeaseKeepAliveTunnel { 186 | async fn shutdown(&mut self) -> Result<()> { 187 | if let Some(shutdown) = self.shutdown.take() { 188 | shutdown.send(()).await?; 189 | } 190 | Ok(()) 191 | } 192 | } 193 | 194 | /// Lease client. 195 | #[derive(Clone)] 196 | pub struct Lease { 197 | /// Etcd lease client provides lease related operations. 198 | client: LeaseClient, 199 | /// A tunnel used to communicate with Etcd server to keep lease alive. 200 | keep_alive_tunnel: Arc>, 201 | } 202 | 203 | impl Lease { 204 | /// Creates a new `LeaseClient`. 205 | pub(crate) fn new(client: LeaseClient) -> Self { 206 | let keep_alive_tunnel = { 207 | let client = client.clone(); 208 | Arc::new(Lazy::new(move || { 209 | LeaseKeepAliveTunnel::new(&client.clone()) 210 | })) 211 | }; 212 | Self { 213 | client, 214 | keep_alive_tunnel, 215 | } 216 | } 217 | 218 | /// Performs a lease granting operation. 219 | /// # Errors 220 | /// 221 | /// Will return `Err` if tunnel is shut down. 222 | #[inline] 223 | pub async fn grant(&mut self, req: EtcdLeaseGrantRequest) -> Result { 224 | let resp = retryable!(|| async { 225 | let resp = self.client.lease_grant_async(&req.clone().into())?; 226 | Ok(From::from(resp.await?)) 227 | }); 228 | Ok(resp) 229 | } 230 | 231 | /// Performs a lease revoking operation. 232 | /// # Errors 233 | /// 234 | /// Will return `Err` if tunnel is shut down. 235 | #[inline] 236 | pub async fn revoke(&mut self, req: EtcdLeaseRevokeRequest) -> Result { 237 | let resp = retryable!(|| async { 238 | let resp = self.client.lease_revoke_async(&req.clone().into())?; 239 | Ok(From::from(resp.await?)) 240 | }); 241 | Ok(resp) 242 | } 243 | 244 | /// Fetches keep alive response stream. 245 | #[inline] 246 | pub async fn keep_alive_responses( 247 | &mut self, 248 | ) -> impl Stream> { 249 | self.keep_alive_tunnel.write().await.take_resp_receiver() 250 | } 251 | 252 | /// Performs a lease refreshing operation. 253 | /// # Errors 254 | /// 255 | /// Will return `Err` if tunnel is shut down. 256 | #[inline] 257 | pub async fn keep_alive(&mut self, req: EtcdLeaseKeepAliveRequest) -> Result<()> { 258 | self.keep_alive_tunnel 259 | .write() 260 | .await 261 | .req_sender 262 | .send(req) 263 | .await?; 264 | Ok(()) 265 | } 266 | 267 | /// Shut down the running lease task, if any. 268 | /// 269 | /// # Errors 270 | /// 271 | /// Will return `Err` if tunnel is shut down. 272 | #[inline] 273 | pub async fn shutdown(&mut self) -> Result<()> { 274 | // If we implemented `Shutdown` for this, callers would need it in scope in 275 | // order to call this method. 276 | self.keep_alive_tunnel.evict().await 277 | } 278 | } 279 | -------------------------------------------------------------------------------- /etcd-client/src/lease/revoke.rs: -------------------------------------------------------------------------------- 1 | use crate::protos::rpc::{LeaseRevokeRequest, LeaseRevokeResponse}; 2 | use crate::ResponseHeader; 3 | use clippy_utilities::Cast; 4 | 5 | /// Request for revoking lease. 6 | #[derive(Debug, Clone)] 7 | pub struct EtcdLeaseRevokeRequest { 8 | /// Etcd lease revoke request. 9 | proto: LeaseRevokeRequest, 10 | } 11 | 12 | impl EtcdLeaseRevokeRequest { 13 | /// Creates a new `LeaseRevokeRequest` which will revoke the specified lease. 14 | #[inline] 15 | #[must_use] 16 | pub fn new(id: u64) -> Self { 17 | let proto = LeaseRevokeRequest { 18 | ID: id.cast(), 19 | ..LeaseRevokeRequest::default() 20 | }; 21 | 22 | Self { proto } 23 | } 24 | } 25 | 26 | impl From for LeaseRevokeRequest { 27 | #[inline] 28 | fn from(e: EtcdLeaseRevokeRequest) -> Self { 29 | e.proto 30 | } 31 | } 32 | 33 | /// Response for revoking lease. 34 | #[derive(Debug)] 35 | pub struct EtcdLeaseRevokeResponse { 36 | /// Etcd lease revoke response. 37 | proto: LeaseRevokeResponse, 38 | } 39 | 40 | impl EtcdLeaseRevokeResponse { 41 | /// Takes the header out of response, leaving a `None` in its place. 42 | #[inline] 43 | pub fn take_header(&mut self) -> Option { 44 | self.proto.header.take().map(From::from) 45 | } 46 | } 47 | 48 | impl From for EtcdLeaseRevokeResponse { 49 | #[inline] 50 | fn from(resp: LeaseRevokeResponse) -> Self { 51 | Self { proto: resp } 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /etcd-client/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! An asynchronously etcd client for Rust. 2 | //! 3 | //! etcd-client supports etcd v3 API and async/await syntax. 4 | //! 5 | //! # Examples 6 | //! 7 | //! A simple key-value read and write operation: 8 | //! 9 | //! ```no_run 10 | //! use etcd_client::*; 11 | //! 12 | //! fn main() -> Result<()> { 13 | //! smol::block_on(async { 14 | //! let config = 15 | //! ClientConfig::new(vec!["http://127.0.0.1:2379".to_owned()], None, 32, true); 16 | //! let client = Client::connect(config).await?; 17 | //! 18 | //! // print out all received watch responses 19 | //! let mut inbound = client.watch(KeyRange::key("foo")).await.unwrap(); 20 | //! smol::spawn(async move { 21 | //! while let Ok(resp) = inbound.recv().await { 22 | //! println!("watch response: {:?}", resp); 23 | //! } 24 | //! }) 25 | //! .detach(); 26 | //! 27 | //! let key = "foo"; 28 | //! client.kv().put(EtcdPutRequest::new(key, "bar")).await?; 29 | //! client.kv().put(EtcdPutRequest::new(key, "baz")).await?; 30 | //! client 31 | //! .kv() 32 | //! .delete(EtcdDeleteRequest::new(KeyRange::key(key))) 33 | //! .await?; 34 | //! 35 | //! // not necessary, but will cleanly shut down the long-running tasks 36 | //! // spawned by the client 37 | //! client.shutdown().await.unwrap(); 38 | //! 39 | //! Ok(()) 40 | //! }) 41 | //! } 42 | //! ``` 43 | 44 | #![deny( 45 | // The following are allowed by default lints according to 46 | // https://doc.rust-lang.org/rustc/lints/listing/allowed-by-default.html 47 | anonymous_parameters, 48 | bare_trait_objects, 49 | // box_pointers, 50 | // elided_lifetimes_in_paths, // allow anonymous lifetime 51 | missing_copy_implementations, 52 | // missing_debug_implementations, 53 | missing_docs, // TODO: add documents 54 | single_use_lifetimes, // TODO: fix lifetime names only used once 55 | trivial_casts, // TODO: remove trivial casts in code 56 | trivial_numeric_casts, 57 | // unreachable_pub, allow clippy::redundant_pub_crate lint instead 58 | unsafe_code, 59 | unstable_features, 60 | unused_extern_crates, 61 | unused_import_braces, 62 | unused_qualifications, 63 | // unused_results, 64 | variant_size_differences, 65 | 66 | warnings, // treat all wanings as errors 67 | 68 | clippy::all, 69 | clippy::restriction, 70 | clippy::pedantic, 71 | clippy::nursery, 72 | clippy::cargo 73 | )] 74 | #![allow( 75 | // Some explicitly allowed Clippy lints, must have clear reason to allow 76 | clippy::blanket_clippy_restriction_lints, // allow denying clippy::restriction directly 77 | clippy::implicit_return, // actually omitting the return keyword is idiomatic Rust code 78 | clippy::module_name_repetitions, // repeation of module name in a struct name is not big deal 79 | clippy::multiple_crate_versions, // multi-version dependency crates is not able to fix 80 | clippy::panic, // allow debug_assert, panic in production code 81 | clippy::shadow_same, // shadow a common pattern in Rust code 82 | clippy::shadow_unrelated, 83 | clippy::shadow_reuse, 84 | clippy::same_name_method, // generated proto has same name func on trait and struct 85 | clippy::separated_literal_suffix, // conflict with unsepatated 86 | clippy::mod_module_files, // conflict with self_named_module_files 87 | )] 88 | 89 | pub use auth::{Auth, EtcdAuthenticateRequest, EtcdAuthenticateResponse}; 90 | pub use client::{Client, ClientConfig}; 91 | pub use clippy_utilities::OverflowArithmetic; 92 | pub use error::EtcdError; 93 | pub use kv::{ 94 | EtcdDeleteRequest, EtcdDeleteResponse, EtcdGetRequest, EtcdGetResponse, EtcdKeyValue, 95 | EtcdPutRequest, EtcdPutResponse, EtcdRangeRequest, EtcdRangeResponse, EtcdTxnRequest, 96 | EtcdTxnResponse, KeyRange, Kv, TxnCmp, TxnOpResponse, 97 | }; 98 | pub use lease::{ 99 | EtcdLeaseGrantRequest, EtcdLeaseGrantResponse, EtcdLeaseKeepAliveRequest, 100 | EtcdLeaseKeepAliveResponse, EtcdLeaseRevokeRequest, EtcdLeaseRevokeResponse, Lease, 101 | }; 102 | pub use lock::Lock; 103 | pub use lock::{EtcdLockRequest, EtcdLockResponse, EtcdUnlockRequest, EtcdUnlockResponse}; 104 | pub use response_header::ResponseHeader; 105 | pub use watch::{EtcdWatchRequest, EtcdWatchResponse, Event, EventType, Watch}; 106 | 107 | use backoff::{future::Sleeper, Notify}; 108 | use std::{future::Future, pin::Pin, time::Duration}; 109 | 110 | /// Auth mod for authentication operations. 111 | mod auth; 112 | /// Client mod for Etcd client operations. 113 | mod client; 114 | /// Error mod for Etcd client error. 115 | mod error; 116 | /// Kv mod for key-value pairs operations. 117 | mod kv; 118 | /// Lazy mod for Etcd client lazy operations. 119 | mod lazy; 120 | /// Lease mod for lease operations. 121 | mod lease; 122 | /// Lock mod for lock operations. 123 | mod lock; 124 | /// Etcd client request and response protos 125 | mod protos; 126 | /// Etcd API response header 127 | mod response_header; 128 | /// Watch mod for watch operations. 129 | mod watch; 130 | 131 | /// Result with error information 132 | pub type Result = std::result::Result; 133 | 134 | /// The default value for current interval value 135 | pub const CURRENT_INTERVAL_VALUE: u64 = 1; 136 | /// The key of the default value for current interval environment variable 137 | pub const CURRENT_INTERVAL_ENV_KEY: &str = "CURRENT_INTERVAL"; 138 | /// The default value for current initial value 139 | pub const INITIAL_INTERVAL_VALUE: u64 = 1; 140 | /// The key of the default value for initial interval environment variable 141 | pub const INITIAL_INTERVAL_ENV_KEY: &str = "INITIAL_INTERVAL"; 142 | /// The default value for max elapsed value 143 | pub const MAX_ELAPSED_TIME_VALUE: u64 = 10; 144 | /// The key of the default value for max elapsed environment variable 145 | pub const MAX_ELAPSED_TIME_ENV_KEY: &str = "MAX_ELAPSED_TIME"; 146 | 147 | /// A retry macro to immediately attempt a function call after failure 148 | #[macro_export] 149 | macro_rules! retryable { 150 | ($args:expr) => { 151 | backoff::future::Retry::new( 152 | crate::SmolSleeper, 153 | ExponentialBackoff { 154 | current_interval: Duration::from_secs( 155 | match std::env::var(CURRENT_INTERVAL_ENV_KEY) { 156 | Ok(val) => val.parse().unwrap(), 157 | Err(_) => CURRENT_INTERVAL_VALUE, 158 | }, 159 | ), 160 | initial_interval: Duration::from_secs( 161 | match std::env::var(INITIAL_INTERVAL_ENV_KEY) { 162 | Ok(val) => val.parse().unwrap(), 163 | Err(_) => INITIAL_INTERVAL_VALUE, 164 | }, 165 | ), 166 | max_elapsed_time: Some(Duration::from_secs( 167 | match std::env::var(MAX_ELAPSED_TIME_ENV_KEY) { 168 | Ok(val) => val.parse().unwrap(), 169 | Err(_) => MAX_ELAPSED_TIME_VALUE, 170 | }, 171 | )), 172 | ..ExponentialBackoff::default() 173 | }, 174 | crate::NoopNotify, 175 | $args, 176 | ) 177 | .await? 178 | }; 179 | } 180 | 181 | /// The notifier does nothing 182 | #[non_exhaustive] 183 | #[derive(Debug, Clone, Copy)] 184 | pub struct NoopNotify; 185 | 186 | impl Notify for NoopNotify { 187 | #[inline] 188 | fn notify(&mut self, _: E, _: Duration) {} 189 | } 190 | 191 | /// The smol sleeper wrapper 192 | struct SmolSleeper; 193 | 194 | impl Sleeper for SmolSleeper { 195 | type Sleep = Pin + Send + 'static>>; 196 | fn sleep(&self, dur: Duration) -> Self::Sleep { 197 | Box::pin(sleep(dur)) 198 | } 199 | } 200 | 201 | /// A wrapper for smol sleep 202 | async fn sleep(d: Duration) { 203 | smol::Timer::after(d).await; 204 | } 205 | 206 | #[allow( 207 | clippy::unwrap_used, 208 | clippy::indexing_slicing, 209 | clippy::too_many_lines, 210 | dead_code 211 | )] 212 | #[cfg(test)] 213 | mod tests { 214 | use super::*; 215 | use async_compat::Compat; 216 | use clippy_utilities::Cast; 217 | use std::collections::HashMap; 218 | use std::env::set_var; 219 | use std::time::Duration; 220 | use std::time::SystemTime; 221 | 222 | const DEFAULT_ETCD_ENDPOINT1_FOR_TEST: &str = "127.0.0.1:2379"; 223 | // Should not connect 2380 port, which will cause lock operation error. 224 | //const DEFAULT_ETCD_ENDPOINT2_FOR_TEST: &str = "127.0.0.1:2380"; 225 | 226 | #[test] 227 | /// Here we used a blocking and sequential structure to run the tests 228 | /// because using separate test functions would result in parallel 229 | /// execution of different tests when we run the global test. 230 | /// However, some etcd operations conflict with each other, 231 | /// such as deleting all keys. 232 | /// Even when using prefixes to differentiate the scope of 233 | /// different test operations, the operation that deletes all keys still 234 | /// has a global impact and would read the results of other tests. 235 | fn test_all() -> Result<()> { 236 | set_var("RUST_LOG", "debug"); 237 | 238 | env_logger::try_init().unwrap_or_else(|e| { 239 | log::debug!("env_logger try init failed, err:{}", e); 240 | }); 241 | 242 | smol::block_on(Compat::new(async { 243 | { 244 | let client = build_etcd_client().await?; 245 | client 246 | .kv() 247 | .delete(EtcdDeleteRequest::new(KeyRange::all())) 248 | .await?; 249 | } 250 | test_kv().await?; 251 | test_transaction().await?; 252 | test_watch("test_all").await?; 253 | test_lock().await?; 254 | 255 | Ok(()) 256 | })) 257 | } 258 | 259 | async fn test_watch(key_prefix: &str) -> Result<()> { 260 | /// For one task to watch put and deletion of a key to check is it support multi watchers 261 | async fn watch_one(watch_key: &str, client: Client) { 262 | let mut watch = client 263 | .watch(KeyRange::key(watch_key)) 264 | .await 265 | .unwrap_or_else(|e| panic!("watch failed, err:{}", e)); 266 | 267 | { 268 | let mut resp = watch 269 | .recv() 270 | .await 271 | .unwrap_or_else(|e| panic!("failed to get watch, err:{}", e)); 272 | let mut resp_events = resp.take_events(); 273 | assert_eq!(resp_events.len(), 1, "There should be one event"); 274 | assert_eq!( 275 | resp_events[0].event_type(), 276 | EventType::Put, 277 | "The event should be put" 278 | ); 279 | let kvs = resp_events[0] 280 | .take_kvs() 281 | .unwrap_or_else(|| panic!("There should be kv")); 282 | assert_eq!(kvs.key_str(), watch_key, "The key should be watched key"); 283 | assert_eq!(kvs.value_str(), "baz3", "The value should be baz3"); 284 | } 285 | { 286 | let mut resp = watch 287 | .recv() 288 | .await 289 | .unwrap_or_else(|e| panic!("Failed to get watch, err:{}", e)); 290 | let mut resp_events = resp.take_events(); 291 | assert_eq!(resp_events.len(), 1, "There should be one event"); 292 | assert_eq!( 293 | resp_events[0].event_type(), 294 | EventType::Delete, 295 | "The event should be delete" 296 | ); 297 | let kvs = &mut resp_events[0] 298 | .take_kvs() 299 | .unwrap_or_else(|| panic!("should have kv")); 300 | assert_eq!(kvs.key_str(), watch_key, "The key should be watched key"); 301 | } 302 | } 303 | 304 | let client = build_etcd_client().await?; 305 | let key1 = format!("{}41_foo1", key_prefix); 306 | let key2 = format!("{}42_foo1", key_prefix); 307 | 308 | client 309 | .kv() 310 | .put(EtcdPutRequest::new(key1.as_str(), "baz1")) 311 | .await?; 312 | client 313 | .kv() 314 | .put(EtcdPutRequest::new(key2.as_str(), "baz2")) 315 | .await?; 316 | 317 | // Spawn an async task to do put operation and delete operation, 318 | // which should be watched by the watch task. 319 | { 320 | let client = client.clone(); 321 | let key1 = key1.clone(); 322 | let key2 = key2.clone(); 323 | smol::spawn(async move { 324 | smol::Timer::after(Duration::from_secs(1)).await; 325 | client 326 | .kv() 327 | .put(EtcdPutRequest::new(key1.as_str(), "baz3")) 328 | .await 329 | .unwrap(); 330 | client 331 | .kv() 332 | .put(EtcdPutRequest::new(key2.as_str(), "baz3")) 333 | .await 334 | .unwrap(); 335 | smol::Timer::after(Duration::from_secs(1)).await; 336 | client 337 | .kv() 338 | .delete(EtcdDeleteRequest::new(KeyRange::key(key1.as_str()))) 339 | .await 340 | .unwrap(); 341 | client 342 | .kv() 343 | .delete(EtcdDeleteRequest::new(KeyRange::key(key2.as_str()))) 344 | .await 345 | .unwrap(); 346 | }) 347 | .detach(); 348 | } 349 | 350 | // Spawn 4 async tasks to watch the put and delete operation of the key. 351 | // Every 2 tasks watch the same key. 352 | let joiners = vec![ 353 | { 354 | let client = client.clone(); 355 | let watchkey = key1.clone(); 356 | smol::spawn(async move { 357 | watch_one(watchkey.as_str(), client).await; 358 | }) 359 | }, 360 | { 361 | let client = client.clone(); 362 | let watchkey = key1.clone(); 363 | smol::spawn(async move { 364 | watch_one(watchkey.as_str(), client).await; 365 | }) 366 | }, 367 | { 368 | let client = client.clone(); 369 | let watchkey = key2.clone(); 370 | smol::spawn(async move { 371 | watch_one(watchkey.as_str(), client).await; 372 | }) 373 | }, 374 | { 375 | let client = client.clone(); 376 | let watchkey = key2.clone(); 377 | smol::spawn(async move { 378 | watch_one(watchkey.as_str(), client).await; 379 | }) 380 | }, 381 | ]; 382 | futures::future::join_all(joiners).await; 383 | clean_etcd(&client).await?; 384 | client.shutdown().await?; 385 | 386 | Ok(()) 387 | } 388 | 389 | async fn test_lock() -> Result<()> { 390 | log::debug!("test_lock"); 391 | 392 | // 1. Lock on "ABC" 393 | let client = build_etcd_client().await?; 394 | let lease_id = client 395 | .lease() 396 | .grant(EtcdLeaseGrantRequest::new(Duration::from_secs(10))) 397 | .await? 398 | .id(); 399 | let lease_id_2 = client 400 | .lease() 401 | .grant(EtcdLeaseGrantRequest::new(Duration::from_secs(10))) 402 | .await? 403 | .id(); 404 | let key_bytes = client 405 | .lock() 406 | .lock(EtcdLockRequest::new(b"ABC".to_vec(), lease_id)) 407 | .await? 408 | .take_key(); 409 | 410 | // 2. Wait until the first lock released automatically 411 | let time1 = SystemTime::now(); 412 | let key_bytes2 = client 413 | .lock() 414 | .lock(EtcdLockRequest::new(b"ABC".to_vec(), lease_id_2)) 415 | .await? 416 | .take_key(); 417 | let time2 = SystemTime::now(); 418 | 419 | // wait a least 5 seconds (the first lock has a 10s lease) 420 | assert!( 421 | time2 422 | .duration_since(time1) 423 | .unwrap_or_else(|e| panic!("Fail to convert time, error is {}", e)) 424 | .as_secs() 425 | > 5 426 | ); 427 | 428 | let key_slice = key_bytes.as_slice(); 429 | assert_eq!( 430 | key_slice 431 | .get(..3) 432 | .unwrap_or_else(|| panic!("key slice get first 3 bytes failed")), 433 | b"ABC".to_vec() 434 | ); 435 | 436 | // 3. Release all locks 437 | client 438 | .lock() 439 | .unlock(EtcdUnlockRequest::new(key_bytes)) 440 | .await?; 441 | 442 | client 443 | .lock() 444 | .unlock(EtcdUnlockRequest::new(key_bytes2)) 445 | .await?; 446 | 447 | clean_etcd(&client).await?; 448 | client.shutdown().await?; 449 | Ok(()) 450 | } 451 | 452 | async fn test_transaction() -> Result<()> { 453 | log::debug!("test_transaction"); 454 | let client = build_etcd_client().await?; 455 | test_compose(&client).await?; 456 | clean_etcd(&client).await?; 457 | client.shutdown().await?; 458 | Ok(()) 459 | } 460 | 461 | async fn test_compose(client: &Client) -> Result<()> { 462 | let revision; 463 | { 464 | let mut resp = client.kv().put(EtcdPutRequest::new("foo", "bar")).await?; 465 | revision = resp 466 | .take_header() 467 | .unwrap_or_else(|| panic!("Fail to take header from response")) 468 | .revision(); 469 | 470 | for v in 0_i32..10_i32 { 471 | let _c = client 472 | .kv() 473 | .put(EtcdPutRequest::new(format!("key-{}", v), format!("{}", v))) 474 | .await?; 475 | } 476 | } 477 | 478 | let txn = EtcdTxnRequest::new() 479 | .when_value(KeyRange::key("foo"), TxnCmp::Equal, "bar") 480 | .when_mod_revision(KeyRange::key("foo"), TxnCmp::Equal, revision.cast()) 481 | .and_then(EtcdPutRequest::new("foo", "bar")) 482 | .and_then(EtcdRangeRequest::new(KeyRange::all())) 483 | .and_then(EtcdDeleteRequest::new(KeyRange::all())) 484 | .and_then(EtcdTxnRequest::new()) 485 | .or_else(EtcdPutRequest::new("bar", "baz")); 486 | 487 | let mut txn_resp = client.kv().txn(txn).await?; 488 | 489 | for op_resp in txn_resp.take_responses() { 490 | match op_resp { 491 | TxnOpResponse::Put(_resp) => {} 492 | TxnOpResponse::Range(_resp) => {} 493 | TxnOpResponse::Delete(resp) => { 494 | assert_eq!( 495 | resp.count_deleted(), 496 | 11, 497 | "Deleted wrong value from etcd server" 498 | ); 499 | } 500 | TxnOpResponse::Txn(resp) => { 501 | assert!(resp.is_success(), "Txn did not success from etcd server"); 502 | } 503 | } 504 | } 505 | 506 | // The failure operation should not be proccessed. 507 | let req = EtcdRangeRequest::new(KeyRange::key("bar")); 508 | let range_resp = client.kv().range(req).await?; 509 | assert_eq!( 510 | range_resp.count(), 511 | 0, 512 | "The number of data fetched from etcd is wrong", 513 | ); 514 | 515 | Ok(()) 516 | } 517 | 518 | async fn test_kv() -> Result<()> { 519 | log::debug!("test_kv"); 520 | let client = build_etcd_client().await?; 521 | test_list_prefix(&client).await?; 522 | test_range_query(&client).await?; 523 | clean_etcd(&client).await?; 524 | client.shutdown().await?; 525 | Ok(()) 526 | } 527 | 528 | async fn test_range_query(client: &Client) -> Result<()> { 529 | let query_key = "41_foo1"; 530 | // Add test data to etcd 531 | let mut test_data = HashMap::new(); 532 | test_data.insert("41_foo1", "baz1"); 533 | test_data.insert("42_foo1", "baz1"); 534 | test_data.insert("42_foo2", "baz2"); 535 | test_data.insert("42_bar1", "baz3"); 536 | test_data.insert("42_bar2", "baz4"); 537 | 538 | for (key, value) in test_data.clone() { 539 | client.kv().put(EtcdPutRequest::new(key, value)).await?; 540 | } 541 | 542 | let req = EtcdRangeRequest::new(KeyRange::key(query_key)); 543 | let range_resp = client.kv().range(req).await?; 544 | assert_eq!( 545 | range_resp.count(), 546 | 1, 547 | "The number of data fetched from etcd is wrong", 548 | ); 549 | 550 | client 551 | .kv() 552 | .put(EtcdPutRequest::new(query_key, "newbaz1")) 553 | .await?; 554 | let req2 = EtcdRangeRequest::new(KeyRange::key(query_key)); 555 | let mut range_resp2 = client.kv().range(req2).await?; 556 | assert_eq!( 557 | range_resp2.count(), 558 | 1, 559 | "The number of data fetched from etcd is wrong", 560 | ); 561 | let expect_value: Vec = "newbaz1".into(); 562 | assert_eq!( 563 | range_resp2 564 | .take_kvs() 565 | .get(0) 566 | .unwrap_or_else(|| panic!("Fail to get key value from RangeResponse")) 567 | .value(), 568 | expect_value, 569 | "The value of updated data fetched from etcd is wrong", 570 | ); 571 | 572 | // Delete key-valeu pairs with prefix 573 | let req = EtcdDeleteRequest::new(KeyRange::all()); 574 | let delete_resp = client.kv().delete(req).await?; 575 | assert_eq!( 576 | delete_resp.count_deleted(), 577 | 5, 578 | "The number of data deleted in etcd is wrong", 579 | ); 580 | 581 | // After delete all, query one key should return nothing. 582 | let req = EtcdRangeRequest::new(KeyRange::key("41_foo1")); 583 | let range_resp = client.kv().range(req).await?; 584 | assert_eq!( 585 | range_resp.count(), 586 | 0, 587 | "The number of data fetched from etcd is wrong", 588 | ); 589 | 590 | Ok(()) 591 | } 592 | 593 | async fn test_list_prefix(client: &Client) -> Result<()> { 594 | let prefix = "42_"; 595 | // Add test data to etcd 596 | let mut test_data = HashMap::new(); 597 | test_data.insert("41_foo1", "newbaz1"); 598 | test_data.insert("42_foo1", "newbaz1"); 599 | test_data.insert("42_foo2", "newbaz2"); 600 | test_data.insert("42_bar1", "newbaz3"); 601 | test_data.insert("42_bar2", "newbaz4"); 602 | 603 | for (key, value) in test_data.clone() { 604 | client.kv().put(EtcdPutRequest::new(key, value)).await?; 605 | } 606 | 607 | let req = EtcdRangeRequest::new(KeyRange::key("41_foo1")); 608 | let range_resp = client.kv().range(req).await?; 609 | assert_eq!( 610 | range_resp.count(), 611 | 1, 612 | "The number of data fetched from etcd is wrong", 613 | ); 614 | 615 | // List key-value pairs with prefix 616 | let req = EtcdRangeRequest::new(KeyRange::prefix(prefix)); 617 | let mut resp = client.kv().range(req).await?; 618 | assert_eq!( 619 | resp.count(), 620 | 4, 621 | "The number of data fetched from etcd is wrong", 622 | ); 623 | for kv in resp.take_kvs() { 624 | assert!( 625 | test_data.contains_key(kv.key_str()), 626 | "Data fetched from etcd should not exist", 627 | ); 628 | assert_eq!( 629 | test_data.get(kv.key_str()), 630 | Some(&kv.value_str()), 631 | "Fetched wrong value from etcd server" 632 | ); 633 | } 634 | 635 | let prefix2 = "41_"; 636 | let req2 = EtcdRangeRequest::new(KeyRange::prefix(prefix2)); 637 | let resp2 = client.kv().range(req2).await?; 638 | assert_eq!( 639 | resp2.count(), 640 | 1, 641 | "The number of data fetched from etcd is wrong", 642 | ); 643 | 644 | // Delete key-valeu pairs with prefix 645 | let req = EtcdDeleteRequest::new(KeyRange::all()); 646 | let delete_resp = client.kv().delete(req).await?; 647 | assert_eq!( 648 | delete_resp.count_deleted(), 649 | 5, 650 | "The number of data deleted in etcd is wrong", 651 | ); 652 | 653 | // After delete all, query one key should return nothing. 654 | let req = EtcdRangeRequest::new(KeyRange::key("41_foo1")); 655 | let range_resp = client.kv().range(req).await?; 656 | assert_eq!( 657 | range_resp.count(), 658 | 0, 659 | "The number of data fetched from etcd is wrong", 660 | ); 661 | 662 | Ok(()) 663 | } 664 | 665 | async fn build_etcd_client() -> Result { 666 | let client = Client::connect(ClientConfig { 667 | endpoints: vec![ 668 | DEFAULT_ETCD_ENDPOINT1_FOR_TEST.to_owned(), 669 | //DEFAULT_ETCD_ENDPOINT2_FOR_TEST.to_owned(), 670 | ], 671 | auth: None, 672 | cache_size: 64, 673 | cache_enable: false, 674 | }) 675 | .await?; 676 | Ok(client) 677 | } 678 | async fn clean_etcd(client: &Client) -> Result<()> { 679 | let req = EtcdDeleteRequest::new(KeyRange::all()); 680 | client.kv().delete(req).await?; 681 | Ok(()) 682 | } 683 | } 684 | -------------------------------------------------------------------------------- /etcd-client/src/lock/mod.rs: -------------------------------------------------------------------------------- 1 | /// The mod of lock release operations 2 | mod release; 3 | /// The mod of lock require operations 4 | mod require; 5 | 6 | use crate::protos::lock_grpc::LockClient; 7 | use crate::Result as Res; 8 | pub use release::{EtcdUnlockRequest, EtcdUnlockResponse}; 9 | pub use require::{EtcdLockRequest, EtcdLockResponse}; 10 | 11 | /// Lock client. 12 | #[derive(Clone)] 13 | pub struct Lock { 14 | /// Etcd Lock client. 15 | client: LockClient, 16 | } 17 | 18 | impl Lock { 19 | /// Creates a new `LockClient`. 20 | /// 21 | /// This method should only be called within etcd client. 22 | pub(crate) const fn new(client: LockClient) -> Self { 23 | Self { client } 24 | } 25 | 26 | /// Performs a lock operation. 27 | /// 28 | /// # Errors 29 | /// 30 | /// Will return `Err` if RPC call is failed. 31 | #[inline] 32 | pub async fn lock(&mut self, req: EtcdLockRequest) -> Res { 33 | let resp = self.client.lock_async(&req.into())?.await?; 34 | Ok(From::from(resp)) 35 | } 36 | 37 | /// Performs a unlock operation. 38 | /// 39 | /// # Errors 40 | /// 41 | /// Will return `Err` if RPC call is failed. 42 | #[inline] 43 | pub async fn unlock(&mut self, req: EtcdUnlockRequest) -> Res { 44 | let resp = self.client.unlock_async(&req.into())?.await?; 45 | Ok(From::from(resp)) 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /etcd-client/src/lock/release.rs: -------------------------------------------------------------------------------- 1 | use crate::protos::lock::{UnlockRequest, UnlockResponse}; 2 | use crate::ResponseHeader; 3 | 4 | /// Request for requiring a lock 5 | pub struct EtcdUnlockRequest { 6 | /// Etcd lock request 7 | proto: UnlockRequest, 8 | } 9 | 10 | impl EtcdUnlockRequest { 11 | /// Creates a new `EtcdUnlockRequest` for requiring a lock 12 | #[inline] 13 | pub fn new(key: T) -> Self 14 | where 15 | T: Into>, 16 | { 17 | let lock_request = UnlockRequest { 18 | key: key.into(), 19 | ..UnlockRequest::default() 20 | }; 21 | 22 | Self { 23 | proto: lock_request, 24 | } 25 | } 26 | 27 | /// Get the name from `UnlockRequest` 28 | #[inline] 29 | pub fn get_key(&self) -> Vec { 30 | self.proto.get_key().to_vec() 31 | } 32 | } 33 | 34 | impl From for UnlockRequest { 35 | #[inline] 36 | fn from(e: EtcdUnlockRequest) -> Self { 37 | e.proto 38 | } 39 | } 40 | 41 | /// Response for requring a lock. 42 | #[derive(Debug)] 43 | pub struct EtcdUnlockResponse { 44 | /// Etcd lock response 45 | proto: UnlockResponse, 46 | } 47 | 48 | impl EtcdUnlockResponse { 49 | /// Takes the header out of response, leaving a `None` in its place. 50 | #[inline] 51 | pub fn take_header(&mut self) -> Option { 52 | self.proto.header.take().map(From::from) 53 | } 54 | } 55 | 56 | impl From for EtcdUnlockResponse { 57 | #[inline] 58 | fn from(resp: UnlockResponse) -> Self { 59 | Self { proto: resp } 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /etcd-client/src/lock/require.rs: -------------------------------------------------------------------------------- 1 | use crate::protos::lock::{LockRequest, LockResponse}; 2 | use crate::ResponseHeader; 3 | use clippy_utilities::Cast; 4 | 5 | /// Request for requiring a lock 6 | pub struct EtcdLockRequest { 7 | /// Etcd lock request 8 | proto: LockRequest, 9 | } 10 | 11 | impl EtcdLockRequest { 12 | /// Creates a new `EtcdLockRequest` for requiring a lock 13 | #[inline] 14 | pub fn new(name: T, lease: u64) -> Self 15 | where 16 | T: Into>, 17 | { 18 | let lock_request = LockRequest { 19 | name: name.into(), 20 | lease: lease.cast(), 21 | ..LockRequest::default() 22 | }; 23 | 24 | Self { 25 | proto: lock_request, 26 | } 27 | } 28 | 29 | /// Get the name from `LockRequest` 30 | #[inline] 31 | pub fn get_name(&self) -> Vec { 32 | self.proto.get_name().to_vec() 33 | } 34 | 35 | /// Get the name from `LockRequest` 36 | #[inline] 37 | pub fn get_lease(&self) -> u64 { 38 | self.proto.get_lease().cast() 39 | } 40 | } 41 | 42 | impl From for LockRequest { 43 | #[inline] 44 | fn from(e: EtcdLockRequest) -> Self { 45 | e.proto 46 | } 47 | } 48 | 49 | /// Response for requring a lock. 50 | #[derive(Debug)] 51 | pub struct EtcdLockResponse { 52 | /// Etcd lock response 53 | proto: LockResponse, 54 | } 55 | 56 | impl EtcdLockResponse { 57 | /// Takes the header out of response, leaving a `None` in its place. 58 | #[inline] 59 | pub fn take_header(&mut self) -> Option { 60 | self.proto.header.take().map(From::from) 61 | } 62 | 63 | /// Take the key out of response, leaving a empty Vec in its place. 64 | #[inline] 65 | pub fn take_key(&mut self) -> Vec { 66 | self.proto.take_key() 67 | } 68 | } 69 | 70 | impl From for EtcdLockResponse { 71 | #[inline] 72 | fn from(resp: LockResponse) -> Self { 73 | Self { proto: resp } 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /etcd-client/src/protos/mod.rs: -------------------------------------------------------------------------------- 1 | // Ignore format and lint to generated code 2 | #[rustfmt::skip] 3 | #[allow( 4 | unreachable_pub, 5 | clippy::all, 6 | clippy::restriction, 7 | clippy::pedantic, 8 | clippy::nursery, 9 | clippy::cargo, 10 | clippy::same_name_method 11 | )] 12 | pub mod rpc_grpc; 13 | 14 | // Ignore format and lint to generated code 15 | #[rustfmt::skip] 16 | #[allow( 17 | unreachable_pub, 18 | variant_size_differences, 19 | clippy::all, 20 | clippy::restriction, 21 | clippy::pedantic, 22 | clippy::nursery, 23 | clippy::cargo, 24 | clippy::same_name_method 25 | )] 26 | pub mod rpc; 27 | 28 | // Ignore format and lint to generated code 29 | #[rustfmt::skip] 30 | #[allow( 31 | unreachable_pub, 32 | clippy::all, 33 | clippy::restriction, 34 | clippy::pedantic, 35 | clippy::nursery, 36 | clippy::cargo, 37 | clippy::same_name_method 38 | )] 39 | pub mod auth; 40 | 41 | // Ignore format and lint to generated code 42 | #[rustfmt::skip] 43 | #[allow( 44 | unreachable_pub, 45 | clippy::all, 46 | clippy::restriction, 47 | clippy::pedantic, 48 | clippy::nursery, 49 | clippy::cargo, 50 | clippy::same_name_method 51 | )] 52 | pub mod kv; 53 | 54 | // Ignore format and lint to generated code 55 | #[rustfmt::skip] 56 | #[allow( 57 | unreachable_pub, 58 | clippy::all, 59 | clippy::restriction, 60 | clippy::pedantic, 61 | clippy::nursery, 62 | clippy::cargo, 63 | clippy::same_name_method 64 | )] 65 | pub mod lock; 66 | 67 | // Ignore format and lint to generated code 68 | #[rustfmt::skip] 69 | #[allow( 70 | unreachable_pub, 71 | clippy::all, 72 | clippy::restriction, 73 | clippy::pedantic, 74 | clippy::nursery, 75 | clippy::cargo, 76 | clippy::same_name_method 77 | )] 78 | pub mod lock_grpc; 79 | -------------------------------------------------------------------------------- /etcd-client/src/response_header.rs: -------------------------------------------------------------------------------- 1 | use crate::protos::rpc; 2 | use clippy_utilities::Cast; 3 | 4 | /// Response header. 5 | #[derive(Debug)] 6 | pub struct ResponseHeader { 7 | /// Etcd response header which includes cluster metadata for all responses from etcd API. 8 | proto: rpc::ResponseHeader, 9 | } 10 | 11 | impl ResponseHeader { 12 | /// Get the key-value store revision when the request was applied. 13 | #[inline] 14 | pub fn revision(&self) -> u64 { 15 | self.proto.revision.cast() 16 | } 17 | } 18 | 19 | impl From for ResponseHeader { 20 | #[inline] 21 | fn from(header: rpc::ResponseHeader) -> Self { 22 | Self { proto: header } 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /etcd-client/src/watch/mod.rs: -------------------------------------------------------------------------------- 1 | //! The Watch API provides an event-based interface for asynchronously monitoring changes to keys. 2 | //! 3 | //! # Examples 4 | //! 5 | //! Watch key `foo` changes 6 | //! 7 | //! ```no_run 8 | //! 9 | //! use etcd_client::*; 10 | //! use futures::stream::StreamExt; 11 | //! 12 | //! fn main() -> Result<()> { 13 | //! smol::block_on(async { 14 | //! let config = ClientConfig::new(vec!["http://127.0.0.1:2379".to_owned()], None, 32, true); 15 | //! let client = Client::connect(config).await?; 16 | //! 17 | //! // print out all received watch responses 18 | //! let mut inbound = client.watch(KeyRange::key("foo")).await.unwrap(); 19 | //! smol::spawn(async move { 20 | //! while let Ok(resp) = inbound.recv().await { 21 | //! println!("watch response: {:?}", resp); 22 | //! } 23 | //! }); 24 | //! 25 | //! let key = "foo"; 26 | //! client.kv().put(EtcdPutRequest::new(key, "bar")).await?; 27 | //! client.kv().put(EtcdPutRequest::new(key, "baz")).await?; 28 | //! client 29 | //! .kv() 30 | //! .delete(EtcdDeleteRequest::new(KeyRange::key(key))) 31 | //! .await?; 32 | //! 33 | //! // not necessary, but will cleanly shut down the long-running tasks 34 | //! // spawned by the client 35 | //! client.shutdown().await; 36 | //! 37 | //! Ok(()) 38 | //! }) 39 | //! } 40 | //! 41 | //! ``` 42 | 43 | use std::collections::{BTreeMap, HashMap}; 44 | use std::sync::{Arc, Weak}; 45 | use std::time::Duration; 46 | 47 | use async_broadcast::{InactiveReceiver, Receiver as BroadcastRx, Sender as BroadcastTx}; 48 | use async_std::channel::bounded; 49 | use crossbeam_queue::SegQueue; 50 | use futures::future::FutureExt; 51 | use futures::stream::StreamExt; 52 | use futures::{SinkExt, TryFutureExt}; 53 | use grpcio::{ClientDuplexReceiver, StreamingCallSink, WriteFlags}; 54 | use smol::channel::{unbounded, Receiver, Sender}; 55 | use smol::Task; 56 | 57 | pub use watch_impl::{EtcdWatchRequest, EtcdWatchResponse}; 58 | 59 | use crate::lazy::Lazy; 60 | use crate::protos::kv; 61 | use crate::protos::rpc::{WatchRequest, WatchResponse}; 62 | use crate::protos::rpc_grpc::WatchClient; 63 | use crate::KeyRange; 64 | use crate::Result; 65 | use crate::{EtcdError, EtcdKeyValue}; 66 | 67 | /// Watch implementation mod. 68 | mod watch_impl; 69 | 70 | /// The timeout of waiting etcd response 71 | const WATCH_REQUEST_TIMEOUT_SEC: u64 = 2; 72 | 73 | /// Watch Id 74 | type WatchID = i64; 75 | 76 | /// Watch request message for watcher tasks to send to send loop 77 | type WatchRequestMsg = (EtcdWatchRequest, Sender); 78 | 79 | /// new watch request channel 80 | fn new_watch_request_chan() -> (Sender, Receiver) { 81 | unbounded::() 82 | } 83 | 84 | /// Cancel request message for watcher tasks to send to send loop 85 | type CancelRequestMsg = WatchID; 86 | 87 | /// new cancel request channel 88 | fn new_cancel_request_chan() -> (Sender, Receiver) { 89 | unbounded::() 90 | } 91 | 92 | /// Used for the send loop to receive the signal of a completed watch request from receive loop 93 | /// and handle the left watch request 94 | type WaitingWatchResponseMsg = (); 95 | 96 | /// new waiting watch response channel 97 | fn new_waiting_watch_response_chan() -> ( 98 | Sender, 99 | Receiver, 100 | ) { 101 | bounded::(1) 102 | } 103 | 104 | /// Shutdown message for send loop and receive loop async tasks to exit 105 | type ShutdownMsg = (); 106 | 107 | /// new shutdown channel 108 | fn new_shutdown_chan() -> (Sender, Receiver) { 109 | unbounded::() 110 | } 111 | 112 | /// the recorded watch info in a lock 113 | #[derive(Default)] 114 | struct WatchedMap { 115 | /// keyrange to watchid 116 | keyrange_2_watchid: BTreeMap, 117 | /// watchid to (sender to user, user receiver) 118 | watchid_2_detail: HashMap< 119 | WatchID, 120 | ( 121 | BroadcastTx, 122 | Weak, 123 | ), 124 | >, 125 | } 126 | impl WatchedMap { 127 | /// add new watched info 128 | fn add_watched( 129 | &mut self, 130 | watchid: WatchID, 131 | keyrange: KeyRange, 132 | sender_2_user: BroadcastTx, 133 | user_receiver: Weak, 134 | ) { 135 | self.keyrange_2_watchid.insert(keyrange, watchid); 136 | self.watchid_2_detail 137 | .insert(watchid, (sender_2_user, user_receiver)); 138 | } 139 | /// remove the watch to cancel 140 | fn remove_watch(&mut self, keyrange: &KeyRange) -> Option { 141 | if let Some(watch_id) = self.keyrange_2_watchid.remove(keyrange) { 142 | log::debug!("removed watch"); 143 | drop( 144 | self.watchid_2_detail 145 | .remove(&watch_id) 146 | .unwrap_or_else(|| panic!("")), 147 | ); 148 | return Some(watch_id); 149 | } 150 | None 151 | } 152 | 153 | /// get broadcast sender to watcher who requested with the watch id 154 | fn get_broadcast_sender_2_watcher( 155 | &self, 156 | watchid: WatchID, 157 | ) -> Option> { 158 | if let Some(&(ref sender, _)) = self.watchid_2_detail.get(&watchid) { 159 | return Some(sender.clone()); 160 | } 161 | None 162 | } 163 | 164 | /// get receiver of key range to send back to user who requested watch 165 | fn get_arc_receiver(&self, keyrange: &KeyRange) -> Option { 166 | self.keyrange_2_watchid.get(keyrange).and_then(|id|{ 167 | if let Some(&(_,ref weak_receiver_inner)) = self.watchid_2_detail.get(id) { 168 | match weak_receiver_inner.upgrade() { 169 | Some(arc_receiver_inner) => Some(SingleWatchEventReceiver::from_exist_inner( 170 | arc_receiver_inner, 171 | )), 172 | None => { 173 | // `remove_watch` in `WatchedMap` should be called when receiver is dropped 174 | panic!( 175 | "Receivers were all dropped but the registered info has not been removed, which is impossible" 176 | ); 177 | } 178 | } 179 | } else { 180 | None 181 | } 182 | }) 183 | } 184 | } 185 | 186 | /// Watch related data shared between watch communication task and user receivers 187 | struct WatchTunnelShared { 188 | /// A map shared to get the sender to registered watches for a keyrange 189 | watched_map: Lazy, 190 | /// Queued watch requests 191 | queued_watch_requests: SegQueue<(EtcdWatchRequest, Sender)>, 192 | /// Watch request waiting for response 193 | waiting_watch_request: Lazy)>>, 194 | 195 | /// A channel sender for watchers to send cancel request to send loop, 196 | /// then the send loop will send cancel request to etcd 197 | cancel_req_sender: Sender, 198 | /// Record the waiting cancel requests, when a cancel response arrived, 199 | /// it will get the sender from this map and send a signal to the canceling task. 200 | waiting_cancels: Lazy>>, 201 | 202 | /// A channel sender to send shutdown request. 203 | shutdown: Sender<()>, 204 | 205 | /// Sub tasks 206 | sub_tasks: Option>>, 207 | } 208 | impl Drop for WatchTunnelShared { 209 | fn drop(&mut self) { 210 | let sub_tasks = self 211 | .sub_tasks 212 | .take() 213 | .unwrap_or_else(|| panic!("sub_tasks should be some until dropped")); 214 | futures::executor::block_on(async { 215 | while let Some(task) = sub_tasks.pop() { 216 | task.await; 217 | } 218 | }); 219 | } 220 | } 221 | impl WatchTunnelShared { 222 | /// new `WatchTunnelShared` 223 | fn new(cancel_req_sender: Sender, shutdown: Sender<()>) -> Self { 224 | Self { 225 | watched_map: Lazy::new(WatchedMap::default), 226 | waiting_cancels: Lazy::new(HashMap::new), 227 | queued_watch_requests: SegQueue::new(), 228 | waiting_watch_request: Lazy::new(|| None), 229 | cancel_req_sender, 230 | shutdown, 231 | sub_tasks: Some(SegQueue::new()), 232 | } 233 | } 234 | 235 | /// cancel a watch in async task. 236 | async fn cancel_watch(&self, keyrange: KeyRange) { 237 | log::debug!("cancel watch {}", keyrange); 238 | let mut watched_map = self.watched_map.write().await; 239 | if let Some(watchid) = watched_map.remove_watch(&keyrange) { 240 | let (tx, rx) = bounded::<()>(1); 241 | self.waiting_cancels.write().await.insert(watchid, tx); 242 | if self.cancel_req_sender.send(watchid).await.is_ok() { 243 | self.sub_tasks 244 | .as_ref() 245 | .unwrap_or_else(|| { panic!("sub_tasks should be some until dropped") }) 246 | .push( 247 | smol::spawn(async move { 248 | futures::select! { 249 | _ = smol::Timer::after(Duration::from_secs(WATCH_REQUEST_TIMEOUT_SEC)).into_future().fuse()=>{ 250 | // todo: add retry for failed request 251 | // return Err(EtcdError::WaitingResponseTimeout("waiting for cancel response when calling `cancel_watch`".to_owned())); 252 | log::debug!("cancel watch wait response timeout"); 253 | } 254 | res = rx.recv().into_future().fuse()=>{ 255 | res.unwrap_or_else(|e|{ 256 | panic!("receive cancel response channel shouldn't be destroyed, err:{e}"); 257 | }); 258 | log::debug!("cancel watch successed"); 259 | } 260 | } 261 | }) 262 | ); 263 | } 264 | } else { 265 | panic!("logic bug, cancel watch should be called only when there's watched key"); 266 | } 267 | } 268 | } 269 | /// `WatchTunnel` is a reusable connection for `Watch` operation 270 | /// The underlying `gRPC` method is Bi-directional streaming 271 | #[allow(dead_code)] 272 | struct WatchTunnel { 273 | /// A channel sender to send watch request to send loop. 274 | watch_req_sender: Sender<(EtcdWatchRequest, Sender)>, 275 | /// A channel receiver to receive watch response. 276 | // resp_receiver: Option>>, 277 | 278 | /// Shared 279 | shared: Arc, 280 | } 281 | 282 | impl WatchTunnel { 283 | /// send watch request or add to queue or get receiver directly 284 | /// return true if a request is sent 285 | #[inline] 286 | async fn handle_watch_request( 287 | client_req_sender: &mut StreamingCallSink, 288 | shared: &WatchTunnelShared, 289 | req: EtcdWatchRequest, 290 | send_back: Sender, 291 | ) -> bool { 292 | let keyrange = KeyRange::range(req.get_key(), req.get_range_end()); 293 | // The locking operation on the map here is mutually exclusive with the map operation of cancel_watch. 294 | // Therefore, the sender to the user will definitely be valid during the map holding period. 295 | let watched_map = shared.watched_map.read().await; 296 | 297 | // The key range of request is already watched 298 | if let Some(event_receiver) = watched_map.get_arc_receiver(&keyrange) { 299 | log::debug!("{} watched directly return", keyrange); 300 | // already watched 301 | if let Err(err) = send_back.send(event_receiver).await { 302 | panic!("Send watch receiver to user failed, Watch canceled, err: {err}"); 303 | } 304 | } 305 | // There's a watch request waiting for response 306 | else if shared.waiting_watch_request.read().await.is_some() { 307 | drop(watched_map); 308 | log::debug!("watch queued"); 309 | shared.queued_watch_requests.push((req, send_back)); 310 | } 311 | // This request can be send directly 312 | else { 313 | drop(watched_map); 314 | log::debug!("{} isn't watched, send new watch request", keyrange); 315 | // new watch request 316 | *shared.waiting_watch_request.write().await = Some((keyrange, send_back)); 317 | 318 | client_req_sender 319 | .send((req.into(), WriteFlags::default())) 320 | .fuse() 321 | .await 322 | .unwrap_or_else(|e| panic!("Fail to send request, the error is {}", e)); 323 | // waiting_watch_response=true; 324 | return true; 325 | } 326 | false 327 | } 328 | 329 | #[inline] 330 | /// The loop to receive msg from watcher tasks and send watch request to etcd 331 | fn spawn_send_loop( 332 | shutdown_rx: Receiver, 333 | shared: Arc, 334 | waiting_watch_response_rx: Receiver, 335 | watch_req_receiver: Receiver, 336 | cancel_req_receiver: Receiver, 337 | mut client_req_sender: StreamingCallSink, 338 | ) { 339 | // Send loop 340 | smol::spawn(async move { 341 | let mut shutdown_rx = shutdown_rx.into_future().fuse(); 342 | 343 | #[allow(clippy::mut_mut)] 344 | loop { 345 | futures::select! { 346 | //1. Wait for new watch request 347 | res = watch_req_receiver.recv().into_future().fuse() => { 348 | // received user 349 | if let Ok((req,send_back)) = res { 350 | Self::handle_watch_request(&mut client_req_sender,&shared,req,send_back).await; 351 | }else{ 352 | break; 353 | } 354 | }, 355 | //2. wait for new cancel request 356 | res = cancel_req_receiver.recv().into_future().fuse() => { 357 | if let Ok(watch_id) = res { 358 | client_req_sender.send( 359 | (EtcdWatchRequest::cancel(watch_id).into(), WriteFlags::default()) 360 | ).fuse().await.unwrap_or_else( 361 | |e| panic!("Fail to send request, the error is {}", e) 362 | ); 363 | }else{ 364 | break; 365 | } 366 | }, 367 | //3. wait for watch response, after an response arrived, this channel will receive a msg 368 | _ = waiting_watch_response_rx.recv().into_future().fuse() =>{ 369 | // receive when a watch request got its response 370 | // waiting_watch_response=false; 371 | while let Some((req,send_back))= shared.queued_watch_requests.pop(){ 372 | log::debug!("handle queued watch request"); 373 | if Self::handle_watch_request(&mut client_req_sender,&shared,req,send_back).await{ 374 | // left request will be handled after current request get it's response 375 | break; 376 | } 377 | } 378 | }, 379 | _ = shutdown_rx => { break; }, 380 | } 381 | } 382 | }).detach(); 383 | } 384 | 385 | /// Handle create response from etcd server 386 | #[inline] 387 | async fn handle_create_response( 388 | resp: WatchResponse, 389 | shared: &Arc, 390 | waiting_watch_response_tx: &Sender, 391 | ) { 392 | let (keyrange, send_back) = shared 393 | .waiting_watch_request 394 | .write() 395 | .await 396 | .take() 397 | .unwrap_or_else(|| panic!("watch create response must have a waiting create request")); 398 | 399 | let (tx, rx) = async_broadcast::broadcast::(10); 400 | // let (tx,rx)=unbounded::>(); 401 | let receiver_for_user = 402 | SingleWatchEventReceiver::new(Arc::clone(shared), rx, keyrange.clone()); 403 | shared.watched_map.write().await.add_watched( 404 | resp.watch_id, 405 | keyrange.clone(), 406 | tx, 407 | receiver_for_user.get_weak_inner(), 408 | ); 409 | // send watch result back to user 410 | if let Err(e) = send_back.send(receiver_for_user).await { 411 | panic!("user receiver shouldn't be dropped before watch response arrive, err:{e}"); 412 | } 413 | // notify the send loop to handle next watch requests 414 | waiting_watch_response_tx 415 | .send(()) 416 | .await 417 | .unwrap_or_else(|e| { 418 | panic!("Failed to send watch resp from recv loop to send loop, err:{e}") 419 | }); 420 | log::debug!( 421 | "watch created response received and registered id:{} keyrange:{}", 422 | resp.watch_id, 423 | keyrange 424 | ); 425 | } 426 | 427 | /// Handle cancel response from etcd server 428 | async fn handle_cancel_response(resp: WatchResponse, shared: &Arc) { 429 | let sendback = shared 430 | .waiting_cancels 431 | .write() 432 | .await 433 | .remove(&resp.watch_id) 434 | .unwrap_or_else(|| { 435 | panic!( 436 | "watch id must be recorded in `waiting_cancels` before receive cancel respinse" 437 | ) 438 | }); 439 | sendback 440 | .send(()) 441 | .await 442 | .unwrap_or_else(|e| panic!("send back channel shouldn't be destroyed, err:{e}")); 443 | } 444 | 445 | /// Handle event response from etcd server 446 | async fn handle_event_response(resp: WatchResponse, shared: &Arc) { 447 | // The locking operation on the map here is mutually exclusive with the map operation of cancel_watch. 448 | // Therefore, the sender to the user will definitely be valid during the map holding period. 449 | let watched_map = shared.watched_map.read().await; 450 | let sendback = watched_map.get_broadcast_sender_2_watcher(resp.watch_id); 451 | if let Some(sender) = sendback { 452 | log::debug!("watch event received and sent"); 453 | sender.broadcast(resp.into()).await.unwrap_or_else(|e| { 454 | panic!("User receiver shouldn't be dropped and send back should work, err:{e}"); 455 | }); 456 | } else { 457 | log::debug!("received watch event but no user to send to"); 458 | } 459 | } 460 | 461 | #[inline] 462 | /// The loop to handle response from server and dispatch to watchers 463 | fn spawn_receive_loop( 464 | shared: Arc, 465 | waiting_watch_response_tx: Sender, 466 | shutdown_response: Receiver, 467 | mut client_resp_receiver: ClientDuplexReceiver, 468 | ) { 469 | // Receive loop 470 | smol::spawn(async move { 471 | let mut shutdown_rx = shutdown_response.into_future().fuse(); 472 | loop { 473 | #[allow(clippy::mut_mut)] 474 | let resp = futures::select! { 475 | resp_opt = client_resp_receiver.next().fuse() => resp_opt.unwrap_or_else( 476 | || panic!("Fail to receive response from client") 477 | ), 478 | _ = shutdown_rx => { return; } 479 | }; 480 | 481 | match resp { 482 | Ok(resp) => { 483 | // watch create response 484 | if resp.created { 485 | Self::handle_create_response(resp, &shared, &waiting_watch_response_tx) 486 | .await; 487 | } else if resp.canceled { 488 | Self::handle_cancel_response(resp, &shared).await; 489 | } else { 490 | Self::handle_event_response(resp, &shared).await; 491 | } 492 | } 493 | Err(e) => { 494 | log::debug!("Watch end with error: {e}"); 495 | break; 496 | } 497 | } 498 | } 499 | }) 500 | .detach(); 501 | } 502 | 503 | /// Creates a new `WatchClient`. 504 | fn new(client: &WatchClient) -> Self { 505 | let (watch_req_sender, watch_req_receiver) = new_watch_request_chan(); 506 | let (cancel_req_sender, cancel_req_receiver) = new_cancel_request_chan(); 507 | // From recv loop to send loop, notify a watch request is done, next watch can be excuted. 508 | let (waiting_watch_response_tx, waiting_watch_response_rx) = 509 | new_waiting_watch_response_chan(); 510 | let (shutdown_tx, shutdown_rx) = new_shutdown_chan(); 511 | let shutdown_response = shutdown_rx.clone(); 512 | // Monitor inbound watch response and transfer to the receiver 513 | let (client_req_sender, client_resp_receiver) = client 514 | .watch() 515 | .unwrap_or_else(|e| panic!("failed to send watch command, the error is: {}", e)); 516 | 517 | let shared = Arc::new(WatchTunnelShared::new(cancel_req_sender, shutdown_tx)); 518 | Self::spawn_receive_loop( 519 | Arc::clone(&shared), 520 | waiting_watch_response_tx, 521 | shutdown_response, 522 | client_resp_receiver, 523 | ); 524 | Self::spawn_send_loop( 525 | shutdown_rx, 526 | Arc::clone(&shared), 527 | waiting_watch_response_rx, 528 | watch_req_receiver, 529 | cancel_req_receiver, 530 | client_req_sender, 531 | ); 532 | 533 | Self { 534 | watch_req_sender, 535 | shared, 536 | } 537 | } 538 | 539 | /// shutdown the watch client 540 | async fn shutdown(&self) -> Result<()> { 541 | self.shared.shutdown.send(()).await?; 542 | Ok(()) 543 | } 544 | } 545 | 546 | /// shared inner of `SingleWatchEventReceiver` 547 | struct SingleWatchEventReceiverInner { 548 | /// A receiver to receive etcd watched event 549 | receiver: InactiveReceiver, 550 | /// A tunnel used to communicate with Etcd server for watch operations. 551 | shared: Arc, 552 | /// Watched keyrange 553 | keyrange: Option, 554 | } 555 | impl Drop for SingleWatchEventReceiverInner { 556 | fn drop(&mut self) { 557 | // send cancel task to send loop, after sent 558 | futures::executor::block_on(async { 559 | self.shared 560 | .cancel_watch(self.keyrange.take().unwrap_or_else(|| { 561 | panic!("keyrange in SingleWatchEventReceiverInner should be some until dropped") 562 | })) 563 | .await; 564 | }); 565 | } 566 | } 567 | 568 | /// Watch result return to user 569 | pub struct SingleWatchEventReceiver { 570 | /// The inner arc, when all `SingleWatchEventReceiver` dropped, 571 | /// `SingleWatchEventReceiverInner` fn drop will be triggered. 572 | /// In this drop, we should do the cancel watch operation. 573 | inner: Arc, 574 | 575 | /// A receiver to receive etcd watched event 576 | receiver: BroadcastRx, 577 | } 578 | 579 | impl SingleWatchEventReceiver { 580 | /// weak inner will be stored to create new inner 581 | /// if all receivers dropped, weak inner will be invalid 582 | fn get_weak_inner(&self) -> Weak { 583 | Arc::downgrade(&self.inner) 584 | } 585 | 586 | /// if there's prev watch, an inner will be stored to clone to create new receiver 587 | fn from_exist_inner(inner: Arc) -> Self { 588 | let receiver = inner.receiver.activate_cloned(); 589 | 590 | Self { inner, receiver } 591 | } 592 | 593 | /// new `SingleWatchEventReceiver` 594 | /// - `shared` A tunnel used to communicate with Etcd server for watch operations. 595 | /// - `receiver` A receiver to receive etcd watched event 596 | /// - `keyrange` Watched keyrange 597 | fn new( 598 | shared: Arc, 599 | receiver: BroadcastRx, 600 | keyrange: KeyRange, 601 | ) -> Self { 602 | let inner_receiver = receiver.clone().deactivate(); 603 | Self { 604 | inner: Arc::new(SingleWatchEventReceiverInner { 605 | shared, 606 | keyrange: Some(keyrange), 607 | receiver: inner_receiver, 608 | }), 609 | receiver, 610 | } 611 | } 612 | 613 | /// Blocking recv a watch event until system end. 614 | pub async fn recv(&mut self) -> Result { 615 | match self.receiver.recv().await { 616 | Ok(received) => Ok(received), 617 | Err(err) => { 618 | log::debug!("Receive event channel destroyed, the system is closing. err: {err}"); 619 | Err(EtcdError::ClientClosed( 620 | "The system is closing when call SingleWatchEventReceiver's recv.".to_owned(), 621 | )) 622 | } 623 | } 624 | } 625 | } 626 | 627 | /// Watch client. 628 | #[derive(Clone)] 629 | pub struct Watch { 630 | /// A tunnel used to communicate with Etcd server for watch operations. 631 | tunnel: Arc, 632 | } 633 | 634 | impl Watch { 635 | /// Create a new `WatchClient`. 636 | pub(crate) fn new(client: &WatchClient) -> Self { 637 | let tunnel = Arc::new(WatchTunnel::new(client)); 638 | 639 | Self { tunnel } 640 | } 641 | 642 | /// Performs a watch operation. 643 | /// Will fail if 644 | /// 645 | /// # Errors 646 | /// 647 | /// etcd error: client closed 648 | /// 649 | /// # Panics 650 | /// 651 | /// panic if recv watch response failed 652 | #[inline] 653 | pub async fn watch(&mut self, key_range: KeyRange) -> Result { 654 | let (tx, rx) = unbounded(); 655 | if let Err(err) = self 656 | .tunnel 657 | .watch_req_sender 658 | .send((EtcdWatchRequest::create(key_range), tx)) 659 | .await 660 | { 661 | log::debug!( 662 | "send watch watch request failed, the channel is destroyed and the system is closed, err:{err}" 663 | ); 664 | return Err(EtcdError::ClientClosed("send watch watch request failed, the channel is destroyed and the system is closed".to_owned())); 665 | } 666 | 667 | Ok(rx 668 | .recv() 669 | .await 670 | .unwrap_or_else(|e| panic!("watch resp channel shouldn't be ineffective, err:{}", e))) 671 | } 672 | 673 | /// Shut down the running watch task, if any. 674 | /// 675 | /// # Errors 676 | /// 677 | /// Will return `Err` if tunnel is shutdown. 678 | #[inline] 679 | pub async fn shutdown(&mut self) -> Result<()> { 680 | // If we implemented `Shutdown` for this, callers would need it in scope in 681 | // order to call this method. 682 | 683 | self.tunnel.shutdown().await?; 684 | Ok(()) 685 | } 686 | } 687 | 688 | /// The kind of event. 689 | #[non_exhaustive] 690 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 691 | pub enum EventType { 692 | /// Put event. 693 | Put, 694 | 695 | /// Delete event. 696 | Delete, 697 | } 698 | 699 | impl From for EventType { 700 | #[inline] 701 | fn from(event_type: kv::Event_EventType) -> Self { 702 | match event_type { 703 | kv::Event_EventType::PUT => Self::Put, 704 | kv::Event_EventType::DELETE => Self::Delete, 705 | } 706 | } 707 | } 708 | 709 | /// Every change to every key is represented with Event messages. 710 | #[derive(Debug)] 711 | pub struct Event { 712 | /// Etcd event proto. 713 | proto: kv::Event, 714 | } 715 | 716 | impl Event { 717 | /// Takes the key-value pair out of response, leaving a `None` in its place. 718 | #[inline] 719 | pub fn take_kvs(&mut self) -> Option { 720 | self.proto.kv.take().map(From::from) 721 | } 722 | 723 | /// Get the type of event 724 | #[inline] 725 | pub fn event_type(&self) -> EventType { 726 | EventType::from(self.proto.get_field_type()) 727 | } 728 | } 729 | 730 | impl From for Event { 731 | #[inline] 732 | fn from(event: kv::Event) -> Self { 733 | Self { proto: event } 734 | } 735 | } 736 | -------------------------------------------------------------------------------- /etcd-client/src/watch/watch_impl.rs: -------------------------------------------------------------------------------- 1 | use crate::protos::rpc::{ 2 | WatchCancelRequest, WatchCreateRequest, WatchRequest, WatchRequest_oneof_request_union, 3 | WatchResponse, 4 | }; 5 | use crate::Event; 6 | use crate::KeyRange; 7 | use crate::ResponseHeader; 8 | use clippy_utilities::Cast; 9 | 10 | /// Request for creating or canceling watch. 11 | #[derive(Debug, Clone)] 12 | pub struct EtcdWatchRequest { 13 | /// Etcd watch key request. 14 | proto: WatchRequest, 15 | } 16 | 17 | impl EtcdWatchRequest { 18 | /// Creates a new `WatchRequest` which will subscribe events of the specified key. 19 | #[inline] 20 | #[must_use] 21 | pub fn create(mut key_range: KeyRange) -> Self { 22 | let watch_create_request = WatchCreateRequest { 23 | key: key_range.take_key(), 24 | range_end: key_range.take_range_end(), 25 | start_revision: 0, 26 | progress_notify: false, 27 | filters: vec![], 28 | prev_kv: false, 29 | ..WatchCreateRequest::default() 30 | }; 31 | 32 | let mut watch_request = WatchRequest::new(); 33 | watch_request.set_create_request(watch_create_request); 34 | 35 | Self { 36 | proto: watch_request, 37 | } 38 | } 39 | 40 | /// Creates a new `WatchRequest` which will unsubscribe the specified watch. 41 | #[inline] 42 | #[must_use] 43 | pub fn cancel(watch_id: i64) -> Self { 44 | let mut watch_cancel_request = WatchCancelRequest::new(); 45 | watch_cancel_request.set_watch_id(watch_id); 46 | let mut watch_request = WatchRequest::new(); 47 | watch_request.set_cancel_request(watch_cancel_request); 48 | Self { 49 | proto: watch_request, 50 | } 51 | } 52 | 53 | /// Sets the revision to watch from (inclusive). No `start_revision` is "now". 54 | /// It only effects when the request is for subscribing. 55 | #[inline] 56 | pub fn set_start_revision(&mut self, revision: usize) { 57 | // TODO log warning if not CreateRequest 58 | if let Some(&mut WatchRequest_oneof_request_union::create_request(ref mut req)) = 59 | self.proto.request_union.as_mut() 60 | { 61 | req.start_revision = revision.cast(); 62 | } 63 | } 64 | 65 | /// Sets progress notify. 66 | /// It only effects when the request is for subscribing. 67 | #[inline] 68 | pub fn set_progress_notify(&mut self, progress_notify: bool) { 69 | // TODO log warning if not CreateRequest 70 | if let Some(&mut WatchRequest_oneof_request_union::create_request(ref mut req)) = 71 | self.proto.request_union.as_mut() 72 | { 73 | req.progress_notify = progress_notify; 74 | } 75 | } 76 | 77 | /// Sets previous key value. 78 | /// It only effects when the request is for subscribing. 79 | #[inline] 80 | pub fn set_prev_kv(&mut self, prev_kv: bool) { 81 | // TODO log warning if not CreateRequest 82 | if let Some(&mut WatchRequest_oneof_request_union::create_request(ref mut req)) = 83 | self.proto.request_union.as_mut() 84 | { 85 | req.prev_kv = prev_kv; 86 | } 87 | } 88 | 89 | /// Gets the watch key. 90 | /// It only effects when the request is for subscribing. 91 | #[inline] 92 | pub fn get_key(&self) -> Vec { 93 | if let Some(WatchRequest_oneof_request_union::create_request(req)) = 94 | self.proto.request_union.clone() 95 | { 96 | return req.get_key().to_vec(); 97 | } 98 | vec![] 99 | } 100 | /// Gets the watch key range end. 101 | /// It only effects when the request is for subscribing. 102 | #[inline] 103 | pub fn get_range_end(&self) -> Vec { 104 | if let &Some(WatchRequest_oneof_request_union::create_request(ref req)) = 105 | &self.proto.request_union 106 | { 107 | return req.get_range_end().to_vec(); 108 | } 109 | vec![] 110 | } 111 | /// Returns if the watch request is a create watch request. 112 | #[inline] 113 | pub fn is_create(&self) -> bool { 114 | if self.proto.has_create_request() { 115 | return true; 116 | } 117 | false 118 | } 119 | } 120 | 121 | impl From for WatchRequest { 122 | #[inline] 123 | fn from(e: EtcdWatchRequest) -> Self { 124 | e.proto 125 | } 126 | } 127 | 128 | #[derive(Debug, Clone)] 129 | /// Watch Response 130 | pub struct EtcdWatchResponse { 131 | /// Etcd watch key response. 132 | proto: WatchResponse, 133 | } 134 | 135 | impl EtcdWatchResponse { 136 | /// Takes the header out of response, leaving a `None` in its place. 137 | #[inline] 138 | pub fn take_header(&mut self) -> Option { 139 | self.proto.header.take().map(From::from) 140 | } 141 | 142 | /// Gets the ID of the watcher that corresponds to the response. 143 | #[inline] 144 | pub fn watch_id(&self) -> u64 { 145 | self.proto.watch_id.cast() 146 | } 147 | 148 | /// Takes the events out of response, leaving an empty vector in its place. 149 | #[inline] 150 | pub fn take_events(&mut self) -> Vec { 151 | let events = std::mem::take(&mut self.proto.events); 152 | 153 | events.into_iter().map(From::from).collect() 154 | } 155 | } 156 | 157 | impl From for EtcdWatchResponse { 158 | #[inline] 159 | fn from(resp: WatchResponse) -> Self { 160 | Self { proto: resp } 161 | } 162 | } 163 | -------------------------------------------------------------------------------- /mock-etcd/.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 6 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 7 | Cargo.lock 8 | 9 | # These are backup files generated by rustfmt 10 | **/*.rs.bk 11 | -------------------------------------------------------------------------------- /mock-etcd/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mock-etcd" 3 | version = "0.1.0" 4 | authors = ["Zheng Pan "] 5 | edition = "2021" 6 | keywords = ["etcd"] 7 | repository = "https://github.com/datenlord/mock-etcd" 8 | description = "mock etcd" 9 | license = "MIT" 10 | categories = ["storage"] 11 | 12 | [dependencies] 13 | async-lock = "2.3.0" 14 | async-io = "1.6.0" 15 | env_logger = "0.9.3" 16 | futures = "0.3.5" 17 | grpcio = { version = "0.9.1", default-features = false, features = [ 18 | "protobuf-codec", 19 | ] } 20 | log = "0.4.11" 21 | protobuf = "2.16.2" 22 | utilities = { git = "https://github.com/datenlord/utilities", rev = "4ef408d" } 23 | smol = { version = "1.3.0" } 24 | async-recursion = "1.0.4" 25 | 26 | [build-dependencies] 27 | protoc-grpcio = "3.0.0" 28 | 29 | [dev-dependencies] 30 | etcd-client = { path = "../etcd-client" } 31 | async-compat = "0.2.1" 32 | -------------------------------------------------------------------------------- /mock-etcd/README.md: -------------------------------------------------------------------------------- 1 | # mock_etcd 2 | -------------------------------------------------------------------------------- /mock-etcd/build.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | // grpcio depends on cmake, g++ and protoc, 3 | // run the following command to install: 4 | // `sudo apt install cmake g++ libprotobuf-dev protobuf-compiler` 5 | protoc_grpcio::compile_grpc_protos( 6 | &[ 7 | "proto/auth.proto", 8 | "proto/kv.proto", 9 | "proto/rpc.proto", 10 | "proto/lock.proto", 11 | ], // inputs 12 | &[".."], // includes 13 | "src", // output 14 | None, // customizations 15 | ) 16 | .unwrap_or_else(|_| panic!("Failed to compile gRPC definitions!")); 17 | } 18 | -------------------------------------------------------------------------------- /mock-etcd/src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Mock etcd service 2 | 3 | #![deny( 4 | // The following are allowed by default lints according to 5 | // https://doc.rust-lang.org/rustc/lints/listing/allowed-by-default.html 6 | anonymous_parameters, 7 | bare_trait_objects, 8 | // box_pointers, // futures involve boxed pointers 9 | // elided_lifetimes_in_paths, // allow anonymous lifetime 10 | missing_copy_implementations, 11 | missing_debug_implementations, 12 | missing_docs, // TODO: add documents 13 | single_use_lifetimes, // TODO: fix lifetime names only used once 14 | trivial_casts, // TODO: remove trivial casts in code 15 | trivial_numeric_casts, 16 | // unreachable_pub, allow clippy::redundant_pub_crate lint instead 17 | unsafe_code, 18 | unstable_features, 19 | unused_extern_crates, 20 | unused_import_braces, 21 | unused_qualifications, 22 | // unused_results, // TODO: fix unused results 23 | variant_size_differences, 24 | 25 | warnings, // treat all wanings as errors 26 | 27 | clippy::all, 28 | clippy::restriction, 29 | clippy::pedantic, 30 | clippy::nursery, 31 | clippy::cargo, 32 | )] 33 | #![allow( 34 | // Some explicitly allowed Clippy lints, must have clear reason to allow 35 | clippy::blanket_clippy_restriction_lints, // allow denying clippy::restriction directly 36 | clippy::implicit_return, // actually omitting the return keyword is idiomatic Rust code 37 | clippy::module_name_repetitions, // repeation of module name in a struct name is not big deal 38 | clippy::multiple_crate_versions, // multi-version dependency crates is not able to fix 39 | clippy::panic, // allow debug_assert, panic in production code 40 | clippy::shadow_same, //it’s a common pattern in Rust code 41 | clippy::shadow_unrelated, 42 | clippy::shadow_reuse, 43 | clippy::same_name_method, 44 | clippy::separated_literal_suffix, 45 | clippy::mod_module_files, // TODO: fix code structure to pass this lint 46 | )] 47 | 48 | // Ignore format and lint to generated code 49 | #[rustfmt::skip] 50 | #[allow( 51 | unreachable_pub, 52 | clippy::all, 53 | clippy::restriction, 54 | clippy::pedantic, 55 | clippy::nursery, 56 | clippy::cargo, 57 | clippy::shadow_same, //it’s a common pattern in Rust code 58 | clippy::shadow_unrelated, 59 | clippy::shadow_reuse, 60 | clippy::same_name_method, 61 | clippy::separated_literal_suffix, 62 | clippy::mod_module_files, // TODO: fix code structure to pass this lint 63 | )] 64 | mod auth; 65 | // Ignore format and lint to generated code 66 | #[rustfmt::skip] 67 | #[allow( 68 | unreachable_pub, 69 | clippy::all, 70 | clippy::restriction, 71 | clippy::pedantic, 72 | clippy::nursery, 73 | clippy::cargo, 74 | clippy::shadow_same, //it’s a common pattern in Rust code 75 | clippy::shadow_unrelated, 76 | clippy::shadow_reuse, 77 | clippy::same_name_method, 78 | clippy::separated_literal_suffix, 79 | clippy::mod_module_files, // TODO: fix code structure to pass this lint 80 | )] 81 | mod kv; 82 | // Ignore format and lint to generated code 83 | #[rustfmt::skip] 84 | #[allow( 85 | variant_size_differences, 86 | unreachable_pub, 87 | clippy::all, 88 | clippy::restriction, 89 | clippy::pedantic, 90 | clippy::nursery, 91 | clippy::cargo, 92 | clippy::shadow_same, //it’s a common pattern in Rust code 93 | clippy::shadow_unrelated, 94 | clippy::shadow_reuse, 95 | clippy::same_name_method, 96 | clippy::separated_literal_suffix, 97 | clippy::mod_module_files, // TODO: fix code structure to pass this lint 98 | )] 99 | mod rpc; 100 | // Ignore format and lint to generated code 101 | #[rustfmt::skip] 102 | #[allow( 103 | unreachable_pub, 104 | clippy::all, 105 | clippy::restriction, 106 | clippy::pedantic, 107 | clippy::nursery, 108 | clippy::cargo, 109 | clippy::shadow_same, //it’s a common pattern in Rust code 110 | clippy::shadow_unrelated, 111 | clippy::shadow_reuse, 112 | clippy::same_name_method, 113 | clippy::separated_literal_suffix, 114 | clippy::mod_module_files, // TODO: fix code structure to pass this lint 115 | )] 116 | mod rpc_grpc; 117 | // Ignore format and lint to generated code 118 | #[rustfmt::skip] 119 | #[allow( 120 | unreachable_pub, 121 | clippy::all, 122 | clippy::restriction, 123 | clippy::pedantic, 124 | clippy::nursery, 125 | clippy::cargo, 126 | clippy::shadow_same, //it’s a common pattern in Rust code 127 | clippy::shadow_unrelated, 128 | clippy::shadow_reuse, 129 | clippy::same_name_method, 130 | clippy::separated_literal_suffix, 131 | clippy::mod_module_files, // TODO: fix code structure to pass this lint 132 | )] 133 | mod lock_grpc; 134 | // Ignore format and lint to generated code 135 | #[rustfmt::skip] 136 | #[allow( 137 | unreachable_pub, 138 | clippy::all, 139 | clippy::restriction, 140 | clippy::pedantic, 141 | clippy::nursery, 142 | clippy::cargo, 143 | clippy::shadow_same, //it’s a common pattern in Rust code 144 | clippy::shadow_unrelated, 145 | clippy::shadow_reuse, 146 | clippy::same_name_method, 147 | clippy::separated_literal_suffix, 148 | clippy::mod_module_files, // TODO: fix code structure to pass this lint 149 | )] 150 | mod lock; 151 | 152 | mod mock_etcd; 153 | 154 | /// Mock etcd server 155 | pub use crate::mock_etcd::MockEtcdServer; 156 | -------------------------------------------------------------------------------- /proto/auth.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package authpb; 4 | 5 | // User is a single entry in the bucket authUsers 6 | message User { 7 | bytes name = 1; 8 | bytes password = 2; 9 | repeated string roles = 3; 10 | } 11 | 12 | // Permission is a single entity 13 | message Permission { 14 | enum Type { 15 | READ = 0; 16 | WRITE = 1; 17 | READWRITE = 2; 18 | } 19 | Type permType = 1; 20 | 21 | bytes key = 2; 22 | bytes range_end = 3; 23 | } 24 | 25 | // Role is a single entry in the bucket authRoles 26 | message Role { 27 | bytes name = 1; 28 | 29 | repeated Permission keyPermission = 2; 30 | } 31 | -------------------------------------------------------------------------------- /proto/kv.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package mvccpb; 4 | 5 | message KeyValue { 6 | // key is the key in bytes. An empty key is not allowed. 7 | bytes key = 1; 8 | // create_revision is the revision of last creation on this key. 9 | int64 create_revision = 2; 10 | // mod_revision is the revision of last modification on this key. 11 | int64 mod_revision = 3; 12 | // version is the version of the key. A deletion resets 13 | // the version to zero and any modification of the key 14 | // increases its version. 15 | int64 version = 4; 16 | // value is the value held by the key, in bytes. 17 | bytes value = 5; 18 | // lease is the ID of the lease that attached to key. 19 | // When the attached lease expires, the key will be deleted. 20 | // If lease is 0, then no lease is attached to the key. 21 | int64 lease = 6; 22 | } 23 | 24 | message Event { 25 | enum EventType { 26 | PUT = 0; 27 | DELETE = 1; 28 | } 29 | // type is the kind of event. If type is a PUT, it indicates 30 | // new data has been stored to the key. If type is a DELETE, 31 | // it indicates the key was deleted. 32 | EventType type = 1; 33 | // kv holds the KeyValue for the event. 34 | // A PUT event contains current kv pair. 35 | // A PUT event with kv.Version=1 indicates the creation of a key. 36 | // A DELETE/EXPIRE event contains the deleted key with 37 | // its modification revision set to the revision of deletion. 38 | KeyValue kv = 2; 39 | 40 | // prev_kv holds the key-value pair before the event happens. 41 | KeyValue prev_kv = 3; 42 | } 43 | -------------------------------------------------------------------------------- /proto/lock.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | import "proto/rpc.proto"; 4 | 5 | package v3lockpb; 6 | 7 | // The lock service exposes client-side locking facilities as a gRPC interface. 8 | service Lock { 9 | // Lock acquires a distributed shared lock on a given named lock. 10 | // On success, it will return a unique key that exists so long as the 11 | // lock is held by the caller. This key can be used in conjunction with 12 | // transactions to safely ensure updates to etcd only occur while holding 13 | // lock ownership. The lock is held until Unlock is called on the key or the 14 | // lease associate with the owner expires. 15 | rpc Lock(LockRequest) returns (LockResponse) {} 16 | 17 | // Unlock takes a key returned by Lock and releases the hold on lock. The 18 | // next Lock caller waiting for the lock will then be woken up and given 19 | // ownership of the lock. 20 | rpc Unlock(UnlockRequest) returns (UnlockResponse) {} 21 | } 22 | 23 | message LockRequest { 24 | // name is the identifier for the distributed shared lock to be acquired. 25 | bytes name = 1; 26 | // lease is the ID of the lease that will be attached to ownership of the 27 | // lock. If the lease expires or is revoked and currently holds the lock, 28 | // the lock is automatically released. Calls to Lock with the same lease will 29 | // be treated as a single acquistion; locking twice with the same lease is a 30 | // no-op. 31 | int64 lease = 2; 32 | } 33 | 34 | message LockResponse { 35 | etcdserverpb.ResponseHeader header = 1; 36 | // key is a key that will exist on etcd for the duration that the Lock caller 37 | // owns the lock. Users should not modify this key or the lock may exhibit 38 | // undefined behavior. 39 | bytes key = 2; 40 | } 41 | 42 | message UnlockRequest { 43 | // key is the lock ownership key granted by Lock. 44 | bytes key = 1; 45 | } 46 | 47 | message UnlockResponse { 48 | etcdserverpb.ResponseHeader header = 1; 49 | } -------------------------------------------------------------------------------- /proto/rpc.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | import "proto/kv.proto"; 4 | import "proto/auth.proto"; 5 | 6 | package etcdserverpb; 7 | 8 | service KV { 9 | // Range gets the keys in the range from the key-value store. 10 | rpc Range(RangeRequest) returns (RangeResponse) {} 11 | 12 | // Put puts the given key into the key-value store. 13 | // A put request increments the revision of the key-value store 14 | // and generates one event in the event history. 15 | rpc Put(PutRequest) returns (PutResponse) {} 16 | 17 | // DeleteRange deletes the given range from the key-value store. 18 | // A delete request increments the revision of the key-value store 19 | // and generates a delete event in the event history for every deleted key. 20 | rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) {} 21 | 22 | // Txn processes multiple requests in a single transaction. 23 | // A txn request increments the revision of the key-value store 24 | // and generates events with the same revision for every completed request. 25 | // It is not allowed to modify the same key several times within one txn. 26 | rpc Txn(TxnRequest) returns (TxnResponse) {} 27 | 28 | // Compact compacts the event history in the etcd key-value store. The key-value 29 | // store should be periodically compacted or the event history will continue to grow 30 | // indefinitely. 31 | rpc Compact(CompactionRequest) returns (CompactionResponse) {} 32 | } 33 | 34 | service Watch { 35 | // Watch watches for events happening or that have happened. Both input and output 36 | // are streams; the input stream is for creating and canceling watchers and the output 37 | // stream sends events. One watch RPC can watch on multiple key ranges, streaming events 38 | // for several watches at once. The entire event history can be watched starting from the 39 | // last compaction revision. 40 | rpc Watch(stream WatchRequest) returns (stream WatchResponse) {} 41 | } 42 | 43 | service Lease { 44 | // LeaseGrant creates a lease which expires if the server does not receive a keepAlive 45 | // within a given time to live period. All keys attached to the lease will be expired and 46 | // deleted if the lease expires. Each expired key generates a delete event in the event history. 47 | rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) {} 48 | 49 | // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. 50 | rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) {} 51 | 52 | // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client 53 | // to the server and streaming keep alive responses from the server to the client. 54 | rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) {} 55 | 56 | // LeaseTimeToLive retrieves lease information. 57 | rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) {} 58 | 59 | // TODO(xiangli) List all existing Leases? 60 | } 61 | 62 | service Cluster { 63 | // MemberAdd adds a member into the cluster. 64 | rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) {} 65 | 66 | // MemberRemove removes an existing member from the cluster. 67 | rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) {} 68 | 69 | // MemberUpdate updates the member configuration. 70 | rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) {} 71 | 72 | // MemberList lists all the members in the cluster. 73 | rpc MemberList(MemberListRequest) returns (MemberListResponse) {} 74 | } 75 | 76 | service Maintenance { 77 | // Alarm activates, deactivates, and queries alarms regarding cluster health. 78 | rpc Alarm(AlarmRequest) returns (AlarmResponse) {} 79 | 80 | // Status gets the status of the member. 81 | rpc Status(StatusRequest) returns (StatusResponse) {} 82 | 83 | // Defragment defragments a member's backend database to recover storage space. 84 | rpc Defragment(DefragmentRequest) returns (DefragmentResponse) {} 85 | 86 | // Hash returns the hash of the local KV state for consistency checking purpose. 87 | // This is designed for testing; do not use this in production when there 88 | // are ongoing transactions. 89 | rpc Hash(HashRequest) returns (HashResponse) {} 90 | 91 | // HashKV computes the hash of all MVCC keys up to a given revision. 92 | rpc HashKV(HashKVRequest) returns (HashKVResponse) {} 93 | 94 | // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. 95 | rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) {} 96 | 97 | // MoveLeader requests current leader node to transfer its leadership to transferee. 98 | rpc MoveLeader(MoveLeaderRequest) returns (MoveLeaderResponse) {} 99 | } 100 | 101 | service Auth { 102 | // AuthEnable enables authentication. 103 | rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) {} 104 | 105 | // AuthDisable disables authentication. 106 | rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) {} 107 | 108 | // Authenticate processes an authenticate request. 109 | rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) {} 110 | 111 | // UserAdd adds a new user. 112 | rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) {} 113 | 114 | // UserGet gets detailed user information. 115 | rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) {} 116 | 117 | // UserList gets a list of all users. 118 | rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) {} 119 | 120 | // UserDelete deletes a specified user. 121 | rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) {} 122 | 123 | // UserChangePassword changes the password of a specified user. 124 | rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) {} 125 | 126 | // UserGrant grants a role to a specified user. 127 | rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) {} 128 | 129 | // UserRevokeRole revokes a role of specified user. 130 | rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) {} 131 | 132 | // RoleAdd adds a new role. 133 | rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) {} 134 | 135 | // RoleGet gets detailed role information. 136 | rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) {} 137 | 138 | // RoleList gets lists of all roles. 139 | rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) {} 140 | 141 | // RoleDelete deletes a specified role. 142 | rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) {} 143 | 144 | // RoleGrantPermission grants a permission of a specified key or range to a specified role. 145 | rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) {} 146 | 147 | // RoleRevokePermission revokes a key or range permission of a specified role. 148 | rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) {} 149 | } 150 | 151 | message ResponseHeader { 152 | // cluster_id is the ID of the cluster which sent the response. 153 | uint64 cluster_id = 1; 154 | // member_id is the ID of the member which sent the response. 155 | uint64 member_id = 2; 156 | // revision is the key-value store revision when the request was applied. 157 | int64 revision = 3; 158 | // raft_term is the raft term when the request was applied. 159 | uint64 raft_term = 4; 160 | } 161 | 162 | message RangeRequest { 163 | enum SortOrder { 164 | NONE = 0; // default, no sorting 165 | ASCEND = 1; // lowest target value first 166 | DESCEND = 2; // highest target value first 167 | } 168 | enum SortTarget { 169 | KEY = 0; 170 | VERSION = 1; 171 | CREATE = 2; 172 | MOD = 3; 173 | VALUE = 4; 174 | } 175 | 176 | // key is the first key for the range. If range_end is not given, the request only looks up key. 177 | bytes key = 1; 178 | // range_end is the upper bound on the requested range [key, range_end). 179 | // If range_end is '\0', the range is all keys >= key. 180 | // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), 181 | // then the range request gets all keys prefixed with key. 182 | // If both key and range_end are '\0', then the range request returns all keys. 183 | bytes range_end = 2; 184 | // limit is a limit on the number of keys returned for the request. When limit is set to 0, 185 | // it is treated as no limit. 186 | int64 limit = 3; 187 | // revision is the point-in-time of the key-value store to use for the range. 188 | // If revision is less or equal to zero, the range is over the newest key-value store. 189 | // If the revision has been compacted, ErrCompacted is returned as a response. 190 | int64 revision = 4; 191 | 192 | // sort_order is the order for returned sorted results. 193 | SortOrder sort_order = 5; 194 | 195 | // sort_target is the key-value field to use for sorting. 196 | SortTarget sort_target = 6; 197 | 198 | // serializable sets the range request to use serializable member-local reads. 199 | // Range requests are linearizable by default; linearizable requests have higher 200 | // latency and lower throughput than serializable requests but reflect the current 201 | // consensus of the cluster. For better performance, in exchange for possible stale reads, 202 | // a serializable range request is served locally without needing to reach consensus 203 | // with other nodes in the cluster. 204 | bool serializable = 7; 205 | 206 | // keys_only when set returns only the keys and not the values. 207 | bool keys_only = 8; 208 | 209 | // count_only when set returns only the count of the keys in the range. 210 | bool count_only = 9; 211 | 212 | // min_mod_revision is the lower bound for returned key mod revisions; all keys with 213 | // lesser mod revisions will be filtered away. 214 | int64 min_mod_revision = 10; 215 | 216 | // max_mod_revision is the upper bound for returned key mod revisions; all keys with 217 | // greater mod revisions will be filtered away. 218 | int64 max_mod_revision = 11; 219 | 220 | // min_create_revision is the lower bound for returned key create revisions; all keys with 221 | // lesser create trevisions will be filtered away. 222 | int64 min_create_revision = 12; 223 | 224 | // max_create_revision is the upper bound for returned key create revisions; all keys with 225 | // greater create revisions will be filtered away. 226 | int64 max_create_revision = 13; 227 | } 228 | 229 | message RangeResponse { 230 | ResponseHeader header = 1; 231 | // kvs is the list of key-value pairs matched by the range request. 232 | // kvs is empty when count is requested. 233 | repeated mvccpb.KeyValue kvs = 2; 234 | // more indicates if there are more keys to return in the requested range. 235 | bool more = 3; 236 | // count is set to the number of keys within the range when requested. 237 | int64 count = 4; 238 | } 239 | 240 | message PutRequest { 241 | // key is the key, in bytes, to put into the key-value store. 242 | bytes key = 1; 243 | // value is the value, in bytes, to associate with the key in the key-value store. 244 | bytes value = 2; 245 | // lease is the lease ID to associate with the key in the key-value store. A lease 246 | // value of 0 indicates no lease. 247 | int64 lease = 3; 248 | 249 | // If prev_kv is set, etcd gets the previous key-value pair before changing it. 250 | // The previous key-value pair will be returned in the put response. 251 | bool prev_kv = 4; 252 | 253 | // If ignore_value is set, etcd updates the key using its current value. 254 | // Returns an error if the key does not exist. 255 | bool ignore_value = 5; 256 | 257 | // If ignore_lease is set, etcd updates the key using its current lease. 258 | // Returns an error if the key does not exist. 259 | bool ignore_lease = 6; 260 | } 261 | 262 | message PutResponse { 263 | ResponseHeader header = 1; 264 | // if prev_kv is set in the request, the previous key-value pair will be returned. 265 | mvccpb.KeyValue prev_kv = 2; 266 | } 267 | 268 | message DeleteRangeRequest { 269 | // key is the first key to delete in the range. 270 | bytes key = 1; 271 | // range_end is the key following the last key to delete for the range [key, range_end). 272 | // If range_end is not given, the range is defined to contain only the key argument. 273 | // If range_end is one bit larger than the given key, then the range is all the keys 274 | // with the prefix (the given key). 275 | // If range_end is '\0', the range is all keys greater than or equal to the key argument. 276 | bytes range_end = 2; 277 | 278 | // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. 279 | // The previous key-value pairs will be returned in the delete response. 280 | bool prev_kv = 3; 281 | } 282 | 283 | message DeleteRangeResponse { 284 | ResponseHeader header = 1; 285 | // deleted is the number of keys deleted by the delete range request. 286 | int64 deleted = 2; 287 | // if prev_kv is set in the request, the previous key-value pairs will be returned. 288 | repeated mvccpb.KeyValue prev_kvs = 3; 289 | } 290 | 291 | message RequestOp { 292 | // request is a union of request types accepted by a transaction. 293 | oneof request { 294 | RangeRequest request_range = 1; 295 | PutRequest request_put = 2; 296 | DeleteRangeRequest request_delete_range = 3; 297 | TxnRequest request_txn = 4; 298 | } 299 | } 300 | 301 | message ResponseOp { 302 | // response is a union of response types returned by a transaction. 303 | oneof response { 304 | RangeResponse response_range = 1; 305 | PutResponse response_put = 2; 306 | DeleteRangeResponse response_delete_range = 3; 307 | TxnResponse response_txn = 4; 308 | } 309 | } 310 | 311 | message Compare { 312 | enum CompareResult { 313 | EQUAL = 0; 314 | GREATER = 1; 315 | LESS = 2; 316 | NOT_EQUAL = 3; 317 | } 318 | enum CompareTarget { 319 | VERSION = 0; 320 | CREATE = 1; 321 | MOD = 2; 322 | VALUE= 3; 323 | } 324 | // result is logical comparison operation for this comparison. 325 | CompareResult result = 1; 326 | // target is the key-value field to inspect for the comparison. 327 | CompareTarget target = 2; 328 | // key is the subject key for the comparison operation. 329 | bytes key = 3; 330 | oneof target_union { 331 | // version is the version of the given key 332 | int64 version = 4; 333 | // create_revision is the creation revision of the given key 334 | int64 create_revision = 5; 335 | // mod_revision is the last modified revision of the given key. 336 | int64 mod_revision = 6; 337 | // value is the value of the given key, in bytes. 338 | bytes value = 7; 339 | } 340 | // range_end compares the given target to all keys in the range [key, range_end). 341 | // See RangeRequest for more details on key ranges. 342 | bytes range_end = 8; 343 | // TODO: fill out with most of the rest of RangeRequest fields when needed. 344 | } 345 | 346 | // From google paxosdb paper: 347 | // Our implementation hinges around a powerful primitive which we call MultiOp. All other database 348 | // operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically 349 | // and consists of three components: 350 | // 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check 351 | // for the absence or presence of a value, or compare with a given value. Two different tests in the guard 352 | // may apply to the same or different entries in the database. All tests in the guard are applied and 353 | // MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise 354 | // it executes f op (see item 3 below). 355 | // 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or 356 | // lookup operation, and applies to a single database entry. Two different operations in the list may apply 357 | // to the same or different entries in the database. These operations are executed 358 | // if guard evaluates to 359 | // true. 360 | // 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false. 361 | message TxnRequest { 362 | // compare is a list of predicates representing a conjunction of terms. 363 | // If the comparisons succeed, then the success requests will be processed in order, 364 | // and the response will contain their respective responses in order. 365 | // If the comparisons fail, then the failure requests will be processed in order, 366 | // and the response will contain their respective responses in order. 367 | repeated Compare compare = 1; 368 | // success is a list of requests which will be applied when compare evaluates to true. 369 | repeated RequestOp success = 2; 370 | // failure is a list of requests which will be applied when compare evaluates to false. 371 | repeated RequestOp failure = 3; 372 | } 373 | 374 | message TxnResponse { 375 | ResponseHeader header = 1; 376 | // succeeded is set to true if the compare evaluated to true or false otherwise. 377 | bool succeeded = 2; 378 | // responses is a list of responses corresponding to the results from applying 379 | // success if succeeded is true or failure if succeeded is false. 380 | repeated ResponseOp responses = 3; 381 | } 382 | 383 | // CompactionRequest compacts the key-value store up to a given revision. All superseded keys 384 | // with a revision less than the compaction revision will be removed. 385 | message CompactionRequest { 386 | // revision is the key-value store revision for the compaction operation. 387 | int64 revision = 1; 388 | // physical is set so the RPC will wait until the compaction is physically 389 | // applied to the local database such that compacted entries are totally 390 | // removed from the backend database. 391 | bool physical = 2; 392 | } 393 | 394 | message CompactionResponse { 395 | ResponseHeader header = 1; 396 | } 397 | 398 | message HashRequest { 399 | } 400 | 401 | message HashResponse { 402 | ResponseHeader header = 1; 403 | // hash is the hash value computed from the responding member's key-value store. 404 | uint32 hash = 2; 405 | } 406 | 407 | message HashKVRequest { 408 | // revision is the key-value store revision for the hash operation. 409 | int64 revision = 1; 410 | } 411 | 412 | message HashKVResponse { 413 | ResponseHeader header = 1; 414 | // hash is the hash value computed from the responding member's MVCC keys up to a given revision. 415 | uint32 hash = 2; 416 | // compact_revision is the compacted revision of key-value store when hash begins. 417 | int64 compact_revision = 3; 418 | } 419 | 420 | message SnapshotRequest { 421 | } 422 | 423 | message SnapshotResponse { 424 | // header has the current key-value store information. The first header in the snapshot 425 | // stream indicates the point in time of the snapshot. 426 | ResponseHeader header = 1; 427 | 428 | // remaining_bytes is the number of blob bytes to be sent after this message 429 | uint64 remaining_bytes = 2; 430 | 431 | // blob contains the next chunk of the snapshot in the snapshot stream. 432 | bytes blob = 3; 433 | } 434 | 435 | message WatchRequest { 436 | // request_union is a request to either create a new watcher or cancel an existing watcher. 437 | oneof request_union { 438 | WatchCreateRequest create_request = 1; 439 | WatchCancelRequest cancel_request = 2; 440 | } 441 | } 442 | 443 | message WatchCreateRequest { 444 | // key is the key to register for watching. 445 | bytes key = 1; 446 | // range_end is the end of the range [key, range_end) to watch. If range_end is not given, 447 | // only the key argument is watched. If range_end is equal to '\0', all keys greater than 448 | // or equal to the key argument are watched. 449 | // If the range_end is one bit larger than the given key, 450 | // then all keys with the prefix (the given key) will be watched. 451 | bytes range_end = 2; 452 | // start_revision is an optional revision to watch from (inclusive). No start_revision is "now". 453 | int64 start_revision = 3; 454 | // progress_notify is set so that the etcd server will periodically send a WatchResponse with 455 | // no events to the new watcher if there are no recent events. It is useful when clients 456 | // wish to recover a disconnected watcher starting from a recent known revision. 457 | // The etcd server may decide how often it will send notifications based on current load. 458 | bool progress_notify = 4; 459 | 460 | enum FilterType { 461 | // filter out put event. 462 | NOPUT = 0; 463 | // filter out delete event. 464 | NODELETE = 1; 465 | } 466 | // filters filter the events at server side before it sends back to the watcher. 467 | repeated FilterType filters = 5; 468 | 469 | // If prev_kv is set, created watcher gets the previous KV before the event happens. 470 | // If the previous KV is already compacted, nothing will be returned. 471 | bool prev_kv = 6; 472 | } 473 | 474 | message WatchCancelRequest { 475 | // watch_id is the watcher id to cancel so that no more events are transmitted. 476 | int64 watch_id = 1; 477 | } 478 | 479 | message WatchResponse { 480 | ResponseHeader header = 1; 481 | // watch_id is the ID of the watcher that corresponds to the response. 482 | int64 watch_id = 2; 483 | // created is set to true if the response is for a create watch request. 484 | // The client should record the watch_id and expect to receive events for 485 | // the created watcher from the same stream. 486 | // All events sent to the created watcher will attach with the same watch_id. 487 | bool created = 3; 488 | // canceled is set to true if the response is for a cancel watch request. 489 | // No further events will be sent to the canceled watcher. 490 | bool canceled = 4; 491 | // compact_revision is set to the minimum index if a watcher tries to watch 492 | // at a compacted index. 493 | // 494 | // This happens when creating a watcher at a compacted revision or the watcher cannot 495 | // catch up with the progress of the key-value store. 496 | // 497 | // The client should treat the watcher as canceled and should not try to create any 498 | // watcher with the same start_revision again. 499 | int64 compact_revision = 5; 500 | 501 | // cancel_reason indicates the reason for canceling the watcher. 502 | string cancel_reason = 6; 503 | 504 | repeated mvccpb.Event events = 11; 505 | } 506 | 507 | message LeaseGrantRequest { 508 | // TTL is the advisory time-to-live in seconds. 509 | int64 TTL = 1; 510 | // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID. 511 | int64 ID = 2; 512 | } 513 | 514 | message LeaseGrantResponse { 515 | ResponseHeader header = 1; 516 | // ID is the lease ID for the granted lease. 517 | int64 ID = 2; 518 | // TTL is the server chosen lease time-to-live in seconds. 519 | int64 TTL = 3; 520 | string error = 4; 521 | } 522 | 523 | message LeaseRevokeRequest { 524 | // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. 525 | int64 ID = 1; 526 | } 527 | 528 | message LeaseRevokeResponse { 529 | ResponseHeader header = 1; 530 | } 531 | 532 | message LeaseKeepAliveRequest { 533 | // ID is the lease ID for the lease to keep alive. 534 | int64 ID = 1; 535 | } 536 | 537 | message LeaseKeepAliveResponse { 538 | ResponseHeader header = 1; 539 | // ID is the lease ID from the keep alive request. 540 | int64 ID = 2; 541 | // TTL is the new time-to-live for the lease. 542 | int64 TTL = 3; 543 | } 544 | 545 | message LeaseTimeToLiveRequest { 546 | // ID is the lease ID for the lease. 547 | int64 ID = 1; 548 | // keys is true to query all the keys attached to this lease. 549 | bool keys = 2; 550 | } 551 | 552 | message LeaseTimeToLiveResponse { 553 | ResponseHeader header = 1; 554 | // ID is the lease ID from the keep alive request. 555 | int64 ID = 2; 556 | // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. 557 | int64 TTL = 3; 558 | // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. 559 | int64 grantedTTL = 4; 560 | // Keys is the list of keys attached to this lease. 561 | repeated bytes keys = 5; 562 | } 563 | 564 | message Member { 565 | // ID is the member ID for this member. 566 | uint64 ID = 1; 567 | // name is the human-readable name of the member. If the member is not started, the name will be an empty string. 568 | string name = 2; 569 | // peerURLs is the list of URLs the member exposes to the cluster for communication. 570 | repeated string peerURLs = 3; 571 | // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. 572 | repeated string clientURLs = 4; 573 | } 574 | 575 | message MemberAddRequest { 576 | // peerURLs is the list of URLs the added member will use to communicate with the cluster. 577 | repeated string peerURLs = 1; 578 | } 579 | 580 | message MemberAddResponse { 581 | ResponseHeader header = 1; 582 | // member is the member information for the added member. 583 | Member member = 2; 584 | // members is a list of all members after adding the new member. 585 | repeated Member members = 3; 586 | } 587 | 588 | message MemberRemoveRequest { 589 | // ID is the member ID of the member to remove. 590 | uint64 ID = 1; 591 | } 592 | 593 | message MemberRemoveResponse { 594 | ResponseHeader header = 1; 595 | // members is a list of all members after removing the member. 596 | repeated Member members = 2; 597 | } 598 | 599 | message MemberUpdateRequest { 600 | // ID is the member ID of the member to update. 601 | uint64 ID = 1; 602 | // peerURLs is the new list of URLs the member will use to communicate with the cluster. 603 | repeated string peerURLs = 2; 604 | } 605 | 606 | message MemberUpdateResponse{ 607 | ResponseHeader header = 1; 608 | // members is a list of all members after updating the member. 609 | repeated Member members = 2; 610 | } 611 | 612 | message MemberListRequest { 613 | } 614 | 615 | message MemberListResponse { 616 | ResponseHeader header = 1; 617 | // members is a list of all members associated with the cluster. 618 | repeated Member members = 2; 619 | } 620 | 621 | message DefragmentRequest { 622 | } 623 | 624 | message DefragmentResponse { 625 | ResponseHeader header = 1; 626 | } 627 | 628 | message MoveLeaderRequest { 629 | // targetID is the node ID for the new leader. 630 | uint64 targetID = 1; 631 | } 632 | 633 | message MoveLeaderResponse { 634 | ResponseHeader header = 1; 635 | } 636 | 637 | enum AlarmType { 638 | NONE = 0; // default, used to query if any alarm is active 639 | NOSPACE = 1; // space quota is exhausted 640 | } 641 | 642 | message AlarmRequest { 643 | enum AlarmAction { 644 | GET = 0; 645 | ACTIVATE = 1; 646 | DEACTIVATE = 2; 647 | } 648 | // action is the kind of alarm request to issue. The action 649 | // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a 650 | // raised alarm. 651 | AlarmAction action = 1; 652 | // memberID is the ID of the member associated with the alarm. If memberID is 0, the 653 | // alarm request covers all members. 654 | uint64 memberID = 2; 655 | // alarm is the type of alarm to consider for this request. 656 | AlarmType alarm = 3; 657 | } 658 | 659 | message AlarmMember { 660 | // memberID is the ID of the member associated with the raised alarm. 661 | uint64 memberID = 1; 662 | // alarm is the type of alarm which has been raised. 663 | AlarmType alarm = 2; 664 | } 665 | 666 | message AlarmResponse { 667 | ResponseHeader header = 1; 668 | // alarms is a list of alarms associated with the alarm request. 669 | repeated AlarmMember alarms = 2; 670 | } 671 | 672 | message StatusRequest { 673 | } 674 | 675 | message StatusResponse { 676 | ResponseHeader header = 1; 677 | // version is the cluster protocol version used by the responding member. 678 | string version = 2; 679 | // dbSize is the size of the backend database, in bytes, of the responding member. 680 | int64 dbSize = 3; 681 | // leader is the member ID which the responding member believes is the current leader. 682 | uint64 leader = 4; 683 | // raftIndex is the current raft index of the responding member. 684 | uint64 raftIndex = 5; 685 | // raftTerm is the current raft term of the responding member. 686 | uint64 raftTerm = 6; 687 | } 688 | 689 | message AuthEnableRequest { 690 | } 691 | 692 | message AuthDisableRequest { 693 | } 694 | 695 | message AuthenticateRequest { 696 | string name = 1; 697 | string password = 2; 698 | } 699 | 700 | message AuthUserAddRequest { 701 | string name = 1; 702 | string password = 2; 703 | } 704 | 705 | message AuthUserGetRequest { 706 | string name = 1; 707 | } 708 | 709 | message AuthUserDeleteRequest { 710 | // name is the name of the user to delete. 711 | string name = 1; 712 | } 713 | 714 | message AuthUserChangePasswordRequest { 715 | // name is the name of the user whose password is being changed. 716 | string name = 1; 717 | // password is the new password for the user. 718 | string password = 2; 719 | } 720 | 721 | message AuthUserGrantRoleRequest { 722 | // user is the name of the user which should be granted a given role. 723 | string user = 1; 724 | // role is the name of the role to grant to the user. 725 | string role = 2; 726 | } 727 | 728 | message AuthUserRevokeRoleRequest { 729 | string name = 1; 730 | string role = 2; 731 | } 732 | 733 | message AuthRoleAddRequest { 734 | // name is the name of the role to add to the authentication system. 735 | string name = 1; 736 | } 737 | 738 | message AuthRoleGetRequest { 739 | string role = 1; 740 | } 741 | 742 | message AuthUserListRequest { 743 | } 744 | 745 | message AuthRoleListRequest { 746 | } 747 | 748 | message AuthRoleDeleteRequest { 749 | string role = 1; 750 | } 751 | 752 | message AuthRoleGrantPermissionRequest { 753 | // name is the name of the role which will be granted the permission. 754 | string name = 1; 755 | // perm is the permission to grant to the role. 756 | authpb.Permission perm = 2; 757 | } 758 | 759 | message AuthRoleRevokePermissionRequest { 760 | string role = 1; 761 | string key = 2; 762 | string range_end = 3; 763 | } 764 | 765 | message AuthEnableResponse { 766 | ResponseHeader header = 1; 767 | } 768 | 769 | message AuthDisableResponse { 770 | ResponseHeader header = 1; 771 | } 772 | 773 | message AuthenticateResponse { 774 | ResponseHeader header = 1; 775 | // token is an authorized token that can be used in succeeding RPCs 776 | string token = 2; 777 | } 778 | 779 | message AuthUserAddResponse { 780 | ResponseHeader header = 1; 781 | } 782 | 783 | message AuthUserGetResponse { 784 | ResponseHeader header = 1; 785 | 786 | repeated string roles = 2; 787 | } 788 | 789 | message AuthUserDeleteResponse { 790 | ResponseHeader header = 1; 791 | } 792 | 793 | message AuthUserChangePasswordResponse { 794 | ResponseHeader header = 1; 795 | } 796 | 797 | message AuthUserGrantRoleResponse { 798 | ResponseHeader header = 1; 799 | } 800 | 801 | message AuthUserRevokeRoleResponse { 802 | ResponseHeader header = 1; 803 | } 804 | 805 | message AuthRoleAddResponse { 806 | ResponseHeader header = 1; 807 | } 808 | 809 | message AuthRoleGetResponse { 810 | ResponseHeader header = 1; 811 | 812 | repeated authpb.Permission perm = 2; 813 | } 814 | 815 | message AuthRoleListResponse { 816 | ResponseHeader header = 1; 817 | 818 | repeated string roles = 2; 819 | } 820 | 821 | message AuthUserListResponse { 822 | ResponseHeader header = 1; 823 | 824 | repeated string users = 2; 825 | } 826 | 827 | message AuthRoleDeleteResponse { 828 | ResponseHeader header = 1; 829 | } 830 | 831 | message AuthRoleGrantPermissionResponse { 832 | ResponseHeader header = 1; 833 | } 834 | 835 | message AuthRoleRevokePermissionResponse { 836 | ResponseHeader header = 1; 837 | } 838 | --------------------------------------------------------------------------------