├── .env ├── .gitignore ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE ├── README.md ├── build.rs ├── examples ├── conflict_resolution_tests │ ├── .gitignore │ ├── Cargo.toml │ ├── README.md │ └── src │ │ ├── cache.rs │ │ └── main.rs └── ha_setup │ ├── Cargo.toml │ ├── README.md │ └── src │ └── main.rs ├── justfile ├── proto └── cache.proto ├── src ├── client.rs ├── lib.rs ├── quorum.rs ├── rpc │ ├── cache.rs │ └── mod.rs └── server.rs └── tls ├── ca └── x509 │ ├── end_entity │ ├── 1 │ │ ├── cert-chain.pem │ │ ├── cert-chain.pem.b64 │ │ ├── cert-chain.pem.b64-twice │ │ ├── cert.der │ │ ├── cert.fingerprint │ │ ├── cert.pem │ │ ├── key.der │ │ ├── key.der.hex │ │ ├── key.pem │ │ ├── key.pem.b64 │ │ └── key.pem.b64-twice │ └── serial │ ├── intermediate │ ├── ca-chain.pem │ ├── intermediate.cert.der │ ├── intermediate.cert.pem │ ├── intermediate.fingerprint │ ├── intermediate.key.der │ ├── intermediate.key.der.hex │ ├── intermediate.key.pem │ └── intermediate.key.pem.hex │ └── root │ ├── root.cert.der │ ├── root.cert.pem │ ├── root.fingerprint │ ├── root.key.der │ ├── root.key.der.hex │ ├── root.key.pem │ └── root.key.pem.hex ├── redhac.ca-chain.pem ├── redhac.cert-chain.pem └── redhac.key.pem /.env: -------------------------------------------------------------------------------- 1 | # If the cache should start in HA mode or standalone 2 | # accepts 'true|false', defaults to 'false' 3 | HA_MODE=true 4 | 5 | # The connection strings (with hostnames) of the HA instances as a CSV 6 | # Format: 'scheme://hostname:port' 7 | #HA_HOSTS="http://redhac.redhac:8080, http://redhac.redhac:8180 ,http://redhac.redhac:8280" 8 | #HA_HOSTS="https://redhac.redhac.local:8080, https://redhac.redhac.local:8180 ,https://redhac.redhac.local:8280" 9 | HA_HOSTS="https://127.0.0.1:8001, https://127.0.0.1:8002 ,https://127.0.0.1:8003" 10 | 11 | # This can overwrite the hostname which is used to identify each cache member. 12 | # Useful in scenarios, where all members are on the same host or for testing. 13 | #HOSTNAME_OVERWRITE="127.0.0.1:8080" 14 | 15 | # Secret token, which is used to authenticate the cache members 16 | CACHE_AUTH_TOKEN=SuperSafeSecretToken1337 17 | 18 | # Enable / disable TLS for the cache communication (default: true) 19 | CACHE_TLS=true 20 | 21 | # The path to the server TLS certificate PEM file (default: tls/redhac.cert-chain.pem) 22 | CACHE_TLS_SERVER_CERT=tls/redhac.cert-chain.pem 23 | # The path to the server TLS key PEM file (default: tls/redhac.key.pem) 24 | CACHE_TLS_SERVER_KEY=tls/redhac.key.pem 25 | 26 | # The path to the client mTLS certificate PEM file. This is optional. 27 | CACHE_TLS_CLIENT_CERT=tls/redhac.local.cert.pem 28 | # The path to the client mTLS key PEM file. This is optional. 29 | CACHE_TLS_CLIENT_KEY=tls/redhac.local.key.pem 30 | 31 | # If not empty, the PEM file from the specified location will be added as the CA certificate chain for validating 32 | # the servers TLS certificate. This is optional. 33 | CACHE_TLS_CA_SERVER=tls/ca-chain.cert.pem 34 | # If not empty, the PEM file from the specified location will be added as the CA certificate chain for validating 35 | # the clients mTLS certificate. This is optional. 36 | CACHE_TLS_CA_CLIENT=tls/ca-chain.cert.pem 37 | 38 | # The domain / CN the client should validate the certificate against. This domain MUST be inside the 39 | # 'X509v3 Subject Alternative Name' when you take a look at the servers certificate with the openssl tool. 
40 | # default: redhac.local 41 | CACHE_TLS_CLIENT_VALIDATE_DOMAIN=redhac.local 42 | 43 | # Can be used if you need to overwrite the SNI when the client connects to the server, for instance if you are behind 44 | # a loadbalancer which combines multiple certificates. (default: "") 45 | #CACHE_TLS_SNI_OVERWRITE= 46 | 47 | # Define different buffer sizes for channels between the components 48 | # Buffer for client request on the incoming stream - server side (default: 128) 49 | # Make sense to have the CACHE_BUF_SERVER set to: `(number of total HA cache hosts - 1) * CACHE_BUF_CLIENT` 50 | CACHE_BUF_SERVER=128 51 | # Buffer for client requests to remote servers for all cache operations (default: 64) 52 | CACHE_BUF_CLIENT=64 53 | 54 | # Connections Timeouts 55 | # The Server sends out keepalive pings with configured timeouts 56 | 57 | # The keepalive ping interval in seconds (default: 5) 58 | CACHE_KEEPALIVE_INTERVAL=5 59 | 60 | # The keepalive ping timeout in seconds (default: 5) 61 | CACHE_KEEPALIVE_TIMEOUT=5 62 | 63 | # The timeout for the leader election. If a newly saved leader request has not reached quorum after the timeout, the 64 | # leader will be reset and a new request will be sent out. 65 | # CAUTION: This should not be below CACHE_RECONNECT_TIMEOUT_UPPER, since cold starts and elections will be problematic in that case. 66 | # value in seconds, default: 2 67 | CACHE_ELECTION_TIMEOUT=2 68 | 69 | # These 2 values define the reconnect timeout for the HA Cache Clients. 70 | # The values are in ms and a random between these 2 will be chosen each time to avoid conflicts and race conditions 71 | # (default: 500) 72 | CACHE_RECONNECT_TIMEOUT_LOWER=500 73 | # (default: 2000) 74 | CACHE_RECONNECT_TIMEOUT_UPPER=2000 75 | 76 | RUST_LOG=info 77 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | target 3 | Cargo.lock 4 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## v0.10.5 4 | 5 | - update external dependencies, one of them being tonic which internally now is using `hyper:1` 6 | 7 | ## v0.10.4 8 | 9 | - reduced logging output - cluster info state is now being logged on debug level only to reduce output 10 | - Rust v1.79 clippy lints have been fixed 11 | 12 | ## v0.10.3 13 | 14 | - lower default values for: 15 | - `CACHE_RECONNECT_TIMEOUT_LOWER=500` 16 | - `CACHE_RECONNECT_TIMEOUT_UPPER=2000` 17 | - `CACHE_ELECTION_TIMEOUT=2` 18 | to have quicker recoveries in case of a broken pipe or other errors 19 | - additionally reduced tracing output on the `info` level 20 | - the `insert` functions have been made more resilient during graceful leader switches and fail-overs 21 | 22 | ## v0.10.2 23 | 24 | - Send some additional TCP keepalives for connections that are idle for longer periods of time to 25 | prevent them from being dropped. 
26 | - a little bit reduced logging for the `info` level 27 | 28 | ## v0.10.1 29 | 30 | - removes a possible panic that could occur, when a cache-receiving side abruptly cancels the task 31 | without waiting for an answer 32 | 33 | ## v0.10.0 34 | 35 | - core dependencies have been updated 36 | - latest rust nightly clippy lints have been applied 37 | - a very unlikely but possible channel panic in case of a conflict resolution has been fixed 38 | - bump MSRV to 1.70.0 39 | 40 | ## 0.9.1 41 | 42 | Typo corrections in documentation and removed now obsolete minimal-versions dependencies. 43 | 44 | ## 0.9.0 45 | 46 | ### Changes 47 | 48 | The TLS setup has been changed a bit to make it more flexible. 49 | The only mandatory values with `CACHE_TLS=true` are now: 50 | 51 | - `CACHE_TLS_SERVER_CERT` with new default: tls/redhac.cert-chain.pem 52 | - `CACHE_TLS_SERVER_KEY` with new default: tls/redhac.key.pem 53 | While the following ones are now optional and do not have a default anymore: 54 | - `CACHE_TLS_CLIENT_CERT` 55 | - `CACHE_TLS_CLIENT_KEY` 56 | - `CACHE_TLS_CA_SERVER` 57 | - `CACHE_TLS_CA_CLIENT` 58 | 59 | This makes it possible to use `redhac` with TLS without providing a private CA file, which you 60 | would never need, if you certificates can be validated on system level anyway already. Also, the 61 | mTLS setup is now optional with this change. 62 | 63 | Additionally, the TLS certificate generation in the Readme and Docs have been updated and use 64 | [Nioca](https://github.com/sebadob/nioca) for this task now, which is a lot more comfortable. 65 | The test TLS certificates are checked into git as well for a faster start and evaluation. 66 | Do not use them in production! 67 | 68 | ## 0.8.0 69 | 70 | ### Features 71 | 72 | This update introduces a small new feature. 73 | It is now possible to use the `redhac::quorum::QuorumHealthState` nicely externally. 74 | The `quorum` module has been exported as `pub` and the `QuorumHealthState` has been added as 75 | a `pub use` to the redhac main crate. This makes it possible for applications to actually use 76 | the `QuorumHealthState` and pass it around to functions without the need to always pass the 77 | whole `CacheConfig` each time. 78 | This is especially useful if you want to have your `CacheConfig.rx_health_state` in parts of 79 | your application. 80 | 81 | ### Changes 82 | 83 | - make `redhac::quorum::QuorumHealthState` pub usable 84 | - `pub mod` of the `quorum` crate (with some new `pub(crate)` exceptions) 85 | - bump versions or core dependencies 86 | - fix minimal versions for new dependencies 87 | 88 | ## 0.7.0 89 | 90 | This is a maintenance release. 
91 | 92 | - bump versions or core dependencies 93 | - fix minimal versions for new dependencies 94 | - introduce a justfile for easier maintenance 95 | - bump MSRV to 1.65.0 due to core dependency updates 96 | [bcdfc62](https://github.com/sebadob/redhac/commit/bcdfc62665320a9ad3f832d0c28f0175d6e447c2) 97 | [506c9c6](https://github.com/sebadob/redhac/commit/506c9c6c2c2fb3cbb1253cabd6bf4cdf9b01f4b0) 98 | 99 | ## v0.6.0 100 | 101 | - `cache_insert` and `cache_remove` default to their non-HA counterparts if `HA_MODE == false` 102 | [bcde62c](https://github.com/sebadob/redhac/commit/bcde62cbea233a68c86b21cd7300c150b2690bbf) 103 | - deprecated code elements cleaned up 104 | [51fbe7f](https://github.com/sebadob/redhac/commit/51fbe7fe72598432c978ee24587a7a65e10f1c46) 105 | - stability improvements inside certain K8s clusters - broken pipe prevention 106 | [e20bc39](https://github.com/sebadob/redhac/commit/e20bc39b925bb7738a0025a733e554efa1b4a546) 107 | - removed the proto file build from `build.rs` to make docs.rs work (hopefully) 108 | [b53a2d3](https://github.com/sebadob/redhac/commit/b53a2d38f99f08bb9649035195228dc46be9cc7f) 109 | 110 | ## v0.5.0 111 | 112 | `redhac` goes open source 113 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "redhac" 3 | version = "0.10.5" 4 | edition = "2021" 5 | rust-version = "1.70.0" 6 | license = "Apache-2.0 OR MIT" 7 | authors = ["Sebastian Dobe 6 | The idea of `redhac` is to provide a caching library which can be embedded into any Rust 7 | application, while still providing the ability to build a distributed HA caching layer.
8 | It can be used as a cache for a single instance too, of course, but then it will not be the 9 | most performant, since it needs clones of most values to be able to send them over the network 10 | concurrently. If you need a distributed cache however, you might give `redhac` a try. 11 | 12 | The underlying system works with quorum and will elect a leader dynamically on startup. Each 13 | node will start a server and multiple client parts for bidirectional gRPC streaming. Each client will 14 | connect to each of the other servers. 15 | 16 | ## Release 17 | 18 | This crate has been used and tested already before going open source.
19 | By now, it has been through thousands of HA cluster startups, during which I produced leadership election conflicts on purpose to make 20 | sure they are handled and resolved properly. 21 | 22 | However, some benchmarks and maybe some performance fine-tuning are still missing, which is the reason why this crate has not 23 | reached v1.0.0 yet. After the benchmarks, the API may change if another approach to handling everything turns out to be faster. 24 | 25 | ## Consistency and guarantees 26 | 27 | **Important:** `redhac` is used for caching, not for persistent data! 28 | 29 | During normal operation, all the instances will forward their cache modifications to the other cache members.<br />
30 | If a node goes down or has temporary network issues and therefore loses connection to the cluster, it will invalidate 31 | its own cache to make 100% sure that it never provides possibly outdated data from the cache. Once a node 32 | rejoins the cluster, it will start receiving and saving updates from the remotes again.<br />
33 | There is an option in the `cache_get!` macro to fetch cached values from remote instances after such an event. This can 34 | be used if you maybe have some values that only live inside the cache and cannot be refreshed from the database, for 35 | instance. 36 | 37 | The other situation, when the cache will not save any values is when there is no quorum and cluster leader. 38 | 39 | If you need more consistency / guarantees / syncing after a late join, you may take a look at 40 | [openraft](https://github.com/datafuselabs/openraft) or other projects like this. 41 | 42 | ### Versions and dependencies 43 | 44 | - MSRV is Rust 1.70 45 | - Everything is checked and working with `-Zminimal-versions` 46 | - No issue reported by `cargo audit` (2024-04-09) 47 | 48 | ## Single Instance Example 49 | 50 | ```rust 51 | // These 3 are needed for the `cache_get` macro 52 | use redhac::{cache_get, cache_get_from, cache_get_value}; 53 | use redhac::{cache_put, cache_recv, CacheConfig, SizedCache}; 54 | 55 | #[tokio::main] 56 | async fn main() { 57 | let (_, mut cache_config) = CacheConfig::new(); 58 | 59 | // The cache name is used to reference the cache later on 60 | let cache_name = "my_cache"; 61 | // We need to spawn a global handler for each cache instance. 62 | // Communication is done over channels. 63 | cache_config.spawn_cache( 64 | cache_name.to_string(), SizedCache::with_size(16), None 65 | ); 66 | 67 | // Cache keys can only be `String`s at the time of writing. 68 | let key = "myKey"; 69 | // The value you want to cache must implement `serde::Serialize`. 70 | // The serialization of the values is done with `bincode`. 71 | let value = "myCacheValue".to_string(); 72 | 73 | // At this point, we need cloned values to make everything work 74 | // nicely with networked connections. If you only ever need a 75 | // local cache, you might be better off with using the `cached` 76 | // crate directly and use references whenever possible. 77 | cache_put( 78 | cache_name.to_string(), key.to_string(), &cache_config, &value 79 | ) 80 | .await 81 | .unwrap(); 82 | 83 | let res = cache_get!( 84 | // The type of the value we want to deserialize the value into 85 | String, 86 | // The cache name from above. We can start as many for our 87 | // application as we like 88 | cache_name.to_string(), 89 | // For retrieving values, the same as above is true - we 90 | // need real `String`s 91 | key.to_string(), 92 | // All our caches have the necessary information added to 93 | // the cache config. Here a 94 | // reference is totally fine. 95 | &cache_config, 96 | // This does not really apply to this single instance 97 | // example. If we would have started a HA cache layer we 98 | // could do remote lookups for a value we cannot find locally, 99 | // if this would be set to `true` 100 | false 101 | ) 102 | .await 103 | .unwrap(); 104 | 105 | assert!(res.is_some()); 106 | assert_eq!(res.unwrap(), value); 107 | } 108 | ``` 109 | 110 | ## High Availability Setup 111 | 112 | The High Availability (HA) works in a way, that each cache member connects to each other. When 113 | eventually quorum is reached, a leader will be elected, which then is responsible for all cache 114 | modifications to prevent collisions (if you do not decide against it with a direct `cache_put`). 115 | Since each node connects to each other, it means that you cannot just scale up the cache layer 116 | infinitely. The ideal number of nodes is 3. 
You can scale this number up for instance to 5 or 7 117 | if you like, but this has not been tested in greater detail so far. 118 | 119 | **Write performance** will degrade the more nodes you add to the cluster, since you simply need 120 | to wait for more Ack's from the other members. 121 | 122 | **Read performance** however should stay the same. 123 | 124 | Each node will keep a local copy of each value inside the cache (if it has not lost connection 125 | or joined the cluster at some later point), which means in most cases reads do not require any 126 | remote network access. 127 | 128 | ## Configuration 129 | 130 | The way to configure the `HA_MODE` is optimized for a Kubernetes deployment but may seem a bit 131 | odd at the same time, if you deploy somewhere else. You can either provide the `.env` file and 132 | use it as a config file, or just set these variables for the environment directly. You need 133 | to set the following values: 134 | 135 | ### `HA_MODE` 136 | 137 | The first one is easy, just set `HA_MODE=true` 138 | 139 | ### `HA_HOSTS` 140 | 141 | **NOTE:**
142 | In a few examples down below, the name for deployments may be `rauthy`. The reason is that this 143 | crate was originally written to complement another project of mine, Rauthy (link will follow), 144 | which is an OIDC Provider and Single Sign-On solution written in Rust. 145 | 146 | The `HA_HOSTS` variable works in a way that makes it really easy to configure inside Kubernetes, 147 | as long as a `StatefulSet` is used for the deployment. 148 | A cache node finds its members via the `HA_HOSTS` and its own `HOSTNAME`. 149 | In the `HA_HOSTS`, add every cache member. For instance, if you want to use 3 replicas in HA 150 | mode which are running and deployed as a `StatefulSet` with the name `rauthy` again: 151 | 152 | ```text 153 | HA_HOSTS="http://rauthy-0:8000, http://rauthy-1:8000 ,http://rauthy-2:8000" 154 | ``` 155 | 156 | The way it works: 157 | 158 | 1. **A node gets its own hostname from the OS**<br />
159 | This is the reason why you use a `StatefulSet` for the deployment, even without any volumes 160 | attached. For a `StatefulSet` called `rauthy`, the replicas will always have the names `rauthy-0`, 161 | `rauthy-1`, ..., which are at the same time the hostnames inside the pod. 162 | 2. **Find "me" inside the `HA_HOSTS` variable**<br />
163 | If the hostname cannot be found in the `HA_HOSTS`, the application will panic and exit because 164 | of a misconfiguration. 165 | 3. **Use the port from the "me"-Entry that was found for the server part**
166 | This means you do not need to specify the port in another variable, which eliminates the risk of 167 | inconsistencies 168 | or a bad config in that case. 169 | 4. **Extract "me" from the `HA_HOSTS`**<br />
170 | then take the leftover nodes as all cache members and connect to them 171 | 5. **Once a quorum has been reached, a leader will be elected**
172 | From that point on, the cache will start accepting requests. 173 | 6. **If the leader is lost - elect a new one - no values will be lost** 174 | 7. **If quorum is lost, the cache will be invalidated**<br />
175 | This happens for security reasons to prevent cache inconsistencies. It is better to invalidate the cache 176 | and fetch the values fresh from the DB or other cache members than to work with possibly invalid 177 | values, which is especially true in an authn / authz situation. 178 | 179 | **NOTE:**<br />
180 | If you are in an environment where the described mechanism of extracting the hostname would 181 | not work, you can set the `HOSTNAME_OVERWRITE` for each instance to match one of the `HA_HOSTS` 182 | entries, or you can overwrite the name when using `redhac::start_cluster`. 183 | 184 | ### `CACHE_AUTH_TOKEN` 185 | 186 | You need to set a secret for the `CACHE_AUTH_TOKEN`, which is then used for authenticating 187 | cache members. 188 | 189 | ### TLS 190 | 191 | For the sake of this example, we will not dig into TLS and simply disable it, which 192 | can be done with `CACHE_TLS=false`.<br />
193 | You can add your TLS certificates in PEM format and an optional Root CA. This applies to the 194 | server and the client part separately. This means you can configure the cache layer to use mTLS 195 | connections. 196 | 197 | #### Generating TLS certificates (optional) 198 | 199 | You can of course provide your own set of certificates, if you already have some, or just use your preferred way of 200 | creating some. However, I want to show the simplest possible way of doing this with another tool of mine 201 | called [Nioca](https://github.com/sebadob/nioca).<br />
202 | If you have `docker` or similar available, this is the easiest option. If not, you can grab one of the binaries from 203 | the [out]() folder, which are available for linux amd64 and arm64. 204 | 205 | I suggest to use `docker` for this task. Otherwise, you can use the `nioca` binary directly on any linux machine. 206 | If you want a permanent way of generating certificates for yourself, take a look at Rauthy's `justfile` and copy 207 | and adjust the recipes `create-root-ca` and `create-end-entity-tls` to your liking. 208 | If you just want to get everything started quickly, follow these steps: 209 | 210 | ##### Folder for your certificates 211 | 212 | Let's create a folder for our certificates: 213 | 214 | ``` 215 | mkdir ca 216 | ``` 217 | 218 | ##### Create an alias for the `docker` command 219 | 220 | If you use one of the binaries directly, you can skip this step. 221 | 222 | ``` 223 | alias nioca='docker run --rm -it -v ./ca:/ca -u $(id -u ${USER}):$(id -g ${USER}) ghcr.io/sebadob/nioca' 224 | ``` 225 | 226 | To see the full feature set for more customization than mentioned below: 227 | ``` 228 | nioca x509 -h 229 | ``` 230 | 231 | ##### Generate full certificate chain 232 | 233 | We can create and generate a fully functioning, production ready Root Certificate Authority (CA) with just a single 234 | command. Make sure that at least one of your `--alt-name-dns` from here matches the `CACHE_TLS_CLIENT_VALIDATE_DOMAIN` 235 | from the redhac config later on.
236 | To keep things simple, we will use the same certificate for the server and the client. You can of course create 237 | separate ones, if you like: 238 | 239 | ``` 240 | nioca x509 \ 241 | --cn 'redhac.local' \ 242 | --alt-name-dns redhac.local \ 243 | --usages-ext server-auth \ 244 | --usages-ext client-auth \ 245 | --stage full \ 246 | --clean 247 | ``` 248 | 249 | You will be asked 6 times (yes, 6) for an at least 16 character password: 250 | - The first 3 times, you need to provide the encryption password for your Root CA 251 | - The last 3 times, you should provide a different password for your Intermediate CA 252 | 253 | When everything was successful, you will have a new folder named `x509` with sub folders `root`, `intermediate` 254 | and `end_entity` in your current one. 255 | 256 | From these, you will need the following files: 257 | 258 | ``` 259 | cp ca/x509/intermediate/ca-chain.pem ./redhac.ca-chain.pem && \ 260 | cp ca/x509/end_entity/$(cat ca/x509/end_entity/serial)/cert-chain.pem ./redhac.cert-chain.pem && \ 261 | cp ca/x509/end_entity/$(cat ca/x509/end_entity/serial)/key.pem ./redhac.key.pem 262 | ``` 263 | 264 | - You should have 3 files in `ls -l`: 265 | ``` 266 | redhac.ca-chain.pem 267 | redhac.cert-chain.pem 268 | redhac.key.pem 269 | ``` 270 | 271 | **4. Create Kubernetes Secrets** 272 | ``` 273 | kubectl create secret tls redhac-tls-server --key="redhac.key.pem" --cert="redhac.cert-chain.pem" && \ 274 | kubectl create secret tls redhac-tls-client --key="redhac.key.pem" --cert="redhac.cert-chain.pem" && \ 275 | kubectl create secret generic redhac-server-ca --from-file redhac.ca-chain.pem && \ 276 | kubectl create secret generic redhac-client-ca --from-file redhac.ca-chain.pem 277 | ``` 278 | 279 | ### Reference Config 280 | 281 | The following variables are the ones you can use to configure `redhac` via env vars. 282 | At the time of writing, the configuration can only be done via the env. 283 | 284 | ```text 285 | # If the cache should start in HA mode or standalone 286 | # accepts 'true|false', defaults to 'false' 287 | HA_MODE=true 288 | 289 | # The connection strings (with hostnames) of the HA instances 290 | # as a CSV. Format: 'scheme://hostname:port' 291 | HA_HOSTS="http://redhac.redhac:8080, http://redhac.redhac:8180, http://redhac.redhac:8280" 292 | 293 | # This can overwrite the hostname which is used to identify each 294 | # cache member. Useful in scenarios, where all members are on the 295 | # same host or for testing. You need to add the port, since `redhac` 296 | # will do an exact match to find "me". 297 | #HOSTNAME_OVERWRITE="127.0.0.1:8080" 298 | 299 | # Secret token, which is used to authenticate the cache members 300 | CACHE_AUTH_TOKEN=SuperSafeSecretToken1337 301 | 302 | # Enable / disable TLS for the cache communication (default: true) 303 | CACHE_TLS=true 304 | 305 | # The path to the server TLS certificate PEM file 306 | # default: tls/redhac.cert-chain.pem 307 | CACHE_TLS_SERVER_CERT=tls/redhac.cert-chain.pem 308 | # The path to the server TLS key PEM file 309 | # default: tls/redhac.key.pem 310 | CACHE_TLS_SERVER_KEY=tls/redhac.key.pem 311 | 312 | # The path to the client mTLS certificate PEM file. This is optional. 313 | CACHE_TLS_CLIENT_CERT=tls/redhac.cert-chain.pem 314 | # The path to the client mTLS key PEM file. This is optional. 
315 | CACHE_TLS_CLIENT_KEY=tls/redhac.key.pem 316 | 317 | # If not empty, the PEM file from the specified location will be 318 | # added as the CA certificate chain for validating 319 | # the servers TLS certificate. This is optional. 320 | CACHE_TLS_CA_SERVER=tls/redhac.ca-chain.pem 321 | # If not empty, the PEM file from the specified location will 322 | # be added as the CA certificate chain for validating 323 | # the clients mTLS certificate. This is optional. 324 | CACHE_TLS_CA_CLIENT=tls/redhac.ca-chain.pem 325 | 326 | # The domain / CN the client should validate the certificate 327 | # against. This domain MUST be inside the 328 | # 'X509v3 Subject Alternative Name' when you take a look at 329 | # the servers certificate with the openssl tool. 330 | # default: redhac.local 331 | CACHE_TLS_CLIENT_VALIDATE_DOMAIN=redhac.local 332 | 333 | # Can be used if you need to overwrite the SNI when the 334 | # client connects to the server, for instance if you are 335 | # behind a loadbalancer which combines multiple certificates. 336 | # default: "" 337 | #CACHE_TLS_SNI_OVERWRITE= 338 | 339 | # Define different buffer sizes for channels between the 340 | # components. Buffer for client request on the incoming 341 | # stream - server side (default: 128) 342 | # Makes sense to have the CACHE_BUF_SERVER roughly set to: 343 | # `(number of total HA cache hosts - 1) * CACHE_BUF_CLIENT` 344 | CACHE_BUF_SERVER=128 345 | # Buffer for client requests to remote servers for all cache 346 | # operations (default: 64) 347 | CACHE_BUF_CLIENT=64 348 | 349 | # Connections Timeouts 350 | # The Server sends out keepalive pings with configured timeouts 351 | # The keepalive ping interval in seconds (default: 5) 352 | CACHE_KEEPALIVE_INTERVAL=5 353 | # The keepalive ping timeout in seconds (default: 5) 354 | CACHE_KEEPALIVE_TIMEOUT=5 355 | 356 | # The timeout for the leader election. If a newly saved leader 357 | # request has not reached quorum after the timeout, the leader 358 | # will be reset and a new request will be sent out. 359 | # CAUTION: This should not be below 360 | # CACHE_RECONNECT_TIMEOUT_UPPER, since cold starts and 361 | # elections will be problematic in that case. 362 | # value in seconds, default: 2 363 | CACHE_ELECTION_TIMEOUT=2 364 | 365 | # These 2 values define the reconnect timeout for the HA Cache 366 | # Clients. The values are in ms and a random between these 2 367 | # will be chosen each time to avoid conflicts 368 | # and race conditions (default: 500) 369 | CACHE_RECONNECT_TIMEOUT_LOWER=500 370 | # (default: 2000) 371 | CACHE_RECONNECT_TIMEOUT_UPPER=2000 372 | ``` 373 | 374 | ## Example Code 375 | 376 | For example code, please take a look at `./examples`. 377 | 378 | The `conflict_resolution_tests` are used for producing conflicts on purpose by starting multiple nodes from the same 379 | code at the exact same time, which is maybe not too helpful if you want to take a look at how it's done. 380 | 381 | The better example then would be the `ha_setup`, which can be used to start 3 nodes in 3 different terminals to observe 382 | the behavior. 
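
Putting the pieces above together, here is a minimal, hypothetical quick-start sketch for the HA mode: start the cluster, wait for quorum via the health state, then use the cache. It assumes the env vars from the reference config are set, that the other `HA_HOSTS` members are running, that this node's hostname matches one of the `HA_HOSTS` entries (otherwise pass a hostname overwrite to `start_cluster`), and that `anyhow` is available; the API calls follow the examples in this repository.

```rust
use std::time::Duration;

use redhac::quorum::{QuorumHealth, QuorumState};
use redhac::{cache_get, cache_get_from, cache_get_value, cache_put, CacheConfig, SizedCache};
use tokio::time;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // HA_MODE, HA_HOSTS, CACHE_AUTH_TOKEN, CACHE_TLS, ... are expected to be set
    // in the environment, as shown in the reference config above.
    let (tx_health, mut cache_config) = CacheConfig::new();
    cache_config.spawn_cache("my_cache".to_string(), SizedCache::with_size(16), None);

    // Starts the gRPC server part and the clients to the other `HA_HOSTS` members.
    // The last argument could be `Some("127.0.0.1:8001".to_string())` to overwrite
    // the hostname, as done in the examples.
    redhac::start_cluster(tx_health, &mut cache_config, None, None).await?;

    // Wait until quorum has been reached and a leader has been elected - without
    // quorum and a leader, the cache will not save any values.
    loop {
        if let Some(health) = cache_config.rx_health_state.borrow().as_ref() {
            if health.health == QuorumHealth::Good
                && matches!(health.state, QuorumState::Leader | QuorumState::Follower)
            {
                break;
            }
        }
        time::sleep(Duration::from_secs(1)).await;
    }

    // Normal cache usage - `true` allows a remote lookup if the value is not found locally.
    let value = "myCacheValue".to_string();
    cache_put("my_cache".to_string(), "myKey".to_string(), &cache_config, &value)
        .await
        .unwrap();
    let res = cache_get!(String, "my_cache".to_string(), "myKey".to_string(), &cache_config, true)
        .await
        .unwrap();
    assert_eq!(res.as_deref(), Some("myCacheValue"));

    // Graceful shutdown
    cache_config.shutdown().await?;
    Ok(())
}
```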
383 | -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | // Note: needs a c++ compiler to work: 2 | // dnf install -y gcc-c++ 3 | 4 | fn main() { 5 | // tonic_build::configure() 6 | // .build_client(true) 7 | // .build_server(true) 8 | // .out_dir("src/rpc/") 9 | // .protoc_arg("--experimental_allow_proto3_optional") 10 | // .compile(&["proto/cache.proto"], &["proto"]) 11 | // .expect("Failed to compile proto/cache.proto"); 12 | } 13 | -------------------------------------------------------------------------------- /examples/conflict_resolution_tests/.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | target 3 | -------------------------------------------------------------------------------- /examples/conflict_resolution_tests/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "conflict-resolution-tests" 3 | version = "0.1.0" 4 | edition = "2021" 5 | license = "Apache-2.0 OR MIT" 6 | authors = ["Sebastian Dobe anyhow::Result { 6 | let start = time::Instant::now(); 7 | 8 | // Configure a cache 9 | let (tx_health_1, mut cache_config_1) = redhac::CacheConfig::new(); 10 | let cache_name = "my_cache"; 11 | let cache = redhac::SizedCache::with_size(16); 12 | 13 | // If you do not specify a buffer as the last argument, an unbounded channel will be used. 14 | // This would have no upper limit of course, but it has lower latency and is faster overall. 15 | cache_config_1.spawn_cache(cache_name.to_string(), cache.clone(), None); 16 | 17 | // start server 18 | redhac::start_cluster( 19 | tx_health_1, 20 | &mut cache_config_1, 21 | // optional notification channel: `Option>` 22 | None, 23 | // We need to overwrite the hostname so we can start all nodes on the same host for this 24 | // example. Usually, this will be set to `None` 25 | Some("127.0.0.1:7001".to_string()), 26 | ) 27 | .await?; 28 | info!("First cache node started"); 29 | 30 | // Mimic the other 2 cache members. This should usually not be done in the same code - only 31 | // for this example to make it work. 32 | let (tx_health_2, mut cache_config_2) = redhac::CacheConfig::new(); 33 | cache_config_2.spawn_cache(cache_name.to_string(), cache.clone(), None); 34 | redhac::start_cluster( 35 | tx_health_2, 36 | &mut cache_config_2, 37 | None, 38 | Some("127.0.0.1:7002".to_string()), 39 | ) 40 | .await?; 41 | info!("2nd cache node started"); 42 | // Now after the 2nd cache member has been started, we would already have quorum and a 43 | // working cache layer (as soon as the connection is established of course). As long as there 44 | // is no leader and / or quorum, the cache will not save any values to avoid inconsistencies. 
45 | 46 | let (tx_health_3, mut cache_config_3) = redhac::CacheConfig::new(); 47 | cache_config_3.spawn_cache(cache_name.to_string(), cache.clone(), None); 48 | redhac::start_cluster( 49 | tx_health_3, 50 | &mut cache_config_3, 51 | None, 52 | Some("127.0.0.1:7003".to_string()), 53 | ) 54 | .await?; 55 | info!("3rd cache node started"); 56 | 57 | // For the sake of this example again, we need to wait until the cache is in a healthy 58 | // state, before we can actually insert a value 59 | let caches = [&cache_config_1, &cache_config_2, &cache_config_3]; 60 | loop { 61 | let mut leaders = 0; 62 | let mut followers = 0; 63 | for cache in caches { 64 | let health_borrow = cache.rx_health_state.borrow(); 65 | let health = health_borrow.as_ref().unwrap(); 66 | 67 | if health.health != redhac::quorum::QuorumHealth::Good { 68 | break; 69 | } 70 | match health.state { 71 | redhac::quorum::QuorumState::Leader => leaders += 1, 72 | redhac::quorum::QuorumState::Follower => followers += 1, 73 | _ => {} 74 | } 75 | } 76 | 77 | // Let's make 100% sure that we have 1 Leader and 2 Followers 78 | if leaders == 1 && followers == 2 { 79 | info!(">>> Each cache member is fully initialized <<<"); 80 | break; 81 | } 82 | 83 | info!("Wait until all cache members have found each other"); 84 | time::sleep(Duration::from_secs(1)).await; 85 | } 86 | 87 | let secs_until_healthy = start.elapsed().as_secs(); 88 | info!("Cache is fully started up and healthy - shutting down now"); 89 | 90 | // Graceful Shutdown 91 | // We should send a signal through the exit channel to execute a graceful shutdown of the cache. 92 | // The oneshot channel will send the ack back, when the shutdown has finished to not exit 93 | // too early. 94 | info!("Sending exit signal to cache 1"); 95 | cache_config_1.shutdown().await?; 96 | 97 | // Now to the same for cache 2 and 3, just for this example 98 | info!("Sending exit signal to cache 2"); 99 | cache_config_2.shutdown().await?; 100 | 101 | info!("Sending exit signal to cache 3"); 102 | cache_config_3.shutdown().await?; 103 | 104 | Ok(secs_until_healthy) 105 | } 106 | -------------------------------------------------------------------------------- /examples/conflict_resolution_tests/src/main.rs: -------------------------------------------------------------------------------- 1 | use std::cmp; 2 | use std::env; 3 | use std::time::Duration; 4 | use tokio::{select, time}; 5 | use tracing::Level; 6 | use tracing_subscriber::FmtSubscriber; 7 | 8 | mod cache; 9 | 10 | #[tokio::main] 11 | async fn main() -> anyhow::Result<()> { 12 | // Logging can be set up with the `tracing` crate 13 | let subscriber = FmtSubscriber::builder() 14 | .with_max_level(Level::INFO) 15 | .finish(); 16 | tracing::subscriber::set_global_default(subscriber).expect("setting default subscriber failed"); 17 | 18 | // `redhac` is configured via env variables. 19 | // The idea behind it is that you can set up your application with HA capabilities while still 20 | // being able to switch modes via config file easily. 21 | // For the sake of this example, we set the env vars directly inside the code. Usually you 22 | // would want to configure them on the outside of course. 23 | 24 | // Enable the HA_MODE 25 | env::set_var("HA_MODE", "true"); 26 | 27 | // Configure the HA Cache members. You need an uneven number for quorum. 28 | // For this example, we will have all of them on the same host on different ports to make 29 | // it work. 
30 | env::set_var( 31 | "HA_HOSTS", 32 | "http://127.0.0.1:7001, http://127.0.0.1:7002, http://127.0.0.1:7003", 33 | ); 34 | 35 | // Set a static token for cache authentication 36 | env::set_var("CACHE_AUTH_TOKEN", "SuperSecretToken1337"); 37 | 38 | // Disable TLS for this example 39 | env::set_var("CACHE_TLS", "false"); 40 | 41 | // This value controls how many runs should be done (successfully) until the conflict 42 | // resolution test can be considered successful. 43 | // This value is rather low for this example. It was tested with many thousands beforehand. 44 | let goal = 10; 45 | let mut success = 0; 46 | let mut exec_times = 0; 47 | let mut exec_min = 999; 48 | let mut exec_max = 0; 49 | 50 | for _ in 1..=goal { 51 | select! { 52 | res = cache::start_caches() => { 53 | match res { 54 | Ok(secs_until_healthy) => { 55 | success += 1; 56 | 57 | exec_times += secs_until_healthy; 58 | exec_min = cmp::min(exec_min, secs_until_healthy); 59 | exec_max = cmp::max(exec_max, secs_until_healthy); 60 | 61 | println!(r#" 62 | ############################## 63 | 64 | Successful runs: {} / {} 65 | 66 | ############################## 67 | 68 | "#, success, goal 69 | ); 70 | } 71 | Err(_) => break, 72 | } 73 | }, 74 | 75 | _ = time::sleep(Duration::from_secs(30)) => { 76 | eprintln!("Timeout exceeded - aborting"); 77 | break; 78 | } 79 | } 80 | } 81 | 82 | println!( 83 | r#" 84 | Successful runs: {} / {} 85 | Times takes until fully healthy cluster state: 86 | min: {} s 87 | max: {} s 88 | median: {} s 89 | "#, 90 | success, 91 | goal, 92 | exec_min, 93 | exec_max, 94 | // Yes, division by 0 if the first run fails - does not matter if it panics in that case 95 | exec_times / success 96 | ); 97 | 98 | Ok(()) 99 | } 100 | -------------------------------------------------------------------------------- /examples/ha_setup/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ha_setup" 3 | version = "0.1.0" 4 | edition = "2021" 5 | license = "Apache-2.0 OR MIT" 6 | authors = ["Sebastian Dobe anyhow::Result<()> { 9 | // Logging can be set up with the `tracing` crate 10 | let subscriber = FmtSubscriber::builder() 11 | .with_max_level(Level::INFO) 12 | .finish(); 13 | tracing::subscriber::set_global_default(subscriber).expect("setting default subscriber failed"); 14 | 15 | // `redhac` is configured via env variables. 16 | // The idea behind it is that you can set up your application with HA capabilities while still 17 | // being able to switch modes via config file easily. 18 | // For the sake of this example, we set the env vars directly inside the code. Usually you 19 | // would want to configure them on the outside of course. 20 | 21 | // Enable the HA_MODE 22 | env::set_var("HA_MODE", "true"); 23 | 24 | // Configure the HA Cache members. You need an uneven number for quorum. 25 | // For this example, we will have all of them on the same host on different ports to make 26 | // it work. 
27 | env::set_var( 28 | "HA_HOSTS", 29 | "http://127.0.0.1:7071, http://127.0.0.1:7072, http://127.0.0.1:7073", 30 | ); 31 | 32 | // Set a static token for cache authentication 33 | env::set_var("CACHE_AUTH_TOKEN", "SuperSecretToken1337"); 34 | 35 | // Disable TLS for this example 36 | env::set_var("CACHE_TLS", "false"); 37 | 38 | let args: Vec = env::args().collect(); 39 | let hostname_overwrite = if args.len() > 1 { 40 | args[1].clone() 41 | } else { 42 | return Err(anyhow::Error::msg( 43 | "Please provide the current node's HOSTNAME as the only argument", 44 | )); 45 | }; 46 | 47 | // Configure a cache 48 | let (tx_health, mut cache_config) = redhac::CacheConfig::new(); 49 | let cache_name = "my_cache"; 50 | let cache = redhac::SizedCache::with_size(16); 51 | 52 | // If you do not specify a buffer as the last argument, an unbounded channel will be used. 53 | // This would have no upper limit of course, but it has lower latency and is faster overall. 54 | cache_config.spawn_cache(cache_name.to_string(), cache.clone(), None); 55 | 56 | // start server 57 | redhac::start_cluster( 58 | tx_health, 59 | &mut cache_config, 60 | // optional notification channel: `Option>` 61 | None, 62 | // We need to overwrite the hostname so we can start all nodes on the same host for this 63 | // example. Usually, this will be set to `None` 64 | Some(hostname_overwrite), 65 | ) 66 | .await?; 67 | info!("First cache node started"); 68 | 69 | // Now just sleep until we ctrl + c, so we can start the other members and observe the behavior 70 | time::sleep(Duration::from_secs(6000)).await; 71 | 72 | // Let's simulate a graceful shutdown 73 | cache_config.shutdown().await?; 74 | 75 | Ok(()) 76 | } 77 | -------------------------------------------------------------------------------- /justfile: -------------------------------------------------------------------------------- 1 | set shell := ["bash", "-uc"] 2 | 3 | export TAG := `cat Cargo.toml | grep '^version =' | cut -d " " -f3 | xargs` 4 | 5 | # prints out the currently set version 6 | version: 7 | #!/usr/bin/env bash 8 | echo "v$TAG" 9 | 10 | 11 | # clippy lint + check with minimal versions from nightly 12 | check: 13 | #!/usr/bin/env bash 14 | set -euxo pipefail 15 | clear 16 | cargo update 17 | cargo +nightly clippy -- -D warnings 18 | cargo minimal-versions check 19 | 20 | 21 | # runs the full set of tests 22 | test: 23 | #!/usr/bin/env bash 24 | set -euxo pipefail 25 | clear 26 | cargo test 27 | HA_MODE=true cargo test test_ha_cache -- --ignored 28 | echo All tests successful 29 | 30 | 31 | # builds the code 32 | build: 33 | #!/usr/bin/env bash 34 | set -euxo pipefail 35 | # build as musl to make sure this works 36 | #cargo build --release --target x86_64-unknown-linux-musl 37 | cargo build --release 38 | 39 | 40 | # verifies the MSRV 41 | msrv-verify: 42 | cargo msrv verify 43 | 44 | 45 | # find's the new MSRV, if it needs a bump 46 | msrv-find: 47 | cargo msrv --min 1.70.0 48 | 49 | 50 | # verify thats everything is good 51 | verify: check test build msrv-verify 52 | 53 | 54 | # makes sure everything is fine 55 | verfiy-is-clean: verify 56 | #!/usr/bin/env bash 57 | set -euxo pipefail 58 | 59 | # make sure everything has been committed 60 | git diff --exit-code 61 | 62 | echo all good 63 | 64 | 65 | # sets a new git tag and pushes it 66 | release: verfiy-is-clean 67 | #!/usr/bin/env bash 68 | set -euxo pipefail 69 | 70 | # make sure git is clean 71 | git diff --quiet || exit 1 72 | 73 | git tag "v$TAG" 74 | git push origin "v$TAG" 75 | 76 | 77 | # dry-run 
publishing the latest version 78 | publish-dry: verfiy-is-clean 79 | #!/usr/bin/env bash 80 | set -euxo pipefail 81 | cargo publish --dry-run 82 | 83 | 84 | # publishes the current version to cargo.io 85 | publish: verfiy-is-clean 86 | #!/usr/bin/env bash 87 | set -euxo pipefail 88 | cargo publish 89 | -------------------------------------------------------------------------------- /proto/cache.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package cache; 4 | 5 | // Contains the endpoints for managing the Cache in a HA Cluster to exchange and update values remotely 6 | service Cache { 7 | // Inserts / Updates a value from remote in the local caching layer 8 | rpc StreamValues(stream CacheRequest) returns (stream Ack) {} 9 | } 10 | 11 | message Ack { 12 | oneof method { 13 | GetAck get_ack = 1; 14 | PutAck put_ack = 2; 15 | DelAck del_ack = 3; 16 | MgmtAck mgmt_ack = 4; 17 | Error error = 5; 18 | } 19 | } 20 | 21 | // A Cache key / value pair 22 | // Since it accepts any count of bytes as the value, it is generically working for any object, which can be serialized 23 | // manually. 24 | message CacheEntry { 25 | string cache_name = 1; 26 | string entry = 2; 27 | bytes value = 3; 28 | } 29 | 30 | // The Cache Value being sent over the stream. 31 | message CacheRequest { 32 | oneof method { 33 | Get get = 1; 34 | Put put = 2; 35 | Insert insert = 3; 36 | Del del = 4; 37 | Remove remove = 5; 38 | MgmtRequest mgmt_req = 6; 39 | } 40 | } 41 | 42 | message Error { 43 | string error = 1; 44 | } 45 | 46 | // GET message for a cache operation 47 | message Get { 48 | string cache_name = 1; 49 | string entry = 2; 50 | } 51 | 52 | // ACK for a successful GET 53 | message GetAck { 54 | string cache_name = 1; 55 | string entry = 2; 56 | optional bytes value = 3; 57 | } 58 | 59 | // PUT message for a cache operation 60 | message Put { 61 | string cache_name = 1; 62 | string entry = 2; 63 | bytes value = 3; 64 | optional string req_id = 4; 65 | } 66 | 67 | // INSERT message for a cache operation 68 | message Insert { 69 | string cache_name = 1; 70 | string entry = 2; 71 | bytes value = 3; 72 | string req_id = 4; 73 | AckLevel ack_level = 5; 74 | } 75 | 76 | // ACK for a successful PUT 77 | message PutAck { 78 | optional string req_id = 1; 79 | optional bool mod_res = 2; 80 | } 81 | 82 | // DEL message for a cache operation 83 | message Del { 84 | string cache_name = 1; 85 | string entry = 2; 86 | optional string req_id = 3; 87 | 88 | } 89 | 90 | // REMOVE message for a cache operation 91 | message Remove { 92 | string cache_name = 1; 93 | string entry = 2; 94 | string req_id = 3; 95 | AckLevel ack_level = 4; 96 | } 97 | 98 | // ACK for a successful DEL 99 | message DelAck { 100 | optional string req_id = 1; 101 | optional bool mod_res = 2; 102 | } 103 | 104 | // The AckLevel for HA cache modifying requests 105 | message AckLevel { 106 | oneof ack_level { 107 | Empty level_quorum = 1; 108 | Empty level_once = 2; 109 | Empty level_leader = 3; 110 | } 111 | } 112 | 113 | // The Cache Value being sent over the stream. 
114 | message MgmtRequest { 115 | oneof method { 116 | Ping ping = 1; 117 | Health health = 2; 118 | LeaderReq leader_req = 3; 119 | LeaderReqAck leader_req_ack = 4; 120 | LeaderAck leader_ack = 5; 121 | LeaderSwitch leader_switch = 6; 122 | LeaderSwitchPriority leader_switch_priority = 7; 123 | LeaderDead leader_dead = 8; 124 | LeaderSwitchAck leader_switch_ack = 9; 125 | } 126 | } 127 | 128 | // Just a ping with no content 129 | message Ping {} 130 | 131 | // Requests health information 132 | message Health {} 133 | 134 | // Is sent out if quorum was established and no leader is present to make a request for taking over that role 135 | message LeaderReq { 136 | // The address where this host can be reached (multiple possible) 137 | string addr = 1; 138 | // Timestamp when the request has been created. In case of a conflict, the request with the earlier creation wins. 139 | int64 election_ts = 2; 140 | } 141 | 142 | // Is sent out if quorum was established and 'enough' hosts have accepted the leader request 143 | message LeaderAck { 144 | // The address where this host can be reached 145 | string addr = 1; 146 | // Timestamp when the request has been created. In case of a conflict, the request with the earlier creation wins. 147 | int64 election_ts = 2; 148 | } 149 | 150 | // Is sent out, when the current leader is about to shut down 151 | message LeaderSwitch { 152 | // The address of the new host that this leader votes for the next one. This creates less friction when changing. 153 | string vote_host = 1; 154 | int64 election_ts = 2; 155 | } 156 | 157 | // Is sent out, when there are multiple leadership requests to promote the one with the higher priority 158 | message LeaderSwitchPriority { 159 | string vote_host = 1; 160 | int64 election_ts = 2; 161 | } 162 | 163 | // Is sent out, when the leader is dead, so the sender can take over that role 164 | message LeaderDead { 165 | // The address of the new host that this leader votes for the next one. This creates less friction when changing. 166 | string vote_host = 1; 167 | int64 election_ts = 2; 168 | } 169 | 170 | message MgmtAck { 171 | oneof method { 172 | Pong pong = 1; 173 | HealthAck health_ack = 2; 174 | LeaderInfo leader_info = 3; 175 | LeaderReqAck leader_req_ack = 4; 176 | // TODO do we even need the leader_ack_ack? 
177 | LeaderAckAck leader_ack_ack = 5; 178 | LeaderSwitchAck leader_switch_ack = 6; 179 | } 180 | } 181 | 182 | // Just a pong with no content 183 | message Pong {} 184 | 185 | // Returns health information 186 | message HealthAck { 187 | // uptime of this host in seconds 188 | uint64 uptime_secs = 1; 189 | // if it has quorum or not 190 | bool quorum = 2; 191 | // if this host is a leader or follower 192 | State state = 3; 193 | optional Leader leader = 4; 194 | // list of all configured hosts with their connection state 195 | repeated HostHealth host_health = 5; 196 | } 197 | 198 | message State { 199 | oneof value { 200 | Empty leader = 1; 201 | Empty leader_dead = 2; 202 | Empty leader_switch = 3; 203 | Empty leader_tx_await = 4; 204 | Empty leadership_requested = 5; 205 | Empty follower = 6; 206 | Empty undefined = 7; 207 | } 208 | } 209 | 210 | // Information about the leader 211 | message Leader { 212 | string addr = 1; 213 | int64 election_ts = 2; 214 | bool connected = 3; 215 | } 216 | 217 | // Information about the remote host 218 | message HostHealth { 219 | string addr = 1; 220 | bool connected = 2; 221 | } 222 | 223 | // Will be sent out on a new client connection to inform about a possibly existing leader 224 | message LeaderInfo { 225 | optional string addr = 1; 226 | int64 election_ts = 2; 227 | bool has_quorum = 3; 228 | } 229 | 230 | // Ack for accepting a LeaderReq 231 | message LeaderReqAck { 232 | // The address of the acked leader 233 | string addr = 1; 234 | // The original timestamp from the LeaderReq itself 235 | int64 election_ts = 2; 236 | } 237 | 238 | // Ack for accepting a LeaderAck 239 | message LeaderAckAck { 240 | string addr = 1; 241 | } 242 | 243 | // Ack for accepting a LeaderSwitch 244 | message LeaderSwitchAck { 245 | string addr = 1; 246 | } 247 | 248 | message Empty {} 249 | -------------------------------------------------------------------------------- /src/client.rs: -------------------------------------------------------------------------------- 1 | use crate::quorum::{AckLevel, RpcServer, RpcServerState}; 2 | use crate::rpc::cache; 3 | use crate::rpc::cache::cache_client::CacheClient; 4 | use crate::rpc::cache::mgmt_ack::Method; 5 | use crate::rpc::cache::{ack, cache_request, CacheRequest}; 6 | use crate::{get_cache_req_id, get_rand_between, CacheError, QuorumReq, TLS}; 7 | use cached::Cached; 8 | use futures_util::TryStreamExt; 9 | use lazy_static::lazy_static; 10 | use std::env; 11 | use std::str::FromStr; 12 | use std::sync::Arc; 13 | use std::time::Duration; 14 | use tokio::sync::{mpsc, watch}; 15 | use tokio::{fs, time}; 16 | use tokio_stream::wrappers::ReceiverStream; 17 | use tonic::codegen::InterceptedService; 18 | use tonic::metadata::MetadataValue; 19 | use tonic::transport::{Certificate, Channel, ClientTlsConfig, Identity, Uri}; 20 | use tonic::{Request, Status}; 21 | use tower::util::ServiceExt; 22 | use tracing::{debug, error, info, warn}; 23 | 24 | lazy_static! 
{ 25 | static ref BUF_SIZE_CLIENT: usize = env::var("CACHE_BUF_CLIENT") 26 | .unwrap_or_else(|_| String::from("64")) 27 | .parse::() 28 | .expect("Error parsing 'CACHE_BUF_CLIENT' to usize"); 29 | pub(crate) static ref RECONNECT_TIMEOUT_LOWER: u64 = env::var("CACHE_RECONNECT_TIMEOUT_LOWER") 30 | .unwrap_or_else(|_| String::from("500")) 31 | .parse::() 32 | .expect("Error parsing 'CACHE_RECONNECT_TIMEOUT_LOWER' to u64"); 33 | pub(crate) static ref RECONNECT_TIMEOUT_UPPER: u64 = env::var("CACHE_RECONNECT_TIMEOUT_UPPER") 34 | .unwrap_or_else(|_| String::from("2000")) 35 | .parse::() 36 | .expect("Error parsing 'CACHE_RECONNECT_TIMEOUT_UPPER' to u64"); 37 | static ref PATH_TLS_CERT: Option = env::var("CACHE_TLS_CLIENT_CERT").ok(); 38 | static ref PATH_TLS_KEY: Option = env::var("CACHE_TLS_CLIENT_KEY").ok(); 39 | static ref PATH_SERVER_CA: Option = env::var("CACHE_TLS_CA_SERVER").ok(); 40 | static ref TLS_VALIDATE_DOMAIN: String = env::var("CACHE_TLS_CLIENT_VALIDATE_DOMAIN") 41 | .unwrap_or_else(|_| String::from("redhac.local")); 42 | static ref CACHE_TLS_SNI_OVERWRITE: String = 43 | env::var("CACHE_TLS_SNI_OVERWRITE").unwrap_or_else(|_| String::default()); 44 | } 45 | 46 | #[derive(Debug, Clone)] 47 | pub enum RpcRequest { 48 | Ping, 49 | Get { 50 | cache_name: String, 51 | entry: String, 52 | resp: flume::Sender>>, 53 | }, 54 | Put { 55 | cache_name: String, 56 | entry: String, 57 | value: Vec, 58 | resp: Option>, 59 | }, 60 | // 'Insert' is the HA version of Put - every request through the leader 61 | Insert { 62 | cache_name: String, 63 | entry: String, 64 | value: Vec, 65 | ack_level: AckLevel, 66 | resp: flume::Sender, 67 | }, 68 | Del { 69 | cache_name: String, 70 | entry: String, 71 | resp: Option>, 72 | }, 73 | // 'Remove' is the HA version of Del - every request through the leader 74 | Remove { 75 | cache_name: String, 76 | entry: String, 77 | ack_level: AckLevel, 78 | resp: flume::Sender, 79 | }, 80 | LeaderReq { 81 | addr: String, 82 | election_ts: i64, 83 | }, 84 | LeaderAck { 85 | addr: String, 86 | election_ts: i64, 87 | }, 88 | LeaderSwitch { 89 | vote_host: String, 90 | election_ts: i64, 91 | }, 92 | LeaderReqAck { 93 | addr: String, 94 | election_ts: i64, 95 | }, 96 | LeaderAckAck { 97 | addr: String, 98 | }, 99 | LeaderSwitchAck { 100 | addr: String, 101 | }, 102 | LeaderSwitchDead { 103 | vote_host: String, 104 | election_ts: i64, 105 | }, 106 | LeaderSwitchPriority { 107 | vote_host: String, 108 | election_ts: i64, 109 | }, 110 | Shutdown, 111 | } 112 | 113 | /// This is needed for a remote lookup GET request to return the correct result to the cache_get 114 | /// function. 
115 | #[derive(Debug, Clone)] 116 | enum HAReq { 117 | SaveGetCallbackTx { 118 | cache_name: String, 119 | entry: String, 120 | tx: flume::Sender>>, 121 | }, 122 | SaveModCallbackTx { 123 | req_id: String, 124 | tx: flume::Sender, 125 | }, 126 | SendCacheGetResponse { 127 | cache_name: String, 128 | entry: String, 129 | value: Option>, 130 | }, 131 | SendCacheModResponse { 132 | req_id: String, 133 | value: bool, 134 | }, 135 | } 136 | 137 | type GrpcClient = 138 | CacheClient) -> Result, Status>>>; 139 | 140 | pub(crate) async fn cache_clients( 141 | ha_hosts: Vec, 142 | tx_quorum: flume::Sender, 143 | rx_remote_req: flume::Receiver, 144 | ) -> anyhow::Result<()> { 145 | let mut client_handles = vec![]; 146 | let mut client_tx = vec![]; 147 | 148 | let tx_quorum = Arc::new(tx_quorum); 149 | 150 | for h in ha_hosts { 151 | let (tx, rx) = flume::unbounded(); 152 | let handle = tokio::spawn(run_client(h.clone(), tx_quorum.clone(), tx.clone(), rx)); 153 | client_tx.push(tx); 154 | client_handles.push(handle); 155 | } 156 | 157 | // This task forwards incoming remote requests to every connected client. 158 | // Kind of like a broadcast channel from tokio, but async. 159 | tokio::spawn(async move { 160 | while let Ok(payload) = rx_remote_req.recv_async().await { 161 | debug!( 162 | "Received a RpcRequest in the ClientsBroadcastHandler: {:?}", 163 | payload 164 | ); 165 | for tx in client_tx.iter() { 166 | if let Err(err) = tx.send_async(payload.clone()).await { 167 | error!("Error in HA Cache client broadcaster: {}", err); 168 | } 169 | } 170 | if let RpcRequest::Shutdown = payload { 171 | debug!("Shutting down HA Cache client broadcaster"); 172 | break; 173 | } 174 | } 175 | }); 176 | 177 | for c in client_handles { 178 | c.await.unwrap().unwrap(); 179 | } 180 | debug!("All cache_client handles joined - Exiting"); 181 | 182 | Ok(()) 183 | } 184 | 185 | async fn run_client( 186 | host: String, 187 | tx_quorum: Arc>, 188 | tx_remote: flume::Sender, 189 | rx_remote_req: flume::Receiver, 190 | ) -> anyhow::Result<()> { 191 | loop { 192 | debug!("Trying to connect to host '{}'", host); 193 | 194 | let uri = Uri::from_str(&host).expect("Unable to build GRPC URI"); 195 | let chan_res = if *TLS { 196 | if host.starts_with("http://") { 197 | error!("Connecting to an HTTP address with active will fail!"); 198 | } 199 | 200 | let mut cfg = ClientTlsConfig::new(); 201 | // .identity(id) 202 | // .domain_name(&*TLS_VALIDATE_DOMAIN); 203 | 204 | if let Some(path) = &*PATH_TLS_CERT { 205 | debug!("Loading client TLS cert from {}", path); 206 | let cert = fs::read(path) 207 | .await 208 | .expect("Error reading client TLS Certificate"); 209 | 210 | let key = if let Some(path) = &*PATH_TLS_KEY { 211 | debug!("Loading client TLS key from {}", path); 212 | fs::read(path).await.expect("Error reading client TLS Key") 213 | } else { 214 | panic!("If PATH_TLS_CERT is given, just must provide PATH_TLS_KEY too"); 215 | }; 216 | 217 | cfg = cfg.identity(Identity::from_pem(cert, key)); 218 | } 219 | 220 | if let Some(path) = &*PATH_SERVER_CA { 221 | debug!("Loading server CA from {}", path); 222 | let ca = fs::read(path).await.expect("Error reading server TLS CA"); 223 | let ca_chain = Certificate::from_pem(ca); 224 | cfg = cfg.ca_certificate(ca_chain); 225 | } 226 | 227 | // We always want to validate the domain name 228 | cfg = cfg.domain_name(&*TLS_VALIDATE_DOMAIN); 229 | 230 | let mut channel = Channel::builder(uri) 231 | .tls_config(cfg) 232 | .expect("Error creating TLS Config for Cache Client") 233 | 
.tcp_keepalive(Some(Duration::from_secs(30))) 234 | .http2_keep_alive_interval(Duration::from_secs(30)) 235 | .keep_alive_while_idle(true); 236 | 237 | if !CACHE_TLS_SNI_OVERWRITE.is_empty() { 238 | let url = Uri::from_str(&CACHE_TLS_SNI_OVERWRITE) 239 | .expect("Error parsing CACHE_TLS_SNI_OVERWRITE to URI"); 240 | channel = channel.origin(url); 241 | } 242 | channel.connect().await 243 | } else { 244 | Channel::builder(uri) 245 | .tcp_keepalive(Some(Duration::from_secs(30))) 246 | .http2_keep_alive_interval(Duration::from_secs(30)) 247 | .keep_alive_while_idle(true) 248 | .connect() 249 | .await 250 | }; 251 | if chan_res.is_err() { 252 | error!("Unable to connect to host '{}'.", host); 253 | // retry with jitter 254 | time::sleep(Duration::from_millis(get_rand_between(2500, 7500))).await; 255 | continue; 256 | } 257 | let channel = chan_res.unwrap().ready_oneshot().await?; 258 | debug!( 259 | "Cache connection channel established successfully to host '{}'", 260 | host 261 | ); 262 | 263 | // when the channel has a valid connection, empty any possibly received cache operations 264 | // from the channel to not execute outdated operations from a maybe longer period of 265 | // "waiting for the host to come up" 266 | let drain = rx_remote_req.drain(); 267 | if drain.len() > 0 { 268 | debug!( 269 | "Drained 'rx_remote_req' channel for client {} had {} entries", 270 | host, 271 | drain.len() 272 | ); 273 | } 274 | 275 | let mut client: GrpcClient = CacheClient::with_interceptor(channel, add_auth); 276 | 277 | let mut server = RpcServer { 278 | address: host.clone(), 279 | state: RpcServerState::Alive, 280 | tx: Some(tx_remote.clone()), 281 | election_ts: -1, 282 | }; 283 | 284 | // We need a back channel for GET requests 285 | // This task stores the oneshot tx channels based on the lookup index so we can answer cache 286 | // remote lookups in case we get an answer from any remote host. 
287 | let (callback_tx, callback_rx) = flume::unbounded::(); 288 | let callback_handle = tokio::spawn(async move { 289 | let mut get_cache = cached::TimedCache::with_lifespan(10); 290 | let mut ack_cache = cached::TimedCache::with_lifespan(10); 291 | 292 | loop { 293 | match callback_rx.recv_async().await { 294 | Err(err) => { 295 | debug!("Received None over get_rx - exiting: {:?}", err); 296 | break; 297 | } 298 | 299 | Ok(req) => match req { 300 | HAReq::SaveGetCallbackTx { 301 | cache_name, 302 | entry, 303 | tx, 304 | } => { 305 | let idx = format!("{}-{}", cache_name, entry); 306 | debug!("HAReq::SaveGetCallbackTx for '{}'", idx); 307 | get_cache.cache_set(idx, tx); 308 | } 309 | 310 | HAReq::SaveModCallbackTx { req_id, tx } => { 311 | debug!("HAReq::SaveModifyCallbackTx for req_id: {}", req_id); 312 | ack_cache.cache_set(req_id, tx); 313 | } 314 | 315 | HAReq::SendCacheGetResponse { 316 | cache_name, 317 | entry, 318 | value, 319 | } => { 320 | let idx = format!("{}-{}", cache_name, entry); 321 | debug!("GetReq::Send for '{}'", idx); 322 | match get_cache.cache_remove(&idx) { 323 | None => { 324 | error!("Error in HAReq::SendCacheGetResponse: tx does not exist in the cache - possibly response timeout"); 325 | continue; 326 | } 327 | Some(tx) => { 328 | if let Err(err) = tx.send_async(value).await { 329 | error!( 330 | "Error sending back GET response in HAReq::SendCacheGetResponse: {:?}", 331 | err 332 | ); 333 | } 334 | } 335 | }; 336 | } 337 | 338 | HAReq::SendCacheModResponse { req_id, value } => { 339 | debug!("HAReq::SendCacheModResponse for req_id: {}", req_id); 340 | match ack_cache.cache_remove(&req_id) { 341 | None => { 342 | error!("Error in HAReq::SendCacheModResponse: tx does not exist in the cache - possibly response timeout"); 343 | continue; 344 | } 345 | Some(tx) => { 346 | if let Err(err) = tx.send_async(value).await { 347 | debug!( 348 | "Error sending back MOD response in HAReq::SendCacheModResponse: {:?}", 349 | err 350 | ); 351 | } 352 | } 353 | } 354 | } 355 | }, 356 | } 357 | } 358 | }); 359 | 360 | debug!("Starting the Sending Stream to the Server"); 361 | if let Err(err) = tx_quorum 362 | .send_async(QuorumReq::UpdateServer { 363 | server: server.clone(), 364 | }) 365 | .await 366 | { 367 | // can fail in case of a conflict resolution, if the other side has just shut down 368 | debug!("tx_quorum send error: {:?}", err); 369 | callback_handle.abort(); 370 | time::sleep(Duration::from_millis(get_rand_between( 371 | *RECONNECT_TIMEOUT_LOWER, 372 | *RECONNECT_TIMEOUT_UPPER, 373 | ))) 374 | .await; 375 | continue; 376 | } 377 | 378 | // Sending Stream to the Server 379 | // the ReceiverStream only accepts an mpsc channel 380 | let (tx_server, rx_server) = mpsc::channel::(*BUF_SIZE_CLIENT); 381 | let (tx_shutdown, rx_shutdown) = watch::channel(false); 382 | let rx_remote_clone = rx_remote_req.clone(); 383 | let callback_tx_clone = callback_tx.clone(); 384 | let server_clone = server.clone(); 385 | let rpc_request_handle = tokio::spawn(async move { 386 | debug!("Listening for RpcRequests for connected Client"); 387 | 388 | while let Ok(req) = rx_remote_clone.recv_async().await { 389 | if tx_server.is_closed() { 390 | // we may end up here if the server rx could not be converted into a stream 391 | // successfully 392 | error!("Stream to server has been closed"); 393 | break; 394 | } 395 | 396 | let forward_req = match req { 397 | RpcRequest::Ping => CacheRequest { 398 | method: Some(cache_request::Method::MgmtReq(cache::MgmtRequest { 399 | method: 
Some(cache::mgmt_request::Method::Ping(cache::Ping {})), 400 | })), 401 | }, 402 | 403 | RpcRequest::Get { 404 | cache_name, 405 | entry, 406 | resp, 407 | } => { 408 | // We need to save the oneshot tx if we get an async replay later 409 | // on from the server 410 | if let Err(err) = callback_tx_clone 411 | .send_async(HAReq::SaveGetCallbackTx { 412 | cache_name: cache_name.clone(), 413 | entry: entry.clone(), 414 | tx: resp, 415 | }) 416 | .await 417 | { 418 | error!("Error sending HAReq::SaveGetCallbackTx: {}", err); 419 | }; 420 | let get = cache_request::Method::Get(cache::Get { cache_name, entry }); 421 | CacheRequest { method: Some(get) } 422 | } 423 | 424 | RpcRequest::Put { 425 | cache_name, 426 | entry, 427 | value, 428 | resp, 429 | } => { 430 | let req_id = if let Some(tx) = resp { 431 | let req_id = get_cache_req_id(); 432 | if let Err(err) = callback_tx_clone 433 | .send_async(HAReq::SaveModCallbackTx { 434 | req_id: req_id.clone(), 435 | tx, 436 | }) 437 | .await 438 | { 439 | error!( 440 | "Error sending HAReq::SaveModCallbackTx in RpcRequest::Put: {}", 441 | err 442 | ); 443 | }; 444 | Some(req_id) 445 | } else { 446 | None 447 | }; 448 | 449 | let put = cache_request::Method::Put(cache::Put { 450 | cache_name, 451 | entry, 452 | value, 453 | req_id, 454 | }); 455 | CacheRequest { method: Some(put) } 456 | } 457 | 458 | RpcRequest::Insert { 459 | cache_name, 460 | entry, 461 | value, 462 | ack_level, 463 | resp, 464 | } => { 465 | // We need to save the oneshot tx if we get an async replay later 466 | // on from the server 467 | let req_id = get_cache_req_id(); 468 | if let Err(err) = callback_tx_clone 469 | .send_async(HAReq::SaveModCallbackTx { 470 | req_id: req_id.clone(), 471 | tx: resp, 472 | }) 473 | .await 474 | { 475 | error!( 476 | "Error sending HAReq::SaveModCallbackTx in RpcRequest::Insert: {}", 477 | err 478 | ); 479 | }; 480 | 481 | debug!("Sending out HA Method::Insert for req_id {}", req_id); 482 | let insert = cache_request::Method::Insert(cache::Insert { 483 | cache_name, 484 | entry, 485 | value, 486 | req_id, 487 | ack_level: Some(ack_level.get_rpc_value()), 488 | }); 489 | CacheRequest { 490 | method: Some(insert), 491 | } 492 | } 493 | 494 | RpcRequest::Del { 495 | cache_name, 496 | entry, 497 | resp, 498 | } => { 499 | let req_id = if let Some(tx) = resp { 500 | let req_id = get_cache_req_id(); 501 | if let Err(err) = callback_tx_clone 502 | .send_async(HAReq::SaveModCallbackTx { 503 | req_id: req_id.clone(), 504 | tx, 505 | }) 506 | .await 507 | { 508 | error!( 509 | "Error sending HAReq::SaveModCallbackTx in RpcRequest::Del: {}", 510 | err 511 | ); 512 | }; 513 | Some(req_id) 514 | } else { 515 | None 516 | }; 517 | 518 | let req = cache_request::Method::Del(cache::Del { 519 | cache_name, 520 | entry, 521 | req_id, 522 | }); 523 | CacheRequest { method: Some(req) } 524 | } 525 | 526 | RpcRequest::Remove { 527 | cache_name, 528 | entry, 529 | ack_level, 530 | resp, 531 | } => { 532 | // We need to save the oneshot tx if we get an async replay later 533 | // on from the server 534 | let req_id = get_cache_req_id(); 535 | if let Err(err) = callback_tx_clone 536 | .send_async(HAReq::SaveModCallbackTx { 537 | req_id: req_id.clone(), 538 | tx: resp, 539 | }) 540 | .await 541 | { 542 | error!( 543 | "Error sending HAReq::SaveModCallbackTx in RpcRequest::Remove: {}", 544 | err 545 | ); 546 | }; 547 | 548 | debug!("Sending out HA Method::Remove for req_id {}", req_id); 549 | let req = cache_request::Method::Remove(cache::Remove { 550 | cache_name, 551 | 
entry, 552 | req_id, 553 | ack_level: Some(ack_level.get_rpc_value()), 554 | }); 555 | CacheRequest { method: Some(req) } 556 | } 557 | 558 | // sends out a req in case we want to be the new leader 559 | RpcRequest::LeaderReq { addr, election_ts } => { 560 | let req = cache::mgmt_request::Method::LeaderReq(cache::LeaderReq { 561 | addr, 562 | election_ts, 563 | }); 564 | CacheRequest { 565 | method: Some(cache_request::Method::MgmtReq(cache::MgmtRequest { 566 | method: Some(req), 567 | })), 568 | } 569 | } 570 | 571 | // should be sent out, when we are the leader and have quorum 572 | RpcRequest::LeaderAck { addr, election_ts } => { 573 | let req = cache::mgmt_request::Method::LeaderAck(cache::LeaderAck { 574 | addr, 575 | election_ts, 576 | }); 577 | CacheRequest { 578 | method: Some(cache_request::Method::MgmtReq(cache::MgmtRequest { 579 | method: Some(req), 580 | })), 581 | } 582 | } 583 | 584 | RpcRequest::LeaderSwitch { 585 | election_ts, 586 | vote_host, 587 | } => { 588 | let req = cache::mgmt_request::Method::LeaderSwitch(cache::LeaderSwitch { 589 | election_ts, 590 | vote_host, 591 | }); 592 | CacheRequest { 593 | method: Some(cache_request::Method::MgmtReq(cache::MgmtRequest { 594 | method: Some(req), 595 | })), 596 | } 597 | } 598 | 599 | // ack for a received LeaderReq from another host 600 | RpcRequest::LeaderReqAck { addr, election_ts } => { 601 | let req = cache::mgmt_request::Method::LeaderReqAck(cache::LeaderReqAck { 602 | addr, 603 | election_ts, 604 | }); 605 | CacheRequest { 606 | method: Some(cache_request::Method::MgmtReq(cache::MgmtRequest { 607 | method: Some(req), 608 | })), 609 | } 610 | } 611 | 612 | // acks a LeaderAck and starts the normal caching operation 613 | RpcRequest::LeaderAckAck { addr: _ } => { 614 | todo!("...do we even need this´?"); 615 | } 616 | 617 | // the same as LeaderAckAck, but for a switch / graceful shutdown of the 618 | // current leader 619 | RpcRequest::LeaderSwitchAck { addr } => { 620 | let req = 621 | cache::mgmt_request::Method::LeaderSwitchAck(cache::LeaderSwitchAck { 622 | addr, 623 | }); 624 | CacheRequest { 625 | method: Some(cache_request::Method::MgmtReq(cache::MgmtRequest { 626 | method: Some(req), 627 | })), 628 | } 629 | } 630 | 631 | RpcRequest::LeaderSwitchDead { 632 | vote_host, 633 | election_ts, 634 | } => { 635 | let req = cache::mgmt_request::Method::LeaderDead(cache::LeaderDead { 636 | vote_host, 637 | election_ts, 638 | }); 639 | CacheRequest { 640 | method: Some(cache_request::Method::MgmtReq(cache::MgmtRequest { 641 | method: Some(req), 642 | })), 643 | } 644 | } 645 | 646 | RpcRequest::LeaderSwitchPriority { 647 | vote_host, 648 | election_ts, 649 | } => { 650 | let req = cache::mgmt_request::Method::LeaderSwitchPriority( 651 | cache::LeaderSwitchPriority { 652 | vote_host, 653 | election_ts, 654 | }, 655 | ); 656 | CacheRequest { 657 | method: Some(cache_request::Method::MgmtReq(cache::MgmtRequest { 658 | method: Some(req), 659 | })), 660 | } 661 | } 662 | 663 | RpcRequest::Shutdown => { 664 | info!("Received shutdown message - exiting"); 665 | let _ = tx_shutdown.send(true); 666 | break; 667 | } 668 | }; 669 | 670 | if let Err(err) = tx_server.send(forward_req).await { 671 | error!( 672 | "Error forwarding Cache Client message to handler for host {}: {}", 673 | server_clone.address, err 674 | ); 675 | } 676 | } 677 | }); 678 | 679 | // the input stream from the server 680 | let in_stream = ReceiverStream::new(rx_server); 681 | let res = client.stream_values(in_stream).await; 682 | let mut res_stream = match 
res { 683 | Ok(r) => r.into_inner(), 684 | Err(err) => { 685 | error!( 686 | "Error opening 'stream_values' to the server {}: {}\n", 687 | host, err 688 | ); 689 | time::sleep(Duration::from_millis(get_rand_between(1000, 5000))).await; 690 | continue; 691 | } 692 | }; 693 | 694 | loop { 695 | match res_stream.try_next().await { 696 | Ok(recv) => { 697 | if recv.is_none() { 698 | warn!("Lost connection to remote cache host {}", host); 699 | break; 700 | } 701 | 702 | let method = recv.unwrap().method; 703 | if method.is_none() { 704 | error!("Error with Ack from Server - result is None"); 705 | break; 706 | } 707 | 708 | match method.unwrap() { 709 | ack::Method::GetAck(a) => { 710 | let get_req = HAReq::SendCacheGetResponse { 711 | cache_name: a.cache_name, 712 | entry: a.entry, 713 | value: a.value, 714 | }; 715 | let send = callback_tx.send_async(get_req).await; 716 | if send.is_err() { 717 | error!("Error forwarding remote Cache GET request"); 718 | } 719 | } 720 | 721 | ack::Method::PutAck(ack) => { 722 | if let Some(req_id) = ack.req_id { 723 | let req = HAReq::SendCacheModResponse { 724 | req_id, 725 | value: ack.mod_res.unwrap_or(false), 726 | }; 727 | if let Err(err) = callback_tx.send_async(req).await { 728 | error!("{}", err); 729 | } 730 | } 731 | } 732 | 733 | ack::Method::DelAck(ack) => { 734 | if let Some(req_id) = ack.req_id { 735 | let req = HAReq::SendCacheModResponse { 736 | req_id, 737 | value: ack.mod_res.unwrap_or(false), 738 | }; 739 | if let Err(err) = callback_tx.send_async(req).await { 740 | error!("{}", err); 741 | } 742 | } 743 | } 744 | 745 | ack::Method::MgmtAck(ack) => { 746 | if ack.method.is_none() { 747 | continue; 748 | } 749 | let method = ack.method.unwrap(); 750 | 751 | match method { 752 | Method::Pong(_) => { 753 | debug!("pong"); 754 | // todo!("Method::Pong - use it to calculate a RTT in the future - not yet implemented"); 755 | } 756 | 757 | Method::HealthAck(_) => {} 758 | 759 | Method::LeaderInfo(info) => { 760 | if let Some(addr) = info.addr { 761 | debug!( 762 | "Received LeaderInfo: {:?} with quorum: {}", 763 | addr, info.has_quorum 764 | ); 765 | if let Err(err) = tx_quorum 766 | .send_async(QuorumReq::LeaderInfo { 767 | addr, 768 | has_quorum: info.has_quorum, 769 | election_ts: info.election_ts, 770 | }) 771 | .await 772 | { 773 | error!("{:?}", CacheError::from(&err)); 774 | } 775 | } 776 | } 777 | 778 | Method::LeaderReqAck(req) => { 779 | debug!("Received LeaderReqAck: {:?}", req.addr); 780 | if let Err(err) = tx_quorum 781 | .send_async(QuorumReq::LeaderReqAck { 782 | addr: req.addr, 783 | election_ts: req.election_ts, 784 | }) 785 | .await 786 | { 787 | error!("{:?}", CacheError::from(&err)); 788 | } 789 | } 790 | 791 | Method::LeaderAckAck(_) => {} 792 | 793 | Method::LeaderSwitchAck(req) => { 794 | debug!("Received LeaderSwitchAck: {:?}", req.addr); 795 | if let Err(err) = tx_quorum 796 | .send_async(QuorumReq::LeaderSwitchAck { req }) 797 | .await 798 | { 799 | error!("{:?}", CacheError::from(&err)); 800 | } 801 | } 802 | } 803 | } 804 | 805 | ack::Method::Error(e) => { 806 | error!("Cache Server sent an error: {:?}", e); 807 | } 808 | } 809 | } 810 | 811 | Err(err) => { 812 | debug!( 813 | "Received an error in client receiver for Host {} - exiting: {:?}", 814 | host, err 815 | ); 816 | break; 817 | } 818 | } 819 | } 820 | 821 | // if we get here, we lost connection 822 | error!("Connection lost to host: '{}'", host); 823 | server.state = RpcServerState::Dead; 824 | tx_quorum 825 | .send_async(QuorumReq::UpdateServer { 826 | 
server: server.clone(), 827 | }) 828 | .await 829 | .expect("Unregistering Dead Client"); 830 | 831 | // Make sure to kill the spawns to not have memory leaks since they never end 832 | callback_handle.abort(); 833 | rpc_request_handle.abort(); 834 | 835 | if *rx_shutdown.borrow() { 836 | break; 837 | } 838 | 839 | time::sleep(Duration::from_millis(get_rand_between( 840 | *RECONNECT_TIMEOUT_LOWER, 841 | *RECONNECT_TIMEOUT_UPPER, 842 | ))) 843 | .await; 844 | } 845 | 846 | Ok(()) 847 | } 848 | 849 | fn add_auth(mut req: Request<()>) -> Result, Status> { 850 | let token_str = env::var("CACHE_AUTH_TOKEN").expect("CACHE_AUTH_TOKEN is not set"); 851 | let token: MetadataValue<_> = token_str 852 | .parse() 853 | .expect("Could not parse the Token to MetadataValue - needs to be ASCII"); 854 | req.metadata_mut().insert("authorization", token); 855 | Ok(req) 856 | } 857 | -------------------------------------------------------------------------------- /src/rpc/cache.rs: -------------------------------------------------------------------------------- 1 | // This file is @generated by prost-build. 2 | #[allow(clippy::derive_partial_eq_without_eq)] 3 | #[derive(Clone, PartialEq, ::prost::Message)] 4 | pub struct Ack { 5 | #[prost(oneof = "ack::Method", tags = "1, 2, 3, 4, 5")] 6 | pub method: ::core::option::Option, 7 | } 8 | /// Nested message and enum types in `Ack`. 9 | pub mod ack { 10 | #[allow(clippy::derive_partial_eq_without_eq)] 11 | #[derive(Clone, PartialEq, ::prost::Oneof)] 12 | pub enum Method { 13 | #[prost(message, tag = "1")] 14 | GetAck(super::GetAck), 15 | #[prost(message, tag = "2")] 16 | PutAck(super::PutAck), 17 | #[prost(message, tag = "3")] 18 | DelAck(super::DelAck), 19 | #[prost(message, tag = "4")] 20 | MgmtAck(super::MgmtAck), 21 | #[prost(message, tag = "5")] 22 | Error(super::Error), 23 | } 24 | } 25 | /// A Cache key / value pair 26 | /// Since it accepts any count of bytes as the value, it is generically working for any object, which can be serialized 27 | /// manually. 28 | #[allow(clippy::derive_partial_eq_without_eq)] 29 | #[derive(Clone, PartialEq, ::prost::Message)] 30 | pub struct CacheEntry { 31 | #[prost(string, tag = "1")] 32 | pub cache_name: ::prost::alloc::string::String, 33 | #[prost(string, tag = "2")] 34 | pub entry: ::prost::alloc::string::String, 35 | #[prost(bytes = "vec", tag = "3")] 36 | pub value: ::prost::alloc::vec::Vec, 37 | } 38 | /// The Cache Value being sent over the stream. 39 | #[allow(clippy::derive_partial_eq_without_eq)] 40 | #[derive(Clone, PartialEq, ::prost::Message)] 41 | pub struct CacheRequest { 42 | #[prost(oneof = "cache_request::Method", tags = "1, 2, 3, 4, 5, 6")] 43 | pub method: ::core::option::Option, 44 | } 45 | /// Nested message and enum types in `CacheRequest`. 
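
Every reply from the server is wrapped in the `Ack` envelope defined above; a successful remote GET, for instance, comes back as `ack::Method::GetAck` whose optional `value` is `None` on a cache miss. A hedged construction sketch (the `GetAck` message itself is defined a few lines further down; all field values are made up):

```rust
use crate::rpc::cache::{ack, Ack, GetAck};

/// Builds the Ack a server would send for a GET; `found == false` models a miss.
fn get_ack(found: bool) -> Ack {
    Ack {
        method: Some(ack::Method::GetAck(GetAck {
            cache_name: "sessions".to_string(),
            entry: "user_1".to_string(),
            // None is a valid answer and simply means "entry does not exist"
            value: found.then(|| b"serialized value".to_vec()),
        })),
    }
}
```
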
46 | pub mod cache_request { 47 | #[allow(clippy::derive_partial_eq_without_eq)] 48 | #[derive(Clone, PartialEq, ::prost::Oneof)] 49 | pub enum Method { 50 | #[prost(message, tag = "1")] 51 | Get(super::Get), 52 | #[prost(message, tag = "2")] 53 | Put(super::Put), 54 | #[prost(message, tag = "3")] 55 | Insert(super::Insert), 56 | #[prost(message, tag = "4")] 57 | Del(super::Del), 58 | #[prost(message, tag = "5")] 59 | Remove(super::Remove), 60 | #[prost(message, tag = "6")] 61 | MgmtReq(super::MgmtRequest), 62 | } 63 | } 64 | #[allow(clippy::derive_partial_eq_without_eq)] 65 | #[derive(Clone, PartialEq, ::prost::Message)] 66 | pub struct Error { 67 | #[prost(string, tag = "1")] 68 | pub error: ::prost::alloc::string::String, 69 | } 70 | /// GET message for a cache operation 71 | #[allow(clippy::derive_partial_eq_without_eq)] 72 | #[derive(Clone, PartialEq, ::prost::Message)] 73 | pub struct Get { 74 | #[prost(string, tag = "1")] 75 | pub cache_name: ::prost::alloc::string::String, 76 | #[prost(string, tag = "2")] 77 | pub entry: ::prost::alloc::string::String, 78 | } 79 | /// ACK for a successful GET 80 | #[allow(clippy::derive_partial_eq_without_eq)] 81 | #[derive(Clone, PartialEq, ::prost::Message)] 82 | pub struct GetAck { 83 | #[prost(string, tag = "1")] 84 | pub cache_name: ::prost::alloc::string::String, 85 | #[prost(string, tag = "2")] 86 | pub entry: ::prost::alloc::string::String, 87 | #[prost(bytes = "vec", optional, tag = "3")] 88 | pub value: ::core::option::Option<::prost::alloc::vec::Vec>, 89 | } 90 | /// PUT message for a cache operation 91 | #[allow(clippy::derive_partial_eq_without_eq)] 92 | #[derive(Clone, PartialEq, ::prost::Message)] 93 | pub struct Put { 94 | #[prost(string, tag = "1")] 95 | pub cache_name: ::prost::alloc::string::String, 96 | #[prost(string, tag = "2")] 97 | pub entry: ::prost::alloc::string::String, 98 | #[prost(bytes = "vec", tag = "3")] 99 | pub value: ::prost::alloc::vec::Vec, 100 | #[prost(string, optional, tag = "4")] 101 | pub req_id: ::core::option::Option<::prost::alloc::string::String>, 102 | } 103 | /// INSERT message for a cache operation 104 | #[allow(clippy::derive_partial_eq_without_eq)] 105 | #[derive(Clone, PartialEq, ::prost::Message)] 106 | pub struct Insert { 107 | #[prost(string, tag = "1")] 108 | pub cache_name: ::prost::alloc::string::String, 109 | #[prost(string, tag = "2")] 110 | pub entry: ::prost::alloc::string::String, 111 | #[prost(bytes = "vec", tag = "3")] 112 | pub value: ::prost::alloc::vec::Vec, 113 | #[prost(string, tag = "4")] 114 | pub req_id: ::prost::alloc::string::String, 115 | #[prost(message, optional, tag = "5")] 116 | pub ack_level: ::core::option::Option, 117 | } 118 | /// ACK for a successful PUT 119 | #[allow(clippy::derive_partial_eq_without_eq)] 120 | #[derive(Clone, PartialEq, ::prost::Message)] 121 | pub struct PutAck { 122 | #[prost(string, optional, tag = "1")] 123 | pub req_id: ::core::option::Option<::prost::alloc::string::String>, 124 | #[prost(bool, optional, tag = "2")] 125 | pub mod_res: ::core::option::Option, 126 | } 127 | /// DEL message for a cache operation 128 | #[allow(clippy::derive_partial_eq_without_eq)] 129 | #[derive(Clone, PartialEq, ::prost::Message)] 130 | pub struct Del { 131 | #[prost(string, tag = "1")] 132 | pub cache_name: ::prost::alloc::string::String, 133 | #[prost(string, tag = "2")] 134 | pub entry: ::prost::alloc::string::String, 135 | #[prost(string, optional, tag = "3")] 136 | pub req_id: ::core::option::Option<::prost::alloc::string::String>, 137 | } 138 | /// 
REMOVE message for a cache operation 139 | #[allow(clippy::derive_partial_eq_without_eq)] 140 | #[derive(Clone, PartialEq, ::prost::Message)] 141 | pub struct Remove { 142 | #[prost(string, tag = "1")] 143 | pub cache_name: ::prost::alloc::string::String, 144 | #[prost(string, tag = "2")] 145 | pub entry: ::prost::alloc::string::String, 146 | #[prost(string, tag = "3")] 147 | pub req_id: ::prost::alloc::string::String, 148 | #[prost(message, optional, tag = "4")] 149 | pub ack_level: ::core::option::Option, 150 | } 151 | /// ACK for a successful DEL 152 | #[allow(clippy::derive_partial_eq_without_eq)] 153 | #[derive(Clone, PartialEq, ::prost::Message)] 154 | pub struct DelAck { 155 | #[prost(string, optional, tag = "1")] 156 | pub req_id: ::core::option::Option<::prost::alloc::string::String>, 157 | #[prost(bool, optional, tag = "2")] 158 | pub mod_res: ::core::option::Option, 159 | } 160 | /// The AckLevel for HA cache modifying requests 161 | #[allow(clippy::derive_partial_eq_without_eq)] 162 | #[derive(Clone, PartialEq, ::prost::Message)] 163 | pub struct AckLevel { 164 | #[prost(oneof = "ack_level::AckLevel", tags = "1, 2, 3")] 165 | pub ack_level: ::core::option::Option, 166 | } 167 | /// Nested message and enum types in `AckLevel`. 168 | pub mod ack_level { 169 | #[allow(clippy::derive_partial_eq_without_eq)] 170 | #[derive(Clone, PartialEq, ::prost::Oneof)] 171 | pub enum AckLevel { 172 | #[prost(message, tag = "1")] 173 | LevelQuorum(super::Empty), 174 | #[prost(message, tag = "2")] 175 | LevelOnce(super::Empty), 176 | #[prost(message, tag = "3")] 177 | LevelLeader(super::Empty), 178 | } 179 | } 180 | /// The Cache Value being sent over the stream. 181 | #[allow(clippy::derive_partial_eq_without_eq)] 182 | #[derive(Clone, PartialEq, ::prost::Message)] 183 | pub struct MgmtRequest { 184 | #[prost(oneof = "mgmt_request::Method", tags = "1, 2, 3, 4, 5, 6, 7, 8, 9")] 185 | pub method: ::core::option::Option, 186 | } 187 | /// Nested message and enum types in `MgmtRequest`. 
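
On the request side the same oneof pattern applies: every cache operation is wrapped into `cache_request::Method`. A hedged construction sketch for a PUT and an INSERT — the `req_id` is what correlates the later `PutAck` with the callback registered via `HAReq::SaveModCallbackTx` in the client; cache names, entries, and values are illustrative:

```rust
use crate::rpc::cache::{
    ack_level, cache_request, AckLevel, CacheRequest, Empty, Insert, Put,
};

fn build_put() -> CacheRequest {
    CacheRequest {
        method: Some(cache_request::Method::Put(Put {
            cache_name: "sessions".to_string(),
            entry: "user_1".to_string(),
            value: b"serialized value".to_vec(),
            // optional: only set when the caller wants an explicit PutAck back
            req_id: Some("req-123".to_string()),
        })),
    }
}

fn build_insert() -> CacheRequest {
    CacheRequest {
        method: Some(cache_request::Method::Insert(Insert {
            cache_name: "sessions".to_string(),
            entry: "user_1".to_string(),
            value: b"serialized value".to_vec(),
            req_id: "req-124".to_string(),
            // request acknowledgement at quorum level
            ack_level: Some(AckLevel {
                ack_level: Some(ack_level::AckLevel::LevelQuorum(Empty {})),
            }),
        })),
    }
}
```
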
188 | pub mod mgmt_request { 189 | #[allow(clippy::derive_partial_eq_without_eq)] 190 | #[derive(Clone, PartialEq, ::prost::Oneof)] 191 | pub enum Method { 192 | #[prost(message, tag = "1")] 193 | Ping(super::Ping), 194 | #[prost(message, tag = "2")] 195 | Health(super::Health), 196 | #[prost(message, tag = "3")] 197 | LeaderReq(super::LeaderReq), 198 | #[prost(message, tag = "4")] 199 | LeaderReqAck(super::LeaderReqAck), 200 | #[prost(message, tag = "5")] 201 | LeaderAck(super::LeaderAck), 202 | #[prost(message, tag = "6")] 203 | LeaderSwitch(super::LeaderSwitch), 204 | #[prost(message, tag = "7")] 205 | LeaderSwitchPriority(super::LeaderSwitchPriority), 206 | #[prost(message, tag = "8")] 207 | LeaderDead(super::LeaderDead), 208 | #[prost(message, tag = "9")] 209 | LeaderSwitchAck(super::LeaderSwitchAck), 210 | } 211 | } 212 | /// Just a ping with no content 213 | #[allow(clippy::derive_partial_eq_without_eq)] 214 | #[derive(Clone, PartialEq, ::prost::Message)] 215 | pub struct Ping {} 216 | /// Requests health information 217 | #[allow(clippy::derive_partial_eq_without_eq)] 218 | #[derive(Clone, PartialEq, ::prost::Message)] 219 | pub struct Health {} 220 | /// Is sent out if quorum was established and no leader is present to make a request for taking over that role 221 | #[allow(clippy::derive_partial_eq_without_eq)] 222 | #[derive(Clone, PartialEq, ::prost::Message)] 223 | pub struct LeaderReq { 224 | /// The address where this host can be reached (multiple possible) 225 | #[prost(string, tag = "1")] 226 | pub addr: ::prost::alloc::string::String, 227 | /// Timestamp when the request has been created. In case of a conflict, the request with the earlier creation wins. 228 | #[prost(int64, tag = "2")] 229 | pub election_ts: i64, 230 | } 231 | /// Is sent out if quorum was established and 'enough' hosts have accepted the leader request 232 | #[allow(clippy::derive_partial_eq_without_eq)] 233 | #[derive(Clone, PartialEq, ::prost::Message)] 234 | pub struct LeaderAck { 235 | /// The address where this host can be reached 236 | #[prost(string, tag = "1")] 237 | pub addr: ::prost::alloc::string::String, 238 | /// Timestamp when the request has been created. In case of a conflict, the request with the earlier creation wins. 239 | #[prost(int64, tag = "2")] 240 | pub election_ts: i64, 241 | } 242 | /// Is sent out, when the current leader is about to shut down 243 | #[allow(clippy::derive_partial_eq_without_eq)] 244 | #[derive(Clone, PartialEq, ::prost::Message)] 245 | pub struct LeaderSwitch { 246 | /// The address of the new host that this leader votes for the next one. This creates less friction when changing. 247 | #[prost(string, tag = "1")] 248 | pub vote_host: ::prost::alloc::string::String, 249 | #[prost(int64, tag = "2")] 250 | pub election_ts: i64, 251 | } 252 | /// Is sent out, when there are multiple leadership requests to promote the one with the higher priority 253 | #[allow(clippy::derive_partial_eq_without_eq)] 254 | #[derive(Clone, PartialEq, ::prost::Message)] 255 | pub struct LeaderSwitchPriority { 256 | #[prost(string, tag = "1")] 257 | pub vote_host: ::prost::alloc::string::String, 258 | #[prost(int64, tag = "2")] 259 | pub election_ts: i64, 260 | } 261 | /// Is sent out, when the leader is dead, so the sender can take over that role 262 | #[allow(clippy::derive_partial_eq_without_eq)] 263 | #[derive(Clone, PartialEq, ::prost::Message)] 264 | pub struct LeaderDead { 265 | /// The address of the new host that this leader votes for the next one. 
This creates less friction when changing. 266 | #[prost(string, tag = "1")] 267 | pub vote_host: ::prost::alloc::string::String, 268 | #[prost(int64, tag = "2")] 269 | pub election_ts: i64, 270 | } 271 | #[allow(clippy::derive_partial_eq_without_eq)] 272 | #[derive(Clone, PartialEq, ::prost::Message)] 273 | pub struct MgmtAck { 274 | #[prost(oneof = "mgmt_ack::Method", tags = "1, 2, 3, 4, 5, 6")] 275 | pub method: ::core::option::Option, 276 | } 277 | /// Nested message and enum types in `MgmtAck`. 278 | pub mod mgmt_ack { 279 | #[allow(clippy::derive_partial_eq_without_eq)] 280 | #[derive(Clone, PartialEq, ::prost::Oneof)] 281 | pub enum Method { 282 | #[prost(message, tag = "1")] 283 | Pong(super::Pong), 284 | #[prost(message, tag = "2")] 285 | HealthAck(super::HealthAck), 286 | #[prost(message, tag = "3")] 287 | LeaderInfo(super::LeaderInfo), 288 | #[prost(message, tag = "4")] 289 | LeaderReqAck(super::LeaderReqAck), 290 | /// TODO do we even need the leader_ack_ack? 291 | #[prost(message, tag = "5")] 292 | LeaderAckAck(super::LeaderAckAck), 293 | #[prost(message, tag = "6")] 294 | LeaderSwitchAck(super::LeaderSwitchAck), 295 | } 296 | } 297 | /// Just a pong with no content 298 | #[allow(clippy::derive_partial_eq_without_eq)] 299 | #[derive(Clone, PartialEq, ::prost::Message)] 300 | pub struct Pong {} 301 | /// Returns health information 302 | #[allow(clippy::derive_partial_eq_without_eq)] 303 | #[derive(Clone, PartialEq, ::prost::Message)] 304 | pub struct HealthAck { 305 | /// uptime of this host in seconds 306 | #[prost(uint64, tag = "1")] 307 | pub uptime_secs: u64, 308 | /// if it has quorum or not 309 | #[prost(bool, tag = "2")] 310 | pub quorum: bool, 311 | /// if this host is a leader or follower 312 | #[prost(message, optional, tag = "3")] 313 | pub state: ::core::option::Option, 314 | #[prost(message, optional, tag = "4")] 315 | pub leader: ::core::option::Option, 316 | /// list of all configured hosts with their connection state 317 | #[prost(message, repeated, tag = "5")] 318 | pub host_health: ::prost::alloc::vec::Vec, 319 | } 320 | #[allow(clippy::derive_partial_eq_without_eq)] 321 | #[derive(Clone, PartialEq, ::prost::Message)] 322 | pub struct State { 323 | #[prost(oneof = "state::Value", tags = "1, 2, 3, 4, 5, 6, 7")] 324 | pub value: ::core::option::Option, 325 | } 326 | /// Nested message and enum types in `State`. 
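
Together with the `state` oneof and `HostHealth` defined just below, the `HealthAck` above is what a `Health` request returns. A hedged helper that condenses such a report into a single log line (purely illustrative, not part of the crate):

```rust
use crate::rpc::cache::{state, HealthAck};

fn summarize(health: &HealthAck) -> String {
    // flatten the nested Option<State> / Option<state::Value> oneof
    let role = match health.state.as_ref().and_then(|s| s.value.as_ref()) {
        Some(state::Value::Leader(_)) => "leader",
        Some(state::Value::Follower(_)) => "follower",
        Some(state::Value::LeaderDead(_)) => "leader dead",
        Some(state::Value::LeaderSwitch(_)) => "leader switch",
        Some(state::Value::LeaderTxAwait(_)) => "awaiting leader tx",
        Some(state::Value::LeadershipRequested(_)) => "leadership requested",
        Some(state::Value::Undefined(_)) | None => "undefined",
    };
    format!(
        "up {}s, quorum: {}, role: {}, hosts connected: {}",
        health.uptime_secs,
        health.quorum,
        role,
        health.host_health.iter().filter(|h| h.connected).count()
    )
}
```
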
327 | pub mod state { 328 | #[allow(clippy::derive_partial_eq_without_eq)] 329 | #[derive(Clone, PartialEq, ::prost::Oneof)] 330 | pub enum Value { 331 | #[prost(message, tag = "1")] 332 | Leader(super::Empty), 333 | #[prost(message, tag = "2")] 334 | LeaderDead(super::Empty), 335 | #[prost(message, tag = "3")] 336 | LeaderSwitch(super::Empty), 337 | #[prost(message, tag = "4")] 338 | LeaderTxAwait(super::Empty), 339 | #[prost(message, tag = "5")] 340 | LeadershipRequested(super::Empty), 341 | #[prost(message, tag = "6")] 342 | Follower(super::Empty), 343 | #[prost(message, tag = "7")] 344 | Undefined(super::Empty), 345 | } 346 | } 347 | /// Information about the leader 348 | #[allow(clippy::derive_partial_eq_without_eq)] 349 | #[derive(Clone, PartialEq, ::prost::Message)] 350 | pub struct Leader { 351 | #[prost(string, tag = "1")] 352 | pub addr: ::prost::alloc::string::String, 353 | #[prost(int64, tag = "2")] 354 | pub election_ts: i64, 355 | #[prost(bool, tag = "3")] 356 | pub connected: bool, 357 | } 358 | /// Information about the remote host 359 | #[allow(clippy::derive_partial_eq_without_eq)] 360 | #[derive(Clone, PartialEq, ::prost::Message)] 361 | pub struct HostHealth { 362 | #[prost(string, tag = "1")] 363 | pub addr: ::prost::alloc::string::String, 364 | #[prost(bool, tag = "2")] 365 | pub connected: bool, 366 | } 367 | /// Will be sent out on a new client connection to inform about a possibly existing leader 368 | #[allow(clippy::derive_partial_eq_without_eq)] 369 | #[derive(Clone, PartialEq, ::prost::Message)] 370 | pub struct LeaderInfo { 371 | #[prost(string, optional, tag = "1")] 372 | pub addr: ::core::option::Option<::prost::alloc::string::String>, 373 | #[prost(int64, tag = "2")] 374 | pub election_ts: i64, 375 | #[prost(bool, tag = "3")] 376 | pub has_quorum: bool, 377 | } 378 | /// Ack for accepting a LeaderReq 379 | #[allow(clippy::derive_partial_eq_without_eq)] 380 | #[derive(Clone, PartialEq, ::prost::Message)] 381 | pub struct LeaderReqAck { 382 | /// The address of the acked leader 383 | #[prost(string, tag = "1")] 384 | pub addr: ::prost::alloc::string::String, 385 | /// The original timestamp from the LeaderReq itself 386 | #[prost(int64, tag = "2")] 387 | pub election_ts: i64, 388 | } 389 | /// Ack for accepting a LeaderAck 390 | #[allow(clippy::derive_partial_eq_without_eq)] 391 | #[derive(Clone, PartialEq, ::prost::Message)] 392 | pub struct LeaderAckAck { 393 | #[prost(string, tag = "1")] 394 | pub addr: ::prost::alloc::string::String, 395 | } 396 | /// Ack for accepting a LeaderSwitch 397 | #[allow(clippy::derive_partial_eq_without_eq)] 398 | #[derive(Clone, PartialEq, ::prost::Message)] 399 | pub struct LeaderSwitchAck { 400 | #[prost(string, tag = "1")] 401 | pub addr: ::prost::alloc::string::String, 402 | } 403 | #[allow(clippy::derive_partial_eq_without_eq)] 404 | #[derive(Clone, PartialEq, ::prost::Message)] 405 | pub struct Empty {} 406 | /// Generated client implementations. 407 | pub mod cache_client { 408 | #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] 409 | use tonic::codegen::*; 410 | use tonic::codegen::http::Uri; 411 | /// Contains the endpoints for managing the Cache in a HA Cluster to exchange and update values remotely 412 | #[derive(Debug, Clone)] 413 | pub struct CacheClient { 414 | inner: tonic::client::Grpc, 415 | } 416 | impl CacheClient { 417 | /// Attempt to create a new client by connecting to a given endpoint. 
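
The plain `connect` helper documented here, combined with `stream_values` further down, is the minimal way to talk to a redhac node. A hedged usage sketch — no TLS and no auth interceptor, endpoint address is illustrative; the real `run_client` above instead goes through `Channel::builder` plus `CacheClient::with_interceptor(channel, add_auth)`:

```rust
use tokio_stream::wrappers::ReceiverStream;

use crate::rpc::cache::{cache_client::CacheClient, CacheRequest};

async fn demo() -> anyhow::Result<()> {
    // plain connect - illustration only
    let mut client = CacheClient::connect("http://127.0.0.1:8001").await?;

    // the outgoing half of the bidirectional stream is fed through an mpsc channel
    let (tx, rx) = tokio::sync::mpsc::channel::<CacheRequest>(64);
    let mut acks = client
        .stream_values(ReceiverStream::new(rx))
        .await?
        .into_inner();

    drop(tx); // closing the sender ends the outgoing stream
    while let Some(ack) = acks.message().await? {
        println!("received ack: {:?}", ack);
    }
    Ok(())
}
```
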
418 | pub async fn connect(dst: D) -> Result 419 | where 420 | D: TryInto, 421 | D::Error: Into, 422 | { 423 | let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; 424 | Ok(Self::new(conn)) 425 | } 426 | } 427 | impl CacheClient 428 | where 429 | T: tonic::client::GrpcService, 430 | T::Error: Into, 431 | T::ResponseBody: Body + Send + 'static, 432 | ::Error: Into + Send, 433 | { 434 | pub fn new(inner: T) -> Self { 435 | let inner = tonic::client::Grpc::new(inner); 436 | Self { inner } 437 | } 438 | pub fn with_origin(inner: T, origin: Uri) -> Self { 439 | let inner = tonic::client::Grpc::with_origin(inner, origin); 440 | Self { inner } 441 | } 442 | pub fn with_interceptor( 443 | inner: T, 444 | interceptor: F, 445 | ) -> CacheClient> 446 | where 447 | F: tonic::service::Interceptor, 448 | T::ResponseBody: Default, 449 | T: tonic::codegen::Service< 450 | http::Request, 451 | Response = http::Response< 452 | >::ResponseBody, 453 | >, 454 | >, 455 | , 457 | >>::Error: Into + Send + Sync, 458 | { 459 | CacheClient::new(InterceptedService::new(inner, interceptor)) 460 | } 461 | /// Compress requests with the given encoding. 462 | /// 463 | /// This requires the server to support it otherwise it might respond with an 464 | /// error. 465 | #[must_use] 466 | pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { 467 | self.inner = self.inner.send_compressed(encoding); 468 | self 469 | } 470 | /// Enable decompressing responses. 471 | #[must_use] 472 | pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { 473 | self.inner = self.inner.accept_compressed(encoding); 474 | self 475 | } 476 | /// Limits the maximum size of a decoded message. 477 | /// 478 | /// Default: `4MB` 479 | #[must_use] 480 | pub fn max_decoding_message_size(mut self, limit: usize) -> Self { 481 | self.inner = self.inner.max_decoding_message_size(limit); 482 | self 483 | } 484 | /// Limits the maximum size of an encoded message. 485 | /// 486 | /// Default: `usize::MAX` 487 | #[must_use] 488 | pub fn max_encoding_message_size(mut self, limit: usize) -> Self { 489 | self.inner = self.inner.max_encoding_message_size(limit); 490 | self 491 | } 492 | /// Inserts / Updates a value from remote in the local caching layer 493 | pub async fn stream_values( 494 | &mut self, 495 | request: impl tonic::IntoStreamingRequest, 496 | ) -> std::result::Result< 497 | tonic::Response>, 498 | tonic::Status, 499 | > { 500 | self.inner 501 | .ready() 502 | .await 503 | .map_err(|e| { 504 | tonic::Status::new( 505 | tonic::Code::Unknown, 506 | format!("Service was not ready: {}", e.into()), 507 | ) 508 | })?; 509 | let codec = tonic::codec::ProstCodec::default(); 510 | let path = http::uri::PathAndQuery::from_static("/cache.Cache/StreamValues"); 511 | let mut req = request.into_streaming_request(); 512 | req.extensions_mut().insert(GrpcMethod::new("cache.Cache", "StreamValues")); 513 | self.inner.streaming(req, path, codec).await 514 | } 515 | } 516 | } 517 | /// Generated server implementations. 518 | pub mod cache_server { 519 | #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] 520 | use tonic::codegen::*; 521 | /// Generated trait containing gRPC methods that should be implemented for use with CacheServer. 522 | #[async_trait] 523 | pub trait Cache: Send + Sync + 'static { 524 | /// Server streaming response type for the StreamValues method. 
525 | type StreamValuesStream: tonic::codegen::tokio_stream::Stream< 526 | Item = std::result::Result, 527 | > 528 | + Send 529 | + 'static; 530 | /// Inserts / Updates a value from remote in the local caching layer 531 | async fn stream_values( 532 | &self, 533 | request: tonic::Request>, 534 | ) -> std::result::Result< 535 | tonic::Response, 536 | tonic::Status, 537 | >; 538 | } 539 | /// Contains the endpoints for managing the Cache in a HA Cluster to exchange and update values remotely 540 | #[derive(Debug)] 541 | pub struct CacheServer { 542 | inner: _Inner, 543 | accept_compression_encodings: EnabledCompressionEncodings, 544 | send_compression_encodings: EnabledCompressionEncodings, 545 | max_decoding_message_size: Option, 546 | max_encoding_message_size: Option, 547 | } 548 | struct _Inner(Arc); 549 | impl CacheServer { 550 | pub fn new(inner: T) -> Self { 551 | Self::from_arc(Arc::new(inner)) 552 | } 553 | pub fn from_arc(inner: Arc) -> Self { 554 | let inner = _Inner(inner); 555 | Self { 556 | inner, 557 | accept_compression_encodings: Default::default(), 558 | send_compression_encodings: Default::default(), 559 | max_decoding_message_size: None, 560 | max_encoding_message_size: None, 561 | } 562 | } 563 | pub fn with_interceptor( 564 | inner: T, 565 | interceptor: F, 566 | ) -> InterceptedService 567 | where 568 | F: tonic::service::Interceptor, 569 | { 570 | InterceptedService::new(Self::new(inner), interceptor) 571 | } 572 | /// Enable decompressing requests with the given encoding. 573 | #[must_use] 574 | pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { 575 | self.accept_compression_encodings.enable(encoding); 576 | self 577 | } 578 | /// Compress responses with the given encoding, if the client supports it. 579 | #[must_use] 580 | pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { 581 | self.send_compression_encodings.enable(encoding); 582 | self 583 | } 584 | /// Limits the maximum size of a decoded message. 585 | /// 586 | /// Default: `4MB` 587 | #[must_use] 588 | pub fn max_decoding_message_size(mut self, limit: usize) -> Self { 589 | self.max_decoding_message_size = Some(limit); 590 | self 591 | } 592 | /// Limits the maximum size of an encoded message. 
593 | /// 594 | /// Default: `usize::MAX` 595 | #[must_use] 596 | pub fn max_encoding_message_size(mut self, limit: usize) -> Self { 597 | self.max_encoding_message_size = Some(limit); 598 | self 599 | } 600 | } 601 | impl tonic::codegen::Service> for CacheServer 602 | where 603 | T: Cache, 604 | B: Body + Send + 'static, 605 | B::Error: Into + Send + 'static, 606 | { 607 | type Response = http::Response; 608 | type Error = std::convert::Infallible; 609 | type Future = BoxFuture; 610 | fn poll_ready( 611 | &mut self, 612 | _cx: &mut Context<'_>, 613 | ) -> Poll> { 614 | Poll::Ready(Ok(())) 615 | } 616 | fn call(&mut self, req: http::Request) -> Self::Future { 617 | let inner = self.inner.clone(); 618 | match req.uri().path() { 619 | "/cache.Cache/StreamValues" => { 620 | #[allow(non_camel_case_types)] 621 | struct StreamValuesSvc(pub Arc); 622 | impl tonic::server::StreamingService 623 | for StreamValuesSvc { 624 | type Response = super::Ack; 625 | type ResponseStream = T::StreamValuesStream; 626 | type Future = BoxFuture< 627 | tonic::Response, 628 | tonic::Status, 629 | >; 630 | fn call( 631 | &mut self, 632 | request: tonic::Request< 633 | tonic::Streaming, 634 | >, 635 | ) -> Self::Future { 636 | let inner = Arc::clone(&self.0); 637 | let fut = async move { 638 | ::stream_values(&inner, request).await 639 | }; 640 | Box::pin(fut) 641 | } 642 | } 643 | let accept_compression_encodings = self.accept_compression_encodings; 644 | let send_compression_encodings = self.send_compression_encodings; 645 | let max_decoding_message_size = self.max_decoding_message_size; 646 | let max_encoding_message_size = self.max_encoding_message_size; 647 | let inner = self.inner.clone(); 648 | let fut = async move { 649 | let inner = inner.0; 650 | let method = StreamValuesSvc(inner); 651 | let codec = tonic::codec::ProstCodec::default(); 652 | let mut grpc = tonic::server::Grpc::new(codec) 653 | .apply_compression_config( 654 | accept_compression_encodings, 655 | send_compression_encodings, 656 | ) 657 | .apply_max_message_size_config( 658 | max_decoding_message_size, 659 | max_encoding_message_size, 660 | ); 661 | let res = grpc.streaming(method, req).await; 662 | Ok(res) 663 | }; 664 | Box::pin(fut) 665 | } 666 | _ => { 667 | Box::pin(async move { 668 | Ok( 669 | http::Response::builder() 670 | .status(200) 671 | .header("grpc-status", "12") 672 | .header("content-type", "application/grpc") 673 | .body(empty_body()) 674 | .unwrap(), 675 | ) 676 | }) 677 | } 678 | } 679 | } 680 | } 681 | impl Clone for CacheServer { 682 | fn clone(&self) -> Self { 683 | let inner = self.inner.clone(); 684 | Self { 685 | inner, 686 | accept_compression_encodings: self.accept_compression_encodings, 687 | send_compression_encodings: self.send_compression_encodings, 688 | max_decoding_message_size: self.max_decoding_message_size, 689 | max_encoding_message_size: self.max_encoding_message_size, 690 | } 691 | } 692 | } 693 | impl Clone for _Inner { 694 | fn clone(&self) -> Self { 695 | Self(Arc::clone(&self.0)) 696 | } 697 | } 698 | impl std::fmt::Debug for _Inner { 699 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 700 | write!(f, "{:?}", self.0) 701 | } 702 | } 703 | impl tonic::server::NamedService for CacheServer { 704 | const NAME: &'static str = "cache.Cache"; 705 | } 706 | } 707 | -------------------------------------------------------------------------------- /src/rpc/mod.rs: -------------------------------------------------------------------------------- 1 | 
#[allow(clippy::mixed_attributes_style)] 2 | pub mod cache; 3 | -------------------------------------------------------------------------------- /src/server.rs: -------------------------------------------------------------------------------- 1 | use crate::quorum::{AckLevel, QuorumState, RegisteredLeader, RpcServer, RpcServerState}; 2 | use crate::remove_from_leader; 3 | use crate::rpc::cache; 4 | use crate::rpc::cache::cache_server::{Cache, CacheServer}; 5 | use crate::rpc::cache::mgmt_request::Method; 6 | use crate::rpc::cache::{ack, cache_request, mgmt_ack, Ack, CacheRequest, DelAck, GetAck, PutAck}; 7 | use crate::TLS; 8 | use crate::{ 9 | insert_from_leader, CacheConfig, CacheMethod, CacheNotify, CacheReq, QuorumHealth, QuorumReq, 10 | }; 11 | use core::time::Duration; 12 | use futures_core::Stream; 13 | use futures_util::StreamExt; 14 | use lazy_static::lazy_static; 15 | use std::collections::HashMap; 16 | use std::env; 17 | use std::io::ErrorKind; 18 | use std::net::SocketAddr; 19 | use std::pin::Pin; 20 | use tokio::fs; 21 | use tokio::sync::{mpsc, oneshot}; 22 | use tokio::time::Instant; 23 | use tokio_stream::wrappers::ReceiverStream; 24 | use tonic::metadata::MetadataValue; 25 | use tonic::transport::{Certificate, Identity, Server, ServerTlsConfig}; 26 | use tonic::{Request, Response, Status, Streaming}; 27 | use tracing::{debug, error, info, warn}; 28 | 29 | lazy_static! { 30 | static ref BUF_SIZE_SERVER: usize = env::var("CACHE_BUF_SERVER") 31 | .unwrap_or_else(|_| String::from("128")) 32 | .parse::() 33 | .expect("Error parsing 'CACHE_BUF_SERVER' to usize"); 34 | static ref UPTIME: Instant = Instant::now(); 35 | static ref PATH_TLS_CERT: String = env::var("CACHE_TLS_SERVER_CERT") 36 | .unwrap_or_else(|_| String::from("tls/redhac.cert-chain.pem")); 37 | static ref PATH_TLS_KEY: String = 38 | env::var("CACHE_TLS_SERVER_KEY").unwrap_or_else(|_| String::from("tls/redhac.key.pem")); 39 | static ref PATH_CLIENT_CA: Option = env::var("CACHE_TLS_CA_CLIENT").ok(); 40 | } 41 | 42 | pub(crate) type CacheMap = HashMap>; 43 | 44 | fn match_for_io_error(err_status: &Status) -> Option<&std::io::Error> { 45 | let mut err: &(dyn std::error::Error + 'static) = err_status; 46 | debug!("match_for_io_error: {:?}", err_status); 47 | 48 | loop { 49 | if let Some(io_err) = err.downcast_ref::() { 50 | return Some(io_err); 51 | } 52 | 53 | err = match err.source() { 54 | Some(err) => err, 55 | None => return None, 56 | }; 57 | } 58 | } 59 | 60 | pub(crate) struct RpcCacheService { 61 | host_addr: String, 62 | cache_config: CacheConfig, 63 | tx_quorum: flume::Sender, 64 | tx_notify: Option>, 65 | } 66 | 67 | impl RpcCacheService { 68 | pub async fn serve( 69 | addr: String, 70 | cache_config: CacheConfig, 71 | tx_quorum: flume::Sender, 72 | tx_notify: Option>, 73 | ) -> anyhow::Result<()> { 74 | { 75 | // initialize the uptime 76 | let _ = UPTIME; 77 | } 78 | 79 | let interval = env::var("CACHE_KEEPALIVE_INTERVAL") 80 | .unwrap_or_else(|_| String::from("5")) 81 | .parse::() 82 | .expect("Error parsing 'CACHE_KEEPALIVE_INTERVAL' to u64"); 83 | let timeout = env::var("CACHE_KEEPALIVE_TIMEOUT") 84 | .unwrap_or_else(|_| String::from("5")) 85 | .parse::() 86 | .expect("Error parsing 'CACHE_KEEPALIVE_TIMEOUT' to u64"); 87 | 88 | let port = addr 89 | .split(':') 90 | .last() 91 | .expect("Error with the 'addr' format in 'RpcCacheService::serve'") 92 | .trim() 93 | .parse::() 94 | .expect("Cannot parse the Port to u16 in 'RpcCacheService::serve'"); 95 | let addr_mod = format!("0.0.0.0:{}", port); 96 | let 
socket_addr: SocketAddr = addr_mod 97 | .parse() 98 | .expect("Could not parse the addr for the GrpcServer"); 99 | 100 | let rpc_cache_service = RpcCacheService { 101 | host_addr: addr, 102 | cache_config, 103 | tx_quorum, 104 | tx_notify, 105 | }; 106 | 107 | let svc = CacheServer::with_interceptor(rpc_cache_service, check_auth); 108 | let mut server = Server::builder() 109 | .tcp_keepalive(Some(Duration::from_secs(interval))) 110 | .http2_keepalive_interval(Some(Duration::from_secs(interval))) 111 | .http2_keepalive_timeout(Some(Duration::from_millis(timeout))) 112 | .http2_adaptive_window(Some(true)) 113 | .timeout(Duration::from_secs(10)); 114 | 115 | if *TLS { 116 | debug!("Loading server TLS cert from {}", *PATH_TLS_CERT); 117 | let cert = fs::read(&*PATH_TLS_CERT) 118 | .await 119 | .expect("Error reading server TLS Certificate"); 120 | debug!("Loading server TLS key from {}", *PATH_TLS_KEY); 121 | let key = fs::read(&*PATH_TLS_KEY) 122 | .await 123 | .expect("Error reading server TLS Key"); 124 | let id = Identity::from_pem(cert, key); 125 | 126 | let mut cfg = ServerTlsConfig::new().identity(id); 127 | 128 | if let Some(path) = &*PATH_CLIENT_CA { 129 | debug!("Loading client CA from {}", path); 130 | let ca = fs::read(path).await.expect("Error reading client TLS CA"); 131 | let ca_chain = Certificate::from_pem(ca); 132 | cfg = cfg.client_ca_root(ca_chain); 133 | } 134 | 135 | server = server 136 | .tls_config(cfg) 137 | .expect("Error adding the TLS Certificate to the Cache Server"); 138 | } 139 | 140 | server.add_service(svc).serve(socket_addr).await?; 141 | 142 | Ok(()) 143 | } 144 | } 145 | 146 | #[tonic::async_trait()] 147 | impl Cache for RpcCacheService { 148 | type StreamValuesStream = Pin> + Send + 'static>>; 149 | 150 | async fn stream_values( 151 | &self, 152 | request: Request>, 153 | ) -> Result, Status> { 154 | debug!( 155 | "Debug req:\n{:?}\nRemote Addr: {:?}", 156 | request, 157 | request.remote_addr() 158 | ); 159 | 160 | let host_addr = self.host_addr.clone(); 161 | let addr = request.remote_addr().as_ref().map(|a| a.to_string()); 162 | 163 | let mut in_stream = request.into_inner(); 164 | let (tx, rx) = mpsc::channel(*BUF_SIZE_SERVER); 165 | 166 | let mut client = RpcServer { 167 | address: addr.unwrap_or_else(|| String::from("unknown")), 168 | state: RpcServerState::Alive, 169 | tx: None, 170 | election_ts: -1, 171 | }; 172 | info!("RpcCacheClient connected: {:?}", client); 173 | 174 | let tx_quorum = self.tx_quorum.clone(); 175 | 176 | let cache_config = self.cache_config.clone(); 177 | let tx_notify = self.tx_notify.clone(); 178 | 179 | // build up the LeaderInfo as the first message 180 | let (tx_oneshot, rx_quorum) = oneshot::channel(); 181 | tx_quorum 182 | .send_async(QuorumReq::GetReport { tx: tx_oneshot }) 183 | .await 184 | .expect("QuorumReq::GetReport in RpcServer"); 185 | let report = rx_quorum 186 | .await 187 | // cannot fail at this point since we just opened the channel 188 | .unwrap(); 189 | 190 | let has_quorum = match report.health { 191 | QuorumHealth::Good | QuorumHealth::Degraded => true, 192 | QuorumHealth::Bad => false, 193 | }; 194 | 195 | let (addr, election_ts) = match report.leader { 196 | None => (None, -1), 197 | Some(l) => match l { 198 | RegisteredLeader::Local(ts) => (Some(host_addr.clone()), ts), 199 | RegisteredLeader::Remote(lead) => (Some(lead.address), lead.election_ts), 200 | }, 201 | }; 202 | let info = mgmt_ack::Method::LeaderInfo(cache::LeaderInfo { 203 | addr, 204 | has_quorum, 205 | election_ts, 206 | }); 207 | 
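
The `LeaderInfo` built here is pushed to every newly connected client as the very first `Ack` on the stream, before any cache traffic flows; on the other end, `run_client` feeds it into `QuorumReq::LeaderInfo`. A hedged sketch of that client-side interpretation (the helper name is made up):

```rust
use crate::rpc::cache::{ack, mgmt_ack, Ack};

/// Hypothetical helper: how a connecting client reads this first Ack.
fn read_first_leader_info(first: Ack) {
    if let Some(ack::Method::MgmtAck(mgmt)) = first.method {
        if let Some(mgmt_ack::Method::LeaderInfo(info)) = mgmt.method {
            // addr == None means no leader has been elected yet
            println!(
                "leader: {:?}, has_quorum: {}, election_ts: {}",
                info.addr, info.has_quorum, info.election_ts
            );
        }
    }
}
```
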
tx.send(Ok(Ack { 208 | method: Some(ack::Method::MgmtAck(cache::MgmtAck { method: Some(info) })), 209 | })) 210 | .await 211 | .expect("Error sending LeaderInfo on connect"); 212 | 213 | // listen to the client stream 214 | tokio::spawn(async move { 215 | while let Some(result) = in_stream.next().await { 216 | match result { 217 | Ok(req) => { 218 | if req.method.is_none() { 219 | tx.send(Err(Status::aborted( 220 | "Error for CacheRequest - method is none", 221 | ))) 222 | .await 223 | .expect("Error Sending msg in stream_values"); 224 | continue; 225 | } 226 | 227 | match req.method.unwrap() { 228 | cache_request::Method::Get(m) => { 229 | let tx_cache = cache_config.cache_map.get(&m.cache_name); 230 | if tx_cache.is_none() { 231 | error!( 232 | "'cache_map' misconfiguration in rpc server - Method::Get" 233 | ); 234 | continue; 235 | } 236 | 237 | let (tx_get, rx_get) = flume::unbounded::>>(); 238 | let res = tx_cache 239 | .unwrap() 240 | .send_async(CacheReq::Get { 241 | entry: m.entry.clone(), 242 | resp: tx_get, 243 | }) 244 | .await; 245 | if res.is_err() { 246 | error!("Error executing GET on the internal cache"); 247 | } 248 | 249 | let val_res = rx_get.recv_async().await; 250 | if val_res.is_err() { 251 | error!("Error receiving value through GET channel from the internal cache"); 252 | continue; 253 | } 254 | let value = val_res.unwrap(); 255 | 256 | tx.send(Ok(Ack { 257 | method: Some(ack::Method::GetAck(GetAck { 258 | cache_name: m.cache_name, 259 | entry: m.entry.to_owned(), 260 | value, 261 | })), 262 | })) 263 | .await 264 | .expect("Error Sending msg in stream_values"); 265 | } 266 | 267 | cache_request::Method::Put(m) => { 268 | debug!("Method::Put request for cache_name '{}'", &m.cache_name); 269 | let tx_cache = cache_config.cache_map.get(&m.cache_name); 270 | if tx_cache.is_none() { 271 | error!( 272 | "'cache_map' misconfiguration in rpc server - Method::Put" 273 | ); 274 | continue; 275 | } 276 | 277 | if let Some(tx) = &tx_notify { 278 | let msg = CacheNotify { 279 | cache_name: m.cache_name.clone(), 280 | entry: m.entry.clone(), 281 | method: CacheMethod::Put, 282 | }; 283 | 284 | // Fail fast and don't send the update to not block the cache, if 285 | // the channel is full 286 | if let Err(err) = tx.try_send(msg) { 287 | debug!("Sending CacheNotify for PUT over channel: {}", err); 288 | } 289 | } 290 | 291 | match tx_cache 292 | .unwrap() 293 | .send_async(CacheReq::Put { 294 | entry: m.entry.clone(), 295 | value: m.value, 296 | }) 297 | .await 298 | { 299 | Ok(_) => { 300 | tx.send(Ok(Ack { 301 | method: Some(ack::Method::PutAck(PutAck { 302 | req_id: m.req_id, 303 | mod_res: Some(true), 304 | })), 305 | })) 306 | .await 307 | .expect("Error Sending msg in stream_values"); 308 | } 309 | Err(err) => { 310 | error!("{}", err); 311 | tx.send(Err(Status::internal( 312 | "Error executing PUT on the internal cache", 313 | ))) 314 | .await 315 | .expect("Error Sending msg in stream_values"); 316 | } 317 | } 318 | } 319 | 320 | cache_request::Method::Insert(req) => { 321 | debug!( 322 | "Method::Insert request for cache_name '{}'", 323 | &req.cache_name 324 | ); 325 | 326 | let ack_level = AckLevel::from_rpc_value(req.ack_level); 327 | match insert_from_leader( 328 | req.cache_name.clone(), 329 | req.entry.clone(), 330 | req.value, 331 | &cache_config, 332 | ack_level.clone(), 333 | None, 334 | ) 335 | .await 336 | { 337 | Ok(res) => { 338 | // notification listener channel 339 | if let Some(tx) = &tx_notify { 340 | let msg = CacheNotify { 341 | cache_name: 
req.cache_name, 342 | entry: req.entry, 343 | method: CacheMethod::Insert(ack_level), 344 | }; 345 | 346 | // Fail fast and don't send the update to not block the cache, if 347 | // the channel is full 348 | if let Err(err) = tx.try_send(msg) { 349 | debug!( 350 | "Sending CacheNotify for INSERT over channel: {}", 351 | err 352 | ); 353 | } 354 | } 355 | 356 | // answer to the client 357 | tx.send(Ok(Ack { 358 | method: Some(ack::Method::PutAck(PutAck { 359 | req_id: Some(req.req_id), 360 | mod_res: Some(res), 361 | })), 362 | })) 363 | .await 364 | .expect("Error Sending msg in stream_values"); 365 | } 366 | Err(err) => { 367 | error!("{:?}", err); 368 | tx.send(Err(Status::internal( 369 | "Error executing INSERT on the internal cache", 370 | ))) 371 | .await 372 | .expect("Error Sending msg in stream_values"); 373 | } 374 | } 375 | } 376 | 377 | cache_request::Method::Del(m) => { 378 | debug!("Method::Del request for cache_name '{}'", &m.cache_name); 379 | let tx_cache = cache_config.cache_map.get(&m.cache_name); 380 | if tx_cache.is_none() { 381 | error!( 382 | "'cache_map' misconfiguration in rpc server - Method::Del" 383 | ); 384 | continue; 385 | } 386 | 387 | if let Some(tx) = &tx_notify { 388 | let msg = CacheNotify { 389 | cache_name: m.cache_name.clone(), 390 | entry: m.entry.clone(), 391 | method: CacheMethod::Del, 392 | }; 393 | 394 | // Fail fast and don't send the update to not block the cache, if 395 | // the channel is full 396 | if let Err(err) = tx.try_send(msg) { 397 | error!("Sending CacheNotify for DEL over channel: {}", err); 398 | } 399 | } 400 | 401 | match tx_cache 402 | .unwrap() 403 | .send_async(CacheReq::Del { 404 | entry: m.entry.clone(), 405 | }) 406 | .await 407 | { 408 | Ok(_) => { 409 | tx.send(Ok(Ack { 410 | method: Some(ack::Method::DelAck(DelAck { 411 | req_id: m.req_id, 412 | mod_res: Some(true), 413 | })), 414 | })) 415 | .await 416 | .expect("Error Sending msg in stream_values"); 417 | } 418 | Err(err) => { 419 | error!("{}", err); 420 | tx.send(Err(Status::internal( 421 | "Error executing DEL on the internal cache", 422 | ))) 423 | .await 424 | .expect("Error Sending msg in stream_values"); 425 | } 426 | } 427 | } 428 | 429 | cache_request::Method::Remove(req) => { 430 | debug!( 431 | "Method::Remove request for cache_name '{}'", 432 | &req.cache_name 433 | ); 434 | 435 | let ack_level = AckLevel::from_rpc_value(req.ack_level); 436 | match remove_from_leader( 437 | req.cache_name.clone(), 438 | req.entry.clone(), 439 | &cache_config, 440 | ack_level.clone(), 441 | None, 442 | ) 443 | .await 444 | { 445 | Ok(res) => { 446 | // notification listener channel 447 | if let Some(tx) = &tx_notify { 448 | let msg = CacheNotify { 449 | cache_name: req.cache_name, 450 | entry: req.entry, 451 | method: CacheMethod::Remove(ack_level), 452 | }; 453 | 454 | // Fail fast and don't send the update to not block the cache, if 455 | // the channel is full 456 | if let Err(err) = tx.try_send(msg) { 457 | debug!( 458 | "Sending CacheNotify for REMOVE over channel: {}", 459 | err 460 | ); 461 | } 462 | } 463 | 464 | // answer to the client 465 | tx.send(Ok(Ack { 466 | method: Some(ack::Method::DelAck(DelAck { 467 | req_id: Some(req.req_id), 468 | mod_res: Some(res), 469 | })), 470 | })) 471 | .await 472 | .expect("Error Sending msg in stream_values"); 473 | } 474 | Err(err) => { 475 | error!("{:?}", err); 476 | tx.send(Err(Status::internal( 477 | "Error executing REMOVE on the internal cache", 478 | ))) 479 | .await 480 | .expect("Error Sending msg in stream_values"); 
481 | } 482 | } 483 | } 484 | 485 | cache_request::Method::MgmtReq(mgmt_req) => { 486 | debug!("MgmtReq received: {:?}", mgmt_req); 487 | 488 | if let Some(m) = mgmt_req.method { 489 | match m { 490 | Method::Ping(_) => { 491 | debug!("Ping from {}", client.address); 492 | 493 | let pong = mgmt_ack::Method::Pong(cache::Pong {}); 494 | 495 | tx.send(Ok(Ack { 496 | method: Some(ack::Method::MgmtAck( 497 | cache::MgmtAck { method: Some(pong) }, 498 | )), 499 | })) 500 | .await 501 | .expect("Error sending Pong"); 502 | } 503 | 504 | Method::Health(_) => { 505 | debug!( 506 | "Received Health Request from {}", 507 | client.address 508 | ); 509 | 510 | let (tx_oneshot, rx_quorum) = oneshot::channel(); 511 | tx_quorum 512 | .send_async(QuorumReq::GetReport { tx: tx_oneshot }) 513 | .await 514 | .expect("QuorumReq::GetReport in RpcServer"); 515 | let report = rx_quorum.await.expect(""); 516 | 517 | let quorum = match report.health { 518 | QuorumHealth::Good | QuorumHealth::Degraded => true, 519 | QuorumHealth::Bad => false, 520 | }; 521 | let state = match report.state { 522 | QuorumState::Leader => cache::State { 523 | value: Some(cache::state::Value::Leader( 524 | Default::default(), 525 | )), 526 | }, 527 | QuorumState::LeaderDead => cache::State { 528 | value: Some(cache::state::Value::LeaderDead( 529 | Default::default(), 530 | )), 531 | }, 532 | QuorumState::LeaderSwitch => cache::State { 533 | value: Some(cache::state::Value::LeaderSwitch( 534 | Default::default(), 535 | )), 536 | }, 537 | QuorumState::LeaderTxAwait(_addr) => cache::State { 538 | value: Some( 539 | cache::state::Value::LeaderTxAwait( 540 | Default::default(), 541 | ), 542 | ), 543 | }, 544 | QuorumState::LeadershipRequested(_timestamp) => cache::State { 545 | value: Some( 546 | cache::state::Value::LeadershipRequested( 547 | Default::default(), 548 | ), 549 | ), 550 | }, 551 | QuorumState::Follower => cache::State { 552 | value: Some(cache::state::Value::Follower( 553 | Default::default(), 554 | )), 555 | }, 556 | QuorumState::Undefined => cache::State { 557 | value: Some(cache::state::Value::Undefined( 558 | Default::default(), 559 | )), 560 | }, 561 | QuorumState::Retry => cache::State { 562 | value: Some(cache::state::Value::Undefined( 563 | Default::default(), 564 | )), 565 | }, 566 | }; 567 | let leader = report.leader.map(|c| { 568 | let (addr, election_ts, connected) = match c { 569 | RegisteredLeader::Local(ts) => { 570 | (host_addr.clone(), ts, true) 571 | } 572 | RegisteredLeader::Remote(lead) => ( 573 | lead.address, 574 | lead.election_ts, 575 | lead.state == RpcServerState::Alive, 576 | ), 577 | }; 578 | cache::Leader { 579 | addr, 580 | election_ts, 581 | connected, 582 | } 583 | }); 584 | let mut host_health = vec![]; 585 | for host in report.hosts { 586 | host_health.push(cache::HostHealth { 587 | addr: host.address, 588 | connected: host.state == RpcServerState::Alive, 589 | }); 590 | } 591 | 592 | let health = 593 | mgmt_ack::Method::HealthAck(cache::HealthAck { 594 | uptime_secs: UPTIME.elapsed().as_secs(), 595 | quorum, 596 | state: Some(state), 597 | leader, 598 | host_health, 599 | }); 600 | 601 | tx.send(Ok(Ack { 602 | method: Some(ack::Method::MgmtAck( 603 | cache::MgmtAck { 604 | method: Some(health), 605 | }, 606 | )), 607 | })) 608 | .await 609 | .expect("Error sending HealthAck"); 610 | } 611 | 612 | Method::LeaderReq(req) => { 613 | debug!("Received a LeaderReq: {:?}", req); 614 | let (tx_oneshot, rx_quorum) = oneshot::channel(); 615 | let addr = req.addr.clone(); 616 | let election_ts = 
req.election_ts; 617 | 618 | tx_quorum 619 | .send_async(QuorumReq::LeaderReq { 620 | req, 621 | tx: tx_oneshot, 622 | }) 623 | .await 624 | .expect("Error sending LeaderReq"); 625 | 626 | let accepted = rx_quorum.await.expect( 627 | "Error receiving LeaderReqAck from quorum handler", 628 | ); 629 | if accepted { 630 | let req_ack = mgmt_ack::Method::LeaderReqAck( 631 | cache::LeaderReqAck { addr, election_ts }, 632 | ); 633 | tx.send(Ok(Ack { 634 | method: Some(ack::Method::MgmtAck( 635 | cache::MgmtAck { 636 | method: Some(req_ack), 637 | }, 638 | )), 639 | })) 640 | .await 641 | .expect("Error sending LeaderReqAck"); 642 | } 643 | } 644 | 645 | Method::LeaderReqAck(req) => { 646 | debug!("Received a LeaderReqAck: {:?}", req); 647 | tx_quorum 648 | .send_async(QuorumReq::LeaderReqAck { 649 | addr: req.addr, 650 | election_ts: req.election_ts, 651 | }) 652 | .await 653 | .expect("Error sending LeaderReqAck"); 654 | } 655 | 656 | Method::LeaderAck(ack) => { 657 | debug!("Received LeaderAck: {}", ack.addr); 658 | tx_quorum 659 | .send_async(QuorumReq::LeaderAck { ack }) 660 | .await 661 | .expect("Error sending LeaderAck"); 662 | } 663 | 664 | Method::LeaderSwitch(req) => { 665 | debug!("Received Method::LeaderSwitch: {:?}", req); 666 | 667 | tx_quorum 668 | .send_async(QuorumReq::LeaderSwitch { req }) 669 | .await 670 | .expect("Error sending LeaderSwitch"); 671 | } 672 | 673 | Method::LeaderSwitchPriority(req) => { 674 | debug!( 675 | "Received Method::LeaderSwitchPriority: {:?}", 676 | req 677 | ); 678 | 679 | tx_quorum 680 | .send_async(QuorumReq::LeaderSwitchPriority { req }) 681 | .await 682 | .expect("Error sending LeaderSwitchPriority"); 683 | } 684 | 685 | Method::LeaderDead(req) => { 686 | tx_quorum 687 | .send_async(QuorumReq::LeaderSwitchDead { req }) 688 | .await 689 | .expect("Error sending LeaderSwitchDead"); 690 | } 691 | 692 | Method::LeaderSwitchAck(req) => { 693 | tx_quorum 694 | .send_async(QuorumReq::LeaderSwitchAck { req }) 695 | .await 696 | .expect("Error sending LeaderSwitchAck"); 697 | } 698 | } 699 | } 700 | } 701 | } 702 | } 703 | Err(err) => { 704 | if let Some(io_err) = match_for_io_error(&err) { 705 | if io_err.kind() == ErrorKind::BrokenPipe { 706 | error!("cache rpc client disconnected: broken pipe"); 707 | break; 708 | } 709 | } 710 | 711 | match tx.send(Err(err)).await { 712 | Ok(_) => (), 713 | Err(err) => { 714 | error!("{:?}", err); 715 | break; 716 | } // response was dropped 717 | } 718 | } 719 | } 720 | } 721 | 722 | // stream has ended - client is dead 723 | client.state = RpcServerState::Dead; 724 | warn!( 725 | "cache stream with client '{:?}' on host '{}' ended", 726 | client, host_addr 727 | ); 728 | }); 729 | 730 | // stream the queued responses back to the client 731 | let out_stream = ReceiverStream::new(rx); 732 | 733 | Ok(Response::new( 734 | Box::pin(out_stream) as Self::StreamValuesStream 735 | )) 736 | } 737 | } 738 | 739 | fn check_auth(req: Request<()>) -> Result<Request<()>, Status> { 740 | let token_str = env::var("CACHE_AUTH_TOKEN").expect("CACHE_AUTH_TOKEN is not set"); 741 | let token: MetadataValue<_> = token_str 742 | .parse() 743 | .expect("Could not parse the Token to MetadataValue - needs to be ASCII"); 744 | 745 | match req.metadata().get("authorization") { 746 | Some(t) if token == t => Ok(req), 747 | _ => { 748 | warn!("Connection request with bad Token"); 749 | Err(Status::unauthenticated("No valid auth token")) 750 | } 751 | } 752 | } 753 | --------------------------------------------------------------------------------
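For reference, `check_auth` above follows tonic's standard interceptor pattern: the connecting cache member presents the shared `CACHE_AUTH_TOKEN` as `authorization` metadata, and the server compares it against its own token before the stream is accepted. Below is a minimal, self-contained sketch of that round trip, assuming only the `tonic` crate; the `check_auth_with` helper and its explicit `expected` parameter are illustrative and not part of this repository (the real `check_auth` reads the token from the environment and would typically be attached to the generated gRPC service via `with_interceptor`, which is not shown in this excerpt).

```rust
// Sketch only (not part of the crate): the client attaches the shared token
// as `authorization` metadata and the server-side check compares it.
// The explicit `expected` parameter is illustrative; the real `check_auth`
// reads CACHE_AUTH_TOKEN from the environment instead.
use tonic::{metadata::MetadataValue, Request, Status};

fn check_auth_with(req: Request<()>, expected: &str) -> Result<Request<()>, Status> {
    let token: MetadataValue<_> = expected
        .parse()
        .expect("expected token must be ASCII");

    match req.metadata().get("authorization") {
        Some(t) if token == t => Ok(req),
        _ => Err(Status::unauthenticated("No valid auth token")),
    }
}

fn main() {
    let token = "SuperSafeSecretToken1337";

    // Client side: put the shared token into the request metadata.
    let mut req = Request::new(());
    req.metadata_mut()
        .insert("authorization", token.parse().unwrap());
    assert!(check_auth_with(req, token).is_ok());

    // A request without the header is rejected as `unauthenticated`.
    assert!(check_auth_with(Request::new(()), token).is_err());
}
```

In the crate itself the expected token comes from the `CACHE_AUTH_TOKEN` environment variable, exactly as shown in the server code above.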
/tls/ca/x509/end_entity/1/cert-chain.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIB8zCCAXqgAwIBAgIBATAKBggqhkjOPQQDAzAkMSIwIAYDVQQDDBlyZWRoYWMu 3 | bG9jYWwgSW50ZXJtZWRpYXRlMB4XDTIzMTEwNzA4NTQ0NloXDTI0MTExNjA5MDQ0 4 | NlowFzEVMBMGA1UEAwwMcmVkaGFjLmxvY2FsMHYwEAYHKoZIzj0CAQYFK4EEACID 5 | YgAEU7H56ygar9ClpYBv5roLX6Nqz6jei2Uq8VuH2Z05oTBpI+Y9BwRAmpbK5WEc 6 | bKOV3Fdm29/LmpzZKkqrESDDVtP2+00mNU4HKsg/NnP3H74jq/sCey/o8KgsIuZB 7 | 4U/Oo4GMMIGJMB8GA1UdIwQYMBaAFN/L7X0a7QIa8QzRFqj3VXPnU685MBcGA1Ud 8 | EQQQMA6CDHJlZGhhYy5sb2NhbDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUH 9 | AwIwHQYDVR0OBBYEFKMmu0yvOf9Jgnn5t+hsFVzOp3YcMA8GA1UdEwEB/wQFMAMB 10 | AQAwCgYIKoZIzj0EAwMDZwAwZAIwTwFcqZwb1ENc148shE0jUCbr3sgE3/s88orS 11 | XHRMyRK1ZSJmo9K8B+cu0DJlMuOAAjBzB4P8AdGVdRbYdVqFljjYS5U6fg+L/mEH 12 | 4RqeIUpf9jljlGI873J5haF0IIvFlcU= 13 | -----END CERTIFICATE----- 14 | -----BEGIN CERTIFICATE----- 15 | MIIB5jCCAWygAwIBAgIVAN/L7X0a7QIa8QzRFqj3VXPnU685MAoGCCqGSM49BAMD 16 | MBwxGjAYBgNVBAMMEXJlZGhhYy5sb2NhbCBSb290MB4XDTIzMDEyMTA3NDA0NVoX 17 | DTMzMTEwNDA5MDQ0NVowJDEiMCAGA1UEAwwZcmVkaGFjLmxvY2FsIEludGVybWVk 18 | aWF0ZTB2MBAGByqGSM49AgEGBSuBBAAiA2IABA5HsVZ6MP2HpUUN82ZV/Ypq7KlY 19 | Hrk4ahp7vMu8mYQf2cLJNd6R3/IXPn3x6jNyLIdlIYSrsgis7GEDlaL9k+I4sEG8 20 | gDCWwAtguUuEecSg2ZMIJcipmT0OLv8s55F+0KNmMGQwHwYDVR0jBBgwFoAUvFha 21 | U1lju9HpQ95qp8Y+Mp6oLUowDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBTfy+19 22 | Gu0CGvEM0Rao91Vz51OvOTASBgNVHRMBAf8ECDAGAQH/AgEAMAoGCCqGSM49BAMD 23 | A2gAMGUCMFE6GjkjMohjX9iDigLhg3MZKuwcIhxqaLo8S8afr1EHs7uN4Uc/CL2t 24 | S7vghbkilgIxANQUafZv1OXNoiQHEAzZPCTWtwixdOE5iko54RA4wc4TMeP6GbLr 25 | IJm0T6d9ImFhFQ== 26 | -----END CERTIFICATE----- 27 | -----BEGIN CERTIFICATE----- 28 | MIIB3TCCAWOgAwIBAgIVALxYWlNZY7vR6UPeaqfGPjKeqC1KMAoGCCqGSM49BAMD 29 | MBwxGjAYBgNVBAMMEXJlZGhhYy5sb2NhbCBSb290MCAXDTIyMDQzMDIzMTQ0MloY 30 | DzIwNTMxMDMwMDkwNDQyWjAcMRowGAYDVQQDDBFyZWRoYWMubG9jYWwgUm9vdDB2 31 | MBAGByqGSM49AgEGBSuBBAAiA2IABFtWaay8hkuVQMZJRxA9JtwGEPtPS2UlWVrD 32 | tqukeU3R0Vnp1Lxy7KNgDT3Vrdo8A7WLxDxGyRzpMWA/egKFjJfCxdib+GoUOKK2 33 | +jS0tHEI/bFQboqA9WVzujgFc3qwEKNjMGEwHwYDVR0jBBgwFoAUvFhaU1lju9Hp 34 | Q95qp8Y+Mp6oLUowDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBS8WFpTWWO70elD 35 | 3mqnxj4ynqgtSjAPBgNVHRMBAf8EBTADAQH/MAoGCCqGSM49BAMDA2gAMGUCMD4B 36 | DAjOLCAqIclg9n9O9iUQfEMfRIQmU/Wi+6jlzQFOf0TENL7D7nzImQiAR1oUKgIx 37 | APQJkiZxOYBrnWsI02yQGO6UtG5QRlnCUkxkBy42j8O2VfVXAY6Wt2R2DhFN8hGu 38 | RA== 39 | -----END CERTIFICATE----- 40 | -------------------------------------------------------------------------------- /tls/ca/x509/end_entity/1/cert-chain.pem.b64: -------------------------------------------------------------------------------- 1 | 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUI4ekNDQVhxZ0F3SUJBZ0lCQVRBS0JnZ3Foa2pPUFFRREF6QWtNU0l3SUFZRFZRUUREQmx5WldSb1lXTXUKYkc5allXd2dTVzUwWlhKdFpXUnBZWFJsTUI0WERUSXpNVEV3TnpBNE5UUTBObG9YRFRJME1URXhOakE1TURRMApObG93RnpFVk1CTUdBMVVFQXd3TWNtVmthR0ZqTG14dlkyRnNNSFl3RUFZSEtvWkl6ajBDQVFZRks0RUVBQ0lECllnQUVVN0g1NnlnYXI5Q2xwWUJ2NXJvTFg2TnF6NmplaTJVcThWdUgyWjA1b1RCcEkrWTlCd1JBbXBiSzVXRWMKYktPVjNGZG0yOS9MbXB6WktrcXJFU0REVnRQMiswMG1OVTRIS3NnL05uUDNINzRqcS9zQ2V5L284S2dzSXVaQgo0VS9PbzRHTU1JR0pNQjhHQTFVZEl3UVlNQmFBRk4vTDdYMGE3UUlhOFF6UkZxajNWWFBuVTY4NU1CY0dBMVVkCkVRUVFNQTZDREhKbFpHaGhZeTVzYjJOaGJEQWRCZ05WSFNVRUZqQVVCZ2dyQmdFRkJRY0RBUVlJS3dZQkJRVUgKQXdJd0hRWURWUjBPQkJZRUZLTW11MHl2T2Y5SmdubjV0K2hzRlZ6T3AzWWNNQThHQTFVZEV3RUIvd1FGTUFNQgpBUUF3Q2dZSUtvWkl6ajBFQXdNRFp3QXdaQUl3VHdGY3Fad2IxRU5jMTQ4c2hFMGpVQ2JyM3NnRTMvczg4b3JTClhIUk15UksxWlNKbW85SzhCK2N1MERKbE11T0FBakJ6QjRQOEFkR1ZkUmJZZFZxRmxqallTNVU2ZmcrTC9tRUgKNFJxZUlVcGY5amxqbEdJODczSjVoYUYwSUl2RmxjVT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQotLS0tLUJFR0lOIENFUlRJRklDQVRFLS0tLS0KTUlJQjVqQ0NBV3lnQXdJQkFnSVZBTi9MN1gwYTdRSWE4UXpSRnFqM1ZYUG5VNjg1TUFvR0NDcUdTTTQ5QkFNRApNQnd4R2pBWUJnTlZCQU1NRVhKbFpHaGhZeTVzYjJOaGJDQlNiMjkwTUI0WERUSXpNREV5TVRBM05EQTBOVm9YCkRUTXpNVEV3TkRBNU1EUTBOVm93SkRFaU1DQUdBMVVFQXd3WmNtVmthR0ZqTG14dlkyRnNJRWx1ZEdWeWJXVmsKYVdGMFpUQjJNQkFHQnlxR1NNNDlBZ0VHQlN1QkJBQWlBMklBQkE1SHNWWjZNUDJIcFVVTjgyWlYvWXBxN0tsWQpIcms0YWhwN3ZNdThtWVFmMmNMSk5kNlIzL0lYUG4zeDZqTnlMSWRsSVlTcnNnaXM3R0VEbGFMOWsrSTRzRUc4CmdEQ1d3QXRndVV1RWVjU2cyWk1JSmNpcG1UME9MdjhzNTVGKzBLTm1NR1F3SHdZRFZSMGpCQmd3Rm9BVXZGaGEKVTFsanU5SHBROTVxcDhZK01wNm9MVW93RGdZRFZSMFBBUUgvQkFRREFnR0dNQjBHQTFVZERnUVdCQlRmeSsxOQpHdTBDR3ZFTTBSYW85MVZ6NTFPdk9UQVNCZ05WSFJNQkFmOEVDREFHQVFIL0FnRUFNQW9HQ0NxR1NNNDlCQU1ECkEyZ0FNR1VDTUZFNkdqa2pNb2hqWDlpRGlnTGhnM01aS3V3Y0loeHFhTG84UzhhZnIxRUhzN3VONFVjL0NMMnQKUzd2Z2hia2lsZ0l4QU5RVWFmWnYxT1hOb2lRSEVBelpQQ1RXdHdpeGRPRTVpa281NFJBNHdjNFRNZVA2R2JMcgpJSm0wVDZkOUltRmhGUT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUIzVENDQVdPZ0F3SUJBZ0lWQUx4WVdsTlpZN3ZSNlVQZWFxZkdQaktlcUMxS01Bb0dDQ3FHU000OUJBTUQKTUJ3eEdqQVlCZ05WQkFNTUVYSmxaR2hoWXk1c2IyTmhiQ0JTYjI5ME1DQVhEVEl5TURRek1ESXpNVFEwTWxvWQpEekl3TlRNeE1ETXdNRGt3TkRReVdqQWNNUm93R0FZRFZRUUREQkZ5WldSb1lXTXViRzlqWVd3Z1VtOXZkREIyCk1CQUdCeXFHU000OUFnRUdCU3VCQkFBaUEySUFCRnRXYWF5OGhrdVZRTVpKUnhBOUp0d0dFUHRQUzJVbFdWckQKdHF1a2VVM1IwVm5wMUx4eTdLTmdEVDNWcmRvOEE3V0x4RHhHeVJ6cE1XQS9lZ0tGakpmQ3hkaWIrR29VT0tLMgoralMwdEhFSS9iRlFib3FBOVdWenVqZ0ZjM3F3RUtOak1HRXdId1lEVlIwakJCZ3dGb0FVdkZoYVUxbGp1OUhwClE5NXFwOFkrTXA2b0xVb3dEZ1lEVlIwUEFRSC9CQVFEQWdHR01CMEdBMVVkRGdRV0JCUzhXRnBUV1dPNzBlbEQKM21xbnhqNHlucWd0U2pBUEJnTlZIUk1CQWY4RUJUQURBUUgvTUFvR0NDcUdTTTQ5QkFNREEyZ0FNR1VDTUQ0QgpEQWpPTENBcUljbGc5bjlPOWlVUWZFTWZSSVFtVS9XaSs2amx6UUZPZjBURU5MN0Q3bnpJbVFpQVIxb1VLZ0l4CkFQUUpraVp4T1lCcm5Xc0kwMnlRR082VXRHNVFSbG5DVWt4a0J5NDJqOE8yVmZWWEFZNld0MlIyRGhGTjhoR3UKUkE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== -------------------------------------------------------------------------------- /tls/ca/x509/end_entity/1/cert-chain.pem.b64-twice: -------------------------------------------------------------------------------- 1 | 
TFMwdExTMUNSVWRKVGlCRFJWSlVTVVpKUTBGVVJTMHRMUzB0Q2sxSlNVSTRla05EUVZoeFowRjNTVUpCWjBsQ1FWUkJTMEpuWjNGb2EycFBVRkZSUkVGNlFXdE5VMGwzU1VGWlJGWlJVVVJFUW14NVdsZFNiMWxYVFhVS1lrYzVhbGxYZDJkVFZ6VXdXbGhLZEZwWFVuQlpXRkpzVFVJMFdFUlVTWHBOVkVWM1RucEJORTVVVVRCT2JHOVlSRlJKTUUxVVJYaE9ha0UxVFVSUk1BcE9iRzkzUm5wRlZrMUNUVWRCTVZWRlFYZDNUV050Vm10aFIwWnFURzE0ZGxreVJuTk5TRmwzUlVGWlNFdHZXa2w2YWpCRFFWRlpSa3MwUlVWQlEwbEVDbGxuUVVWVk4wZzFObmxuWVhJNVEyeHdXVUoyTlhKdlRGZzJUbkY2Tm1wbGFUSlZjVGhXZFVneVdqQTFiMVJDY0VrcldUbENkMUpCYlhCaVN6VlhSV01LWWt0UFZqTkdaRzB5T1M5TWJYQjZXa3RyY1hKRlUwUkVWblJRTWlzd01HMU9WVFJJUzNObkwwNXVVRE5JTnpScWNTOXpRMlY1TDI4NFMyZHpTWFZhUWdvMFZTOVBielJIVFUxSlIwcE5RamhIUVRGVlpFbDNVVmxOUW1GQlJrNHZURGRZTUdFM1VVbGhPRkY2VWtaeGFqTldXRkJ1VlRZNE5VMUNZMGRCTVZWa0NrVlJVVkZOUVRaRFJFaEtiRnBIYUdoWmVUVnpZakpPYUdKRVFXUkNaMDVXU0ZOVlJVWnFRVlZDWjJkeVFtZEZSa0pSWTBSQlVWbEpTM2RaUWtKUlZVZ0tRWGRKZDBoUldVUldVakJQUWtKWlJVWkxUVzExTUhsMlQyWTVTbWR1YmpWMEsyaHpSbFo2VDNBeldXTk5RVGhIUVRGVlpFVjNSVUl2ZDFGR1RVRk5RZ3BCVVVGM1EyZFpTVXR2V2tsNmFqQkZRWGROUkZwM1FYZGFRVWwzVkhkR1kzRmFkMkl4UlU1ak1UUTRjMmhGTUdwVlEySnlNM05uUlRNdmN6ZzRiM0pUQ2xoSVVrMTVVa3N4V2xOS2JXODVTemhDSzJOMU1FUktiRTExVDBGQmFrSjZRalJRT0VGa1IxWmtVbUpaWkZaeFJteHFhbGxUTlZVMlptY3JUQzl0UlVnS05GSnhaVWxWY0dZNWFteHFiRWRKT0RjelNqVm9ZVVl3U1VsMlJteGpWVDBLTFMwdExTMUZUa1FnUTBWU1ZFbEdTVU5CVkVVdExTMHRMUW90TFMwdExVSkZSMGxPSUVORlVsUkpSa2xEUVZSRkxTMHRMUzBLVFVsSlFqVnFRME5CVjNsblFYZEpRa0ZuU1ZaQlRpOU1OMWd3WVRkUlNXRTRVWHBTUm5GcU0xWllVRzVWTmpnMVRVRnZSME5EY1VkVFRUUTVRa0ZOUkFwTlFuZDRSMnBCV1VKblRsWkNRVTFOUlZoS2JGcEhhR2haZVRWellqSk9hR0pEUWxOaU1qa3dUVUkwV0VSVVNYcE5SRVY1VFZSQk0wNUVRVEJPVm05WUNrUlVUWHBOVkVWM1RrUkJOVTFFVVRCT1ZtOTNTa1JGYVUxRFFVZEJNVlZGUVhkM1dtTnRWbXRoUjBacVRHMTRkbGt5Um5OSlJXeDFaRWRXZVdKWFZtc0tZVmRHTUZwVVFqSk5Ra0ZIUW5seFIxTk5ORGxCWjBWSFFsTjFRa0pCUVdsQk1rbEJRa0UxU0hOV1dqWk5VREpJY0ZWVlRqZ3lXbFl2V1hCeE4wdHNXUXBJY21zMFlXaHdOM1pOZFRodFdWRm1NbU5NU2s1a05sSXpMMGxZVUc0emVEWnFUbmxNU1dSc1NWbFRjbk5uYVhNM1IwVkViR0ZNT1dzclNUUnpSVWM0Q21kRVExZDNRWFJuZFZWMVJXVmpVMmN5V2sxSlNtTnBjRzFVTUU5TWRqaHpOVFZHS3pCTFRtMU5SMUYzU0hkWlJGWlNNR3BDUW1kM1JtOUJWWFpHYUdFS1ZURnNhblU1U0hCUk9UVnhjRGhaSzAxd05tOU1WVzkzUkdkWlJGWlNNRkJCVVVndlFrRlJSRUZuUjBkTlFqQkhRVEZWWkVSblVWZENRbFJtZVNzeE9RcEhkVEJEUjNaRlRUQlNZVzg1TVZaNk5URlBkazlVUVZOQ1owNVdTRkpOUWtGbU9FVkRSRUZIUVZGSUwwRm5SVUZOUVc5SFEwTnhSMU5OTkRsQ1FVMUVDa0V5WjBGTlIxVkRUVVpGTmtkcWEycE5iMmhxV0RscFJHbG5UR2huTTAxYVMzVjNZMGxvZUhGaFRHODRVemhoWm5JeFJVaHpOM1ZPTkZWakwwTk1NblFLVXpkMloyaGlhMmxzWjBsNFFVNVJWV0ZtV25ZeFQxaE9iMmxSU0VWQmVscFFRMVJYZEhkcGVHUlBSVFZwYTI4MU5GSkJOSGRqTkZSTlpWQTJSMkpNY2dwSlNtMHdWRFprT1VsdFJtaEdVVDA5Q2kwdExTMHRSVTVFSUVORlVsUkpSa2xEUVZSRkxTMHRMUzBLTFMwdExTMUNSVWRKVGlCRFJWSlVTVVpKUTBGVVJTMHRMUzB0Q2sxSlNVSXpWRU5EUVZkUFowRjNTVUpCWjBsV1FVeDRXVmRzVGxwWk4zWlNObFZRWldGeFprZFFha3RsY1VNeFMwMUJiMGREUTNGSFUwMDBPVUpCVFVRS1RVSjNlRWRxUVZsQ1owNVdRa0ZOVFVWWVNteGFSMmhvV1hrMWMySXlUbWhpUTBKVFlqSTVNRTFEUVZoRVZFbDVUVVJSZWsxRVNYcE5WRkV3VFd4dldRcEVla2wzVGxSTmVFMUVUWGROUkd0M1RrUlJlVmRxUVdOTlVtOTNSMEZaUkZaUlVVUkVRa1o1V2xkU2IxbFhUWFZpUnpscVdWZDNaMVZ0T1haa1JFSXlDazFDUVVkQ2VYRkhVMDAwT1VGblJVZENVM1ZDUWtGQmFVRXlTVUZDUm5SWFlXRjVPR2hyZFZaUlRWcEtVbmhCT1VwMGQwZEZVSFJRVXpKVmJGZFdja1FLZEhGMWEyVlZNMUl3Vm01d01VeDRlVGRMVG1kRVZETldjbVJ2T0VFM1YweDRSSGhIZVZKNmNFMVhRUzlsWjB0R2FrcG1RM2hrYVdJclIyOVZUMHRMTWdvcmFsTXdkRWhGU1M5aVJsRmliM0ZCT1ZkV2VuVnFaMFpqTTNGM1JVdE9hazFIUlhkSWQxbEVWbEl3YWtKQ1ozZEdiMEZWZGtab1lWVXhiR3AxT1Vod0NsRTVOWEZ3T0ZrclRYQTJiMHhWYjNkRVoxbEVWbEl3VUVGUlNDOUNRVkZFUVdkSFIwMUNNRWRCTVZWa1JHZFJWMEpDVXpoWFJuQlVWMWRQTnpCbGJFUUtNMjF4Ym5ocU5IbHVjV2QwVTJwQlVFSm5UbFpJVWsxQ1FXWTRSVUpVUVVSQlVVZ3ZUVUZ2UjBORGNVZFRUVFE1UWtGTlJFRXlaMEZOUjF
WRFRVUTBRZ3BFUVdwUFRFTkJjVWxqYkdjNWJqbFBPV2xWVVdaRlRXWlNTVkZ0VlM5WGFTczJhbXg2VVVaUFpqQlVSVTVNTjBRM2JucEpiVkZwUVZJeGIxVkxaMGw0Q2tGUVVVcHJhVnA0VDFsQ2NtNVhjMGt3TW5sUlIwODJWWFJITlZGU2JHNURWV3Q0YTBKNU5ESnFPRTh5Vm1aV1dFRlpObGQwTWxJeVJHaEdUamhvUjNVS1VrRTlQUW90TFMwdExVVk9SQ0JEUlZKVVNVWkpRMEZVUlMwdExTMHRDZz09 -------------------------------------------------------------------------------- /tls/ca/x509/end_entity/1/cert.der: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sebadob/redhac/5cb82c267da76cd5ba3e819e76e375f3b225bef6/tls/ca/x509/end_entity/1/cert.der -------------------------------------------------------------------------------- /tls/ca/x509/end_entity/1/cert.fingerprint: -------------------------------------------------------------------------------- 1 | sha256:b52dd79e0f5da166b8e68bad1c3b488ac004d77d2cb48b6f586d0e1cc5a237a8 -------------------------------------------------------------------------------- /tls/ca/x509/end_entity/1/cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIB8zCCAXqgAwIBAgIBATAKBggqhkjOPQQDAzAkMSIwIAYDVQQDDBlyZWRoYWMu 3 | bG9jYWwgSW50ZXJtZWRpYXRlMB4XDTIzMTEwNzA4NTQ0NloXDTI0MTExNjA5MDQ0 4 | NlowFzEVMBMGA1UEAwwMcmVkaGFjLmxvY2FsMHYwEAYHKoZIzj0CAQYFK4EEACID 5 | YgAEU7H56ygar9ClpYBv5roLX6Nqz6jei2Uq8VuH2Z05oTBpI+Y9BwRAmpbK5WEc 6 | bKOV3Fdm29/LmpzZKkqrESDDVtP2+00mNU4HKsg/NnP3H74jq/sCey/o8KgsIuZB 7 | 4U/Oo4GMMIGJMB8GA1UdIwQYMBaAFN/L7X0a7QIa8QzRFqj3VXPnU685MBcGA1Ud 8 | EQQQMA6CDHJlZGhhYy5sb2NhbDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUH 9 | AwIwHQYDVR0OBBYEFKMmu0yvOf9Jgnn5t+hsFVzOp3YcMA8GA1UdEwEB/wQFMAMB 10 | AQAwCgYIKoZIzj0EAwMDZwAwZAIwTwFcqZwb1ENc148shE0jUCbr3sgE3/s88orS 11 | XHRMyRK1ZSJmo9K8B+cu0DJlMuOAAjBzB4P8AdGVdRbYdVqFljjYS5U6fg+L/mEH 12 | 4RqeIUpf9jljlGI873J5haF0IIvFlcU= 13 | -----END CERTIFICATE----- 14 | -------------------------------------------------------------------------------- /tls/ca/x509/end_entity/1/key.der: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sebadob/redhac/5cb82c267da76cd5ba3e819e76e375f3b225bef6/tls/ca/x509/end_entity/1/key.der -------------------------------------------------------------------------------- /tls/ca/x509/end_entity/1/key.der.hex: -------------------------------------------------------------------------------- 1 | 3081b6020100301006072a8648ce3d020106052b8104002204819e30819b0201010430357c23fa87ad2e483ee1584dade81bbe1a1340a13f8948f74395298d54f777aee058591003b4948b1c2eba4aee982d5da1640362000453b1f9eb281aafd0a5a5806fe6ba0b5fa36acfa8de8b652af15b87d99d39a1306923e63d0704409a96cae5611c6ca395dc5766dbdfcb9a9cd92a4aab1120c356d3f6fb4d26354e072ac83f3673f71fbe23abfb027b2fe8f0a82c22e641e14fce -------------------------------------------------------------------------------- /tls/ca/x509/end_entity/1/key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDA1fCP6h60uSD7hWE2t 3 | 6Bu+GhNAoT+JSPdDlSmNVPd3ruBYWRADtJSLHC66Su6YLV2hZANiAARTsfnrKBqv 4 | 0KWlgG/mugtfo2rPqN6LZSrxW4fZnTmhMGkj5j0HBECalsrlYRxso5XcV2bb38ua 5 | nNkqSqsRIMNW0/b7TSY1TgcqyD82c/cfviOr+wJ7L+jwqCwi5kHhT84= 6 | -----END PRIVATE KEY----- 7 | -------------------------------------------------------------------------------- /tls/ca/x509/end_entity/1/key.pem.b64: -------------------------------------------------------------------------------- 1 | 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JRzJBZ0VBTUJBR0J5cUdTTTQ5QWdFR0JTdUJCQUFpQklHZU1JR2JBZ0VCQkRBMWZDUDZoNjB1U0Q3aFdFMnQKNkJ1K0doTkFvVCtKU1BkRGxTbU5WUGQzcnVCWVdSQUR0SlNMSEM2NlN1NllMVjJoWkFOaUFBUlRzZm5yS0JxdgowS1dsZ0cvbXVndGZvMnJQcU42TFpTcnhXNGZablRtaE1Ha2o1ajBIQkVDYWxzcmxZUnhzbzVYY1YyYmIzOHVhCm5Oa3FTcXNSSU1OVzAvYjdUU1kxVGdjcXlEODJjL2NmdmlPcit3SjdMK2p3cUN3aTVrSGhUODQ9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K -------------------------------------------------------------------------------- /tls/ca/x509/end_entity/1/key.pem.b64-twice: -------------------------------------------------------------------------------- 1 | TFMwdExTMUNSVWRKVGlCUVVrbFdRVlJGSUV0RldTMHRMUzB0Q2sxSlJ6SkJaMFZCVFVKQlIwSjVjVWRUVFRRNVFXZEZSMEpUZFVKQ1FVRnBRa2xIWlUxSlIySkJaMFZDUWtSQk1XWkRVRFpvTmpCMVUwUTNhRmRGTW5RS05rSjFLMGRvVGtGdlZDdEtVMUJrUkd4VGJVNVdVR1F6Y25WQ1dWZFNRVVIwU2xOTVNFTTJObE4xTmxsTVZqSm9Xa0ZPYVVGQlVsUnpabTV5UzBKeGRnb3dTMWRzWjBjdmJYVm5kR1p2TW5KUWNVNDJURnBUY25oWE5HWmFibFJ0YUUxSGEybzFhakJJUWtWRFlXeHpjbXhaVW5oemJ6VllZMVl5WW1Jek9IVmhDbTVPYTNGVGNYTlNTVTFPVnpBdllqZFVVMWt4VkdkamNYbEVPREpqTDJObWRtbFBjaXQzU2pkTUsycDNjVU4zYVRWclNHaFVPRFE5Q2kwdExTMHRSVTVFSUZCU1NWWkJWRVVnUzBWWkxTMHRMUzBL -------------------------------------------------------------------------------- /tls/ca/x509/end_entity/serial: -------------------------------------------------------------------------------- 1 | 1 -------------------------------------------------------------------------------- /tls/ca/x509/intermediate/ca-chain.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIB5jCCAWygAwIBAgIVAN/L7X0a7QIa8QzRFqj3VXPnU685MAoGCCqGSM49BAMD 3 | MBwxGjAYBgNVBAMMEXJlZGhhYy5sb2NhbCBSb290MB4XDTIzMDEyMTA3NDA0NVoX 4 | DTMzMTEwNDA5MDQ0NVowJDEiMCAGA1UEAwwZcmVkaGFjLmxvY2FsIEludGVybWVk 5 | aWF0ZTB2MBAGByqGSM49AgEGBSuBBAAiA2IABA5HsVZ6MP2HpUUN82ZV/Ypq7KlY 6 | Hrk4ahp7vMu8mYQf2cLJNd6R3/IXPn3x6jNyLIdlIYSrsgis7GEDlaL9k+I4sEG8 7 | gDCWwAtguUuEecSg2ZMIJcipmT0OLv8s55F+0KNmMGQwHwYDVR0jBBgwFoAUvFha 8 | U1lju9HpQ95qp8Y+Mp6oLUowDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBTfy+19 9 | Gu0CGvEM0Rao91Vz51OvOTASBgNVHRMBAf8ECDAGAQH/AgEAMAoGCCqGSM49BAMD 10 | A2gAMGUCMFE6GjkjMohjX9iDigLhg3MZKuwcIhxqaLo8S8afr1EHs7uN4Uc/CL2t 11 | S7vghbkilgIxANQUafZv1OXNoiQHEAzZPCTWtwixdOE5iko54RA4wc4TMeP6GbLr 12 | IJm0T6d9ImFhFQ== 13 | -----END CERTIFICATE----- 14 | -----BEGIN CERTIFICATE----- 15 | MIIB3TCCAWOgAwIBAgIVALxYWlNZY7vR6UPeaqfGPjKeqC1KMAoGCCqGSM49BAMD 16 | MBwxGjAYBgNVBAMMEXJlZGhhYy5sb2NhbCBSb290MCAXDTIyMDQzMDIzMTQ0MloY 17 | DzIwNTMxMDMwMDkwNDQyWjAcMRowGAYDVQQDDBFyZWRoYWMubG9jYWwgUm9vdDB2 18 | MBAGByqGSM49AgEGBSuBBAAiA2IABFtWaay8hkuVQMZJRxA9JtwGEPtPS2UlWVrD 19 | tqukeU3R0Vnp1Lxy7KNgDT3Vrdo8A7WLxDxGyRzpMWA/egKFjJfCxdib+GoUOKK2 20 | +jS0tHEI/bFQboqA9WVzujgFc3qwEKNjMGEwHwYDVR0jBBgwFoAUvFhaU1lju9Hp 21 | Q95qp8Y+Mp6oLUowDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBS8WFpTWWO70elD 22 | 3mqnxj4ynqgtSjAPBgNVHRMBAf8EBTADAQH/MAoGCCqGSM49BAMDA2gAMGUCMD4B 23 | DAjOLCAqIclg9n9O9iUQfEMfRIQmU/Wi+6jlzQFOf0TENL7D7nzImQiAR1oUKgIx 24 | APQJkiZxOYBrnWsI02yQGO6UtG5QRlnCUkxkBy42j8O2VfVXAY6Wt2R2DhFN8hGu 25 | RA== 26 | -----END CERTIFICATE----- 27 | -------------------------------------------------------------------------------- /tls/ca/x509/intermediate/intermediate.cert.der: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sebadob/redhac/5cb82c267da76cd5ba3e819e76e375f3b225bef6/tls/ca/x509/intermediate/intermediate.cert.der -------------------------------------------------------------------------------- 
/tls/ca/x509/intermediate/intermediate.cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIB5jCCAWygAwIBAgIVAN/L7X0a7QIa8QzRFqj3VXPnU685MAoGCCqGSM49BAMD 3 | MBwxGjAYBgNVBAMMEXJlZGhhYy5sb2NhbCBSb290MB4XDTIzMDEyMTA3NDA0NVoX 4 | DTMzMTEwNDA5MDQ0NVowJDEiMCAGA1UEAwwZcmVkaGFjLmxvY2FsIEludGVybWVk 5 | aWF0ZTB2MBAGByqGSM49AgEGBSuBBAAiA2IABA5HsVZ6MP2HpUUN82ZV/Ypq7KlY 6 | Hrk4ahp7vMu8mYQf2cLJNd6R3/IXPn3x6jNyLIdlIYSrsgis7GEDlaL9k+I4sEG8 7 | gDCWwAtguUuEecSg2ZMIJcipmT0OLv8s55F+0KNmMGQwHwYDVR0jBBgwFoAUvFha 8 | U1lju9HpQ95qp8Y+Mp6oLUowDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBTfy+19 9 | Gu0CGvEM0Rao91Vz51OvOTASBgNVHRMBAf8ECDAGAQH/AgEAMAoGCCqGSM49BAMD 10 | A2gAMGUCMFE6GjkjMohjX9iDigLhg3MZKuwcIhxqaLo8S8afr1EHs7uN4Uc/CL2t 11 | S7vghbkilgIxANQUafZv1OXNoiQHEAzZPCTWtwixdOE5iko54RA4wc4TMeP6GbLr 12 | IJm0T6d9ImFhFQ== 13 | -----END CERTIFICATE----- 14 | -------------------------------------------------------------------------------- /tls/ca/x509/intermediate/intermediate.fingerprint: -------------------------------------------------------------------------------- 1 | sha256:6a23c7911d15b2778ac773267f8d15bb28b1b8ca2edd26b918af4575a041d604 -------------------------------------------------------------------------------- /tls/ca/x509/intermediate/intermediate.key.der: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sebadob/redhac/5cb82c267da76cd5ba3e819e76e375f3b225bef6/tls/ca/x509/intermediate/intermediate.key.der -------------------------------------------------------------------------------- /tls/ca/x509/intermediate/intermediate.key.der.hex: -------------------------------------------------------------------------------- 1 | f4d298bbb54a6cec99ef2f6b04f168260c4d3c3069eb58c56a96f5dc980a049f1764ac4170f72491d16c0c1e800764292ca877dcfa7df680e44107de9ee193ae5d9b30dc23cfac3beb972475e293798da77676d68604fc28a68a82bc498d87883014b64d7dbaf4325e4f235cd193a7ceaf3ef76a69febb73d4dcff2d0112c34634c02e3e6cd1969f6e9ca0512a4aa4eca7cf16e9702b3b2b4637d6ddddf7c360035810fe8bfabc264cc2c37759af22c9bef3953c80cab8e40ef774df1a57ebeff1c72cf60d2e0e88473c7bd1bfc058c09fa257c28c -------------------------------------------------------------------------------- /tls/ca/x509/intermediate/intermediate.key.pem: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sebadob/redhac/5cb82c267da76cd5ba3e819e76e375f3b225bef6/tls/ca/x509/intermediate/intermediate.key.pem -------------------------------------------------------------------------------- /tls/ca/x509/intermediate/intermediate.key.pem.hex: -------------------------------------------------------------------------------- 1 | 883c099be1784c359aea16f425cdc6d27b07121570df17fbcb59f0a9dc05a55ebff70881f887f680180afb072e34b416826c2d0be99f8b347c49ddb7c3ac2122c0d70623e5cd28c13f18fb19d5cd974e02f09c884bbc21c43450c6a8d1df6f2b84d7cdc8206799c14396276892e264ba01692cf3bf1dbb3b186409c32bdbfe57bfffe0e8e2153b858c8512e02c81888cc7a444b7cb1b3480af92dd565a467fd6c584c15cf4819d0295d891325f1124403c1ef9dac5c0b6711197634eb3008d8982d8f8246e6f5d584329edba69b39af40f3b51c0900e3e8097522e700f0f1499e7951990ac5afa5b5434eaab913d7553ac4c18730eb8d4d401680e35251e407b4f456066b5f595e4b8135b730554593dcaa50965f364a1df759cbc4a5ec75ad8c7bcd5226181f688ada2c2e9b2434497739cf9c4c120da6dfaaf53b58cc3f1f1e505b6959353110f9d35b754f3b1 -------------------------------------------------------------------------------- /tls/ca/x509/root/root.cert.der: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/sebadob/redhac/5cb82c267da76cd5ba3e819e76e375f3b225bef6/tls/ca/x509/root/root.cert.der -------------------------------------------------------------------------------- /tls/ca/x509/root/root.cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIB3TCCAWOgAwIBAgIVALxYWlNZY7vR6UPeaqfGPjKeqC1KMAoGCCqGSM49BAMD 3 | MBwxGjAYBgNVBAMMEXJlZGhhYy5sb2NhbCBSb290MCAXDTIyMDQzMDIzMTQ0MloY 4 | DzIwNTMxMDMwMDkwNDQyWjAcMRowGAYDVQQDDBFyZWRoYWMubG9jYWwgUm9vdDB2 5 | MBAGByqGSM49AgEGBSuBBAAiA2IABFtWaay8hkuVQMZJRxA9JtwGEPtPS2UlWVrD 6 | tqukeU3R0Vnp1Lxy7KNgDT3Vrdo8A7WLxDxGyRzpMWA/egKFjJfCxdib+GoUOKK2 7 | +jS0tHEI/bFQboqA9WVzujgFc3qwEKNjMGEwHwYDVR0jBBgwFoAUvFhaU1lju9Hp 8 | Q95qp8Y+Mp6oLUowDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBS8WFpTWWO70elD 9 | 3mqnxj4ynqgtSjAPBgNVHRMBAf8EBTADAQH/MAoGCCqGSM49BAMDA2gAMGUCMD4B 10 | DAjOLCAqIclg9n9O9iUQfEMfRIQmU/Wi+6jlzQFOf0TENL7D7nzImQiAR1oUKgIx 11 | APQJkiZxOYBrnWsI02yQGO6UtG5QRlnCUkxkBy42j8O2VfVXAY6Wt2R2DhFN8hGu 12 | RA== 13 | -----END CERTIFICATE----- 14 | -------------------------------------------------------------------------------- /tls/ca/x509/root/root.fingerprint: -------------------------------------------------------------------------------- 1 | sha256:720924340a7665da5ad498620fa32d4a6f08be503ac031e8913d536ffb6e75b6 -------------------------------------------------------------------------------- /tls/ca/x509/root/root.key.der: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sebadob/redhac/5cb82c267da76cd5ba3e819e76e375f3b225bef6/tls/ca/x509/root/root.key.der -------------------------------------------------------------------------------- /tls/ca/x509/root/root.key.der.hex: -------------------------------------------------------------------------------- 1 | dc66e1268dc383d6ea493a0b697007467bf726102bc26596cfccdc498ed1c138aa7385c756b378e09ed42eefefe45660ee3524cd4116fb3b25fd2241869fa64412d61d98080225883342f1534810d2aca4f699d388b58357601234bc22868a5632ba43e15dab9ca1ccabdb0e7f1c2e62065a55949dcebbb911313a9d3657a1ecf60bc19cb48b541b89cb151b717a7af8cd7602e193ed9d5e1a3ca3d6f9875f1603672d25bbe03cdfe8cd5672a88098f28cf019eecfec7aa4c824cb3d56a69e5decda9bab6b30463a7e765a5c6e1e609b83978a9dc5 -------------------------------------------------------------------------------- /tls/ca/x509/root/root.key.pem: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sebadob/redhac/5cb82c267da76cd5ba3e819e76e375f3b225bef6/tls/ca/x509/root/root.key.pem -------------------------------------------------------------------------------- /tls/ca/x509/root/root.key.pem.hex: -------------------------------------------------------------------------------- 1 | 
6965e9bc1ebdf53e4c55f40a4c93341ded45de56e9432ccee8387a5a22033039bc0667671eee149518978479002dce44de4800c978712fdc34baa25c90c3b54ea866cde0478f8b6122fd82b19ec584d543eab8656f84a1eaa1f42d389a91369e0963c15dabe364dee4300f56a9c8a00912766aba64164b41cba45f4310d01e477276c4251e8d54608726f9ec3bc28cd60e3a4f79f3d44646473a40daabd05ed3115f56d745405bee08505630f201cfdbe17527e1aadd4ed6625b9a7a23a3302f4eadfb02c6ec8b5767730518011c45f271525ba6e914f8eb9cc91a3d47723d508d6f446854d2ae40364d81cf5ed73d357b8c3d24d4a95289abf3a0a2f3c17606ba6920fb7fcc7e1f56f1986db4e6dd0bde7a470cf4995fe904030005e4b1d30ca7ec5f3e1015e5f01910fc8cfd6aa112597f2c28c13ceb0238098d92d1b20a75cba83e601a73797ddd7312a900a2 -------------------------------------------------------------------------------- /tls/redhac.ca-chain.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIB5jCCAWygAwIBAgIVAN/L7X0a7QIa8QzRFqj3VXPnU685MAoGCCqGSM49BAMD 3 | MBwxGjAYBgNVBAMMEXJlZGhhYy5sb2NhbCBSb290MB4XDTIzMDEyMTA3NDA0NVoX 4 | DTMzMTEwNDA5MDQ0NVowJDEiMCAGA1UEAwwZcmVkaGFjLmxvY2FsIEludGVybWVk 5 | aWF0ZTB2MBAGByqGSM49AgEGBSuBBAAiA2IABA5HsVZ6MP2HpUUN82ZV/Ypq7KlY 6 | Hrk4ahp7vMu8mYQf2cLJNd6R3/IXPn3x6jNyLIdlIYSrsgis7GEDlaL9k+I4sEG8 7 | gDCWwAtguUuEecSg2ZMIJcipmT0OLv8s55F+0KNmMGQwHwYDVR0jBBgwFoAUvFha 8 | U1lju9HpQ95qp8Y+Mp6oLUowDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBTfy+19 9 | Gu0CGvEM0Rao91Vz51OvOTASBgNVHRMBAf8ECDAGAQH/AgEAMAoGCCqGSM49BAMD 10 | A2gAMGUCMFE6GjkjMohjX9iDigLhg3MZKuwcIhxqaLo8S8afr1EHs7uN4Uc/CL2t 11 | S7vghbkilgIxANQUafZv1OXNoiQHEAzZPCTWtwixdOE5iko54RA4wc4TMeP6GbLr 12 | IJm0T6d9ImFhFQ== 13 | -----END CERTIFICATE----- 14 | -----BEGIN CERTIFICATE----- 15 | MIIB3TCCAWOgAwIBAgIVALxYWlNZY7vR6UPeaqfGPjKeqC1KMAoGCCqGSM49BAMD 16 | MBwxGjAYBgNVBAMMEXJlZGhhYy5sb2NhbCBSb290MCAXDTIyMDQzMDIzMTQ0MloY 17 | DzIwNTMxMDMwMDkwNDQyWjAcMRowGAYDVQQDDBFyZWRoYWMubG9jYWwgUm9vdDB2 18 | MBAGByqGSM49AgEGBSuBBAAiA2IABFtWaay8hkuVQMZJRxA9JtwGEPtPS2UlWVrD 19 | tqukeU3R0Vnp1Lxy7KNgDT3Vrdo8A7WLxDxGyRzpMWA/egKFjJfCxdib+GoUOKK2 20 | +jS0tHEI/bFQboqA9WVzujgFc3qwEKNjMGEwHwYDVR0jBBgwFoAUvFhaU1lju9Hp 21 | Q95qp8Y+Mp6oLUowDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBS8WFpTWWO70elD 22 | 3mqnxj4ynqgtSjAPBgNVHRMBAf8EBTADAQH/MAoGCCqGSM49BAMDA2gAMGUCMD4B 23 | DAjOLCAqIclg9n9O9iUQfEMfRIQmU/Wi+6jlzQFOf0TENL7D7nzImQiAR1oUKgIx 24 | APQJkiZxOYBrnWsI02yQGO6UtG5QRlnCUkxkBy42j8O2VfVXAY6Wt2R2DhFN8hGu 25 | RA== 26 | -----END CERTIFICATE----- 27 | -------------------------------------------------------------------------------- /tls/redhac.cert-chain.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIB8zCCAXqgAwIBAgIBATAKBggqhkjOPQQDAzAkMSIwIAYDVQQDDBlyZWRoYWMu 3 | bG9jYWwgSW50ZXJtZWRpYXRlMB4XDTIzMTEwNzA4NTQ0NloXDTI0MTExNjA5MDQ0 4 | NlowFzEVMBMGA1UEAwwMcmVkaGFjLmxvY2FsMHYwEAYHKoZIzj0CAQYFK4EEACID 5 | YgAEU7H56ygar9ClpYBv5roLX6Nqz6jei2Uq8VuH2Z05oTBpI+Y9BwRAmpbK5WEc 6 | bKOV3Fdm29/LmpzZKkqrESDDVtP2+00mNU4HKsg/NnP3H74jq/sCey/o8KgsIuZB 7 | 4U/Oo4GMMIGJMB8GA1UdIwQYMBaAFN/L7X0a7QIa8QzRFqj3VXPnU685MBcGA1Ud 8 | EQQQMA6CDHJlZGhhYy5sb2NhbDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUH 9 | AwIwHQYDVR0OBBYEFKMmu0yvOf9Jgnn5t+hsFVzOp3YcMA8GA1UdEwEB/wQFMAMB 10 | AQAwCgYIKoZIzj0EAwMDZwAwZAIwTwFcqZwb1ENc148shE0jUCbr3sgE3/s88orS 11 | XHRMyRK1ZSJmo9K8B+cu0DJlMuOAAjBzB4P8AdGVdRbYdVqFljjYS5U6fg+L/mEH 12 | 4RqeIUpf9jljlGI873J5haF0IIvFlcU= 13 | -----END CERTIFICATE----- 14 | -----BEGIN CERTIFICATE----- 15 | MIIB5jCCAWygAwIBAgIVAN/L7X0a7QIa8QzRFqj3VXPnU685MAoGCCqGSM49BAMD 16 | 
MBwxGjAYBgNVBAMMEXJlZGhhYy5sb2NhbCBSb290MB4XDTIzMDEyMTA3NDA0NVoX 17 | DTMzMTEwNDA5MDQ0NVowJDEiMCAGA1UEAwwZcmVkaGFjLmxvY2FsIEludGVybWVk 18 | aWF0ZTB2MBAGByqGSM49AgEGBSuBBAAiA2IABA5HsVZ6MP2HpUUN82ZV/Ypq7KlY 19 | Hrk4ahp7vMu8mYQf2cLJNd6R3/IXPn3x6jNyLIdlIYSrsgis7GEDlaL9k+I4sEG8 20 | gDCWwAtguUuEecSg2ZMIJcipmT0OLv8s55F+0KNmMGQwHwYDVR0jBBgwFoAUvFha 21 | U1lju9HpQ95qp8Y+Mp6oLUowDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBTfy+19 22 | Gu0CGvEM0Rao91Vz51OvOTASBgNVHRMBAf8ECDAGAQH/AgEAMAoGCCqGSM49BAMD 23 | A2gAMGUCMFE6GjkjMohjX9iDigLhg3MZKuwcIhxqaLo8S8afr1EHs7uN4Uc/CL2t 24 | S7vghbkilgIxANQUafZv1OXNoiQHEAzZPCTWtwixdOE5iko54RA4wc4TMeP6GbLr 25 | IJm0T6d9ImFhFQ== 26 | -----END CERTIFICATE----- 27 | -----BEGIN CERTIFICATE----- 28 | MIIB3TCCAWOgAwIBAgIVALxYWlNZY7vR6UPeaqfGPjKeqC1KMAoGCCqGSM49BAMD 29 | MBwxGjAYBgNVBAMMEXJlZGhhYy5sb2NhbCBSb290MCAXDTIyMDQzMDIzMTQ0MloY 30 | DzIwNTMxMDMwMDkwNDQyWjAcMRowGAYDVQQDDBFyZWRoYWMubG9jYWwgUm9vdDB2 31 | MBAGByqGSM49AgEGBSuBBAAiA2IABFtWaay8hkuVQMZJRxA9JtwGEPtPS2UlWVrD 32 | tqukeU3R0Vnp1Lxy7KNgDT3Vrdo8A7WLxDxGyRzpMWA/egKFjJfCxdib+GoUOKK2 33 | +jS0tHEI/bFQboqA9WVzujgFc3qwEKNjMGEwHwYDVR0jBBgwFoAUvFhaU1lju9Hp 34 | Q95qp8Y+Mp6oLUowDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBS8WFpTWWO70elD 35 | 3mqnxj4ynqgtSjAPBgNVHRMBAf8EBTADAQH/MAoGCCqGSM49BAMDA2gAMGUCMD4B 36 | DAjOLCAqIclg9n9O9iUQfEMfRIQmU/Wi+6jlzQFOf0TENL7D7nzImQiAR1oUKgIx 37 | APQJkiZxOYBrnWsI02yQGO6UtG5QRlnCUkxkBy42j8O2VfVXAY6Wt2R2DhFN8hGu 38 | RA== 39 | -----END CERTIFICATE----- 40 | -------------------------------------------------------------------------------- /tls/redhac.key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDA1fCP6h60uSD7hWE2t 3 | 6Bu+GhNAoT+JSPdDlSmNVPd3ruBYWRADtJSLHC66Su6YLV2hZANiAARTsfnrKBqv 4 | 0KWlgG/mugtfo2rPqN6LZSrxW4fZnTmhMGkj5j0HBECalsrlYRxso5XcV2bb38ua 5 | nNkqSqsRIMNW0/b7TSY1TgcqyD82c/cfviOr+wJ7L+jwqCwi5kHhT84= 6 | -----END PRIVATE KEY----- 7 | --------------------------------------------------------------------------------