├── .github ├── dependabot.yaml └── workflows │ ├── ci.yaml │ └── release-please.yaml ├── .gitignore ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE ├── README.md ├── examples ├── README.md ├── shared-lease.rs └── simple-lease.rs ├── justfile ├── src ├── lease.rs └── lib.rs └── tests ├── README.md ├── create-lease.rs ├── leader-election.rs ├── release-lease.rs └── utils.rs /.github/dependabot.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: cargo 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 | open-pull-requests-limit: 10 8 | allow: 9 | - dependency-name: kube 10 | -------------------------------------------------------------------------------- /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: CI 3 | 4 | on: 5 | push: 6 | branches: 7 | - master 8 | pull_request: {} 9 | 10 | env: 11 | CARGO_TERM_COLOR: always 12 | 13 | jobs: 14 | test: 15 | name: Test 16 | runs-on: ubuntu-22.04 17 | steps: 18 | - uses: actions/checkout@v2 19 | - uses: nolar/setup-k3d-k3s@v1 20 | with: 21 | k3d-name: kle 22 | - uses: actions-rs/toolchain@v1 23 | with: 24 | profile: minimal 25 | toolchain: stable 26 | override: true 27 | - uses: Swatinem/rust-cache@v1 28 | - uses: actions-rs/cargo@v1 29 | with: 30 | command: test 31 | 32 | clippy_check: 33 | name: Lint 34 | runs-on: ubuntu-latest 35 | steps: 36 | - uses: actions/checkout@v2 37 | - uses: actions-rs/toolchain@v1 38 | with: 39 | toolchain: nightly 40 | components: clippy 41 | override: true 42 | - uses: Swatinem/rust-cache@v1 43 | - uses: actions-rs/clippy-check@v1 44 | with: 45 | token: ${{ secrets.GITHUB_TOKEN }} 46 | args: --all-features 47 | -------------------------------------------------------------------------------- /.github/workflows/release-please.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: CD 3 | 4 | on: 5 | push: 6 | branches: 7 | - master 8 | 9 | jobs: 10 | release-please: 11 | name: Release Please # https://github.com/googleapis/release-please 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Release Please 15 | uses: google-github-actions/release-please-action@v3 16 | id: release 17 | with: 18 | release-type: rust 19 | package-name: release-please-action 20 | bump-minor-pre-major: true 21 | bump-patch-for-minor-pre-major: true 22 | extra-files: | 23 | README.md 24 | 25 | - name: Checkout 26 | if: ${{ steps.release.outputs.release_created }} 27 | uses: actions/checkout@v2 28 | 29 | - name: Install Rust 30 | if: ${{ steps.release.outputs.release_created }} 31 | uses: actions-rs/toolchain@v1 32 | with: 33 | profile: minimal 34 | toolchain: stable 35 | override: true 36 | 37 | - name: Cache 38 | if: ${{ steps.release.outputs.release_created }} 39 | uses: Swatinem/rust-cache@v1 40 | 41 | - name: Publish to crates.io 42 | if: ${{ steps.release.outputs.release_created }} 43 | run: | 44 | cargo publish --token "${{ secrets.CARGO_REGISTRY_TOKEN }}" 45 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | Cargo.lock 3 | kubeconfig 4 | .envrc 5 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## 
[0.41.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.40.0...v0.41.0) (2025-05-23) 4 | 5 | 6 | ### ⚠ BREAKING CHANGES 7 | 8 | * white space change to trigger release pls 9 | 10 | ### Features 11 | 12 | * white space change to trigger release pls ([90304bd](https://github.com/hendrikmaus/kube-leader-election/commit/90304bdf5eb8f369b5c39656ed359b85d94ac019)) 13 | 14 | ## [0.40.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.39.0...v0.40.0) (2025-03-12) 15 | 16 | 17 | ### ⚠ BREAKING CHANGES 18 | 19 | * update kube requirement from 0.98 to 0.99 ([#103](https://github.com/hendrikmaus/kube-leader-election/issues/103)) 20 | 21 | ### Features 22 | 23 | * update kube requirement from 0.98 to 0.99 ([#103](https://github.com/hendrikmaus/kube-leader-election/issues/103)) ([1b591ae](https://github.com/hendrikmaus/kube-leader-election/commit/1b591ae355083ee0264e7935748d7b8858f996d1)) 24 | 25 | ## [0.39.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.38.0...v0.39.0) (2025-01-13) 26 | 27 | 28 | ### ⚠ BREAKING CHANGES 29 | 30 | * update kube requirement from 0.97 to 0.98 ([#101](https://github.com/hendrikmaus/kube-leader-election/issues/101)) 31 | 32 | ### Features 33 | 34 | * update kube requirement from 0.97 to 0.98 ([#101](https://github.com/hendrikmaus/kube-leader-election/issues/101)) ([31de688](https://github.com/hendrikmaus/kube-leader-election/commit/31de6886cd2fa4b00ac38557871720e4f7bd78cc)) 35 | 36 | ## [0.38.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.37.0...v0.38.0) (2024-11-20) 37 | 38 | 39 | ### ⚠ BREAKING CHANGES 40 | 41 | * update kube requirement from 0.96 to 0.97 ([#99](https://github.com/hendrikmaus/kube-leader-election/issues/99)) 42 | 43 | ### Features 44 | 45 | * update kube requirement from 0.96 to 0.97 ([#99](https://github.com/hendrikmaus/kube-leader-election/issues/99)) ([4895308](https://github.com/hendrikmaus/kube-leader-election/commit/48953086619a7280f28a55b40c34ba0a68b6a68b)) 46 | 47 | ## [0.37.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.36.0...v0.37.0) (2024-10-11) 48 | 49 | 50 | ### ⚠ BREAKING CHANGES 51 | 52 | * update kube requirement from 0.95 to 0.96 ([#97](https://github.com/hendrikmaus/kube-leader-election/issues/97)) 53 | 54 | ### Features 55 | 56 | * update kube requirement from 0.95 to 0.96 ([#97](https://github.com/hendrikmaus/kube-leader-election/issues/97)) ([38df68b](https://github.com/hendrikmaus/kube-leader-election/commit/38df68ba08fff7583ecf2d0f133a2a147c18e4cb)) 57 | 58 | ## [0.36.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.35.0...v0.36.0) (2024-09-16) 59 | 60 | 61 | ### ⚠ BREAKING CHANGES 62 | 63 | * update kube requirement from 0.94 to 0.95 ([#95](https://github.com/hendrikmaus/kube-leader-election/issues/95)) 64 | 65 | ### Features 66 | 67 | * update kube requirement from 0.94 to 0.95 ([#95](https://github.com/hendrikmaus/kube-leader-election/issues/95)) ([1585baf](https://github.com/hendrikmaus/kube-leader-election/commit/1585baf9a85c5b51ac21b6920cfc5f88be2688d1)) 68 | 69 | ## [0.35.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.34.0...v0.35.0) (2024-09-14) 70 | 71 | 72 | ### ⚠ BREAKING CHANGES 73 | 74 | * update kube requirement from 0.93 to 0.94 ([#93](https://github.com/hendrikmaus/kube-leader-election/issues/93)) 75 | 76 | ### Features 77 | 78 | * update kube requirement from 0.93 to 0.94 ([#93](https://github.com/hendrikmaus/kube-leader-election/issues/93)) 
([7895b4c](https://github.com/hendrikmaus/kube-leader-election/commit/7895b4c319cb24b163b24a7264cb8c71b0f80b33)) 79 | 80 | ## [0.34.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.33.0...v0.34.0) (2024-07-23) 81 | 82 | 83 | ### ⚠ BREAKING CHANGES 84 | 85 | * update kube requirement from 0.92 to 0.93 ([#91](https://github.com/hendrikmaus/kube-leader-election/issues/91)) 86 | 87 | ### Features 88 | 89 | * update kube requirement from 0.92 to 0.93 ([#91](https://github.com/hendrikmaus/kube-leader-election/issues/91)) ([cc9f19d](https://github.com/hendrikmaus/kube-leader-election/commit/cc9f19d9c8a62818a6a3e729a3511dcd37fd4a47)) 90 | 91 | ## [0.33.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.32.0...v0.33.0) (2024-06-12) 92 | 93 | 94 | ### ⚠ BREAKING CHANGES 95 | 96 | * update kube requirement from 0.91 to 0.92 ([#89](https://github.com/hendrikmaus/kube-leader-election/issues/89)) 97 | 98 | ### Features 99 | 100 | * update kube requirement from 0.91 to 0.92 ([#89](https://github.com/hendrikmaus/kube-leader-election/issues/89)) ([98a3eca](https://github.com/hendrikmaus/kube-leader-election/commit/98a3ecad7c0389805cc9f866767549606104016a)) 101 | 102 | ## [0.32.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.31.0...v0.32.0) (2024-05-06) 103 | 104 | 105 | ### ⚠ BREAKING CHANGES 106 | 107 | * update kube requirement from 0.90 to 0.91 ([#87](https://github.com/hendrikmaus/kube-leader-election/issues/87)) 108 | 109 | ### fest 110 | 111 | * update kube requirement from 0.90 to 0.91 ([#87](https://github.com/hendrikmaus/kube-leader-election/issues/87)) ([d888438](https://github.com/hendrikmaus/kube-leader-election/commit/d8884383c3a3af7a24ed222704c2eaa395ff828c)) 112 | 113 | ## [0.31.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.30.0...v0.31.0) (2024-04-04) 114 | 115 | 116 | ### ⚠ BREAKING CHANGES 117 | 118 | * update kube requirement from 0.89 to 0.90 ([#85](https://github.com/hendrikmaus/kube-leader-election/issues/85)) 119 | 120 | ### Features 121 | 122 | * update kube requirement from 0.89 to 0.90 ([#85](https://github.com/hendrikmaus/kube-leader-election/issues/85)) ([bf6e7a4](https://github.com/hendrikmaus/kube-leader-election/commit/bf6e7a46875c561cd4ffd80da29982eddc12b16d)) 123 | 124 | ## [0.30.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.29.0...v0.30.0) (2024-03-26) 125 | 126 | 127 | ### ⚠ BREAKING CHANGES 128 | 129 | * update kube requirement from 0.88 to 0.89 ([#83](https://github.com/hendrikmaus/kube-leader-election/issues/83)) 130 | 131 | ### Features 132 | 133 | * update kube requirement from 0.88 to 0.89 ([#83](https://github.com/hendrikmaus/kube-leader-election/issues/83)) ([f8aae8e](https://github.com/hendrikmaus/kube-leader-election/commit/f8aae8e7cea7a4448cfe503831ca00a0ac94e096)) 134 | 135 | ## [0.29.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.28.0...v0.29.0) (2024-01-22) 136 | 137 | 138 | ### ⚠ BREAKING CHANGES 139 | 140 | * update kube requirement from 0.87 to 0.88 ([#80](https://github.com/hendrikmaus/kube-leader-election/issues/80)) 141 | 142 | ### Features 143 | 144 | * update kube requirement from 0.87 to 0.88 ([#80](https://github.com/hendrikmaus/kube-leader-election/issues/80)) ([8107f21](https://github.com/hendrikmaus/kube-leader-election/commit/8107f21d0ef5623bd6647e62226f5ace68eaa5c2)) 145 | 146 | ## [0.28.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.27.0...v0.28.0) (2023-11-02) 147 | 148 | 149 | ### ⚠ BREAKING CHANGES 150 | 151 | * 
build(deps): update kube requirement from 0.86 to 0.87 ([#78](https://github.com/hendrikmaus/kube-leader-election/issues/78)) 152 | 153 | ### Features 154 | 155 | * build(deps): update kube requirement from 0.86 to 0.87 ([#78](https://github.com/hendrikmaus/kube-leader-election/issues/78)) ([63d9746](https://github.com/hendrikmaus/kube-leader-election/commit/63d9746236ce95dfeeee9b2b5abfb75ac783a701)) 156 | 157 | ## [0.27.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.26.0...v0.27.0) (2023-10-01) 158 | 159 | 160 | ### ⚠ BREAKING CHANGES 161 | 162 | * require k8s-openapi >=0.20 ([#76](https://github.com/hendrikmaus/kube-leader-election/issues/76)) 163 | 164 | ### Features 165 | 166 | * require k8s-openapi >=0.20 ([#76](https://github.com/hendrikmaus/kube-leader-election/issues/76)) ([27d2ac1](https://github.com/hendrikmaus/kube-leader-election/commit/27d2ac139998aca7b568058bfcb9e17263b7eed7)) 167 | 168 | ## [0.26.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.25.0...v0.26.0) (2023-10-01) 169 | 170 | 171 | ### ⚠ BREAKING CHANGES 172 | 173 | * update kube and make k8s-openapi more flexible ([#74](https://github.com/hendrikmaus/kube-leader-election/issues/74)) 174 | 175 | ### Features 176 | 177 | * update kube and make k8s-openapi more flexible ([#74](https://github.com/hendrikmaus/kube-leader-election/issues/74)) ([e4f1045](https://github.com/hendrikmaus/kube-leader-election/commit/e4f104593298377e0e663100288852deaa69f761)) 178 | 179 | ## [0.25.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.24.0...v0.25.0) (2023-09-01) 180 | 181 | 182 | ### ⚠ BREAKING CHANGES 183 | 184 | * update kube depencies ([#70](https://github.com/hendrikmaus/kube-leader-election/issues/70)) 185 | 186 | ### Features 187 | 188 | * update kube depencies ([#70](https://github.com/hendrikmaus/kube-leader-election/issues/70)) ([4823455](https://github.com/hendrikmaus/kube-leader-election/commit/4823455da6a8b6a5355b671c0871eca910b16999)) 189 | 190 | ## [0.24.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.23.0...v0.24.0) (2023-07-15) 191 | 192 | 193 | ### ⚠ BREAKING CHANGES 194 | 195 | * Update kube requirement from 0.83 to 0.84 ([#67](https://github.com/hendrikmaus/kube-leader-election/issues/67)) 196 | 197 | ### Features 198 | 199 | * Update kube requirement from 0.83 to 0.84 ([#67](https://github.com/hendrikmaus/kube-leader-election/issues/67)) ([e90d808](https://github.com/hendrikmaus/kube-leader-election/commit/e90d808a5015fa7e7e65ddc1d4b608c990b9ba06)) 200 | 201 | ## [0.23.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.22.0...v0.23.0) (2023-06-06) 202 | 203 | 204 | ### ⚠ BREAKING CHANGES 205 | 206 | * Update kube requirement from 0.82 to 0.83 ([#65](https://github.com/hendrikmaus/kube-leader-election/issues/65)) 207 | 208 | ### Features 209 | 210 | * Update kube requirement from 0.82 to 0.83 ([#65](https://github.com/hendrikmaus/kube-leader-election/issues/65)) ([b5a87e6](https://github.com/hendrikmaus/kube-leader-election/commit/b5a87e6a4127e0223feed8e312adae6ff062e5e9)) 211 | 212 | ## [0.22.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.21.0...v0.22.0) (2023-04-27) 213 | 214 | 215 | ### Features 216 | 217 | * Update kube to 0.82 and k8s-openapi to 0.18 ([#63](https://github.com/hendrikmaus/kube-leader-election/issues/63)) ([79508cf](https://github.com/hendrikmaus/kube-leader-election/commit/79508cf80773a0cb84e3a6c876c44090adbfae3e)) 218 | 219 | 220 | ### Miscellaneous Chores 221 | 222 | * **release:** set 
release version ([e017444](https://github.com/hendrikmaus/kube-leader-election/commit/e017444d6b7ee8e117fd785a6cb55734ca35928a)) 223 | 224 | ## [0.21.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.20.0...v0.21.0) (2023-03-02) 225 | 226 | 227 | ### ⚠ BREAKING CHANGES 228 | 229 | * Update kube requirement from 0.79 to 0.80 ([#59](https://github.com/hendrikmaus/kube-leader-election/issues/59)) 230 | 231 | ### Features 232 | 233 | * Update kube requirement from 0.79 to 0.80 ([#59](https://github.com/hendrikmaus/kube-leader-election/issues/59)) ([a905c12](https://github.com/hendrikmaus/kube-leader-election/commit/a905c120a3d4c354b9fc18fcb99da4d36ff060f1)) 234 | 235 | ## [0.20.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.19.0...v0.20.0) (2023-02-24) 236 | 237 | 238 | ### ⚠ BREAKING CHANGES 239 | 240 | * Update kube requirement from 0.78 to 0.79 ([#54](https://github.com/hendrikmaus/kube-leader-election/issues/54)) 241 | 242 | ### Features 243 | 244 | * Update kube requirement from 0.78 to 0.79 ([#54](https://github.com/hendrikmaus/kube-leader-election/issues/54)) ([ff03d19](https://github.com/hendrikmaus/kube-leader-election/commit/ff03d19929670d57a76067ca9305aa1167125117)) 245 | 246 | ## [0.19.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.18.0...v0.19.0) (2023-01-10) 247 | 248 | 249 | ### ⚠ BREAKING CHANGES 250 | 251 | * Update kube to 0.78 and k8s-openapi to 0.17 ([#52](https://github.com/hendrikmaus/kube-leader-election/issues/52)) 252 | 253 | ### Features 254 | 255 | * Update kube to 0.78 and k8s-openapi to 0.17 ([#52](https://github.com/hendrikmaus/kube-leader-election/issues/52)) ([d246856](https://github.com/hendrikmaus/kube-leader-election/commit/d24685682116bfac5dd3d892b0f32a444037fff0)) 256 | 257 | 258 | ### Bug Fixes 259 | 260 | * update kube dev dependency as well ([d95aa1f](https://github.com/hendrikmaus/kube-leader-election/commit/d95aa1f265f416a1954b354b4c15adbe2f88947c)) 261 | 262 | ## [0.18.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.17.0...v0.18.0) (2022-12-15) 263 | 264 | 265 | ### ⚠ BREAKING CHANGES 266 | 267 | * Update kube requirement from 0.76 to 0.77 ([#49](https://github.com/hendrikmaus/kube-leader-election/issues/49)) 268 | 269 | ### fest 270 | 271 | * Update kube requirement from 0.76 to 0.77 ([#49](https://github.com/hendrikmaus/kube-leader-election/issues/49)) ([7b63bff](https://github.com/hendrikmaus/kube-leader-election/commit/7b63bff0e2773ef9713ffb723984229276ca55c9)) 272 | 273 | ## [0.17.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.16.0...v0.17.0) (2022-10-31) 274 | 275 | 276 | ### Miscellaneous Chores 277 | 278 | * Update kube requirement from 0.75 to 0.76 ([#47](https://github.com/hendrikmaus/kube-leader-election/issues/47)) ([c31a68a](https://github.com/hendrikmaus/kube-leader-election/commit/c31a68ad8807bb64ad89f35b361a164bdc63b85a)) 279 | 280 | ## [0.16.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.15.0...v0.16.0) (2022-10-07) 281 | 282 | 283 | ### ⚠ BREAKING CHANGES 284 | 285 | * do not enable chrono default features since that results in a dependency on a vulerable crate (#45) 286 | 287 | ### Features 288 | 289 | * do not enable chrono default features since that results in a dependency on a vulerable crate ([#45](https://github.com/hendrikmaus/kube-leader-election/issues/45)) ([4bc1940](https://github.com/hendrikmaus/kube-leader-election/commit/4bc19409dfcb864911a942facc5fd7fb71dc0008)) 290 | 291 | ## 
[0.15.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.14.0...v0.15.0) (2022-09-22) 292 | 293 | 294 | ### ⚠ BREAKING CHANGES 295 | 296 | * update kube requirement from 0.74 to 0.75 (#43) 297 | 298 | ### Features 299 | 300 | * update kube requirement from 0.74 to 0.75 ([#43](https://github.com/hendrikmaus/kube-leader-election/issues/43)) ([c9a1ff9](https://github.com/hendrikmaus/kube-leader-election/commit/c9a1ff97708ad63ff418104d91dc138163f756c6)) 301 | 302 | ## [0.14.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.13.0...v0.14.0) (2022-07-28) 303 | 304 | 305 | ### Miscellaneous Chores 306 | 307 | * release 0.14.0 ([61d5cd9](https://github.com/hendrikmaus/kube-leader-election/commit/61d5cd9884c75836d276a09250b9649017dea06a)) 308 | 309 | ## [0.13.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.12.0...v0.13.0) (2022-05-26) 310 | 311 | 312 | ### ⚠ BREAKING CHANGES 313 | 314 | * update kube requirement from 0.72 to 0.73 (#38) 315 | 316 | ### Features 317 | 318 | * update kube requirement from 0.72 to 0.73 ([#38](https://github.com/hendrikmaus/kube-leader-election/issues/38)) ([c6ea466](https://github.com/hendrikmaus/kube-leader-election/commit/c6ea466f5e97b1c81f4920f8a093987cd1b41f14)) 319 | 320 | ## [0.12.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.11.0...v0.12.0) (2022-05-18) 321 | 322 | 323 | ### ⚠ BREAKING CHANGES 324 | 325 | * Update kube requirement from 0.71 to 0.72 (#35) 326 | 327 | ### Features 328 | 329 | * Update kube requirement from 0.71 to 0.72 ([#35](https://github.com/hendrikmaus/kube-leader-election/issues/35)) ([f190dc4](https://github.com/hendrikmaus/kube-leader-election/commit/f190dc4ae42f0ad43f19b8a47a4de37e7471d5bd)) 330 | 331 | 332 | ### Bug Fixes 333 | 334 | * Reduce severity of 'successfully renewed lease' logging to DEBUG ([#36](https://github.com/hendrikmaus/kube-leader-election/issues/36)) ([43f970d](https://github.com/hendrikmaus/kube-leader-election/commit/43f970d577f9e7e55abd5c3107e97740bd3d1309)) 335 | 336 | ## [0.11.0](https://github.com/hendrikmaus/kube-leader-election/compare/v0.10.2...v0.11.0) (2022-04-14) 337 | 338 | ### Miscellaneous Chores 339 | 340 | * **master:** release 0.11.0 ([b8f49e0](https://github.com/hendrikmaus/kube-leader-election/commit/b8f49e02357613bf7170fcc441d63df37f350eb4)) 341 | * Update kube requirement from 0.70 to 0.71 https://github.com/hendrikmaus/kube-leader-election/pull/32 342 | * Update kube requirement from 0.69 to 0.70 https://github.com/hendrikmaus/kube-leader-election/pull/31 343 | 344 | ### [0.10.2](https://github.com/hendrikmaus/kube-leader-election/compare/v0.10.1...v0.10.2) (2022-02-15) 345 | 346 | 347 | ### Bug Fixes 348 | 349 | * use caching in the release workflow ([#28](https://github.com/hendrikmaus/kube-leader-election/issues/28)) ([065e97f](https://github.com/hendrikmaus/kube-leader-election/commit/065e97f835ba7d8fa51ed5e5a014f8fd3c483c91)) 350 | 351 | ### 0.10.1 (2022-02-15) 352 | 353 | 354 | ### Bug Fixes 355 | 356 | * unify workflow names and job names ([#26](https://github.com/hendrikmaus/kube-leader-election/issues/26)) ([f2a04b4](https://github.com/hendrikmaus/kube-leader-election/commit/f2a04b43dd349aa355a416ee2ebcc9c1c7dd6896)) 357 | 358 | 359 | ### Miscellaneous Chores 360 | 361 | * release 0.10.1 ([232a6b1](https://github.com/hendrikmaus/kube-leader-election/commit/232a6b1e0e8befcf66e4946083285d2d150ab22e)) 362 | -------------------------------------------------------------------------------- /Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [package] 2 | name = "kube-leader-election" 3 | version = "0.41.0" 4 | edition = "2018" 5 | authors = ["Hendrik Maus "] 6 | description = "Leader election implementations for Kubernetes workloads" 7 | repository = "https://github.com/hendrikmaus/kube-leader-election" 8 | license = "MIT" 9 | 10 | [dependencies] 11 | chrono = { version = "0.4", default-features = false } 12 | kube = { version = "1", default-features = false, features = ["client"] } 13 | k8s-openapi = ">=0.25" 14 | serde = "1" 15 | serde_json = "1" 16 | thiserror = "1" 17 | log = "0.4" 18 | 19 | [dev-dependencies] 20 | anyhow = "1" 21 | tokio = { version = "1", features = ["macros", "rt-multi-thread"] } 22 | kube = "1.0" 23 | k8s-openapi = { version = ">=0.25", features = ["v1_32"] } 24 | env_logger = "0.10" 25 | rand = "0.8" 26 | cmd_lib = "1" 27 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Hendrik Maus 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Leader Election in Rust 2 | 3 | ![CI workflow](https://github.com/hendrikmaus/kube-leader-election/actions/workflows/ci.yaml/badge.svg) 4 | ![crates.io version](https://img.shields.io/crates/v/kube-leader-election) 5 | [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) 6 | 7 | This library provides simple leader election for Kubernetes workloads. 
8 | 9 | 10 | ```toml 11 | [dependencies] 12 | kube-leader-election = "0.41.0" 13 | ``` 14 | 15 | 16 | ## Example 17 | 18 | Acquire leadership on a Kubernetes [`Lease`](https://kubernetes.io/docs/reference/kubernetes-api/cluster-resources/lease-v1/) called `some-operator-lock`, in the `default` namespace and promise to renew the lock every 15 seconds: 19 | 20 | ```rust 21 | let leadership = LeaseLock::new( 22 | kube::Client::try_default().await?, 23 | "default", 24 | LeaseLockParams { 25 | holder_id: "some-operator".into(), 26 | lease_name: "some-operator-lock".into(), 27 | lease_ttl: Duration::from_secs(15), 28 | }, 29 | ); 30 | 31 | // Run this in a background task every 5 seconds 32 | // Share the result with the rest of your application; for example using Arc 33 | // See https://github.com/hendrikmaus/kube-leader-election/blob/master/examples/shared-lease.rs 34 | let lease = leadership.try_acquire_or_renew().await?; 35 | 36 | log::info!("currently leading: {}", lease.acquired_lease); 37 | ``` 38 | 39 | *Please refer to the [`examples`](https://github.com/hendrikmaus/kube-leader-election/tree/master/examples) for runnable usage demonstrations.* 40 | 41 | ## Features 42 | 43 | - Kubernetes `Lease` locking, similar to [client-go's leaderelection](https://pkg.go.dev/k8s.io/client-go/tools/leaderelection) 44 | 45 | ## Kubernetes `Lease` Locking 46 | 47 | A very basic form of leader election without fencing, i.e., only use this if your application can tolerate multiple replicas acting as leader for a short amount of time. 48 | 49 | This implementation uses a Kubernetes `Lease` resource from the API group `coordination.k8s.io`, which is locked and continuously renewed by the leading replica. The leaseholder, as well as all candidates, use timestamps to determine if a lease can be acquired. Therefore, this implementation is volatile to datetime skew within a cluster. 50 | 51 | Only use this implementation if you are aware of its downsides, and your workload can tolerate them. 52 | 53 | ## Contributing 54 | 55 | Pull requests are welcome. For major changes, please open an issue first to discuss what you would like to change. 56 | 57 | Please make sure to update tests as appropriate. 58 | 59 | ## License 60 | 61 | [MIT](https://choosealicense.com/licenses/mit/) -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # Leader Election Examples 2 | 3 | All examples require access to a Kubernetes cluster. 4 | 5 | You can create a local cluster from the root using `just start-test-cluster`; requires [`docker`](https://docker.com), [`just`](https://github.com/casey/just) and [`k3d`](https://k3d.io). 6 | 7 | ## Run an Example 8 | 9 | ```shell 10 | cargo run --example 11 | ``` 12 | 13 | ## Simple Lease - `simple-lease.rs` 14 | 15 | This example creates a Kubernetes `Lease` based locking mechanism, acquires leadership and steps down after a few seconds. 16 | 17 | ## Shared Lease - `shared-lease.rs` 18 | 19 | A more sophisticated usage example with the leader election running in a background process that updates an `AtomicBool` inside an `Arc`. 20 | 21 | Open up two shells and start a replica of the example in each of them. The first one will acquire the lock and be leading. The second one will be a follower. Now try to terminate the leading replica and wait at least 15 seconds to see the following acquiring the leadership. 
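One quick way to see what either example is doing is to inspect the `Lease` object it manages with `kubectl`; the names below are the ones hard-coded in the example sources (`simple-lease-example` and `shared-lease-example`, both in the `default` namespace):

```shell
# holder identity, renew time and lease duration change as the examples run
kubectl --namespace default get lease shared-lease-example --output yaml
```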
22 | -------------------------------------------------------------------------------- /examples/shared-lease.rs: -------------------------------------------------------------------------------- 1 | use kube_leader_election::{LeaseLock, LeaseLockParams}; 2 | use rand::distributions::Alphanumeric; 3 | use rand::Rng; 4 | use std::sync::atomic::{AtomicBool, Ordering}; 5 | use std::sync::Arc; 6 | use std::time::Duration; 7 | 8 | #[tokio::main] 9 | async fn main() -> anyhow::Result<()> { 10 | std::env::set_var( 11 | "RUST_LOG", 12 | std::env::var("RUST_LOG").unwrap_or_else(|_| "info".to_string()), 13 | ); 14 | env_logger::init(); 15 | 16 | // Shared across the application to allow other parts to determine if they can act as leader 17 | let is_leader = Arc::new(AtomicBool::new(false)); 18 | 19 | // Run leader election as background process 20 | { 21 | let is_leader = is_leader.clone(); 22 | 23 | tokio::spawn(async move { 24 | let client = kube::Client::try_default().await.unwrap(); 25 | 26 | // random id part for the sake of simulating something like a pod hash 27 | let random: String = rand::thread_rng() 28 | .sample_iter(&Alphanumeric) 29 | .take(7) 30 | .map(char::from) 31 | .collect(); 32 | let holder_id = format!("shared-lease-{}", random.to_lowercase()); 33 | 34 | let leadership = LeaseLock::new( 35 | client, 36 | "default", 37 | LeaseLockParams { 38 | holder_id, 39 | lease_name: "shared-lease-example".into(), 40 | lease_ttl: Duration::from_secs(15), 41 | }, 42 | ); 43 | 44 | loop { 45 | match leadership.try_acquire_or_renew().await { 46 | Ok(ll) => is_leader.store(ll.acquired_lease, Ordering::Relaxed), 47 | Err(err) => log::error!("{:?}", err), 48 | } 49 | tokio::time::sleep(Duration::from_secs(5)).await; 50 | } 51 | }); 52 | } 53 | 54 | loop { 55 | log::info!("currently leading: {}", is_leader.load(Ordering::Relaxed)); 56 | tokio::time::sleep(Duration::from_secs(5)).await; 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /examples/simple-lease.rs: -------------------------------------------------------------------------------- 1 | use kube_leader_election::{LeaseLock, LeaseLockParams}; 2 | use std::time::Duration; 3 | 4 | #[tokio::main] 5 | async fn main() -> anyhow::Result<()> { 6 | std::env::set_var( 7 | "RUST_LOG", 8 | std::env::var("RUST_LOG").unwrap_or_else(|_| "info".to_string()), 9 | ); 10 | env_logger::init(); 11 | 12 | // Configure the LeaseLock mechanism 13 | // 14 | // One should try to renew/acquire the lease before `lease_ttl` runs out. 15 | // E.g. if `lease_ttl` is set to 15 seconds, one should renew it every 5 seconds. 16 | let leadership = LeaseLock::new( 17 | kube::Client::try_default().await?, 18 | "default", 19 | LeaseLockParams { 20 | holder_id: "simple-lease".into(), 21 | lease_name: "simple-lease-example".into(), 22 | lease_ttl: Duration::from_secs(15), 23 | }, 24 | ); 25 | 26 | // Run this in a background task and share the result with the rest of your application 27 | let _lease = leadership.try_acquire_or_renew().await?; 28 | // `lease.acquired_lease` can be used to determine if we're leading or not 29 | 30 | log::info!("waiting 5 seconds, then stepping down again"); 31 | tokio::time::sleep(Duration::from_secs(5)).await; 32 | 33 | // To give up leadership, call `step_down`. 34 | // 35 | // This will set the current ttl on the lease to 1s and remove the current holder identity, 36 | // so all other candidates start to race for the lock on the lease. 
37 | leadership.step_down().await?; 38 | 39 | Ok(()) 40 | } 41 | -------------------------------------------------------------------------------- /justfile: -------------------------------------------------------------------------------- 1 | _default: 2 | just --list --unsorted 3 | 4 | # name for the local cluster 5 | cluster := "kube-leader-election" 6 | 7 | # start a local kubernetes cluster, if not running 8 | start-test-cluster: 9 | #!/usr/bin/env bash 10 | set -euo pipefail 11 | if ! k3d cluster list | grep -qF "{{cluster}}"; then 12 | k3d cluster create "{{cluster}}" --k3s-arg="--disable=traefik@server:0" 13 | else 14 | echo "Cluster already running" 15 | fi 16 | 17 | # clean the local cluster 18 | stop-test-cluster: 19 | #!/usr/bin/env bash 20 | set -euo pipefail 21 | if k3d cluster list | grep -qF "{{cluster}}"; then 22 | k3d cluster delete "{{cluster}}" 23 | else 24 | echo "Cluster already pruned" 25 | fi 26 | -------------------------------------------------------------------------------- /src/lease.rs: -------------------------------------------------------------------------------- 1 | use k8s_openapi::api::coordination::v1::Lease; 2 | use k8s_openapi::chrono::SecondsFormat; 3 | use kube::api::{PatchParams, PostParams}; 4 | use kube::error::ErrorResponse; 5 | use kube::ResourceExt; 6 | use serde_json::json; 7 | 8 | #[derive(thiserror::Error, Debug)] 9 | pub enum Error { 10 | #[error("could not get the Lease from Kubernetes: {0}")] 11 | GetLease(kube::Error), 12 | 13 | #[error("could not create the Lease in Kubernetes: {0}")] 14 | CreateLease(kube::Error), 15 | 16 | #[error("failed to acquire the Lease in Kubernetes: {0}")] 17 | AcquireLease(kube::Error), 18 | 19 | #[error("failed to renew the Lease in Kubernetes: {0}")] 20 | RenewLease(kube::Error), 21 | 22 | #[error("failed to traverse the Lease spec from Kubernetes at key `{key:}`")] 23 | TraverseLease { key: String }, 24 | 25 | #[error("got unexpected Kubernetes API error: {response:}")] 26 | ApiError { response: ErrorResponse }, 27 | 28 | #[error("aborted to release lock because we are not leading, the lock is held by {leader:}")] 29 | ReleaseLockWhenNotLeading { leader: String }, 30 | 31 | #[error("failed to release the lock in Kubernetes: {0}")] 32 | ReleaseLease(kube::Error), 33 | 34 | #[error("error deserializing response")] 35 | SerdeError(#[from] serde_json::Error), 36 | 37 | #[error(transparent)] 38 | Kube(#[from] kube::Error), 39 | } 40 | 41 | /// Represent a `LeaseLock` mechanism to try and acquire leadership status 42 | pub struct LeaseLock { 43 | /// Parameters to describe the Lease 44 | params: LeaseLockParams, 45 | 46 | /// Handle to interact with Kubernetes Leases 47 | lease_api: kube::Api<Lease>, 48 | } 49 | 50 | /// Parameters to create a `LeaseLock` lock 51 | #[derive(Debug)] 52 | pub struct LeaseLockParams { 53 | /// Name of the Kubernetes Lease resource 54 | pub lease_name: String, 55 | 56 | /// Identity of the entity which wants to acquire the lock on the Lease 57 | pub holder_id: String, 58 | 59 | /// Lifetime of the lease 60 | pub lease_ttl: std::time::Duration, 61 | } 62 | 63 | /// Result of a `try_acquire_or_renew` call on a `LeaseLock` 64 | #[derive(Default, Debug)] 65 | pub struct LeaseLockResult { 66 | /// Bool to indicate whether leadership was acquired 67 | pub acquired_lease: bool, 68 | 69 | /// The latest `Lease` resource 70 | pub lease: Option<Lease>, 71 | } 72 | 73 | impl LeaseLock { 74 | /// Create a new `LeaseLock` 75 | #[must_use] 76 | pub fn new(client: kube::Client, namespace: &str, params: 
LeaseLockParams) -> Self { 77 | LeaseLock { 78 | params, 79 | lease_api: kube::Api::namespaced(client, namespace), 80 | } 81 | } 82 | 83 | /// Try to acquire the lock on the Kubernetes 'Lease' resource. 84 | /// 85 | /// Returns `LeaseLockResult` with information on the current state. 86 | pub async fn try_acquire_or_renew(&self) -> Result<LeaseLockResult, Error> { 87 | return match self.lease_api.get(&self.params.lease_name).await { 88 | Ok(l) => { 89 | if self.are_we_leading(&l)? { 90 | let lease = self.renew_lease().await?; 91 | log::debug!("successfully renewed lease {}", l.name_any()); 92 | 93 | Ok(LeaseLockResult { 94 | acquired_lease: true, 95 | lease: Some(lease), 96 | }) 97 | } else if self.has_lease_expired(&l)? { 98 | let lease = self.acquire_lease(&l).await?; 99 | log::info!("successfully acquired lease {}", lease.name_any()); 100 | 101 | Ok(LeaseLockResult { 102 | acquired_lease: true, 103 | lease: Some(lease), 104 | }) 105 | } else { 106 | log::info!( 107 | "lease is held by {} and has not yet expired", 108 | l.spec 109 | .as_ref() 110 | .ok_or(Error::TraverseLease { 111 | key: "spec".to_string() 112 | })? 113 | .holder_identity 114 | .as_ref() 115 | .ok_or(Error::TraverseLease { 116 | key: "spec.holderIdentity".to_string() 117 | })? 118 | ); 119 | 120 | Ok(LeaseLockResult { 121 | acquired_lease: false, 122 | lease: None, 123 | }) 124 | } 125 | } 126 | Err(kube::Error::Api(api_err)) => { 127 | if api_err.code != 404 { 128 | return Err(Error::ApiError { response: api_err }); 129 | } 130 | 131 | let lease = self.create_lease().await?; 132 | log::info!("successfully acquired lease {}", lease.name_any()); 133 | 134 | Ok(LeaseLockResult { 135 | acquired_lease: true, 136 | lease: Some(lease), 137 | }) 138 | } 139 | Err(e) => Err(e.into()), 140 | }; 141 | } 142 | 143 | /// Helper to determine if the current lease identity has leadership 144 | fn are_we_leading(&self, lease: &Lease) -> Result<bool, Error> { 145 | let holder_id = lease 146 | .spec 147 | .as_ref() 148 | .ok_or(Error::TraverseLease { 149 | key: "spec".to_string(), 150 | })? 151 | .holder_identity 152 | .as_ref() 153 | .ok_or(Error::TraverseLease { 154 | key: "spec.holderIdentity".to_string(), 155 | })?; 156 | 157 | Ok(holder_id.eq(&self.params.holder_id)) 158 | } 159 | 160 | /// Helper to determine if the given lease has expired and can be acquired 161 | fn has_lease_expired(&self, lease: &Lease) -> Result<bool, Error> { 162 | let now = chrono::Utc::now(); 163 | let spec = lease.spec.as_ref().ok_or(Error::TraverseLease { 164 | key: "spec".to_string(), 165 | })?; 166 | let last_renewed = spec 167 | .renew_time 168 | .as_ref() 169 | .ok_or(Error::TraverseLease { 170 | key: "spec.renewTime".to_string(), 171 | })? 
172 | .0; 173 | let lease_duration = spec 174 | .lease_duration_seconds 175 | .as_ref() 176 | .ok_or(Error::TraverseLease { 177 | key: "spec.leaseDurationSeconds".to_string(), 178 | })?; 179 | let timeout = last_renewed + chrono::Duration::seconds(*lease_duration as i64); 180 | 181 | Ok(now.gt(&timeout)) 182 | } 183 | 184 | /// Create a `Lease` resource in Kubernetes 185 | async fn create_lease(&self) -> Result<Lease, Error> { 186 | let now: &str = &chrono::Utc::now().to_rfc3339_opts(SecondsFormat::Micros, false); 187 | 188 | let lease: Lease = serde_json::from_value(json!({ 189 | "apiVersion": "coordination.k8s.io/v1", 190 | "kind": "Lease", 191 | "metadata": { "name": &self.params.lease_name }, 192 | "spec": { 193 | "acquireTime": now, 194 | "renewTime": now, 195 | "holderIdentity": &self.params.holder_id, 196 | "leaseDurationSeconds": self.params.lease_ttl.as_secs(), 197 | "leaseTransitions": 0 198 | } 199 | }))?; 200 | 201 | self.lease_api 202 | .create(&PostParams::default(), &lease) 203 | .await 204 | .map_err(Error::CreateLease) 205 | } 206 | 207 | /// Acquire the `Lease` resource 208 | async fn acquire_lease(&self, lease: &Lease) -> Result<Lease, Error> { 209 | let now: &str = &chrono::Utc::now().to_rfc3339_opts(SecondsFormat::Micros, false); 210 | let transitions = &lease 211 | .spec 212 | .as_ref() 213 | .ok_or(Error::TraverseLease { 214 | key: "spec".to_string(), 215 | })? 216 | .lease_transitions 217 | .ok_or(Error::TraverseLease { 218 | key: "spec.leaseTransitions".to_string(), 219 | })?; 220 | 221 | let patch = json!({ 222 | "apiVersion": "coordination.k8s.io/v1", 223 | "kind": "Lease", 224 | "metadata": { "name": &self.params.lease_name }, 225 | "spec": { 226 | "acquireTime": now, 227 | "renewTime": now, 228 | "leaseTransitions": transitions + 1, 229 | "holderIdentity": &self.params.holder_id, 230 | "leaseDurationSeconds": self.params.lease_ttl.as_secs(), 231 | } 232 | }); 233 | let patch = kube::api::Patch::Merge(&patch); 234 | 235 | self.lease_api 236 | .patch( 237 | &self.params.lease_name, 238 | &PatchParams::apply(&self.params.holder_id), 239 | &patch, 240 | ) 241 | .await 242 | .map_err(Error::AcquireLease) 243 | } 244 | 245 | /// Renew the `Lease` resource 246 | async fn renew_lease(&self) -> Result<Lease, Error> { 247 | let patch = json!({ 248 | "apiVersion": "coordination.k8s.io/v1", 249 | "kind": "Lease", 250 | "metadata": { "name": &self.params.lease_name }, 251 | "spec": { 252 | "renewTime": chrono::Utc::now().to_rfc3339_opts(SecondsFormat::Micros, false), 253 | "leaseDurationSeconds": self.params.lease_ttl.as_secs(), 254 | } 255 | }); 256 | let patch = kube::api::Patch::Merge(&patch); 257 | 258 | self.lease_api 259 | .patch( 260 | &self.params.lease_name, 261 | &PatchParams::apply(&self.params.holder_id), 262 | &patch, 263 | ) 264 | .await 265 | .map_err(Error::RenewLease) 266 | } 267 | 268 | /// Release the lock if we hold it 269 | pub async fn step_down(&self) -> Result<(), Error> { 270 | let lease = self 271 | .lease_api 272 | .get(&self.params.lease_name) 273 | .await 274 | .map_err(Error::GetLease)?; 275 | 276 | if !self.are_we_leading(&lease)? { 277 | let leader = lease 278 | .spec 279 | .ok_or(Error::TraverseLease { 280 | key: "spec".to_string(), 281 | })? 
282 | .holder_identity 283 | .ok_or(Error::TraverseLease { 284 | key: "spec.holderIdentity".to_string(), 285 | })?; 286 | return Err(Error::ReleaseLockWhenNotLeading { leader }); 287 | } 288 | 289 | let now: &str = &chrono::Utc::now().to_rfc3339_opts(SecondsFormat::Micros, false); 290 | let patch = json!({ 291 | "apiVersion": "coordination.k8s.io/v1", 292 | "kind": "Lease", 293 | "metadata": { "name": &self.params.lease_name }, 294 | "spec": { 295 | "acquireTime": now, 296 | "renewTime": now, 297 | "leaseDurationSeconds": 1, 298 | "holderIdentity": "" 299 | } 300 | }); 301 | let patch = kube::api::Patch::Merge(&patch); 302 | 303 | self.lease_api 304 | .patch( 305 | &self.params.lease_name, 306 | &PatchParams::apply(&self.params.holder_id), 307 | &patch, 308 | ) 309 | .await 310 | .map_err(Error::ReleaseLease)?; 311 | 312 | log::info!("successfully released lease {}", lease.name_any()); 313 | 314 | Ok(()) 315 | } 316 | } 317 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Crate for implementing leader election in Kubernetes operators 2 | //! 3 | //! # Example 4 | //! 5 | //! The following example uses a Kubernetes `Lease` to implement leader election, acquires leadership, 6 | //! waits a little while and steps down again. 7 | //! 8 | //! ```rust,no_run 9 | //!use kube_leader_election::{LeaseLock, LeaseLockParams}; 10 | //!use std::time::Duration; 11 | //! 12 | //!#[tokio::main] 13 | //!async fn main() -> anyhow::Result<()> { 14 | //! std::env::set_var( 15 | //! "RUST_LOG", 16 | //! std::env::var("RUST_LOG").unwrap_or_else(|_| "info".to_string()), 17 | //! ); 18 | //! env_logger::init(); 19 | //! 20 | //! // Configure the LeaseLock mechanism 21 | //! // 22 | //! // One should try to renew/acquire the lease before `lease_ttl` runs out. 23 | //! // E.g. if `lease_ttl` is set to 15 seconds, one should renew it every 5 seconds. 24 | //! let leadership = LeaseLock::new( 25 | //! kube::Client::try_default().await?, 26 | //! "default", 27 | //! LeaseLockParams { 28 | //! holder_id: "simple-lease".into(), 29 | //! lease_name: "simple-lease-example".into(), 30 | //! lease_ttl: Duration::from_secs(15), 31 | //! }, 32 | //! ); 33 | //! 34 | //! // Run this in a background task and share the result with the rest of your application 35 | //! let _lease = leadership.try_acquire_or_renew().await?; 36 | //! // `lease.acquired_lease` can be used to determine if we're leading or not 37 | //! 38 | //! log::info!("waiting 5 seconds, then stepping down again"); 39 | //! tokio::time::sleep(Duration::from_secs(5)).await; 40 | //! 41 | //! // To give up leadership, call `step_down`. 42 | //! // 43 | //! // This will set the current ttl on the lease to 1s and remove the current holder identity, 44 | //! // so all other candidates start to race for the lock on the lease. 45 | //! leadership.step_down().await?; 46 | //! 47 | //! Ok(()) 48 | //!} 49 | //!``` 50 | //! 51 | //! Please refer to the [examples](./examples) for more details. 52 | 53 | #![deny(unsafe_code)] 54 | 55 | mod lease; 56 | 57 | pub use lease::{Error, LeaseLock, LeaseLockParams, LeaseLockResult}; 58 | -------------------------------------------------------------------------------- /tests/README.md: -------------------------------------------------------------------------------- 1 | # Leader Election Tests 2 | 3 | All tests require a running Kubernetes cluster and `kubectl` to be present, pointed to the test cluster. 
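With a cluster available and `kubectl` pointing at it, the whole suite runs with a plain `cargo test`; every test creates its own namespace and deletes it again when it finishes, so the tests do not interfere with each other:

```shell
cargo test
```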
4 | 5 | You can create a local cluster from the root using `just start-test-cluster`; requires [`docker`](https://docker.com), [`just`](https://github.com/casey/just) and [`k3d`](https://k3d.io). 6 | -------------------------------------------------------------------------------- /tests/create-lease.rs: -------------------------------------------------------------------------------- 1 | mod utils; 2 | 3 | use crate::utils::KubeTestUtil; 4 | use k8s_openapi::api::coordination::v1::Lease; 5 | use kube_leader_election::{LeaseLock, LeaseLockParams}; 6 | use std::time::Duration; 7 | 8 | #[tokio::test] 9 | async fn create_lease() -> anyhow::Result<()> { 10 | const NAMESPACE: &str = "create-lease"; 11 | const LEASE_NAME: &str = "create-lease-test"; 12 | const HOLDER_ID: &str = "create-lease-test-holder"; 13 | 14 | KubeTestUtil::create_namespace(NAMESPACE)?; 15 | KubeTestUtil::delete_lease(NAMESPACE, LEASE_NAME)?; 16 | 17 | let client = kube::Client::try_default().await?; 18 | let leases: kube::Api<Lease> = kube::Api::namespaced(client.clone(), NAMESPACE); 19 | 20 | // start by asserting that we have no lease resource in the cluster 21 | let lease = leases.get(LEASE_NAME).await; 22 | assert!(lease.is_err()); 23 | 24 | // next create a lease 25 | let leadership = LeaseLock::new( 26 | client.clone(), 27 | NAMESPACE, 28 | LeaseLockParams { 29 | holder_id: HOLDER_ID.into(), 30 | lease_name: LEASE_NAME.into(), 31 | lease_ttl: Duration::from_secs(15), 32 | }, 33 | ); 34 | leadership.try_acquire_or_renew().await?; 35 | 36 | // assert that the lease was created 37 | let lease = leases.get(LEASE_NAME).await?; 38 | assert_eq!(LEASE_NAME, lease.metadata.name.unwrap()); 39 | assert_eq!( 40 | HOLDER_ID, 41 | lease 42 | .spec 43 | .as_ref() 44 | .unwrap() 45 | .holder_identity 46 | .as_ref() 47 | .unwrap() 48 | ); 49 | assert_eq!( 50 | 15, 51 | *lease 52 | .spec 53 | .as_ref() 54 | .unwrap() 55 | .lease_duration_seconds 56 | .as_ref() 57 | .unwrap() 58 | ); 59 | assert_eq!( 60 | 0, 61 | *lease 62 | .spec 63 | .as_ref() 64 | .unwrap() 65 | .lease_transitions 66 | .as_ref() 67 | .unwrap() 68 | ); 69 | 70 | KubeTestUtil::delete_namespace(NAMESPACE)?; 71 | 72 | Ok(()) 73 | } 74 | -------------------------------------------------------------------------------- /tests/leader-election.rs: -------------------------------------------------------------------------------- 1 | mod utils; 2 | 3 | use crate::utils::KubeTestUtil; 4 | use kube_leader_election::{LeaseLock, LeaseLockParams}; 5 | use std::time::Duration; 6 | 7 | #[tokio::test] 8 | async fn leader_election() -> anyhow::Result<()> { 9 | const NAMESPACE: &str = "leader-election"; 10 | const LEASE_NAME: &str = "leader-election-test"; 11 | const HOLDER_ID_01: &str = "leader-election-test-holder01"; 12 | const HOLDER_ID_02: &str = "leader-election-test-holder02"; 13 | 14 | KubeTestUtil::create_namespace(NAMESPACE)?; 15 | KubeTestUtil::delete_lease(NAMESPACE, LEASE_NAME)?; 16 | 17 | let client = kube::Client::try_default().await?; 18 | 19 | let leadership_01 = LeaseLock::new( 20 | client.clone(), 21 | NAMESPACE, 22 | LeaseLockParams { 23 | holder_id: HOLDER_ID_01.into(), 24 | lease_name: LEASE_NAME.into(), 25 | lease_ttl: Duration::from_secs(15), 26 | }, 27 | ); 28 | leadership_01.try_acquire_or_renew().await?; 29 | 30 | // HOLDER_ID_01 is now leading, so HOLDER_ID_02 should follow 31 | 32 | let leadership_02 = LeaseLock::new( 33 | client.clone(), 34 | NAMESPACE, 35 | LeaseLockParams { 36 | holder_id: HOLDER_ID_02.into(), 37 | lease_name: LEASE_NAME.into(), 38 | lease_ttl: 
Duration::from_secs(15), 39 | }, 40 | ); 41 | let res = leadership_02.try_acquire_or_renew().await?; 42 | assert_eq!(false, res.acquired_lease); 43 | 44 | // now HOLDER_ID_01 will release the lock 45 | 46 | leadership_01.step_down().await?; 47 | 48 | // since the new lease ttl is 1 second, we should be able to acquire it if we wait 2s 49 | tokio::time::sleep(Duration::from_secs(2)).await; 50 | 51 | let res = leadership_02.try_acquire_or_renew().await?; 52 | assert!(res.acquired_lease); 53 | 54 | KubeTestUtil::delete_namespace(NAMESPACE)?; 55 | 56 | Ok(()) 57 | } 58 | -------------------------------------------------------------------------------- /tests/release-lease.rs: -------------------------------------------------------------------------------- 1 | mod utils; 2 | 3 | use crate::utils::KubeTestUtil; 4 | use k8s_openapi::api::coordination::v1::Lease; 5 | use kube_leader_election::{LeaseLock, LeaseLockParams}; 6 | use std::time::Duration; 7 | 8 | #[tokio::test] 9 | async fn release_lease() -> anyhow::Result<()> { 10 | const NAMESPACE: &str = "release-lease"; 11 | const LEASE_NAME: &str = "release-lease-test"; 12 | const HOLDER_ID: &str = "release-lease-test-holder"; 13 | 14 | KubeTestUtil::create_namespace(NAMESPACE)?; 15 | KubeTestUtil::delete_lease(NAMESPACE, LEASE_NAME)?; 16 | 17 | let client = kube::Client::try_default().await?; 18 | let leases: kube::Api<Lease> = kube::Api::namespaced(client.clone(), NAMESPACE); 19 | 20 | let leadership = LeaseLock::new( 21 | client.clone(), 22 | NAMESPACE, 23 | LeaseLockParams { 24 | holder_id: HOLDER_ID.into(), 25 | lease_name: LEASE_NAME.into(), 26 | lease_ttl: Duration::from_secs(15), 27 | }, 28 | ); 29 | leadership.try_acquire_or_renew().await?; 30 | 31 | // assert that the lease was created 32 | let lease = leases.get(LEASE_NAME).await?; 33 | assert_eq!(LEASE_NAME, lease.metadata.name.unwrap()); 34 | 35 | // release lease 36 | leadership.step_down().await?; 37 | 38 | let lease = leases.get(LEASE_NAME).await?; 39 | assert_eq!( 40 | "", 41 | lease 42 | .spec 43 | .as_ref() 44 | .unwrap() 45 | .holder_identity 46 | .as_ref() 47 | .unwrap() 48 | ); 49 | 50 | assert_eq!( 51 | 1, 52 | *lease 53 | .spec 54 | .as_ref() 55 | .unwrap() 56 | .lease_duration_seconds 57 | .as_ref() 58 | .unwrap() 59 | ); 60 | 61 | tokio::time::sleep(Duration::from_secs(2)).await; 62 | 63 | // if we re-acquire the lease, its duration is increased from 1 to 15 again 64 | let lease = leadership.try_acquire_or_renew().await?.lease.unwrap(); 65 | 66 | assert_eq!( 67 | 15, 68 | *lease 69 | .spec 70 | .as_ref() 71 | .unwrap() 72 | .lease_duration_seconds 73 | .as_ref() 74 | .unwrap() 75 | ); 76 | 77 | KubeTestUtil::delete_namespace(NAMESPACE)?; 78 | 79 | Ok(()) 80 | } 81 | -------------------------------------------------------------------------------- /tests/utils.rs: -------------------------------------------------------------------------------- 1 | use cmd_lib::run_cmd; 2 | 3 | pub struct KubeTestUtil; 4 | 5 | impl KubeTestUtil { 6 | /// Create a Kubernetes namespace, if it doesn't exist 7 | pub fn create_namespace(namespace: &str) -> anyhow::Result<()> { 8 | if let Err(_) = run_cmd!(kubectl get namespace ${namespace}) { 9 | run_cmd!(kubectl create namespace ${namespace} > /dev/null 2>&1)?; 10 | } 11 | 12 | Ok(()) 13 | } 14 | 15 | /// Delete a Kubernetes namespace, if it exists, and exit immediately, without waiting for completion 16 | pub fn delete_namespace(namespace: &str) -> anyhow::Result<()> { 17 | if let Ok(_) = run_cmd!(kubectl get namespace ${namespace}) { 18 | 
run_cmd!(kubectl delete namespace ${namespace} --wait=false > /dev/null 2>&1)?; 19 | } 20 | 21 | Ok(()) 22 | } 23 | 24 | /// Delete a Kubernetes Lease, if it exists 25 | pub fn delete_lease(namespace: &str, lease_name: &str) -> anyhow::Result<()> { 26 | if let Ok(_) = run_cmd!(kubectl --namespace ${namespace} get lease ${lease_name}) { 27 | run_cmd!(kubectl --namespace ${namespace} delete lease ${lease_name} > /dev/null 2>&1)?; 28 | } 29 | 30 | Ok(()) 31 | } 32 | } 33 | --------------------------------------------------------------------------------