├── .circleci └── config.yml ├── .codecov.yml ├── .dockerignore ├── .github ├── FUNDING.yml ├── dependabot.yml ├── kubernix.png └── kubernix.svg ├── .gitignore ├── .rustfmt.toml ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── contrib ├── crio-master-overlay.nix └── prepare-system ├── nix ├── build.nix ├── default.nix ├── derivations │ └── cargo-kcov.nix ├── nixpkgs.json ├── nixpkgs.nix └── overlay.nix ├── src ├── apiserver.rs ├── assets │ ├── Dockerfile │ ├── apiserver.yml │ ├── coredns.yml │ ├── crio.conf │ ├── encryptionconfig.yml │ ├── kubelet.yml │ ├── podman-bridge.json │ ├── policy.json │ ├── proxy.yml │ └── scheduler.yml ├── config.rs ├── container.rs ├── controllermanager.rs ├── coredns.rs ├── crio.rs ├── encryptionconfig.rs ├── etcd.rs ├── kubeconfig.rs ├── kubectl.rs ├── kubelet.rs ├── lib.rs ├── logger.rs ├── main.rs ├── network.rs ├── nix.rs ├── node.rs ├── pki.rs ├── podman.rs ├── process.rs ├── progress.rs ├── proxy.rs ├── scheduler.rs └── system.rs └── tests ├── common.rs ├── e2e.rs └── integration.rs /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 2.1 3 | 4 | stdenv: &stdenv 5 | environment: 6 | CONTAINER_RUNTIME: docker 7 | IMAGE: docker.io/saschagrunert/kubernix:latest 8 | LOCAL_IMAGE: &local_image image.tar 9 | 10 | executors: 11 | container: 12 | docker: 13 | - image: &image nixos/nix:2.3 14 | <<: *stdenv 15 | 16 | container-musl: 17 | docker: 18 | - image: ekidd/rust-musl-builder:stable 19 | <<: *stdenv 20 | 21 | machine: 22 | machine: 23 | image: ubuntu-1604:201903-01 24 | <<: *stdenv 25 | 26 | workflows: 27 | version: 2 28 | pipeline: 29 | jobs: 30 | - build 31 | - build-release 32 | - build-static 33 | - build-image 34 | - deploy-image: 35 | requires: 36 | - build-image 37 | filters: 38 | branches: 39 | only: master 40 | - deploy: 41 | requires: 42 | - build-static 43 | filters: 44 | branches: 45 | ignore: /.*/ 46 | tags: 47 | only: /v.*/ 48 | - docs 49 | - docs-publish: 50 | requires: 51 | - docs 52 | filters: 53 | branches: 54 | only: master 55 | - lint-clippy 56 | - lint-rustfmt 57 | - machine-run: 58 | name: test-e2e-local-single-node 59 | cache: v10 60 | run: make test-e2e ARGS=local_single_node 61 | requires: 62 | - build-release 63 | - machine-run: 64 | name: test-integration-local-single-node 65 | cache: v15 66 | run: make test-integration ARGS=local_single_node 67 | requires: 68 | - build-release 69 | - machine-run: 70 | name: test-unit 71 | cache: v16 72 | run: | 73 | make coverage 74 | bash <(curl -s https://codecov.io/bash) 75 | 76 | prepare-env: &prepare-env 77 | run: 78 | name: Install dependencies 79 | command: | 80 | apk add bash ca-certificates curl file findutils git jq make openssh 81 | 82 | static-binary: &static-binary target/x86_64-unknown-linux-musl/release/kubernix 83 | 84 | jobs: 85 | build: 86 | executor: container 87 | steps: 88 | - <<: *prepare-env 89 | - checkout 90 | - restore_cache: 91 | keys: 92 | - v1-build-{{ checksum "nix/nixpkgs.json" }}-{{ checksum "Cargo.lock" }} 93 | - run: 94 | name: build 95 | command: make 96 | - save_cache: 97 | key: v1-build-{{ checksum "nix/nixpkgs.json" }}-{{ checksum "Cargo.lock" }} 98 | paths: 99 | - /nix 100 | - target 101 | - ~/.cargo 102 | 103 | build-release: 104 | executor: container 105 | steps: 106 | - <<: *prepare-env 107 | - checkout 108 | - restore_cache: 109 | keys: 110 | - v1-build-release-{{ checksum "nix/nixpkgs.json" }}-{{ checksum "Cargo.lock" }} 111 | - run: 112 | name: 
build release 113 | command: make build-release 114 | - persist_to_workspace: 115 | root: . 116 | paths: 117 | - target/release/kubernix 118 | - save_cache: 119 | key: v1-build-release-{{ checksum "nix/nixpkgs.json" }}-{{ checksum "Cargo.lock" }} 120 | paths: 121 | - /nix 122 | - target 123 | - ~/.cargo 124 | 125 | build-static: 126 | executor: container-musl 127 | steps: 128 | - checkout 129 | - restore_cache: 130 | keys: 131 | - v1-build-release-static-{{ checksum "Cargo.lock" }} 132 | - run: 133 | name: build static release 134 | command: cargo build --release 135 | - save_cache: 136 | key: v1-build-release-static-{{ checksum "Cargo.lock" }} 137 | paths: 138 | - target 139 | - ~/.cargo 140 | - persist_to_workspace: 141 | root: . 142 | paths: 143 | - *static-binary 144 | - store_artifacts: 145 | path: *static-binary 146 | 147 | deploy: 148 | executor: container 149 | steps: 150 | - <<: *prepare-env 151 | - checkout 152 | - attach_workspace: 153 | at: . 154 | - run: 155 | name: Deploy static release 156 | command: | 157 | ID=$(curl https://$GITHUB_TOKEN:@api.$SLUG/latest | jq .id) 158 | curl -H "Authorization: token $GITHUB_TOKEN" \ 159 | -H "Content-Type: $(file -b --mime-type $FILE)" \ 160 | --data-binary @$FILE \ 161 | "https://uploads.$SLUG/$ID/assets?name=$(basename $FILE)" 162 | environment: 163 | FILE: *static-binary 164 | SLUG: github.com/repos/saschagrunert/kubernix/releases 165 | 166 | deploy-image: 167 | executor: machine 168 | steps: 169 | - attach_workspace: 170 | at: . 171 | - run: 172 | name: Load the image 173 | command: docker load -i $LOCAL_IMAGE 174 | - run: 175 | name: Login to registry 176 | command: docker login -u $DOCKER_USER -p $DOCKER_PASS 177 | - run: 178 | name: Push the image 179 | command: docker push $IMAGE 180 | 181 | build-image: 182 | executor: machine 183 | steps: 184 | - checkout 185 | - run: 186 | name: Build the image 187 | command: make build-image 188 | - run: 189 | name: Save the image 190 | command: docker save $IMAGE -o $LOCAL_IMAGE 191 | - persist_to_workspace: 192 | root: . 193 | paths: 194 | - *local_image 195 | 196 | docs: 197 | executor: container 198 | steps: 199 | - <<: *prepare-env 200 | - checkout 201 | - restore_cache: 202 | keys: 203 | - v1-docs-{{ checksum "nix/nixpkgs.json" }}-{{ checksum "Cargo.lock" }} 204 | - run: 205 | name: documentation 206 | command: make docs 207 | - save_cache: 208 | key: v1-docs-{{ checksum "nix/nixpkgs.json" }}-{{ checksum "Cargo.lock" }} 209 | paths: 210 | - /nix 211 | - target 212 | - ~/.cargo 213 | - persist_to_workspace: 214 | root: . 215 | paths: 216 | - target/doc 217 | 218 | docs-publish: 219 | executor: container 220 | steps: 221 | - <<: *prepare-env 222 | - add_ssh_keys: 223 | fingerprints: 224 | - eb:6f:46:b8:ee:6f:17:76:9d:aa:91:39:e6:37:de:4c 225 | - checkout 226 | - run: 227 | name: Setup git 228 | command: | 229 | git config --global user.email mail@saschagrunert.de 230 | git config --global user.name "CircleCI" 231 | - attach_workspace: 232 | at: . 233 | - run: 234 | name: Deploy documentation 235 | command: | 236 | git fetch origin gh-pages 237 | git checkout -f gh-pages 238 | rm -rf doc 239 | mv target/doc . 240 | git add . 
241 | git diff-index --quiet HEAD || git commit -m 'Update documentation' 242 | git push -f origin gh-pages 243 | 244 | lint-clippy: 245 | executor: container 246 | steps: 247 | - <<: *prepare-env 248 | - checkout 249 | - restore_cache: 250 | keys: 251 | - v1-lint-clippy-{{ checksum "nix/nixpkgs.json" }}-{{ checksum "Cargo.lock" }} 252 | - run: 253 | name: clippy 254 | command: make lint-clippy 255 | - save_cache: 256 | key: v1-lint-clippy-{{ checksum "nix/nixpkgs.json" }}-{{ checksum "Cargo.lock" }} 257 | paths: 258 | - /nix 259 | - target 260 | - ~/.cargo 261 | 262 | lint-rustfmt: 263 | executor: container 264 | steps: 265 | - <<: *prepare-env 266 | - checkout 267 | - restore_cache: 268 | keys: 269 | - v1-lint-rustfmt-{{ checksum "nix/nixpkgs.json" }}-{{ checksum "Cargo.lock" }} 270 | - run: 271 | name: rustfmt 272 | command: make lint-rustfmt 273 | - save_cache: 274 | key: v1-lint-rustfmt-{{ checksum "nix/nixpkgs.json" }}-{{ checksum "Cargo.lock" }} 275 | paths: 276 | - /nix 277 | - target 278 | - ~/.cargo 279 | 280 | machine-run: 281 | executor: machine 282 | parameters: 283 | run: 284 | type: string 285 | cache: 286 | type: string 287 | steps: 288 | - checkout 289 | - run: 290 | name: Prepare cache dir 291 | command: | 292 | sudo mkdir /nix 293 | sudo chown $(id -u):$(id -g) /nix 294 | - restore_cache: 295 | keys: 296 | - << parameters.cache >>-machine-run-{{ checksum "nix/nixpkgs.json" }}-{{ checksum "Cargo.lock" }} 297 | - attach_workspace: 298 | at: . 299 | - run: 300 | name: Install Nix 301 | command: curl -L https://nixos.org/nix/install | sh 302 | - run: 303 | name: Set hostname 304 | command: | 305 | echo "127.0.0.1 test" | sudo tee -a /etc/hosts 306 | sudo hostnamectl set-hostname test 307 | - run: 308 | name: Prepare the system 309 | command: sudo contrib/prepare-system 310 | - run: 311 | name: Run the tests 312 | command: << parameters.run >> 313 | environment: 314 | BASH_ENV: ~/.nix-profile/etc/profile.d/nix.sh 315 | no_output_timeout: 60m 316 | - run: 317 | name: Fixup cache 318 | command: sudo chown -R $(id -u):$(id -g) /nix 319 | - save_cache: 320 | key: << parameters.cache >>-machine-run-{{ checksum "nix/nixpkgs.json" }}-{{ checksum "Cargo.lock" }} 321 | paths: 322 | - /nix 323 | - target 324 | - ~/.cargo 325 | -------------------------------------------------------------------------------- /.codecov.yml: -------------------------------------------------------------------------------- 1 | --- 2 | codecov: 3 | notify: 4 | after_n_builds: 1 5 | require_ci_to_pass: false 6 | 7 | coverage: 8 | precision: 1 9 | round: down 10 | range: 50..75 11 | 12 | status: 13 | project: true 14 | patch: false 15 | changes: false 16 | 17 | comment: 18 | layout: "header, diff" 19 | behavior: default 20 | require_changes: false 21 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | *.rs.bk 2 | .git 3 | .kube 4 | /kubernix* 5 | /target 6 | /test-* 7 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: 2 | - saschagrunert 3 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: cargo 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 
| time: "11:00" 8 | open-pull-requests-limit: 10 9 | allow: 10 | - dependency-type: direct 11 | - dependency-type: indirect 12 | -------------------------------------------------------------------------------- /.github/kubernix.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/saschagrunert/kubernix/630087e023e403d461c4bb8b1c9368b26a2c0744/.github/kubernix.png -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.rs.bk 2 | *.orig 3 | .kube 4 | /target 5 | /test-* 6 | kubernix* 7 | -------------------------------------------------------------------------------- /.rustfmt.toml: -------------------------------------------------------------------------------- 1 | max_width = 100 2 | hard_tabs = false 3 | tab_spaces = 4 4 | newline_style = "Auto" 5 | use_small_heuristics = "Default" 6 | indent_style = "Block" 7 | wrap_comments = true 8 | format_code_in_doc_comments = true 9 | comment_width = 80 10 | normalize_comments = true 11 | normalize_doc_attributes = true 12 | license_template_path = "" 13 | format_strings = true 14 | format_macro_matchers = true 15 | format_macro_bodies = true 16 | empty_item_single_line = true 17 | struct_lit_single_line = true 18 | fn_single_line = true 19 | where_single_line = true 20 | imports_indent = "Block" 21 | imports_layout = "Mixed" 22 | merge_imports = false 23 | reorder_imports = true 24 | reorder_modules = true 25 | reorder_impl_items = true 26 | type_punctuation_density = "Wide" 27 | space_before_colon = false 28 | space_after_colon = true 29 | spaces_around_ranges = false 30 | binop_separator = "Front" 31 | remove_nested_parens = true 32 | combine_control_expr = true 33 | overflow_delimited_expr = false 34 | struct_field_align_threshold = 0 35 | enum_discrim_align_threshold = 0 36 | match_arm_blocks = true 37 | force_multiline_blocks = false 38 | fn_args_layout = "Tall" 39 | brace_style = "SameLineWhere" 40 | control_brace_style = "AlwaysSameLine" 41 | trailing_semicolon = true 42 | trailing_comma = "Vertical" 43 | match_block_trailing_comma = false 44 | blank_lines_upper_bound = 1 45 | blank_lines_lower_bound = 0 46 | edition = "2018" 47 | version = "One" 48 | inline_attribute_width = 0 49 | merge_derives = true 50 | use_try_shorthand = true 51 | use_field_init_shorthand = true 52 | force_explicit_abi = true 53 | condense_wildcard_suffixes = false 54 | color = "Auto" 55 | required_version = "1.4.4" 56 | unstable_features = true 57 | disable_all_formatting = false 58 | skip_children = false 59 | hide_parse_errors = false 60 | error_on_line_overflow = false 61 | error_on_unformatted = false 62 | report_todo = "Never" 63 | report_fixme = "Never" 64 | ignore = [] 65 | emit_mode = "Files" 66 | make_backup = false 67 | -------------------------------------------------------------------------------- /Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 
3 | [[package]] 4 | name = "anyhow" 5 | version = "1.0.43" 6 | source = "registry+https://github.com/rust-lang/crates.io-index" 7 | checksum = "28ae2b3dec75a406790005a200b1bd89785afc02517a00ca99ecfe093ee9e6cf" 8 | 9 | [[package]] 10 | name = "atty" 11 | version = "0.2.14" 12 | source = "registry+https://github.com/rust-lang/crates.io-index" 13 | checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" 14 | dependencies = [ 15 | "hermit-abi", 16 | "libc", 17 | "winapi", 18 | ] 19 | 20 | [[package]] 21 | name = "autocfg" 22 | version = "1.0.1" 23 | source = "registry+https://github.com/rust-lang/crates.io-index" 24 | checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" 25 | 26 | [[package]] 27 | name = "base64" 28 | version = "0.13.0" 29 | source = "registry+https://github.com/rust-lang/crates.io-index" 30 | checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" 31 | 32 | [[package]] 33 | name = "bitflags" 34 | version = "1.2.1" 35 | source = "registry+https://github.com/rust-lang/crates.io-index" 36 | checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" 37 | 38 | [[package]] 39 | name = "cc" 40 | version = "1.0.72" 41 | source = "registry+https://github.com/rust-lang/crates.io-index" 42 | checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" 43 | 44 | [[package]] 45 | name = "cfg-if" 46 | version = "1.0.0" 47 | source = "registry+https://github.com/rust-lang/crates.io-index" 48 | checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" 49 | 50 | [[package]] 51 | name = "clap" 52 | version = "3.0.0-beta.2" 53 | source = "git+https://github.com/clap-rs/clap#36c972a302ed8071e489b895f1a372398a9bf8b5" 54 | dependencies = [ 55 | "atty", 56 | "bitflags", 57 | "clap_derive", 58 | "indexmap", 59 | "lazy_static", 60 | "os_str_bytes", 61 | "strsim", 62 | "termcolor", 63 | "terminal_size", 64 | "textwrap", 65 | "vec_map", 66 | ] 67 | 68 | [[package]] 69 | name = "clap_derive" 70 | version = "3.0.0-beta.2" 71 | source = "git+https://github.com/clap-rs/clap#36c972a302ed8071e489b895f1a372398a9bf8b5" 72 | dependencies = [ 73 | "heck", 74 | "proc-macro-error", 75 | "proc-macro2", 76 | "quote", 77 | "syn", 78 | ] 79 | 80 | [[package]] 81 | name = "console" 82 | version = "0.14.1" 83 | source = "registry+https://github.com/rust-lang/crates.io-index" 84 | checksum = "3993e6445baa160675931ec041a5e03ca84b9c6e32a056150d3aa2bdda0a1f45" 85 | dependencies = [ 86 | "encode_unicode", 87 | "lazy_static", 88 | "libc", 89 | "regex", 90 | "terminal_size", 91 | "unicode-width", 92 | "winapi", 93 | ] 94 | 95 | [[package]] 96 | name = "crossbeam-channel" 97 | version = "0.5.1" 98 | source = "registry+https://github.com/rust-lang/crates.io-index" 99 | checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4" 100 | dependencies = [ 101 | "cfg-if", 102 | "crossbeam-utils", 103 | ] 104 | 105 | [[package]] 106 | name = "crossbeam-deque" 107 | version = "0.8.1" 108 | source = "registry+https://github.com/rust-lang/crates.io-index" 109 | checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" 110 | dependencies = [ 111 | "cfg-if", 112 | "crossbeam-epoch", 113 | "crossbeam-utils", 114 | ] 115 | 116 | [[package]] 117 | name = "crossbeam-epoch" 118 | version = "0.9.5" 119 | source = "registry+https://github.com/rust-lang/crates.io-index" 120 | checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" 121 | dependencies = [ 122 | 
"cfg-if", 123 | "crossbeam-utils", 124 | "lazy_static", 125 | "memoffset", 126 | "scopeguard", 127 | ] 128 | 129 | [[package]] 130 | name = "crossbeam-utils" 131 | version = "0.8.5" 132 | source = "registry+https://github.com/rust-lang/crates.io-index" 133 | checksum = "d82cfc11ce7f2c3faef78d8a684447b40d503d9681acebed6cb728d45940c4db" 134 | dependencies = [ 135 | "cfg-if", 136 | "lazy_static", 137 | ] 138 | 139 | [[package]] 140 | name = "dtoa" 141 | version = "0.4.8" 142 | source = "registry+https://github.com/rust-lang/crates.io-index" 143 | checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0" 144 | 145 | [[package]] 146 | name = "either" 147 | version = "1.6.1" 148 | source = "registry+https://github.com/rust-lang/crates.io-index" 149 | checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" 150 | 151 | [[package]] 152 | name = "encode_unicode" 153 | version = "0.3.6" 154 | source = "registry+https://github.com/rust-lang/crates.io-index" 155 | checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" 156 | 157 | [[package]] 158 | name = "err-derive" 159 | version = "0.2.4" 160 | source = "registry+https://github.com/rust-lang/crates.io-index" 161 | checksum = "22deed3a8124cff5fa835713fa105621e43bbdc46690c3a6b68328a012d350d4" 162 | dependencies = [ 163 | "proc-macro-error", 164 | "proc-macro2", 165 | "quote", 166 | "rustversion", 167 | "syn", 168 | "synstructure", 169 | ] 170 | 171 | [[package]] 172 | name = "getrandom" 173 | version = "0.2.3" 174 | source = "registry+https://github.com/rust-lang/crates.io-index" 175 | checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" 176 | dependencies = [ 177 | "cfg-if", 178 | "libc", 179 | "wasi", 180 | ] 181 | 182 | [[package]] 183 | name = "getset" 184 | version = "0.1.1" 185 | source = "registry+https://github.com/rust-lang/crates.io-index" 186 | checksum = "24b328c01a4d71d2d8173daa93562a73ab0fe85616876f02500f53d82948c504" 187 | dependencies = [ 188 | "proc-macro-error", 189 | "proc-macro2", 190 | "quote", 191 | "syn", 192 | ] 193 | 194 | [[package]] 195 | name = "hashbrown" 196 | version = "0.11.2" 197 | source = "registry+https://github.com/rust-lang/crates.io-index" 198 | checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" 199 | 200 | [[package]] 201 | name = "heck" 202 | version = "0.3.3" 203 | source = "registry+https://github.com/rust-lang/crates.io-index" 204 | checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" 205 | dependencies = [ 206 | "unicode-segmentation", 207 | ] 208 | 209 | [[package]] 210 | name = "hermit-abi" 211 | version = "0.1.19" 212 | source = "registry+https://github.com/rust-lang/crates.io-index" 213 | checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" 214 | dependencies = [ 215 | "libc", 216 | ] 217 | 218 | [[package]] 219 | name = "hostname" 220 | version = "0.3.1" 221 | source = "registry+https://github.com/rust-lang/crates.io-index" 222 | checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" 223 | dependencies = [ 224 | "libc", 225 | "match_cfg", 226 | "winapi", 227 | ] 228 | 229 | [[package]] 230 | name = "indexmap" 231 | version = "1.7.0" 232 | source = "registry+https://github.com/rust-lang/crates.io-index" 233 | checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" 234 | dependencies = [ 235 | "autocfg", 236 | "hashbrown", 237 | ] 238 | 239 | [[package]] 240 | name = "indicatif" 241 | version = 
"0.15.0" 242 | source = "registry+https://github.com/rust-lang/crates.io-index" 243 | checksum = "7baab56125e25686df467fe470785512329883aab42696d661247aca2a2896e4" 244 | dependencies = [ 245 | "console", 246 | "lazy_static", 247 | "number_prefix", 248 | "regex", 249 | ] 250 | 251 | [[package]] 252 | name = "instant" 253 | version = "0.1.12" 254 | source = "registry+https://github.com/rust-lang/crates.io-index" 255 | checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" 256 | dependencies = [ 257 | "cfg-if", 258 | ] 259 | 260 | [[package]] 261 | name = "ipnetwork" 262 | version = "0.18.0" 263 | source = "registry+https://github.com/rust-lang/crates.io-index" 264 | checksum = "4088d739b183546b239688ddbc79891831df421773df95e236daf7867866d355" 265 | dependencies = [ 266 | "serde", 267 | ] 268 | 269 | [[package]] 270 | name = "itoa" 271 | version = "0.4.8" 272 | source = "registry+https://github.com/rust-lang/crates.io-index" 273 | checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" 274 | 275 | [[package]] 276 | name = "kubernix" 277 | version = "0.2.0" 278 | dependencies = [ 279 | "anyhow", 280 | "base64", 281 | "clap", 282 | "console", 283 | "crossbeam-channel", 284 | "getset", 285 | "hostname", 286 | "indicatif", 287 | "ipnetwork", 288 | "lazy_static", 289 | "log", 290 | "nix", 291 | "parking_lot", 292 | "proc-mounts", 293 | "rand", 294 | "rayon", 295 | "serde", 296 | "serde_json", 297 | "serde_yaml", 298 | "signal-hook", 299 | "tempfile", 300 | "toml", 301 | ] 302 | 303 | [[package]] 304 | name = "lazy_static" 305 | version = "1.4.0" 306 | source = "registry+https://github.com/rust-lang/crates.io-index" 307 | checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" 308 | 309 | [[package]] 310 | name = "libc" 311 | version = "0.2.104" 312 | source = "registry+https://github.com/rust-lang/crates.io-index" 313 | checksum = "7b2f96d100e1cf1929e7719b7edb3b90ab5298072638fccd77be9ce942ecdfce" 314 | 315 | [[package]] 316 | name = "linked-hash-map" 317 | version = "0.5.4" 318 | source = "registry+https://github.com/rust-lang/crates.io-index" 319 | checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" 320 | 321 | [[package]] 322 | name = "lock_api" 323 | version = "0.4.5" 324 | source = "registry+https://github.com/rust-lang/crates.io-index" 325 | checksum = "712a4d093c9976e24e7dbca41db895dabcbac38eb5f4045393d17a95bdfb1109" 326 | dependencies = [ 327 | "scopeguard", 328 | ] 329 | 330 | [[package]] 331 | name = "log" 332 | version = "0.4.14" 333 | source = "registry+https://github.com/rust-lang/crates.io-index" 334 | checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" 335 | dependencies = [ 336 | "cfg-if", 337 | "serde", 338 | ] 339 | 340 | [[package]] 341 | name = "match_cfg" 342 | version = "0.1.0" 343 | source = "registry+https://github.com/rust-lang/crates.io-index" 344 | checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" 345 | 346 | [[package]] 347 | name = "memoffset" 348 | version = "0.6.4" 349 | source = "registry+https://github.com/rust-lang/crates.io-index" 350 | checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9" 351 | dependencies = [ 352 | "autocfg", 353 | ] 354 | 355 | [[package]] 356 | name = "nix" 357 | version = "0.22.1" 358 | source = "registry+https://github.com/rust-lang/crates.io-index" 359 | checksum = "e7555d6c7164cc913be1ce7f95cbecdabda61eb2ccd89008524af306fb7f5031" 360 | dependencies = [ 361 | "bitflags", 
362 | "cc", 363 | "cfg-if", 364 | "libc", 365 | "memoffset", 366 | ] 367 | 368 | [[package]] 369 | name = "num_cpus" 370 | version = "1.13.0" 371 | source = "registry+https://github.com/rust-lang/crates.io-index" 372 | checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" 373 | dependencies = [ 374 | "hermit-abi", 375 | "libc", 376 | ] 377 | 378 | [[package]] 379 | name = "number_prefix" 380 | version = "0.3.0" 381 | source = "registry+https://github.com/rust-lang/crates.io-index" 382 | checksum = "17b02fc0ff9a9e4b35b3342880f48e896ebf69f2967921fe8646bf5b7125956a" 383 | 384 | [[package]] 385 | name = "os_str_bytes" 386 | version = "3.1.0" 387 | source = "registry+https://github.com/rust-lang/crates.io-index" 388 | checksum = "6acbef58a60fe69ab50510a55bc8cdd4d6cf2283d27ad338f54cb52747a9cf2d" 389 | 390 | [[package]] 391 | name = "parking_lot" 392 | version = "0.11.2" 393 | source = "registry+https://github.com/rust-lang/crates.io-index" 394 | checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" 395 | dependencies = [ 396 | "instant", 397 | "lock_api", 398 | "parking_lot_core", 399 | ] 400 | 401 | [[package]] 402 | name = "parking_lot_core" 403 | version = "0.8.5" 404 | source = "registry+https://github.com/rust-lang/crates.io-index" 405 | checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" 406 | dependencies = [ 407 | "cfg-if", 408 | "instant", 409 | "libc", 410 | "redox_syscall", 411 | "smallvec", 412 | "winapi", 413 | ] 414 | 415 | [[package]] 416 | name = "partition-identity" 417 | version = "0.2.8" 418 | source = "registry+https://github.com/rust-lang/crates.io-index" 419 | checksum = "ec13ba9a0eec5c10a89f6ec1b6e9e2ef7d29b810d771355abbd1c43cae003ed6" 420 | dependencies = [ 421 | "err-derive", 422 | ] 423 | 424 | [[package]] 425 | name = "ppv-lite86" 426 | version = "0.2.10" 427 | source = "registry+https://github.com/rust-lang/crates.io-index" 428 | checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" 429 | 430 | [[package]] 431 | name = "proc-macro-error" 432 | version = "1.0.4" 433 | source = "registry+https://github.com/rust-lang/crates.io-index" 434 | checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" 435 | dependencies = [ 436 | "proc-macro-error-attr", 437 | "proc-macro2", 438 | "quote", 439 | "syn", 440 | "version_check", 441 | ] 442 | 443 | [[package]] 444 | name = "proc-macro-error-attr" 445 | version = "1.0.4" 446 | source = "registry+https://github.com/rust-lang/crates.io-index" 447 | checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" 448 | dependencies = [ 449 | "proc-macro2", 450 | "quote", 451 | "version_check", 452 | ] 453 | 454 | [[package]] 455 | name = "proc-macro2" 456 | version = "1.0.29" 457 | source = "registry+https://github.com/rust-lang/crates.io-index" 458 | checksum = "b9f5105d4fdaab20335ca9565e106a5d9b82b6219b5ba735731124ac6711d23d" 459 | dependencies = [ 460 | "unicode-xid", 461 | ] 462 | 463 | [[package]] 464 | name = "proc-mounts" 465 | version = "0.2.4" 466 | source = "registry+https://github.com/rust-lang/crates.io-index" 467 | checksum = "2ad7e9c8d1b8c20f16a84d61d7c4c0325a5837c1307a2491b509cd92fb4e4442" 468 | dependencies = [ 469 | "lazy_static", 470 | "partition-identity", 471 | ] 472 | 473 | [[package]] 474 | name = "quote" 475 | version = "1.0.10" 476 | source = "registry+https://github.com/rust-lang/crates.io-index" 477 | checksum = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05" 
478 | dependencies = [ 479 | "proc-macro2", 480 | ] 481 | 482 | [[package]] 483 | name = "rand" 484 | version = "0.8.4" 485 | source = "registry+https://github.com/rust-lang/crates.io-index" 486 | checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" 487 | dependencies = [ 488 | "libc", 489 | "rand_chacha", 490 | "rand_core", 491 | "rand_hc", 492 | ] 493 | 494 | [[package]] 495 | name = "rand_chacha" 496 | version = "0.3.1" 497 | source = "registry+https://github.com/rust-lang/crates.io-index" 498 | checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" 499 | dependencies = [ 500 | "ppv-lite86", 501 | "rand_core", 502 | ] 503 | 504 | [[package]] 505 | name = "rand_core" 506 | version = "0.6.3" 507 | source = "registry+https://github.com/rust-lang/crates.io-index" 508 | checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" 509 | dependencies = [ 510 | "getrandom", 511 | ] 512 | 513 | [[package]] 514 | name = "rand_hc" 515 | version = "0.3.1" 516 | source = "registry+https://github.com/rust-lang/crates.io-index" 517 | checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" 518 | dependencies = [ 519 | "rand_core", 520 | ] 521 | 522 | [[package]] 523 | name = "rayon" 524 | version = "1.5.1" 525 | source = "registry+https://github.com/rust-lang/crates.io-index" 526 | checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90" 527 | dependencies = [ 528 | "autocfg", 529 | "crossbeam-deque", 530 | "either", 531 | "rayon-core", 532 | ] 533 | 534 | [[package]] 535 | name = "rayon-core" 536 | version = "1.9.1" 537 | source = "registry+https://github.com/rust-lang/crates.io-index" 538 | checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e" 539 | dependencies = [ 540 | "crossbeam-channel", 541 | "crossbeam-deque", 542 | "crossbeam-utils", 543 | "lazy_static", 544 | "num_cpus", 545 | ] 546 | 547 | [[package]] 548 | name = "redox_syscall" 549 | version = "0.2.10" 550 | source = "registry+https://github.com/rust-lang/crates.io-index" 551 | checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" 552 | dependencies = [ 553 | "bitflags", 554 | ] 555 | 556 | [[package]] 557 | name = "regex" 558 | version = "1.5.4" 559 | source = "registry+https://github.com/rust-lang/crates.io-index" 560 | checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" 561 | dependencies = [ 562 | "regex-syntax", 563 | ] 564 | 565 | [[package]] 566 | name = "regex-syntax" 567 | version = "0.6.25" 568 | source = "registry+https://github.com/rust-lang/crates.io-index" 569 | checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" 570 | 571 | [[package]] 572 | name = "remove_dir_all" 573 | version = "0.5.3" 574 | source = "registry+https://github.com/rust-lang/crates.io-index" 575 | checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" 576 | dependencies = [ 577 | "winapi", 578 | ] 579 | 580 | [[package]] 581 | name = "rustversion" 582 | version = "1.0.5" 583 | source = "registry+https://github.com/rust-lang/crates.io-index" 584 | checksum = "61b3909d758bb75c79f23d4736fac9433868679d3ad2ea7a61e3c25cfda9a088" 585 | 586 | [[package]] 587 | name = "ryu" 588 | version = "1.0.5" 589 | source = "registry+https://github.com/rust-lang/crates.io-index" 590 | checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" 591 | 592 | [[package]] 593 | name = "scopeguard" 594 | version = "1.1.0" 595 | source = 
"registry+https://github.com/rust-lang/crates.io-index" 596 | checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" 597 | 598 | [[package]] 599 | name = "serde" 600 | version = "1.0.130" 601 | source = "registry+https://github.com/rust-lang/crates.io-index" 602 | checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" 603 | dependencies = [ 604 | "serde_derive", 605 | ] 606 | 607 | [[package]] 608 | name = "serde_derive" 609 | version = "1.0.130" 610 | source = "registry+https://github.com/rust-lang/crates.io-index" 611 | checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" 612 | dependencies = [ 613 | "proc-macro2", 614 | "quote", 615 | "syn", 616 | ] 617 | 618 | [[package]] 619 | name = "serde_json" 620 | version = "1.0.70" 621 | source = "registry+https://github.com/rust-lang/crates.io-index" 622 | checksum = "e277c495ac6cd1a01a58d0a0c574568b4d1ddf14f59965c6a58b8d96400b54f3" 623 | dependencies = [ 624 | "itoa", 625 | "ryu", 626 | "serde", 627 | ] 628 | 629 | [[package]] 630 | name = "serde_yaml" 631 | version = "0.8.20" 632 | source = "registry+https://github.com/rust-lang/crates.io-index" 633 | checksum = "ad104641f3c958dab30eb3010e834c2622d1f3f4c530fef1dee20ad9485f3c09" 634 | dependencies = [ 635 | "dtoa", 636 | "indexmap", 637 | "serde", 638 | "yaml-rust", 639 | ] 640 | 641 | [[package]] 642 | name = "signal-hook" 643 | version = "0.3.10" 644 | source = "registry+https://github.com/rust-lang/crates.io-index" 645 | checksum = "9c98891d737e271a2954825ef19e46bd16bdb98e2746f2eec4f7a4ef7946efd1" 646 | dependencies = [ 647 | "libc", 648 | "signal-hook-registry", 649 | ] 650 | 651 | [[package]] 652 | name = "signal-hook-registry" 653 | version = "1.4.0" 654 | source = "registry+https://github.com/rust-lang/crates.io-index" 655 | checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" 656 | dependencies = [ 657 | "libc", 658 | ] 659 | 660 | [[package]] 661 | name = "smallvec" 662 | version = "1.6.1" 663 | source = "registry+https://github.com/rust-lang/crates.io-index" 664 | checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" 665 | 666 | [[package]] 667 | name = "strsim" 668 | version = "0.10.0" 669 | source = "registry+https://github.com/rust-lang/crates.io-index" 670 | checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" 671 | 672 | [[package]] 673 | name = "syn" 674 | version = "1.0.76" 675 | source = "registry+https://github.com/rust-lang/crates.io-index" 676 | checksum = "c6f107db402c2c2055242dbf4d2af0e69197202e9faacbef9571bbe47f5a1b84" 677 | dependencies = [ 678 | "proc-macro2", 679 | "quote", 680 | "unicode-xid", 681 | ] 682 | 683 | [[package]] 684 | name = "synstructure" 685 | version = "0.12.5" 686 | source = "registry+https://github.com/rust-lang/crates.io-index" 687 | checksum = "474aaa926faa1603c40b7885a9eaea29b444d1cb2850cb7c0e37bb1a4182f4fa" 688 | dependencies = [ 689 | "proc-macro2", 690 | "quote", 691 | "syn", 692 | "unicode-xid", 693 | ] 694 | 695 | [[package]] 696 | name = "tempfile" 697 | version = "3.2.0" 698 | source = "registry+https://github.com/rust-lang/crates.io-index" 699 | checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" 700 | dependencies = [ 701 | "cfg-if", 702 | "libc", 703 | "rand", 704 | "redox_syscall", 705 | "remove_dir_all", 706 | "winapi", 707 | ] 708 | 709 | [[package]] 710 | name = "termcolor" 711 | version = "1.1.2" 712 | source = 
"registry+https://github.com/rust-lang/crates.io-index" 713 | checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" 714 | dependencies = [ 715 | "winapi-util", 716 | ] 717 | 718 | [[package]] 719 | name = "terminal_size" 720 | version = "0.1.17" 721 | source = "registry+https://github.com/rust-lang/crates.io-index" 722 | checksum = "633c1a546cee861a1a6d0dc69ebeca693bf4296661ba7852b9d21d159e0506df" 723 | dependencies = [ 724 | "libc", 725 | "winapi", 726 | ] 727 | 728 | [[package]] 729 | name = "textwrap" 730 | version = "0.13.4" 731 | source = "registry+https://github.com/rust-lang/crates.io-index" 732 | checksum = "cd05616119e612a8041ef58f2b578906cc2531a6069047ae092cfb86a325d835" 733 | dependencies = [ 734 | "terminal_size", 735 | "unicode-width", 736 | ] 737 | 738 | [[package]] 739 | name = "toml" 740 | version = "0.5.8" 741 | source = "registry+https://github.com/rust-lang/crates.io-index" 742 | checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" 743 | dependencies = [ 744 | "serde", 745 | ] 746 | 747 | [[package]] 748 | name = "unicode-segmentation" 749 | version = "1.8.0" 750 | source = "registry+https://github.com/rust-lang/crates.io-index" 751 | checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" 752 | 753 | [[package]] 754 | name = "unicode-width" 755 | version = "0.1.8" 756 | source = "registry+https://github.com/rust-lang/crates.io-index" 757 | checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" 758 | 759 | [[package]] 760 | name = "unicode-xid" 761 | version = "0.2.2" 762 | source = "registry+https://github.com/rust-lang/crates.io-index" 763 | checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" 764 | 765 | [[package]] 766 | name = "vec_map" 767 | version = "0.8.2" 768 | source = "registry+https://github.com/rust-lang/crates.io-index" 769 | checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" 770 | 771 | [[package]] 772 | name = "version_check" 773 | version = "0.9.3" 774 | source = "registry+https://github.com/rust-lang/crates.io-index" 775 | checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" 776 | 777 | [[package]] 778 | name = "wasi" 779 | version = "0.10.2+wasi-snapshot-preview1" 780 | source = "registry+https://github.com/rust-lang/crates.io-index" 781 | checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" 782 | 783 | [[package]] 784 | name = "winapi" 785 | version = "0.3.9" 786 | source = "registry+https://github.com/rust-lang/crates.io-index" 787 | checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" 788 | dependencies = [ 789 | "winapi-i686-pc-windows-gnu", 790 | "winapi-x86_64-pc-windows-gnu", 791 | ] 792 | 793 | [[package]] 794 | name = "winapi-i686-pc-windows-gnu" 795 | version = "0.4.0" 796 | source = "registry+https://github.com/rust-lang/crates.io-index" 797 | checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" 798 | 799 | [[package]] 800 | name = "winapi-util" 801 | version = "0.1.5" 802 | source = "registry+https://github.com/rust-lang/crates.io-index" 803 | checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" 804 | dependencies = [ 805 | "winapi", 806 | ] 807 | 808 | [[package]] 809 | name = "winapi-x86_64-pc-windows-gnu" 810 | version = "0.4.0" 811 | source = "registry+https://github.com/rust-lang/crates.io-index" 812 | checksum = 
"712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" 813 | 814 | [[package]] 815 | name = "yaml-rust" 816 | version = "0.4.5" 817 | source = "registry+https://github.com/rust-lang/crates.io-index" 818 | checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" 819 | dependencies = [ 820 | "linked-hash-map", 821 | ] 822 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "kubernix" 3 | version = "0.2.0" 4 | authors = ["Sascha Grunert "] 5 | edition = "2018" 6 | license = "MIT" 7 | categories = ["command-line-utilities"] 8 | description = "Kubernetes development cluster bootstrapping with Nix packages" 9 | documentation = "https://docs.rs/kubernix" 10 | homepage = "https://github.com/saschagrunert/kubernix" 11 | keywords = ["kubernetes", "nix", "nix-shell", "crio", "kube"] 12 | readme = "README.md" 13 | repository = "https://github.com/saschagrunert/kubernix" 14 | 15 | [[bin]] 16 | name = "kubernix" 17 | path = "src/main.rs" 18 | 19 | [dependencies] 20 | anyhow = "1.0.43" 21 | base64 = "0.13.0" 22 | clap = { git = "https://github.com/clap-rs/clap", features = ["wrap_help"] } 23 | console = "0.14.1" 24 | crossbeam-channel = "0.5.1" 25 | getset = "0.1.1" 26 | hostname = "0.3.1" 27 | indicatif = "0.15.0" 28 | ipnetwork = "0.18.0" 29 | lazy_static = "1.4.0" 30 | log = { version = "0.4.14", features = ["serde", "std"] } 31 | nix = "0.22.1" 32 | parking_lot = "0.11.2" 33 | proc-mounts = "0.2.4" 34 | rand = "0.8.4" 35 | rayon = "1.5.1" 36 | serde = { version = "1.0.130", features = ["derive"] } 37 | serde_json = "1.0.70" 38 | serde_yaml = "0.8.20" 39 | signal-hook = "0.3.10" 40 | toml = "0.5.8" 41 | 42 | [dev-dependencies] 43 | tempfile = "3.2.0" 44 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # static binary build 2 | FROM ekidd/rust-musl-builder:stable AS builder 3 | COPY . . 4 | RUN cargo build --release 5 | 6 | # nix dependency collection 7 | FROM nixos/nix:latest as bootstrapper 8 | COPY nix /bootstrap 9 | RUN nix run -f /bootstrap -c echo done 10 | 11 | # target image 12 | FROM nixos/nix:latest 13 | RUN apk add bash 14 | ENV SHELL /bin/bash 15 | COPY --from=builder \ 16 | /home/rust/src/target/x86_64-unknown-linux-musl/release/kubernix . 17 | COPY --from=bootstrapper /nix /nix 18 | ENTRYPOINT [ "/kubernix" ] 19 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Sascha Grunert 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | ARGS ?= 2 | SUDO ?= sudo -E 3 | KUBERNIX ?= $(SUDO) target/release/kubernix $(ARGS) 4 | CONTAINER_RUNTIME ?= $(SUDO) podman 5 | RUN_DIR ?= $(shell pwd)/kubernix-run 6 | 7 | export IMAGE ?= docker.io/saschagrunert/kubernix 8 | 9 | define nix 10 | nix run -f nix/build.nix $(1) 11 | endef 12 | 13 | define nix-run 14 | $(call nix,-c $(1)) 15 | endef 16 | 17 | define nix-run-pure 18 | $(call nix,-ik SSH_AUTH_SOCK -c $(1)) 19 | endef 20 | 21 | all: build 22 | 23 | .PHONY: build 24 | build: 25 | $(call nix-run-pure,cargo build) 26 | 27 | .PHONY: build-image 28 | build-image: 29 | $(CONTAINER_RUNTIME) build -t $(IMAGE) . 30 | 31 | .PHONY: build-release 32 | build-release: 33 | $(call nix-run-pure,cargo build --release) 34 | 35 | .PHONY: coverage 36 | coverage: 37 | $(call nix-run-pure,cargo kcov --lib) 38 | 39 | .PHONY: e2e 40 | e2e: 41 | $(call nix-run,$(SUDO) \ 42 | KUBERNETES_SERVICE_HOST=127.0.0.1 \ 43 | KUBERNETES_SERVICE_PORT=6443 \ 44 | KUBECONFIG=$(RUN_DIR)/kubeconfig/admin.kubeconfig \ 45 | e2e.test \ 46 | --provider=local \ 47 | --ginkgo.focus='.*$(FOCUS).*' \ 48 | --ginkgo.progress \ 49 | $(ARGS) \ 50 | ) 51 | 52 | .PHONY: docs 53 | docs: 54 | $(call nix-run-pure,cargo doc --no-deps) 55 | 56 | .PHONY: lint-clippy 57 | lint-clippy: 58 | $(call nix-run-pure,cargo clippy --all -- -D warnings) 59 | 60 | .PHONY: lint-rustfmt 61 | lint-rustfmt: 62 | $(call nix-run-pure,cargo fmt && git diff --exit-code) 63 | 64 | .PHONY: nix 65 | nix: 66 | $(call nix-run-pure,$(shell which bash)) 67 | 68 | .PHONY: nixdeps 69 | nixdeps: 70 | @echo '| Application | Version |' 71 | @echo '| - | - |' 72 | @nix-instantiate nix 2> /dev/null \ 73 | | sed -n 's;/nix/store/[[:alnum:]]\{32\}-\(.*\)-\(.*\).drv\(!bin\)\{0,1\};| \1 | v\2 |;p' \ 74 | | sort 75 | 76 | .PHONY: nixpkgs 77 | nixpkgs: 78 | @nix run -f channel:nixpkgs-unstable nix-prefetch-git -c nix-prefetch-git \ 79 | --no-deepClone https://github.com/nixos/nixpkgs > nix/nixpkgs.json 80 | 81 | .PHONY: run 82 | run: build-release 83 | $(KUBERNIX) 84 | 85 | .PHONY: run-image 86 | run-image: 87 | $(SUDO) contrib/prepare-system 88 | mkdir -p $(RUN_DIR) 89 | if [ -d /dev/mapper ]; then \ 90 | DEV_MAPPER=-v/dev/mapper:/dev/mapper ;\ 91 | fi ;\ 92 | $(CONTAINER_RUNTIME) run \ 93 | -v $(RUN_DIR):/kubernix-run \ 94 | --rm \ 95 | --privileged \ 96 | --net=host \ 97 | $$DEV_MAPPER \ 98 | -it $(IMAGE) $(ARGS) 99 | 100 | .PHONY: shell 101 | shell: build-release 102 | $(KUBERNIX) shell 103 | 104 | define test 105 | $(call nix-run,\ 106 | cargo test \ 107 | --test $(1) $(ARGS) \ 108 | -- \ 109 | --test-threads 1 \ 110 | --nocapture) 111 | endef 112 | 113 | .PHONY: test-integration 114 | test-integration: 115 | $(call test,integration) 116 | 117 | .PHONY: test-e2e 118 | test-e2e: 119 | $(call test,e2e) 120 | 121 | .PHONY: test-unit 122 | test-unit: 123 | $(call nix-run-pure,cargo test --lib) 124 | 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | [![CircleCI](https://circleci.com/gh/saschagrunert/kubernix.svg?style=shield)](https://circleci.com/gh/saschagrunert/kubernix) 4 | [![Docs master](https://img.shields.io/badge/doc-master-orange.svg)](https://saschagrunert.github.io/kubernix/doc/kubernix/index.html) 5 | [![Docs release](https://docs.rs/kubernix/badge.svg)](https://docs.rs/kubernix) 6 | [![Coverage](https://codecov.io/gh/saschagrunert/kubernix/branch/master/graph/badge.svg)](https://codecov.io/gh/saschagrunert/kubernix) 7 | [![Dependencies](https://deps.rs/repo/github/saschagrunert/kubernix/status.svg)](https://deps.rs/repo/github/saschagrunert/kubernix) 8 | [![Crates.io](https://img.shields.io/crates/v/kubernix.svg)](https://crates.io/crates/kubernix) 9 | [![License MIT](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/saschagrunert/kubernix/blob/master/LICENSE) 10 | 11 | ## Kubernetes development cluster bootstrapping with Nix packages 12 | 13 | This project aims to provide **single dependency** [Kubernetes][1] clusters 14 | for local testing, experimenting and development purposes. 15 | 16 | [1]: https://kubernetes.io 17 | 18 | Moving pictures are worth more than a thousand words, so here is a short demo: 19 | 20 | ![demo](.github/kubernix.svg) 21 | 22 | ### Nix? 23 | 24 | Have you ever heard of [Nix][2], the functional package manager? 25 | 26 | In case you haven't, don’t worry – the important thing is that it provides all the third-party 27 | dependencies needed for this project, pinned to a dedicated version. This guarantees stable, 28 | reproducible installations. 29 | 30 | [2]: https://nixos.org/nix 31 | 32 | KuberNix itself is a Rusty helper program, which takes care of bootstrapping 33 | the Kubernetes cluster, passing the right configuration parameters around and 34 | keeping track of the running processes. 35 | 36 | ### What is inside 37 | 38 | The following technology stack is currently being used: 39 | 40 | | Application | Version | 41 | | --------------- | ------------ | 42 | | cfssl | v1.5.0 | 43 | | cni-plugins | v0.9.0 | 44 | | conmon | v2.0.25 | 45 | | conntrack-tools | v1.4.6 | 46 | | cri-o-wrapper | v1.20.0 | 47 | | cri-tools | v1.20.0 | 48 | | etcd | v3.3.25 | 49 | | iproute2 | v5.10.0 | 50 | | iptables | v1.8.6 | 51 | | kmod | v27 | 52 | | kubectl | v1.19.5 | 53 | | kubernetes | v1.19.5 | 54 | | nss-cacert | v3.60 | 55 | | podman-wrapper | v2.2.1 | 56 | | runc | v1.0.0-rc92 | 57 | | socat | v1.7.4.1 | 58 | | sysctl | v1003.1.2008 | 59 | | util-linux | v2.36.1 | 60 | 61 | Some other tools are not explicitly mentioned here, because they are not 62 | first-level dependencies. 63 | 64 | ### Single Dependency 65 | 66 | #### With Nix 67 | 68 | As already mentioned, there is only a single dependency needed to run this 69 | project: **Nix**. To set up Nix, simply run: 70 | 71 | ```shell 72 | $ curl https://nixos.org/nix/install | sh 73 | ``` 74 | 75 | Please make sure to follow the instructions output by the script. 76 | 77 | #### With the Container Runtime of your Choice 78 | 79 | It is also possible to run KuberNix in the container runtime of your choice. To 80 | do this, simply grab the latest image from [`saschagrunert/kubernix`][40]. 81 | Please note that running KuberNix inside a container image requires 82 | `privileged` mode and `host` networking.
For example, we can run KuberNix with 83 | [podman][41] like this: 84 | 85 | [40]: https://cloud.docker.com/u/saschagrunert/repository/docker/saschagrunert/kubernix 86 | [41]: https://github.com/containers/libpod 87 | 88 | ``` 89 | $ sudo podman run \ 90 | --net=host \ 91 | --privileged \ 92 | -it docker.io/saschagrunert/kubernix:latest 93 | ``` 94 | 95 | ### Getting Started 96 | 97 | #### Cluster Bootstrap 98 | 99 | To bootstrap your first cluster, download one of the latest [release binaries][18] or 100 | build the application via: 101 | 102 | [18]: https://github.com/saschagrunert/kubernix/releases/latest 103 | 104 | ```shell 105 | $ make build-release 106 | ``` 107 | 108 | The binary should now be available at `target/release/kubernix` within 109 | the project. Alternatively, install the application via `cargo install kubernix`. 110 | 111 | Once you have retrieved the binary, start KuberNix by running it as `root`: 112 | 113 | ``` 114 | $ sudo kubernix 115 | ``` 116 | 117 | KuberNix will now take care of setting up the Nix environment correctly, 118 | downloading the needed binaries and starting the cluster. By default it creates 119 | a directory called `kubernix-run` in the current path, which contains all necessary 120 | data for the cluster. 121 | 122 | #### Shell Environment 123 | 124 | If everything went fine, you should be dropped into a new shell session, 125 | like this: 126 | 127 | ``` 128 | [INFO ] Everything is up and running 129 | [INFO ] Spawning interactive shell 130 | [INFO ] Please be aware that the cluster stops if you exit the shell 131 | > 132 | ``` 133 | 134 | Now you can access your cluster via tools like `kubectl`: 135 | 136 | ``` 137 | > kubectl get pods --all-namespaces 138 | NAMESPACE NAME READY STATUS RESTARTS AGE 139 | kube-system coredns-85d84dd694-xz997 1/1 Running 0 102s 140 | ``` 141 | 142 | All configuration files have been written to the target directory, which is now 143 | the current one: 144 | 145 | ``` 146 | > ls -1 147 | apiserver/ 148 | controllermanager/ 149 | coredns/ 150 | crio/ 151 | encryptionconfig/ 152 | etcd/ 153 | kubeconfig/ 154 | kubelet/ 155 | kubernix.env 156 | kubernix.toml 157 | nix/ 158 | pki/ 159 | policy.json 160 | proxy/ 161 | scheduler/ 162 | ``` 163 | 164 | For example, the log files for the different running components are now 165 | available within their corresponding directories: 166 | 167 | ``` 168 | > ls -1 **.log 169 | apiserver/kube-apiserver.log 170 | controllermanager/kube-controller-manager.log 171 | crio/crio.log 172 | etcd/etcd.log 173 | kubelet/kubelet.log 174 | proxy/kube-proxy.log 175 | scheduler/kube-scheduler.log 176 | ``` 177 | 178 | If you want to spawn an additional shell session, simply run `kubernix shell` in 179 | the same directory where the initial bootstrap happened. 180 | 181 | ``` 182 | $ sudo kubernix shell 183 | [INFO kubernix] Spawning new kubernix shell in 'kubernix-run' 184 | > kubectl run --generator=run-pod/v1 --image=alpine -it alpine sh 185 | If you don't see a command prompt, try pressing enter. 186 | / # 187 | ``` 188 | 189 | This means that you can spawn as many shells as you want. 190 | 191 | #### Cleanup 192 | 193 | The whole cluster gets automatically destroyed if you exit the shell session 194 | from the initial process: 195 | 196 | ``` 197 | > exit 198 | [INFO ] Cleaning up 199 | ``` 200 | 201 | Please note that the directory where all the data is stored is not 202 | removed when KuberNix exits.
This means that you’re still able to 203 | access the log and configuration files for further processing. If you start 204 | the cluster again, then the cluster files will be reused. This is especially 205 | handy if you want to test configuration changes. 206 | 207 | #### Restart 208 | 209 | If you start KuberNix again in the same run directory, then it will re-use the 210 | configuration during the cluster bootstrapping process. This means that you 211 | can modify all data inside the run root for testing and debugging purposes. The 212 | startup of the individual components will be initiated by YAML files called 213 | `run.yml`, which are available inside the directories of the corresponding 214 | components. For example, etcd gets started via: 215 | 216 | ``` 217 | > cat kubernix-run/etcd/run.yml 218 | ``` 219 | 220 | ```yml 221 | --- 222 | command: /nix/store/qlbsv0hvi0j5qj3631dzl9srl75finlk-etcd-3.3.13-bin/bin/etcd 223 | args: 224 | - "--advertise-client-urls=https://127.0.0.1:2379" 225 | - "--client-cert-auth" 226 | - "--data-dir=/…/kubernix-run/etcd/run" 227 | - "--initial-advertise-peer-urls=https://127.0.0.1:2380" 228 | - "--initial-cluster-state=new" 229 | - "--initial-cluster-token=etcd-cluster" 230 | - "--initial-cluster=etcd=https://127.0.0.1:2380" 231 | - "--listen-client-urls=https://127.0.0.1:2379" 232 | - "--listen-peer-urls=https://127.0.0.1:2380" 233 | - "--name=etcd" 234 | - "--peer-client-cert-auth" 235 | - "--cert-file=/…/kubernix-run/pki/kubernetes.pem" 236 | - "--key-file=/…/kubernix-run/pki/kubernetes-key.pem" 237 | - "--peer-cert-file=/…/kubernix-run/pki/kubernetes.pem" 238 | - "--peer-key-file=/…/kubernix-run/pki/kubernetes-key.pem" 239 | - "--peer-trusted-ca-file=/…/kubernix-run/pki/ca.pem" 240 | - "--trusted-ca-file=/…/kubernix-run/pki/ca.pem" 241 | ``` 242 | 243 | ### Configuration 244 | 245 | KuberNix currently provides the following configuration options: 246 | 247 | | CLI argument | Description | Default | Environment Variable | 248 | | ------------------------- | --------------------------------------------------------------------------------- | -------------- | ---------------------------- | 249 | | `-r, --root` | Path where all the runtime data is stored | `kubernix-run` | `KUBERNIX_ROOT` | 250 | | `-l, --log-level` | Logging verbosity | `info` | `KUBERNIX_LOG_LEVEL` | 251 | | `-c, --cidr` | CIDR used for the cluster network | `10.10.0.0/16` | `KUBERNIX_CIDR` | 252 | | `-s, --shell` | The shell executable to be used | `$SHELL`/`sh` | `KUBERNIX_SHELL` | 253 | | `-e, --no-shell` | Do not spawn an interactive shell after bootstrap | `false` | `KUBERNIX_NO_SHELL` | 254 | | `-n, --nodes` | The number of nodes to be registered | `1` | `KUBERNIX_NODES` | 255 | | `-u, --container-runtime` | The container runtime to be used for the nodes, irrelevant if `nodes` equals `1` | `podman` | `KUBERNIX_CONTAINER_RUNTIME` | 256 | | `-o, --overlay` | Nix package overlay to be used | | `KUBERNIX_OVERLAY` | 257 | | `-p, --packages` | Additional Nix dependencies to be added to the environment | | `KUBERNIX_PACKAGES` | 258 | 259 | Please ensure that the CIDR does not overlap with existing local networks and 260 | that your setup has access to the internet. The CIDR will be automatically split 261 | up over the necessary cluster components. 262 |
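To illustrate how these options fit together, here is a small sketch of an invocation; the node count and CIDR below are purely example values, and the flags and environment variables are the ones documented in the table above. The same setup can be expressed either way:

```
$ sudo kubernix --nodes=3 --cidr=10.20.0.0/16

$ sudo KUBERNIX_NODES=3 KUBERNIX_CIDR=10.20.0.0/16 kubernix
```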
263 | #### Multinode Support 264 | 265 | It is possible to spawn multiple worker nodes, too. To do this, simply adjust 266 | the `-n, --nodes` command line argument as well as your preferred container 267 | runtime via `-u, --container-runtime`. The default runtime is [podman][41], 268 | but every other Docker drop-in replacement should work out of the box. 269 | 270 | #### Overlays 271 | 272 | Overlays provide a method to extend and change Nix derivations. This means that 273 | we’re able to change dependencies during the cluster bootstrapping process. For 274 | example, we can exchange the CRI-O version in use for a local checkout by 275 | writing this simple `overlay.nix`: 276 | 277 | ```nix 278 | self: super: { 279 | cri-o = super.cri-o.overrideAttrs(old: { 280 | src = ../path/to/go/src/github.com/cri-o/cri-o; 281 | }); 282 | } 283 | ``` 284 | 285 | Now we can run KuberNix with the `--overlay, -o` command line argument: 286 | 287 | ``` 288 | $ sudo kubernix --overlay overlay.nix 289 | [INFO kubernix] Nix environment not found, bootstrapping one 290 | [INFO kubernix] Using custom overlay 'overlay.nix' 291 | these derivations will be built: 292 | /nix/store/9jb43i2mqjc94mbx30d9nrx529w6lngw-cri-o-1.15.2.drv 293 | building '/nix/store/9jb43i2mqjc94mbx30d9nrx529w6lngw-cri-o-1.15.2.drv'... 294 | ``` 295 | 296 | This technique makes daily development of Kubernetes 297 | components easy, by simply switching them to local paths or trying out new versions. 298 | 299 | #### Additional Packages 300 | 301 | It is also possible to add additional packages to the KuberNix environment by 302 | specifying them via the `--packages, -p` command line parameter. This way you 303 | can easily utilize additional tools in a reproducible way. For example, when it 304 | comes to always using the same [Helm][20] version, you could simply run: 305 | 306 | ``` 307 | $ sudo kubernix -p kubernetes-helm 308 | [INFO ] Nix environment not found, bootstrapping one 309 | [INFO ] Bootstrapping cluster inside nix environment 310 | … 311 | > helm init 312 | > helm version 313 | Client: &version.Version{SemVer:"v2.14.3", GitCommit:"", GitTreeState:"clean"} 314 | Server: &version.Version{SemVer:"v2.14.3", GitCommit:"0e7f3b6637f7af8fcfddb3d2941fcc7cbebb0085", GitTreeState:"clean"} 315 | ``` 316 | 317 | All available packages are listed on the [official Nix index][21]. 318 | 319 | [20]: https://helm.sh 320 | [21]: https://nixos.org/nixos/packages.html?channel=nixpkgs-unstable 321 | 322 | ## Contributing 323 | 324 | You want to contribute to this project? Wow, thanks! So please just fork it and 325 | send me a pull request.
326 | -------------------------------------------------------------------------------- /contrib/crio-master-overlay.nix: -------------------------------------------------------------------------------- 1 | self: super: { 2 | cri-o = super.cri-o.overrideAttrs(old: { 3 | version = "master"; 4 | src = super.fetchFromGitHub { 5 | owner = "cri-o"; 6 | repo = "cri-o"; 7 | rev = "9a322651bb25a5f15410d555a8d33bcb04d7cfcf"; # master: 11 Nov 2019 11:54:03 AM CET 8 | sha256 = "0q23l397fp0waj107jy8wf87fkq034lp8z736c5wmagnqlxca4iz"; 9 | }; 10 | }); 11 | } 12 | -------------------------------------------------------------------------------- /contrib/prepare-system: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # The system preparation step needs to be done explicitly if running inside a 3 | # container 4 | set -euo pipefail 5 | 6 | modprobe overlay 7 | modprobe ip_conntrack 8 | modprobe br_netfilter 9 | 10 | sysctl net.bridge.bridge-nf-call-ip6tables=1 11 | sysctl net.bridge.bridge-nf-call-iptables=1 12 | sysctl net.ipv4.conf.all.route_localnet=1 13 | sysctl net.ipv4.ip_forward=1 14 | -------------------------------------------------------------------------------- /nix/build.nix: -------------------------------------------------------------------------------- 1 | let 2 | rustCommit = "8c007b60731c07dd7a052cce508de3bb1ae849b4"; 3 | overlay = import ( 4 | builtins.fetchTarball "https://github.com/mozilla/nixpkgs-mozilla/archive/${rustCommit}.tar.gz" 5 | ); 6 | pkgs = import ./nixpkgs.nix { 7 | overlays = [ overlay ]; 8 | }; 9 | ruststable = (pkgs.latest.rustChannels.stable.rust.override { 10 | extensions = [ 11 | "clippy-preview" 12 | "rustfmt-preview" 13 | ]; 14 | }); 15 | deps = with pkgs; (import ./default.nix) ++ [ 16 | (pkgs.callPackage ./derivations/cargo-kcov.nix { }) 17 | binutils 18 | coreutils 19 | curl 20 | gcc 21 | git 22 | kcov 23 | nix-prefetch-git 24 | procps 25 | ruststable 26 | ]; 27 | in 28 | deps 29 | -------------------------------------------------------------------------------- /nix/default.nix: -------------------------------------------------------------------------------- 1 | let 2 | pkgs = import ./nixpkgs.nix { overlays = [ (import ./overlay.nix) ]; }; 3 | packages = with pkgs; [ 4 | cacert 5 | cfssl 6 | cni-plugins 7 | conmon 8 | conntrack-tools 9 | cri-o 10 | cri-tools 11 | etcd 12 | iproute 13 | iptables 14 | kmod 15 | kubernetes 16 | kubectl 17 | podman 18 | runc 19 | socat 20 | sysctl 21 | utillinux 22 | ] ++ [ /* PACKAGES */ ]; 23 | in 24 | packages 25 | -------------------------------------------------------------------------------- /nix/derivations/cargo-kcov.nix: -------------------------------------------------------------------------------- 1 | { lib, rustPlatform, fetchFromGitHub }: 2 | 3 | rustPlatform.buildRustPackage rec { 4 | pname = "cargo-kcov"; 5 | version = "0.5.2"; 6 | 7 | src = fetchFromGitHub { 8 | owner = "kennytm"; 9 | repo = "cargo-kcov"; 10 | rev = "v${version}"; 11 | sha256 = "0hqplgj3i8js42v2kj44khk543a93sk3n6wlfpv3c84pdqlm29br"; 12 | }; 13 | 14 | doCheck = false; 15 | cargoSha256 = "1dzm33cfriwgq4zvg6l6y76d5lp9hpcywdkwpl92qyjqg1hx8a1w"; 16 | } 17 | -------------------------------------------------------------------------------- /nix/nixpkgs.json: -------------------------------------------------------------------------------- 1 | { 2 | "url": "https://github.com/nixos/nixpkgs", 3 | "rev": "f0a821afc5c66c9878665073c2b247790f269088", 4 | "date": "2021-01-26T08:17:31+01:00", 5 | "path": 
"/nix/store/ppyvywk3fq8m2s6flqlx1zbn1926i0xl-nixpkgs", 6 | "sha256": "1gjfj8lzz3x2z7dxi4dk9bcdh4ix0q9053wj9m4vpphv4rwjs2gh", 7 | "fetchSubmodules": false, 8 | "deepClone": false, 9 | "leaveDotGit": false 10 | } 11 | -------------------------------------------------------------------------------- /nix/nixpkgs.nix: -------------------------------------------------------------------------------- 1 | let 2 | json = builtins.fromJSON (builtins.readFile ./nixpkgs.json); 3 | nixpkgs = import (builtins.fetchTarball { 4 | name = "nixos-unstable"; 5 | url = "${json.url}/archive/${json.rev}.tar.gz"; 6 | inherit (json) sha256; 7 | }); 8 | in 9 | nixpkgs 10 | -------------------------------------------------------------------------------- /nix/overlay.nix: -------------------------------------------------------------------------------- 1 | self: super: { } 2 | -------------------------------------------------------------------------------- /src/apiserver.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | config::Config, 3 | encryptionconfig::EncryptionConfig, 4 | kubectl::Kubectl, 5 | network::Network, 6 | pki::Pki, 7 | process::{Process, ProcessState, Stoppable}, 8 | }; 9 | use anyhow::{Context, Result}; 10 | use log::debug; 11 | use std::{ 12 | fs::{self, create_dir_all}, 13 | path::Path, 14 | }; 15 | 16 | pub struct ApiServer { 17 | process: Process, 18 | } 19 | 20 | impl ApiServer { 21 | pub fn start( 22 | config: &Config, 23 | network: &Network, 24 | pki: &Pki, 25 | encryptionconfig: &EncryptionConfig, 26 | kubectl: &Kubectl, 27 | ) -> ProcessState { 28 | let dir = config.root().join("apiserver"); 29 | create_dir_all(&dir)?; 30 | 31 | let mut process = Process::start( 32 | &dir, 33 | "API Server", 34 | "kube-apiserver", 35 | &[ 36 | "--allow-privileged=true", 37 | "--audit-log-maxage=30", 38 | "--audit-log-maxbackup=3", 39 | "--audit-log-maxsize=100", 40 | &format!("--audit-log-path={}", dir.join("audit.log").display()), 41 | "--authorization-mode=Node,RBAC", 42 | "--bind-address=0.0.0.0", 43 | &format!("--client-ca-file={}", pki.ca().cert().display()), 44 | &format!("--etcd-cafile={}", pki.ca().cert().display()), 45 | &format!("--etcd-certfile={}", pki.apiserver().cert().display()), 46 | &format!("--etcd-keyfile={}", pki.apiserver().key().display()), 47 | &format!("--etcd-servers=https://{}", network.etcd_client()), 48 | "--event-ttl=1h", 49 | &format!( 50 | "--encryption-provider-config={}", 51 | encryptionconfig.path().display() 52 | ), 53 | &format!( 54 | "--kubelet-certificate-authority={}", 55 | pki.ca().cert().display() 56 | ), 57 | &format!( 58 | "--kubelet-client-certificate={}", 59 | pki.apiserver().cert().display() 60 | ), 61 | &format!("--kubelet-client-key={}", pki.apiserver().key().display()), 62 | "--runtime-config=api/all=true", 63 | &format!( 64 | "--service-account-key-file={}", 65 | pki.service_account().cert().display() 66 | ), 67 | &format!("--service-cluster-ip-range={}", network.service_cidr()), 68 | &format!("--tls-cert-file={}", pki.apiserver().cert().display()), 69 | &format!("--tls-private-key-file={}", pki.apiserver().key().display()), 70 | "--v=2", 71 | ], 72 | )?; 73 | 74 | process.wait_ready("sending update to cc")?; 75 | Self::setup_rbac(&dir, kubectl)?; 76 | Ok(Box::new(Self { process })) 77 | } 78 | 79 | fn setup_rbac(dir: &Path, kubectl: &Kubectl) -> Result<()> { 80 | debug!("Creating API Server RBAC rule for kubelet"); 81 | let file = dir.join("rbac.yml"); 82 | 83 | if !file.exists() { 84 | fs::write(&file, 
include_str!("assets/apiserver.yml"))?; 85 | } 86 | 87 | kubectl 88 | .apply(&file) 89 | .context("Unable to deploy API server RBAC rules")?; 90 | 91 | debug!("API Server RBAC rule created"); 92 | Ok(()) 93 | } 94 | } 95 | 96 | impl Stoppable for ApiServer { 97 | fn stop(&mut self) -> Result<()> { 98 | self.process.stop() 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /src/assets/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docker.io/nixos/nix:latest 2 | COPY {nix} {root} 3 | RUN nix run -f {root} -c echo bootstrap done 4 | ENTRYPOINT [ "nix", "run", "-f", "{root}", "-c" ] 5 | -------------------------------------------------------------------------------- /src/assets/apiserver.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | annotations: 6 | rbac.authorization.kubernetes.io/autoupdate: "true" 7 | labels: 8 | kubernetes.io/bootstrapping: rbac-defaults 9 | name: system:kube-apiserver-to-kubelet 10 | rules: 11 | - apiGroups: 12 | - "" 13 | resources: 14 | - nodes/proxy 15 | - nodes/stats 16 | - nodes/log 17 | - nodes/spec 18 | - nodes/metrics 19 | verbs: 20 | - "*" 21 | --- 22 | apiVersion: rbac.authorization.k8s.io/v1 23 | kind: ClusterRoleBinding 24 | metadata: 25 | name: system:kube-apiserver 26 | namespace: "" 27 | roleRef: 28 | apiGroup: rbac.authorization.k8s.io 29 | kind: ClusterRole 30 | name: system:kube-apiserver-to-kubelet 31 | subjects: 32 | - apiGroup: rbac.authorization.k8s.io 33 | kind: User 34 | name: kubernetes 35 | -------------------------------------------------------------------------------- /src/assets/coredns.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: coredns 5 | namespace: kube-system 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1 8 | kind: ClusterRole 9 | metadata: 10 | labels: 11 | kubernetes.io/bootstrapping: rbac-defaults 12 | name: system:coredns 13 | rules: 14 | - apiGroups: 15 | - "" 16 | resources: 17 | - endpoints 18 | - services 19 | - pods 20 | - namespaces 21 | verbs: 22 | - list 23 | - watch 24 | - apiGroups: 25 | - "" 26 | resources: 27 | - nodes 28 | verbs: 29 | - get 30 | --- 31 | apiVersion: rbac.authorization.k8s.io/v1 32 | kind: ClusterRoleBinding 33 | metadata: 34 | annotations: 35 | rbac.authorization.kubernetes.io/autoupdate: "true" 36 | labels: 37 | kubernetes.io/bootstrapping: rbac-defaults 38 | name: system:coredns 39 | roleRef: 40 | apiGroup: rbac.authorization.k8s.io 41 | kind: ClusterRole 42 | name: system:coredns 43 | subjects: 44 | - kind: ServiceAccount 45 | name: coredns 46 | namespace: kube-system 47 | --- 48 | apiVersion: v1 49 | kind: ConfigMap 50 | metadata: 51 | name: coredns 52 | namespace: kube-system 53 | data: 54 | Corefile: | 55 | .:53 {{ 56 | errors 57 | health {{ 58 | lameduck 5s 59 | }} 60 | ready 61 | kubernetes cluster.local in-addr.arpa ip6.arpa {{ 62 | pods insecure 63 | fallthrough in-addr.arpa ip6.arpa 64 | ttl 30 65 | }} 66 | forward . 
/etc/resolv.conf {{ 67 | max_concurrent 1000 68 | }} 69 | prometheus :9153 70 | cache 30 71 | loop 72 | reload 73 | loadbalance 74 | }} 75 | --- 76 | apiVersion: apps/v1 77 | kind: Deployment 78 | metadata: 79 | name: coredns 80 | namespace: kube-system 81 | labels: 82 | k8s-app: coredns 83 | kubernetes.io/name: "CoreDNS" 84 | spec: 85 | strategy: 86 | type: RollingUpdate 87 | rollingUpdate: 88 | maxUnavailable: 1 89 | selector: 90 | matchLabels: 91 | k8s-app: coredns 92 | template: 93 | metadata: 94 | labels: 95 | k8s-app: coredns 96 | spec: 97 | securityContext: 98 | seccompProfile: 99 | type: RuntimeDefault 100 | priorityClassName: system-cluster-critical 101 | serviceAccountName: coredns 102 | affinity: 103 | podAntiAffinity: 104 | preferredDuringSchedulingIgnoredDuringExecution: 105 | - weight: 100 106 | podAffinityTerm: 107 | labelSelector: 108 | matchExpressions: 109 | - key: k8s-app 110 | operator: In 111 | values: ["coredns"] 112 | topologyKey: kubernetes.io/hostname 113 | tolerations: 114 | - key: "CriticalAddonsOnly" 115 | operator: "Exists" 116 | nodeSelector: 117 | kubernetes.io/os: linux 118 | containers: 119 | - name: coredns 120 | image: k8s.gcr.io/coredns:1.7.0 121 | imagePullPolicy: IfNotPresent 122 | resources: 123 | limits: 124 | memory: 170Mi 125 | requests: 126 | cpu: 100m 127 | memory: 70Mi 128 | args: [ "-conf", "/etc/coredns/Corefile" ] 129 | volumeMounts: 130 | - name: config-volume 131 | mountPath: /etc/coredns 132 | readOnly: true 133 | ports: 134 | - containerPort: 53 135 | name: dns 136 | protocol: UDP 137 | - containerPort: 53 138 | name: dns-tcp 139 | protocol: TCP 140 | - containerPort: 9153 141 | name: metrics 142 | protocol: TCP 143 | livenessProbe: 144 | httpGet: 145 | path: /health 146 | port: 8080 147 | scheme: HTTP 148 | initialDelaySeconds: 60 149 | timeoutSeconds: 5 150 | successThreshold: 1 151 | failureThreshold: 5 152 | readinessProbe: 153 | httpGet: 154 | path: /ready 155 | port: 8181 156 | scheme: HTTP 157 | securityContext: 158 | allowPrivilegeEscalation: false 159 | capabilities: 160 | add: 161 | - NET_BIND_SERVICE 162 | drop: 163 | - all 164 | readOnlyRootFilesystem: true 165 | dnsPolicy: Default 166 | volumes: 167 | - name: config-volume 168 | configMap: 169 | name: coredns 170 | items: 171 | - key: Corefile 172 | path: Corefile 173 | --- 174 | apiVersion: v1 175 | kind: Service 176 | metadata: 177 | name: coredns 178 | namespace: kube-system 179 | annotations: 180 | prometheus.io/port: "9153" 181 | prometheus.io/scrape: "true" 182 | labels: 183 | k8s-app: coredns 184 | kubernetes.io/cluster-service: "true" 185 | kubernetes.io/name: "CoreDNS" 186 | spec: 187 | selector: 188 | k8s-app: coredns 189 | clusterIP: {} 190 | ports: 191 | - name: dns 192 | port: 53 193 | protocol: UDP 194 | - name: dns-tcp 195 | port: 53 196 | protocol: TCP 197 | - name: metrics 198 | port: 9153 199 | protocol: TCP 200 | -------------------------------------------------------------------------------- /src/assets/crio.conf: -------------------------------------------------------------------------------- 1 | # The CRI-O configuration file specifies all of the available configuration 2 | # options and command-line flags for the crio(8) OCI Kubernetes Container Runtime 3 | # daemon, but in a TOML format that can be more easily modified and versioned. 4 | # 5 | # Please refer to crio.conf(5) for details of all configuration options. 6 | 7 | # CRI-O supports partial configuration reload during runtime, which can be 8 | # done by sending SIGHUP to the running process. 
Currently supported options 9 | # are explicitly mentioned with: 'This option supports live configuration 10 | # reload'. 11 | 12 | # CRI-O reads its storage defaults from the containers-storage.conf(5) file 13 | # located at /etc/containers/storage.conf. Modify this storage configuration if 14 | # you want to change the system's defaults. If you want to modify storage just 15 | # for CRI-O, you can change the storage configuration options here. 16 | [crio] 17 | 18 | # Path to the "root directory". CRI-O stores all of its data, including 19 | # containers images, in this directory. 20 | root = "{containers_root}" 21 | 22 | # Path to the "run directory". CRI-O stores all of its state in this directory. 23 | runroot = "{containers_runroot}" 24 | 25 | # Storage driver used to manage the storage of images and containers. Please 26 | # refer to containers-storage.conf(5) to see all available storage drivers. 27 | storage_driver = "{storage_driver}" 28 | 29 | # List to pass options to the storage driver. Please refer to 30 | # containers-storage.conf(5) to see all available storage options. 31 | #storage_option = [ 32 | #] 33 | 34 | # The default log directory where all logs will go unless directly specified by 35 | # the kubelet. The log directory specified must be an absolute directory. 36 | log_dir = "{log_dir}" 37 | 38 | # Location for CRI-O to lay down the version file 39 | version_file = "{version_file}" 40 | 41 | # The crio.api table contains settings for the kubelet/gRPC interface. 42 | [crio.api] 43 | 44 | # Path to AF_LOCAL socket on which CRI-O will listen. 45 | listen = "{listen}" 46 | 47 | # IP address on which the stream server will listen. 48 | stream_address = "127.0.0.1" 49 | 50 | # The port on which the stream server will listen. If the port is set to "0", then 51 | # CRI-O will allocate a random free port number. 52 | stream_port = "0" 53 | 54 | # Enable encrypted TLS transport of the stream server. 55 | stream_enable_tls = false 56 | 57 | # Path to the x509 certificate file used to serve the encrypted stream. This 58 | # file can change, and CRI-O will automatically pick up the changes within 5 59 | # minutes. 60 | stream_tls_cert = "" 61 | 62 | # Path to the key file used to serve the encrypted stream. This file can 63 | # change and CRI-O will automatically pick up the changes within 5 minutes. 64 | stream_tls_key = "" 65 | 66 | # Path to the x509 CA(s) file used to verify and authenticate client 67 | # communication with the encrypted stream. This file can change and CRI-O will 68 | # automatically pick up the changes within 5 minutes. 69 | stream_tls_ca = "" 70 | 71 | # Maximum grpc send message size in bytes. If not set or <=0, then CRI-O will default to 16 * 1024 * 1024. 72 | grpc_max_send_msg_size = 16777216 73 | 74 | # Maximum grpc receive message size. If not set or <= 0, then CRI-O will default to 16 * 1024 * 1024. 75 | grpc_max_recv_msg_size = 16777216 76 | 77 | # The crio.runtime table contains settings pertaining to the OCI runtime used 78 | # and options for how to set up and manage the OCI runtime. 79 | [crio.runtime] 80 | 81 | # A list of ulimits to be set in containers by default, specified as 82 | # "=:", for example: 83 | # "nofile=1024:2048" 84 | # If nothing is set here, settings will be inherited from the CRI-O daemon 85 | #default_ulimits = [ 86 | #] 87 | 88 | # default_runtime is the _name_ of the OCI runtime to be used as the default. 89 | # The name is matched against the runtimes map below. 
90 | default_runtime = "local-runc" 91 | 92 | # If true, the runtime will not use pivot_root, but instead use MS_MOVE. 93 | no_pivot = false 94 | 95 | # decryption_keys_path is the path where the keys required for 96 | # image decryption are stored. This option supports live configuration reload. 97 | decryption_keys_path = "/etc/crio/keys/" 98 | 99 | # Path to the conmon binary, used for monitoring the OCI runtime. 100 | # Will be searched for using $PATH if empty. 101 | conmon = "{conmon}" 102 | 103 | # Cgroup setting for conmon 104 | conmon_cgroup = "pod" 105 | 106 | # Environment variable list for the conmon process, used for passing necessary 107 | # environment variables to conmon or the runtime. 108 | conmon_env = [ 109 | "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", 110 | ] 111 | 112 | # If true, SELinux will be used for pod separation on the host. 113 | selinux = false 114 | 115 | # Path to the seccomp.json profile which is used as the default seccomp profile 116 | # for the runtime. If not specified, then the internal default seccomp profile 117 | # will be used. This option supports live configuration reload. 118 | seccomp_profile = "" 119 | 120 | # Used to change the name of the default AppArmor profile of CRI-O. The default 121 | # profile name is "crio-default". This profile only takes effect if the user 122 | # does not specify a profile via the Kubernetes Pod's metadata annotation. If 123 | # the profile is set to "unconfined", then this equals to disabling AppArmor. 124 | # This option supports live configuration reload. 125 | apparmor_profile = "crio-default" 126 | 127 | # Cgroup management implementation used for the runtime. 128 | cgroup_manager = "cgroupfs" 129 | 130 | # List of default capabilities for containers. If it is empty or commented out, 131 | # only the capabilities defined in the containers json file by the user/kube 132 | # will be added. 133 | default_capabilities = [ 134 | "CHOWN", 135 | "DAC_OVERRIDE", 136 | "FSETID", 137 | "FOWNER", 138 | "SETGID", 139 | "SETUID", 140 | "SETPCAP", 141 | "NET_BIND_SERVICE", 142 | "KILL", 143 | ] 144 | 145 | # List of default sysctls. If it is empty or commented out, only the sysctls 146 | # defined in the container json file by the user/kube will be added. 147 | default_sysctls = [ 148 | ] 149 | 150 | # List of additional devices. specified as 151 | # "::", for example: "--device=/dev/sdc:/dev/xvdc:rwm". 152 | #If it is empty or commented out, only the devices 153 | # defined in the container json file by the user/kube will be added. 154 | additional_devices = [ 155 | ] 156 | 157 | # Path to OCI hooks directories for automatically executed hooks. If one of the 158 | # directories does not exist, then CRI-O will automatically skip them. 159 | hooks_dir = [ 160 | "/usr/share/containers/oci/hooks.d", 161 | ] 162 | 163 | # List of default mounts for each container. **Deprecated:** this option will 164 | # be removed in future versions in favor of default_mounts_file. 165 | default_mounts = [ 166 | ] 167 | 168 | # Path to the file specifying the defaults mounts for each container. The 169 | # format of the config is /SRC:/DST, one mount per line. Notice that CRI-O reads 170 | # its default mounts from the following two files: 171 | # 172 | # 1) /etc/containers/mounts.conf (i.e., default_mounts_file): This is the 173 | # override file, where users can either add in their own default mounts, or 174 | # override the default mounts shipped with the package. 
175 | # 176 | # 2) /usr/share/containers/mounts.conf: This is the default file read for 177 | # mounts. If you want CRI-O to read from a different, specific mounts file, 178 | # you can change the default_mounts_file. Note, if this is done, CRI-O will 179 | # only add mounts it finds in this file. 180 | # 181 | #default_mounts_file = "" 182 | 183 | # Maximum number of processes allowed in a container. 184 | pids_limit = 1024 185 | 186 | # Maximum sized allowed for the container log file. Negative numbers indicate 187 | # that no size limit is imposed. If it is positive, it must be >= 8192 to 188 | # match/exceed conmon's read buffer. The file is truncated and re-opened so the 189 | # limit is never exceeded. 190 | log_size_max = -1 191 | 192 | # Whether container output should be logged to journald in addition to the kuberentes log file 193 | log_to_journald = false 194 | 195 | # Path to directory in which container exit files are written to by conmon. 196 | container_exits_dir = "{exits_dir}" 197 | 198 | # Path to directory for container attach sockets. 199 | container_attach_socket_dir = "/var/run/crio" 200 | 201 | # The prefix to use for the source of the bind mounts. 202 | bind_mount_prefix = "" 203 | 204 | # If set to true, all containers will run in read-only mode. 205 | read_only = false 206 | 207 | # Changes the verbosity of the logs based on the level it is set to. Options 208 | # are fatal, panic, error, warn, info, and debug. This option supports live 209 | # configuration reload. 210 | log_level = "debug" 211 | 212 | # Filter the log messages by the provided regular expression. 213 | # This option supports live configuration reload. 214 | log_filter = "" 215 | 216 | # The UID mappings for the user namespace of each container. A range is 217 | # specified in the form containerUID:HostUID:Size. Multiple ranges must be 218 | # separated by comma. 219 | uid_mappings = "" 220 | 221 | # The GID mappings for the user namespace of each container. A range is 222 | # specified in the form containerGID:HostGID:Size. Multiple ranges must be 223 | # separated by comma. 224 | gid_mappings = "" 225 | 226 | # The minimal amount of time in seconds to wait before issuing a timeout 227 | # regarding the proper termination of the container. The lowest possible 228 | # value is 30s, whereas lower values are not considered by CRI-O. 229 | ctr_stop_timeout = 30 230 | 231 | # **DEPRECATED** this option is being replaced by manage_ns_lifecycle, which is described below. 232 | # manage_network_ns_lifecycle = false 233 | 234 | # manage_ns_lifecycle determines whether we pin and remove namespaces 235 | # and manage their lifecycle 236 | manage_ns_lifecycle = false 237 | 238 | # The directory where the state of the managed namespaces gets tracked. 239 | # Only used when manage_ns_lifecycle is true. 240 | namespaces_dir = "/var/run" 241 | 242 | # pinns_path is the path to find the pinns binary, which is needed to manage namespace lifecycle 243 | pinns_path = "" 244 | 245 | # The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes. 246 | # The runtime to use is picked based on the runtime_handler provided by the CRI. 247 | # If no runtime_handler is provided, the runtime will be picked based on the level 248 | # of trust of the workload. 
Each entry in the table should follow the format: 249 | # 250 | #[crio.runtime.runtimes.runtime-handler] 251 | # runtime_path = "/path/to/the/executable" 252 | # runtime_type = "oci" 253 | # runtime_root = "/path/to/the/root" 254 | # 255 | # Where: 256 | # - runtime-handler: name used to identify the runtime 257 | # - runtime_path (optional, string): absolute path to the runtime executable in 258 | # the host filesystem. If omitted, the runtime-handler identifier should match 259 | # the runtime executable name, and the runtime executable should be placed 260 | # in $PATH. 261 | # - runtime_type (optional, string): type of runtime, one of: "oci", "vm". If 262 | # omitted, an "oci" runtime is assumed. 263 | # - runtime_root (optional, string): root directory for storage of containers 264 | # state. 265 | 266 | [crio.runtime.runtimes.local-runc] 267 | runtime_path = "{runtime_path}" 268 | runtime_root = "{runtime_root}" 269 | runtime_type = "oci" 270 | 271 | # Kata Containers is an OCI runtime, where containers are run inside lightweight 272 | # VMs. Kata provides additional isolation towards the host, minimizing the host attack 273 | # surface and mitigating the consequences of containers breakout. 274 | 275 | # Kata Containers with the default configured VMM 276 | #[crio.runtime.runtimes.kata-runtime] 277 | 278 | # Kata Containers with the QEMU VMM 279 | #[crio.runtime.runtimes.kata-qemu] 280 | 281 | # Kata Containers with the Firecracker VMM 282 | #[crio.runtime.runtimes.kata-fc] 283 | 284 | # The crio.image table contains settings pertaining to the management of OCI images. 285 | # 286 | # CRI-O reads its configured registries defaults from the system wide 287 | # containers-registries.conf(5) located in /etc/containers/registries.conf. If 288 | # you want to modify just CRI-O, you can change the registries configuration in 289 | # this file. Otherwise, leave insecure_registries and registries commented out to 290 | # use the system's defaults from /etc/containers/registries.conf. 291 | [crio.image] 292 | 293 | # Default transport for pulling images from a remote container storage. 294 | default_transport = "docker://" 295 | 296 | # The path to a file containing credentials necessary for pulling images from 297 | # secure registries. The file is similar to that of /var/lib/kubelet/config.json 298 | global_auth_file = "" 299 | 300 | # The image used to instantiate infra containers. 301 | # This option supports live configuration reload. 302 | pause_image = "k8s.gcr.io/pause:3.2" 303 | 304 | # The path to a file containing credentials specific for pulling the pause_image from 305 | # above. The file is similar to that of /var/lib/kubelet/config.json 306 | # This option supports live configuration reload. 307 | pause_image_auth_file = "" 308 | 309 | # The command to run to have a container stay in the paused state. 310 | # When explicitly set to "", it will fallback to the entrypoint and command 311 | # specified in the pause image. When commented out, it will fallback to the 312 | # default: "/pause". This option supports live configuration reload. 313 | pause_command = "/pause" 314 | 315 | # Path to the file which decides what sort of policy we use when deciding 316 | # whether or not to trust an image that we've pulled. It is not recommended that 317 | # this option be used, as the default behavior of using the system-wide default 318 | # policy (i.e., /etc/containers/policy.json) is most often preferred. Please 319 | # refer to containers-policy.json(5) for more details. 
320 | signature_policy = "{signature_policy}" 321 | 322 | # List of registries to skip TLS verification for pulling images. Please 323 | # consider configuring the registries via /etc/containers/registries.conf before 324 | # changing them here. 325 | #insecure_registries = "[]" 326 | 327 | # Controls how image volumes are handled. The valid values are mkdir, bind and 328 | # ignore; the latter will ignore volumes entirely. 329 | image_volumes = "mkdir" 330 | 331 | # List of registries to be used when pulling an unqualified image (e.g., 332 | # "alpine:latest"). By default, registries is set to "docker.io" for 333 | # compatibility reasons. Depending on your workload and usecase you may add more 334 | # registries (e.g., "quay.io", "registry.fedoraproject.org", 335 | # "registry.opensuse.org", etc.). 336 | registries = [ 337 | "docker.io", 338 | "quay.io", 339 | ] 340 | 341 | # The crio.network table containers settings pertaining to the management of 342 | # CNI plugins. 343 | [crio.network] 344 | 345 | # The default CNI network name to be selected. If not set or "", then 346 | # CRI-O will pick-up the first one found in network_dir. 347 | # cni_default_network = "" 348 | 349 | # Path to the directory where CNI configuration files are located. 350 | network_dir = "{network_dir}" 351 | 352 | # Paths to directories where CNI plugin binaries are located. 353 | plugin_dirs = [ "{plugin_dir}" ] 354 | 355 | # A necessary configuration for Prometheus based metrics retrieval 356 | [crio.metrics] 357 | 358 | # Globally enable or disable metrics support. 359 | enable_metrics = false 360 | 361 | # The port on which the metrics server will listen. 362 | metrics_port = 9090 363 | -------------------------------------------------------------------------------- /src/assets/encryptionconfig.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: EncryptionConfig 3 | apiVersion: v1 4 | resources: 5 | - resources: 6 | - secrets 7 | providers: 8 | - aescbc: 9 | keys: 10 | - name: key1 11 | secret: {} 12 | - identity: {{}} 13 | -------------------------------------------------------------------------------- /src/assets/kubelet.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: KubeletConfiguration 3 | apiVersion: kubelet.config.k8s.io/v1beta1 4 | authentication: 5 | anonymous: 6 | enabled: false 7 | webhook: 8 | enabled: true 9 | x509: 10 | clientCAFile: "{ca}" 11 | authorization: 12 | mode: Webhook 13 | clusterDomain: "cluster.local" 14 | clusterDNS: 15 | - "{dns}" 16 | podCIDR: "{cidr}" 17 | runtimeRequestTimeout: "15m" 18 | tlsCertFile: "{cert}" 19 | tlsPrivateKeyFile: "{key}" 20 | failSwapOn: false 21 | featureGates: 22 | DevicePlugins: false 23 | LocalStorageCapacityIsolation: false 24 | port: {port} 25 | healthzPort: {healthzPort} 26 | -------------------------------------------------------------------------------- /src/assets/podman-bridge.json: -------------------------------------------------------------------------------- 1 | { 2 | "cniVersion": "0.4.0", 3 | "name": "podman", 4 | "plugins": [ 5 | { 6 | "type": "bridge", 7 | "bridge": "cni-podman0", 8 | "isGateway": true, 9 | "ipMasq": true, 10 | "ipam": { 11 | "type": "host-local", 12 | "routes": [{ "dst": "0.0.0.0/0" }], 13 | "ranges": [ 14 | [ 15 | { 16 | "subnet": "172.88.0.0/16", 17 | "gateway": "172.88.0.1" 18 | } 19 | ] 20 | ] 21 | } 22 | }, 23 | { "type": "portmap", "capabilities": { "portMappings": true } }, 24 | { "type": "firewall", 
"backend": "iptables" } 25 | ] 26 | } 27 | -------------------------------------------------------------------------------- /src/assets/policy.json: -------------------------------------------------------------------------------- 1 | { "default": [{ "type": "insecureAcceptAnything" }] } 2 | -------------------------------------------------------------------------------- /src/assets/proxy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: KubeProxyConfiguration 3 | apiVersion: kubeproxy.config.k8s.io/v1alpha1 4 | clientConnection: 5 | kubeconfig: "{}" 6 | mode: "iptables" 7 | clusterCIDR: "{}" 8 | -------------------------------------------------------------------------------- /src/assets/scheduler.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kubescheduler.config.k8s.io/v1beta1 3 | kind: KubeSchedulerConfiguration 4 | clientConnection: 5 | kubeconfig: "{}" 6 | leaderElection: 7 | leaderElect: false 8 | -------------------------------------------------------------------------------- /src/config.rs: -------------------------------------------------------------------------------- 1 | //! Configuration related structures 2 | use crate::{podman::Podman, system::System}; 3 | use anyhow::{Context, Result}; 4 | use clap::{AppSettings, Clap}; 5 | use getset::{CopyGetters, Getters}; 6 | use ipnetwork::Ipv4Network; 7 | use log::LevelFilter; 8 | use serde::{Deserialize, Serialize}; 9 | use std::{ 10 | fs::{self, canonicalize, create_dir_all, read_to_string}, 11 | path::PathBuf, 12 | }; 13 | 14 | #[derive(Clap, CopyGetters, Getters, Deserialize, Serialize)] 15 | #[serde(rename_all = "kebab-case")] 16 | #[clap( 17 | after_help("More info at: https://github.com/saschagrunert/kubernix"), 18 | author("Sascha Grunert "), 19 | global_setting(AppSettings::ColoredHelp) 20 | )] 21 | /// The global configuration 22 | pub struct Config { 23 | #[get = "pub"] 24 | #[clap(subcommand)] 25 | /// All available subcommands 26 | subcommand: Option, 27 | 28 | #[get = "pub"] 29 | #[clap( 30 | default_value("kubernix-run"), 31 | env("KUBERNIX_RUN"), 32 | global(true), 33 | long("root"), 34 | short('r'), 35 | value_name("PATH") 36 | )] 37 | /// Path where all the runtime data is stored 38 | root: PathBuf, 39 | 40 | #[get_copy = "pub"] 41 | #[clap( 42 | default_value("info"), 43 | env("KUBERNIX_LOG_LEVEL"), 44 | long("log-level"), 45 | possible_values(&["trace", "debug", "info", "warn", "error", "off"]), 46 | short('l'), 47 | value_name("LEVEL") 48 | )] 49 | /// The logging level of the application 50 | log_level: LevelFilter, 51 | 52 | #[get_copy = "pub"] 53 | #[clap( 54 | default_value("10.10.0.0/16"), 55 | env("KUBERNIX_CIDR"), 56 | long("cidr"), 57 | short('c'), 58 | value_name("CIDR") 59 | )] 60 | /// The CIDR used for the cluster 61 | cidr: Ipv4Network, 62 | 63 | #[get = "pub"] 64 | #[clap( 65 | env("KUBERNIX_OVERLAY"), 66 | long("overlay"), 67 | short('o'), 68 | value_name("PATH") 69 | )] 70 | /// The Nix package overlay to be used 71 | overlay: Option, 72 | 73 | #[get = "pub"] 74 | #[clap( 75 | env("KUBERNIX_PACKAGES"), 76 | long("packages"), 77 | multiple(true), 78 | short('p'), 79 | value_name("PACKAGE") 80 | )] 81 | /// Additional dependencies to be added to the environment 82 | packages: Vec, 83 | 84 | #[get = "pub"] 85 | #[clap(env("KUBERNIX_SHELL"), long("shell"), short('s'), value_name("SHELL"))] 86 | /// The shell executable to be used, defaults to $SHELL, fallback is `sh` 87 | shell: Option, 88 
| 89 | #[get_copy = "pub"] 90 | #[clap( 91 | default_value("1"), 92 | env("KUBERNIX_NODES"), 93 | long("nodes"), 94 | short('n'), 95 | value_name("NODES") 96 | )] 97 | /// The number of nodes to be registered 98 | nodes: u8, 99 | 100 | #[get = "pub"] 101 | #[clap( 102 | env("KUBERNIX_CONTAINER_RUNTIME"), 103 | long("container-runtime"), 104 | default_value(Podman::EXECUTABLE), 105 | requires("nodes"), 106 | short('u'), 107 | value_name("RUNTIME") 108 | )] 109 | /// The container runtime to be used for the nodes, irrelevant if `nodes` equals to `1` 110 | container_runtime: String, 111 | 112 | #[get = "pub"] 113 | #[clap( 114 | conflicts_with("shell"), 115 | env("KUBERNIX_NO_SHELL"), 116 | long("no-shell"), 117 | short('e'), 118 | takes_value(false) 119 | )] 120 | /// Do not spawn an interactive shell after bootstrap 121 | no_shell: bool, 122 | } 123 | 124 | /// Possible subcommands 125 | #[derive(Clap, Deserialize, Serialize)] 126 | pub enum SubCommand { 127 | /// Spawn an additional shell session 128 | #[clap(name("shell"))] 129 | Shell, 130 | } 131 | 132 | impl Default for Config { 133 | fn default() -> Self { 134 | let mut config = Self::parse(); 135 | if config.shell.is_none() { 136 | config.shell = System::shell().ok(); 137 | } 138 | config 139 | } 140 | } 141 | 142 | impl Config { 143 | const FILENAME: &'static str = "kubernix.toml"; 144 | 145 | /// Make the configs root path absolute 146 | pub fn canonicalize_root(&mut self) -> Result<()> { 147 | self.create_root_dir()?; 148 | self.root = 149 | canonicalize(self.root()).context("Unable to canonicalize config root directory")?; 150 | Ok(()) 151 | } 152 | 153 | /// Write the current configuration to the internal set root path 154 | pub fn to_file(&self) -> Result<()> { 155 | self.create_root_dir()?; 156 | fs::write(self.root().join(Self::FILENAME), toml::to_string(&self)?) 157 | .context("Unable to write configuration to file")?; 158 | Ok(()) 159 | } 160 | 161 | /// Read the configuration from the internal set root path 162 | /// If not existing, write the current configuration to the path. 163 | pub fn try_load_file(&mut self) -> Result<()> { 164 | let file = self.root().join(Self::FILENAME); 165 | if file.exists() { 166 | *self = toml::from_str(&read_to_string(&file).with_context(|| { 167 | format!( 168 | "Unable to read expected configuration file '{}'", 169 | file.display(), 170 | ) 171 | })?) 
172 | .with_context(|| format!("Unable to load config file '{}'", file.display()))?; 173 | } else { 174 | self.to_file()?; 175 | } 176 | Ok(()) 177 | } 178 | 179 | /// Return the set shell as result type 180 | pub fn shell_ok(&self) -> Result { 181 | let shell = self.shell.as_ref().context("No shell set")?; 182 | Ok(shell.into()) 183 | } 184 | 185 | /// Returns true if multi node support is enabled 186 | pub fn multi_node(&self) -> bool { 187 | self.nodes() > 1 188 | } 189 | 190 | fn create_root_dir(&self) -> Result<()> { 191 | create_dir_all(self.root()).context("Unable to create root directory") 192 | } 193 | } 194 | 195 | #[cfg(test)] 196 | pub mod tests { 197 | use super::*; 198 | use std::path::Path; 199 | use tempfile::tempdir; 200 | 201 | pub fn test_config() -> Result { 202 | let mut c = Config::default(); 203 | c.root = tempdir()?.into_path(); 204 | c.canonicalize_root()?; 205 | Ok(c) 206 | } 207 | 208 | pub fn test_config_wrong_root() -> Result { 209 | let mut c = test_config()?; 210 | c.root = Path::new("/").join("proc"); 211 | Ok(c) 212 | } 213 | 214 | pub fn test_config_wrong_cidr() -> Result { 215 | let mut c = test_config()?; 216 | c.cidr = "10.0.0.1/25".parse()?; 217 | Ok(c) 218 | } 219 | 220 | #[test] 221 | fn canonicalize_root_success() -> Result<()> { 222 | let mut c = Config::default(); 223 | c.root = tempdir()?.into_path(); 224 | c.canonicalize_root() 225 | } 226 | 227 | #[test] 228 | fn canonicalize_root_failure() { 229 | let mut c = Config::default(); 230 | c.root = Path::new("/").join("proc").join("invalid"); 231 | assert!(c.canonicalize_root().is_err()) 232 | } 233 | 234 | #[test] 235 | fn to_file_success() -> Result<()> { 236 | let mut c = Config::default(); 237 | c.root = tempdir()?.into_path(); 238 | c.to_file() 239 | } 240 | 241 | #[test] 242 | fn to_file_failure() { 243 | let mut c = Config::default(); 244 | c.root = Path::new("/").join("proc").join("invalid"); 245 | assert!(c.to_file().is_err()) 246 | } 247 | 248 | #[test] 249 | fn try_load_file_success() -> Result<()> { 250 | let mut c = Config::default(); 251 | c.root = tempdir()?.into_path(); 252 | fs::write( 253 | c.root.join(Config::FILENAME), 254 | r#" 255 | cidr = "1.1.1.1/16" 256 | container-runtime = "podman" 257 | log-level = "DEBUG" 258 | no-shell = false 259 | nodes = 1 260 | packages = [] 261 | root = "root" 262 | "#, 263 | )?; 264 | c.try_load_file()?; 265 | assert_eq!(c.root(), Path::new("root")); 266 | assert_eq!(c.log_level(), LevelFilter::Debug); 267 | assert_eq!(&c.cidr().to_string(), "1.1.1.1/16"); 268 | Ok(()) 269 | } 270 | 271 | #[test] 272 | fn try_load_file_failure() -> Result<()> { 273 | let mut c = Config::default(); 274 | c.root = tempdir()?.into_path(); 275 | fs::write(c.root.join(Config::FILENAME), "invalid")?; 276 | assert!(c.try_load_file().is_err()); 277 | Ok(()) 278 | } 279 | } 280 | -------------------------------------------------------------------------------- /src/container.rs: -------------------------------------------------------------------------------- 1 | use crate::{nix::Nix, podman::Podman, process::Process, system::System, Config}; 2 | use anyhow::{bail, Result}; 3 | use log::{debug, info, trace, LevelFilter}; 4 | use std::{ 5 | fmt::Display, 6 | fs, 7 | path::{Path, PathBuf}, 8 | process::{Command, Stdio}, 9 | }; 10 | 11 | const DEFAULT_IMAGE: &str = "kubernix:base"; 12 | const DEFAULT_ROOT: &str = "kubernix"; 13 | 14 | pub struct Container; 15 | 16 | impl Container { 17 | /// Build the base image used for the nodes 18 | pub fn build(config: &Config) -> 
Result<()> { 19 | // Verify that the provided runtime exists 20 | System::find_executable(config.container_runtime())?; 21 | 22 | // Write the policy file 23 | let policy_json = Self::policy_json(config); 24 | fs::write(&policy_json, include_str!("assets/policy.json"))?; 25 | 26 | // Nothing needs to be done on single node runs or root users 27 | if !config.multi_node() { 28 | return Ok(()); 29 | } 30 | 31 | // Build the base container image 32 | info!("Building base container image '{}'", DEFAULT_IMAGE); 33 | 34 | // Prepare the Dockerfile 35 | let file = config.root().join("Dockerfile"); 36 | if !file.exists() { 37 | fs::write( 38 | &file, 39 | format!( 40 | include_str!("assets/Dockerfile"), 41 | nix = Nix::DIR, 42 | root = DEFAULT_ROOT 43 | ), 44 | )?; 45 | } 46 | 47 | // Prepare the arguments 48 | let mut args = if Podman::is_configured(config) { 49 | Podman::build_args(config, &policy_json)? 50 | } else { 51 | vec!["build".into()] 52 | }; 53 | args.extend(vec![format!("-t={}", DEFAULT_IMAGE), ".".into()]); 54 | trace!("Container runtime build args: {:?}", args); 55 | 56 | // Run the build 57 | debug!("Running container runtime with args: {}", args.join(" ")); 58 | let status = Command::new(config.container_runtime()) 59 | .current_dir(config.root()) 60 | .args(args) 61 | .stderr(Self::stdio(config)) 62 | .stdout(Self::stdio(config)) 63 | .status()?; 64 | if !status.success() { 65 | bail!("Unable to build container base image"); 66 | } 67 | 68 | info!("Container base image built"); 69 | Ok(()) 70 | } 71 | 72 | /// Retrieve the default signature policy file location 73 | pub fn policy_json(config: &Config) -> PathBuf { 74 | config.root().join("policy.json") 75 | } 76 | 77 | /// Start a new container based process 78 | pub fn start( 79 | config: &Config, 80 | dir: &Path, 81 | identifier: &str, 82 | process_name: &str, 83 | container_name: &str, 84 | args: &[&str], 85 | ) -> Result { 86 | // Cleanup possible containers 87 | Self::remove(config, container_name)?; 88 | 89 | // Prepare the arguments 90 | let arg_hostname = &format!("--hostname={}", container_name); 91 | let arg_name = &format!("--name={}", Self::prefixed_container_name(container_name)); 92 | let arg_volume_root = &Self::volume_arg(config.root().display()); 93 | let mut args_vec = vec![ 94 | "run", 95 | "--net=host", 96 | "--privileged", 97 | "--rm", 98 | arg_hostname, 99 | arg_name, 100 | arg_volume_root, 101 | ]; 102 | 103 | // Podman specific arguments 104 | let podman_args = Podman::default_args(config)?; 105 | if Podman::is_configured(config) { 106 | args_vec.extend(podman_args.iter().map(|x| x.as_str()).collect::>()) 107 | } 108 | 109 | // Mount /dev/mapper if available 110 | let dev_mapper = PathBuf::from("/").join("dev").join("mapper"); 111 | let arg_volume_dev_mapper = &Self::volume_arg(dev_mapper.display()); 112 | if dev_mapper.exists() { 113 | args_vec.push(arg_volume_dev_mapper); 114 | } 115 | 116 | // Add the process and the user provided args 117 | args_vec.extend(&[DEFAULT_IMAGE, process_name]); 118 | args_vec.extend(args); 119 | 120 | // Start the process 121 | trace!("Container runtime start args: {:?}", args_vec); 122 | Process::start(dir, identifier, config.container_runtime(), &args_vec) 123 | } 124 | 125 | fn volume_arg(volume: T) -> String { 126 | format!("--volume={v}:{v}", v = volume) 127 | } 128 | 129 | /// Exec a command on a container instance 130 | pub fn exec( 131 | config: &Config, 132 | dir: &Path, 133 | identifier: &str, 134 | process_name: &str, 135 | container_name: &str, 136 | args: 
&[&str], 137 | ) -> Result { 138 | // Prepare the args 139 | let mut args_vec = vec![]; 140 | 141 | let podman_args = Podman::default_args(config)?; 142 | if Podman::is_configured(config) { 143 | args_vec.extend(podman_args.iter().map(|x| x.as_str()).collect::>()) 144 | } 145 | 146 | let name = Self::prefixed_container_name(container_name); 147 | args_vec.extend(vec![ 148 | "exec", 149 | &name, 150 | "nix", 151 | "run", 152 | "-f", 153 | DEFAULT_ROOT, 154 | "-c", 155 | process_name, 156 | ]); 157 | args_vec.extend(args); 158 | 159 | // Run as usual process 160 | trace!("Container runtime exec args: {:?}", args_vec); 161 | Process::start(dir, identifier, config.container_runtime(), &args_vec) 162 | } 163 | 164 | /// Remove the provided (maybe running) container 165 | fn remove(config: &Config, name: &str) -> Result<()> { 166 | Command::new(config.container_runtime()) 167 | .arg("rm") 168 | .arg("-f") 169 | .arg(Self::prefixed_container_name(name)) 170 | .stderr(Stdio::null()) 171 | .stdout(Stdio::null()) 172 | .status()?; 173 | Ok(()) 174 | } 175 | 176 | /// Retrieve a stdio for the provided config log level 177 | fn stdio(config: &Config) -> Stdio { 178 | if config.log_level() > LevelFilter::Info { 179 | Stdio::inherit() 180 | } else { 181 | Stdio::null() 182 | } 183 | } 184 | 185 | /// Retrieve a prefixed container name 186 | fn prefixed_container_name(name: &str) -> String { 187 | format!("{}-{}", DEFAULT_ROOT, name) 188 | } 189 | } 190 | -------------------------------------------------------------------------------- /src/controllermanager.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | config::Config, 3 | kubeconfig::KubeConfig, 4 | network::Network, 5 | pki::Pki, 6 | process::{Process, ProcessState, Stoppable}, 7 | }; 8 | use anyhow::Result; 9 | use std::fs::create_dir_all; 10 | 11 | pub struct ControllerManager { 12 | process: Process, 13 | } 14 | 15 | impl ControllerManager { 16 | pub fn start( 17 | config: &Config, 18 | network: &Network, 19 | pki: &Pki, 20 | kubeconfig: &KubeConfig, 21 | ) -> ProcessState { 22 | let dir = config.root().join("controllermanager"); 23 | create_dir_all(&dir)?; 24 | 25 | let mut process = Process::start( 26 | &dir, 27 | "Controller Manager", 28 | "kube-controller-manager", 29 | &[ 30 | "--bind-address=0.0.0.0", 31 | &format!("--cluster-cidr={}", network.cluster_cidr()), 32 | "--cluster-name=kubernetes", 33 | &format!("--cluster-signing-cert-file={}", pki.ca().cert().display()), 34 | &format!("--cluster-signing-key-file={}", pki.ca().key().display()), 35 | &format!("--kubeconfig={}", kubeconfig.controller_manager().display()), 36 | "--leader-elect=false", 37 | &format!("--root-ca-file={}", pki.ca().cert().display()), 38 | &format!( 39 | "--service-account-private-key-file={}", 40 | pki.service_account().key().display() 41 | ), 42 | &format!("--service-cluster-ip-range={}", network.service_cidr()), 43 | "--use-service-account-credentials=true", 44 | "--v=2", 45 | ], 46 | )?; 47 | 48 | process.wait_ready("Serving securely")?; 49 | Ok(Box::new(Self { process })) 50 | } 51 | } 52 | 53 | impl Stoppable for ControllerManager { 54 | fn stop(&mut self) -> Result<()> { 55 | self.process.stop() 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /src/coredns.rs: -------------------------------------------------------------------------------- 1 | use crate::{config::Config, kubectl::Kubectl, network::Network}; 2 | use anyhow::{Context, Result}; 3 | use 
log::info; 4 | use std::fs::{self, create_dir_all}; 5 | 6 | pub struct CoreDns; 7 | 8 | impl CoreDns { 9 | pub fn apply(config: &Config, network: &Network, kubectl: &Kubectl) -> Result<()> { 10 | info!("Deploying CoreDNS and waiting to be ready"); 11 | 12 | let dir = config.root().join("coredns"); 13 | create_dir_all(&dir)?; 14 | 15 | let yml = format!(include_str!("assets/coredns.yml"), network.dns()?); 16 | let file = dir.join("coredns.yml"); 17 | 18 | if !file.exists() { 19 | fs::write(&file, yml)?; 20 | } 21 | 22 | kubectl.apply(&file).context("Unable to deploy CoreDNS")?; 23 | kubectl.wait_ready("coredns")?; 24 | info!("CoreDNS deployed"); 25 | Ok(()) 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /src/crio.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | container::Container, 3 | network::Network, 4 | node::Node, 5 | process::{Process, ProcessState, Stoppable}, 6 | system::System, 7 | Config, RUNTIME_ENV, 8 | }; 9 | use anyhow::{bail, Context, Result}; 10 | use log::debug; 11 | use serde_json::{json, to_string_pretty}; 12 | use std::{ 13 | fmt::{self, Display, Formatter}, 14 | fs::{self, create_dir_all}, 15 | path::PathBuf, 16 | process::Command, 17 | }; 18 | 19 | pub struct Crio { 20 | process: Process, 21 | socket: CriSocket, 22 | node_name: String, 23 | } 24 | 25 | /// Simple CRI socket abstraction 26 | pub struct CriSocket(PathBuf); 27 | 28 | impl Display for CriSocket { 29 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 30 | write!(f, "{}", self.0.display()) 31 | } 32 | } 33 | 34 | impl CriSocket { 35 | pub fn new(path: PathBuf) -> Result { 36 | if path.display().to_string().len() > 100 { 37 | bail!("Socket path '{}' is too long") 38 | } 39 | Ok(CriSocket(path)) 40 | } 41 | 42 | pub fn to_socket_string(&self) -> String { 43 | format!("unix://{}", self.0.display()) 44 | } 45 | } 46 | 47 | const CRIO: &str = "crio"; 48 | 49 | impl Crio { 50 | pub fn start(config: &Config, node: u8, network: &Network) -> ProcessState { 51 | let node_name = Node::name(config, network, node); 52 | 53 | let conmon = System::find_executable("conmon")?; 54 | let loopback = System::find_executable("loopback")?; 55 | let cni_plugin = loopback.parent().context("Unable to find CNI plugin dir")?; 56 | 57 | let dir = Self::path(config, network, node); 58 | let config_dir = dir.join("crio.conf.d"); 59 | let config_file = config_dir.join("crio.conf"); 60 | let network_dir = dir.join("cni"); 61 | let socket = Self::socket(config, network, node)?; 62 | 63 | if !dir.exists() { 64 | create_dir_all(&dir)?; 65 | create_dir_all(&network_dir)?; 66 | create_dir_all(&config_dir)?; 67 | 68 | let containers_dir = dir.join("containers"); 69 | fs::write( 70 | &config_file, 71 | format!( 72 | include_str!("assets/crio.conf"), 73 | conmon = conmon.display(), 74 | containers_root = containers_dir.join("storage").display(), 75 | containers_runroot = containers_dir.join("run").display(), 76 | listen = socket, 77 | log_dir = dir.join("log").display(), 78 | network_dir = network_dir.display(), 79 | plugin_dir = cni_plugin.display(), 80 | exits_dir = dir.join("exits").display(), 81 | runtime_path = System::find_executable("runc")?.display(), 82 | runtime_root = dir.join("runc").display(), 83 | signature_policy = Container::policy_json(config).display(), 84 | storage_driver = if config.multi_node() || System::in_container()? 
{ 85 | "vfs" 86 | } else { 87 | "overlay" 88 | }, 89 | version_file = dir.join("version").display(), 90 | ), 91 | )?; 92 | 93 | let cidr = network 94 | .crio_cidrs() 95 | .get(node as usize) 96 | .with_context(|| format!("Unable to find CIDR for {}", node_name))?; 97 | fs::write( 98 | network_dir.join("10-bridge.json"), 99 | to_string_pretty(&json!({ 100 | "cniVersion": "0.3.1", 101 | "name": format!("kubernix-{}", node_name), 102 | "type": "bridge", 103 | "bridge": format!("{}.{}", Network::INTERFACE_PREFIX, node), 104 | "isGateway": true, 105 | "ipMasq": true, 106 | "hairpinMode": true, 107 | "ipam": { 108 | "type": "host-local", 109 | "routes": [{ "dst": "0.0.0.0/0" }], 110 | "ranges": [[{ "subnet": cidr }]] 111 | } 112 | }))?, 113 | )?; 114 | } 115 | let args: &[&str] = &[&format!("--config-dir={}", config_file.display())]; 116 | 117 | let mut process = if config.multi_node() { 118 | // Run inside a container 119 | let identifier = format!("CRI-O {}", node_name); 120 | Container::start(config, &dir, &identifier, CRIO, &node_name, args)? 121 | } else { 122 | // Run as usual process 123 | Process::start(&dir, "CRI-O", CRIO, args)? 124 | }; 125 | process.wait_ready("Sandboxes:")?; 126 | 127 | Ok(Box::new(Self { 128 | process, 129 | socket, 130 | node_name, 131 | })) 132 | } 133 | 134 | /// Retrieve the CRI socket 135 | pub fn socket(config: &Config, network: &Network, node: u8) -> Result { 136 | CriSocket::new(Self::path(config, network, node).join("crio.sock")) 137 | } 138 | 139 | /// Retrieve the working path for the node 140 | fn path(config: &Config, network: &Network, node: u8) -> PathBuf { 141 | config 142 | .root() 143 | .join(CRIO) 144 | .join(Node::name(config, network, node)) 145 | } 146 | 147 | /// Remove all containers via crictl invocations 148 | fn remove_all_containers(&self) -> Result<()> { 149 | debug!("Removing all CRI-O workloads on {}", self.node_name); 150 | 151 | let output = Command::new("crictl") 152 | .env(RUNTIME_ENV, self.socket.to_socket_string()) 153 | .arg("pods") 154 | .arg("-q") 155 | .output()?; 156 | let stdout = String::from_utf8(output.stdout)?; 157 | if !output.status.success() { 158 | debug!("critcl pods stdout ({}): {}", self.node_name, stdout); 159 | debug!( 160 | "critcl pods stderr ({}): {}", 161 | self.node_name, 162 | String::from_utf8(output.stderr)? 
163 | ); 164 | bail!("crictl pods command failed ({})", self.node_name); 165 | } 166 | 167 | for x in stdout.lines() { 168 | debug!("Removing pod {} on {}", x, self.node_name); 169 | let output = Command::new("crictl") 170 | .env(RUNTIME_ENV, self.socket.to_socket_string()) 171 | .arg("rmp") 172 | .arg("-f") 173 | .arg(x) 174 | .output()?; 175 | if !output.status.success() { 176 | debug!("critcl rmp ({}): {:?}", self.node_name, output); 177 | bail!("crictl rmp command failed ({})", self.node_name); 178 | } 179 | } 180 | 181 | debug!("All workloads removed on {}", self.node_name); 182 | Ok(()) 183 | } 184 | } 185 | 186 | impl Stoppable for Crio { 187 | fn stop(&mut self) -> Result<()> { 188 | // Remove all running containers 189 | self.remove_all_containers() 190 | .with_context(|| format!("Unable to remove CRI-O containers on {}", self.node_name,))?; 191 | 192 | // Stop the process, should never really fail 193 | self.process.stop() 194 | } 195 | } 196 | 197 | #[cfg(test)] 198 | pub mod tests { 199 | use super::*; 200 | 201 | #[test] 202 | fn cri_socket_success() -> Result<()> { 203 | CriSocket::new("/some/path.sock".into())?; 204 | Ok(()) 205 | } 206 | 207 | #[test] 208 | fn cri_socket_failure() { 209 | assert!(CriSocket::new("a".repeat(101).into()).is_err()); 210 | } 211 | } 212 | -------------------------------------------------------------------------------- /src/encryptionconfig.rs: -------------------------------------------------------------------------------- 1 | use crate::Config; 2 | use anyhow::Result; 3 | use base64::encode; 4 | use getset::Getters; 5 | use log::info; 6 | use rand::{thread_rng, Rng}; 7 | use std::{ 8 | fs::{self, create_dir_all}, 9 | path::PathBuf, 10 | }; 11 | 12 | #[derive(Getters)] 13 | pub struct EncryptionConfig { 14 | #[get = "pub"] 15 | path: PathBuf, 16 | } 17 | 18 | impl EncryptionConfig { 19 | pub fn new(config: &Config) -> Result { 20 | let dir = &config.root().join("encryptionconfig"); 21 | create_dir_all(dir)?; 22 | let path = dir.join("config.yml"); 23 | 24 | // Create only if not already existing to make cluster reuse work 25 | if !path.exists() { 26 | info!("Creating encryption config"); 27 | let rnd = thread_rng().gen::<[u8; 32]>(); 28 | let b64 = encode(&rnd); 29 | let yml = format!(include_str!("assets/encryptionconfig.yml"), b64); 30 | fs::write(&path, yml)?; 31 | } 32 | 33 | Ok(EncryptionConfig { path }) 34 | } 35 | } 36 | 37 | #[cfg(test)] 38 | mod tests { 39 | use super::*; 40 | use crate::config::tests::{test_config, test_config_wrong_root}; 41 | 42 | #[test] 43 | fn encryptionconfig_success() -> Result<()> { 44 | let c = test_config()?; 45 | let e = EncryptionConfig::new(&c)?; 46 | assert!(e.path().exists()); 47 | Ok(()) 48 | } 49 | 50 | #[test] 51 | fn encryptionconfig_failure() -> Result<()> { 52 | let c = test_config_wrong_root()?; 53 | assert!(EncryptionConfig::new(&c).is_err()); 54 | Ok(()) 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /src/etcd.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | config::Config, 3 | network::Network, 4 | pki::Pki, 5 | process::{Process, ProcessState, Stoppable}, 6 | }; 7 | use anyhow::Result; 8 | use std::fs::create_dir_all; 9 | 10 | pub struct Etcd { 11 | process: Process, 12 | } 13 | 14 | impl Etcd { 15 | pub fn start(config: &Config, network: &Network, pki: &Pki) -> ProcessState { 16 | const ETCD: &str = "etcd"; 17 | let dir = config.root().join(ETCD); 18 | create_dir_all(&dir)?; 19 | 20 | 
let mut process = Process::start( 21 | &dir, 22 | ETCD, 23 | ETCD, 24 | &[ 25 | "--client-cert-auth", 26 | "--initial-cluster-state=new", 27 | "--initial-cluster-token=etcd-cluster", 28 | "--peer-client-cert-auth", 29 | &format!( 30 | "--initial-advertise-peer-urls=https://{}", 31 | network.etcd_peer() 32 | ), 33 | &format!("--advertise-client-urls=https://{}", network.etcd_client()), 34 | &format!("--cert-file={}", pki.apiserver().cert().display()), 35 | &format!("--data-dir={}", dir.join("run").display()), 36 | &format!("--initial-cluster=etcd=https://{}", network.etcd_peer()), 37 | &format!("--key-file={}", pki.apiserver().key().display()), 38 | &format!("--listen-client-urls=https://{}", network.etcd_client()), 39 | &format!("--listen-peer-urls=https://{}", network.etcd_peer()), 40 | &format!("--name={}", ETCD), 41 | &format!("--peer-cert-file={}", pki.apiserver().cert().display()), 42 | &format!("--peer-key-file={}", pki.apiserver().key().display()), 43 | &format!("--peer-trusted-ca-file={}", pki.ca().cert().display()), 44 | &format!("--trusted-ca-file={}", pki.ca().cert().display()), 45 | ], 46 | )?; 47 | 48 | process.wait_ready("ready to serve client requests")?; 49 | Ok(Box::new(Self { process })) 50 | } 51 | } 52 | 53 | impl Stoppable for Etcd { 54 | fn stop(&mut self) -> Result<()> { 55 | self.process.stop() 56 | } 57 | } 58 | 59 | #[cfg(test)] 60 | mod tests { 61 | use super::*; 62 | use crate::{config::tests::test_config, network::tests::test_network}; 63 | 64 | #[test] 65 | fn new_success() -> Result<()> { 66 | let c = test_config()?; 67 | let n = test_network()?; 68 | let p = Pki::new(&c, &n)?; 69 | 70 | let mut etcd = Etcd::start(&c, &n, &p)?; 71 | etcd.stop() 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /src/kubeconfig.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | kubectl::Kubectl, 3 | pki::{Idendity, Pki}, 4 | Config, 5 | }; 6 | use anyhow::{format_err, Context, Result}; 7 | use getset::Getters; 8 | use log::{debug, info}; 9 | use nix::sys::stat::{fchmod, Mode}; 10 | use std::{ 11 | fs::{create_dir_all, File}, 12 | net::Ipv4Addr, 13 | os::unix::io::AsRawFd, 14 | path::{Path, PathBuf}, 15 | }; 16 | 17 | #[derive(Getters)] 18 | pub struct KubeConfig { 19 | #[get = "pub"] 20 | kubelets: Vec, 21 | 22 | #[get = "pub"] 23 | proxy: PathBuf, 24 | 25 | #[get = "pub"] 26 | controller_manager: PathBuf, 27 | 28 | #[get = "pub"] 29 | scheduler: PathBuf, 30 | 31 | #[get = "pub"] 32 | admin: PathBuf, 33 | } 34 | 35 | impl KubeConfig { 36 | pub fn new(config: &Config, pki: &Pki) -> Result { 37 | // Create the target dir 38 | let dir = config.root().join("kubeconfig"); 39 | 40 | if dir.exists() { 41 | info!("Kubeconfig directory already exists, skipping generation"); 42 | 43 | let kubelets = pki 44 | .kubelets() 45 | .iter() 46 | .map(|i| Self::target_config(&dir, i)) 47 | .collect(); 48 | 49 | Ok(KubeConfig { 50 | kubelets, 51 | proxy: Self::target_config(&dir, pki.proxy()), 52 | controller_manager: Self::target_config(&dir, pki.controller_manager()), 53 | scheduler: Self::target_config(&dir, pki.scheduler()), 54 | admin: Self::target_config(&dir, pki.admin()), 55 | }) 56 | } else { 57 | info!("Creating kubeconfigs"); 58 | create_dir_all(&dir)?; 59 | 60 | let kubelets = pki 61 | .kubelets() 62 | .iter() 63 | .map(|x| Self::setup_kubeconfig(&dir, x, pki.ca().cert())) 64 | .collect::, _>>()?; 65 | 66 | Ok(KubeConfig { 67 | kubelets, 68 | proxy: Self::setup_kubeconfig(&dir, 
pki.proxy(), pki.ca().cert())?, 69 | controller_manager: Self::setup_kubeconfig( 70 | &dir, 71 | pki.controller_manager(), 72 | pki.ca().cert(), 73 | )?, 74 | scheduler: Self::setup_kubeconfig(&dir, pki.scheduler(), pki.ca().cert())?, 75 | admin: Self::setup_kubeconfig(&dir, pki.admin(), pki.ca().cert())?, 76 | }) 77 | } 78 | } 79 | 80 | fn setup_kubeconfig(dir: &Path, idendity: &Idendity, ca: &Path) -> Result { 81 | debug!("Creating kubeconfig for {}", idendity.name()); 82 | let kubeconfig = Self::target_config(dir, idendity); 83 | 84 | let embed_certs = "--embed-certs=true"; 85 | let cluster = "kubernetes"; 86 | let kubectl = Kubectl::new(&kubeconfig); 87 | kubectl.config(&[ 88 | "set-cluster", 89 | cluster, 90 | &format!("--certificate-authority={}", ca.display()), 91 | &format!("--server=https://{}:6443", &Ipv4Addr::LOCALHOST), 92 | embed_certs, 93 | ])?; 94 | 95 | kubectl.config(&[ 96 | "set-credentials", 97 | &idendity.user(), 98 | &format!("--client-certificate={}", idendity.cert().display()), 99 | &format!("--client-key={}", idendity.key().display()), 100 | embed_certs, 101 | ])?; 102 | 103 | let context = "kubernix"; 104 | kubectl.config(&[ 105 | "set-context", 106 | context, 107 | &format!("--cluster={}", cluster), 108 | &format!("--user={}", idendity.user()), 109 | ])?; 110 | 111 | kubectl.config(&["use-context", context])?; 112 | 113 | // Adapt file permissions 114 | fchmod( 115 | File::open(&kubeconfig) 116 | .context("unable to open kubeconfig")? 117 | .as_raw_fd(), 118 | Mode::from_bits(0o644).ok_or_else(|| format_err!("unable to get mode bits"))?, 119 | ) 120 | .context("unable to set kubeconfig permissions")?; 121 | 122 | debug!("Kubeconfig created for {}", idendity.name()); 123 | Ok(kubeconfig) 124 | } 125 | 126 | fn target_config(dir: &Path, idendity: &Idendity) -> PathBuf { 127 | dir.join(format!("{}.kubeconfig", idendity.name())) 128 | } 129 | } 130 | 131 | #[cfg(test)] 132 | mod tests { 133 | use super::*; 134 | use crate::{config::tests::test_config, network::tests::test_network}; 135 | 136 | #[test] 137 | fn new_success() -> Result<()> { 138 | let c = test_config()?; 139 | let n = test_network()?; 140 | let p = Pki::new(&c, &n)?; 141 | KubeConfig::new(&c, &p)?; 142 | Ok(()) 143 | } 144 | } 145 | -------------------------------------------------------------------------------- /src/kubectl.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{bail, Result}; 2 | use getset::Getters; 3 | use log::{debug, trace}; 4 | use std::{ 5 | path::{Path, PathBuf}, 6 | process::{Command, Output}, 7 | thread::sleep, 8 | time::{Duration, Instant}, 9 | }; 10 | 11 | #[derive(Getters)] 12 | pub struct Kubectl { 13 | #[get = "pub"] 14 | kubeconfig: PathBuf, 15 | } 16 | 17 | impl Kubectl { 18 | /// Create a new kubectl client for the provided kubeconfig 19 | pub fn new(kubeconfig: &Path) -> Self { 20 | Self { 21 | kubeconfig: kubeconfig.into(), 22 | } 23 | } 24 | 25 | /// Run a generic kubectl command 26 | pub fn execute(&self, args: &[&str]) -> Result { 27 | let output = Command::new("kubectl") 28 | .args(args) 29 | .arg("--kubeconfig") 30 | .arg(&self.kubeconfig) 31 | .output()?; 32 | if !output.status.success() { 33 | trace!("kubectl args: {:?}", args); 34 | debug!("kubectl output: {:?}", output); 35 | bail!("kubectl command failed"); 36 | } 37 | Ok(output) 38 | } 39 | 40 | /// Run kubectl config 41 | pub fn config(&self, args: &[&str]) -> Result<()> { 42 | let mut final_args = vec!["config"]; 43 | final_args.extend(args); 44 | 
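// `execute` (above) appends `--kubeconfig <path>` to every invocation, so a
// call such as `kubectl.config(&["set-cluster", "kubernetes", ...])` roughly
// translates to the following (arguments illustrative, not exhaustive):
//   kubectl config set-cluster kubernetes --embed-certs=true --kubeconfig <dir>/<name>.kubeconfig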
self.execute(&final_args)?; 45 | Ok(()) 46 | } 47 | 48 | /// Run kubectl apply 49 | pub fn apply(&self, file: &Path) -> Result<()> { 50 | let file_arg = file.display().to_string(); 51 | let args = &["apply", "-f", &file_arg]; 52 | self.execute(args)?; 53 | Ok(()) 54 | } 55 | 56 | /// Wait for a pod to be ready 57 | pub fn wait_ready(&self, name: &str) -> Result<()> { 58 | debug!("Waiting for {} to be ready", name); 59 | const TIMEOUT: u64 = 60; 60 | let now = Instant::now(); 61 | while now.elapsed().as_secs() < TIMEOUT { 62 | let output = self.execute(&[ 63 | "get", 64 | "pods", 65 | "-n=kube-system", 66 | &format!("-l=k8s-app={}", name), 67 | "--no-headers", 68 | ])?; 69 | let stdout = String::from_utf8(output.stdout)?; 70 | if let Some(status) = stdout.split_whitespace().nth(1) { 71 | debug!( 72 | "{} status: {} ({}/{}s)", 73 | name, 74 | status, 75 | now.elapsed().as_secs(), 76 | TIMEOUT, 77 | ); 78 | if stdout.contains("1/1") { 79 | debug!("{} ready", name); 80 | return Ok(()); 81 | } 82 | } else { 83 | debug!( 84 | "{} status not available ({}/{}s)", 85 | name, 86 | now.elapsed().as_secs(), 87 | TIMEOUT, 88 | ) 89 | } 90 | sleep(Duration::from_secs(2)); 91 | } 92 | bail!("Unable to wait for {} pod", name) 93 | } 94 | } 95 | 96 | #[cfg(test)] 97 | mod tests { 98 | use super::*; 99 | use std::path::PathBuf; 100 | 101 | #[test] 102 | fn execute_success() -> Result<()> { 103 | let k = Kubectl::new(&PathBuf::from("")); 104 | k.execute(&[])?; 105 | Ok(()) 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /src/kubelet.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | config::Config, 3 | container::Container, 4 | crio::Crio, 5 | kubeconfig::KubeConfig, 6 | network::Network, 7 | node::Node, 8 | pki::Pki, 9 | process::{Process, ProcessState, Stoppable}, 10 | }; 11 | use anyhow::{bail, Context, Result}; 12 | use std::fs::{self, create_dir_all}; 13 | 14 | pub struct Kubelet { 15 | process: Process, 16 | } 17 | 18 | impl Kubelet { 19 | pub fn start( 20 | config: &Config, 21 | node: u8, 22 | network: &Network, 23 | pki: &Pki, 24 | kubeconfig: &KubeConfig, 25 | ) -> ProcessState { 26 | let node_name = Node::name(config, network, node); 27 | const KUBELET: &str = "kubelet"; 28 | 29 | let dir = config.root().join(KUBELET).join(&node_name); 30 | let root_dir = dir.join("run"); 31 | if root_dir.display().to_string().len() + "kubelet.sock".len() > 100 { 32 | bail!( 33 | "Kubelet run path '{}' is too long for kubelet.sock", 34 | root_dir.display() 35 | ) 36 | } 37 | 38 | create_dir_all(&dir)?; 39 | 40 | let idendity = pki 41 | .kubelets() 42 | .get(node as usize) 43 | .with_context(|| format!("Unable to retrieve kubelet idendity for {}", node_name))?; 44 | 45 | let yml = format!( 46 | include_str!("assets/kubelet.yml"), 47 | ca = pki.ca().cert().display(), 48 | dns = network.dns()?, 49 | cidr = network 50 | .crio_cidrs() 51 | .get(node as usize) 52 | .context("Unable to retrieve kubelet CIDR")?, 53 | cert = idendity.cert().display(), 54 | key = idendity.key().display(), 55 | port = 11250 + u16::from(node), 56 | healthzPort = 12250 + u16::from(node), 57 | ); 58 | let cfg = dir.join("config.yml"); 59 | 60 | if !cfg.exists() { 61 | fs::write(&cfg, yml)?; 62 | } 63 | 64 | let args = &[ 65 | "--container-runtime=remote", 66 | &format!("--config={}", cfg.display()), 67 | &format!("--root-dir={}", root_dir.display()), 68 | &format!( 69 | "--container-runtime-endpoint={}", 70 | Crio::socket(config, 
network, node)?.to_socket_string(), 71 | ), 72 | &format!( 73 | "--kubeconfig={}", 74 | kubeconfig 75 | .kubelets() 76 | .get(node as usize) 77 | .with_context(|| format!( 78 | "Unable to retrieve kubelet config for {}", 79 | node_name 80 | ))? 81 | .display() 82 | ), 83 | "--v=2", 84 | ]; 85 | 86 | let mut process = if config.multi_node() { 87 | // Run inside a container 88 | let arg_hostname = &format!("--hostname-override={}", node_name); 89 | let mut modargs: Vec<&str> = vec![arg_hostname]; 90 | modargs.extend(args); 91 | Container::exec( 92 | config, 93 | &dir, 94 | &format!("Kubelet {}", node_name), 95 | KUBELET, 96 | &node_name, 97 | &modargs, 98 | )? 99 | } else { 100 | // Run as usual process 101 | Process::start(&dir, "Kubelet", KUBELET, args)? 102 | }; 103 | process.wait_ready("Successfully registered node")?; 104 | Ok(Box::new(Self { process })) 105 | } 106 | } 107 | 108 | impl Stoppable for Kubelet { 109 | fn stop(&mut self) -> Result<()> { 110 | self.process.stop() 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! # kubernix 2 | #![deny(missing_docs)] 3 | 4 | mod apiserver; 5 | mod config; 6 | mod container; 7 | mod controllermanager; 8 | mod coredns; 9 | mod crio; 10 | mod encryptionconfig; 11 | mod etcd; 12 | mod kubeconfig; 13 | mod kubectl; 14 | mod kubelet; 15 | mod logger; 16 | mod network; 17 | mod nix; 18 | mod node; 19 | mod pki; 20 | mod podman; 21 | mod process; 22 | mod progress; 23 | mod proxy; 24 | mod scheduler; 25 | mod system; 26 | 27 | pub use config::Config; 28 | pub use logger::Logger; 29 | 30 | use crate::nix::Nix; 31 | use apiserver::ApiServer; 32 | use container::Container; 33 | use controllermanager::ControllerManager; 34 | use coredns::CoreDns; 35 | use crio::Crio; 36 | use encryptionconfig::EncryptionConfig; 37 | use etcd::Etcd; 38 | use kubeconfig::KubeConfig; 39 | use kubectl::Kubectl; 40 | use kubelet::Kubelet; 41 | use network::Network; 42 | use pki::Pki; 43 | use process::{Process, Stoppables}; 44 | use progress::Progress; 45 | use proxy::Proxy; 46 | use scheduler::Scheduler; 47 | use system::System; 48 | 49 | use ::nix::{ 50 | mount::{umount2, MntFlags}, 51 | unistd::getuid, 52 | }; 53 | use anyhow::{bail, Context, Result}; 54 | use log::{debug, error, info, set_boxed_logger}; 55 | use proc_mounts::MountIter; 56 | use rayon::{prelude::*, scope}; 57 | use signal_hook::{ 58 | consts::signal::{SIGHUP, SIGINT, SIGTERM}, 59 | flag, 60 | }; 61 | use std::{ 62 | fs, 63 | path::PathBuf, 64 | process::{id, Command}, 65 | sync::{ 66 | atomic::{AtomicBool, Ordering}, 67 | Arc, 68 | }, 69 | thread::sleep, 70 | time::{Duration, Instant}, 71 | }; 72 | 73 | const RUNTIME_ENV: &str = "CONTAINER_RUNTIME_ENDPOINT"; 74 | 75 | /// The main entry point for the application 76 | pub struct Kubernix { 77 | config: Config, 78 | network: Network, 79 | kubectl: Kubectl, 80 | processes: Stoppables, 81 | system: System, 82 | } 83 | 84 | impl Kubernix { 85 | /// Start kubernix by consuming the provided configuration 86 | pub fn start(mut config: Config) -> Result<()> { 87 | Self::prepare_env(&mut config)?; 88 | 89 | // Bootstrap if we're not inside a nix shell 90 | if Nix::is_active() { 91 | Self::bootstrap_cluster(config) 92 | } else { 93 | Nix::bootstrap(config) 94 | } 95 | } 96 | 97 | /// Spawn a new shell into the provided configuration environment 98 | pub fn new_shell(mut config: Config) -> Result<()> { 99 | 
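// Unlike `start` above, this path does not bootstrap anything: it only
// re-enters the nix environment and sources the previously written
// `kubernix.env` file (see `write_env_file` below) inside a fresh shell.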
Self::prepare_env(&mut config)?; 100 | 101 | info!( 102 | "Spawning new kubernix shell in: '{}'", 103 | config.root().display() 104 | ); 105 | 106 | let env_file = Self::env_file(&config); 107 | if !env_file.exists() { 108 | bail!( 109 | "Necessary environment file '{}' does not exist", 110 | env_file.display() 111 | ) 112 | } 113 | 114 | Nix::run( 115 | &config, 116 | &[ 117 | &config.shell_ok()?, 118 | "-c", 119 | &format!(". {} && {}", env_file.display(), config.shell_ok()?,), 120 | ], 121 | )?; 122 | 123 | info!("Bye, leaving the Kubernix environment"); 124 | Ok(()) 125 | } 126 | 127 | /// Prepare the environment based on the provided config 128 | fn prepare_env(config: &mut Config) -> Result<()> { 129 | // Rootless is currently not supported 130 | if !getuid().is_root() { 131 | bail!("Please run kubernix as root") 132 | } 133 | 134 | // Prepare the configuration 135 | if config.root().exists() { 136 | config.try_load_file()?; 137 | } else { 138 | config.to_file()?; 139 | } 140 | config.canonicalize_root()?; 141 | 142 | // Setup the logger 143 | set_boxed_logger(Logger::new(config.log_level())).context("Unable to set logger") 144 | } 145 | 146 | /// Stop kubernix by cleaning up all running processes 147 | fn stop(&mut self) { 148 | for x in &mut self.processes { 149 | if let Err(e) = x.stop() { 150 | debug!("{}", e) 151 | } 152 | } 153 | } 154 | 155 | /// The amount of processes to be run 156 | fn processes(config: &Config) -> u64 { 157 | 5 + 2 * u64::from(config.nodes()) 158 | } 159 | 160 | /// Bootstrap the whole cluster, which assumes to be inside a nix shell 161 | fn bootstrap_cluster(config: Config) -> Result<()> { 162 | // Setup the progress bar 163 | const BASE_STEPS: u64 = 15; 164 | let steps = if config.multi_node() { 165 | u64::from(config.nodes()) * 2 + BASE_STEPS 166 | } else { 167 | BASE_STEPS 168 | } + Self::processes(&config); 169 | let p = Progress::new(steps, config.log_level()); 170 | info!("Bootstrapping cluster"); 171 | 172 | // Ensure that the system is prepared 173 | let system = System::setup(&config).context("Unable to setup system")?; 174 | Container::build(&config)?; 175 | 176 | // Setup the network 177 | let network = Network::new(&config)?; 178 | 179 | // Setup the public key infrastructure 180 | let pki = Pki::new(&config, &network)?; 181 | 182 | // Setup the configs 183 | let kubeconfig = KubeConfig::new(&config, &pki)?; 184 | let kubectl = Kubectl::new(kubeconfig.admin()); 185 | let encryptionconfig = EncryptionConfig::new(&config)?; 186 | 187 | // All processes 188 | info!("Starting processes"); 189 | let mut api_server = Process::stopped(); 190 | let mut controller_manager = Process::stopped(); 191 | let mut etcd = Process::stopped(); 192 | let mut scheduler = Process::stopped(); 193 | let mut proxy = Process::stopped(); 194 | let mut crios = (0..config.nodes()) 195 | .map(|_| Process::stopped()) 196 | .collect::>(); 197 | let mut kubelets = (0..config.nodes()) 198 | .map(|_| Process::stopped()) 199 | .collect::>(); 200 | 201 | // Spawn the processes 202 | scope(|a| { 203 | // Control plane 204 | a.spawn(|b| { 205 | etcd = Etcd::start(&config, &network, &pki); 206 | b.spawn(|c| { 207 | api_server = 208 | ApiServer::start(&config, &network, &pki, &encryptionconfig, &kubectl); 209 | c.spawn(|_| { 210 | controller_manager = 211 | ControllerManager::start(&config, &network, &pki, &kubeconfig) 212 | }); 213 | c.spawn(|_| scheduler = Scheduler::start(&config, &kubeconfig)); 214 | }); 215 | }); 216 | 217 | // Node processes 218 | a.spawn(|c| { 219 | crios 220 
| .par_iter_mut() 221 | .zip(kubelets.par_iter_mut()) 222 | .enumerate() 223 | .for_each(|(i, (c, k))| { 224 | *c = Crio::start(&config, i as u8, &network); 225 | if c.is_ok() { 226 | *k = Kubelet::start(&config, i as u8, &network, &pki, &kubeconfig); 227 | } 228 | }); 229 | c.spawn(|_| proxy = Proxy::start(&config, &network, &kubeconfig)); 230 | }); 231 | }); 232 | 233 | // This order is important since we will shut down the processes in order 234 | let mut results = vec![scheduler, proxy, controller_manager, api_server, etcd]; 235 | results.extend(kubelets); 236 | results.extend(crios); 237 | let all_ok = results.iter().all(|x| x.is_ok()); 238 | 239 | // Note: wait for `drain_filter()` to be stable and make it more straightforward 240 | let mut processes = vec![]; 241 | for process in results { 242 | match process { 243 | Ok(p) => processes.push(p), 244 | Err(e) => debug!("{}", e), 245 | } 246 | } 247 | 248 | // Setup the main instance 249 | let spawn_shell = !config.no_shell(); 250 | let mut kubernix = Kubernix { 251 | config, 252 | network, 253 | kubectl, 254 | processes, 255 | system, 256 | }; 257 | 258 | // No dead processes 259 | if all_ok { 260 | // Apply all cluster addons 261 | kubernix.apply_addons()?; 262 | kubernix.write_env_file()?; 263 | info!("Everything is up and running"); 264 | p.reset(); 265 | 266 | if spawn_shell { 267 | kubernix.spawn_shell()?; 268 | } else { 269 | kubernix.wait()?; 270 | } 271 | } else { 272 | error!("Unable to start all processes") 273 | } 274 | 275 | Ok(()) 276 | } 277 | 278 | /// Apply needed workloads to the running cluster. This method stops the cluster on any error. 279 | fn apply_addons(&mut self) -> Result<()> { 280 | info!("Applying cluster addons"); 281 | CoreDns::apply(&self.config, &self.network, &self.kubectl) 282 | } 283 | 284 | /// Wait until a termination signal occurs 285 | fn wait(&self) -> Result<()> { 286 | // Setup the signal handlers 287 | let term = Arc::new(AtomicBool::new(false)); 288 | flag::register(SIGTERM, Arc::clone(&term))?; 289 | flag::register(SIGINT, Arc::clone(&term))?; 290 | flag::register(SIGHUP, Arc::clone(&term))?; 291 | info!("Waiting for interrupt…"); 292 | 293 | // Write the pid file 294 | let pid_file = self.config.root().join("kubernix.pid"); 295 | debug!("Writing pid file to: {}", pid_file.display()); 296 | fs::write(pid_file, id().to_string())?; 297 | 298 | // Wait for the signals 299 | while !term.load(Ordering::Relaxed) {} 300 | Ok(()) 301 | } 302 | 303 | /// Spawn a new interactive default system shell 304 | fn spawn_shell(&self) -> Result<()> { 305 | info!("Spawning interactive shell"); 306 | info!("Please be aware that the cluster stops if you exit the shell"); 307 | 308 | Command::new(self.config.shell_ok()?) 309 | .current_dir(self.config.root()) 310 | .arg("-c") 311 | .arg(format!( 312 | ". 
{} && {}", 313 | Self::env_file(&self.config).display(), 314 | self.config.shell_ok()?, 315 | )) 316 | .status()?; 317 | Ok(()) 318 | } 319 | 320 | /// Lay out the env file 321 | fn write_env_file(&self) -> Result<()> { 322 | info!("Writing environment file"); 323 | fs::write( 324 | Self::env_file(&self.config), 325 | format!( 326 | "export {}={}\nexport {}={}", 327 | RUNTIME_ENV, 328 | Crio::socket(&self.config, &self.network, 0)?.to_socket_string(), 329 | "KUBECONFIG", 330 | self.kubectl.kubeconfig().display(), 331 | ), 332 | )?; 333 | Ok(()) 334 | } 335 | 336 | /// Retrieve the path to the env file 337 | fn env_file(config: &Config) -> PathBuf { 338 | config.root().join("kubernix.env") 339 | } 340 | 341 | /// Remove all stale mounts 342 | fn umount(&self) { 343 | debug!("Removing active mounts"); 344 | let now = Instant::now(); 345 | while now.elapsed().as_secs() < 5 { 346 | match MountIter::new() { 347 | Err(e) => { 348 | debug!("Unable to retrieve mounts: {}", e); 349 | sleep(Duration::from_secs(1)); 350 | } 351 | Ok(mounts) => { 352 | let mut found_mount = false; 353 | mounts 354 | .filter_map(|x| x.ok()) 355 | .filter(|x| x.dest.starts_with(self.config.root())) 356 | .filter(|x| !x.dest.eq(self.config.root())) 357 | .for_each(|m| { 358 | found_mount = true; 359 | debug!("Removing mount: {}", m.dest.display()); 360 | if let Err(e) = umount2(&m.dest, MntFlags::MNT_FORCE) { 361 | debug!("Unable to umount '{}': {}", m.dest.display(), e); 362 | } 363 | }); 364 | if !found_mount { 365 | break; 366 | } 367 | } 368 | }; 369 | } 370 | } 371 | } 372 | 373 | impl Drop for Kubernix { 374 | fn drop(&mut self) { 375 | let p = Progress::new(Self::processes(&self.config), self.config.log_level()); 376 | 377 | info!("Cleaning up"); 378 | self.stop(); 379 | self.umount(); 380 | self.system.cleanup(); 381 | info!("Cleanup done"); 382 | 383 | p.reset(); 384 | debug!("All done"); 385 | } 386 | } 387 | -------------------------------------------------------------------------------- /src/logger.rs: -------------------------------------------------------------------------------- 1 | use crate::progress::Progress; 2 | use console::{style, Color}; 3 | use log::{set_max_level, Level, LevelFilter, Log, Metadata, Record}; 4 | use std::io::{stderr, Write}; 5 | 6 | /// The main logging faccade 7 | pub struct Logger { 8 | level: LevelFilter, 9 | } 10 | 11 | impl Logger { 12 | /// Create a new logger 13 | pub fn new(level: LevelFilter) -> Box { 14 | set_max_level(LevelFilter::Trace); 15 | Self { level }.into() 16 | } 17 | 18 | /// Log an error message 19 | pub fn error(msg: &str) { 20 | Self { 21 | level: LevelFilter::Error, 22 | } 23 | .log( 24 | &Record::builder() 25 | .args(format_args!("{}", msg)) 26 | .level(Level::Error) 27 | .build(), 28 | ); 29 | } 30 | } 31 | 32 | impl Log for Logger { 33 | fn enabled(&self, metadata: &Metadata<'_>) -> bool { 34 | metadata.level() <= self.level 35 | } 36 | 37 | fn log(&self, record: &Record<'_>) { 38 | if !self.enabled(record.metadata()) { 39 | return; 40 | } 41 | 42 | let level = record.metadata().level(); 43 | let (level_name, level_color) = match level { 44 | Level::Error => ("ERROR", Color::Red), 45 | Level::Warn => ("WARN ", Color::Yellow), 46 | Level::Info => ("INFO ", Color::Green), 47 | Level::Debug => ("DEBUG", Color::Cyan), 48 | Level::Trace => ("TRACE", Color::Magenta), 49 | }; 50 | let msg = format!( 51 | "{}{}{} {}", 52 | style("[").white().dim(), 53 | style(level_name).fg(level_color), 54 | style("]").white().dim(), 55 | style(record.args()), 56 | ); 57 | 58 
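// If a global progress bar is active (see progress.rs below), Info-level
// records advance the bar and become its message instead of being printed,
// while all other levels are printed above the bar; without an active bar
// everything goes to stderr.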
| if let Some(pb) = Progress::get() { 59 | if level != Level::Info { 60 | pb.println(msg); 61 | } else { 62 | pb.inc(1); 63 | pb.set_message(&record.args().to_string()); 64 | } 65 | } else { 66 | writeln!(stderr(), "{}", msg).ok(); 67 | } 68 | } 69 | 70 | fn flush(&self) {} 71 | } 72 | 73 | #[cfg(test)] 74 | pub mod tests { 75 | use super::*; 76 | use log::{MetadataBuilder, Record}; 77 | 78 | #[test] 79 | fn logger_success() { 80 | let l = Logger::new(LevelFilter::Info); 81 | let record = Record::builder() 82 | .args(format_args!("Error!")) 83 | .level(Level::Error) 84 | .build(); 85 | l.log(&record); 86 | let err_metadata = MetadataBuilder::new().level(Level::Error).build(); 87 | assert!(l.enabled(&err_metadata)); 88 | let dbg_metadata = MetadataBuilder::new().level(Level::Debug).build(); 89 | assert!(!l.enabled(&dbg_metadata)); 90 | l.flush(); 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use kubernix::{Config, Kubernix, Logger}; 3 | use std::process::exit; 4 | 5 | pub fn main() { 6 | if let Err(e) = run() { 7 | Logger::error( 8 | &e.chain() 9 | .map(|x| x.to_string()) 10 | .collect::>() 11 | .join(": "), 12 | ); 13 | exit(1); 14 | } 15 | } 16 | 17 | fn run() -> Result<()> { 18 | // Parse CLI arguments 19 | let config = Config::default(); 20 | 21 | if config.subcommand().is_some() { 22 | // Spawn only a new shell 23 | Kubernix::new_shell(config) 24 | } else { 25 | // Bootstrap the cluster 26 | Kubernix::start(config) 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /src/network.rs: -------------------------------------------------------------------------------- 1 | use crate::Config; 2 | use anyhow::{bail, Context, Result}; 3 | use getset::Getters; 4 | use hostname::get; 5 | use ipnetwork::Ipv4Network; 6 | use log::{debug, warn}; 7 | use std::{ 8 | net::{Ipv4Addr, SocketAddr}, 9 | process::Command, 10 | }; 11 | 12 | #[derive(Getters)] 13 | pub struct Network { 14 | #[get = "pub"] 15 | cluster_cidr: Ipv4Network, 16 | 17 | #[get = "pub"] 18 | crio_cidrs: Vec, 19 | 20 | #[get = "pub"] 21 | service_cidr: Ipv4Network, 22 | 23 | #[get = "pub"] 24 | etcd_client: SocketAddr, 25 | 26 | #[get = "pub"] 27 | etcd_peer: SocketAddr, 28 | 29 | #[get = "pub"] 30 | hostname: String, 31 | } 32 | 33 | impl Network { 34 | /// The global name for the interface 35 | pub const INTERFACE_PREFIX: &'static str = "kubernix"; 36 | 37 | /// Create a new network from the provided config 38 | pub fn new(config: &Config) -> Result { 39 | // Preflight checks 40 | if config.cidr().prefix() > 24 { 41 | bail!( 42 | "Specified IP network {} is too small, please use at least a /24 subnet", 43 | config.cidr() 44 | ) 45 | } 46 | Self::warn_overlapping_route(config.cidr())?; 47 | 48 | // Calculate the CIDRs 49 | let cluster_cidr = Ipv4Network::new(config.cidr().ip(), 24)?; 50 | debug!("Using cluster CIDR {}", cluster_cidr); 51 | 52 | let service_cidr = Ipv4Network::new( 53 | config 54 | .cidr() 55 | .nth(cluster_cidr.size()) 56 | .context("Unable to retrieve service CIDR start IP")?, 57 | 24, 58 | )?; 59 | debug!("Using service CIDR {}", service_cidr); 60 | 61 | let mut crio_cidrs = vec![]; 62 | let mut offset = cluster_cidr.size() + service_cidr.size(); 63 | for node in 0..config.nodes() { 64 | let cidr = Ipv4Network::new( 65 | config 66 | .cidr() 67 | .nth(offset) 68 | .context("Unable to retrieve 
CRI-O CIDR start IP")?, 69 | 24, 70 | )?; 71 | offset += cidr.size(); 72 | debug!("Using CRI-O ({}) CIDR {}", node, cidr); 73 | crio_cidrs.push(cidr); 74 | } 75 | 76 | // Set the rest of the networking related adresses and paths 77 | let etcd_client = SocketAddr::new(Ipv4Addr::LOCALHOST.into(), 2379); 78 | let etcd_peer = SocketAddr::new(Ipv4Addr::LOCALHOST.into(), 2380); 79 | let hostname = get() 80 | .context("Unable to get hostname")? 81 | .to_str() 82 | .context("Unable to convert hostname into string")? 83 | .into(); 84 | 85 | Ok(Self { 86 | cluster_cidr, 87 | crio_cidrs, 88 | service_cidr, 89 | etcd_client, 90 | etcd_peer, 91 | hostname, 92 | }) 93 | } 94 | 95 | /// Check if there are overlapping routes and warn 96 | fn warn_overlapping_route(cidr: Ipv4Network) -> Result<()> { 97 | let cmd = Command::new("ip").arg("route").output()?; 98 | if !cmd.status.success() { 99 | bail!("Unable to obtain `ip` routes") 100 | } 101 | String::from_utf8(cmd.stdout)? 102 | .lines() 103 | .filter(|x| !x.contains(Self::INTERFACE_PREFIX)) 104 | .filter_map(|x| x.split_whitespace().next()) 105 | .filter_map(|x| x.parse::().ok()) 106 | .filter(|x| x.is_supernet_of(cidr)) 107 | .for_each(|x| { 108 | warn!( 109 | "There seems to be an overlapping IP route {}. {}", 110 | x, "the cluster may not work as expected", 111 | ); 112 | }); 113 | Ok(()) 114 | } 115 | 116 | /// Retrieve the DNS address from the service CIDR 117 | pub fn api(&self) -> Result { 118 | self.service_cidr().nth(1).with_context(|| { 119 | format!( 120 | "Unable to retrieve first IP from service CIDR: {}", 121 | self.service_cidr() 122 | ) 123 | }) 124 | } 125 | 126 | /// Retrieve the DNS address from the service CIDR 127 | pub fn dns(&self) -> Result { 128 | self.service_cidr().nth(2).with_context(|| { 129 | format!( 130 | "Unable to retrieve second IP from service CIDR: {}", 131 | self.service_cidr() 132 | ) 133 | }) 134 | } 135 | } 136 | 137 | #[cfg(test)] 138 | pub mod tests { 139 | use super::*; 140 | use crate::config::tests::{test_config, test_config_wrong_cidr}; 141 | 142 | pub fn test_network() -> Result { 143 | let c = test_config()?; 144 | Network::new(&c) 145 | } 146 | 147 | #[test] 148 | fn new_success() -> Result<()> { 149 | let c = test_config()?; 150 | Network::new(&c)?; 151 | Ok(()) 152 | } 153 | 154 | #[test] 155 | fn new_failure() -> Result<()> { 156 | let c = test_config_wrong_cidr()?; 157 | assert!(Network::new(&c).is_err()); 158 | Ok(()) 159 | } 160 | 161 | #[test] 162 | fn dns_success() -> Result<()> { 163 | let c = test_config()?; 164 | let n = Network::new(&c)?; 165 | assert_eq!(n.dns()?, Ipv4Addr::new(10, 10, 1, 2)); 166 | Ok(()) 167 | } 168 | } 169 | -------------------------------------------------------------------------------- /src/nix.rs: -------------------------------------------------------------------------------- 1 | use crate::{system::System, Config}; 2 | use anyhow::Result; 3 | use log::{debug, info}; 4 | use std::{ 5 | env::{current_exe, var}, 6 | fs::{self, create_dir_all}, 7 | process::Command, 8 | }; 9 | 10 | pub struct Nix; 11 | 12 | impl Nix { 13 | pub const DIR: &'static str = "nix"; 14 | const NIX_ENV: &'static str = "IN_NIX"; 15 | 16 | /// Bootstrap the nix environment 17 | pub fn bootstrap(config: Config) -> Result<()> { 18 | // Prepare the nix dir 19 | debug!("Nix environment not found, bootstrapping one"); 20 | let dir = config.root().join(Self::DIR); 21 | 22 | // Write the configuration if not existing 23 | if !dir.exists() { 24 | create_dir_all(&dir)?; 25 | 26 | fs::write( 27 | 
dir.join("nixpkgs.json"), 28 | include_str!("../nix/nixpkgs.json"), 29 | )?; 30 | fs::write(dir.join("nixpkgs.nix"), include_str!("../nix/nixpkgs.nix"))?; 31 | 32 | let packages = &config.packages().join(" "); 33 | debug!("Adding additional packages: {:?}", config.packages()); 34 | fs::write( 35 | dir.join("default.nix"), 36 | include_str!("../nix/default.nix").replace("/* PACKAGES */", packages), 37 | )?; 38 | 39 | // Apply the overlay if existing 40 | let target_overlay = dir.join("overlay.nix"); 41 | match config.overlay() { 42 | // User defined overlay 43 | Some(overlay) => { 44 | info!("Using custom overlay '{}'", overlay.display()); 45 | fs::copy(overlay, target_overlay)?; 46 | } 47 | 48 | // The default overlay 49 | None => { 50 | debug!("Using default overlay"); 51 | fs::write(target_overlay, include_str!("../nix/overlay.nix"))?; 52 | } 53 | } 54 | } 55 | 56 | // Run the shell 57 | Self::run( 58 | &config, 59 | &[ 60 | &format!("{}", current_exe()?.display()), 61 | "--root", 62 | &format!("{}", config.root().display()), 63 | ], 64 | ) 65 | } 66 | 67 | /// Run a pure nix command 68 | pub fn run(config: &Config, args: &[&str]) -> Result<()> { 69 | Command::new(System::find_executable("nix")?) 70 | .env(Self::NIX_ENV, "true") 71 | .arg("run") 72 | .arg("-f") 73 | .arg(config.root().join(Self::DIR)) 74 | .arg("-c") 75 | .args(args) 76 | .status()?; 77 | Ok(()) 78 | } 79 | 80 | /// Returns true if running in nix environment 81 | pub fn is_active() -> bool { 82 | var(Nix::NIX_ENV).is_ok() 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /src/node.rs: -------------------------------------------------------------------------------- 1 | use crate::{network::Network, Config}; 2 | 3 | pub struct Node; 4 | 5 | impl Node { 6 | /// Retrieve the node name for the node number 7 | pub fn name(config: &Config, network: &Network, number: u8) -> String { 8 | if config.multi_node() { 9 | Self::raw(number) 10 | } else { 11 | network.hostname().into() 12 | } 13 | } 14 | 15 | /// Retrieve the raw node name 16 | pub fn raw(number: u8) -> String { 17 | const PREFIX: &str = "node"; 18 | format!("{}-{}", PREFIX, number) 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /src/pki.rs: -------------------------------------------------------------------------------- 1 | use crate::{network::Network, node::Node, Config}; 2 | use anyhow::{bail, Context, Result}; 3 | use getset::Getters; 4 | use log::{debug, info}; 5 | use serde_json::{json, to_string_pretty}; 6 | use std::{ 7 | fs::{self, create_dir_all}, 8 | net::Ipv4Addr, 9 | path::{Path, PathBuf}, 10 | process::{Command, Stdio}, 11 | }; 12 | 13 | #[derive(Getters)] 14 | pub struct Pki { 15 | #[get = "pub"] 16 | admin: Idendity, 17 | 18 | #[get = "pub"] 19 | apiserver: Idendity, 20 | 21 | #[get = "pub"] 22 | ca: Idendity, 23 | 24 | #[get = "pub"] 25 | controller_manager: Idendity, 26 | 27 | #[get = "pub"] 28 | kubelets: Vec, 29 | 30 | #[get = "pub"] 31 | proxy: Idendity, 32 | 33 | #[get = "pub"] 34 | scheduler: Idendity, 35 | 36 | #[get = "pub"] 37 | service_account: Idendity, 38 | } 39 | 40 | #[derive(Getters)] 41 | pub struct Idendity { 42 | #[get = "pub"] 43 | name: String, 44 | 45 | #[get = "pub"] 46 | user: String, 47 | 48 | #[get = "pub"] 49 | cert: PathBuf, 50 | 51 | #[get = "pub"] 52 | key: PathBuf, 53 | } 54 | 55 | impl Idendity { 56 | pub fn new(dir: &Path, name: &str, user: &str) -> Idendity { 57 | Idendity { 58 | cert: dir.join(format!("{}.pem", name)), 
59 | key: dir.join(format!("{}-key.pem", name)), 60 | name: name.into(), 61 | user: user.into(), 62 | } 63 | } 64 | } 65 | 66 | #[derive(Getters)] 67 | struct PkiConfig<'a> { 68 | #[get = "pub"] 69 | ca: &'a Idendity, 70 | 71 | #[get = "pub"] 72 | ca_config: PathBuf, 73 | 74 | #[get = "pub"] 75 | dir: &'a Path, 76 | 77 | #[get = "pub"] 78 | hostnames: &'a str, 79 | } 80 | 81 | const ADMIN_NAME: &str = "admin"; 82 | const APISERVER_NAME: &str = "kubernetes"; 83 | const CA_NAME: &str = "ca"; 84 | const CONTROLLER_MANAGER_NAME: &str = "kube-controller-manager"; 85 | const CONTROLLER_MANAGER_USER: &str = "system:kube-controller-manager"; 86 | const PROXY_NAME: &str = "kube-proxy"; 87 | const PROXY_USER: &str = "system:kube-proxy"; 88 | const SCHEDULER_NAME: &str = "kube-scheduler"; 89 | const SCHEDULER_USER: &str = "system:kube-scheduler"; 90 | const SERVICE_ACCOUNT_NAME: &str = "service-account"; 91 | 92 | impl Pki { 93 | pub fn new(config: &Config, network: &Network) -> Result { 94 | let dir = &config.root().join("pki"); 95 | let nodes = (0..config.nodes()) 96 | .map(|n| Node::name(config, network, n)) 97 | .collect::>(); 98 | 99 | // Create the CA only if necessary 100 | if dir.exists() { 101 | info!("PKI directory already exists, skipping generation"); 102 | 103 | let kubelets = if config.multi_node() { 104 | // Multiple nodes get identified via their node name 105 | nodes 106 | .iter() 107 | .map(|n| Idendity::new(dir, n, &Self::node_user(n))) 108 | .collect() 109 | } else { 110 | // Single node gets identified via its hostname 111 | vec![Idendity::new( 112 | dir, 113 | network.hostname(), 114 | &Self::node_user(network.hostname()), 115 | )] 116 | }; 117 | 118 | Ok(Pki { 119 | admin: Idendity::new(dir, ADMIN_NAME, ADMIN_NAME), 120 | apiserver: Idendity::new(dir, APISERVER_NAME, APISERVER_NAME), 121 | ca: Idendity::new(dir, CA_NAME, CA_NAME), 122 | controller_manager: Idendity::new( 123 | dir, 124 | CONTROLLER_MANAGER_NAME, 125 | CONTROLLER_MANAGER_USER, 126 | ), 127 | kubelets, 128 | proxy: Idendity::new(dir, PROXY_NAME, PROXY_USER), 129 | scheduler: Idendity::new(dir, SCHEDULER_NAME, SCHEDULER_USER), 130 | service_account: Idendity::new(dir, SERVICE_ACCOUNT_NAME, SERVICE_ACCOUNT_NAME), 131 | }) 132 | } else { 133 | info!("Generating certificates"); 134 | create_dir_all(dir)?; 135 | let ca_config = Self::write_ca_config(dir)?; 136 | let ca = Self::setup_ca(dir)?; 137 | 138 | let mut hostnames = vec![ 139 | network.api()?.to_string(), 140 | Ipv4Addr::LOCALHOST.to_string(), 141 | network.hostname().into(), 142 | "kubernetes".into(), 143 | "kubernetes.default".into(), 144 | "kubernetes.default.svc".into(), 145 | "kubernetes.default.svc.cluster".into(), 146 | "kubernetes.svc.cluster.local".into(), 147 | ]; 148 | hostnames.extend(nodes.clone()); 149 | 150 | let pki_config = &PkiConfig { 151 | dir, 152 | ca: &ca, 153 | ca_config, 154 | hostnames: &hostnames.join(","), 155 | }; 156 | 157 | let kubelets = if config.multi_node() { 158 | // Multiple nodes get identified via their node name 159 | nodes 160 | .iter() 161 | .map(|n| Self::setup_kubelet(pki_config, n)) 162 | .collect::, _>>()? 163 | } else { 164 | // Single node gets identified via its hostname 165 | vec![Self::setup_kubelet(pki_config, network.hostname())?] 
166 | }; 167 | 168 | Ok(Pki { 169 | admin: Self::setup_admin(pki_config)?, 170 | apiserver: Self::setup_apiserver(pki_config)?, 171 | controller_manager: Self::setup_controller_manager(pki_config)?, 172 | kubelets, 173 | proxy: Self::setup_proxy(pki_config)?, 174 | scheduler: Self::setup_scheduler(pki_config)?, 175 | service_account: Self::setup_service_account(pki_config)?, 176 | ca, 177 | }) 178 | } 179 | } 180 | 181 | fn setup_ca(dir: &Path) -> Result { 182 | debug!("Creating CA certificates"); 183 | const CN: &str = "kubernetes"; 184 | let csr = dir.join("ca-csr.json"); 185 | Self::write_csr(CN, CN, &csr)?; 186 | 187 | let mut cfssl = Command::new("cfssl") 188 | .arg("gencert") 189 | .arg("-initca") 190 | .arg(csr) 191 | .stdout(Stdio::piped()) 192 | .stderr(Stdio::null()) 193 | .spawn()?; 194 | 195 | let pipe = cfssl.stdout.take().context("unable to get stdout")?; 196 | let output = Command::new("cfssljson") 197 | .arg("-bare") 198 | .arg(dir.join(CA_NAME)) 199 | .stdin(pipe) 200 | .output()?; 201 | if !output.status.success() { 202 | debug!("cfssl/json: {:?}", output); 203 | bail!("CA certificate generation failed"); 204 | } 205 | debug!("CA certificates created"); 206 | Ok(Idendity::new(dir, CA_NAME, CA_NAME)) 207 | } 208 | 209 | fn setup_kubelet(pki_config: &PkiConfig, node: &str) -> Result { 210 | let user = Self::node_user(node); 211 | let csr_file = pki_config.dir().join(format!("{}-csr.json", node)); 212 | Self::write_csr(&user, "system:nodes", &csr_file)?; 213 | Self::generate(pki_config, node, &csr_file, &user) 214 | } 215 | 216 | fn setup_admin(pki_config: &PkiConfig) -> Result { 217 | let csr_file = pki_config.dir().join("admin-csr.json"); 218 | Self::write_csr(ADMIN_NAME, "system:masters", &csr_file)?; 219 | Self::generate(pki_config, ADMIN_NAME, &csr_file, ADMIN_NAME) 220 | } 221 | 222 | fn setup_controller_manager(pki_config: &PkiConfig) -> Result { 223 | let csr_file = pki_config.dir().join("kube-controller-manager-csr.json"); 224 | Self::write_csr(CONTROLLER_MANAGER_USER, CONTROLLER_MANAGER_USER, &csr_file)?; 225 | Self::generate( 226 | pki_config, 227 | CONTROLLER_MANAGER_NAME, 228 | &csr_file, 229 | CONTROLLER_MANAGER_USER, 230 | ) 231 | } 232 | 233 | fn setup_proxy(pki_config: &PkiConfig) -> Result { 234 | let csr_file = pki_config.dir().join("kube-proxy-csr.json"); 235 | Self::write_csr("system:kube-proxy", "system:node-proxier", &csr_file)?; 236 | Self::generate(pki_config, PROXY_NAME, &csr_file, PROXY_USER) 237 | } 238 | 239 | fn setup_scheduler(pki_config: &PkiConfig) -> Result { 240 | let csr_file = pki_config.dir().join("kube-scheduler-csr.json"); 241 | Self::write_csr(SCHEDULER_USER, SCHEDULER_USER, &csr_file)?; 242 | Self::generate(pki_config, SCHEDULER_NAME, &csr_file, SCHEDULER_USER) 243 | } 244 | 245 | fn setup_apiserver(pki_config: &PkiConfig) -> Result { 246 | let csr_file = pki_config.dir().join("kubernetes-csr.json"); 247 | Self::write_csr(APISERVER_NAME, APISERVER_NAME, &csr_file)?; 248 | Self::generate(pki_config, APISERVER_NAME, &csr_file, APISERVER_NAME) 249 | } 250 | 251 | fn setup_service_account(pki_config: &PkiConfig) -> Result { 252 | let csr_file = pki_config.dir().join("service-account-csr.json"); 253 | Self::write_csr("service-accounts", "kubernetes", &csr_file)?; 254 | Self::generate( 255 | pki_config, 256 | SERVICE_ACCOUNT_NAME, 257 | &csr_file, 258 | SERVICE_ACCOUNT_NAME, 259 | ) 260 | } 261 | 262 | fn generate(pki_config: &PkiConfig, name: &str, csr: &Path, user: &str) -> Result { 263 | debug!("Creating certificate for {}", name); 264 
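// The pipeline below follows the usual cfssl flow, roughly equivalent to
// this shell sketch (file names as written by this module):
//
//   cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
//     -profile=kubernetes -hostname=<hosts> <name>-csr.json \
//     | cfssljson -bare <name>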
| 265 | let mut cfssl = Command::new("cfssl") 266 | .arg("gencert") 267 | .arg(format!("-ca={}", pki_config.ca().cert().display())) 268 | .arg(format!("-ca-key={}", pki_config.ca().key().display())) 269 | .arg(format!("-config={}", pki_config.ca_config().display())) 270 | .arg("-profile=kubernetes") 271 | .arg(format!("-hostname={}", pki_config.hostnames())) 272 | .arg(csr) 273 | .stdout(Stdio::piped()) 274 | .stderr(Stdio::null()) 275 | .spawn()?; 276 | 277 | let pipe = cfssl.stdout.take().context("unable to get stdout")?; 278 | let output = Command::new("cfssljson") 279 | .arg("-bare") 280 | .arg(pki_config.dir().join(name)) 281 | .stdin(pipe) 282 | .output()?; 283 | if !output.status.success() { 284 | debug!("cfssl/json: {:?}", output.stdout); 285 | bail!("cfssl command failed"); 286 | } 287 | debug!("Certificate created for {}", name); 288 | 289 | Ok(Idendity::new(pki_config.dir(), name, user)) 290 | } 291 | 292 | fn write_csr(cn: &str, o: &str, dest: &Path) -> Result<()> { 293 | let csr = json!({ 294 | "CN": cn, 295 | "key": { 296 | "algo": "rsa", 297 | "size": 2048 298 | }, 299 | "names": [{ 300 | "O": o, 301 | "OU": "kubernetes", 302 | }] 303 | }); 304 | fs::write(dest, to_string_pretty(&csr)?)?; 305 | Ok(()) 306 | } 307 | 308 | fn write_ca_config(dir: &Path) -> Result { 309 | let cfg = json!({ 310 | "signing": { 311 | "default": { 312 | "expiry": "8760h" 313 | }, 314 | "profiles": { 315 | "kubernetes": { 316 | "usages": [ 317 | "signing", 318 | "key encipherment", 319 | "server auth", 320 | "client auth" 321 | ], 322 | "expiry": "8760h" 323 | } 324 | } 325 | } 326 | }); 327 | let dest = dir.join("ca-config.json"); 328 | fs::write(&dest, to_string_pretty(&cfg)?)?; 329 | Ok(dest) 330 | } 331 | 332 | /// Retrieve the node user 333 | fn node_user(node: &str) -> String { 334 | format!("system:node:{}", node) 335 | } 336 | } 337 | 338 | #[cfg(test)] 339 | mod tests { 340 | use super::*; 341 | use crate::{ 342 | config::tests::{test_config, test_config_wrong_root}, 343 | network::tests::test_network, 344 | }; 345 | 346 | #[test] 347 | fn new_success() -> Result<()> { 348 | let c = test_config()?; 349 | let n = test_network()?; 350 | Pki::new(&c, &n)?; 351 | Ok(()) 352 | } 353 | 354 | #[test] 355 | fn new_failure() -> Result<()> { 356 | let c = test_config_wrong_root()?; 357 | let n = test_network()?; 358 | assert!(Pki::new(&c, &n).is_err()); 359 | Ok(()) 360 | } 361 | } 362 | -------------------------------------------------------------------------------- /src/podman.rs: -------------------------------------------------------------------------------- 1 | use crate::{system::System, Config}; 2 | use anyhow::Result; 3 | use log::LevelFilter; 4 | use std::{ 5 | fs::{self, create_dir_all}, 6 | path::{Path, PathBuf}, 7 | }; 8 | 9 | pub struct Podman; 10 | 11 | impl Podman { 12 | /// The executable name 13 | pub const EXECUTABLE: &'static str = "podman"; 14 | 15 | /// Returns true if podman is configured as container runtime 16 | pub fn is_configured(config: &Config) -> bool { 17 | config.container_runtime() == Self::EXECUTABLE 18 | } 19 | 20 | /// Retrieve the podman build args 21 | pub fn build_args(config: &Config, policy_json: &Path) -> Result> { 22 | // Prepare the CNI dir 23 | let dir = Self::cni_dir(config); 24 | create_dir_all(&dir)?; 25 | fs::write( 26 | &dir.join("87-podman-bridge.conflist"), 27 | include_str!("assets/podman-bridge.json"), 28 | )?; 29 | 30 | let mut args = Self::default_args(config)?; 31 | args.extend(vec![ 32 | "build".into(), 33 | format!("--signature-policy={}", 
policy_json.display()), 34 | ]); 35 | 36 | Ok(args) 37 | } 38 | 39 | /// Podman args which should apply to every command 40 | pub fn default_args(config: &Config) -> Result> { 41 | let log_level = if config.log_level() >= LevelFilter::Debug { 42 | "debug".into() 43 | } else { 44 | config.log_level().to_string() 45 | }; 46 | let mut args = vec![ 47 | format!("--cni-config-dir={}", Self::cni_dir(config).display()), 48 | format!("--conmon={}", System::find_executable("conmon")?.display()), 49 | format!("--log-level={}", log_level), 50 | format!("--runtime={}", System::find_executable("runc")?.display()), 51 | "--cgroup-manager=cgroupfs".into(), 52 | "--events-backend=none".into(), 53 | ]; 54 | if System::in_container()? { 55 | args.push("--storage-driver=vfs".into()); 56 | } 57 | Ok(args) 58 | } 59 | 60 | /// Retrieve the internal CNI directory 61 | fn cni_dir(config: &Config) -> PathBuf { 62 | config.root().join(Self::EXECUTABLE) 63 | } 64 | } 65 | 66 | #[cfg(test)] 67 | mod tests { 68 | use super::*; 69 | use crate::config::tests::{test_config, test_config_wrong_root}; 70 | 71 | #[test] 72 | fn is_configured_success() -> Result<()> { 73 | let c = test_config()?; 74 | assert!(Podman::is_configured(&c)); 75 | Ok(()) 76 | } 77 | 78 | #[test] 79 | fn build_args_success() -> Result<()> { 80 | let c = test_config()?; 81 | let p = PathBuf::from("policy.json"); 82 | Podman::build_args(&c, &p)?; 83 | Ok(()) 84 | } 85 | 86 | #[test] 87 | fn build_args_failure() -> Result<()> { 88 | let c = test_config_wrong_root()?; 89 | let p = PathBuf::from("policy.json"); 90 | assert!(Podman::build_args(&c, &p).is_err()); 91 | Ok(()) 92 | } 93 | 94 | #[test] 95 | fn default_args_success() -> Result<()> { 96 | let c = test_config()?; 97 | assert!(!Podman::default_args(&c)?.is_empty()); 98 | Ok(()) 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /src/process.rs: -------------------------------------------------------------------------------- 1 | use crate::system::System; 2 | use anyhow::{bail, Context, Result}; 3 | use crossbeam_channel::{bounded, Receiver, Sender}; 4 | use log::{debug, error, info}; 5 | use nix::{ 6 | sys::signal::{kill, Signal}, 7 | unistd::Pid, 8 | }; 9 | use serde::{Deserialize, Serialize}; 10 | use std::{ 11 | fs::{self, create_dir_all, File}, 12 | io::{BufRead, BufReader}, 13 | path::{Path, PathBuf}, 14 | process::{Command, Stdio}, 15 | thread::{spawn, JoinHandle}, 16 | time::Instant, 17 | }; 18 | 19 | /// A general process abstraction 20 | pub struct Process { 21 | command: String, 22 | died: Receiver<()>, 23 | kill: Sender<()>, 24 | log_file: PathBuf, 25 | name: String, 26 | pid: u32, 27 | readyness_timeout: u64, 28 | watch: Option>>, 29 | } 30 | 31 | /// The trait to stop something 32 | pub trait Stoppable { 33 | /// Stop the process 34 | fn stop(&mut self) -> Result<()>; 35 | } 36 | 37 | /// A started process 38 | pub type Started = Box; 39 | 40 | /// A vector of processes which can be stopped 41 | pub type Stoppables = Vec; 42 | 43 | /// The process state as result 44 | pub type ProcessState = Result; 45 | 46 | #[derive(Deserialize, Serialize)] 47 | struct Run { 48 | command: PathBuf, 49 | args: Vec, 50 | } 51 | 52 | impl Process { 53 | /// Creates a new `Process` instance by spawning the provided `command` and `args`. 54 | /// If the process creation fails, an `Error` will be returned. 
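/// A minimal usage sketch (not part of the original sources; the command and
/// readiness pattern are illustrative):
///
///     let mut p = Process::start(dir, "Echo", "echo", &["ready"])?;
///     p.wait_ready("ready")?;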
55 | pub fn start(dir: &Path, identifier: &str, command: &str, args: &[&str]) -> Result { 56 | // Prepare the commands 57 | if command.is_empty() { 58 | bail!("No valid command provided") 59 | } 60 | info!("Starting {}", identifier); 61 | 62 | // Write the executed command into the dir 63 | create_dir_all(dir)?; 64 | 65 | // If the run file exists, execute only that one 66 | let run_file = dir.join("run.yml"); 67 | let run = if !run_file.exists() { 68 | debug!( 69 | "No previous run file '{}' found, writing new one", 70 | run_file.display() 71 | ); 72 | // Write the run file 73 | let f = Run { 74 | command: System::find_executable(command)?, 75 | args: args.iter().map(|x| (*x).to_string()).collect(), 76 | }; 77 | fs::write(run_file, serde_yaml::to_string(&f)?)?; 78 | f 79 | } else { 80 | debug!("Re using run file '{}'", run_file.display()); 81 | let f = File::open(run_file)?; 82 | serde_yaml::from_reader(f)? 83 | }; 84 | 85 | // Prepare the log dir and file 86 | let mut log_file = dir.join(command); 87 | log_file.set_extension("log"); 88 | let out_file = File::create(&log_file)?; 89 | let err_file = out_file.try_clone()?; 90 | 91 | // Spawn the process child 92 | let mut child = Command::new(run.command) 93 | .args(run.args) 94 | .stderr(Stdio::from(err_file)) 95 | .stdout(Stdio::from(out_file)) 96 | .spawn() 97 | .with_context(|| format!("Unable to start process '{}' ({})", identifier, command,))?; 98 | 99 | // Start the watcher thread 100 | let (kill, killed) = bounded(1); 101 | let (dead, died) = bounded(1); 102 | let c = command.to_owned(); 103 | let n = identifier.to_owned(); 104 | let pid = child.id(); 105 | let watch = spawn(move || { 106 | // Wait for the process to exit 107 | let status = child.wait()?; 108 | 109 | // No kill send, we assume that the process died 110 | if killed.try_recv().is_err() { 111 | error!("{} ({}) died unexpectedly", n, c); 112 | dead.send(())?; 113 | } else { 114 | info!("{} stopped", n); 115 | } 116 | debug!("{} ({}) {}", n, c, status); 117 | Ok(()) 118 | }); 119 | 120 | Ok(Process { 121 | command: command.into(), 122 | died, 123 | kill, 124 | log_file, 125 | name: identifier.into(), 126 | pid, 127 | readyness_timeout: 120, 128 | watch: Some(watch), 129 | }) 130 | } 131 | 132 | /// Wait for the process to become ready, by searching for the pattern in 133 | /// every line of its output. 
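/// The loop polls the log file until `readyness_timeout` (120 seconds by
/// default) elapses; it fails early if the watcher thread reports that the
/// child died, and it stops the process before returning a timeout error.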
134 | pub fn wait_ready(&mut self, pattern: &str) -> Result<()> { 135 | debug!( 136 | "Waiting for process '{}' ({}) to become ready with pattern: '{}'", 137 | self.name, self.command, pattern 138 | ); 139 | let now = Instant::now(); 140 | let file = File::open(&self.log_file)?; 141 | let mut reader = BufReader::new(file); 142 | 143 | while now.elapsed().as_secs() < self.readyness_timeout { 144 | let mut line = String::new(); 145 | reader.read_line(&mut line)?; 146 | 147 | if line.contains(pattern) { 148 | info!("{} is ready", self.name); 149 | debug!("Found pattern '{}' in line '{}'", pattern, line.trim()); 150 | return Ok(()); 151 | } 152 | 153 | if self.died.try_recv().is_ok() { 154 | bail!("{} ({}) died", self.command, self.name) 155 | } 156 | } 157 | 158 | // Cleanup since process is not ready 159 | self.stop()?; 160 | error!( 161 | "Timed out waiting for process '{}' ({}) to become ready", 162 | self.name, self.command 163 | ); 164 | bail!("Process timeout") 165 | } 166 | 167 | /// Retrieve a pseudo state for stopped processes 168 | pub fn stopped() -> ProcessState { 169 | bail!("Process not started yet") 170 | } 171 | } 172 | 173 | impl Stoppable for Process { 174 | /// Stopping the process by killing it 175 | fn stop(&mut self) -> Result<()> { 176 | debug!("Stopping process {} (via {})", self.name, self.command); 177 | 178 | // Indicate that this shutdown is intended 179 | self.kill.send(()).with_context(|| { 180 | format!( 181 | "Unable to send kill signal to process {} (via {})", 182 | self.name, self.command, 183 | ) 184 | })?; 185 | 186 | // Send SIGTERM to the process 187 | kill(Pid::from_raw(self.pid as i32), Signal::SIGTERM)?; 188 | 189 | // Join the waiting thread 190 | if let Some(handle) = self.watch.take() { 191 | if handle.join().is_err() { 192 | bail!( 193 | "Unable to stop process {} (via {})", 194 | self.name, 195 | self.command 196 | ); 197 | } 198 | } 199 | debug!("Process {} (via {}) stopped", self.name, self.command); 200 | Ok(()) 201 | } 202 | } 203 | 204 | #[cfg(test)] 205 | mod tests { 206 | use super::*; 207 | use tempfile::tempdir; 208 | 209 | #[test] 210 | fn stopped() { 211 | assert!(Process::stopped().is_err()) 212 | } 213 | 214 | #[test] 215 | fn start_success() -> Result<()> { 216 | let d = tempdir()?; 217 | Process::start(d.path(), "", "echo", &[])?; 218 | Ok(()) 219 | } 220 | 221 | #[test] 222 | fn start_failure_no_command() -> Result<()> { 223 | let d = tempdir()?; 224 | assert!(Process::start(d.path(), "", "", &[]).is_err()); 225 | Ok(()) 226 | } 227 | 228 | #[test] 229 | fn start_failure_invalid_command() -> Result<()> { 230 | let d = tempdir()?; 231 | assert!(Process::start(d.path(), "", "invalid_command", &[]).is_err()); 232 | Ok(()) 233 | } 234 | 235 | #[test] 236 | fn wait_ready_success() -> Result<()> { 237 | let d = tempdir()?; 238 | let mut p = Process::start(d.path(), "", "echo", &["test"])?; 239 | p.wait_ready("test")?; 240 | Ok(()) 241 | } 242 | 243 | #[test] 244 | fn wait_ready_failure() -> Result<()> { 245 | let d = tempdir()?; 246 | let mut p = Process::start(d.path(), "", "echo", &["test"])?; 247 | p.readyness_timeout = 1; 248 | assert!(p.wait_ready("invalid").is_err()); 249 | Ok(()) 250 | } 251 | 252 | #[test] 253 | fn stop_success() -> Result<()> { 254 | let d = tempdir()?; 255 | let mut p = Process::start(d.path(), "", "sleep", &["500"])?; 256 | p.stop()?; 257 | Ok(()) 258 | } 259 | } 260 | -------------------------------------------------------------------------------- /src/progress.rs: 
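// progress.rs keeps a single, weakly referenced global ProgressBar so that the
// logger above can update it without owning it; once `reset` drops the owning
// Arc, `Progress::get()` returns None again and logging falls back to stderr.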
-------------------------------------------------------------------------------- 1 | use console::style; 2 | use indicatif::{ProgressBar, ProgressStyle}; 3 | use lazy_static::lazy_static; 4 | use log::LevelFilter; 5 | use parking_lot::RwLock; 6 | use std::sync::{Arc, Weak}; 7 | 8 | pub struct Progress { 9 | inner: Option>, 10 | } 11 | 12 | lazy_static! { 13 | static ref PROGRESS_BAR: RwLock>> = RwLock::new(None); 14 | } 15 | 16 | impl Progress { 17 | // Create a new global progress bar 18 | pub fn new(items: u64, level: LevelFilter) -> Progress { 19 | if level < LevelFilter::Info { 20 | return Progress { inner: None }; 21 | } 22 | 23 | // Create the progress bar 24 | let p = Arc::new(ProgressBar::new(items)); 25 | p.set_style(ProgressStyle::default_bar().template(&format!( 26 | "{}{}{} {}", 27 | style("[").white().dim(), 28 | "{spinner:.green} {elapsed:>3}", 29 | style("]").white().dim(), 30 | "{bar:25.green/blue} {pos:>2}/{len} {msg}", 31 | ))); 32 | p.enable_steady_tick(100); 33 | 34 | // Set the global instance 35 | *PROGRESS_BAR.write() = Some(Arc::downgrade(&p)); 36 | 37 | Progress { inner: Some(p) } 38 | } 39 | 40 | // Get the progress bar 41 | pub fn get() -> Option> { 42 | PROGRESS_BAR.read().as_ref()?.upgrade() 43 | } 44 | 45 | // Reset and consume the progress bar 46 | pub fn reset(self) { 47 | if let Some(p) = self.inner { 48 | p.finish() 49 | } 50 | *PROGRESS_BAR.write() = None; 51 | } 52 | } 53 | 54 | #[cfg(test)] 55 | pub mod tests { 56 | use super::*; 57 | 58 | #[test] 59 | fn progress_success() { 60 | let p = Progress::new(10, LevelFilter::Info); 61 | assert!(Progress::get().is_some()); 62 | p.reset(); 63 | assert!(Progress::get().is_none()); 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /src/proxy.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | config::Config, 3 | kubeconfig::KubeConfig, 4 | network::Network, 5 | node::Node, 6 | process::{Process, ProcessState, Stoppable}, 7 | }; 8 | use anyhow::Result; 9 | use std::fs::{self, create_dir_all}; 10 | 11 | pub struct Proxy { 12 | process: Process, 13 | } 14 | 15 | impl Proxy { 16 | pub fn start(config: &Config, network: &Network, kubeconfig: &KubeConfig) -> ProcessState { 17 | let dir = config.root().join("proxy"); 18 | create_dir_all(&dir)?; 19 | 20 | let yml = format!( 21 | include_str!("assets/proxy.yml"), 22 | kubeconfig.proxy().display(), 23 | network.cluster_cidr(), 24 | ); 25 | let cfg = dir.join("config.yml"); 26 | 27 | if !cfg.exists() { 28 | fs::write(&cfg, yml)?; 29 | } 30 | 31 | let mut process = Process::start( 32 | &dir, 33 | "Proxy", 34 | "kube-proxy", 35 | &[ 36 | &format!("--config={}", cfg.display()), 37 | &format!( 38 | "--hostname-override={}", 39 | if config.multi_node() { 40 | Node::name(config, network, 0) 41 | } else { 42 | network.hostname().into() 43 | } 44 | ), 45 | ], 46 | )?; 47 | 48 | process.wait_ready("Caches are synced")?; 49 | Ok(Box::new(Proxy { process })) 50 | } 51 | } 52 | 53 | impl Stoppable for Proxy { 54 | fn stop(&mut self) -> Result<()> { 55 | self.process.stop() 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /src/scheduler.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | config::Config, 3 | kubeconfig::KubeConfig, 4 | process::{Process, ProcessState, Stoppable}, 5 | }; 6 | use anyhow::Result; 7 | use std::fs::{self, create_dir_all}; 8 | 9 | pub struct Scheduler { 
10 |     process: Process,
11 | }
12 |
13 | impl Scheduler {
14 |     pub fn start(config: &Config, kubeconfig: &KubeConfig) -> ProcessState {
15 |         let dir = config.root().join("scheduler");
16 |         create_dir_all(&dir)?;
17 |
18 |         let yml = format!(
19 |             include_str!("assets/scheduler.yml"),
20 |             kubeconfig.scheduler().display()
21 |         );
22 |         let cfg = &dir.join("config.yml");
23 |
24 |         if !cfg.exists() {
25 |             fs::write(cfg, yml)?;
26 |         }
27 |
28 |         let mut process = Process::start(
29 |             &dir,
30 |             "Scheduler",
31 |             "kube-scheduler",
32 |             &[&format!("--config={}", cfg.display()), "--v=2"],
33 |         )?;
34 |
35 |         process.wait_ready("Serving securely")?;
36 |         Ok(Box::new(Self { process }))
37 |     }
38 | }
39 |
40 | impl Stoppable for Scheduler {
41 |     fn stop(&mut self) -> Result<()> {
42 |         self.process.stop()
43 |     }
44 | }
45 |
--------------------------------------------------------------------------------
/src/system.rs:
--------------------------------------------------------------------------------
1 | use crate::{node::Node, Config};
2 | use anyhow::{bail, Context, Result};
3 | use log::{debug, info, warn};
4 | use std::{
5 |     env::{split_paths, var, var_os},
6 |     fmt::Display,
7 |     fs::{self, read_to_string},
8 |     net::Ipv4Addr,
9 |     path::{Path, PathBuf},
10 |     process::Command,
11 | };
12 |
13 | pub struct System {
14 |     hosts: Option<String>,
15 | }
16 |
17 | impl System {
18 |     /// Create a new system
19 |     pub fn setup(config: &Config) -> Result<Self> {
20 |         if Self::in_container()? {
21 |             info!("Skipping modprobe and sysctl for sake of containerization")
22 |         } else {
23 |             for module in &["overlay", "br_netfilter", "ip_conntrack"] {
24 |                 Self::modprobe(module)?;
25 |             }
26 |             for sysctl in &[
27 |                 "net.bridge.bridge-nf-call-ip6tables",
28 |                 "net.bridge.bridge-nf-call-iptables",
29 |                 "net.ipv4.conf.all.route_localnet",
30 |                 "net.ipv4.ip_forward",
31 |             ] {
32 |                 Self::sysctl_enable(sysctl)?;
33 |             }
34 |         }
35 |
36 |         let hosts = if config.multi_node() {
37 |             // Try to write the hostnames, which does not work on every system
38 |             let hosts_file = Self::hosts();
39 |             let hosts = read_to_string(&hosts_file)?;
40 |             let local_hosts = (0..config.nodes())
41 |                 .map(|x| format!("{} {}", Ipv4Addr::LOCALHOST, Node::raw(x)))
42 |                 .collect::<Vec<_>>();
43 |
44 |             let mut new_hosts = hosts
45 |                 .lines()
46 |                 .filter(|x| !local_hosts.iter().any(|y| x == y))
47 |                 .map(|x| x.into())
48 |                 .collect::<Vec<_>>();
49 |             new_hosts.extend(local_hosts);
50 |
51 |             match fs::write(&hosts_file, new_hosts.join("\n")) {
52 |                 Err(e) => {
53 |                     warn!(
54 |                         "Unable to write hosts file '{}'. The nodes may not be reachable: {}",
55 |                         hosts_file.display(),
56 |                         e
57 |                     );
58 |                     None
59 |                 }
60 |                 _ => Some(hosts),
61 |             }
62 |         } else {
63 |             None
64 |         };
65 |
66 |         Ok(Self { hosts })
67 |     }
68 |
69 |     /// Returns true if the process is running inside a container
70 |     pub fn in_container() -> Result<bool> {
71 |         Ok(
72 |             read_to_string(PathBuf::from("/").join("proc").join("1").join("cgroup"))
73 |                 .context("Unable to retrieve the system's container status")?
74 |                 .lines()
75 |                 .any(|x| x.contains("libpod") || x.contains("podman") || x.contains("docker")),
76 |         )
77 |     }
78 |
79 |     /// Restore the initial system state
80 |     pub fn cleanup(&self) {
81 |         if let Some(hosts) = &self.hosts {
82 |             if let Err(e) = fs::write(Self::hosts(), hosts) {
83 |                 warn!(
84 |                     "Unable to restore hosts file, may need manual cleanup: {}",
85 |                     e
86 |                 )
87 |             }
88 |         }
89 |     }
90 |
91 |     /// Find an executable inside the current $PATH environment
92 |     pub fn find_executable<P>(name: P) -> Result<PathBuf>
93 |     where
94 |         P: AsRef<Path> + Display,
95 |     {
96 |         var_os("PATH")
97 |             .and_then(|paths| {
98 |                 split_paths(&paths)
99 |                     .filter_map(|dir| {
100 |                         let full_path = dir.join(&name);
101 |                         if full_path.is_file() {
102 |                             Some(full_path)
103 |                         } else {
104 |                             None
105 |                         }
106 |                     })
107 |                     .next()
108 |             })
109 |             .with_context(|| format!("Unable to find executable '{}' in $PATH", name))
110 |     }
111 |
112 |     /// Return the full path to the default system shell
113 |     pub fn shell() -> Result<String> {
114 |         let shell = var("SHELL").unwrap_or_else(|_| "sh".into());
115 |         Ok(format!(
116 |             "{}",
117 |             Self::find_executable(&shell)
118 |                 .with_context(|| format!("Unable to find system shell '{}'", shell))?
119 |                 .display()
120 |         ))
121 |     }
122 |
123 |     /// Load a single kernel module via 'modprobe'
124 |     fn modprobe(module: &str) -> Result<()> {
125 |         debug!("Loading kernel module '{}'", module);
126 |         let output = Command::new("modprobe").arg(module).output()?;
127 |         if !output.status.success() {
128 |             bail!(
129 |                 "Unable to load '{}' kernel module: {}",
130 |                 module,
131 |                 String::from_utf8(output.stderr)?,
132 |             );
133 |         }
134 |         Ok(())
135 |     }
136 |
137 |     /// Enable a single sysctl by setting it to '1'
138 |     fn sysctl_enable(key: &str) -> Result<()> {
139 |         debug!("Enabling sysctl '{}'", key);
140 |         let enable_arg = format!("{}=1", key);
141 |         let output = Command::new("sysctl").arg("-w").arg(&enable_arg).output()?;
142 |         let stderr = String::from_utf8(output.stderr)?;
143 |         if !stderr.is_empty() {
144 |             bail!("Unable to set sysctl '{}': {}", enable_arg, stderr);
145 |         }
146 |         Ok(())
147 |     }
148 |
149 |     fn hosts() -> PathBuf {
150 |         PathBuf::from("/").join("etc").join("hosts")
151 |     }
152 | }
153 |
154 | #[cfg(test)]
155 | mod tests {
156 |     use super::*;
157 |     use std::env::set_var;
158 |
159 |     const VALID_EXECUTABLE: &str = "runc";
160 |     const INVALID_EXECUTABLE: &str = "should-not-exist";
161 |
162 |     #[test]
163 |     fn module_failure() {
164 |         assert!(System::modprobe("invalid").is_err());
165 |     }
166 |
167 |     #[test]
168 |     fn sysctl_failure() {
169 |         assert!(System::sysctl_enable("invalid").is_err());
170 |     }
171 |
172 |     #[test]
173 |     fn find_executable_success() {
174 |         assert!(System::find_executable(VALID_EXECUTABLE).is_ok());
175 |     }
176 |
177 |     #[test]
178 |     fn find_executable_failure() {
179 |         assert!(System::find_executable(INVALID_EXECUTABLE).is_err());
180 |     }
181 |
182 |     #[test]
183 |     fn find_shell_success() {
184 |         set_var("SHELL", VALID_EXECUTABLE);
185 |         assert!(System::shell().is_ok());
186 |     }
187 | }
188 |
--------------------------------------------------------------------------------
/tests/common.rs:
--------------------------------------------------------------------------------
1 | #![allow(dead_code)]
2 | use anyhow::{bail, Result};
3 | use std::{
4 |     env::{current_dir, var},
5 |     fs::{canonicalize, create_dir_all, File},
6 |     io::{BufRead, BufReader},
7 |     path::PathBuf,
8 |     process::{Command, Stdio},
9 |     time::Instant,
10 | };
11 |
12 | const TIMEOUT: u64 = 2000;
13 | pub const SUDO: &str = "sudo";
14 |
15 | pub fn run_podman_test(test: &str, args: Option<&[&str]>) -> Result<()> {
16 |     run_container_test(test, "podman", args)
17 | }
18 |
19 | pub fn run_docker_test(test: &str, args: Option<&[&str]>) -> Result<()> {
20 |     run_container_test(test, "docker", args)
21 | }
22 |
23 | fn run_container_test(test: &str, command: &str, args: Option<&[&str]>) -> Result<()> {
24 |     let image = var("IMAGE")?;
25 |     let mut full_args = vec![
26 |         command,
27 |         "run",
28 |         "--name",
29 |         test,
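        // The container shares the test's name so the cleanup step below can remove it by name.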
30 |         "--rm",
31 |         "--privileged",
32 |         "--net=host",
33 |     ];
34 |
35 |     // Mount /dev/mapper if needed
36 |     let devmapper = PathBuf::from("/").join("dev").join("mapper");
37 |     let devmapper_arg = format!("-v={d}:{d}", d = devmapper.display());
38 |     if devmapper.exists() {
39 |         full_args.push(&devmapper_arg);
40 |     }
41 |
42 |     // Mount test dir
43 |     let mut test_dir = test_dir(test);
44 |     create_dir_all(&test_dir)?;
45 |     test_dir = canonicalize(&test_dir)?;
46 |     let test_dir_volume_arg = format!("-v={d}:/kubernix-run", d = test_dir.display());
47 |     full_args.push(&test_dir_volume_arg);
48 |
49 |     full_args.push(&image);
50 |     if let Some(a) = args {
51 |         full_args.extend(a);
52 |     }
53 |     full_args.push("--log-level=debug");
54 |
55 |     let success = run_test(test, &full_args, none_hook)?;
56 |
57 |     // Cleanup
58 |     let status = Command::new(SUDO)
59 |         .arg(command)
60 |         .arg("rm")
61 |         .arg("-f")
62 |         .arg(test)
63 |         .status()?;
64 |
65 |     // Result evaluation
66 |     if !success || !status.success() {
67 |         bail!("Test failed")
68 |     }
69 |     Ok(())
70 | }
71 |
72 | pub fn run_local_test<F>(test: &str, args: Option<&[&str]>, hook: F) -> Result<()>
73 | where
74 |     F: Fn() -> Result<()>,
75 | {
76 |     let binary = current_dir()?
77 |         .join("target")
78 |         .join("release")
79 |         .join("kubernix")
80 |         .display()
81 |         .to_string();
82 |     let root = format!("--root={}", run_root(test).display());
83 |     let mut full_args: Vec<&str> = vec![&binary, &root, "--log-level=debug"];
84 |     if let Some(a) = args {
85 |         full_args.extend(a);
86 |     }
87 |     let success = run_test(test, &full_args, hook)?;
88 |
89 |     // Kill the kubernix pid
90 |     let pid_file = run_root(test).join("kubernix.pid");
91 |     println!("Killing pid: {}", pid_file.display());
92 |     Command::new(SUDO)
93 |         .arg("pkill")
94 |         .arg("-F")
95 |         .arg(&pid_file)
96 |         .status()?;
97 |     let cleanup_success = check_file_for_output(test, "All done", "died unexpectedly")?;
98 |
99 |     // Results evaluation
100 |     if !success || !cleanup_success {
101 |         bail!("Test failed")
102 |     }
103 |     Ok(())
104 | }
105 |
106 | pub fn run_root(test: &str) -> PathBuf {
107 |     test_dir(test).join("run")
108 | }
109 |
110 | pub fn none_hook() -> Result<()> {
111 |     Ok(())
112 | }
113 |
114 | fn run_test<F>(test: &str, args: &[&str], hook: F) -> Result<bool>
115 | where
116 |     F: Fn() -> Result<()>,
117 | {
118 |     // Prepare the logs dir
119 |     let test_dir = test_dir(test);
120 |     Command::new(SUDO)
121 |         .arg("rm")
122 |         .arg("-rf")
123 |         .arg(&test_dir)
124 |         .status()?;
125 |     create_dir_all(&test_dir)?;
126 |     let log_file = log_file(test);
127 |     println!("Writing to log file: {}", log_file.display());
128 |
129 |     // Start the process
130 |     println!("running: {}", args.join(" "));
131 |     let out_file = File::create(&log_file)?;
132 |     let err_file = out_file.try_clone()?;
133 |     Command::new(SUDO)
134 |         .arg("env")
135 |         .arg(format!("PATH={}", var("PATH")?))
136 |         .args(args)
137 |         .arg("--no-shell")
138 |         .stderr(Stdio::from(err_file))
139 |         .stdout(Stdio::from(out_file))
140 |         .spawn()?;
141 |
142 |     // Check the expected output
143 |     println!("Waiting for process to be ready");
144 |     let success_ready = check_file_for_output(
145 |         test,
146 |         "Waiting for interrupt",
147 |         "Unable to start all processes",
148 |     )?;
149 |     println!("Process ready: {}", success_ready);
150 |
151 |     // Run the test hook
152 |     let success_hook = if success_ready {
153 |         if let Err(e) = hook() {
154 |             println!("Hook errored: {}", e);
155 |             false
156 |         } else {
157 |             true
158 |         }
159 |     } else {
160 |         false
161 |     };
162 |
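    // Note: the spawned kubernix process is still running at this point; callers stop it
    // afterwards, either via the PID file or by removing the container.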
163 |     // Check results
164 |     Ok(success_ready && success_hook)
165 | }
166 |
167 | fn check_file_for_output(test: &str, success_pattern: &str, failure_pattern: &str) -> Result<bool> {
168 |     let mut success = false;
169 |     let now = Instant::now();
170 |     let mut reader = BufReader::new(File::open(log_file(test))?);
171 |
172 |     while now.elapsed().as_secs() < TIMEOUT {
173 |         let mut line = String::new();
174 |         reader.read_line(&mut line)?;
175 |         if !line.is_empty() {
176 |             print!("{}", line);
177 |             if line.contains(success_pattern) {
178 |                 success = true;
179 |                 break;
180 |             }
181 |             if line.contains(failure_pattern) {
182 |                 break;
183 |             }
184 |         }
185 |     }
186 |     Ok(success)
187 | }
188 |
189 | fn test_dir(test: &str) -> PathBuf {
190 |     PathBuf::from(format!("test-{}", test))
191 | }
192 |
193 | fn log_file(test: &str) -> PathBuf {
194 |     test_dir(test).join("kubernix.log")
195 | }
196 |
--------------------------------------------------------------------------------
/tests/e2e.rs:
--------------------------------------------------------------------------------
1 | mod common;
2 |
3 | use anyhow::{bail, Result};
4 | use common::{run_local_test, run_root, SUDO};
5 | use std::{env::var, process::Command};
6 |
7 | #[test]
8 | fn local_single_node() -> Result<()> {
9 |     let test = "e2e-single-node";
10 |     run_local_test(test, None, || {
11 |         let kubeconfig = run_root(test).join("kubeconfig").join("admin.kubeconfig");
12 |         if !Command::new(SUDO)
13 |             .arg("env")
14 |             .arg(format!("PATH={}", var("PATH")?))
15 |             .arg(format!("KUBECONFIG={}", kubeconfig.display()))
16 |             .arg("KUBERNETES_SERVICE_HOST=127.0.0.1")
17 |             .arg("KUBERNETES_SERVICE_PORT=6443")
18 |             .arg("e2e.test")
19 |             .arg("--provider=local")
20 |             // TODO: enable more tests
21 |             // .arg("--ginkgo.focus=.*\\[Conformance\\].*")
22 |             .arg("--ginkgo.focus=.*should serve a basic endpoint from pods.*")
23 |             .status()?
24 |             .success()
25 |         {
26 |             bail!("e2e tests failed");
27 |         }
28 |         Ok(())
29 |     })
30 | }
31 |
--------------------------------------------------------------------------------
/tests/integration.rs:
--------------------------------------------------------------------------------
1 | mod common;
2 |
3 | use anyhow::Result;
4 | use common::{none_hook, run_docker_test, run_local_test, run_podman_test};
5 |
6 | #[test]
7 | fn local_single_node() -> Result<()> {
8 |     run_local_test("integration-local-single", None, none_hook)
9 | }
10 |
11 | #[test]
12 | fn local_multi_node() -> Result<()> {
13 |     run_local_test("integration-local-multi", Some(&["--nodes=2"]), none_hook)
14 | }
15 |
16 | #[test]
17 | fn docker_single_node() -> Result<()> {
18 |     run_docker_test("integration-docker-single", None)
19 | }
20 |
21 | #[test]
22 | fn podman_single_node() -> Result<()> {
23 |     run_podman_test("integration-podman-single", None)
24 | }
25 |
26 | #[test]
27 | fn podman_multi_node() -> Result<()> {
28 |     run_podman_test("integration-podman-multi", Some(&["--nodes=2"]))
29 | }
30 |
--------------------------------------------------------------------------------
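The helpers in tests/common.rs can be reused for additional scenarios. As a minimal sketch (assuming the crate layout above; the test name and hook below are illustrative and not part of the repository), a further integration test could pass a custom hook to run_local_test and verify that the admin kubeconfig has been written below the run root:

mod common;

use anyhow::{bail, Result};
use common::{run_local_test, run_root};

#[test]
fn local_single_node_kubeconfig() -> Result<()> {
    let test = "integration-local-kubeconfig";
    run_local_test(test, None, || {
        // The hook only runs once kubernix reported "Waiting for interrupt".
        let kubeconfig = run_root(test).join("kubeconfig").join("admin.kubeconfig");
        if !kubeconfig.exists() {
            bail!("Expected kubeconfig at {}", kubeconfig.display());
        }
        Ok(())
    })
}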