├── .ansible-lint ├── .devcontainer ├── Dockerfile └── devcontainer.json ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── config.yml │ └── feature_request.md ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── ci.yml │ └── release.yml ├── .gitignore ├── .yamllint ├── CHANGELOG.md ├── CONTRIBUTING.md ├── LICENSE.txt ├── README.md ├── defaults └── main.yml ├── documentation ├── README.md ├── configuration │ ├── 2-node-ha-ext-datastore.md │ ├── ipv4-ipv6-dual-stack.md │ ├── multiple-standalone-k3s-nodes.md │ ├── node-labels-and-component-args.md │ ├── systemd-config.md │ └── use-an-alternate-cni.md ├── operations │ ├── extending-a-cluster.md │ ├── shrinking-a-cluster.md │ ├── stop-start-cluster.md │ └── updating-k3s.md ├── quickstart-cluster.md ├── quickstart-ha-cluster.md └── quickstart-single-node.md ├── handlers └── main.yml ├── meta └── main.yml ├── molecule ├── autodeploy │ ├── converge.yml │ ├── molecule.yml │ ├── prepare.yml │ ├── templates │ │ └── 00-ns-monitoring.yml.j2 │ └── verify.yml ├── debug │ ├── converge.yml │ ├── molecule.yml │ ├── prepare.yml │ └── verify.yml ├── default │ ├── Dockerfile.j2 │ ├── INSTALL.rst │ ├── converge.yml │ ├── molecule.yml │ ├── playbook-download.yml │ ├── playbook-restart-cluster.yml │ ├── playbook-rootless.yml │ ├── playbook-standalone.yml │ ├── playbook-start-cluster.yml │ ├── playbook-stop-cluster.yml │ ├── playbook-uninstall-cluster.yml │ ├── prepare-rootless.yml │ ├── prepare.yml │ └── tests │ │ ├── test_default.py │ │ └── test_default.pyc ├── highavailabilitydb │ ├── Dockerfile.j2 │ ├── INSTALL.rst │ ├── converge.yml │ ├── haproxy-loadbalancer.conf.j2 │ ├── molecule.yml │ ├── prepare.yml │ └── tests │ │ ├── test_default.py │ │ └── test_default.pyc ├── highavailabilityetcd │ ├── converge.yml │ ├── haproxy-loadbalancer.conf.j2 │ ├── molecule.yml │ └── prepare.yml ├── lint-requirements.txt ├── nodeploy │ ├── .gitignore │ ├── converge.yml │ ├── k3s_agent.yml │ ├── k3s_server.yml │ ├── molecule.yml │ ├── prepare.yml │ 
└── verify.yml └── requirements.txt ├── requirements.txt ├── tasks ├── determine_systemd_context.yml ├── ensure_cluster.yml ├── ensure_containerd_registries.yml ├── ensure_control_plane_started_openrc.yml ├── ensure_control_plane_started_systemd.yml ├── ensure_directories.yml ├── ensure_downloads.yml ├── ensure_drain_and_remove_nodes.yml ├── ensure_installed.yml ├── ensure_installed_node.yml ├── ensure_k3s_auto_deploy.yml ├── ensure_k3s_config_files.yml ├── ensure_pre_configuration.yml ├── ensure_started.yml ├── ensure_stopped.yml ├── ensure_uninstalled.yml ├── ensure_uploads.yml ├── main.yml ├── post_checks_control_plane.yml ├── post_checks_nodes.yml ├── post_checks_uninstalled.yml ├── pre_checks.yml ├── pre_checks_cgroups.yml ├── pre_checks_cluster.yml ├── pre_checks_control_node_count.yml ├── pre_checks_experimental_variables.yml ├── pre_checks_issue_data.yml ├── pre_checks_packages.yml ├── pre_checks_unsupported_rootless.yml ├── pre_checks_variables.yml ├── pre_checks_version.yml ├── state_downloaded.yml ├── state_installed.yml ├── state_restarted.yml ├── state_started.yml ├── state_stopped.yml ├── state_uninstalled.yml └── state_validated.yml ├── templates ├── cluster-token.j2 ├── config.yaml.j2 ├── k3s-killall.sh.j2 ├── k3s-uninstall.sh.j2 ├── k3s.logrotate.j2 ├── k3s.openrc.j2 ├── k3s.service.j2 └── registries.yaml.j2 └── vars └── main.yml /.ansible-lint: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | skip_list: 4 | - role-name 5 | - name[template] 6 | -------------------------------------------------------------------------------- /.devcontainer/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG VARIANT=focal 2 | FROM ubuntu:${VARIANT} 3 | 4 | COPY molecule/requirements.txt /tmp/molecule/requirements.txt 5 | COPY requirements.txt /tmp/requirements.txt 6 | 7 | RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ 8 | && apt-get -y install curl git 
python3-dev python3-pip \ 9 | python3-venv shellcheck sudo unzip docker.io jq \ 10 | && curl -L \ 11 | "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" \ 12 | -o /usr/bin/kubectl \ 13 | && chmod +x /usr/bin/kubectl \ 14 | && python3 -m pip install pip --upgrade \ 15 | && python3 -m pip install -r /tmp/molecule/requirements.txt 16 | 17 | RUN useradd -s /bin/bash -m vscode && \ 18 | usermod -aG docker vscode && \ 19 | echo 'vscode ALL=(ALL:ALL) NOPASSWD: ALL' > /etc/sudoers.d/vscode && \ 20 | echo 'source /etc/bash_completion.d/git-prompt' >> /home/vscode/.bashrc && \ 21 | echo 'sudo chown vscode /var/run/docker-host.sock' >> /home/vscode/.bashrc && \ 22 | echo 'export PS1="${PS1:0:-1}\[\033[38;5;196m\]$(__git_ps1)\[$(tput sgr0)\] "' >> /home/vscode/.bashrc 23 | 24 | RUN ln -s /var/run/docker-host.sock /var/run/docker.sock 25 | 26 | USER vscode 27 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Ubuntu", 3 | "build": { 4 | "context": "..", 5 | "dockerfile": "Dockerfile", 6 | "args": { "VARIANT": "focal" } 7 | }, 8 | 9 | "settings": { 10 | "terminal.integrated.profiles.linux": { 11 | "bash (login)": { 12 | "path": "/bin/bash", 13 | "args": ["-l"] 14 | } 15 | } 16 | }, 17 | 18 | "extensions": [ 19 | "ms-azuretools.vscode-docker", 20 | "redhat.vscode-yaml" 21 | ], 22 | 23 | "mounts": [ 24 | "source=/var/run/docker.sock,target=/var/run/docker-host.sock,type=bind" 25 | ], 26 | 27 | "remoteUser": "vscode" 28 | } 29 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | --- 5 | 6 | 7 | 8 | 9 | ### Summary 10 | 11 | 12 | 13 | ### 
Issue Type 14 | 15 | - Bug Report 16 | 17 | ### Controller Environment and Configuration 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | ```text 26 | 27 | ``` 28 | 29 | ### Steps to Reproduce 30 | 31 | 32 | 33 | 34 | 35 | ```yaml 36 | 37 | ``` 38 | 39 | ### Expected Result 40 | 41 | 42 | 43 | ```text 44 | 45 | ``` 46 | 47 | ### Actual Result 48 | 49 | 50 | 51 | 52 | 53 | ```text 54 | 55 | ``` 56 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | blank_issues_enabled: true 4 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | --- 5 | 6 | 7 | 8 | 9 | ### Summary 10 | 11 | 12 | 13 | ### Issue Type 14 | 15 | - Feature Request 16 | 17 | ### User Story 18 | 19 | 20 | 21 | 22 | _As a_ \ 23 | _I want to_ \ 24 | _So that_ 25 | 26 | ### Additional Information 27 | 28 | 29 | 30 | 31 | ```yaml 32 | 33 | ``` 34 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## TITLE 2 | 3 | ### Summary 4 | 5 | 6 | 7 | 8 | 9 | ### Issue type 10 | 11 | 12 | - Bugfix 13 | - Documentation 14 | - Feature 15 | 16 | ### Test instructions 17 | 18 | 19 | 20 | ### Acceptance Criteria 21 | 22 | 23 | 24 | 28 | 29 | ### Additional Information 30 | 31 | 32 | 33 | 34 | 35 | ```text 36 | 37 | ``` 38 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | name: CI 4 | 'on': 5 | pull_request: 6 | push: 7 | branches: 8 | - 
master 9 | - main 10 | - v1_release 11 | schedule: 12 | - cron: "0 1 1 * *" 13 | 14 | defaults: 15 | run: 16 | working-directory: "xanmanning.k3s" 17 | 18 | jobs: 19 | ansible-lint: 20 | name: Ansible Lint 21 | runs-on: ubuntu-latest 22 | steps: 23 | - name: Checkout codebase 24 | uses: actions/checkout@v2 25 | with: 26 | path: "xanmanning.k3s" 27 | 28 | - name: Set up Python 3 29 | uses: actions/setup-python@v2 30 | with: 31 | python-version: "3.x" 32 | 33 | - name: Install test dependencies 34 | run: pip3 install -r molecule/lint-requirements.txt 35 | 36 | - name: Run yamllint 37 | run: yamllint -s . 38 | 39 | - name: Run ansible-lint 40 | run: ansible-lint --exclude molecule/ --exclude meta/ 41 | 42 | molecule: 43 | name: Molecule 44 | runs-on: ubuntu-20.04 45 | strategy: 46 | fail-fast: false 47 | matrix: 48 | include: 49 | - distro: geerlingguy/docker-debian11-ansible:latest 50 | scenario: default 51 | prebuilt: 'true' 52 | - distro: geerlingguy/docker-ubuntu2204-ansible:latest 53 | scenario: default 54 | prebuilt: 'true' 55 | - distro: geerlingguy/docker-amazonlinux2-ansible:latest 56 | scenario: default 57 | prebuilt: 'true' 58 | - distro: geerlingguy/docker-ubuntu2004-ansible:latest 59 | scenario: default 60 | prebuilt: 'true' 61 | - distro: geerlingguy/docker-fedora35-ansible:latest 62 | scenario: nodeploy 63 | prebuilt: 'true' 64 | - distro: geerlingguy/docker-fedora34-ansible:latest 65 | scenario: highavailabilitydb 66 | prebuilt: 'true' 67 | - distro: geerlingguy/docker-fedora33-ansible:latest 68 | scenario: autodeploy 69 | - distro: xanmanning/docker-alpine-ansible:3.16 70 | scenario: highavailabilityetcd 71 | prebuilt: 'false' 72 | - distro: geerlingguy/docker-rockylinux9-ansible:latest 73 | scenario: highavailabilityetcd 74 | prebuilt: 'true' 75 | 76 | steps: 77 | - name: Checkout codebase 78 | uses: actions/checkout@v2 79 | with: 80 | path: "xanmanning.k3s" 81 | 82 | - name: Set up Python 3 83 | uses: actions/setup-python@v2 84 | with: 85 | 
python-version: "3.x" 86 | 87 | - name: Install test dependencies 88 | run: pip3 install -r molecule/requirements.txt 89 | 90 | - name: Run Molecule tests 91 | run: molecule test --scenario-name "${{ matrix.scenario }}" 92 | # continue-on-error: true 93 | env: 94 | PY_COLORS: '1' 95 | ANSIBLE_FORCE_COLOR: '1' 96 | MOLECULE_DISTRO: ${{ matrix.distro }} 97 | MOLECULE_PREBUILT: ${{ matrix.prebuilt }} 98 | MOLECULE_DOCKER_COMMAND: ${{ matrix.command }} 99 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | name: Release 4 | 'on': 5 | push: 6 | tags: 7 | - '*' 8 | 9 | defaults: 10 | run: 11 | working-directory: "xanmanning.k3s" 12 | 13 | jobs: 14 | release: 15 | name: Release 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout codebase 19 | uses: actions/checkout@v2 20 | with: 21 | path: "xanmanning.k3s" 22 | 23 | - name: Set up Python 3 24 | uses: actions/setup-python@v2 25 | with: 26 | python-version: "3.x" 27 | 28 | - name: Install Ansible 29 | run: pip3 install -r requirements.txt 30 | 31 | - name: Trigger a new import on Galaxy 32 | run: ansible-galaxy role import --api-key ${{ secrets.GALAXY_API_KEY }} $(echo ${{ github.repository }} | cut -d/ -f1) $(echo ${{ github.repository }} | cut -d/ -f2) 33 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vagrant 2 | *.retry 3 | VAULT_PASSWORD 4 | VAULT_PASS 5 | .vault_pass 6 | .vault_pass.asc 7 | vagramt/fetch 8 | vagrant/ubuntu-*.log 9 | __pycache__ 10 | ansible.cfg 11 | pyratlabs-issue-dump.txt 12 | .cache 13 | /.idea/ 14 | -------------------------------------------------------------------------------- /.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | # Based on 
ansible-lint config 3 | extends: default 4 | 5 | rules: 6 | braces: 7 | max-spaces-inside: 1 8 | level: error 9 | brackets: 10 | max-spaces-inside: 1 11 | level: error 12 | colons: 13 | max-spaces-after: -1 14 | level: error 15 | commas: 16 | max-spaces-after: -1 17 | level: error 18 | comments: disable 19 | comments-indentation: disable 20 | document-start: disable 21 | empty-lines: 22 | max: 3 23 | level: error 24 | hyphens: 25 | level: error 26 | indentation: disable 27 | key-duplicates: enable 28 | line-length: disable 29 | new-line-at-end-of-file: disable 30 | new-lines: 31 | type: unix 32 | trailing-spaces: disable 33 | truthy: disable 34 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Change Log 2 | 3 | 16 | 17 | ## 2023-06-17, v3.4.2 18 | 19 | ### Notable changes 20 | 21 | - fix: static pods should be deployed to all control nodes #207 22 | - fix: create registries.yaml if k3s_registries.mirrors or k3s_registries.configs are not none #208 23 | 24 | ### Contributors 25 | 26 | - [onedr0p](https://github.com/onedr0p) 27 | - [matteyeux](https://github.com/matteyeux) 28 | 29 | ## 2023-05-17, v3.4.1 30 | 31 | ### Notable changes 32 | 33 | - fix: resolve ansible lint warnings and fix molecule tests in github actions 34 | 35 | ### Contributors 36 | 37 | - [dbrennand](https://github.com/dbrennand) 38 | 39 | --- 40 | 41 | ## 2023-03-11, v3.4.0 42 | 43 | ### Notable changes 44 | 45 | - refactor: add `until: 1.23.15` to `secrets-encryption` from `k3s_experimental_config` as it is no longer experimental. Fixes #200. 
46 | - docs(fix): typo in `CONTRIBUTING.md` 47 | 48 | ### Contributors 49 | 50 | - [dbrennand](https://github.com/dbrennand) 51 | 52 | --- 53 | 54 | ## 2022-11-15, v3.3.1 55 | 56 | ### Notable changes 57 | 58 | - fix: length indentation in registry.yaml 59 | 60 | --- 61 | 62 | ## 2022-09-11, v3.3.0 63 | 64 | ### Notable changes 65 | 66 | - fix: `no_log` removed from `ansible.builtin.uri` tasks 67 | - feat: `k3s_skip_post_checks` option added 68 | 69 | --- 70 | 71 | ## 2022-06-17, v3.2.0 72 | 73 | ### Notable changes 74 | 75 | - feature: added support for alpine #182 76 | - fix: `k3s_control_token` not working #187 77 | 78 | ## 2022-05-02, v3.1.2 79 | 80 | ### Notable changes 81 | 82 | - fix: molecule tests 83 | 84 | --- 85 | 86 | ## 2022-02-18, v3.1.1 87 | 88 | ### Notable changes 89 | 90 | - fix: support nftables for debian 11 91 | 92 | ### Contributors 93 | 94 | - [eaglesemanation](https://github.com/eaglesemanation) 95 | 96 | --- 97 | 98 | ## 2022-01-30, v3.1.0 99 | 100 | ### Notable changes 101 | 102 | - feat: use basename of url for items in `k3s_server_manifests_urls` and 103 | `k3s_server_pod_manifests_urls` if filename is not provided #177 104 | 105 | ### Contributors 106 | 107 | - [kossmac](https://github.com/kossmac) 108 | 109 | --- 110 | 111 | ## 2022-01-06, v3.0.1 112 | 113 | ### Notable changes 114 | 115 | - fix: adding become to pre checks packages #173 116 | 117 | ### Contributors 118 | 119 | - [xlejo](https://github.com/xlejo) 120 | 121 | --- 122 | 123 | ## 2022-01-02, v3.0.0 124 | 125 | ### Notable changes 126 | 127 | - feat: Flattened task filesystem 128 | - feat: Moved some tasks into `vars/` as templated variables 129 | - feat: Airgap installation method added #165 130 | 131 | ### Breaking changes 132 | 133 | - Minimum `python` version on targets is 3.6 134 | - `k3s_become_for_all` renamed to `k3s_become` 135 | - `k3s_become_for_*` removed. 
136 | 137 | ### Contributors 138 | 139 | - [crutonjohn](https://github.com/crutonjohn) 140 | 141 | --- 142 | 143 | ## 2021-12-23, v2.12.1 144 | 145 | ### Notable changes 146 | 147 | - Fix typo in systemd unit file 148 | 149 | ### Contributors 150 | 151 | - [andrewchen5678](https://github.com/andrewchen5678) 152 | 153 | --- 154 | 155 | ## 2021-12-20, v2.12.0 156 | 157 | ### Notable changes 158 | 159 | - Fix RockyLinux HA etcd tests 160 | - add Debian 11 test 161 | - Fix Snapshotter in Molecule tests 162 | - Added missing documentation for `k3s_api_url` 163 | - Added option to change K3s updates API url 164 | - Custom environment variables in systemd unit files 165 | - Debian Bullseye support 166 | - Fix HA etcd cluster startup 167 | - Fix rootless for Debian 168 | 169 | ### Contributors 170 | 171 | - [janar153](https://github.com/janar153) 172 | 173 | --- 174 | 175 | ## 2021-10-10, v2.11.1 176 | 177 | ### Notable changes 178 | 179 | - docs: fixed references to `write-kubeconfig-mode` to set correct permissions #157 180 | - fix: Flag --delete-local-data has been deprecated #159 181 | 182 | --- 183 | 184 | ## 2021-09-08, v2.11.0 185 | 186 | ### Notable changes 187 | 188 | - docs: example of IPv6 configuration 189 | - feat: checks for s3 backup configuration 190 | - feat: implement config.yaml.d 191 | 192 | ### Contributors 193 | 194 | - [onedr0p](https://github.com/onedr0p) 195 | 196 | --- 197 | 198 | ## 2021-08-18, v2.10.6 199 | 200 | ### Notable changes 201 | 202 | - Fix: Define registration address from node-ip #142 203 | 204 | --- 205 | 206 | ## 2021-08-14, v2.10.5 207 | 208 | ### Notable changes 209 | 210 | - Add advertised address #139 211 | 212 | ### Contributors 213 | 214 | - [@abelfodil](https://github.com/abelfodil) 215 | 216 | --- 217 | 218 | ## 2021-07-24, v2.10.4 219 | 220 | ### Notable changes 221 | 222 | - Updated systemd template to use token when joining a cluster #138 223 | 224 | --- 225 | 226 | ## 2021-07-21, v2.10.3 227 | 228 | ### Notable changes 
229 | 230 | - fix: typo #133 231 | - fix: restore clustering and avoid failure with jinja2_native=true #135 232 | - fix: do ignore etcd member count when uninstalling #136 233 | 234 | ### Contributors 235 | 236 | - [@Yaro](https://github.com/Yajo) 237 | 238 | --- 239 | 240 | ## 2021-06-22, v2.10.2 241 | 242 | ### Notable changes 243 | 244 | - Role is now tested against RockyLinux 245 | 246 | --- 247 | 248 | ## 2021-05-30, v2.10.1 249 | 250 | ### Notable changes 251 | 252 | - Case insensitive control node lookup #126 253 | 254 | ### Contributors 255 | 256 | - [@mrobinsn](https://github.com/mrobinsn) 257 | 258 | --- 259 | 260 | ## 2021-05-27, v2.10.0 261 | 262 | ### Notable changes 263 | 264 | - Only deploy templates on primary controller #119 265 | - Allow control plane static pods #120 266 | - Add support for specifying URLs in templates #124 267 | 268 | ### Contributors 269 | 270 | - [@bjw-s](https://github.com/bjw-s) 271 | - [@onedr0p](https://github.com/onedr0p) 272 | 273 | --- 274 | 275 | ## 2021-05-14, v2.9.1 276 | 277 | 278 | 279 | ### Notable changes 280 | 281 | - Documentation, remove references to deprecated configuration techniques #115 282 | - Bugfix: Templating issue. 
283 | 284 | --- 285 | 286 | ## 2021-05-13, v2.9.0 287 | 288 | 289 | 290 | ### Notable changes 291 | 292 | - Feature: Support k3s private registry configuration #114 293 | 294 | ### Contributors 295 | 296 | - [@anjia0532](https://github.com/anjia0532) 297 | 298 | --- 299 | 300 | ## 2021-05-06, v2.8.5 301 | 302 | ### Notable changes 303 | 304 | - Bugfix: Unmount CSI plugin folder to avoid data lost on uninstall #113 305 | 306 | ### Contributors 307 | 308 | - [@angelnu](https://github.com/angelnu) 309 | 310 | --- 311 | 312 | ## 2021-05-01, v2.8.4 313 | 314 | ### Notable changes 315 | 316 | - Fixed issue with draining nodes #112 317 | 318 | ### Contributors 319 | 320 | - [@anjia0532](https://github.com/anjia0532) 321 | 322 | --- 323 | 324 | ## 2021-04-18, v2.8.3 325 | 326 | ### Notable changes 327 | 328 | - Typo fix in README.md #110 329 | - Fixed check mode for cgroup test #111 330 | - Added check mode into molecule test sequence 331 | - `inventory.yml` is now `blockinfile` 332 | 333 | ### Contributors 334 | 335 | - [@bdronneau](https://github.com/bdronneau) 336 | 337 | --- 338 | 339 | ## 2021-04-10, v2.8.2 340 | 341 | ### Notable changes 342 | 343 | - #105 - Added Ansible v2.9.16 support 344 | - #102 - Pre-check for cgroup status 345 | 346 | ### Known issues 347 | 348 | - As per README.md, you require `ansible` >= 2.9.16 349 | or `ansible-base` >= 2.10.4. See [#105(comment)](https://github.com/PyratLabs/ansible-role-k3s/issues/105#issuecomment-817182233) 350 | 351 | --- 352 | 353 | ## 2021-03-22, v2.8.1 354 | 355 | ### Notable changes 356 | 357 | - #100 - Fixed typo in README.md 358 | 359 | ### Contributors 360 | 361 | - [@mbwmbw1337](https://github.com/mbwmbw1337) 362 | 363 | --- 364 | 365 | ## 2021-03-14, v2.8.0 366 | 367 | Happy π day! 368 | 369 | ### Notable changes 370 | 371 | - Updated GitHub Actions, resolved linting errors. 
372 | - Renamed `k3s_control_node_address` -> `k3s_registration_address` 373 | 374 | ### Breaking changes 375 | 376 | - A task has been added to rename `k3s_control_node_address` to 377 | `k3s_registration_address` for any users still using this variable name, 378 | however this might still break something. 379 | 380 | --- 381 | 382 | ## 2021-02-28, v2.7.1 383 | 384 | ### Notable changes 385 | 386 | - Bugfix, missing become on cluster token check. 387 | 388 | --- 389 | 390 | ## 2021-02-27, v2.7.0 391 | 392 | ### Notable changes 393 | 394 | - Cluster init checks added. 395 | - Tidy up of tasks, failed checks. 396 | - Possible fix for #93 - force draining of nodes added. 397 | 398 | --- 399 | 400 | ## 2021-02-27, v2.6.1 401 | 402 | ### Notable changes 403 | 404 | - Bugfix: Templating error for single control plane nodes using Etcd. 405 | - Bugfix: a number of typos fixed. 406 | 407 | --- 408 | 409 | ## 2021-02-16, v2.6.0 410 | 411 | ### Notable changes 412 | 413 | - Tidy up of `when` params and `assert` tasks to be more readable. 414 | - Added feature to tweak K3S service dependencies. 415 | - Updated documentation: 416 | - Node labels and component arguments 417 | - systemd config 418 | - Use alternate CNI (Calico example) 419 | 420 | --- 421 | 422 | ## 2021-01-31, v2.5.3 423 | 424 | ### Notable changes 425 | 426 | - Bugfix, missing update to minimum ansible version var #91. 427 | 428 | --- 429 | 430 | ## 2021-01-30, v2.5.2 431 | 432 | ### Notable changes 433 | 434 | - Bugfix, missing `k3s_start_on_boot` to control `systemd.enabled` added. 435 | 436 | --- 437 | 438 | ## 2021-01-30, v2.5.1 439 | 440 | ### Notable changes 441 | 442 | - Added uninstall task to remove hard-linked files #88 443 | - Fixed missing become for `systemd` operations tasks. #89 444 | - Added `k3s_start_on_boot` to control `systemd.enabled`. 
445 | 446 | --- 447 | 448 | ## 2021-01-24, v2.5.0 449 | 450 | ### Notable changes 451 | 452 | - Added support for Ansible >= 2.9.17 #83 453 | 454 | --- 455 | 456 | ## 2021-01-23, v2.4.3 457 | 458 | ### Notable changes 459 | 460 | - Bugfix: Installation hangs on "Check that all nodes to be ready" #84 461 | 462 | --- 463 | 464 | ## 2021-01-10, v2.4.2 465 | 466 | ### Notable changes 467 | 468 | - Bugfix: Docker check still failing on "false" 469 | 470 | --- 471 | 472 | ## 2021-01-02, v2.4.1 473 | 474 | ### Notable changes 475 | 476 | - Fixed issue with armv6l (Raspberry Pi Zero W) 477 | - Added path for private repositories config to directory creation list. 478 | 479 | --- 480 | 481 | ## 2020-12-21, v2.4.0 482 | 483 | ### Notable changes 484 | 485 | - `k3s_config_dir` derived from `k3s_config_file`, reused throughout the role 486 | to allow for easy removal of "Rancher" references #73. 487 | - `k3s_token_location` has moved to be in `k3s_config_dir`. 488 | - Tasks for creating directories now looped to capture configuration from 489 | `k3s_server` and `k3s_agent` and ensure directories exist before k3s 490 | starts, see #75. 491 | - Server token collected directly from token file, not symlinked file 492 | (node-token). 493 | - `k3s_runtime_config` defined in `vars/` for validation and overwritten in 494 | tasks for control plane and workers. 495 | - Removed unused references to GitHub API. 496 | - `set_fact` and `command` tasks now use FQCN. 497 | - Check of `ansible_version` in environment check. 498 | - Introduction of target environment checks for #72. 499 | - Fixed bug with non-default listening port not being passed to workers. 500 | - Added ability to put documentation links into validation checks #76. 501 | - Removed the requirement for `jmespath` on the Ansible controller. 502 | - Fixed bug with issue data collection tasks. 
503 | 504 | ### Breaking changes 505 | 506 | - Ansible minimum version is hard set to v2.10.4 507 | - `k3s_token_location` has moved to be in `k3s_config_dir` so re-running the 508 | role will create a duplicate file here. 509 | 510 | --- 511 | 512 | ## 2020-12-19, v2.3.0 513 | 514 | ### Notable changes 515 | 516 | - Updated k3s uninstall scripts #74 517 | - Started moving Rancher references to `vars/` as per #73 518 | 519 | --- 520 | 521 | ## 2020-12-19, v2.2.2 522 | 523 | ### Notable changes 524 | 525 | - Fixed typos in documentation. 526 | - Molecule testing pinned to v3.1 due to tests failing. 527 | 528 | --- 529 | 530 | ## 2020-12-16, v2.2.1 531 | 532 | ### Notable changes 533 | 534 | - Re-working documentation 535 | - Updated GitHub link, org changed from Rancher to k3s-io. 536 | - Replace deprecated `play_hosts` variable. 537 | 538 | ### Breaking changes 539 | 540 | - Moving git branch from `master` to `main`. 541 | 542 | --- 543 | 544 | ## 2020-12-12, v2.2.0 545 | 546 | ### Notable changes 547 | 548 | - Use of FQCNs enforced, minimum Ansible version now v2.10 549 | - `k3s_etcd_datastore` no longer experimental after K3s version v1.19.5+k3s1 550 | - Docker marked as deprecated for K3s > v1.20.0+k3s1 551 | 552 | ### Breaking changes 553 | 554 | - Use of FQCNs enforced, minimum Ansible version now v2.10 555 | - Use of Docker requires `k3s_use_unsupported_config` to be `true` after 556 | v1.20.0+k3s1 557 | 558 | --- 559 | 560 | ## 2020-12-05, v2.1.1 561 | 562 | ### Notable changes 563 | 564 | - Fixed link to documentation. 565 | 566 | --- 567 | 568 | ## 2020-12-05, v2.1.0 569 | 570 | ### Notable changes 571 | 572 | - Deprecated configuration check built into validation steps. 573 | - Removed duplicated tasks for single node cluster. 574 | - Added documentation providing quickstart examples and common operations. 575 | - Fixed data-dir configuration. 576 | - Some tweaks to rootless. 577 | - Fix draining and removing of nodes. 
578 | 579 | ### Breaking changes 580 | 581 | - `k3s_token_location` now points to a file location, not a directory. 582 | - `k3s_systemd_unit_directory` renamed to `k3s_systemd_unit_dir` 583 | - Removed `k3s_node_data_dir` as this is now configured with `data-dir` in 584 | `k3s_server` and/or `k3s_agent`. 585 | 586 | ### Known issues 587 | 588 | - Rootless is still broken, this is still not supported as a method for 589 | running k3s using this role. 590 | 591 | --- 592 | 593 | ## 2020-11-30, v2.0.2 594 | 595 | ### Notable changes 596 | 597 | - Updated issue template and information collection tasks. 598 | 599 | --- 600 | 601 | ## 2020-11-30, v2.0.1 602 | 603 | ### Notable changes 604 | 605 | - Fixed a number of typos in the README.md 606 | - Updated the meta/main.yml to put quotes around minimum Ansible version. 607 | 608 | --- 609 | 610 | ## 2020-11-29, v2.0.0 611 | 612 | ### Notable changes 613 | 614 | - #64 - Initial release of v2.0.0 of 615 | [ansible-role-k3s](https://github.com/PyratLabs/ansible-role-k3s). 616 | - Minimum supported k3s version now: v1.19.1+k3s1 617 | - Minimum supported Ansible version now: v2.10.0 618 | - #62 - Remove all references to the word "master". 619 | - #53 - Move to file-based configuration. 620 | - Refactored to avoid duplication in code and make contribution easier. 621 | - Validation checks moved to using variables defined in `vars/` 622 | 623 | ### Breaking changes 624 | 625 | #### File based configuration 626 | 627 | Issue #53 628 | 629 | With the release of v1.19.1+k3s1, this role has moved to file-based 630 | configuration of k3s. This requires manual translation of v1 configuration 631 | variables into configuration file format. 632 | 633 | Please see: https://rancher.com/docs/k3s/latest/en/installation/install-options/#configuration-file 634 | 635 | #### Minimum supported k3s version 636 | 637 | As this role now relies on file-based configuration, the v2.x release of this 638 | role will only support v1.19+ of k3s. 
If you are not in a position to update 639 | k3s you will need to continue using the v1.x release of this role, which will 640 | be supported until March 2021. 641 | 642 | #### Minimum supported ansible version 643 | 644 | This role now only supports Ansible v2.10+, this is because it has moved on to 645 | using FQCNs, with the exception of `set_fact` tasks which have 646 | [been broken](https://github.com/ansible/ansible/issues/72319) and the fixes 647 | have [not yet been backported to v2.10](https://github.com/ansible/ansible/pull/71824). 648 | 649 | The use of FQCNs allows for custom modules to be introduced to override task 650 | behavior. If this role requires a custom ansible module to be introduced then 651 | this can be added as a dependency and targeted specifically by using the 652 | correct FQCN. 653 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contribution Guidelines 2 | 3 | Thank you for taking time to contribute to this Ansible role. 4 | 5 | There are a number of ways that you can contribute to this project, not all of 6 | them requiring you to be able to write code. Below is a list of suggested 7 | contributions welcomed by the community: 8 | 9 | - Submit bug reports in GitHub issues 10 | - Comment on bug reports with further information or suggestions 11 | - Suggest new features 12 | - Create Pull Requests fixing bugs or adding new features 13 | - Update and improve documentation 14 | - Review the role on Ansible Galaxy 15 | - Write a blog post reviewing the role 16 | - Sponsor me. 17 | 18 | ## Issue guidelines 19 | 20 | Issues are the best way to capture a bug in the role, or suggest new features. 21 | This is due to issues being visible to the entire community and allows for 22 | other contributors to pick up the work, so is a better communication medium 23 | than email. 
24 | 25 | A good bug issue will include as much information as possible about the 26 | environment Ansible is running in, as well as the role configuration. If there 27 | are any relevant pieces of documentation from upstream projects, this should 28 | be included. 29 | 30 | New feature requests are also best captured in issues, these should include 31 | as much relevant information as possible and if possible include a "user story" 32 | (don't sweat if you don't know how to write one). If there are any relevant 33 | pieces of documentation from upstream projects, this should be included. 34 | 35 | ## Pull request guidelines 36 | 37 | PRs should only contain 1 issue fix at a time to limit the scope of testing 38 | required. The smaller the scope of the PR, the easier it is for it to be 39 | reviewed. 40 | 41 | PRs should include the keyword `Fixes` before an issue number if the PR will 42 | completely close the issue. This is because automation will close the issue 43 | once the PR is merged. 44 | 45 | PRs are preferred to be merged in as a single commit, so rebasing before 46 | pushing is recommended, however this isn't a strict rule. 47 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2020, Xan Manning 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | 3. 
Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | 31 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Ansible Role: k3s (v3.x) 2 | 3 | Ansible role for installing [K3S](https://k3s.io/) ("Lightweight 4 | Kubernetes") as either a standalone server or cluster. 5 | 6 | [![CI](https://github.com/PyratLabs/ansible-role-k3s/workflows/CI/badge.svg?event=push)](https://github.com/PyratLabs/ansible-role-k3s/actions?query=workflow%3ACI) 7 | 8 | ## Help Wanted! 9 | 10 | Hi! :wave: [@xanmanning](https://github.com/xanmanning) is looking for a new 11 | maintainer to work on this Ansible role. This is because I don't have as much 12 | free time any more and I no longer write Ansible regularly as part of my day 13 | job. If you're interested, get in touch. 
14 | 15 | ## Release notes 16 | 17 | Please see [Releases](https://github.com/PyratLabs/ansible-role-k3s/releases) 18 | and [CHANGELOG.md](CHANGELOG.md). 19 | 20 | ## Requirements 21 | 22 | The host you're running Ansible from requires the following Python dependencies: 23 | 24 | - `python >= 3.6.0` - [See Notes below](#important-note-about-python). 25 | - `ansible >= 2.9.16` or `ansible-base >= 2.10.4` 26 | 27 | You can install dependencies using the requirements.txt file in this repository: 28 | `pip3 install -r requirements.txt`. 29 | 30 | This role has been tested against the following Linux Distributions: 31 | 32 | - Alpine Linux 33 | - Amazon Linux 2 34 | - Archlinux 35 | - CentOS 8 36 | - Debian 11 37 | - Fedora 31 38 | - Fedora 32 39 | - Fedora 33 40 | - openSUSE Leap 15 41 | - RockyLinux 8 42 | - Ubuntu 20.04 LTS 43 | 44 | :warning: The v3 releases of this role only supports `k3s >= v1.19`, for 45 | `k3s < v1.19` please consider updating or use the v1.x releases of this role. 46 | 47 | Before upgrading, see [CHANGELOG](CHANGELOG.md) for notifications of breaking 48 | changes. 49 | 50 | ## Role Variables 51 | 52 | Since K3s [v1.19.1+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.19.1%2Bk3s1) 53 | you can now configure K3s using a 54 | [configuration file](https://rancher.com/docs/k3s/latest/en/installation/install-options/#configuration-file) 55 | rather than environment variables or command line arguments. The v2 release of 56 | this role has moved to the configuration file method rather than populating a 57 | systemd unit file with command-line arguments. There may be exceptions that are 58 | defined in [Global/Cluster Variables](#globalcluster-variables), however you will 59 | mostly be configuring k3s by configuration files using the `k3s_server` and 60 | `k3s_agent` variables. 61 | 62 | See "_Server (Control Plane) Configuration_" and "_Agent (Worker) Configuraion_" 63 | below. 
64 | 65 | ### Global/Cluster Variables 66 | 67 | Below are variables that are set against all of the play hosts for environment 68 | consistency. These are generally cluster-level configuration. 69 | 70 | | Variable | Description | Default Value | 71 | |--------------------------------------|--------------------------------------------------------------------------------------------|--------------------------------| 72 | | `k3s_state` | State of k3s: installed, started, stopped, downloaded, uninstalled, validated. | installed | 73 | | `k3s_release_version` | Use a specific version of k3s, eg. `v0.2.0`. Specify `false` for stable. | `false` | 74 | | `k3s_airgap` | Boolean to enable air-gapped installations | `false` | 75 | | `k3s_config_file` | Location of the k3s configuration file. | `/etc/rancher/k3s/config.yaml` | 76 | | `k3s_build_cluster` | When multiple play hosts are available, attempt to cluster. Read notes below. | `true` | 77 | | `k3s_registration_address` | Fixed registration address for nodes. IP or FQDN. | NULL | 78 | | `k3s_github_url` | Set the GitHub URL to install k3s from. | https://github.com/k3s-io/k3s | 79 | | `k3s_api_url` | URL for K3S updates API. | https://update.k3s.io | 80 | | `k3s_install_dir` | Installation directory for k3s. | `/usr/local/bin` | 81 | | `k3s_install_hard_links` | Install using hard links rather than symbolic links. | `false` | 82 | | `k3s_server_config_yaml_d_files` | A flat list of templates to supplement the `k3s_server` configuration. | [] | 83 | | `k3s_agent_config_yaml_d_files` | A flat list of templates to supplement the `k3s_agent` configuration. | [] | 84 | | `k3s_server_manifests_urls` | A list of URLs to deploy on the primary control plane. Read notes below. | [] | 85 | | `k3s_server_manifests_templates` | A flat list of templates to deploy on the primary control plane. | [] | 86 | | `k3s_server_pod_manifests_urls` | A list of URLs for installing static pod manifests on the control plane. Read notes below. 
| [] | 87 | | `k3s_server_pod_manifests_templates` | A flat list of templates for installing static pod manifests on the control plane. | [] | 88 | | `k3s_use_experimental` | Allow the use of experimental features in k3s. | `false` | 89 | | `k3s_use_unsupported_config` | Allow the use of unsupported configurations in k3s. | `false` | 90 | | `k3s_etcd_datastore` | Enable etcd embedded datastore (read notes below). | `false` | 91 | | `k3s_debug` | Enable debug logging on the k3s service. | `false` | 92 | | `k3s_registries` | Registries configuration file content. | `{ mirrors: {}, configs:{} }` | 93 | 94 | ### K3S Service Configuration 95 | 96 | The below variables change how and when the systemd service unit file for K3S 97 | is run. Use this with caution, please refer to the [systemd documentation](https://www.freedesktop.org/software/systemd/man/systemd.unit.html#%5BUnit%5D%20Section%20Options) 98 | for more information. 99 | 100 | | Variable | Description | Default Value | 101 | |------------------------|----------------------------------------------------------------------|---------------| 102 | | `k3s_start_on_boot` | Start k3s on boot. | `true` | 103 | | `k3s_service_requires` | List of required systemd units to k3s service unit. | [] | 104 | | `k3s_service_wants` | List of "wanted" systemd unit to k3s (weaker than "requires"). | []\* | 105 | | `k3s_service_before` | Start k3s before a defined list of systemd units. | [] | 106 | | `k3s_service_after` | Start k3s after a defined list of systemd units. | []\* | 107 | | `k3s_service_env_vars` | Dictionary of environment variables to use within systemd unit file. | {} | 108 | | `k3s_service_env_file` | Location on host of a environment file to include. | `false`\*\* | 109 | 110 | \* The systemd unit template **always** specifies `network-online.target` for 111 | `wants` and `after`. 112 | 113 | \*\* The file must already exist on the target host, this role will not create 114 | nor manage the file. 
You can manage this file outside of the role with 115 | pre-tasks in your Ansible playbook. 116 | 117 | ### Group/Host Variables 118 | 119 | Below are variables that are set against individual or groups of play hosts. 120 | Typically you'd set these at group level for the control plane or worker nodes. 121 | 122 | | Variable | Description | Default Value | 123 | |--------------------|-------------------------------------------------------------------|---------------------------------------------------| 124 | | `k3s_control_node` | Specify if a host (or host group) are part of the control plane. | `false` (role will automatically delegate a node) | 125 | | `k3s_server` | Server (control plane) configuration, see notes below. | `{}` | 126 | | `k3s_agent` | Agent (worker) configuration, see notes below. | `{}` | 127 | 128 | #### Server (Control Plane) Configuration 129 | 130 | The control plane is configured with the `k3s_server` dict variable. Please 131 | refer to the below documentation for configuration options: 132 | 133 | https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/ 134 | 135 | The `k3s_server` dictionary variable will contain flags from the above 136 | (removing the `--` prefix). Below is an example: 137 | 138 | ```yaml 139 | k3s_server: 140 | datastore-endpoint: postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable 141 | cluster-cidr: 172.20.0.0/16 142 | flannel-backend: 'none' # This needs to be in quotes 143 | disable: 144 | - traefik 145 | - coredns 146 | ``` 147 | 148 | Alternatively, you can create a .yaml file and read it in to the `k3s_server` 149 | variable as per the below example: 150 | 151 | ```yaml 152 | k3s_server: "{{ lookup('file', 'path/to/k3s_server.yml') | from_yaml }}" 153 | ``` 154 | 155 | Check out the [Documentation](documentation/README.md) for example 156 | configuration. 
157 | 158 | #### Agent (Worker) Configuration 159 | 160 | Workers are configured with the `k3s_agent` dict variable. Please refer to the 161 | below documentation for configuration options: 162 | 163 | https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config 164 | 165 | The `k3s_agent` dictionary variable will contain flags from the above 166 | (removing the `--` prefix). Below is an example: 167 | 168 | ```yaml 169 | k3s_agent: 170 | with-node-id: true 171 | node-label: 172 | - "foo=bar" 173 | - "hello=world" 174 | ``` 175 | 176 | Alternatively, you can create a .yaml file and read it in to the `k3s_agent` 177 | variable as per the below example: 178 | 179 | ```yaml 180 | k3s_agent: "{{ lookup('file', 'path/to/k3s_agent.yml') | from_yaml }}" 181 | ``` 182 | 183 | Check out the [Documentation](documentation/README.md) for example 184 | configuration. 185 | 186 | ### Ansible Controller Configuration Variables 187 | 188 | The below variables are used to change the way the role executes in Ansible, 189 | particularly with regards to privilege escalation. 190 | 191 | | Variable | Description | Default Value | 192 | |------------------------|----------------------------------------------------------------|---------------| 193 | | `k3s_skip_validation` | Skip all tasks that validate configuration. | `false` | 194 | | `k3s_skip_env_checks` | Skip all tasks that check environment configuration. | `false` | 195 | | `k3s_skip_post_checks` | Skip all tasks that check post execution state. | `false` | 196 | | `k3s_become` | Escalate user privileges for tasks that need root permissions. | `false` | 197 | 198 | #### Important note about Python 199 | 200 | From v3 of this role, Python 3 is required on the target system as well as on 201 | the Ansible controller. This is to ensure consistent behaviour for Ansible 202 | tasks as Python 2 is now EOL. 
203 | 204 | If target systems have both Python 2 and Python 3 installed, it is most likely 205 | that Python 2 will be selected by default. To ensure Python 3 is used on a 206 | target with both versions of Python, ensure `ansible_python_interpreter` is 207 | set in your inventory. Below is an example inventory: 208 | 209 | ```yaml 210 | --- 211 | 212 | k3s_cluster: 213 | hosts: 214 | kube-0: 215 | ansible_user: ansible 216 | ansible_host: 10.10.9.2 217 | ansible_python_interpreter: /usr/bin/python3 218 | kube-1: 219 | ansible_user: ansible 220 | ansible_host: 10.10.9.3 221 | ansible_python_interpreter: /usr/bin/python3 222 | kube-2: 223 | ansible_user: ansible 224 | ansible_host: 10.10.9.4 225 | ansible_python_interpreter: /usr/bin/python3 226 | ``` 227 | 228 | #### Important note about `k3s_release_version` 229 | 230 | If you do not set a `k3s_release_version` the latest version from the stable 231 | channel of k3s will be installed. If you are developing against a specific 232 | version of k3s you must ensure this is set in your Ansible configuration, eg: 233 | 234 | ```yaml 235 | k3s_release_version: v1.19.3+k3s1 236 | ``` 237 | 238 | It is also possible to install specific K3s "Channels", below are some 239 | examples for `k3s_release_version`: 240 | 241 | ```yaml 242 | k3s_release_version: false # defaults to 'stable' channel 243 | k3s_release_version: stable # latest 'stable' release 244 | k3s_release_version: testing # latest 'testing' release 245 | k3s_release_version: v1.19 # latest 'v1.19' release 246 | k3s_release_version: v1.19.3+k3s3 # specific release 247 | 248 | # Specific commit 249 | # CAUTION - only used for testing - must be 40 characters 250 | k3s_release_version: 48ed47c4a3e420fa71c18b2ec97f13dc0659778b 251 | ``` 252 | 253 | #### Important note about `k3s_install_hard_links` 254 | 255 | If you are using the [system-upgrade-controller](https://github.com/rancher/system-upgrade-controller) 256 | you will need to use hard links rather than 
symbolic links as the controller 257 | will not be able to follow symbolic links. This option has been added however 258 | is not enabled by default to avoid breaking existing installations. 259 | 260 | To enable the use of hard links, ensure `k3s_install_hard_links` is set 261 | to `true`. 262 | 263 | ```yaml 264 | k3s_install_hard_links: true 265 | ``` 266 | 267 | The result of this can be seen by running the following in `k3s_install_dir`: 268 | 269 | `ls -larthi | grep -E 'k3s|ctr|ctl' | grep -vE ".sh$" | sort` 270 | 271 | Symbolic Links: 272 | 273 | ```text 274 | [root@node1 bin]# ls -larthi | grep -E 'k3s|ctr|ctl' | grep -vE ".sh$" | sort 275 | 3277823 -rwxr-xr-x 1 root root 52M Jul 25 12:50 k3s-v1.18.4+k3s1 276 | 3279565 lrwxrwxrwx 1 root root 31 Jul 25 12:52 k3s -> /usr/local/bin/k3s-v1.18.6+k3s1 277 | 3279644 -rwxr-xr-x 1 root root 51M Jul 25 12:52 k3s-v1.18.6+k3s1 278 | 3280079 lrwxrwxrwx 1 root root 31 Jul 25 12:52 ctr -> /usr/local/bin/k3s-v1.18.6+k3s1 279 | 3280080 lrwxrwxrwx 1 root root 31 Jul 25 12:52 crictl -> /usr/local/bin/k3s-v1.18.6+k3s1 280 | 3280081 lrwxrwxrwx 1 root root 31 Jul 25 12:52 kubectl -> /usr/local/bin/k3s-v1.18.6+k3s1 281 | ``` 282 | 283 | Hard Links: 284 | 285 | ```text 286 | [root@node1 bin]# ls -larthi | grep -E 'k3s|ctr|ctl' | grep -vE ".sh$" | sort 287 | 3277823 -rwxr-xr-x 1 root root 52M Jul 25 12:50 k3s-v1.18.4+k3s1 288 | 3279644 -rwxr-xr-x 5 root root 51M Jul 25 12:52 crictl 289 | 3279644 -rwxr-xr-x 5 root root 51M Jul 25 12:52 ctr 290 | 3279644 -rwxr-xr-x 5 root root 51M Jul 25 12:52 k3s 291 | 3279644 -rwxr-xr-x 5 root root 51M Jul 25 12:52 k3s-v1.18.6+k3s1 292 | 3279644 -rwxr-xr-x 5 root root 51M Jul 25 12:52 kubectl 293 | ``` 294 | 295 | #### Important note about `k3s_build_cluster` 296 | 297 | If you set `k3s_build_cluster` to `false`, this role will install each play 298 | host as a standalone node. An example of when you might use this would be 299 | when building a large number of standalone IoT devices running K3s. 
Below is a 300 | hypothetical situation where we are to deploy 25 Raspberry Pi devices, each a 301 | standalone system and not a cluster of 25 nodes. To do this we'd use a playbook 302 | similar to the below: 303 | 304 | ```yaml 305 | - hosts: k3s_nodes # eg. 25 RPi's defined in our inventory. 306 | vars: 307 | k3s_build_cluster: false 308 | roles: 309 | - xanmanning.k3s 310 | ``` 311 | 312 | #### Important note about `k3s_control_node` and High Availability (HA) 313 | 314 | By default only one host will be defined as a control node by Ansible, If you 315 | do not set a host as a control node, this role will automatically delegate 316 | the first play host as a control node. This is not suitable for use within 317 | a Production workload. 318 | 319 | If multiple hosts have `k3s_control_node` set to `true`, you must also set 320 | `datastore-endpoint` in `k3s_server` as the connection string to a MySQL or 321 | PostgreSQL database, or external Etcd cluster else the play will fail. 322 | 323 | If using TLS, the CA, Certificate and Key need to already be available on 324 | the play hosts. 325 | 326 | See: [High Availability with an External DB](https://rancher.com/docs/k3s/latest/en/installation/ha/) 327 | 328 | It is also possible, though not supported, to run a single K3s control node 329 | with a `datastore-endpoint` defined. As this is not a typically supported 330 | configuration you will need to set `k3s_use_unsupported_config` to `true`. 331 | 332 | Since K3s v1.19.1 it is possible to use an embedded Etcd as the backend 333 | database, and this is done by setting `k3s_etcd_datastore` to `true`. 334 | The best practice for Etcd is to define at least 3 members to ensure quorum is 335 | established. In addition to this, an odd number of members is recommended to 336 | ensure a majority in the event of a network partition. If you want to use 2 337 | members or an even number of members, please set `k3s_use_unsupported_config` 338 | to `true`. 
339 | 340 | #### Important note about `k3s_server_manifests_urls` and `k3s_server_pod_manifests_urls` 341 | 342 | To deploy server manifests and server pod manifests from URL, you need to 343 | specify a `url` and optionally a `filename` (if none provided basename is used). Below is an example of how to deploy the 344 | Tigera operator for Calico and kube-vip. 345 | 346 | ```yaml 347 | --- 348 | 349 | k3s_server_manifests_urls: 350 | - url: https://docs.projectcalico.org/archive/v3.19/manifests/tigera-operator.yaml 351 | filename: tigera-operator.yaml 352 | 353 | k3s_server_pod_manifests_urls: 354 | - url: https://raw.githubusercontent.com/kube-vip/kube-vip/main/example/deploy/0.1.4.yaml 355 | filename: kube-vip.yaml 356 | 357 | ``` 358 | 359 | #### Important note about `k3s_airgap` 360 | 361 | When deploying k3s in an air gapped environment you should provide the `k3s` binary in `./files/`. The binary will not be downloaded from Github and will subsequently not be verified using the provided sha256 sum, nor able to verify the version that you are running. All risks and burdens associated are assumed by the user in this scenario. 362 | 363 | ## Dependencies 364 | 365 | No dependencies on other roles. 366 | 367 | ## Example Playbooks 368 | 369 | Example playbook, single control node running `testing` channel k3s: 370 | 371 | ```yaml 372 | - hosts: k3s_nodes 373 | vars: 374 | k3s_release_version: testing 375 | roles: 376 | - role: xanmanning.k3s 377 | ``` 378 | 379 | Example playbook, Highly Available with PostgreSQL database running the latest 380 | stable release: 381 | 382 | ```yaml 383 | - hosts: k3s_nodes 384 | vars: 385 | k3s_registration_address: loadbalancer # Typically a load balancer. 
386 | k3s_server: 387 | datastore-endpoint: "postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable" 388 | pre_tasks: 389 | - name: Set each node to be a control node 390 | ansible.builtin.set_fact: 391 | k3s_control_node: true 392 | when: inventory_hostname in ['node2', 'node3'] 393 | roles: 394 | - role: xanmanning.k3s 395 | ``` 396 | 397 | ## License 398 | 399 | [BSD 3-clause](LICENSE.txt) 400 | 401 | ## Contributors 402 | 403 | Contributions from the community are very welcome, but please read the 404 | [contribution guidelines](CONTRIBUTING.md) before doing so, this will help 405 | make things as streamlined as possible. 406 | 407 | Also, please check out the awesome 408 | [list of contributors](https://github.com/PyratLabs/ansible-role-k3s/graphs/contributors). 409 | 410 | ## Author Information 411 | 412 | [Xan Manning](https://xan.manning.io/) 413 | -------------------------------------------------------------------------------- /defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | ## 4 | # Global/Cluster Configuration 5 | ## 6 | 7 | # k3s state, options: installed, started, stopped, restarted, uninstalled, validated 8 | # (default: installed) 9 | k3s_state: installed 10 | 11 | # Use a specific k3s version, if set to "false" we will get the latest 12 | # k3s_release_version: v1.19.3 13 | k3s_release_version: false 14 | 15 | # Location of the k3s configuration file 16 | k3s_config_file: "/etc/rancher/k3s/config.yaml" 17 | 18 | # Location of the k3s configuration directory 19 | k3s_config_yaml_d_dir: "/etc/rancher/k3s/config.yaml.d" 20 | 21 | # When multiple ansible_play_hosts are present, attempt to cluster the nodes. 22 | # Using false will create multiple standalone nodes. 
23 | # (default: true) 24 | k3s_build_cluster: true 25 | 26 | # URL for GitHub project 27 | k3s_github_url: https://github.com/k3s-io/k3s 28 | 29 | # URL for K3s updates API 30 | k3s_api_url: https://update.k3s.io 31 | 32 | # Install K3s in Air Gapped scenarios 33 | k3s_airgap: false 34 | 35 | # Skip all tasks that validate configuration 36 | k3s_skip_validation: false 37 | 38 | # Skip all tasks that check environment configuration 39 | k3s_skip_env_checks: false 40 | 41 | # Skip post-checks 42 | k3s_skip_post_checks: false 43 | 44 | # Installation directory for k3s 45 | k3s_install_dir: /usr/local/bin 46 | 47 | # Install using hard links rather than symbolic links 48 | k3s_install_hard_links: false 49 | 50 | # A list of templates used for configuring the server. 51 | k3s_server_config_yaml_d_files: [] 52 | 53 | # A list of templates used for configuring the agent. 54 | k3s_agent_config_yaml_d_files: [] 55 | 56 | # A list of templates used for pre-configuring the cluster. 57 | k3s_server_manifests_templates: [] 58 | 59 | # A list of URLs used for pre-configuring the cluster. 60 | k3s_server_manifests_urls: [] 61 | # - url: https://some/url/to/manifest.yml 62 | # filename: manifest.yml 63 | 64 | # A list of templates used for installing static pod manifests on the control plane. 65 | k3s_server_pod_manifests_templates: [] 66 | 67 | # A list of URLs used for installing static pod manifests on the control plane. 68 | k3s_server_pod_manifests_urls: [] 69 | # - url: https://some/url/to/manifest.yml 70 | # filename: manifest.yml 71 | 72 | # Use experimental features in k3s? 73 | k3s_use_experimental: false 74 | 75 | # Allow for unsupported configurations in k3s? 76 | k3s_use_unsupported_config: false 77 | 78 | # Enable etcd embedded datastore 79 | k3s_etcd_datastore: false 80 | 81 | ## 82 | # Systemd config 83 | ## 84 | 85 | # Start k3s on system boot 86 | k3s_start_on_boot: true 87 | 88 | # List of required systemd units to k3s service unit. 
89 | k3s_service_requires: [] 90 | 91 | # List of "wanted" systemd unit to k3s (weaker than "requires"). 92 | k3s_service_wants: [] 93 | 94 | # Start k3s before a defined list of systemd units. 95 | k3s_service_before: [] 96 | 97 | # Start k3s after a defined list of systemd units. 98 | k3s_service_after: [] 99 | 100 | # Dictionary of environment variables to use within systemd unit file 101 | # Some examples below 102 | k3s_service_env_vars: {} 103 | # PATH: /opt/k3s/bin 104 | # GOGC: 10 105 | 106 | # Location on host of a environment file to include. This must already exist on 107 | # the target as this role will not populate this file. 108 | k3s_service_env_file: false 109 | 110 | # Additional ExecStartPre commands for the k3s service in systemd 111 | # This dictionary can be used to specify additional ExecStartPre commands 112 | # for the k3s service. These commands will be executed before the main 113 | # service starts. For example, the command below removes the 114 | # cpu_manager_state file if it exists. 115 | # 116 | # Usage example: 117 | # k3s_service_exec_start_pre_vars: 118 | # - "-/usr/bin/rm -f /var/lib/kubelet/cpu_manager_state" 119 | # 120 | # Note: The "-" prefix before the command ensures that systemd ignores 121 | # any errors that occur when executing the command. 122 | k3s_service_exec_start_pre_vars: {} 123 | 124 | ## 125 | # Server Configuration 126 | ## 127 | 128 | k3s_server: {} 129 | # k3s_server: 130 | # listen-port: 6443 131 | 132 | ## 133 | # Agent Configuration 134 | ## 135 | 136 | k3s_agent: {} 137 | # k3s_agent: 138 | # node-label: 139 | # - "foo=bar" 140 | # - "bish=bosh" 141 | 142 | ## 143 | # Ansible Controller configuration 144 | ## 145 | 146 | # Use become privileges? 147 | k3s_become: false 148 | 149 | # Private registry configuration. 
150 | # Rancher k3s documentation: https://rancher.com/docs/k3s/latest/en/installation/private-registry/ 151 | k3s_registries: 152 | 153 | mirrors: 154 | # docker.io: 155 | # endpoint: 156 | # - "https://mycustomreg.com:5000" 157 | configs: 158 | # "mycustomreg:5000": 159 | # auth: 160 | # # this is the registry username 161 | # username: xxxxxx 162 | # # this is the registry password 163 | # password: xxxxxx 164 | # tls: 165 | # # path to the cert file used in the registry 166 | # cert_file: 167 | # # path to the key file used in the registry 168 | # key_file: 169 | # # path to the ca file used in the registry 170 | # ca_file: 171 | -------------------------------------------------------------------------------- /documentation/README.md: -------------------------------------------------------------------------------- 1 | # ansible-role-k3s 2 | 3 | This document describes a number of ways of consuming this Ansible role for use 4 | in your own k3s deployments. It will not be able to cover every use case 5 | scenario but will provide some common example configurations. 6 | 7 | ## Requirements 8 | 9 | Before you start you will need an Ansible controller. This can either be your 10 | workstation, or a dedicated system that you have access to. The instructions 11 | in this documentation assume you are using `ansible` CLI, there are no 12 | instructions available for Ansible Tower at this time. 13 | 14 | Follow the below guide to get Ansible installed. 15 | 16 | https://docs.ansible.com/ansible/latest/installation_guide/index.html 17 | 18 | ## Quickstart 19 | 20 | Below are quickstart examples for a single node k3s server, a k3s cluster 21 | with a single control node and HA k3s cluster. These represent the bare 22 | minimum configuration. 
23 | 24 | - [Single node k3s](quickstart-single-node.md) 25 | - [Simple k3s cluster](quickstart-cluster.md) 26 | - [HA k3s cluster using embedded etcd](quickstart-ha-cluster.md) 27 | 28 | ## Example configurations and operations 29 | 30 | ### Configuration 31 | 32 | - [Setting up 2-node HA control plane with external datastore](configuration/2-node-ha-ext-datastore.md) 33 | - [Provision multiple standalone k3s nodes](configuration/multiple-standalone-k3s-nodes.md) 34 | - [Set node labels and component arguments](configuration/node-labels-and-component-args.md) 35 | - [Use an alternate CNI](configuration/use-an-alternate-cni.md) 36 | - [IPv4/IPv6 Dual-Stack config](configuration/ipv4-ipv6-dual-stack.md) 37 | - [Start K3S after another service](configuration/systemd-config.md) 38 | 39 | ### Operations 40 | 41 | - [Stop/Start a cluster](operations/stop-start-cluster.md) 42 | - [Updating k3s](operations/updating-k3s.md) 43 | - [Extending a cluster](operations/extending-a-cluster.md) 44 | - [Shrinking a cluster](operations/shrinking-a-cluster.md) 45 | -------------------------------------------------------------------------------- /documentation/configuration/2-node-ha-ext-datastore.md: -------------------------------------------------------------------------------- 1 | # 2 Node HA Control Plane with external database 2 | 3 | For this configuration we are deploying a highly available control plane 4 | composed of two control nodes. This can be achieved with embedded etcd, however 5 | etcd ideally has an odd number of nodes. 6 | 7 | The example below will use an external PostgreSQL datastore to store the 8 | cluster state information. 
9 | 10 | Main guide: https://rancher.com/docs/k3s/latest/en/installation/ha/ 11 | 12 | ## Architecture 13 | 14 | ```text 15 | +-------------------+ 16 | | Load Balancer/VIP | 17 | +---------+---------+ 18 | | 19 | | 20 | | 21 | | 22 | +------------+ | +------------+ 23 | | | | | | 24 | +--------+ control-01 +<-----+----->+ control-02 | 25 | | | | | | 26 | | +-----+------+ +------+-----+ 27 | | | | 28 | | +-------------+-------------+ 29 | | | | | 30 | | +------v----+ +-----v-----+ +----v------+ 31 | | | | | | | | 32 | | | worker-01 | | worker-02 | | worker-03 | 33 | | | | | | | | 34 | | +-----------+ +-----------+ +-----------+ 35 | | 36 | | +-------+ +-------+ 37 | | | | | | 38 | +-------------------> db-01 +--+ db-02 | 39 | | | | | 40 | +-------+ +-------+ 41 | ``` 42 | 43 | ### Required Components 44 | 45 | - Load balancer 46 | - 2 control plane nodes 47 | - 1 or more worker nodes 48 | - PostgreSQL Database (replicated, or Linux HA Cluster). 49 | 50 | ## Configuration 51 | 52 | For your control nodes, you will need to instruct the control plane of the 53 | PostgreSQL datastore endpoint and set `k3s_registration_address` to be the 54 | hostname or IP of your load balancer or VIP. 55 | 56 | Below is the example for PostgreSQL, it is possible to use MySQL or an Etcd 57 | cluster as well. Consult the below guide for using alternative datastore 58 | endpoints. 59 | 60 | https://rancher.com/docs/k3s/latest/en/installation/datastore/#datastore-endpoint-format-and-functionality 61 | 62 | ```yaml 63 | --- 64 | 65 | k3s_server: 66 | datastore-endpoint: postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable 67 | node-taint: 68 | - "k3s-controlplane=true:NoExecute" 69 | ``` 70 | 71 | Your worker nodes need to know how to connect to the control plane, this is 72 | defined by setting `k3s_registration_address` to the hostname or IP address of 73 | the load balancer. 
74 | 75 | ```yaml 76 | --- 77 | 78 | k3s_registration_address: control.examplek3s.com 79 | ``` 80 | -------------------------------------------------------------------------------- /documentation/configuration/ipv4-ipv6-dual-stack.md: -------------------------------------------------------------------------------- 1 | # IPv4 and IPv6 Dual-stack config 2 | 3 | If you need to run your K3S cluster with both IPv4 and IPv6 address ranges 4 | you will need to configure the `k3s_server.cluster-cidr` and 5 | `k3s_server.service-cidr` values specifying both ranges. 6 | 7 | :hand: if you are using `k3s<1.23` you will need to use a different CNI as 8 | dual-stack support is not available in Flannel. 9 | 10 | Below is a noddy example: 11 | 12 | ```yaml 13 | --- 14 | 15 | k3s_server: 16 | # Using Calico on k3s<1.23 so Flannel needs to be disabled. 17 | flannel-backend: 'none' 18 | # Format: ipv4/cidr,ipv6/cidr 19 | cluster-cidr: 10.42.0.0/16,fc00:a0::/64 20 | service-cidr: 10.43.0.0/16,fc00:a1::/64 21 | ``` 22 | -------------------------------------------------------------------------------- /documentation/configuration/multiple-standalone-k3s-nodes.md: -------------------------------------------------------------------------------- 1 | # Multiple standalone K3s nodes 2 | 3 | This is an example of when you might want to configure multiple standalone 4 | k3s nodes simultaneously. For this we will assume a hypothetical situation 5 | where we are configuring 25 Raspberry Pis to deploy to our shop floors. 6 | 7 | Each Rasperry Pi will be configured as a standalone IoT device hosting an 8 | application that will push data to head office. 
9 | 10 | ## Architecture 11 | 12 | ```text 13 | +-------------+ 14 | | | 15 | | Node-01 +-+ 16 | | | | 17 | +--+----------+ +-+ 18 | | | | 19 | +--+---------+ +-+ 20 | | | | 21 | +--+--------+ | 22 | | | Node-N 23 | +----------+ 24 | 25 | ``` 26 | 27 | ## Configuration 28 | 29 | Below is our example inventory of 200 nodes (Truncated): 30 | 31 | ```yaml 32 | --- 33 | 34 | k3s_workers: 35 | hosts: 36 | kube-0: 37 | ansible_user: ansible 38 | ansible_host: 10.10.9.2 39 | ansible_python_interpreter: /usr/bin/python3 40 | kube-1: 41 | ansible_user: ansible 42 | ansible_host: 10.10.9.3 43 | ansible_python_interpreter: /usr/bin/python3 44 | kube-2: 45 | ansible_user: ansible 46 | ansible_host: 10.10.9.4 47 | ansible_python_interpreter: /usr/bin/python3 48 | 49 | # ..... SNIP ..... 50 | 51 | kube-199: 52 | ansible_user: ansible 53 | ansible_host: 10.10.9.201 54 | ansible_python_interpreter: /usr/bin/python3 55 | kube-200: 56 | ansible_user: ansible 57 | ansible_host: 10.10.9.202 58 | ansible_python_interpreter: /usr/bin/python3 59 | 60 | ``` 61 | 62 | In our `group_vars/` (or as `vars:` in our playbook), we will need to set the 63 | `k3s_build_cluster` variable to `false`. This will stop the role from 64 | attempting to cluster all 200 nodes, instead it will install k3s across each 65 | node as as 200 standalone servers. 
66 | 67 | ```yaml 68 | --- 69 | 70 | k3s_build_cluster: false 71 | ``` 72 | -------------------------------------------------------------------------------- /documentation/configuration/node-labels-and-component-args.md: -------------------------------------------------------------------------------- 1 | # Configure node labels and component arguments 2 | 3 | The following command line arguments can be specified multiple times with 4 | `key=value` pairs: 5 | 6 | - `--kube-kubelet-arg` 7 | - `--kube-proxy-arg` 8 | - `--kube-apiserver-arg` 9 | - `--kube-scheduler-arg` 10 | - `--kube-controller-manager-arg` 11 | - `--kube-cloud-controller-manager-arg` 12 | - `--node-label` 13 | - `--node-taint` 14 | 15 | In the config file, this is done by defining a list of values for each 16 | command line argument, for example: 17 | 18 | ```yaml 19 | --- 20 | 21 | k3s_server: 22 | # Set the plugins registry directory 23 | kubelet-arg: 24 | - "volume-plugin-dir=/var/lib/rancher/k3s/agent/kubelet/plugins_registry" 25 | # Set the pod eviction timeout and node monitor grace period 26 | kube-controller-manager-arg: 27 | - "pod-eviction-timeout=2m" 28 | - "node-monitor-grace-period=30s" 29 | # Set API server feature gate 30 | kube-apiserver-arg: 31 | - "feature-gates=RemoveSelfLink=false" 32 | # Labels to apply to a node 33 | node-label: 34 | - "NodeTier=development" 35 | - "NodeLocation=eu-west-2a" 36 | # Stop k3s control plane having workloads scheduled on them 37 | node-taint: 38 | - "k3s-controlplane=true:NoExecute" 39 | ``` 40 | -------------------------------------------------------------------------------- /documentation/configuration/systemd-config.md: -------------------------------------------------------------------------------- 1 | # systemd config 2 | 3 | Below are examples to tweak how and when K3S starts up. 4 | 5 | ## Wanted service units 6 | 7 | In this example, we're going to start K3S after Wireguard. Our example server 8 | has a Wireguard connection `wg0`. 
We are using "wants" rather than "requires" 9 | as it's a weaker requirement that Wireguard must be running. We then want 10 | K3S to start after Wireguard has started. 11 | 12 | ```yaml 13 | --- 14 | 15 | k3s_service_wants: 16 | - wg-quick@wg0.service 17 | k3s_service_after: 18 | - wg-quick@wg0.service 19 | ``` 20 | -------------------------------------------------------------------------------- /documentation/configuration/use-an-alternate-cni.md: -------------------------------------------------------------------------------- 1 | # Use an alternate CNI 2 | 3 | K3S ships with Flannel, however sometimes you want a different CNI such as 4 | Calico, Canal or Weave Net. To do this you will need to disable Flannel with 5 | `flannel-backend: "none"`, specify a `cluster-cidr` and add your CNI manifests 6 | to the `k3s_server_manifests_templates`. 7 | 8 | ## Calico example 9 | 10 | The below is based on the 11 | [Calico quickstart documentation](https://docs.projectcalico.org/getting-started/kubernetes/quickstart). 12 | 13 | Steps: 14 | 15 | 1. Download `tigera-operator.yaml` to the manifests directory. 16 | 1. Download `custom-resources.yaml` to the manifests directory. 17 | 1. Choose a `cluster-cidr` (we are using 192.168.0.0/16) 18 | 1. Set `k3s_server` and `k3s_server_manifests_templates` as per the below, 19 | ensure the paths to manifests are correct for your project repo. 20 | 21 | ```yaml 22 | --- 23 | 24 | # K3S Server config, don't deploy flannel and set cluster pod CIDR. 25 | k3s_server: 26 | cluster-cidr: 192.168.0.0/16 27 | flannel-backend: "none" 28 | 29 | # Deploy the following k3s server templates. 
30 | k3s_server_manifests_templates: 31 | - "manifests/calico/tigera-operator.yaml" 32 | - "manifests/calico/custom-resources.yaml" 33 | ``` 34 | 35 | All nodes should come up as "Ready", below is a 3-node cluster: 36 | 37 | ```text 38 | $ kubectl get nodes -o wide -w 39 | NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME 40 | kube-0 Ready control-plane,etcd,master 114s v1.20.2+k3s1 10.10.9.2 10.10.9.2 Ubuntu 20.04.1 LTS 5.4.0-56-generic containerd://1.4.3-k3s1 41 | kube-1 Ready control-plane,etcd,master 80s v1.20.2+k3s1 10.10.9.3 10.10.9.3 Ubuntu 20.04.1 LTS 5.4.0-56-generic containerd://1.4.3-k3s1 42 | kube-2 Ready control-plane,etcd,master 73s v1.20.2+k3s1 10.10.9.4 10.10.9.4 Ubuntu 20.04.1 LTS 5.4.0-56-generic containerd://1.4.3-k3s1 43 | ``` 44 | 45 | Pods should be deployed with deployed within the CIDR specified in our config 46 | file. 47 | 48 | ```text 49 | $ kubectl get pods -o wide -A 50 | NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES 51 | calico-system calico-kube-controllers-cfb4ff54b-8rp8r 1/1 Running 0 5m4s 192.168.145.65 kube-0 52 | calico-system calico-node-2cm2m 1/1 Running 0 5m4s 10.10.9.2 kube-0 53 | calico-system calico-node-2s6lx 1/1 Running 0 4m42s 10.10.9.4 kube-2 54 | calico-system calico-node-zwqjz 1/1 Running 0 4m49s 10.10.9.3 kube-1 55 | calico-system calico-typha-7b6747d665-78swq 1/1 Running 0 3m5s 10.10.9.4 kube-2 56 | calico-system calico-typha-7b6747d665-8ff66 1/1 Running 0 3m5s 10.10.9.3 kube-1 57 | calico-system calico-typha-7b6747d665-hgplx 1/1 Running 0 5m5s 10.10.9.2 kube-0 58 | kube-system coredns-854c77959c-6qhgt 1/1 Running 0 5m20s 192.168.145.66 kube-0 59 | kube-system helm-install-traefik-4czr9 0/1 Completed 0 5m20s 192.168.145.67 kube-0 60 | kube-system metrics-server-86cbb8457f-qcxf5 1/1 Running 0 5m20s 192.168.145.68 kube-0 61 | kube-system traefik-6f9cbd9bd4-7h4rl 1/1 Running 0 2m50s 192.168.126.65 kube-1 62 | tigera-operator 
tigera-operator-b6c4bfdd9-29hhr 1/1 Running 0 5m20s 10.10.9.2 kube-0 63 | ``` 64 | -------------------------------------------------------------------------------- /documentation/operations/extending-a-cluster.md: -------------------------------------------------------------------------------- 1 | # Extending a cluster 2 | 3 | This document describes the method for extending an cluster with new worker 4 | nodes. 5 | 6 | ## Assumptions 7 | 8 | It is assumed that you have already deployed a k3s cluster using this role, 9 | you have an appropriately configured inventory and playbook to create the 10 | cluster. 11 | 12 | Below, our example inventory and playbook are as follows: 13 | 14 | - inventory: `inventory.yml` 15 | - playbook: `cluster.yml` 16 | 17 | Currently your `inventory.yml` looks like this, it has two nodes defined, 18 | `kube-0` (control node) and `kube-1` (worker node). 19 | 20 | ```yaml 21 | --- 22 | 23 | k3s_cluster: 24 | hosts: 25 | kube-0: 26 | ansible_user: ansible 27 | ansible_host: 10.10.9.2 28 | ansible_python_interpreter: /usr/bin/python3 29 | kube-1: 30 | ansible_user: ansible 31 | ansible_host: 10.10.9.3 32 | ansible_python_interpreter: /usr/bin/python3 33 | ``` 34 | 35 | ## Method 36 | 37 | We have our two nodes, one control, one worker. The goal is to extend this to 38 | add capacity by adding a new worker node, `kube-2`. To do this we will add the 39 | new node to our inventory. 40 | 41 | ```yaml 42 | --- 43 | 44 | k3s_cluster: 45 | hosts: 46 | kube-0: 47 | ansible_user: ansible 48 | ansible_host: 10.10.9.2 49 | ansible_python_interpreter: /usr/bin/python3 50 | kube-1: 51 | ansible_user: ansible 52 | ansible_host: 10.10.9.3 53 | ansible_python_interpreter: /usr/bin/python3 54 | kube-2: 55 | ansible_user: ansible 56 | ansible_host: 10.10.9.4 57 | ansible_python_interpreter: /usr/bin/python3 58 | ``` 59 | 60 | Once the new node has been added, you can re-run the automation to join it to 61 | the cluster. 
You should expect the majority of changes to the worker node being 62 | introduced to the cluster. 63 | 64 | ```text 65 | PLAY RECAP ******************************************************************************************************* 66 | kube-0 : ok=53 changed=1 unreachable=0 failed=0 skipped=30 rescued=0 ignored=0 67 | kube-1 : ok=40 changed=1 unreachable=0 failed=0 skipped=35 rescued=0 ignored=0 68 | kube-2 : ok=42 changed=10 unreachable=0 failed=0 skipped=35 rescued=0 ignored=0 69 | ``` 70 | -------------------------------------------------------------------------------- /documentation/operations/shrinking-a-cluster.md: -------------------------------------------------------------------------------- 1 | # Shrinking a cluster 2 | 3 | This document describes the method for shrinking a cluster, by removing a 4 | worker nodes. 5 | 6 | ## Assumptions 7 | 8 | It is assumed that you have already deployed a k3s cluster using this role, 9 | you have an appropriately configured inventory and playbook to create the 10 | cluster. 11 | 12 | Below, our example inventory and playbook are as follows: 13 | 14 | - inventory: `inventory.yml` 15 | - playbook: `cluster.yml` 16 | 17 | Currently your `inventory.yml` looks like this, it has three nodes defined, 18 | `kube-0` (control node) and `kube-1`, `kube-2` (worker nodes). 19 | 20 | ```yaml 21 | --- 22 | 23 | k3s_cluster: 24 | hosts: 25 | kube-0: 26 | ansible_user: ansible 27 | ansible_host: 10.10.9.2 28 | ansible_python_interpreter: /usr/bin/python3 29 | kube-1: 30 | ansible_user: ansible 31 | ansible_host: 10.10.9.3 32 | ansible_python_interpreter: /usr/bin/python3 33 | kube-2: 34 | ansible_user: ansible 35 | ansible_host: 10.10.9.4 36 | ansible_python_interpreter: /usr/bin/python3 37 | ``` 38 | 39 | ## Method 40 | 41 | We have our three nodes, one control, two workers. The goal is to shrink this to 42 | remove excess capacity by offboarding the worker node `kube-2`. 
To do this we 43 | will set `kube-2` node to `k3s_state: uninstalled` in our inventory. 44 | 45 | ```yaml 46 | --- 47 | 48 | k3s_cluster: 49 | hosts: 50 | kube-0: 51 | ansible_user: ansible 52 | ansible_host: 10.10.9.2 53 | ansible_python_interpreter: /usr/bin/python3 54 | kube-1: 55 | ansible_user: ansible 56 | ansible_host: 10.10.9.3 57 | ansible_python_interpreter: /usr/bin/python3 58 | kube-2: 59 | ansible_user: ansible 60 | ansible_host: 10.10.9.4 61 | ansible_python_interpreter: /usr/bin/python3 62 | k3s_state: uninstalled 63 | ``` 64 | 65 | What you will typically see is changes to your control plane (`kube-0`) and the 66 | node being removed (`kube-2`). The role will register the removal of the node 67 | with the cluster by draining the node and removing it from the cluster. 68 | 69 | ```text 70 | PLAY RECAP ******************************************************************************************************* 71 | kube-0 : ok=55 changed=2 unreachable=0 failed=0 skipped=28 rescued=0 ignored=0 72 | kube-1 : ok=40 changed=0 unreachable=0 failed=0 skipped=35 rescued=0 ignored=0 73 | kube-2 : ok=23 changed=2 unreachable=0 failed=0 skipped=17 rescued=0 ignored=1 74 | ``` 75 | -------------------------------------------------------------------------------- /documentation/operations/stop-start-cluster.md: -------------------------------------------------------------------------------- 1 | # Stopping and Starting a cluster 2 | 3 | This document describes the Ansible method for restarting a k3s cluster 4 | deployed by this role. 5 | 6 | ## Assumptions 7 | 8 | It is assumed that you have already deployed a k3s cluster using this role, 9 | you have an appropriately configured inventory and playbook to create the 10 | cluster. 
11 | 12 | Below, our example inventory and playbook are as follows: 13 | 14 | - inventory: `inventory.yml` 15 | - playbook: `cluster.yml` 16 | 17 | ## Method 18 | 19 | ### Start cluster 20 | 21 | You can start the cluster using either of the following commands: 22 | 23 | - Using the playbook: `ansible-playbook -i inventory.yml cluster.yml --become -e 'k3s_state=started'` 24 | - Using an ad-hoc command: `ansible -i inventory.yml -m service -a 'name=k3s state=started' --become all` 25 | 26 | Below is example output, remember that Ansible is idempotent so re-running a 27 | command may not necessarily change the state. 28 | 29 | **Playbook method output**: 30 | 31 | ```text 32 | PLAY RECAP ******************************************************************************************************* 33 | kube-0 : ok=6 changed=0 unreachable=0 failed=0 skipped=2 rescued=0 ignored=0 34 | kube-1 : ok=6 changed=0 unreachable=0 failed=0 skipped=2 rescued=0 ignored=0 35 | kube-2 : ok=6 changed=0 unreachable=0 failed=0 skipped=2 rescued=0 ignored=0 36 | ``` 37 | 38 | ### Stop cluster 39 | 40 | You can stop the cluster using either of the following commands: 41 | 42 | - Using the playbook: `ansible-playbook -i inventory.yml cluster.yml --become -e 'k3s_state=stopped'` 43 | - Using an ad-hoc command: `ansible -i inventory.yml -m service -a 'name=k3s state=stopped' --become all` 44 | 45 | Below is example output, remember that Ansible is idempotent so re-running a 46 | command may not necessarily change the state. 
47 | 48 | **Playbook method output**: 49 | 50 | ```text 51 | PLAY RECAP ******************************************************************************************************* 52 | kube-0 : ok=6 changed=1 unreachable=0 failed=0 skipped=2 rescued=0 ignored=0 53 | kube-1 : ok=6 changed=1 unreachable=0 failed=0 skipped=2 rescued=0 ignored=0 54 | kube-2 : ok=6 changed=1 unreachable=0 failed=0 skipped=2 rescued=0 ignored=0 55 | ``` 56 | 57 | ### Restart cluster 58 | 59 | Just like the `service` module, you can also specify `restarted` as a state. 60 | This will do `stop` followed by `start`. 61 | 62 | - Using the playbook: `ansible-playbook -i inventory.yml cluster.yml --become -e 'k3s_state=restarted'` 63 | - Using an ad-hoc command: `ansible -i inventory.yml -m service -a 'name=k3s state=restarted' --become all` 64 | 65 | ```text 66 | PLAY RECAP ******************************************************************************************************* 67 | kube-0 : ok=7 changed=1 unreachable=0 failed=0 skipped=3 rescued=0 ignored=0 68 | kube-1 : ok=7 changed=1 unreachable=0 failed=0 skipped=3 rescued=0 ignored=0 69 | kube-2 : ok=7 changed=1 unreachable=0 failed=0 skipped=3 rescued=0 ignored=0 70 | ``` 71 | 72 | ## Tips 73 | 74 | You can limit the targets by adding the `-l` flag to your `ansible-playbook` 75 | command, or simply target your ad-hoc commands. 
For example, in a 3 node 76 | cluster (called `kube-0`, `kube-1` and `kube-2`) we can limit the restart to 77 | `kube-1` and `kube-2` with the following: 78 | 79 | - Using the playbook: `ansible-playbook -i inventory.yml cluster.yml --become -e 'k3s_state=restarted' -l "kube-1,kube-2"` 80 | - Using an ad-hoc command: `ansible -i inventory.yml -m service -a 'name=k3s state=restarted' --become "kube-1,kube-2"` 81 | 82 | ```text 83 | PLAY RECAP ******************************************************************************************************** 84 | kube-1 : ok=7 changed=2 unreachable=0 failed=0 skipped=3 rescued=0 ignored=0 85 | kube-2 : ok=7 changed=2 unreachable=0 failed=0 skipped=3 rescued=0 ignored=0 86 | ``` 87 | 88 | ## FAQ 89 | 90 | 1. _Why might I use the `ansible-playbook` command over an ad-hoc command?_ 91 | - The stop/start tasks will be aware of configuration. As the role 92 | develops, there might be some pre-tasks added to change how a cluster 93 | is stopped or started. 94 | -------------------------------------------------------------------------------- /documentation/operations/updating-k3s.md: -------------------------------------------------------------------------------- 1 | # Updating k3s 2 | 3 | ## Before you start! 4 | 5 | Ensure you back up your k3s cluster. This is particularly important if you use 6 | an external datastore or embedded Etcd. Please refer to the below guide to 7 | backing up your k3s datastore: 8 | 9 | https://rancher.com/docs/k3s/latest/en/backup-restore/ 10 | 11 | Also, check your volume backups are also working! 12 | 13 | ## Proceedure 14 | 15 | ### Updates using Ansible 16 | 17 | To update via Ansible, set `k3s_release_version` to the target version you wish 18 | to go to. 
For example, from your `v1.19.3+k3s1` playbook: 19 | 20 | ```yaml 21 | --- 22 | # BEFORE 23 | 24 | - name: Provision k3s cluster 25 | hosts: k3s_cluster 26 | vars: 27 | k3s_release_version: v1.19.3+k3s1 28 | roles: 29 | - name: xanmanning.k3s 30 | ``` 31 | 32 | Updating to `v1.20.2+k3s1`: 33 | 34 | ```yaml 35 | --- 36 | # AFTER 37 | 38 | - name: Provision k3s cluster 39 | hosts: k3s_cluster 40 | vars: 41 | k3s_release_version: v1.20.2+k3s1 42 | roles: 43 | - name: xanmanning.k3s 44 | ``` 45 | 46 | ### Automatic updates 47 | 48 | For automatic updates, consider installing Rancher's 49 | [system-upgrade-controller](https://rancher.com/docs/k3s/latest/en/upgrades/automated/) 50 | 51 | **Please note**, to be able to update using the system-upgrade-controller you 52 | will need to set `k3s_install_hard_links` to `true`. 53 | -------------------------------------------------------------------------------- /documentation/quickstart-cluster.md: -------------------------------------------------------------------------------- 1 | # Quickstart: K3s cluster with a single control node 2 | 3 | This is the quickstart guide to creating your own k3s cluster with one control 4 | plane node. This control plane node will also be a worker. 5 | 6 | :hand: This example requires your Ansible user to be able to connect to the 7 | servers over SSH using key-based authentication. The user also has an entry 8 | in a sudoers file that allows privilege escalation without requiring a 9 | password. 10 | 11 | To test this is the case, run the following check replacing `` 12 | and ``. 
The expected output is `Works` 13 | 14 | `ssh @ 'sudo cat /etc/shadow >/dev/null && echo "Works"'` 15 | 16 | For example: 17 | 18 | ```text 19 | [ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $ ssh ansible@kube-0 'sudo cat /etc/shadow >/dev/null && echo "Works"' 20 | Works 21 | [ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $ 22 | ``` 23 | 24 | ## Directory structure 25 | 26 | Our working directory will have the following files: 27 | 28 | ```text 29 | kubernetes-playground/ 30 | |_ inventory.yml 31 | |_ cluster.yml 32 | ``` 33 | 34 | ## Inventory 35 | 36 | Here's a YAML based example inventory for our servers called `inventory.yml`: 37 | 38 | ```yaml 39 | --- 40 | 41 | k3s_cluster: 42 | hosts: 43 | kube-0: 44 | ansible_user: ansible 45 | ansible_host: 10.10.9.2 46 | ansible_python_interpreter: /usr/bin/python3 47 | kube-1: 48 | ansible_user: ansible 49 | ansible_host: 10.10.9.3 50 | ansible_python_interpreter: /usr/bin/python3 51 | kube-2: 52 | ansible_user: ansible 53 | ansible_host: 10.10.9.4 54 | ansible_python_interpreter: /usr/bin/python3 55 | 56 | ``` 57 | 58 | We can test this works with `ansible -i inventory.yml -m ping all`, expected 59 | result: 60 | 61 | ```text 62 | kube-0 | SUCCESS => { 63 | "changed": false, 64 | "ping": "pong" 65 | } 66 | kube-1 | SUCCESS => { 67 | "changed": false, 68 | "ping": "pong" 69 | } 70 | kube-2 | SUCCESS => { 71 | "changed": false, 72 | "ping": "pong" 73 | } 74 | 75 | ``` 76 | 77 | ## Playbook 78 | 79 | Here is our playbook for the k3s cluster (`cluster.yml`): 80 | 81 | ```yaml 82 | --- 83 | 84 | - name: Build a cluster with a single control node 85 | hosts: k3s_cluster 86 | vars: 87 | k3s_become: true 88 | roles: 89 | - role: xanmanning.k3s 90 | ``` 91 | 92 | ## Execution 93 | 94 | To execute the playbook against our inventory file, we will run the following 95 | command: 96 | 97 | `ansible-playbook -i inventory.yml cluster.yml` 98 | 99 | The output we can expect is similar to the below, with no 
failed or unreachable 100 | nodes. The default behavior of this role is to delegate the first play host as 101 | the control node, so kube-0 will have more changed tasks than others: 102 | 103 | ```text 104 | PLAY RECAP ******************************************************************************************************* 105 | kube-0 : ok=56 changed=11 unreachable=0 failed=0 skipped=28 rescued=0 ignored=0 106 | kube-1 : ok=43 changed=10 unreachable=0 failed=0 skipped=32 rescued=0 ignored=0 107 | kube-2 : ok=43 changed=10 unreachable=0 failed=0 skipped=32 rescued=0 ignored=0 108 | ``` 109 | 110 | ## Testing 111 | 112 | After logging into kube-0, we can test that k3s is running across the cluster, 113 | that all nodes are ready and that everything is ready to execute our Kubernetes 114 | workloads by running the following: 115 | 116 | - `sudo kubectl get nodes -o wide` 117 | - `sudo kubectl get pods -o wide --all-namespaces` 118 | 119 | :hand: Note we are using `sudo` because we need to be root to access the 120 | kube config for this node. This behavior can be changed with specifying 121 | `write-kubeconfig-mode: '0644'` in `k3s_server`. 
122 | 123 | **Get Nodes**: 124 | 125 | ```text 126 | ansible@kube-0:~$ sudo kubectl get nodes -o wide 127 | NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME 128 | kube-0 Ready master 34s v1.19.4+k3s1 10.0.2.15 Ubuntu 20.04.1 LTS 5.4.0-56-generic containerd://1.4.1-k3s1 129 | kube-2 Ready 14s v1.19.4+k3s1 10.0.2.17 Ubuntu 20.04.1 LTS 5.4.0-56-generic containerd://1.4.1-k3s1 130 | kube-1 Ready 14s v1.19.4+k3s1 10.0.2.16 Ubuntu 20.04.1 LTS 5.4.0-56-generic containerd://1.4.1-k3s1 131 | ansible@kube-0:~$ 132 | ``` 133 | 134 | **Get Pods**: 135 | 136 | ```text 137 | ansible@kube-0:~$ sudo kubectl get pods -o wide --all-namespaces 138 | NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES 139 | kube-system local-path-provisioner-7ff9579c6-72j8x 1/1 Running 0 55s 10.42.2.2 kube-1 140 | kube-system metrics-server-7b4f8b595-lkspj 1/1 Running 0 55s 10.42.1.2 kube-2 141 | kube-system helm-install-traefik-b6vnt 0/1 Completed 0 55s 10.42.0.3 kube-0 142 | kube-system coredns-66c464876b-llsh7 1/1 Running 0 55s 10.42.0.2 kube-0 143 | kube-system svclb-traefik-jrqg7 2/2 Running 0 27s 10.42.1.3 kube-2 144 | kube-system svclb-traefik-gh65q 2/2 Running 0 27s 10.42.0.4 kube-0 145 | kube-system svclb-traefik-5z7zp 2/2 Running 0 27s 10.42.2.3 kube-1 146 | kube-system traefik-5dd496474-l2k74 1/1 Running 0 27s 10.42.1.4 kube-2 147 | ``` 148 | -------------------------------------------------------------------------------- /documentation/quickstart-ha-cluster.md: -------------------------------------------------------------------------------- 1 | # Quickstart: K3s cluster with a HA control plane using embedded etcd 2 | 3 | This is the quickstart guide to creating your own 3 node k3s cluster with a 4 | highly available control plane using the embedded etcd datastore. 5 | The control plane will all be workers as well. 
6 | 7 | :hand: This example requires your Ansible user to be able to connect to the 8 | servers over SSH using key-based authentication. The user is also has an entry 9 | in a sudoers file that allows privilege escalation without requiring a 10 | password. 11 | 12 | To test this is the case, run the following check replacing `` 13 | and ``. The expected output is `Works` 14 | 15 | `ssh @ 'sudo cat /etc/shadow >/dev/null && echo "Works"'` 16 | 17 | For example: 18 | 19 | ```text 20 | [ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $ ssh ansible@kube-0 'sudo cat /etc/shadow >/dev/null && echo "Works"' 21 | Works 22 | [ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $ 23 | ``` 24 | 25 | ## Directory structure 26 | 27 | Our working directory will have the following files: 28 | 29 | ```text 30 | kubernetes-playground/ 31 | |_ inventory.yml 32 | |_ ha_cluster.yml 33 | ``` 34 | 35 | ## Inventory 36 | 37 | Here's a YAML based example inventory for our servers called `inventory.yml`: 38 | 39 | ```yaml 40 | --- 41 | 42 | # We're adding k3s_control_node to each host, this can be done in host_vars/ 43 | # or group_vars/ as well - but for simplicity we are setting it here. 
44 | k3s_cluster: 45 | hosts: 46 | kube-0: 47 | ansible_user: ansible 48 | ansible_host: 10.10.9.2 49 | ansible_python_interpreter: /usr/bin/python3 50 | k3s_control_node: true 51 | kube-1: 52 | ansible_user: ansible 53 | ansible_host: 10.10.9.3 54 | ansible_python_interpreter: /usr/bin/python3 55 | k3s_control_node: true 56 | kube-2: 57 | ansible_user: ansible 58 | ansible_host: 10.10.9.4 59 | ansible_python_interpreter: /usr/bin/python3 60 | k3s_control_node: true 61 | 62 | ``` 63 | 64 | We can test this works with `ansible -i inventory.yml -m ping all`, expected 65 | result: 66 | 67 | ```text 68 | kube-0 | SUCCESS => { 69 | "changed": false, 70 | "ping": "pong" 71 | } 72 | kube-1 | SUCCESS => { 73 | "changed": false, 74 | "ping": "pong" 75 | } 76 | kube-2 | SUCCESS => { 77 | "changed": false, 78 | "ping": "pong" 79 | } 80 | 81 | ``` 82 | 83 | ## Playbook 84 | 85 | Here is our playbook for the k3s cluster (`ha_cluster.yml`): 86 | 87 | ```yaml 88 | --- 89 | 90 | - name: Build a cluster with HA control plane 91 | hosts: k3s_cluster 92 | vars: 93 | k3s_become: true 94 | k3s_etcd_datastore: true 95 | k3s_use_experimental: true # Note this is required for k3s < v1.19.5+k3s1 96 | roles: 97 | - role: xanmanning.k3s 98 | ``` 99 | 100 | ## Execution 101 | 102 | To execute the playbook against our inventory file, we will run the following 103 | command: 104 | 105 | `ansible-playbook -i inventory.yml ha_cluster.yml` 106 | 107 | The output we can expect is similar to the below, with no failed or unreachable 108 | nodes. 
The default behavior of this role is to delegate the first play host as 109 | the primary control node, so kube-0 will have more changed tasks than others: 110 | 111 | ```text 112 | PLAY RECAP ******************************************************************************************************* 113 | kube-0 : ok=53 changed=8 unreachable=0 failed=0 skipped=30 rescued=0 ignored=0 114 | kube-1 : ok=47 changed=10 unreachable=0 failed=0 skipped=28 rescued=0 ignored=0 115 | kube-2 : ok=47 changed=9 unreachable=0 failed=0 skipped=28 rescued=0 ignored=0 116 | ``` 117 | 118 | ## Testing 119 | 120 | After logging into any of the servers (it doesn't matter), we can test that k3s 121 | is running across the cluster, that all nodes are ready and that everything is 122 | ready to execute our Kubernetes workloads by running the following: 123 | 124 | - `sudo kubectl get nodes -o wide` 125 | - `sudo kubectl get pods -o wide --all-namespaces` 126 | 127 | :hand: Note we are using `sudo` because we need to be root to access the 128 | kube config for this node. This behavior can be changed with specifying 129 | `write-kubeconfig-mode: '0644'` in `k3s_server`. 
130 | 131 | **Get Nodes**: 132 | 133 | ```text 134 | ansible@kube-0:~$ sudo kubectl get nodes -o wide 135 | NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME 136 | kube-0 Ready etcd,master 2m58s v1.19.4+k3s1 10.10.9.2 10.10.9.2 Ubuntu 20.04.1 LTS 5.4.0-56-generic containerd://1.4.1-k3s1 137 | kube-1 Ready etcd,master 2m22s v1.19.4+k3s1 10.10.9.3 10.10.9.3 Ubuntu 20.04.1 LTS 5.4.0-56-generic containerd://1.4.1-k3s1 138 | kube-2 Ready etcd,master 2m10s v1.19.4+k3s1 10.10.9.4 10.10.9.4 Ubuntu 20.04.1 LTS 5.4.0-56-generic containerd://1.4.1-k3s1 139 | ``` 140 | 141 | **Get Pods**: 142 | 143 | ```text 144 | ansible@kube-0:~$ sudo kubectl get pods -o wide --all-namespaces 145 | NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES 146 | kube-system coredns-66c464876b-rhgn6 1/1 Running 0 3m38s 10.42.0.2 kube-0 147 | kube-system helm-install-traefik-vwglv 0/1 Completed 0 3m39s 10.42.0.3 kube-0 148 | kube-system local-path-provisioner-7ff9579c6-d5xpb 1/1 Running 0 3m38s 10.42.0.5 kube-0 149 | kube-system metrics-server-7b4f8b595-nhbt8 1/1 Running 0 3m38s 10.42.0.4 kube-0 150 | kube-system svclb-traefik-9lzcq 2/2 Running 0 2m56s 10.42.1.2 kube-1 151 | kube-system svclb-traefik-vq487 2/2 Running 0 2m45s 10.42.2.2 kube-2 152 | kube-system svclb-traefik-wkwkk 2/2 Running 0 3m1s 10.42.0.7 kube-0 153 | kube-system traefik-5dd496474-lw6x8 1/1 Running 0 3m1s 10.42.0.6 kube-0 154 | ``` 155 | -------------------------------------------------------------------------------- /documentation/quickstart-single-node.md: -------------------------------------------------------------------------------- 1 | # Quickstart: K3s single node 2 | 3 | This is the quickstart guide to creating your own single-node k3s "cluster". 4 | 5 | :hand: This example requires your Ansible user to be able to connect to the 6 | server over SSH using key-based authentication. 
The user is also has an entry 7 | in a sudoers file that allows privilege escalation without requiring a 8 | password. 9 | 10 | To test this is the case, run the following check replacing `` 11 | and ``. The expected output is `Works` 12 | 13 | `ssh @ 'sudo cat /etc/shadow >/dev/null && echo "Works"'` 14 | 15 | For example: 16 | 17 | ```text 18 | [ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $ ssh ansible@kube-0 'sudo cat /etc/shadow >/dev/null && echo "Works"' 19 | Works 20 | [ xmanning@dreadfort:~/git/kubernetes-playground ] (master) $ 21 | ``` 22 | 23 | ## Directory structure 24 | 25 | Our working directory will have the following files: 26 | 27 | ```text 28 | kubernetes-playground/ 29 | |_ inventory.yml 30 | |_ single_node.yml 31 | ``` 32 | 33 | ## Inventory 34 | 35 | Here's a YAML based example inventory for our server called `inventory.yml`: 36 | 37 | ```yaml 38 | --- 39 | 40 | k3s_cluster: 41 | hosts: 42 | kube-0: 43 | ansible_user: ansible 44 | ansible_host: 10.10.9.2 45 | ansible_python_interpreter: /usr/bin/python3 46 | 47 | ``` 48 | 49 | We can test this works with `ansible -i inventory.yml -m ping all`, expected 50 | result: 51 | 52 | ```text 53 | kube-0 | SUCCESS => { 54 | "changed": false, 55 | "ping": "pong" 56 | } 57 | ``` 58 | 59 | ## Playbook 60 | 61 | Here is our playbook for a single node k3s cluster (`single_node.yml`): 62 | 63 | ```yaml 64 | --- 65 | 66 | - name: Build a single node k3s cluster 67 | hosts: kube-0 68 | vars: 69 | k3s_become: true 70 | roles: 71 | - role: xanmanning.k3s 72 | ``` 73 | 74 | ## Execution 75 | 76 | To execute the playbook against our inventory file, we will run the following 77 | command: 78 | 79 | `ansible-playbook -i inventory.yml single_node.yml` 80 | 81 | The output we can expect is similar to the below, with no failed or unreachable 82 | nodes: 83 | 84 | ```text 85 | PLAY RECAP ******************************************************************************************************* 86 | kube-0 : 
ok=39 changed=8 unreachable=0 failed=0 skipped=39 rescued=0 ignored=0 87 | ``` 88 | 89 | ## Testing 90 | 91 | After logging into the server, we can test that k3s is running and that it is 92 | ready to execute our Kubernetes workloads by running the following: 93 | 94 | - `sudo kubectl get nodes` 95 | - `sudo kubectl get pods -o wide --all-namespaces` 96 | 97 | :hand: Note we are using `sudo` because we need to be root to access the 98 | kube config for this node. This behavior can be changed with specifying 99 | `write-kubeconfig-mode: '0644'` in `k3s_server`. 100 | 101 | **Get Nodes**: 102 | 103 | ```text 104 | ansible@kube-0:~$ sudo kubectl get nodes 105 | NAME STATUS ROLES AGE VERSION 106 | kube-0 Ready master 5m27s v1.19.4+k3s 107 | ansible@kube-0:~$ 108 | ``` 109 | 110 | **Get Pods**: 111 | 112 | ```text 113 | ansible@kube-0:~$ sudo kubectl get pods --all-namespaces -o wide 114 | NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES 115 | kube-system metrics-server-7b4f8b595-k692h 1/1 Running 0 9m38s 10.42.0.2 kube-0 116 | kube-system local-path-provisioner-7ff9579c6-5lgzb 1/1 Running 0 9m38s 10.42.0.3 kube-0 117 | kube-system coredns-66c464876b-xg42q 1/1 Running 0 9m38s 10.42.0.5 kube-0 118 | kube-system helm-install-traefik-tdpcs 0/1 Completed 0 9m38s 10.42.0.4 kube-0 119 | kube-system svclb-traefik-hk248 2/2 Running 0 9m4s 10.42.0.7 kube-0 120 | kube-system traefik-5dd496474-bf4kv 1/1 Running 0 9m4s 10.42.0.6 kube-0 121 | ``` 122 | -------------------------------------------------------------------------------- /handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Reload systemd 4 | ansible.builtin.systemd: 5 | daemon_reload: true 6 | scope: "{{ k3s_systemd_context }}" 7 | become: "{{ k3s_become }}" 8 | 9 | - name: Reload service 10 | ansible.builtin.set_fact: 11 | k3s_service_reloaded: true 12 | become: "{{ k3s_become }}" 13 | 14 | - name: Restart k3s systemd 
15 | ansible.builtin.systemd: 16 | name: k3s 17 | state: restarted 18 | scope: "{{ k3s_systemd_context }}" 19 | enabled: "{{ k3s_start_on_boot }}" 20 | retries: 3 21 | delay: 3 22 | register: k3s_systemd_restart_k3s 23 | failed_when: 24 | - k3s_systemd_restart_k3s is not success 25 | - not ansible_check_mode 26 | become: "{{ k3s_become }}" 27 | 28 | - name: Restart k3s service 29 | ansible.builtin.service: 30 | name: k3s 31 | state: restarted 32 | enabled: "{{ k3s_start_on_boot }}" 33 | retries: 3 34 | delay: 3 35 | register: k3s_service_restart_k3s 36 | failed_when: 37 | - k3s_service_restart_k3s is not success 38 | - not ansible_check_mode 39 | become: "{{ k3s_become }}" 40 | -------------------------------------------------------------------------------- /meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | galaxy_info: 4 | role_name: k3s 5 | namespace: xanmanning 6 | author: Xan Manning 7 | description: Ansible role for installing k3s as either a standalone server or HA cluster 8 | company: Pyrat Ltd. 9 | github_branch: main 10 | 11 | # If the issue tracker for your role is not on github, uncomment the 12 | # next line and provide a value 13 | # issue_tracker_url: http://example.com/issue/tracker 14 | 15 | # Some suggested licenses: 16 | # - BSD (default) 17 | # - MIT 18 | # - GPLv2 19 | # - GPLv3 20 | # - Apache 21 | # - CC-BY 22 | license: BSD 23 | 24 | min_ansible_version: '2.9' 25 | 26 | # If this a Container Enabled role, provide the minimum Ansible Container version. 27 | # min_ansible_container_version: 28 | 29 | # Optionally specify the branch Galaxy will use when accessing the GitHub 30 | # repo for this role. During role install, if no tags are available, 31 | # Galaxy will use this branch. During import Galaxy will access files on 32 | # this branch. If Travis integration is configured, only notifications for this 33 | # branch will be accepted. 
Otherwise, in all cases, the repo's default branch 34 | # (usually main) will be used. 35 | # github_branch: 36 | 37 | # 38 | # platforms is a list of platforms, and each platform has a name and a list of versions. 39 | # 40 | platforms: 41 | - name: Alpine 42 | versions: 43 | - all 44 | - name: Archlinux 45 | versions: 46 | - all 47 | - name: EL 48 | versions: 49 | - 7 50 | - 8 51 | - name: Amazon 52 | - name: Fedora 53 | versions: 54 | - 29 55 | - 30 56 | - 31 57 | - name: Debian 58 | versions: 59 | - buster 60 | - jessie 61 | - stretch 62 | - name: SLES 63 | versions: 64 | - 15 65 | - name: Ubuntu 66 | versions: 67 | - xenial 68 | - bionic 69 | 70 | galaxy_tags: 71 | - k3s 72 | - k8s 73 | - kubernetes 74 | - containerd 75 | - cluster 76 | - lightweight 77 | # List tags for your role here, one per line. A tag is a keyword that describes 78 | # and categorizes the role. Users find roles by searching for tags. Be sure to 79 | # remove the '[]' above, if you add tags to this list. 80 | # 81 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 82 | # Maximum 20 tags per role. 83 | 84 | dependencies: [] 85 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 86 | # if you add dependencies to this list. 
87 | -------------------------------------------------------------------------------- /molecule/autodeploy/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: node* 4 | become: true 5 | vars: 6 | molecule_is_test: true 7 | k3s_release_version: v1.22 8 | k3s_build_cluster: false 9 | k3s_control_token: 55ba04e5-e17d-4535-9170-3e4245453f4d 10 | k3s_install_dir: /opt/k3s/bin 11 | k3s_config_file: /opt/k3s/etc/k3s_config.yaml 12 | k3s_server: 13 | data-dir: /var/lib/k3s-io 14 | default-local-storage-path: /var/lib/k3s-io/local-storage 15 | disable: 16 | - metrics-server 17 | - traefik 18 | # k3s_agent: 19 | # snapshotter: native 20 | k3s_server_manifests_templates: 21 | - "molecule/autodeploy/templates/00-ns-monitoring.yml.j2" 22 | k3s_server_manifests_urls: 23 | - url: https://raw.githubusercontent.com/metallb/metallb/v0.9.6/manifests/namespace.yaml 24 | filename: 05-metallb-namespace.yml 25 | k3s_service_env_vars: 26 | K3S_TEST_VAR: "Hello world!" 
27 | roles: 28 | - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" 29 | -------------------------------------------------------------------------------- /molecule/autodeploy/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dependency: 4 | name: galaxy 5 | driver: 6 | name: docker 7 | scenario: 8 | test_sequence: 9 | - dependency 10 | - cleanup 11 | - destroy 12 | - syntax 13 | - create 14 | - prepare 15 | - check 16 | - converge 17 | - idempotence 18 | - side_effect 19 | - verify 20 | - cleanup 21 | - destroy 22 | platforms: 23 | - name: node1 24 | image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"} 25 | command: ${MOLECULE_DOCKER_COMMAND:-""} 26 | volumes: 27 | - /sys/fs/cgroup:/sys/fs/cgroup:ro 28 | privileged: true 29 | pre_build_image: ${MOLECULE_PREBUILT:-true} 30 | networks: 31 | - name: k3snet 32 | - name: node2 33 | image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"} 34 | command: ${MOLECULE_DOCKER_COMMAND:-""} 35 | volumes: 36 | - /sys/fs/cgroup:/sys/fs/cgroup:ro 37 | privileged: true 38 | pre_build_image: ${MOLECULE_PREBUILT:-true} 39 | networks: 40 | - name: k3snet 41 | - name: node3 42 | image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"} 43 | command: ${MOLECULE_DOCKER_COMMAND:-""} 44 | volumes: 45 | - /sys/fs/cgroup:/sys/fs/cgroup:ro 46 | privileged: true 47 | pre_build_image: ${MOLECULE_PREBUILT:-true} 48 | networks: 49 | - name: k3snet 50 | provisioner: 51 | name: ansible 52 | options: 53 | verbose: true 54 | verifier: 55 | name: ansible 56 | -------------------------------------------------------------------------------- /molecule/autodeploy/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare 3 | hosts: node* 4 | become: true 5 | tasks: 6 | - name: Ensure apt cache is updated and iptables is installed 7 | ansible.builtin.apt: 8 | 
name: iptables 9 | state: present 10 | update_cache: true 11 | when: ansible_pkg_mgr == 'apt' 12 | 13 | - name: Ensure install directory and configuration directory exists 14 | ansible.builtin.file: 15 | path: "/opt/k3s/{{ item }}" 16 | state: directory 17 | mode: 0755 18 | loop: 19 | - bin 20 | - etc 21 | 22 | - name: Ensure data directory exists 23 | ansible.builtin.file: 24 | path: "/var/lib/k3s-io" 25 | state: directory 26 | mode: 0755 27 | -------------------------------------------------------------------------------- /molecule/autodeploy/templates/00-ns-monitoring.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: monitoring 5 | -------------------------------------------------------------------------------- /molecule/autodeploy/verify.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This is an example playbook to execute Ansible tests. 
3 | 4 | - name: Verify 5 | hosts: all 6 | tasks: 7 | - name: Example assertion 8 | ansible.builtin.assert: 9 | that: true 10 | -------------------------------------------------------------------------------- /molecule/debug/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | become: true 5 | vars: 6 | pyratlabs_issue_controller_dump: true 7 | # k3s_agent: 8 | # snapshotter: native 9 | pre_tasks: 10 | - name: Ensure k3s_debug is set 11 | ansible.builtin.set_fact: 12 | k3s_debug: true 13 | roles: 14 | - xanmanning.k3s 15 | -------------------------------------------------------------------------------- /molecule/debug/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dependency: 4 | name: galaxy 5 | driver: 6 | name: docker 7 | scenario: 8 | test_sequence: 9 | - dependency 10 | - cleanup 11 | - destroy 12 | - syntax 13 | - create 14 | - prepare 15 | - check 16 | - converge 17 | - idempotence 18 | - side_effect 19 | - verify 20 | - cleanup 21 | - destroy 22 | platforms: 23 | - name: node1 24 | image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"} 25 | command: ${MOLECULE_DOCKER_COMMAND:-""} 26 | volumes: 27 | - /sys/fs/cgroup:/sys/fs/cgroup:ro 28 | privileged: true 29 | pre_build_image: ${MOLECULE_PREBUILT:-true} 30 | networks: 31 | - name: k3snet 32 | - name: node2 33 | image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"} 34 | command: ${MOLECULE_DOCKER_COMMAND:-""} 35 | volumes: 36 | - /sys/fs/cgroup:/sys/fs/cgroup:ro 37 | privileged: true 38 | pre_build_image: ${MOLECULE_PREBUILT:-true} 39 | networks: 40 | - name: k3snet 41 | - name: node3 42 | image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"} 43 | command: ${MOLECULE_DOCKER_COMMAND:-""} 44 | volumes: 45 | - /sys/fs/cgroup:/sys/fs/cgroup:ro 46 | privileged: true 47 | pre_build_image: 
${MOLECULE_PREBUILT:-true} 48 | networks: 49 | - name: k3snet 50 | provisioner: 51 | name: ansible 52 | options: 53 | verbose: true 54 | verifier: 55 | name: ansible 56 | -------------------------------------------------------------------------------- /molecule/debug/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare 3 | hosts: all 4 | tasks: 5 | - name: Ensure apt cache is updated and iptables is installed 6 | ansible.builtin.apt: 7 | name: iptables 8 | state: present 9 | update_cache: true 10 | when: ansible_pkg_mgr == 'apt' 11 | -------------------------------------------------------------------------------- /molecule/debug/verify.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This is an example playbook to execute Ansible tests. 3 | 4 | - name: Verify 5 | hosts: all 6 | tasks: 7 | - name: Example assertion 8 | ansible.builtin.assert: 9 | that: true 10 | -------------------------------------------------------------------------------- /molecule/default/Dockerfile.j2: -------------------------------------------------------------------------------- 1 | # Molecule managed 2 | 3 | {% if item.registry is defined %} 4 | FROM {{ item.registry.url }}/{{ item.image }} 5 | {% else %} 6 | FROM {{ item.image }} 7 | {% endif %} 8 | 9 | RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python systemd sudo bash ca-certificates && apt-get clean; \ 10 | elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python systemd sudo python-devel python*-dnf bash && dnf clean all; \ 11 | elif [ $(command -v yum) ]; then yum makecache fast && yum install -y python systemd sudo yum-plugin-ovl bash && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \ 12 | elif [ $(command -v zypper) ]; then zypper refresh && zypper install -y python systemd sudo bash python-xml && zypper clean -a; \ 13 | elif [ $(command -v 
apk) ]; then apk update && apk add --no-cache python sudo systemd bash ca-certificates; \ 14 | elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python systemd sudo bash ca-certificates && xbps-remove -O; fi 15 | 16 | RUN (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \ 17 | rm -f /lib/systemd/system/multi-user.target.wants/*; \ 18 | rm -f /etc/systemd/system/*.wants/*; \ 19 | rm -f /lib/systemd/system/local-fs.target.wants/*; \ 20 | rm -f /lib/systemd/system/sockets.target.wants/*udev*; \ 21 | rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \ 22 | rm -f /lib/systemd/system/basic.target.wants/*; \ 23 | rm -f /lib/systemd/system/anaconda.target.wants/*; 24 | 25 | VOLUME ["/sys/fs/cgroup"] 26 | CMD ["/usr/sbin/init"] 27 | -------------------------------------------------------------------------------- /molecule/default/INSTALL.rst: -------------------------------------------------------------------------------- 1 | ******************************** 2 | Docker driver installation guide 3 | ******************************** 4 | 5 | Requirements 6 | ============ 7 | 8 | * Docker Engine 9 | 10 | Install 11 | ======= 12 | 13 | Please refer to the `Virtual environment`_ documentation for installation best 14 | practices. If not using a virtual environment, please consider passing the 15 | widely recommended `'--user' flag`_ when invoking ``pip``. 16 | 17 | .. _Virtual environment: https://virtualenv.pypa.io/en/latest/ 18 | .. _'--user' flag: https://packaging.python.org/tutorials/installing-packages/#installing-to-the-user-site 19 | 20 | ..
code-block:: bash 21 | 22 | $ pip install 'molecule[docker]' 23 | -------------------------------------------------------------------------------- /molecule/default/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | become: true 5 | roles: 6 | - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" 7 | vars: 8 | molecule_is_test: true 9 | k3s_install_hard_links: true 10 | k3s_release_version: stable 11 | # k3s_agent: 12 | # snapshotter: native 13 | -------------------------------------------------------------------------------- /molecule/default/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dependency: 4 | name: galaxy 5 | driver: 6 | name: docker 7 | scenario: 8 | test_sequence: 9 | - dependency 10 | - cleanup 11 | - destroy 12 | - syntax 13 | - create 14 | - prepare 15 | - check 16 | - converge 17 | - idempotence 18 | - side_effect 19 | - verify 20 | - cleanup 21 | - destroy 22 | platforms: 23 | - name: node1 24 | image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"} 25 | command: ${MOLECULE_DOCKER_COMMAND:-""} 26 | volumes: 27 | - /sys/fs/cgroup:/sys/fs/cgroup:ro 28 | privileged: true 29 | pre_build_image: ${MOLECULE_PREBUILT:-true} 30 | networks: 31 | - name: k3snet 32 | - name: node2 33 | image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"} 34 | command: ${MOLECULE_DOCKER_COMMAND:-""} 35 | volumes: 36 | - /sys/fs/cgroup:/sys/fs/cgroup:ro 37 | privileged: true 38 | pre_build_image: ${MOLECULE_PREBUILT:-true} 39 | networks: 40 | - name: k3snet 41 | - name: node3 42 | image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"} 43 | command: ${MOLECULE_DOCKER_COMMAND:-""} 44 | volumes: 45 | - /sys/fs/cgroup:/sys/fs/cgroup:ro 46 | privileged: true 47 | pre_build_image: ${MOLECULE_PREBUILT:-true} 48 | networks: 49 | - name: k3snet 50 | 
provisioner: 51 | name: ansible 52 | options: 53 | verbose: true 54 | verifier: 55 | name: ansible 56 | -------------------------------------------------------------------------------- /molecule/default/playbook-download.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | become: true 5 | vars: 6 | molecule_is_test: true 7 | k3s_state: downloaded 8 | roles: 9 | - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" 10 | -------------------------------------------------------------------------------- /molecule/default/playbook-restart-cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | become: true 5 | vars: 6 | molecule_is_test: true 7 | k3s_state: restarted 8 | roles: 9 | - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" 10 | -------------------------------------------------------------------------------- /molecule/default/playbook-rootless.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: node1 4 | become: true 5 | become_user: k3suser 6 | vars: 7 | molecule_is_test: true 8 | k3s_use_experimental: true 9 | k3s_server: 10 | rootless: true 11 | k3s_agent: 12 | rootless: true 13 | k3s_install_dir: "/home/{{ ansible_user_id }}/bin" 14 | roles: 15 | - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" 16 | -------------------------------------------------------------------------------- /molecule/default/playbook-standalone.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | become: true 5 | vars: 6 | molecule_is_test: true 7 | k3s_build_cluster: false 8 | roles: 9 | - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" 10 | 
-------------------------------------------------------------------------------- /molecule/default/playbook-start-cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | become: true 5 | vars: 6 | molecule_is_test: true 7 | k3s_state: started 8 | roles: 9 | - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" 10 | -------------------------------------------------------------------------------- /molecule/default/playbook-stop-cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | become: true 5 | vars: 6 | molecule_is_test: true 7 | k3s_state: stopped 8 | roles: 9 | - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" 10 | -------------------------------------------------------------------------------- /molecule/default/playbook-uninstall-cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | become: true 5 | vars: 6 | molecule_is_test: true 7 | k3s_state: uninstalled 8 | roles: 9 | - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" 10 | -------------------------------------------------------------------------------- /molecule/default/prepare-rootless.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare 3 | hosts: node1 4 | become: true 5 | tasks: 6 | - name: Ensure a user group exists 7 | ansible.builtin.group: 8 | name: user 9 | state: present 10 | 11 | - name: Ensure a normal user exists 12 | ansible.builtin.user: 13 | name: k3suser 14 | group: user 15 | state: present 16 | 17 | - name: Ensure a normal user has bin directory 18 | ansible.builtin.file: 19 | path: /home/k3suser/bin 20 | state: directory 21 | owner: k3suser 22 | group: user 23 | mode: 0700 24 | 
-------------------------------------------------------------------------------- /molecule/default/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare 3 | hosts: all 4 | tasks: 5 | - name: Ensure apt cache is updated and iptables is installed 6 | ansible.builtin.apt: 7 | name: iptables 8 | state: present 9 | update_cache: true 10 | when: ansible_pkg_mgr == 'apt' 11 | -------------------------------------------------------------------------------- /molecule/default/tests/test_default.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import testinfra.utils.ansible_runner 4 | 5 | testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( 6 | os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') 7 | 8 | 9 | def test_hosts_file(host): 10 | f = host.file('/etc/hosts') 11 | 12 | assert f.exists 13 | assert f.user == 'root' 14 | assert f.group == 'root' 15 | -------------------------------------------------------------------------------- /molecule/default/tests/test_default.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PyratLabs/ansible-role-k3s/2a44b15921d24cae29ddcc3aa6d35cb65e56747c/molecule/default/tests/test_default.pyc -------------------------------------------------------------------------------- /molecule/highavailabilitydb/Dockerfile.j2: -------------------------------------------------------------------------------- 1 | # Molecule managed 2 | 3 | {% if item.registry is defined %} 4 | FROM {{ item.registry.url }}/{{ item.image }} 5 | {% else %} 6 | FROM {{ item.image }} 7 | {% endif %} 8 | -------------------------------------------------------------------------------- /molecule/highavailabilitydb/INSTALL.rst: -------------------------------------------------------------------------------- 1 | ******* 2 | Docker driver installation guide 3 | ******* 4 | 5 | 
Requirements 6 | ============ 7 | 8 | * Docker Engine 9 | 10 | Install 11 | ======= 12 | 13 | Please refer to the `Virtual environment`_ documentation for installation best 14 | practices. If not using a virtual environment, please consider passing the 15 | widely recommended `'--user' flag`_ when invoking ``pip``. 16 | 17 | .. _Virtual environment: https://virtualenv.pypa.io/en/latest/ 18 | .. _'--user' flag: https://packaging.python.org/tutorials/installing-packages/#installing-to-the-user-site 19 | 20 | .. code-block:: bash 21 | 22 | $ pip install 'molecule[docker]' 23 | -------------------------------------------------------------------------------- /molecule/highavailabilitydb/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Converge 4 | hosts: node* 5 | become: true 6 | vars: 7 | molecule_is_test: true 8 | k3s_registration_address: loadbalancer 9 | k3s_control_token: 55ba04e5-e17d-4535-9170-3e4245453f4d 10 | k3s_server: 11 | datastore-endpoint: "postgres://postgres:verybadpass@database:5432/postgres?sslmode=disable" 12 | # k3s_agent: 13 | # snapshotter: native 14 | k3s_service_env_file: /tmp/k3s.env 15 | pre_tasks: 16 | - name: Set each node to be a control node 17 | ansible.builtin.set_fact: 18 | k3s_control_node: true 19 | when: inventory_hostname in ['node2', 'node3'] 20 | roles: 21 | - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" 22 | -------------------------------------------------------------------------------- /molecule/highavailabilitydb/haproxy-loadbalancer.conf.j2: -------------------------------------------------------------------------------- 1 | frontend loadbalancer 2 | bind *:6443 3 | mode tcp 4 | default_backend control_nodes 5 | timeout client 1m 6 | 7 | backend control_nodes 8 | mode tcp 9 | balance roundrobin 10 | server node2 node2:6443 11 | server node3 node3:6443 12 | timeout connect 30s 13 | timeout server 30m 14 | 
-------------------------------------------------------------------------------- /molecule/highavailabilitydb/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dependency: 4 | name: galaxy 5 | driver: 6 | name: docker 7 | scenario: 8 | test_sequence: 9 | - dependency 10 | - cleanup 11 | - destroy 12 | - syntax 13 | - create 14 | - prepare 15 | - check 16 | - converge 17 | - idempotence 18 | - side_effect 19 | - verify 20 | - cleanup 21 | - destroy 22 | platforms: 23 | - name: node1 24 | image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"} 25 | command: ${MOLECULE_DOCKER_COMMAND:-""} 26 | volumes: 27 | - /sys/fs/cgroup:/sys/fs/cgroup:ro 28 | privileged: true 29 | pre_build_image: ${MOLECULE_PREBUILT:-true} 30 | networks: 31 | - name: k3snet 32 | - name: node2 33 | image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"} 34 | command: ${MOLECULE_DOCKER_COMMAND:-""} 35 | volumes: 36 | - /sys/fs/cgroup:/sys/fs/cgroup:ro 37 | privileged: true 38 | pre_build_image: ${MOLECULE_PREBUILT:-true} 39 | networks: 40 | - name: k3snet 41 | - name: node3 42 | image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"} 43 | command: ${MOLECULE_DOCKER_COMMAND:-""} 44 | volumes: 45 | - /sys/fs/cgroup:/sys/fs/cgroup:ro 46 | privileged: true 47 | pre_build_image: ${MOLECULE_PREBUILT:-true} 48 | networks: 49 | - name: k3snet 50 | - name: database 51 | image: postgres:11-alpine 52 | pre_build_image: true 53 | command: "postgres" 54 | env: 55 | POSTGRES_PASSWORD: "verybadpass" 56 | networks: 57 | - name: k3snet 58 | - name: loadbalancer 59 | image: geerlingguy/docker-rockylinux8-ansible:latest 60 | pre_build_image: true 61 | ports: 62 | - "6443:6443" 63 | networks: 64 | - name: k3snet 65 | provisioner: 66 | name: ansible 67 | options: 68 | verbose: true 69 | -------------------------------------------------------------------------------- 
/molecule/highavailabilitydb/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare Load Balancer 3 | hosts: loadbalancer 4 | tasks: 5 | - name: Ensure apt cache is updated 6 | ansible.builtin.apt: 7 | update_cache: true 8 | when: ansible_pkg_mgr == 'apt' 9 | 10 | - name: Ensure HAProxy is installed 11 | ansible.builtin.package: 12 | name: haproxy 13 | state: present 14 | 15 | - name: Ensure HAProxy config directory exists 16 | ansible.builtin.file: 17 | path: /usr/local/etc/haproxy 18 | state: directory 19 | mode: 0755 20 | 21 | - name: Ensure HAProxy is configured 22 | ansible.builtin.template: 23 | src: haproxy-loadbalancer.conf.j2 24 | dest: /usr/local/etc/haproxy/haproxy.cfg 25 | mode: 0644 26 | 27 | - name: Ensure HAProxy service is started 28 | ansible.builtin.command: 29 | cmd: haproxy -D -f /usr/local/etc/haproxy/haproxy.cfg -p /var/run/haproxy.pid 30 | args: 31 | creates: /var/run/haproxy.pid 32 | 33 | - name: Prepare nodes 34 | hosts: node* 35 | tasks: 36 | - name: Ensure apt cache is updated and iptables is installed 37 | ansible.builtin.apt: 38 | name: iptables 39 | state: present 40 | update_cache: true 41 | when: ansible_pkg_mgr == 'apt' 42 | 43 | - name: Ensure environment file exists for k3s_service_env_file 44 | ansible.builtin.lineinfile: 45 | path: /tmp/k3s.env 46 | line: "THISHOST={{ ansible_hostname }}" 47 | mode: 0644 48 | create: true 49 | -------------------------------------------------------------------------------- /molecule/highavailabilitydb/tests/test_default.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import testinfra.utils.ansible_runner 4 | 5 | testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( 6 | os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') 7 | 8 | 9 | def test_hosts_file(host): 10 | f = host.file('/etc/hosts') 11 | 12 | assert f.exists 13 | assert f.user == 'root' 14 | assert 
f.group == 'root' 15 | -------------------------------------------------------------------------------- /molecule/highavailabilitydb/tests/test_default.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PyratLabs/ansible-role-k3s/2a44b15921d24cae29ddcc3aa6d35cb65e56747c/molecule/highavailabilitydb/tests/test_default.pyc -------------------------------------------------------------------------------- /molecule/highavailabilityetcd/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Converge 4 | hosts: node* 5 | become: true 6 | vars: 7 | molecule_is_test: true 8 | k3s_release_version: "v1.21" 9 | k3s_use_experimental: true 10 | k3s_etcd_datastore: true 11 | k3s_server: 12 | secrets-encryption: true 13 | k3s_agent: 14 | node-ip: "{{ ansible_default_ipv4.address }}" 15 | snapshotter: native 16 | selinux: "{{ ansible_os_family | lower == 'redhat' }}" 17 | k3s_skip_validation: "{{ k3s_service_handler[ansible_service_mgr] == 'service' }}" 18 | # k3s_skip_post_checks: "{{ ansible_os_family | lower == 'redhat' }}" 19 | pre_tasks: 20 | - name: Set each node to be a control node 21 | ansible.builtin.set_fact: 22 | k3s_control_node: true 23 | roles: 24 | - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" 25 | -------------------------------------------------------------------------------- /molecule/highavailabilityetcd/haproxy-loadbalancer.conf.j2: -------------------------------------------------------------------------------- 1 | frontend loadbalancer 2 | bind *:6443 3 | mode tcp 4 | default_backend control_nodes 5 | timeout client 1m 6 | 7 | backend control_nodes 8 | mode tcp 9 | balance roundrobin 10 | server node2 node2:6443 11 | server node3 node3:6443 12 | timeout connect 30s 13 | timeout server 30m 14 | -------------------------------------------------------------------------------- 
/molecule/highavailabilityetcd/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dependency: 4 | name: galaxy 5 | driver: 6 | name: docker 7 | scenario: 8 | test_sequence: 9 | - dependency 10 | - cleanup 11 | - destroy 12 | - syntax 13 | - create 14 | - prepare 15 | - check 16 | - converge 17 | - idempotence 18 | - side_effect 19 | - verify 20 | - cleanup 21 | - destroy 22 | platforms: 23 | - name: node1 24 | image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"} 25 | command: ${MOLECULE_DOCKER_COMMAND:-""} 26 | volumes: 27 | - /sys/fs/cgroup:/sys/fs/cgroup:ro 28 | privileged: true 29 | pre_build_image: ${MOLECULE_PREBUILT:-true} 30 | networks: 31 | - name: k3snet 32 | - name: node2 33 | image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"} 34 | command: ${MOLECULE_DOCKER_COMMAND:-""} 35 | volumes: 36 | - /sys/fs/cgroup:/sys/fs/cgroup:ro 37 | privileged: true 38 | pre_build_image: ${MOLECULE_PREBUILT:-true} 39 | networks: 40 | - name: k3snet 41 | - name: node3 42 | image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"} 43 | command: ${MOLECULE_DOCKER_COMMAND:-""} 44 | volumes: 45 | - /sys/fs/cgroup:/sys/fs/cgroup:ro 46 | privileged: true 47 | pre_build_image: ${MOLECULE_PREBUILT:-true} 48 | networks: 49 | - name: k3snet 50 | - name: loadbalancer 51 | image: geerlingguy/docker-rockylinux8-ansible:latest 52 | pre_build_image: true 53 | ports: 54 | - "6443:6443" 55 | networks: 56 | - name: k3snet 57 | provisioner: 58 | name: ansible 59 | options: 60 | verbose: true 61 | -------------------------------------------------------------------------------- /molecule/highavailabilityetcd/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Prepare all nodes 4 | hosts: all 5 | tasks: 6 | - name: Ensure apt cache is updated 7 | ansible.builtin.apt: 8 | update_cache: true 9 | when: 
ansible_pkg_mgr == 'apt' 10 | 11 | - name: Ensure sudo is installed 12 | community.general.apk: 13 | name: sudo 14 | state: present 15 | update_cache: true 16 | when: ansible_pkg_mgr == 'apk' 17 | 18 | - name: Prepare Load Balancer 19 | hosts: loadbalancer 20 | tasks: 21 | - name: Ensure HAProxy is installed 22 | ansible.builtin.package: 23 | name: haproxy 24 | state: present 25 | 26 | - name: Ensure HAProxy config directory exists 27 | ansible.builtin.file: 28 | path: /usr/local/etc/haproxy 29 | state: directory 30 | mode: 0755 31 | 32 | - name: Ensure HAProxy is configured 33 | ansible.builtin.template: 34 | src: haproxy-loadbalancer.conf.j2 35 | dest: /usr/local/etc/haproxy/haproxy.cfg 36 | mode: 0644 37 | 38 | - name: Ensure HAProxy service is started 39 | ansible.builtin.command: 40 | cmd: haproxy -D -f /usr/local/etc/haproxy/haproxy.cfg -p /var/run/haproxy.pid 41 | args: 42 | creates: /var/run/haproxy.pid 43 | 44 | - name: Prepare nodes 45 | hosts: node* 46 | tasks: 47 | - name: Ensure apt cache is updated and iptables is installed 48 | ansible.builtin.apt: 49 | name: iptables 50 | state: present 51 | update_cache: true 52 | when: ansible_pkg_mgr == 'apt' 53 | 54 | - name: Ensure iproute is installed 55 | ansible.builtin.dnf: 56 | name: iproute 57 | state: present 58 | update_cache: true 59 | when: ansible_pkg_mgr == 'dnf' 60 | -------------------------------------------------------------------------------- /molecule/lint-requirements.txt: -------------------------------------------------------------------------------- 1 | -r ../requirements.txt 2 | 3 | yamllint>=1.25.0 4 | ansible-lint>=4.3.5 5 | -------------------------------------------------------------------------------- /molecule/nodeploy/.gitignore: -------------------------------------------------------------------------------- 1 | files/* 2 | -------------------------------------------------------------------------------- /molecule/nodeploy/converge.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | become: true 5 | vars: 6 | molecule_is_test: true 7 | k3s_server: "{{ lookup('file', 'k3s_server.yml') | from_yaml }}" 8 | k3s_agent: "{{ lookup('file', 'k3s_agent.yml') | from_yaml }}" 9 | k3s_airgap: true 10 | k3s_release_version: latest 11 | roles: 12 | - role: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') | basename }}" 13 | -------------------------------------------------------------------------------- /molecule/nodeploy/k3s_agent.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | node-label: 4 | - "foo=bar" 5 | - "hello=world" 6 | kubelet-arg: 7 | - "cloud-provider=external" 8 | - "provider-id=azure" 9 | # snapshotter: native 10 | -------------------------------------------------------------------------------- /molecule/nodeploy/k3s_server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | flannel-backend: 'none' 4 | disable-scheduler: true 5 | disable-cloud-controller: true 6 | disable-network-policy: true 7 | disable: 8 | - coredns 9 | - traefik 10 | - servicelb 11 | - local-storage 12 | - metrics-server 13 | node-taint: 14 | - "k3s-controlplane=true:NoExecute" 15 | -------------------------------------------------------------------------------- /molecule/nodeploy/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dependency: 4 | name: galaxy 5 | driver: 6 | name: docker 7 | scenario: 8 | test_sequence: 9 | - dependency 10 | - cleanup 11 | - destroy 12 | - syntax 13 | - create 14 | - prepare 15 | - check 16 | - converge 17 | - idempotence 18 | - side_effect 19 | - verify 20 | - cleanup 21 | - destroy 22 | platforms: 23 | - name: node1 24 | image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"} 25 | command: ${MOLECULE_DOCKER_COMMAND:-""} 26 | 
volumes: 27 | - /sys/fs/cgroup:/sys/fs/cgroup:ro 28 | privileged: true 29 | pre_build_image: ${MOLECULE_PREBUILT:-true} 30 | networks: 31 | - name: k3snet 32 | - name: node2 33 | image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"} 34 | command: ${MOLECULE_DOCKER_COMMAND:-""} 35 | volumes: 36 | - /sys/fs/cgroup:/sys/fs/cgroup:ro 37 | privileged: true 38 | pre_build_image: ${MOLECULE_PREBUILT:-true} 39 | networks: 40 | - name: k3snet 41 | - name: node3 42 | image: ${MOLECULE_DISTRO:-"geerlingguy/docker-rockylinux8-ansible:latest"} 43 | command: ${MOLECULE_DOCKER_COMMAND:-""} 44 | volumes: 45 | - /sys/fs/cgroup:/sys/fs/cgroup:ro 46 | privileged: true 47 | pre_build_image: ${MOLECULE_PREBUILT:-true} 48 | networks: 49 | - name: k3snet 50 | provisioner: 51 | name: ansible 52 | options: 53 | verbose: true 54 | verifier: 55 | name: ansible 56 | -------------------------------------------------------------------------------- /molecule/nodeploy/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare 3 | hosts: all 4 | tasks: 5 | - name: Ensure apt cache is updated and iptables is installed 6 | ansible.builtin.apt: 7 | name: iptables 8 | state: present 9 | update_cache: true 10 | when: ansible_pkg_mgr == 'apt' 11 | 12 | - name: Prepare air-gapped installation 13 | delegate_to: localhost 14 | run_once: true 15 | block: 16 | 17 | - name: Ensure files directory exists 18 | ansible.builtin.file: 19 | path: ./files 20 | state: directory 21 | mode: 0750 22 | 23 | - name: Ensure k3s is downloaded for air-gap installation 24 | ansible.builtin.get_url: 25 | url: https://github.com/k3s-io/k3s/releases/download/v1.22.5%2Bk3s1/k3s 26 | dest: ./files/k3s 27 | mode: 0755 28 | -------------------------------------------------------------------------------- /molecule/nodeploy/verify.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This is an example 
playbook to execute Ansible tests. 3 | 4 | - name: Verify 5 | hosts: all 6 | tasks: 7 | - name: Example assertion 8 | ansible.builtin.assert: 9 | that: true 10 | -------------------------------------------------------------------------------- /molecule/requirements.txt: -------------------------------------------------------------------------------- 1 | -r ../requirements.txt 2 | 3 | molecule-plugins[docker] 4 | docker>=4.3.1 5 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | ansible>=2.10.7 2 | -------------------------------------------------------------------------------- /tasks/determine_systemd_context.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure systemd context is correct if we are running k3s rootless 4 | ansible.builtin.set_fact: 5 | k3s_systemd_context: user 6 | k3s_systemd_unit_dir: "{{ ansible_user_dir }}/.config/systemd/user" 7 | when: 8 | - k3s_runtime_config is defined 9 | - k3s_runtime_config.rootless is defined 10 | - k3s_runtime_config.rootless 11 | -------------------------------------------------------------------------------- /tasks/ensure_cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Ensure cluster token is captured from {{ k3s_control_delegate }}" 4 | ansible.builtin.slurp: 5 | path: "{{ k3s_runtime_config['data-dir'] | default(k3s_data_dir) }}/server/token" 6 | register: k3s_slurped_cluster_token 7 | delegate_to: "{{ k3s_control_delegate }}" 8 | when: 9 | - k3s_control_token is not defined 10 | - not ansible_check_mode 11 | become: "{{ k3s_become }}" 12 | 13 | - name: Ensure cluster token is formatted correctly for use in templates 14 | ansible.builtin.set_fact: 15 | k3s_control_token_content: "{{ k3s_control_token | default(k3s_slurped_cluster_token.content | 
b64decode) }}" 16 | when: 17 | - k3s_control_token is not defined 18 | - not ansible_check_mode 19 | 20 | - name: Ensure dummy cluster token is defined for ansible_check_mode 21 | ansible.builtin.set_fact: 22 | k3s_control_token_content: "{{ k3s_control_delegate | to_uuid }}" 23 | check_mode: false 24 | when: 25 | - ansible_check_mode 26 | 27 | - name: Ensure the cluster token file location exists 28 | ansible.builtin.file: 29 | path: "{{ k3s_token_location | dirname }}" 30 | state: directory 31 | mode: 0755 32 | become: "{{ k3s_become }}" 33 | 34 | - name: Ensure k3s cluster token file is present 35 | ansible.builtin.template: 36 | src: cluster-token.j2 37 | dest: "{{ k3s_token_location }}" 38 | mode: 0600 39 | become: "{{ k3s_become }}" 40 | notify: 41 | - "Restart k3s {{ k3s_service_handler[ansible_service_mgr] }}" 42 | 43 | - name: Ensure k3s service unit file is present 44 | ansible.builtin.template: 45 | src: k3s.service.j2 46 | dest: "{{ k3s_systemd_unit_dir }}/k3s.service" 47 | mode: 0644 48 | become: "{{ k3s_become }}" 49 | when: 50 | - k3s_service_handler[ansible_service_mgr] == 'systemd' 51 | notify: 52 | - "Reload {{ k3s_service_handler[ansible_service_mgr] }}" 53 | - "Restart k3s {{ k3s_service_handler[ansible_service_mgr] }}" 54 | 55 | - name: Ensure k3s service file is present 56 | ansible.builtin.template: 57 | src: k3s.openrc.j2 58 | dest: "{{ k3s_openrc_service_dir }}/k3s" 59 | mode: 0744 60 | when: 61 | - k3s_service_handler[ansible_service_mgr] == 'service' 62 | notify: 63 | - "Reload {{ k3s_service_handler[ansible_service_mgr] }}" 64 | - "Restart k3s {{ k3s_service_handler[ansible_service_mgr] }}" 65 | become: "{{ k3s_become }}" 66 | 67 | - name: Ensure k3s logrotate file is present 68 | ansible.builtin.template: 69 | src: k3s.logrotate.j2 70 | dest: "{{ k3s_logrotate_dir }}/k3s" 71 | mode: 0640 72 | when: 73 | - k3s_service_handler[ansible_service_mgr] == 'service' 74 | notify: 75 | - "Reload {{ k3s_service_handler[ansible_service_mgr] }}" 76 
| - "Restart k3s {{ k3s_service_handler[ansible_service_mgr] }}" 77 | become: "{{ k3s_become }}" 78 | 79 | - name: Ensure k3s config file exists 80 | ansible.builtin.template: 81 | src: config.yaml.j2 82 | dest: "{{ k3s_config_file }}" 83 | mode: 0644 84 | notify: 85 | - "Reload {{ k3s_service_handler[ansible_service_mgr] }}" 86 | - "Restart k3s {{ k3s_service_handler[ansible_service_mgr] }}" 87 | become: "{{ k3s_become }}" 88 | 89 | - name: Ensure secondary controllers are started 90 | ansible.builtin.include_tasks: ensure_control_plane_started_{{ ansible_service_mgr }}.yml 91 | when: 92 | - k3s_control_node 93 | - not k3s_primary_control_node 94 | 95 | - name: Run control plane post checks 96 | ansible.builtin.import_tasks: post_checks_control_plane.yml 97 | when: 98 | - not k3s_skip_validation 99 | - not k3s_skip_post_checks 100 | 101 | - name: Flush Handlers 102 | ansible.builtin.meta: flush_handlers 103 | 104 | - name: Run node post checks 105 | ansible.builtin.import_tasks: post_checks_nodes.yml 106 | when: 107 | - not k3s_skip_validation 108 | - not k3s_skip_post_checks 109 | -------------------------------------------------------------------------------- /tasks/ensure_containerd_registries.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure containerd registries file exists 4 | ansible.builtin.template: 5 | src: registries.yaml.j2 6 | dest: "{{ k3s_config_dir }}/registries.yaml" 7 | mode: 0600 8 | notify: 9 | - "Reload {{ k3s_service_handler[ansible_service_mgr] }}" 10 | - "Restart k3s {{ k3s_service_handler[ansible_service_mgr] }}" 11 | become: "{{ k3s_become }}" 12 | -------------------------------------------------------------------------------- /tasks/ensure_control_plane_started_openrc.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure k3s control plane server is started 4 | ansible.builtin.service: 5 | name: k3s 6 | state: 
started 7 | enabled: "{{ k3s_start_on_boot }}" 8 | register: k3s_service_start_k3s 9 | until: k3s_service_start_k3s is succeeded 10 | retries: 3 11 | delay: 3 12 | failed_when: 13 | - k3s_service_start_k3s is not succeeded 14 | - not ansible_check_mode 15 | become: "{{ k3s_become }}" 16 | -------------------------------------------------------------------------------- /tasks/ensure_control_plane_started_systemd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure k3s control plane server is started 4 | ansible.builtin.systemd: 5 | name: k3s 6 | state: started 7 | enabled: "{{ k3s_start_on_boot }}" 8 | scope: "{{ k3s_systemd_context }}" 9 | register: k3s_systemd_start_k3s 10 | until: k3s_systemd_start_k3s is succeeded 11 | retries: 3 12 | delay: 3 13 | failed_when: 14 | - k3s_systemd_start_k3s is not succeeded 15 | - not ansible_check_mode 16 | become: "{{ k3s_become }}" 17 | -------------------------------------------------------------------------------- /tasks/ensure_directories.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure {{ directory.name }} exists 4 | ansible.builtin.file: 5 | path: "{{ directory.path }}" 6 | state: directory 7 | mode: "{{ directory.mode | default(755) }}" 8 | become: "{{ k3s_become }}" 9 | when: 10 | - directory.path is defined 11 | - directory.path | length > 0 12 | - directory.path != omit 13 | -------------------------------------------------------------------------------- /tasks/ensure_downloads.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure target host architecture information is set as a fact 4 | ansible.builtin.set_fact: 5 | k3s_arch: "{{ k3s_arch_lookup[ansible_architecture].arch }}" 6 | k3s_arch_suffix: "{{ k3s_arch_lookup[ansible_architecture].suffix }}" 7 | check_mode: false 8 | 9 | - name: Ensure URLs are set as facts for 
downloading binaries 10 | ansible.builtin.set_fact: 11 | k3s_binary_url: "{{ k3s_github_download_url }}/{{ k3s_release_version }}/k3s{{ k3s_arch_suffix }}" 12 | k3s_hash_url: "{{ k3s_github_download_url }}/{{ k3s_release_version }}/sha256sum-{{ k3s_arch }}.txt" 13 | check_mode: false 14 | 15 | - name: Override k3s_binary_url and k3s_hash_url facts for testing specific commit 16 | ansible.builtin.set_fact: 17 | k3s_binary_url: "https://storage.googleapis.com/k3s-ci-builds/k3s{{ k3s_arch_suffix }}-{{ k3s_release_version }}" 18 | k3s_hash_url: "https://storage.googleapis.com/k3s-ci-builds/k3s{{ k3s_arch_suffix }}-{{ k3s_release_version }}.sha256sum" 19 | when: 20 | - k3s_release_version | regex_search("^[a-z0-9]{40}$") 21 | check_mode: false 22 | 23 | - name: Ensure the k3s hashsum is downloaded 24 | ansible.builtin.uri: 25 | url: "{{ k3s_hash_url }}" 26 | return_content: true 27 | register: k3s_hash_sum_raw 28 | check_mode: false 29 | 30 | - name: Ensure sha256sum is set from hashsum variable 31 | ansible.builtin.set_fact: 32 | k3s_hash_sum: "{{ (k3s_hash_sum_raw.content.split('\n') | 33 | select('search', 'k3s' + k3s_arch_suffix) | 34 | reject('search', 'images') | 35 | first).split() | first }}" 36 | changed_when: false 37 | check_mode: false 38 | 39 | - name: Ensure installation directory exists 40 | ansible.builtin.file: 41 | path: "{{ k3s_install_dir }}" 42 | state: directory 43 | mode: 0755 44 | 45 | - name: Ensure k3s binary is downloaded 46 | ansible.builtin.get_url: 47 | url: "{{ k3s_binary_url }}" 48 | dest: "{{ k3s_install_dir }}/k3s-{{ k3s_release_version }}" 49 | checksum: "sha256:{{ k3s_hash_sum }}" 50 | mode: 0755 51 | become: "{{ k3s_become }}" 52 | -------------------------------------------------------------------------------- /tasks/ensure_drain_and_remove_nodes.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Check if kubectl exists 4 | ansible.builtin.stat: 5 | path: "{{ k3s_install_dir 
}}/kubectl" 6 | register: k3s_check_kubectl 7 | become: "{{ k3s_become }}" 8 | 9 | - name: Clean up nodes that are in an uninstalled state 10 | when: 11 | - k3s_check_kubectl.stat.exists is defined 12 | - k3s_check_kubectl.stat.exists 13 | - k3s_control_delegate is defined 14 | - not ansible_check_mode 15 | block: 16 | - name: Gather a list of nodes 17 | ansible.builtin.command: 18 | cmd: "{{ k3s_install_dir }}/kubectl get nodes" 19 | changed_when: false 20 | failed_when: false 21 | delegate_to: "{{ k3s_control_delegate }}" 22 | run_once: true 23 | register: kubectl_get_nodes_result 24 | become: "{{ k3s_become }}" 25 | 26 | - name: Ensure uninstalled nodes are drained # noqa no-changed-when 27 | ansible.builtin.command: 28 | cmd: >- 29 | {{ k3s_install_dir }}/kubectl drain {{ hostvars[item].ansible_hostname }} 30 | --ignore-daemonsets 31 | --{{ k3s_drain_command[hostvars[item].k3s_release_version is version_compare('v1.22.0', '>=')] }} 32 | --force 33 | delegate_to: "{{ k3s_control_delegate }}" 34 | run_once: true 35 | when: 36 | - kubectl_get_nodes_result.stdout is defined 37 | - hostvars[item].ansible_hostname in kubectl_get_nodes_result.stdout 38 | - hostvars[item].k3s_state is defined 39 | - hostvars[item].k3s_state == 'uninstalled' 40 | loop: "{{ ansible_play_hosts }}" 41 | become: "{{ k3s_become }}" 42 | 43 | - name: Ensure uninstalled nodes are removed # noqa no-changed-when 44 | ansible.builtin.command: 45 | cmd: "{{ k3s_install_dir }}/kubectl delete node {{ hostvars[item].ansible_hostname }}" 46 | delegate_to: "{{ k3s_control_delegate }}" 47 | run_once: true 48 | when: 49 | - kubectl_get_nodes_result.stdout is defined 50 | - hostvars[item].ansible_hostname in kubectl_get_nodes_result.stdout 51 | - hostvars[item].k3s_state is defined 52 | - hostvars[item].k3s_state == 'uninstalled' 53 | loop: "{{ ansible_play_hosts }}" 54 | become: "{{ k3s_become }}" 55 | -------------------------------------------------------------------------------- /tasks/ensure_installed.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure directories exist 4 | ansible.builtin.include_tasks: ensure_directories.yml 5 | loop: "{{ k3s_ensure_directories_exist }}" 6 | loop_control: 7 | loop_var: directory 8 | 9 | - name: Ensure installed node 10 | ansible.builtin.include_tasks: ensure_installed_node.yml 11 | when: 12 | - ((k3s_control_node and k3s_controller_list | length == 1) 13 | or (k3s_primary_control_node and k3s_controller_list | length > 1)) 14 | - not ansible_check_mode 15 | 16 | - name: Flush Handlers 17 | ansible.builtin.meta: flush_handlers 18 | 19 | - name: Ensure installed node | k3s_build_cluster 20 | ansible.builtin.include_tasks: ensure_installed_node.yml 21 | when: k3s_build_cluster 22 | 23 | - name: Determine if the systems are already clustered 24 | ansible.builtin.stat: 25 | path: "{{ k3s_token_location }}" 26 | register: k3s_token_cluster_check 27 | 28 | - name: Ensure control plane started with {{ ansible_service_mgr }} 29 | ansible.builtin.include_tasks: ensure_control_plane_started_{{ ansible_service_mgr }}.yml 30 | when: (k3s_control_node and k3s_controller_list | length == 1) 31 | or (k3s_primary_control_node and k3s_controller_list | length > 1) 32 | or k3s_token_cluster_check.stat.exists 33 | -------------------------------------------------------------------------------- /tasks/ensure_installed_node.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure k3s is linked into the installation destination 4 | ansible.builtin.file: 5 | src: "{{ k3s_install_dir }}/k3s-{{ k3s_release_version }}" 6 | dest: "{{ k3s_install_dir }}/{{ item }}" 7 | state: "{{ 'hard' if k3s_install_hard_links else 'link' }}" 8 | force: "{{ k3s_install_hard_links }}" 9 | mode: 0755 10 | loop: 11 | - k3s 12 | - kubectl 13 | - crictl 14 | - ctr 15 | when: not ansible_check_mode 16 | notify: 17 | - "Restart k3s {{ 
k3s_service_handler[ansible_service_mgr] }}" 18 | become: "{{ k3s_become }}" 19 | 20 | - name: Ensure k3s config file exists 21 | ansible.builtin.template: 22 | src: config.yaml.j2 23 | dest: "{{ k3s_config_file }}" 24 | mode: 0644 25 | notify: 26 | - "Reload {{ k3s_service_handler[ansible_service_mgr] }}" 27 | - "Restart k3s {{ k3s_service_handler[ansible_service_mgr] }}" 28 | become: "{{ k3s_become }}" 29 | 30 | - name: Ensure cluster token is present when pre-defined 31 | when: k3s_control_token is defined 32 | block: 33 | - name: Ensure the cluster token file location exists 34 | ansible.builtin.file: 35 | path: "{{ k3s_token_location | dirname }}" 36 | state: directory 37 | mode: 0755 38 | become: "{{ k3s_become }}" 39 | 40 | - name: Ensure k3s cluster token file is present 41 | ansible.builtin.template: 42 | src: cluster-token.j2 43 | dest: "{{ k3s_token_location }}" 44 | mode: 0600 45 | become: "{{ k3s_become }}" 46 | notify: 47 | - "Restart k3s {{ k3s_service_handler[ansible_service_mgr] }}" 48 | 49 | - name: Ensure k3s service unit file is present 50 | ansible.builtin.template: 51 | src: k3s.service.j2 52 | dest: "{{ k3s_systemd_unit_dir }}/k3s.service" 53 | mode: 0644 54 | when: 55 | - k3s_service_handler[ansible_service_mgr] == 'systemd' 56 | notify: 57 | - "Reload {{ k3s_service_handler[ansible_service_mgr] }}" 58 | - "Restart k3s {{ k3s_service_handler[ansible_service_mgr] }}" 59 | become: "{{ k3s_become }}" 60 | 61 | - name: Ensure k3s service file is present 62 | ansible.builtin.template: 63 | src: k3s.openrc.j2 64 | dest: "{{ k3s_openrc_service_dir }}/k3s" 65 | mode: 0744 66 | when: 67 | - k3s_service_handler[ansible_service_mgr] == 'service' 68 | notify: 69 | - "Reload {{ k3s_service_handler[ansible_service_mgr] }}" 70 | - "Restart k3s {{ k3s_service_handler[ansible_service_mgr] }}" 71 | become: "{{ k3s_become }}" 72 | 73 | - name: Ensure k3s logrotate file is present 74 | ansible.builtin.template: 75 | src: k3s.logrotate.j2 76 | dest: "{{ 
k3s_logrotate_dir }}/k3s" 77 | mode: 0640 78 | when: 79 | - k3s_service_handler[ansible_service_mgr] == 'service' 80 | notify: 81 | - "Reload {{ k3s_service_handler[ansible_service_mgr] }}" 82 | - "Restart k3s {{ k3s_service_handler[ansible_service_mgr] }}" 83 | become: "{{ k3s_become }}" 84 | 85 | - name: Ensure k3s killall script is present 86 | ansible.builtin.template: 87 | src: k3s-killall.sh.j2 88 | dest: "/usr/local/bin/k3s-killall.sh" 89 | mode: 0700 90 | become: "{{ k3s_become }}" 91 | when: 92 | - k3s_runtime_config is defined 93 | - ("rootless" not in k3s_runtime_config or not k3s_runtime_config.rootless) 94 | 95 | - name: Ensure k3s uninstall script is present 96 | ansible.builtin.template: 97 | src: k3s-uninstall.sh.j2 98 | dest: "/usr/local/bin/k3s-uninstall.sh" 99 | mode: 0700 100 | become: "{{ k3s_become }}" 101 | when: 102 | - k3s_runtime_config is defined 103 | - ("rootless" not in k3s_runtime_config or not k3s_runtime_config.rootless) 104 | -------------------------------------------------------------------------------- /tasks/ensure_k3s_auto_deploy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure that the manifests directory exists 4 | ansible.builtin.file: 5 | state: directory 6 | path: "{{ k3s_server_manifests_dir }}" 7 | mode: 0755 8 | when: >- 9 | k3s_primary_control_node and 10 | (k3s_server_manifests_templates | length > 0 11 | or k3s_server_manifests_urls | length > 0) 12 | become: "{{ k3s_become }}" 13 | 14 | - name: Ensure that the pod-manifests directory exists 15 | ansible.builtin.file: 16 | state: directory 17 | path: "{{ k3s_server_pod_manifests_dir }}" 18 | mode: 0755 19 | when: >- 20 | k3s_control_node and 21 | (k3s_server_pod_manifests_templates | length > 0 22 | or k3s_server_pod_manifests_urls | length > 0) 23 | become: "{{ k3s_become }}" 24 | 25 | # https://rancher.com/docs/k3s/latest/en/advanced/#auto-deploying-manifests 26 | - name: Ensure auto-deploying 
manifests are copied to the primary controller 27 | ansible.builtin.template: 28 | src: "{{ item }}" 29 | dest: "{{ k3s_server_manifests_dir }}/{{ item | basename | replace('.j2', '') }}" 30 | mode: 0644 31 | loop: "{{ k3s_server_manifests_templates }}" 32 | become: "{{ k3s_become }}" 33 | when: 34 | - k3s_primary_control_node 35 | - k3s_server_manifests_templates | length > 0 36 | 37 | - name: Ensure auto-deploying manifests are downloaded to the primary controller 38 | ansible.builtin.get_url: 39 | url: "{{ item.url }}" 40 | dest: "{{ k3s_server_manifests_dir }}/{{ item.filename | default(item.url | basename) }}" 41 | mode: 0644 42 | loop: "{{ k3s_server_manifests_urls }}" 43 | become: "{{ k3s_become }}" 44 | when: 45 | - k3s_primary_control_node 46 | - not ansible_check_mode 47 | - k3s_server_manifests_urls | length > 0 48 | 49 | # https://github.com/k3s-io/k3s/pull/1691 50 | - name: Ensure static pod manifests are copied to controllers 51 | ansible.builtin.template: 52 | src: "{{ item }}" 53 | dest: "{{ k3s_server_pod_manifests_dir }}/{{ item | basename | replace('.j2', '') }}" 54 | mode: 0644 55 | loop: "{{ k3s_server_pod_manifests_templates }}" 56 | become: "{{ k3s_become }}" 57 | when: 58 | - k3s_control_node 59 | 60 | # https://rancher.com/docs/k3s/latest/en/advanced/#auto-deploying-manifests 61 | - name: Ensure auto-deploying manifests are downloaded to the primary controller 62 | ansible.builtin.get_url: 63 | url: "{{ item.url }}" 64 | dest: "{{ k3s_server_pod_manifests_dir }}/{{ item.filename | default(item.url | basename) }}" 65 | mode: 0644 66 | loop: "{{ k3s_server_pod_manifests_urls }}" 67 | become: "{{ k3s_become }}" 68 | when: 69 | - k3s_control_node 70 | - not ansible_check_mode 71 | -------------------------------------------------------------------------------- /tasks/ensure_k3s_config_files.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure that the config.yaml.d directory exists 
4 | ansible.builtin.file: 5 | state: directory 6 | path: "{{ k3s_config_yaml_d_dir }}" 7 | mode: 0755 8 | when: >- 9 | k3s_server_config_yaml_d_files | length > 0 10 | or k3s_agent_config_yaml_d_files | length > 0 11 | become: "{{ k3s_become }}" 12 | 13 | # https://github.com/k3s-io/k3s/pull/3162 14 | - name: Ensure configuration files are copied to controllers 15 | ansible.builtin.template: 16 | src: "{{ item }}" 17 | dest: "{{ k3s_config_yaml_d_dir }}/{{ item | basename | replace('.j2', '') }}" 18 | mode: 0644 19 | loop: "{{ k3s_server_config_yaml_d_files }}" 20 | become: "{{ k3s_become }}" 21 | when: k3s_control_node 22 | 23 | # https://github.com/k3s-io/k3s/pull/3162 24 | - name: Ensure configuration files are copied to agents 25 | ansible.builtin.template: 26 | src: "{{ item }}" 27 | dest: "{{ k3s_config_yaml_d_dir }}/{{ item | basename | replace('.j2', '') }}" 28 | mode: 0644 29 | loop: "{{ k3s_agent_config_yaml_d_files }}" 30 | become: "{{ k3s_become }}" 31 | when: not k3s_control_node 32 | -------------------------------------------------------------------------------- /tasks/ensure_pre_configuration.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure k3s_build_cluster is false if running against a single node. 
4 | ansible.builtin.set_fact: 5 | k3s_build_cluster: false 6 | when: 7 | - ansible_play_hosts | length < 2 8 | - k3s_registration_address is not defined 9 | 10 | - name: Ensure k3s control node fact is set 11 | ansible.builtin.set_fact: 12 | k3s_control_node: "{{ not k3s_build_cluster }}" 13 | when: k3s_control_node is not defined 14 | 15 | - name: Ensure k3s primary control node fact is set 16 | ansible.builtin.set_fact: 17 | k3s_primary_control_node: "{{ not k3s_build_cluster }}" 18 | when: k3s_primary_control_node is not defined 19 | 20 | - name: Ensure k3s control plane port is captured 21 | ansible.builtin.set_fact: 22 | k3s_control_plane_port: "{{ k3s_runtime_config['https-listen-port'] | default(6443) }}" 23 | delegate_to: k3s_primary_control_node 24 | 25 | - name: Ensure k3s node IP is configured when node-ip is defined 26 | ansible.builtin.set_fact: 27 | k3s_node_ip: "{{ k3s_runtime_config['node-ip'] }}" 28 | when: 29 | - k3s_runtime_config['node-ip'] is defined 30 | 31 | - name: Ensure a count of control nodes is generated from ansible_play_hosts 32 | ansible.builtin.set_fact: 33 | k3s_controller_list: "{{ k3s_controller_list + [item] }}" 34 | when: 35 | - hostvars[item].k3s_control_node is defined 36 | - hostvars[item].k3s_control_node 37 | loop: "{{ ansible_play_hosts }}" 38 | 39 | - name: Ensure a k3s control node is defined if none are found in ansible_play_hosts 40 | when: 41 | - k3s_controller_list | length < 1 42 | - k3s_build_cluster is defined 43 | - k3s_build_cluster 44 | block: 45 | - name: Set the control host 46 | ansible.builtin.set_fact: 47 | k3s_control_node: true 48 | when: inventory_hostname == ansible_play_hosts[0] 49 | 50 | - name: Ensure a count of control nodes is generated 51 | ansible.builtin.set_fact: 52 | k3s_controller_list: "{{ k3s_controller_list + [item] }}" 53 | when: 54 | - hostvars[item].k3s_control_node is defined 55 | - hostvars[item].k3s_control_node 56 | loop: "{{ ansible_play_hosts }}" 57 | 58 | - name: Ensure an 
existing primary k3s control node is defined if multiple are found and at least one is running 59 | when: 60 | - k3s_controller_list | length >= 1 61 | - k3s_build_cluster is defined 62 | - k3s_build_cluster 63 | - k3s_control_delegate is not defined 64 | block: 65 | - name: Test if control plane is running 66 | ansible.builtin.wait_for: 67 | port: "{{ k3s_runtime_config['https-listen-port'] | default('6443') }}" 68 | host: "{{ k3s_runtime_config['bind-address'] | default('127.0.0.1') }}" 69 | timeout: 5 70 | register: k3s_control_node_running 71 | ignore_errors: true 72 | when: k3s_control_node 73 | 74 | - name: List running control planes 75 | ansible.builtin.set_fact: 76 | k3s_running_controller_list: "{{ k3s_running_controller_list + [item] }}" 77 | when: 78 | - hostvars[item].k3s_control_node_running is not skipped 79 | - hostvars[item].k3s_control_node_running is succeeded 80 | loop: "{{ ansible_play_hosts }}" 81 | 82 | - name: Choose first running node as delegate 83 | ansible.builtin.set_fact: 84 | k3s_control_delegate: "{{ k3s_running_controller_list[0] }}" 85 | when: k3s_running_controller_list | length >= 1 86 | 87 | - name: Ensure k3s_primary_control_node is set on the delegate 88 | ansible.builtin.set_fact: 89 | k3s_primary_control_node: true 90 | when: 91 | - k3s_control_delegate is defined 92 | - inventory_hostname == k3s_control_delegate 93 | 94 | - name: Ensure a primary k3s control node is defined if multiple are found in ansible_play_hosts 95 | ansible.builtin.set_fact: 96 | k3s_primary_control_node: true 97 | when: 98 | - k3s_controller_list is defined 99 | - inventory_hostname == k3s_controller_list[0] 100 | - k3s_build_cluster is defined 101 | - k3s_build_cluster 102 | - k3s_control_delegate is not defined 103 | 104 | - name: Ensure ansible_host is mapped to inventory_hostname 105 | ansible.builtin.blockinfile: 106 | path: /tmp/inventory.txt 107 | block: | 108 | {% for host in ansible_play_hosts %} 109 | {% filter replace('\n', ' ') %} 110 | 
{{ host }} 111 | @@@ 112 | {{ hostvars[host].ansible_host | default(hostvars[host].ansible_fqdn) | string }} 113 | @@@ 114 | C_{{ hostvars[host].k3s_control_node | string }} 115 | @@@ 116 | P_{{ hostvars[host].k3s_primary_control_node | default(False) | string }} 117 | {% endfilter %} 118 | @@@ END:{{ host }} 119 | {% endfor %} 120 | create: true 121 | mode: 0600 122 | check_mode: false 123 | when: k3s_control_node is defined 124 | 125 | - name: Delegate an initializing control plane node 126 | when: k3s_registration_address is not defined 127 | or k3s_control_delegate is not defined 128 | block: 129 | - name: Lookup control node from file 130 | ansible.builtin.command: 131 | cmd: "grep -i '{{ 'P_True' if (k3s_controller_list | length > 1) else 'C_True' }}' /tmp/inventory.txt" 132 | changed_when: false 133 | check_mode: false 134 | register: k3s_control_delegate_raw 135 | 136 | - name: Ensure control node is delegated for obtaining a cluster token 137 | ansible.builtin.set_fact: 138 | k3s_control_delegate: "{{ k3s_control_delegate_raw.stdout.split(' @@@ ')[0] }}" 139 | check_mode: false 140 | when: k3s_control_delegate is not defined 141 | 142 | - name: Ensure the node registration address is defined from k3s_control_node_address 143 | ansible.builtin.set_fact: 144 | k3s_registration_address: "{{ k3s_control_node_address }}" 145 | check_mode: false 146 | when: k3s_control_node_address is defined 147 | 148 | - name: Ensure the node registration address is defined from node-ip 149 | ansible.builtin.set_fact: 150 | k3s_registration_address: "{{ (hostvars[k3s_control_delegate].k3s_node_ip | split(','))[0] | ipwrap }}" 151 | check_mode: false 152 | when: 153 | - k3s_registration_address is not defined 154 | - k3s_control_node_address is not defined 155 | - hostvars[k3s_control_delegate].k3s_node_ip is defined 156 | 157 | - name: Ensure the node registration address is defined 158 | ansible.builtin.set_fact: 159 | k3s_registration_address: "{{ 
hostvars[k3s_control_delegate].ansible_host | default(hostvars[k3s_control_delegate].ansible_fqdn) }}" 160 | check_mode: false 161 | when: 162 | - k3s_registration_address is not defined 163 | - k3s_control_node_address is not defined 164 | -------------------------------------------------------------------------------- /tasks/ensure_started.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure k3s service is started 4 | ansible.builtin.systemd: 5 | name: k3s 6 | state: started 7 | enabled: "{{ k3s_start_on_boot }}" 8 | when: k3s_non_root is not defined or not k3s_non_root 9 | become: "{{ k3s_become }}" 10 | 11 | - name: Ensure k3s service is started 12 | ansible.builtin.systemd: 13 | name: k3s 14 | state: started 15 | enabled: "{{ k3s_start_on_boot }}" 16 | scope: user 17 | when: 18 | - k3s_non_root is defined 19 | - k3s_non_root 20 | become: "{{ k3s_become }}" 21 | -------------------------------------------------------------------------------- /tasks/ensure_stopped.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure k3s service is stopped 4 | ansible.builtin.systemd: 5 | name: k3s 6 | state: stopped 7 | enabled: "{{ k3s_start_on_boot }}" 8 | when: k3s_non_root is not defined or not k3s_non_root 9 | become: "{{ k3s_become }}" 10 | 11 | - name: Ensure k3s service is stopped 12 | ansible.builtin.systemd: 13 | name: k3s 14 | state: stopped 15 | enabled: "{{ k3s_start_on_boot }}" 16 | scope: user 17 | when: 18 | - k3s_non_root is defined 19 | - k3s_non_root 20 | become: "{{ k3s_become }}" 21 | -------------------------------------------------------------------------------- /tasks/ensure_uninstalled.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Check to see if k3s-killall.sh exits 4 | ansible.builtin.stat: 5 | path: /usr/local/bin/k3s-killall.sh 6 | register: 
check_k3s_killall_script 7 | 8 | - name: Check to see if k3s-uninstall.sh exists 9 | ansible.builtin.stat: 10 | path: /usr/local/bin/k3s-uninstall.sh 11 | register: check_k3s_uninstall_script 12 | 13 | - name: Run k3s-killall.sh 14 | ansible.builtin.command: 15 | cmd: /usr/local/bin/k3s-killall.sh 16 | register: k3s_killall 17 | changed_when: k3s_killall.rc == 0 18 | when: check_k3s_killall_script.stat.exists 19 | become: "{{ k3s_become }}" 20 | 21 | - name: Run k3s-uninstall.sh 22 | ansible.builtin.command: 23 | cmd: /usr/local/bin/k3s-uninstall.sh 24 | args: 25 | removes: /usr/local/bin/k3s-uninstall.sh 26 | register: k3s_uninstall 27 | changed_when: k3s_uninstall.rc == 0 28 | when: check_k3s_uninstall_script.stat.exists 29 | become: "{{ k3s_become }}" 30 | 31 | - name: Ensure hard links are removed 32 | ansible.builtin.file: 33 | path: "{{ k3s_install_dir }}/{{ item }}" 34 | state: absent 35 | loop: 36 | - kubectl 37 | - crictl 38 | - ctr 39 | when: 40 | - k3s_install_hard_links 41 | - not ansible_check_mode 42 | become: "{{ k3s_become }}" 43 | -------------------------------------------------------------------------------- /tasks/ensure_uploads.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure installation directory exists 4 | ansible.builtin.file: 5 | path: "{{ k3s_install_dir }}" 6 | state: directory 7 | mode: 0755 8 | 9 | - name: Ensure k3s binary is copied from controller to target host 10 | ansible.builtin.copy: 11 | src: k3s 12 | # TODO: allow airgap to bypass version post-fix 13 | dest: "{{ k3s_install_dir }}/k3s-{{ k3s_release_version }}" 14 | mode: 0755 15 | become: "{{ k3s_become }}" 16 | -------------------------------------------------------------------------------- /tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Run pre-checks 4 | ansible.builtin.import_tasks: pre_checks.yml 5 | 6 | - name: Ensure state {{
(k3s_state | lower) | default('installed') }} 7 | ansible.builtin.include_tasks: state_{{ (k3s_state | lower) | default('installed') }}.yml 8 | -------------------------------------------------------------------------------- /tasks/post_checks_control_plane.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Check that the control plane is available to accept connections 4 | ansible.builtin.wait_for: 5 | port: "{{ k3s_runtime_config['https-listen-port'] | default('6443') }}" 6 | host: "{{ k3s_runtime_config['bind-address'] | default('127.0.0.1') }}" 7 | delay: 5 8 | sleep: 5 9 | timeout: 300 10 | when: k3s_control_node 11 | -------------------------------------------------------------------------------- /tasks/post_checks_nodes.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Check that all nodes to be ready 4 | ansible.builtin.command: 5 | cmd: "{{ k3s_install_dir }}/kubectl get nodes" 6 | changed_when: false 7 | failed_when: >- 8 | kubectl_get_nodes_result.stdout.find("was refused") != -1 or 9 | kubectl_get_nodes_result.stdout.find("ServiceUnavailable") != -1 10 | register: kubectl_get_nodes_result 11 | until: 12 | - kubectl_get_nodes_result.rc == 0 13 | - kubectl_get_nodes_result.stdout.find("NotReady") == -1 14 | retries: 30 15 | delay: 5 16 | when: 17 | - k3s_control_node 18 | - ("flannel-backend" not in k3s_runtime_config 19 | or k3s_runtime_config["flannel-backend"] != "none") 20 | - not ansible_check_mode 21 | become: "{{ k3s_become }}" 22 | -------------------------------------------------------------------------------- /tasks/post_checks_uninstalled.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Check that k3s is not running 4 | ansible.builtin.command: 5 | cmd: pgrep k3s 6 | failed_when: 7 | - check_k3s_process.rc == 0 8 | - not ansible_check_mode 9 | changed_when: false 10 | 
register: check_k3s_process 11 | 12 | - name: Fail if k3s binaries have not been removed 13 | ansible.builtin.stat: 14 | path: "{{ k3s_install_dir }}/{{ item }}" 15 | register: check_k3s_binaries_removed 16 | failed_when: check_k3s_binaries_removed.stat.exists 17 | loop: 18 | - k3s 19 | - kubectl 20 | - crictl 21 | - ctr 22 | 23 | - name: Check k3s-killall.sh is removed 24 | ansible.builtin.stat: 25 | path: /usr/local/bin/k3s-killall.sh 26 | register: check_k3s_killall 27 | 28 | - name: Fail if k3s-killall.sh script still exists 29 | ansible.builtin.fail: 30 | msg: k3s-killall.sh still exists, uninstall script failed. Please investigate. 31 | when: check_k3s_killall.stat.exists 32 | 33 | - name: Check k3s-uninstall.sh is removed 34 | ansible.builtin.stat: 35 | path: /usr/local/bin/k3s-uninstall.sh 36 | register: check_k3s_uninstall 37 | 38 | - name: Fail if k3s-uninstall.sh script still exists 39 | ansible.builtin.fail: 40 | msg: k3s-uninstall.sh is still exists, uninstall script failed. Please investigate. 41 | when: check_k3s_uninstall.stat.exists 42 | -------------------------------------------------------------------------------- /tasks/pre_checks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Check that k3s_state is a supported value 4 | ansible.builtin.assert: 5 | that: 6 | - k3s_state in k3s_valid_states 7 | fail_msg: "k3s_state not valid. Check README.md for details." 8 | success_msg: "k3s_state is valid." 9 | when: k3s_state is defined 10 | 11 | - name: Check that Ansible v{{ ansible_version.string }} is supported by this role 12 | ansible.builtin.assert: 13 | that: 14 | - ansible_version.string is version_compare(k3s_ansible_min_version, '>=') 15 | fail_msg: >- 16 | Ansible v{{ ansible_version.string }} is not supported by this role. 17 | Please install >= v{{ k3s_ansible_min_version }}. 18 | success_msg: "Ansible v{{ ansible_version.string }} is supported." 
19 | become: false 20 | delegate_to: localhost 21 | run_once: true 22 | when: 23 | - not k3s_skip_validation 24 | - not k3s_skip_env_checks 25 | 26 | - name: Check that Python v{{ ansible_python_version }} is supported by this role 27 | ansible.builtin.assert: 28 | that: 29 | - ansible_python_version is version_compare(k3s_python_min_version, '>=') 30 | fail_msg: >- 31 | Python v{{ ansible_python_version }} is not supported by this role. 32 | Please install >= v{{ k3s_python_min_version }}. 33 | success_msg: "Python v{{ ansible_python_version }} is supported." 34 | become: false 35 | delegate_to: localhost 36 | run_once: true 37 | when: 38 | - not k3s_skip_validation 39 | - not k3s_skip_env_checks 40 | 41 | - name: Check that the target init system is supported by this role 42 | ansible.builtin.assert: 43 | that: 44 | - ansible_service_mgr in k3s_supported_init 45 | fail_msg: >- 46 | {{ ansible_service_mgr }} is not supported by this role. 47 | Supported init systems: {{ k3s_supported_init | join(', ') }} 48 | success_msg: "{{ ansible_service_mgr }} is supported" 49 | when: 50 | - not k3s_skip_validation 51 | - not k3s_skip_env_checks 52 | 53 | - name: Determining if {{ ansible_service_mgr }} is actually openrc 54 | ansible.builtin.stat: 55 | path: /sbin/openrc-run 56 | register: k3s_check_openrc_run 57 | when: 58 | - k3s_service_handler[ansible_service_mgr] == 'service' 59 | - not k3s_skip_validation 60 | - not k3s_skip_env_checks 61 | 62 | - name: Check that {{ ansible_service_mgr }} is actually openrc 63 | ansible.builtin.assert: 64 | that: 65 | - k3s_check_openrc_run.stat.exists 66 | fail_msg: >- 67 | openrc was not found, cannot install to {{ ansible_service_mgr }} 68 | success_msg: "openrc found" 69 | when: 70 | - k3s_service_handler[ansible_service_mgr] == 'service' 71 | - not k3s_skip_validation 72 | - not k3s_skip_env_checks 73 | 74 | - name: Run version pre-checks 75 | ansible.builtin.include_tasks: pre_checks_version.yml 76 | when: 77 | - 
(k3s_release_version is not defined 78 | or not k3s_release_version 79 | or k3s_release_version is not regex('\\+k3s[1-9]$')) 80 | - not k3s_airgap 81 | 82 | - name: Run cgroups pre-checks 83 | ansible.builtin.include_tasks: pre_checks_cgroups.yml 84 | loop: "{{ k3s_cgroup_subsys }}" 85 | loop_control: 86 | loop_var: cgroup 87 | when: 88 | - not k3s_skip_validation 89 | - not k3s_skip_env_checks 90 | 91 | - name: Run packages pre-checks 92 | ansible.builtin.include_tasks: pre_checks_packages.yml 93 | loop: "{{ k3s_check_packages[k3s_os_distribution_version] }}" 94 | loop_control: 95 | loop_var: package 96 | when: 97 | - not k3s_skip_validation 98 | - not k3s_skip_env_checks 99 | - k3s_check_packages[k3s_os_distribution_version] is defined 100 | 101 | - name: Run issue data pre-checks 102 | ansible.builtin.include_tasks: pre_checks_issue_data.yml 103 | when: 104 | - pyratlabs_issue_controller_dump is defined 105 | - pyratlabs_issue_controller_dump 106 | 107 | - name: Run variables pre-checks 108 | ansible.builtin.import_tasks: pre_checks_variables.yml 109 | when: 110 | - not k3s_skip_validation 111 | 112 | - name: Ensure experimental variables pre-checks 113 | ansible.builtin.import_tasks: pre_checks_experimental_variables.yml 114 | when: 115 | - not k3s_skip_validation 116 | 117 | - name: Run unsupported rootless pre-checks 118 | ansible.builtin.import_tasks: pre_checks_unsupported_rootless.yml 119 | when: 120 | - k3s_runtime_config.rootless is defined 121 | - k3s_runtime_config.rootless 122 | - not k3s_skip_validation 123 | 124 | - name: Run pre-configuration tasks 125 | ansible.builtin.import_tasks: ensure_pre_configuration.yml 126 | 127 | - name: Run control node count pre-checks 128 | ansible.builtin.import_tasks: pre_checks_control_node_count.yml 129 | when: 130 | - k3s_build_cluster is defined 131 | - k3s_build_cluster 132 | - not k3s_skip_validation 133 | -------------------------------------------------------------------------------- 
/tasks/pre_checks_cgroups.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Check if {{ cgroup.name }} cgroup is enabled 4 | ansible.builtin.command: 5 | cmd: 'grep -E "^{{ cgroup.name }}\s+.*\s+1$" /proc/cgroups' 6 | failed_when: false 7 | changed_when: false 8 | check_mode: false 9 | register: k3s_check_cgroup_option 10 | 11 | - name: Fail if {{ cgroup.name }} cgroup is not enabled 12 | ansible.builtin.assert: 13 | that: 14 | - k3s_check_cgroup_option.rc == 0 15 | fail_msg: | 16 | {{ cgroup.name }} cgroup disabled. {{ cgroup.documentation }} 17 | success_msg: "{{ cgroup.name }} cgroup enabled." 18 | -------------------------------------------------------------------------------- /tasks/pre_checks_cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Check that the initial control plane server is available to accept connections 4 | ansible.builtin.wait_for: 5 | port: "{{ k3s_runtime_config['https-listen-port'] | default('6443') }}" 6 | host: "{{ k3s_runtime_config['bind-address'] | default('127.0.0.1') }}" 7 | delay: 5 8 | sleep: 5 9 | timeout: 300 10 | 11 | - name: Check that cluster-token exists 12 | ansible.builtin.stat: 13 | path: "{{ k3s_runtime_config['data-dir'] | default(k3s_data_dir) }}/server/token" 14 | register: k3s_check_cluster_token 15 | check_mode: false 16 | failed_when: 17 | - not k3s_check_cluster_token.stat.exists 18 | - not ansible_check_mode 19 | become: "{{ k3s_become }}" 20 | -------------------------------------------------------------------------------- /tasks/pre_checks_control_node_count.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Check the conditions when a single controller is defined 4 | ansible.builtin.assert: 5 | that: 6 | - (k3s_controller_list | length == 1) 7 | - ("datastore-endpoint" not in k3s_runtime_config or not 
k3s_runtime_config['datastore-endpoint']) 8 | - (k3s_etcd_datastore is not defined or not k3s_etcd_datastore) 9 | success_msg: "Control plane configuration is valid." 10 | fail_msg: >- 11 | Control plane configuration is invalid. 12 | Please see notes about k3s_control_node and HA in README.md. 13 | when: 14 | - k3s_controller_list | length == 1 15 | - not k3s_use_unsupported_config 16 | - k3s_control_node 17 | 18 | - name: Check the conditions when multiple controllers are defined 19 | ansible.builtin.assert: 20 | that: 21 | - (k3s_controller_list | length >= 2) 22 | - (("datastore-endpoint" in k3s_runtime_config and k3s_runtime_config['datastore-endpoint']) 23 | or (k3s_etcd_datastore is defined and k3s_etcd_datastore)) 24 | success_msg: "Control plane configuration is valid." 25 | fail_msg: >- 26 | Control plane configuration is invalid. Please see notes about 27 | k3s_control_node and HA in README.md. 28 | when: 29 | - k3s_controller_list | length >= 2 30 | - k3s_control_node 31 | 32 | - name: Check the conditions when embedded etcd is defined 33 | ansible.builtin.assert: 34 | that: 35 | - (k3s_controller_list | length >= 3) 36 | - (((k3s_controller_list | length) % 2) == 1) 37 | success_msg: "Control plane configuration is valid." 38 | fail_msg: >- 39 | Etcd should have a minimum of 3 defined members and the number of 40 | members should be odd. 
Please see notes about HA in README.md 41 | when: 42 | - k3s_etcd_datastore is defined 43 | - k3s_etcd_datastore 44 | - not k3s_use_unsupported_config 45 | - k3s_control_node 46 | - k3s_state != 'uninstalled' 47 | -------------------------------------------------------------------------------- /tasks/pre_checks_experimental_variables.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Check if embedded etcd datastore is enabled and marked as experimental 4 | ansible.builtin.assert: 5 | that: 6 | - k3s_use_experimental is defined and k3s_use_experimental 7 | success_msg: "Experimental variables are defined and enabled." 8 | fail_msg: >- 9 | Experimental variable k3s_etcd_datastore has been configured. 10 | If you want to use this ensure you set k3s_use_experimental. 11 | when: 12 | - k3s_etcd_datastore is defined 13 | - k3s_etcd_datastore 14 | - (k3s_release_version | replace('v', '')) is version_compare("1.19.5", '<') 15 | 16 | - name: Check if any experimental variables are configured and if they are enabled with k3s_use_experimental 17 | ansible.builtin.assert: 18 | that: 19 | - k3s_use_experimental is defined and k3s_use_experimental 20 | success_msg: "Experimental variables are defined and enabled." 21 | fail_msg: >- 22 | Experimental variable {{ item.setting }} has been configured. 23 | If you want to use this ensure you set k3s_use_experimental.
24 | {% if item.documentation is defined %} 25 | Documentation: {{ item.documentation }} 26 | {% endif %} 27 | loop: "{{ k3s_experimental_config }}" 28 | when: 29 | - (item.setting in k3s_runtime_config and k3s_runtime_config[item.setting]) 30 | - ((item.until is not defined) or 31 | (item.until is defined and (k3s_release_version | replace('v', '')) is version_compare(item.until, '<'))) 32 | -------------------------------------------------------------------------------- /tasks/pre_checks_issue_data.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure facts are gathered 4 | ansible.builtin.setup: 5 | 6 | - name: Ensure Ansible version is captured 7 | ansible.builtin.command: 8 | cmd: ansible --version 9 | failed_when: false 10 | changed_when: false 11 | register: check_ansible_version 12 | delegate_to: localhost 13 | run_once: true 14 | become: false 15 | 16 | - name: Ensure Ansible config is captured 17 | ansible.builtin.command: 18 | cmd: ansible-config dump --only-changed 19 | failed_when: false 20 | changed_when: false 21 | register: check_ansible_config 22 | delegate_to: localhost 23 | run_once: true 24 | become: false 25 | 26 | - name: Ensure a list of roles is captured 27 | ansible.builtin.command: 28 | cmd: ansible-galaxy role list 29 | failed_when: false 30 | changed_when: false 31 | register: check_ansible_roles 32 | delegate_to: localhost 33 | run_once: true 34 | become: false 35 | 36 | - name: Ensure facts are written to disk 37 | ansible.builtin.copy: 38 | dest: "{{ playbook_dir }}/pyratlabs-issue-dump.txt" 39 | content: | 40 | # Begin ANSIBLE VERSION 41 | {{ check_ansible_version.stdout }} 42 | # End ANSIBLE VERSION 43 | 44 | # Begin ANSIBLE CONFIG 45 | {{ check_ansible_config.stdout }} 46 | # End ANSIBLE CONFIG 47 | 48 | # Begin ANSIBLE ROLES 49 | {{ check_ansible_roles.stdout }} 50 | # End ANSIBLE ROLES 51 | 52 | # Begin PLAY HOSTS 53 | {{ ansible_play_hosts | to_json }} 54 | # End PLAY
HOSTS 55 | 56 | # Begin K3S ROLE CONFIG 57 | {% for host in ansible_play_hosts %} 58 | ## {{ host }} 59 | {% for config_key in hostvars[host] %} 60 | {% if config_key | regex_search('^k3s_') %} 61 | {{ config_key }}: {{ hostvars[host][config_key] | to_json }} 62 | {% endif %} 63 | {% endfor %} 64 | 65 | {% endfor %} 66 | # End K3S ROLE CONFIG 67 | 68 | # Begin K3S RUNTIME CONFIG 69 | {% for host in ansible_play_hosts %} 70 | ## {{ host }} 71 | {% if hostvars[host].k3s_runtime_config is defined %} 72 | {{ hostvars[host].k3s_runtime_config }} 73 | {% endif %} 74 | {% endfor %} 75 | # End K3S RUNTIME CONFIG 76 | mode: 0600 77 | delegate_to: localhost 78 | run_once: true 79 | become: false 80 | 81 | - name: Fail the play 82 | ansible.builtin.fail: 83 | msg: >- 84 | Please include the output of 85 | {{ playbook_dir }}/pyratlabs-issue-dump.txt in your bug report. 86 | delegate_to: localhost 87 | run_once: true 88 | -------------------------------------------------------------------------------- /tasks/pre_checks_packages.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Check that {{ package.name }} is installed 4 | ansible.builtin.command: 5 | cmd: "which {{ package.name }}" 6 | changed_when: false 7 | failed_when: false 8 | register: check_k3s_required_package 9 | become: "{{ k3s_become }}" 10 | 11 | - name: Test that checks for {{ package.name }} passed 12 | ansible.builtin.assert: 13 | that: 14 | - check_k3s_required_package.rc == 0 15 | success_msg: "Found required package: {{ package.name }}." 16 | fail_msg: >- 17 | Could not find package: {{ package.name }}. 
18 | {% if package.documentation is defined %} 19 | Documentation: {{ package.documentation }} 20 | {% endif %} 21 | when: 22 | - check_k3s_required_package.rc is defined 23 | - (package.until is not defined 24 | or (k3s_release_version | replace('v', '')) is version_compare(package.until, '<')) 25 | - (package.from is not defined 26 | or (k3s_release_version | replace('v', '')) is version_compare(package.from, '>=')) 27 | -------------------------------------------------------------------------------- /tasks/pre_checks_unsupported_rootless.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Check if newuidmap is available 4 | ansible.builtin.command: 5 | cmd: which newuidmap 6 | failed_when: false 7 | changed_when: false 8 | register: k3s_check_newuidmap_installed 9 | 10 | - name: Check if /proc/sys/kernel/unprivileged_userns_clone exists 11 | ansible.builtin.stat: 12 | path: /proc/sys/kernel/unprivileged_userns_clone 13 | register: k3s_check_unprivileged_userns_exists 14 | 15 | - name: Get the value of /proc/sys/kernel/unprivileged_userns_clone 16 | ansible.builtin.slurp: 17 | src: /proc/sys/kernel/unprivileged_userns_clone 18 | register: k3s_get_unprivileged_userns_clone 19 | when: k3s_check_unprivileged_userns_exists.stat.exists 20 | 21 | - name: Set the value of k3s_get_unprivileged_userns_clone 22 | ansible.builtin.set_fact: 23 | k3s_get_unprivileged_userns_clone: 24 | content: "MQo=" 25 | when: not k3s_check_unprivileged_userns_exists.stat.exists 26 | 27 | - name: Get the value of /proc/sys/user/max_user_namespaces 28 | ansible.builtin.slurp: 29 | src: /proc/sys/user/max_user_namespaces 30 | register: k3s_get_max_user_namespaces 31 | 32 | - name: Get the contents of /etc/subuid 33 | ansible.builtin.slurp: 34 | src: /etc/subuid 35 | register: k3s_get_subuid 36 | 37 | - name: Get the contents of /etc/subgid 38 | ansible.builtin.slurp: 39 | src: /etc/subgid 40 | register: k3s_get_subgid 41 | 42 | - name: 
Get current user subuid and subgid values 43 | ansible.builtin.set_fact: 44 | k3s_current_user_subuid: "{{ (k3s_get_subuid['content'] | b64decode).split('\n') 45 | | select('search', ansible_user_id) | first | default('UserNotFound:0:0') }}" 46 | k3s_current_user_subgid: "{{ (k3s_get_subgid['content'] | b64decode).split('\n') 47 | | select('search', ansible_user_id) | first | default('UserNotFound:0:0') }}" 48 | 49 | - name: Check user namespaces kernel parameters are adequate 50 | ansible.builtin.assert: 51 | that: 52 | - k3s_get_unprivileged_userns_clone['content'] | b64decode | int == 1 53 | - ((k3s_get_max_user_namespaces['content'] | b64decode | int >= 28633) or (k3s_os_family != "redhat")) 54 | - k3s_current_user_subuid != "UserNotFound:0:0" 55 | - k3s_current_user_subgid != "UserNotFound:0:0" 56 | - k3s_current_user_subuid.split(':')[2] | int >= 65536 57 | - k3s_current_user_subgid.split(':')[2] | int >= 65536 58 | - ansible_env['XDG_RUNTIME_DIR'] is defined 59 | - k3s_check_newuidmap_installed.rc == 0 60 | success_msg: All kernel parameters passed 61 | fail_msg: >- 62 | Kernel parameters are not set correctly, please check 63 | https://github.com/rootless-containers/rootlesskit 64 | -------------------------------------------------------------------------------- /tasks/pre_checks_variables.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Check that k3s_release_version >= {{ k3s_min_version }}" 4 | ansible.builtin.assert: 5 | that: 6 | - (k3s_release_version | replace('v', '')) is version_compare(k3s_min_version, '>=') 7 | success_msg: "{{ k3s_release_version }} is supported by this role." 8 | fail_msg: "{{ k3s_release_version }} is not supported by this role, please use xanmanning.k3s v1.x." 
9 | when: not k3s_airgap 10 | 11 | - name: Check configuration in k3s_server and k3s_agent that needs alternate configuration 12 | ansible.builtin.assert: 13 | that: 14 | - (item.setting not in k3s_runtime_config) 15 | success_msg: "{{ item.setting }} not found in server/agent config" 16 | fail_msg: >- 17 | {{ item.setting }} found in server/agent config. 18 | Please set {{ item.correction }} to use this option. 19 | {% if item.documentation is defined %} 20 | Documentation: {{ item.documentation }} 21 | {% endif %} 22 | loop: "{{ k3s_config_exclude }}" 23 | 24 | - name: Check configuration in k3s_server and k3s_agent for deprecated configuration 25 | ansible.builtin.assert: 26 | that: 27 | - (item.setting not in k3s_runtime_config) 28 | or (not k3s_runtime_config[item.setting]) 29 | success_msg: "{{ item.setting }} not found in server/agent config" 30 | fail_msg: >- 31 | {{ item.setting }} found in server/agent config. 32 | Please set {{ item.correction }} to use this option. 33 | {% if item.documentation is defined %} 34 | Documentation: {{ item.documentation }} 35 | {% endif %} 36 | loop: "{{ k3s_deprecated_config }}" 37 | when: 38 | - not k3s_airgap 39 | - (item.when is not defined 40 | or (item.when is defined and (k3s_release_version | replace('v', '')) is version_compare(item.when, '>='))) 41 | - not k3s_use_unsupported_config 42 | 43 | - name: Check configuration in k3s_server and k3s_agent against release version 44 | ansible.builtin.assert: 45 | that: 46 | - (k3s_release_version | replace('v', '')) is version_compare(item.version, '>=') 47 | success_msg: "{{ item.setting }} is supported by {{ k3s_release_version }}" 48 | fail_msg: >- 49 | {{ item.setting }} is not supported in {{ k3s_release_version }}. 50 | Please update to v{{ item.version }} to use this option.
51 | {% if item.documentation is defined %} 52 | Documentation: {{ item.documentation }} 53 | {% endif %} 54 | loop: "{{ k3s_config_version_check }}" 55 | when: 56 | - k3s_config_version_check is defined 57 | - item.setting in k3s_runtime_config 58 | -------------------------------------------------------------------------------- /tasks/pre_checks_version.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure k3s_release_version is set to default if false 4 | ansible.builtin.set_fact: 5 | k3s_release_version: "{{ k3s_release_channel }}" 6 | check_mode: false 7 | when: 8 | - k3s_release_version is defined 9 | - not k3s_release_version 10 | 11 | - name: Ensure the default release channel is set 12 | ansible.builtin.set_fact: 13 | k3s_release_channel: "{{ k3s_release_version | default('stable') }}" 14 | check_mode: false 15 | 16 | - name: "Get the latest release version from {{ k3s_api_releases }}" 17 | ansible.builtin.uri: 18 | url: "{{ k3s_api_releases }}" 19 | return_content: true 20 | body_format: json 21 | register: k3s_latest_release 22 | check_mode: false 23 | 24 | - name: Ensure the release version is set as a fact 25 | ansible.builtin.set_fact: 26 | k3s_release_version: "{{ item.latest }}" 27 | loop: "{{ k3s_latest_release.json.data }}" 28 | check_mode: false 29 | when: 30 | - item.name == k3s_release_channel 31 | - item.type == "channel" 32 | -------------------------------------------------------------------------------- /tasks/state_downloaded.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Run version pre-checks 4 | ansible.builtin.import_tasks: pre_checks_version.yml 5 | when: 6 | - k3s_release_version is not defined or not k3s_release_version 7 | - not k3s_airgap 8 | 9 | - name: Run k3s binary
upload tasks | k3s_airgap 14 | ansible.builtin.import_tasks: ensure_uploads.yml 15 | when: k3s_airgap 16 | -------------------------------------------------------------------------------- /tasks/state_installed.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure nodes are drained and removed 4 | ansible.builtin.import_tasks: ensure_drain_and_remove_nodes.yml 5 | 6 | - name: Determine systemd context 7 | ansible.builtin.import_tasks: determine_systemd_context.yml 8 | 9 | - name: Flush Handlers 10 | ansible.builtin.meta: flush_handlers 11 | 12 | - name: Run k3s binary download and install tasks 13 | ansible.builtin.import_tasks: ensure_downloads.yml 14 | when: not k3s_airgap 15 | 16 | - name: Run k3s binary upload tasks | k3s_airgap 17 | ansible.builtin.import_tasks: ensure_uploads.yml 18 | when: k3s_airgap 19 | 20 | - name: Run auto-deploy manifests and pod manifests tasks 21 | ansible.builtin.import_tasks: ensure_k3s_auto_deploy.yml 22 | 23 | - name: Ensure k3s configuration files are copied to controllers and agents 24 | ansible.builtin.import_tasks: ensure_k3s_config_files.yml 25 | 26 | - name: Run k3s installation tasks 27 | ansible.builtin.import_tasks: ensure_installed.yml 28 | 29 | - name: Ensure containerd registries 30 | ansible.builtin.include_tasks: ensure_containerd_registries.yml 31 | when: 32 | - (k3s_registries.mirrors | default(None)) != None or (k3s_registries.configs | default(None) != None) 33 | - ('rootless' not in k3s_runtime_config or not k3s_runtime_config.rootless) 34 | 35 | - name: Run cluster pre-checks 36 | ansible.builtin.include_tasks: pre_checks_cluster.yml 37 | when: 38 | - k3s_control_delegate is defined 39 | - k3s_control_delegate == inventory_hostname 40 | 41 | - name: Run k3s cluster tasks 42 | ansible.builtin.import_tasks: ensure_cluster.yml 43 | when: 44 | - k3s_build_cluster is defined 45 | - k3s_build_cluster 46 | - k3s_registration_address is defined 47 | 48 | - 
name: Flush Handlers 49 | ansible.builtin.meta: flush_handlers 50 | -------------------------------------------------------------------------------- /tasks/state_restarted.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure k3s is stopped 4 | ansible.builtin.import_tasks: ensure_stopped.yml 5 | 6 | - name: Ensure k3s is started 7 | ansible.builtin.import_tasks: ensure_started.yml 8 | -------------------------------------------------------------------------------- /tasks/state_started.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure k3s is started 4 | ansible.builtin.import_tasks: ensure_started.yml 5 | -------------------------------------------------------------------------------- /tasks/state_stopped.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Ensure k3s is stopped 4 | ansible.builtin.import_tasks: ensure_stopped.yml 5 | -------------------------------------------------------------------------------- /tasks/state_uninstalled.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Run pre-configuration tasks 4 | ansible.builtin.import_tasks: ensure_pre_configuration.yml 5 | 6 | - name: Ensure nodes are drained and removed 7 | ansible.builtin.import_tasks: ensure_drain_and_remove_nodes.yml 8 | 9 | - name: Run uninstall tasks 10 | ansible.builtin.import_tasks: ensure_uninstalled.yml 11 | 12 | - name: Run uninstall post checks 13 | ansible.builtin.import_tasks: post_checks_uninstalled.yml 14 | when: 15 | - not k3s_skip_validation 16 | - not k3s_skip_post_checks 17 | -------------------------------------------------------------------------------- /tasks/state_validated.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Run control plane post checks 4 | 
ansible.builtin.import_tasks: post_checks_control_plane.yml 5 | 6 | - name: Run node post checks 7 | ansible.builtin.import_tasks: post_checks_nodes.yml 8 | -------------------------------------------------------------------------------- /templates/cluster-token.j2: -------------------------------------------------------------------------------- 1 | {{ k3s_control_token_content | default(k3s_control_token | default(k3s_control_delegate | to_uuid)) }} 2 | -------------------------------------------------------------------------------- /templates/config.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | {% if k3s_etcd_datastore and (k3s_control_node is defined and k3s_control_node) and (k3s_primary_control_node is defined and k3s_primary_control_node) %} 4 | cluster-init: true 5 | {% endif %} 6 | {% if k3s_runtime_config is defined and k3s_runtime_config | length > 0 %} 7 | {{ k3s_runtime_config | to_nice_yaml(indent=2) }} 8 | {% endif %} 9 | -------------------------------------------------------------------------------- /templates/k3s-killall.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | [ $(id -u) -eq 0 ] || exec sudo $0 $@ 4 | 5 | for bin in {{ k3s_runtime_config['data-dir'] | default(k3s_data_dir) }}/data/**/bin/; do 6 | [ -d $bin ] && export PATH=$PATH:$bin:$bin/aux 7 | done 8 | 9 | set -x 10 | 11 | for service in {{ k3s_systemd_unit_dir }}/k3s*.service; do 12 | [ -s $service ] && systemctl stop $(basename $service) 13 | done 14 | 15 | for service in /etc/init.d/k3s*; do 16 | [ -x $service ] && $service stop 17 | done 18 | 19 | pschildren() { 20 | ps -e -o ppid= -o pid= | \ 21 | sed -e 's/^\s*//g; s/\s\s*/\t/g;' | \ 22 | grep -w "^$1" | \ 23 | cut -f2 24 | } 25 | 26 | pstree() { 27 | for pid in $@; do 28 | echo $pid 29 | for child in $(pschildren $pid); do 30 | pstree $child 31 | done 32 | done 33 | } 34 | 35 | killtree() { 36 | kill -9 $( 37 | 
{ set +x; } 2>/dev/null; 38 | pstree $@; 39 | set -x; 40 | ) 2>/dev/null 41 | } 42 | 43 | remove_interfaces() { 44 | # Delete network interface(s) that match 'master cni0' 45 | ip link show 2>/dev/null | grep 'master cni0' | while read ignore iface ignore; do 46 | iface=${iface%%@*} 47 | [ -z "$iface" ] || ip link delete $iface 48 | done 49 | 50 | # Delete cni related interfaces 51 | ip link delete cni0 52 | ip link delete flannel.1 53 | ip link delete flannel-v6.1 54 | ip link delete kube-ipvs0 55 | ip link delete flannel-wg 56 | ip link delete flannel-wg-v6 57 | 58 | # Restart tailscale 59 | if [ -n "$(command -v tailscale)" ]; then 60 | tailscale set --advertise-routes= 61 | fi 62 | } 63 | 64 | getshims() { 65 | ps -e -o pid= -o args= | sed -e 's/^ *//; s/\s\s*/\t/;' | grep -w 'k3s/data/[^/]*/bin/containerd-shim' | cut -f1 66 | } 67 | 68 | killtree $({ set +x; } 2>/dev/null; getshims; set -x) 69 | 70 | do_unmount_and_remove() { 71 | set +x 72 | while read -r _ path _; do 73 | case "$path" in $1*) echo "$path" ;; esac 74 | done < /proc/self/mounts | sort -r | xargs -r -t -n 1 sh -c 'umount -f "$0" && rm -rf "$0"' 75 | set -x 76 | } 77 | 78 | do_unmount_and_remove '/run/k3s' 79 | do_unmount_and_remove '{{ k3s_runtime_config['data-dir'] | default(k3s_data_dir) }}' 80 | do_unmount_and_remove '/var/lib/kubelet/pods' 81 | do_unmount_and_remove '/var/lib/kubelet/plugins' 82 | do_unmount_and_remove '/run/netns/cni-' 83 | 84 | # Remove CNI namespaces 85 | ip netns show 2>/dev/null | grep cni- | xargs -r -t -n 1 ip netns delete 86 | 87 | remove_interfaces 88 | 89 | rm -rf /var/lib/cni/ 90 | iptables-save | grep -v KUBE- | grep -v CNI- | grep -iv flannel | iptables-restore 91 | ip6tables-save | grep -v KUBE- | grep -v CNI- | grep -iv flannel | ip6tables-restore 92 | -------------------------------------------------------------------------------- /templates/k3s-uninstall.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 
| set -x 3 | [ $(id -u) -eq 0 ] || exec sudo $0 $@ 4 | 5 | /usr/local/bin/k3s-killall.sh 6 | 7 | if command -v systemctl; then 8 | systemctl disable k3s 9 | systemctl reset-failed k3s 10 | systemctl daemon-reload 11 | fi 12 | if command -v rc-update; then 13 | rc-update delete k3s default 14 | fi 15 | 16 | rm -f {{ k3s_systemd_unit_dir }}/k3s.service 17 | rm -f {{ k3s_systemd_unit_dir }}/k3s.env 18 | 19 | remove_uninstall() { 20 | rm -f /usr/local/bin/k3s-uninstall.sh 21 | } 22 | trap remove_uninstall EXIT 23 | 24 | if (ls {{ k3s_systemd_unit_dir }}/k3s*.service || ls /etc/init.d/k3s*) >/dev/null 2>&1; then 25 | set +x; echo 'Additional k3s services installed, skipping uninstall of k3s'; set -x 26 | exit 27 | fi 28 | 29 | for cmd in kubectl crictl ctr; do 30 | if [ -L {{ k3s_install_dir }}/$cmd ]; then 31 | rm -f {{ k3s_install_dir }}/$cmd 32 | fi 33 | done 34 | 35 | rm -rf {{ k3s_config_dir }} 36 | rm -rf /run/k3s 37 | rm -rf /run/flannel 38 | rm -rf {{ k3s_runtime_config['data-dir'] | default(k3s_data_dir) }} 39 | rm -rf /var/lib/kubelet 40 | rm -f {{ k3s_install_dir }}/k3s 41 | rm -f /usr/local/bin/k3s-killall.sh 42 | 43 | if type yum >/dev/null 2>&1; then 44 | yum remove -y k3s-selinux 45 | rm -f /etc/yum.repos.d/rancher-k3s-common*.repo 46 | elif type rpm-ostree >/dev/null 2>&1; then 47 | rpm-ostree uninstall k3s-selinux 48 | rm -f /etc/yum.repos.d/rancher-k3s-common*.repo 49 | elif type zypper >/dev/null 2>&1; then 50 | uninstall_cmd="zypper remove -y k3s-selinux" 51 | if [ "${TRANSACTIONAL_UPDATE=false}" != "true" ] && [ -x /usr/sbin/transactional-update ]; then 52 | uninstall_cmd="transactional-update --no-selfupdate -d run $uninstall_cmd" 53 | fi 54 | $uninstall_cmd 55 | rm -f /etc/zypp/repos.d/rancher-k3s-common*.repo 56 | fi 57 | -------------------------------------------------------------------------------- /templates/k3s.logrotate.j2: -------------------------------------------------------------------------------- 1 | /var/log/k3s.log { 2 | missingok 
3 | notifempty 4 | copytruncate 5 | } 6 | -------------------------------------------------------------------------------- /templates/k3s.openrc.j2: -------------------------------------------------------------------------------- 1 | #!/sbin/openrc-run 2 | 3 | depend() { 4 | after network-online 5 | want cgroups 6 | } 7 | 8 | start_pre() { 9 | rm -f /tmp/k3s.* 10 | } 11 | 12 | supervisor=supervise-daemon 13 | name="k3s" 14 | command="{{ k3s_install_dir }}/k3s" 15 | command_args="{% filter regex_replace('\s+', ' ') %} 16 | {% filter replace('\n', ' ') %} 17 | {% if k3s_debug is defined and k3s_debug %} 18 | --debug 19 | {% endif %} 20 | {% if k3s_control_node %} 21 | server 22 | {% if (k3s_etcd_datastore is defined and k3s_etcd_datastore) and (k3s_primary_control_node is not defined or not k3s_primary_control_node) and k3s_controller_list | length > 1 %} 23 | --server https://{{ k3s_registration_address }}:{{ k3s_control_plane_port | default(6443) | string }} 24 | {% endif %} 25 | {% if k3s_server is defined %} 26 | --config {{ k3s_config_file }} 27 | {% endif %} 28 | {% if not k3s_primary_control_node or k3s_control_token is defined %} 29 | --token-file {{ k3s_token_location }} 30 | {% endif %} 31 | {% else %} 32 | agent 33 | --server https://{{ k3s_registration_address }}:{{ k3s_control_plane_port | default(6443) | string }} 34 | --token-file {{ k3s_token_location }} 35 | {% if k3s_agent is defined %} 36 | --config {{ k3s_config_file }} 37 | {% endif %} 38 | {% endif %} >>/var/log/k3s.log 2>&1" 39 | {% endfilter %} 40 | {% endfilter %} 41 | 42 | output_log="/var/log/k3s.log" 43 | error_log="/var/log/k3s.log" 44 | 45 | pidfile="/var/run/k3s.pid" 46 | respawn_delay=5 47 | respawn_max=0 48 | -------------------------------------------------------------------------------- /templates/k3s.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Lightweight Kubernetes 3 | Documentation=https://k3s.io 4 | {% 
for requires_unit in k3s_service_requires %} 5 | Requires={{ requires_unit }} 6 | {% endfor %} 7 | Wants=network-online.target 8 | {% for wants_unit in k3s_service_wants %} 9 | Wants={{ wants_unit }} 10 | {% endfor %} 11 | {% for before_unit in k3s_service_before %} 12 | Before={{ before_unit }} 13 | {% endfor %} 14 | After=network-online.target 15 | {% for after_unit in k3s_service_after %} 16 | After={{ after_unit }} 17 | {% endfor %} 18 | 19 | [Service] 20 | Type={{ 'notify' if k3s_control_node else 'exec' }} 21 | {% if k3s_service_env_vars is defined and k3s_service_env_vars is iterable %} 22 | {% for env_var in k3s_service_env_vars %} 23 | Environment={{ env_var }}="{{ k3s_service_env_vars[env_var] }}" 24 | {% endfor %} 25 | {% endif %} 26 | {% if k3s_service_env_file is defined and k3s_service_env_file %} 27 | EnvironmentFile={{ k3s_service_env_file }} 28 | {% endif %} 29 | ExecStartPre=/bin/sh -xc '! /usr/bin/systemctl is-enabled --quiet nm-cloud-setup.service' 30 | ExecStartPre=-/sbin/modprobe br_netfilter 31 | ExecStartPre=-/sbin/modprobe overlay 32 | {% if k3s_service_exec_start_pre_vars is defined and k3s_service_exec_start_pre_vars is iterable %} 33 | {% for exec_start_pre_var in k3s_service_exec_start_pre_vars %} 34 | ExecStartPre={{ exec_start_pre_var }} 35 | {% endfor %} 36 | {% endif %} 37 | {% filter regex_replace('\s+', ' ') %} 38 | {% filter replace('\n', ' ') %} 39 | ExecStart={{ k3s_install_dir }}/k3s 40 | {% if k3s_debug is defined and k3s_debug %} 41 | --debug 42 | {% endif %} 43 | {% if k3s_control_node %} 44 | server 45 | {% if (k3s_etcd_datastore is defined and k3s_etcd_datastore) and (k3s_primary_control_node is not defined or not k3s_primary_control_node) and k3s_controller_list | length > 1 %} 46 | --server https://{{ k3s_registration_address }}:{{ k3s_control_plane_port | default(6443) | string }} 47 | {% endif %} 48 | {% if k3s_server is defined %} 49 | --config {{ k3s_config_file }} 50 | {% endif %} 51 | {% if not 
k3s_primary_control_node or k3s_control_token is defined %} 52 | --token-file {{ k3s_token_location }} 53 | {% endif %} 54 | {% else %} 55 | agent 56 | --server https://{{ k3s_registration_address }}:{{ k3s_control_plane_port | default(6443) | string }} 57 | --token-file {{ k3s_token_location }} 58 | {% if k3s_agent is defined %} 59 | --config {{ k3s_config_file }} 60 | {% endif %} 61 | {% endif %} 62 | {% endfilter %} 63 | {% endfilter %} 64 | 65 | KillMode=process 66 | Delegate=yes 67 | LimitNOFILE=1048576 68 | LimitNPROC=infinity 69 | LimitCORE=infinity 70 | TasksMax=infinity 71 | TimeoutStartSec=0 72 | Restart=always 73 | RestartSec=5s 74 | 75 | [Install] 76 | WantedBy=multi-user.target 77 | -------------------------------------------------------------------------------- /templates/registries.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | {{ k3s_registries | to_nice_yaml(indent=2) }} 3 | -------------------------------------------------------------------------------- /vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Minimum supported versions 4 | k3s_min_version: 1.19.3 5 | k3s_ansible_min_version: 2.9.16 6 | k3s_python_min_version: 3.6 7 | 8 | # Valid states for this role 9 | k3s_valid_states: 10 | - installed 11 | - started 12 | - stopped 13 | - restarted 14 | - downloaded 15 | - uninstalled 16 | - validated 17 | 18 | # Supported init systems 19 | k3s_supported_init: 20 | - systemd 21 | - openrc 22 | 23 | # Map ansible fact gathering architecture to a release name and suffix in github. 
24 | k3s_arch_lookup: 25 | amd64: 26 | arch: amd64 27 | suffix: "" 28 | x86_64: 29 | arch: amd64 30 | suffix: "" 31 | arm64: 32 | arch: arm64 33 | suffix: "-arm64" 34 | aarch64: 35 | arch: arm64 36 | suffix: "-arm64" 37 | arm: 38 | arch: arm 39 | suffix: "-armhf" 40 | arm6l: 41 | arch: arm 42 | suffix: "-armhf" 43 | armv6l: 44 | arch: arm 45 | suffix: "-armhf" 46 | arm7: 47 | arch: arm 48 | suffix: "-armhf" 49 | armv7l: 50 | arch: arm 51 | suffix: "-armhf" 52 | armhf: 53 | arch: arm 54 | suffix: "-armhf" 55 | 56 | # Always default to stable channel, this will change with k3s_release_version 57 | k3s_release_channel: stable 58 | 59 | # K3s updates API 60 | k3s_api_releases: "{{ k3s_api_url }}/v1-release/channels" 61 | # Download location for releases 62 | k3s_github_download_url: "{{ k3s_github_url }}/releases/download" 63 | 64 | # Generate a runtime config dictionary 65 | k3s_runtime_config: "{{ 66 | (k3s_control_node is defined and k3s_control_node) 67 | | ternary((k3s_server | default({})) | combine(k3s_agent | default({})), 68 | (k3s_agent | default({})), 69 | (k3s_server | default({})) | combine(k3s_agent | default({}))) 70 | }}" 71 | 72 | # Determine if a cluster should be built 73 | k3s_conf_build_cluster: "{{ 74 | not ((ansible_play_hosts_all | length < 2) 75 | and k3s_registration_address is not defined) 76 | }}" 77 | 78 | # Empty array for counting the number of control plane nodes 79 | k3s_controller_list: [] 80 | k3s_running_controller_list: [] 81 | 82 | # Control plane port default 83 | k3s_control_plane_port: "{{ k3s_runtime_config['https-listen-port'] | default(6443) }}" 84 | 85 | # Default to the "system" systemd context, this will be "user" when running rootless 86 | k3s_systemd_context: system 87 | 88 | # Directory for systemd unit files to be installed. 
As this role doesn't use package 89 | # management, this should live in /etc/systemd, not /lib/systemd 90 | k3s_systemd_unit_dir: "/etc/systemd/{{ k3s_systemd_context }}" 91 | 92 | # Directory for installing openrc service file 93 | k3s_openrc_service_dir: /etc/init.d 94 | 95 | # Directory for installing logrotate config 96 | k3s_logrotate_dir: /etc/logrotate.d 97 | 98 | # Service handler 99 | k3s_service_handler: 100 | systemd: systemd 101 | openrc: service 102 | 103 | # Data directory location for k3s 104 | k3s_data_dir: "{{ k3s_runtime_config['data-dir'] | default('/var/lib/rancher/k3s') }}" 105 | 106 | # Config directory location for k3s 107 | k3s_config_dir: "{{ k3s_config_file | dirname }}" 108 | 109 | # Directory for gathering the k3s token for clustering. I don't see this changing. 110 | k3s_token_location: "{{ k3s_config_dir }}/cluster-token" 111 | 112 | # Path for additional Kubernetes Manifests 113 | # https://rancher.com/docs/k3s/latest/en/advanced/#auto-deploying-manifests 114 | k3s_server_manifests_dir: "{{ k3s_data_dir }}/server/manifests" 115 | 116 | # Path for static pod manifests that are deployed on the control plane 117 | # https://github.com/k3s-io/k3s/pull/1691 118 | k3s_server_pod_manifests_dir: "{{ k3s_data_dir }}/agent/pod-manifests" 119 | 120 | # OS formatted strings 121 | k3s_os_distribution: "{{ ansible_distribution | replace(' ', '-') | lower }}" 122 | k3s_os_version: "{{ ansible_distribution_version | regex_replace('[ .]', '-') | lower }}" 123 | k3s_os_distribution_version: "{{ k3s_os_distribution }}-{{ k3s_os_version }}" 124 | k3s_os_family: "{{ ansible_os_family | replace(' ', '-') | lower }}" 125 | 126 | # Packages that we need to check are installed 127 | k3s_check_packages: 128 | debian-11: 129 | - name: iptables-legacy 130 | from: 1.19.2 131 | until: 1.22.3 132 | documentation: https://rancher.com/docs/k3s/latest/en/advanced/#enabling-legacy-iptables-on-raspbian-buster 133 | # - name: dummy 134 | # from: 1.19.2 135 | # until:
1.21.0 136 | # documentation: https://example.com 137 | 138 | # Directories that we need to ensure exist 139 | k3s_ensure_directories_exist: 140 | - name: Config directory 141 | path: "{{ k3s_config_dir }}" 142 | - name: Config.yaml.d directory 143 | path: "{{ k3s_config_yaml_d_dir }}" 144 | - name: Systemd unit file directory 145 | path: "{{ k3s_systemd_unit_dir }}" 146 | - name: Data directory 147 | path: "{{ k3s_data_dir }}" 148 | - name: Default local storage path 149 | path: "{{ k3s_runtime_config['default-local-storage-path'] | default(omit) }}" 150 | - name: Private registry config file 151 | path: "{{ (k3s_runtime_config['private-registry'] | default(omit)) | dirname }}" 152 | 153 | # Config items that should not appear in k3s_server or k3s_agent 154 | k3s_config_exclude: 155 | - setting: server 156 | correction: k3s_registration_address 157 | - setting: cluster-init 158 | correction: k3s_etcd_datastore 159 | - setting: token 160 | correction: k3s_control_token 161 | - setting: token-file 162 | correction: k3s_token_location 163 | 164 | # Config items and the versions that they were introduced 165 | k3s_config_version_check: 166 | - setting: etcd-s3-bucket 167 | version: 1.20.6 # Prior to this there was very buggy support! 
168 | 169 | # Config items that should be marked as experimental 170 | k3s_experimental_config: 171 | - setting: selinux 172 | until: 1.19.4 173 | - setting: rootless 174 | - setting: secrets-encryption 175 | until: 1.23.15 176 | - setting: agent-token 177 | - setting: agent-token-file 178 | - setting: cluster-reset 179 | until: 1.19.5 180 | - setting: prefer-bundled-bin 181 | 182 | # Config items that should be marked as deprecated 183 | k3s_deprecated_config: 184 | - setting: no-flannel 185 | correction: "flannel-backend: 'none'" 186 | # when: 0.10.2 # Example 187 | - setting: cluster-secret 188 | correction: token 189 | - setting: no-deploy 190 | correction: "disable: VALUE" 191 | - setting: docker 192 | correction: "docker: false" 193 | when: 1.20.0 194 | 195 | # cgroup checks 196 | k3s_cgroup_subsys: 197 | - name: memory 198 | documentation: | 199 | If you are running on a Raspberry Pi, see: 200 | https://rancher.com/docs/k3s/latest/en/advanced/#enabling-cgroups-for-raspbian-buster 201 | - name: cpuset 202 | documentation: | 203 | If you are running Alpine Linux, see: 204 | https://rancher.com/docs/k3s/latest/en/advanced/#additional-preparation-for-alpine-linux-setup 205 | 206 | # Drain command 207 | k3s_drain_command: 208 | true: delete-emptydir-data 209 | false: delete-local-data 210 | --------------------------------------------------------------------------------