├── .ansible-lint ├── .flake8 ├── .github └── workflows │ ├── build-test-image.yml │ ├── run-test.yml │ └── weekly.yml ├── .gitignore ├── .test-ansible └── .gitignore ├── .yamllint ├── Dockerfile ├── LICENSE.md ├── README.md ├── Vagrantfile ├── docker-compose.override.yaml.sample ├── docker-compose.yaml ├── docs ├── hooks_and_plugins.md ├── quickstart.md ├── tips_tricks.md └── variables.md ├── galaxy.yml ├── playbooks ├── 00-apiserver-proxy.yml ├── 00-inventory.yml ├── 00_apiserver_proxy.yml ├── 00_inventory.yml ├── 01-site.yml ├── 01_site.yml ├── 02-node-upgrade.yml ├── 02_node_upgrade.yml ├── 99-change-cgroup-driver.yml ├── 99_change_cgroup_driver.yml └── roles ├── plugins ├── filter │ └── bootstrap_token.py └── modules │ ├── kubectl.py │ └── pkg_version_match.py ├── poetry.lock ├── pyproject.toml ├── roles ├── apiserver_docker │ ├── defaults │ │ └── main.yml │ ├── files │ │ └── docker-compose.yaml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ └── apiserver-proxy-endpoints.env.j2 │ └── vars │ │ └── os_Debian.yml ├── apiserver_docker_common │ └── defaults │ │ └── main.yml ├── apiserver_haproxy │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── haproxy_repo_Debian.yml │ │ ├── main.yml │ │ ├── pkg_Debian.yml │ │ └── upgrade_from_docker.yml │ ├── templates │ │ └── haproxy.cfg.j2 │ └── vars │ │ ├── os_Debian.yml │ │ └── os_Ubuntu.yml ├── apiserver_haproxy_upgrade │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── bootstrap_token │ ├── defaults │ │ └── main.yml │ ├── filter_plugins │ │ └── bootstrap_token.py │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── bootstrap_token.yml │ │ └── main.yaml │ └── vars │ │ └── main.yml ├── common_vars │ ├── defaults │ │ └── main.yml │ └── meta │ │ └── main.yml ├── control_plane_patches │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── main.yml │ │ └── patch.yml │ └── templates │ │ ├── etcd+json.yaml.j2 │ │ ├── kube-controller-manager+json.yaml.j2 │ │ └── kube-scheduler+json.yaml.j2 ├── discover │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── drain_nodes │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── find_ip │ ├── defaults │ │ └── main.yml │ └── tasks │ │ ├── find_ip.yml │ │ └── main.yml ├── find_running_cp │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── main.yaml │ └── tasks │ │ └── main.yaml ├── hooks_call │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── hooks_plugins_register │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── init_cp │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── init_config.j2 ├── join_nodes │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── join_config.j2 ├── kubeadm_configs_compute │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── vars │ │ └── main.yml ├── kubeadm_configs_update │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── kubeadm_config.yaml.j2 ├── kubectl_module │ ├── library │ │ └── kubectl.py │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── packages │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── find_version_Debian.yml │ │ ├── 
kube_repo_Debian.yml │ │ ├── main.yml │ │ └── pkg_Debian.yml │ └── vars │ │ └── os_Debian.yml ├── packages_common │ └── library │ │ └── pkg_version_match.py ├── preflight_check_cp │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── main.yml │ └── tasks │ │ ├── check_control_plane_endpoint.yml │ │ ├── check_control_plane_health.yml │ │ ├── check_version.yml │ │ └── main.yml ├── preflight_check_nodes │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── main.yml │ └── tasks │ │ ├── check_kubelet_config.yml │ │ ├── check_kubelet_version.yml │ │ └── main.yml ├── process_reasons │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── upgrade_cp │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── upgrade_nodes │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml └── user_kubeconfig │ ├── defaults │ └── main.yml │ ├── meta │ └── main.yml │ └── tasks │ └── main.yml ├── scripts ├── detect-user.sh └── lint.sh └── tests ├── __init__.py ├── ansible.requirements.yml ├── conftest.py ├── dev ├── .gitignore └── group_vars │ └── all │ └── main.yml ├── features ├── haproxy.feature ├── install.feature └── upgrade.feature ├── helpers ├── ansible.py ├── provider.py ├── terraform.py └── vagrant.py ├── playbooks ├── cni.yml ├── prepare.yml ├── roles │ └── .gitignore └── verify.yml ├── requirements.txt ├── terraform └── openstack │ ├── .gitignore │ ├── .terraform.lock.hcl │ ├── common.tf │ ├── control_plane.tf │ ├── inventory.tf │ ├── inventory.tpl │ ├── network.tf │ ├── nodes.tf │ ├── openstack.tf │ ├── variable.tf │ └── versions.tf └── test_basic.py /.ansible-lint: -------------------------------------------------------------------------------- 1 | --- 2 | exclude_paths: 3 | - ./tests/playbooks/ 4 | - ./.gitlab-ci 5 | skip_list: 6 | - '301' 7 | - no-handler 8 | - 'fqcn-builtins' 9 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | extend-ignore = E501 3 | -------------------------------------------------------------------------------- /.github/workflows/build-test-image.yml: -------------------------------------------------------------------------------- 1 | name: build-test-image 2 | on: 3 | push: 4 | paths: 5 | - 'Dockerfile' 6 | 7 | jobs: 8 | build-test-image: 9 | runs-on: ubuntu-22.04 10 | steps: 11 | - name: Checkout 12 | uses: actions/checkout@v3 13 | 14 | - name: Set up Docker Buildx 15 | uses: docker/setup-buildx-action@v2 16 | 17 | - name: Log in to registry 18 | run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin 19 | 20 | - name: Build and push 21 | uses: docker/build-push-action@v4 22 | with: 23 | push: true 24 | tags: ghcr.io/${{ github.repository }}:latest 25 | -------------------------------------------------------------------------------- /.github/workflows/run-test.yml: -------------------------------------------------------------------------------- 1 | name: run-test 2 | on: [push] 3 | 4 | env: 5 | KEEP_SERVERS_AFTER_FAIL: "false" 6 | OS_CLOUD: ${{ secrets.OS_CLOUD }} 7 | OS_DOMAIN_NAME: ${{ secrets.OS_DOMAIN_NAME }} 8 | TF_VAR_stem: ${{ github.sha }} 9 | 10 | jobs: 11 | run-test: 12 | runs-on: ubuntu-22.04 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@v3 16 | 17 | - name: Log in to registry 18 | run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} 
--password-stdin 19 | 20 | - name: Run linting tests 21 | run: docker compose run shell ./scripts/lint.sh 22 | 23 | - name: Prepare worker 24 | run: | 25 | ssh-keygen -t rsa -b 2048 -N '' -f ~/.ssh/id_rsa 26 | mkdir -p ~/.config/openstack 27 | echo "${{ secrets.OS_CLIENT_CONFIG_FILE }}" > ~/.config/openstack/clouds.yaml 28 | 29 | - name: Run tests 30 | run: docker compose run tests 31 | -------------------------------------------------------------------------------- /.github/workflows/weekly.yml: -------------------------------------------------------------------------------- 1 | name: weekly 2 | on: 3 | schedule: 4 | - cron: "0 21 * * 6" 5 | 6 | env: 7 | KEEP_SERVERS_AFTER_FAIL: "false" 8 | OS_CLOUD: ${{ secrets.OS_CLOUD }} 9 | OS_DOMAIN_NAME: ${{ secrets.OS_DOMAIN_NAME }} 10 | TF_VAR_stem: ${{ github.sha }}-weekly 11 | 12 | jobs: 13 | run-test: 14 | runs-on: ubuntu-22.04 15 | steps: 16 | - name: Checkout 17 | uses: actions/checkout@v3 18 | 19 | - name: Log in to registry 20 | run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin 21 | 22 | - name: Prepare worker 23 | run: | 24 | ssh-keygen -t rsa -b 2048 -N '' -f ~/.ssh/id_rsa 25 | mkdir -p ~/.config/openstack 26 | echo "${{ secrets.OS_CLIENT_CONFIG_FILE }}" > ~/.config/openstack/clouds.yaml 27 | 28 | - name: Run tests 29 | run: docker compose run tests --all-os 30 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | docker-compose.override.yml 3 | docker-compose.override.yaml 4 | hosts 5 | ansible.cfg 6 | __pycache__ 7 | -------------------------------------------------------------------------------- /.test-ansible/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !.gitignore 3 | -------------------------------------------------------------------------------- /.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | ignore: | 3 | .tox/ 4 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ARG DOCKER_IMAGE=docker:23.0 2 | FROM ${DOCKER_IMAGE} as docker 3 | FROM ubuntu:22.04 as base 4 | 5 | RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ 6 | build-essential \ 7 | ca-certificates \ 8 | curl \ 9 | git \ 10 | openssh-client \ 11 | python3 \ 12 | python3-dev \ 13 | python3-pip \ 14 | sshpass \ 15 | sudo \ 16 | && rm -rf /var/lib/apt/lists/* \ 17 | && ln -s /usr/bin/python3 /usr/local/bin/python 18 | RUN pip3 install --no-cache poetry==1.6.1 19 | 20 | COPY --from=docker /usr/local/bin/docker /usr/local/bin/docker 21 | # docker-compose and docker-buildx (unused) 22 | COPY --from=docker /usr/local/libexec/docker/cli-plugins/ /usr/local/libexec/docker/cli-plugins/ 23 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Enix 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, 
sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ansible-kubeadm 2 | 3 | Aims to manage kubeadm-based clusters via Ansible 4 | 5 | What ansible-kubeadm can do: 6 | - Install kubeadm on a variety of Linux distributions 7 | - Lock the kubeadm package to avoid unwanted upgrades 8 | - Init a cluster and join nodes in an idempotent manner 9 | - Upgrade a cluster in an idempotent manner (just add +1 to the minor version in the config and you're good to go!) 10 | 11 | What ansible-kubeadm expects to be already done and will not do: 12 | - Upgrade the distro 13 | - Upgrade the kernel 14 | - Install ntp 15 | - Install docker (or whatever CRI) 16 | - Disable swap 17 | - Remove unattended-upgrades 18 | - Configure the CNI 19 | 20 | ## Quickstart 21 | 22 | See [Quickstart](docs/quickstart.md) 23 | 24 | ## Configuration 25 | 26 | If you want a customized (ansible-)kubeadm experience, there are a number of variables you can use: 27 | 28 | [Variables reference](docs/variables.md) 29 | 30 | ## Flow 31 | 32 | If you're looking for what ansible-kubeadm does step-by-step, [hooks && plugins](docs/hooks_and_plugins.md) is a good place to start. 33 | 34 | ## Migration planning 35 | 36 | Long-term migration plan, [*] indicates the current phase 37 | 38 | | Reason | Phase 1 | Phase 2 | Phase 3 | 39 | |--------|---------|---------|---------| 40 | | Docker support (dockershim) has been removed from Kubernetes since 1.24 | [*] haproxy pkg used by default for proxy. Able to install compose-based proxy. Migration from compose to pkg possible | Not able to install compose-based proxy.
Migration possible | Migration phased out | 41 | 42 | 43 | ## Tips and Tricks 44 | 45 | [Tips&Tricks](docs/tips_tricks.md) 46 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | CONTROL_PLANE_COUNT = (ENV['CONTROL_PLANE_COUNT'] || 2).to_i 2 | WORKER_COUNT = (ENV['WORKER_COUNT'] || 1).to_i 3 | SKIP_ANSIBLE = ENV['SKIP_ANSIBLE'] || false 4 | BOX_IMAGE = ENV['BOX_IMAGE'] || "generic/ubuntu2204" 5 | 6 | Vagrant.configure("2") do |config| 7 | config.vm.box = BOX_IMAGE 8 | 9 | (1..CONTROL_PLANE_COUNT).each do |i| 10 | config.vm.define "control-plane-#{i}" do |node| 11 | node.vm.hostname = "control-plane-#{i}" 12 | node.vm.synced_folder ".", "/vagrant", disabled: true 13 | if ENV['NETWORK_SWITCH'] 14 | node.vm.network "public_network", bridge: ENV['NETWORK_SWITCH'] 15 | end 16 | end 17 | end 18 | (1..WORKER_COUNT).each do |i| 19 | config.vm.define "worker-#{i}" do |node| 20 | node.vm.hostname = "worker-#{i}" 21 | node.vm.synced_folder ".", "/vagrant", disabled: true 22 | if ENV['NETWORK_SWITCH'] 23 | node.vm.network "public_network", bridge: ENV['NETWORK_SWITCH'] 24 | end 25 | if i == WORKER_COUNT 26 | groups = { 27 | "kube_control_plane" => (1..CONTROL_PLANE_COUNT).map{|i| "control-plane-#{i}"}, 28 | "kube_workers" => (1..WORKER_COUNT).map{|i| "worker-#{i}"}, 29 | "kube:children" => ["kube_control_plane", "kube_workers"], 30 | "kube:vars" => { 31 | "ansible_ssh_pipelining" => true, 32 | "ansible_become" => true, 33 | } 34 | } 35 | node.vm.provision "ansible" do |inventory| 36 | inventory.playbook = "tests/playbooks/prepare.yml" 37 | inventory.limit = "all" 38 | inventory.skip_tags = "always,all" 39 | inventory.groups = groups 40 | end 41 | if not SKIP_ANSIBLE 42 | groups["kube:vars"].update({ 43 | "kubelet_config" => '{"cgroupDriver": "systemd"}', 44 | "apiserver_proxy_use_docker" => false, 45 | "control_plane_endpoint" => "127.0.0.1:7443", 46 | "kube_version" => ENV['KUBE_VERSION'] || "1.23", 47 | }) 48 | node.vm.provision "ansible" do |prepare| 49 | prepare.playbook = "tests/playbooks/prepare.yml" 50 | prepare.limit = "all" 51 | prepare.groups = groups 52 | end 53 | node.vm.provision "ansible" do |proxy| 54 | proxy.playbook = "playbooks/00_apiserver_proxy.yml" 55 | proxy.limit = "all" 56 | proxy.groups = groups 57 | end 58 | node.vm.provision "ansible" do |kubeadm| 59 | kubeadm.playbook = "playbooks/01_site.yml" 60 | kubeadm.limit = "all" 61 | kubeadm.groups = groups 62 | end 63 | end 64 | end 65 | end 66 | end 67 | end 68 | 69 | -------------------------------------------------------------------------------- /docker-compose.override.yaml.sample: -------------------------------------------------------------------------------- 1 | --- 2 | services: 3 | terraform: 4 | volumes: 5 | - ${OS_CLIENT_CONFIG_FILE}:${OS_CLIENT_CONFIG_FILE} 6 | 7 | tests: 8 | volumes: 9 | - ${HOME}/.cache:${HOME}/.cache 10 | -------------------------------------------------------------------------------- /docker-compose.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | services: 3 | terraform: 4 | image: hashicorp/terraform:${TERRAFORM_VERSION:-1.3.6} 5 | working_dir: "${PWD}/tests/terraform/openstack" 6 | entrypoint: ["${PWD}/scripts/detect-user.sh", "/bin/sh", "-c", "terraform $$@", "--"] 7 | command: ["init"] 8 | volumes: 9 | - /var/run/docker.sock:/var/run/docker.sock 10 | - ${PWD}:${PWD} 11 | - /tmp:/tmp 12 | - 
./.test-ansible:${HOME}/.ansible 13 | - ${_SSH_AUTH_SOCK:-${SSH_AUTH_SOCK:-/run}}:${_SSH_AUTH_SOCK:-${SSH_AUTH_SOCK:-/run}} 14 | - ${SSH_KEY:-${HOME}}:${SSH_KEY_MOUNT:-${SSH_KEY:-${HOME}}} 15 | environment: 16 | KEEP_SERVERS: 17 | KEEP_SERVERS_AFTER_FAIL: 18 | HOME: 19 | OS_AUTH_URL: 20 | OS_CLOUD: 21 | OS_CLIENT_CONFIG_FILE: 22 | OS_DOMAIN_NAME: 23 | OS_IDENTITY_API_VERSION: 24 | OS_INTERFACE: 25 | OS_PASSWORD: 26 | OS_PROJECT_DOMAIN_ID: 27 | OS_PROJECT_DOMAIN_NAME: 28 | OS_PROJECT_ID: 29 | OS_PROJECT_NAME: 30 | OS_TENANT_ID: 31 | OS_TENANT_NAME: 32 | OS_TOKEN: 33 | OS_REGION_NAME: 34 | OS_USER_DOMAIN_NAME: 35 | OS_USERNAME: 36 | POETRY_CACHE_DIR: "${POETRY_CACHE_DIR:-/tmp}" 37 | POETRY_VIRTUALENVS_PATH: "${POETRY_VIRTUALENVS_PATH:-/tmp}" 38 | SSH_AUTH_SOCK: "${_SSH_AUTH_SOCK:-${SSH_AUTH_SOCK:-/run}}" 39 | TF_VAR_allocate_private_net: 40 | TF_VAR_control_plane_count: 41 | TF_VAR_floating_pool: 42 | TF_VAR_image_name: 43 | TF_VAR_network_name: 44 | TF_VAR_inventory_dir: 45 | TF_VAR_ssh_key_path: 46 | TF_VAR_stem: 47 | TF_VAR_worker_count: 48 | 49 | 50 | tests: 51 | build: . 52 | image: ${RUN_TEST_IMAGE:-ghcr.io/enix/ansible-kubeadm:latest} 53 | working_dir: "${PWD}" 54 | entrypoint: [ 55 | "${PWD}/scripts/detect-user.sh", 56 | "/bin/bash", "-c", 57 | "poetry install && poetry run pytest --capture=no $$@", "--" 58 | ] 59 | command: ["-x"] 60 | extends: 61 | service: terraform 62 | 63 | shell: 64 | entrypoint: ["${PWD}/scripts/detect-user.sh"] 65 | command: ["/bin/bash"] 66 | extends: 67 | service: tests 68 | terraform_dev: 69 | extends: 70 | service: terraform 71 | environment: 72 | TF_VAR_inventory_dir: "${PWD}/tests/dev" 73 | command: ["apply", "-auto-approve"] 74 | depends_on: 75 | terraform: 76 | condition: service_completed_successfully 77 | dev: 78 | extends: 79 | service: tests 80 | environment: 81 | ANSIBLE_HOST_KEY_CHECKING: false 82 | depends_on: 83 | terraform_dev: 84 | condition: service_completed_successfully 85 | entrypoint: 86 | - "${PWD}/scripts/detect-user.sh" 87 | - "/bin/bash" 88 | - "-c" 89 | - > 90 | poetry install --with ansible && 91 | poetry run ansible-galaxy install -r tests/ansible.requirements.yml && 92 | poetry run ansible-playbook 93 | -i tests/dev/kubeadm-hosts.cfg 94 | tests/playbooks/prepare.yml 95 | playbooks/00_apiserver_proxy.yml 96 | playbooks/01-site.yml 97 | tests/playbooks/cni.yml 98 | $$@ 99 | - "--" 100 | command: [] 101 | -------------------------------------------------------------------------------- /docs/hooks_and_plugins.md: -------------------------------------------------------------------------------- 1 | A plugin allow custom code to be run at key point (*a hook*) of the ansible_kubedm playbooks. 2 | It comes with great possibilities, hence use with caution is required. 3 | 4 | ### Plugins 5 | 6 | Plugins are discovered in `kubeadm_plugins_dir` which default to `{{ inventory_dir }}/kubeadm.plugins.d`. 7 | 8 | Simply create a directory with the name of the plugin (whatever you like exept hidden one). 9 | 10 | Inside this directory create a directory per *hook* name. 11 | 12 | Finally put yaml with tasks list inside hook's directories. 13 | 14 | **Note**: Those are *tasks* like in roles not *playbooks*. 
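For illustration, a minimal sketch of what such a tasks file could contain, matching the `upgrade_os.yml` entry in the sample layout shown just below (the module calls and the `reboot_required` variable are hypothetical placeholders, any regular Ansible tasks work here):

```
# kubeadm.plugins.d/upgrade_os/post_nodes_upgrade/upgrade_os.yml
# A plain task list, just like a role's tasks/main.yml (no play or hosts header)
- name: Upgrade all OS packages after the node upgrade
  ansible.builtin.apt:
    upgrade: dist
    update_cache: true
  when: ansible_pkg_mgr == "apt"

- name: Reboot the node if a reboot was requested
  ansible.builtin.reboot:
  when: reboot_required | default(false)
```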
15 | 16 | Here is a sample layout, using the default settings for `kubeadm_plugins_dir`: 17 | 18 | ``` 19 | hosts.cfg # the inventory 20 | group_vars/ 21 | host_vars/ 22 | kubeadm.plugins.d/ 23 | upgrade_os/ 24 | post_nodes_upgrade/ # Will be run on control plane and worker nodes when an upgrade is launched 25 | subdir/ 26 | clean_old_kernel.yml # will not be loaded, but could be included 27 | upgrade_os.yml # will be loaded 28 | .upgrade_cri/ 29 | post_nodes_upgrade/ 30 | cri_update.yml # will not be loaded, because the plugin name is a hidden directory 31 | ``` 32 | 33 | All the available hooks are listed below. 34 | 35 | ## Hooks 36 | 37 | The following sections present each playbook workflow; hooks are marked with `[]`. 38 | 39 | Some hooks are present multiple times, so be careful when choosing the hook name while writing a plugin. 40 | 41 | #### 00-apiserver-proxy.yml 42 | 43 | - find IP of control plane 44 | - [pre_apiserver_proxy] 45 | - deploy apiserver proxy 46 | - [post_apiserver_proxy] 47 | 48 | In case of an upgrade to the haproxy loadbalancer (first on each control plane node, then all workers): 49 | 50 | - [pre_proxy_upgrade_haproxy] 51 | - Upgrade mechanism to haproxy 52 | - [post_proxy_upgrade_haproxy] 53 | 54 | 55 | #### 01_site.yml 56 | 57 | The main body executes: 58 | 59 | - [pre_run] 60 | - Do some checks on control plane nodes 61 | - [post_preflight_cp] 62 | - Do some checks on worker nodes 63 | - [post_preflight_nodes] 64 | - Do some initialization tasks on control plane nodes 65 | - [post_first_tasks_cp] 66 | 67 | In case no cluster is found, one will be initialized on a single control plane node: 68 | 69 | - [pre_init] 70 | - Init cluster 71 | - [post_init] 72 | 73 | Then continue with some idempotent tasks on control plane nodes: 74 | 75 | - [pre_config_update] 76 | - Create bootstrap token if required 77 | - Update kubeadm config 78 | - [post_config_update] 79 | 80 | In case an upgrade of kubernetes is required, run it now: 81 | 82 | - [pre_kube_upgrade] 83 | - Upgrade kubernetes 84 | - [post_kube_upgrade] 85 | 86 | Apply upgrade on control plane nodes (node-by-node by default): 87 | 88 | - [early_cp_upgrade, early_nodes_upgrade] 89 | - Apply upgrade on control plane nodes 90 | - [post_cp_upgrade, post_nodes_upgrade] 91 | 92 | Apply upgrade on worker nodes (node-by-node by default): 93 | 94 | - [early_workers_upgrade, early_nodes_upgrade] 95 | - Apply upgrade on worker nodes 96 | - [post_workers_upgrade, post_nodes_upgrade] 97 | 98 | Then join missing control plane nodes: 99 | 100 | - [pre_cp_join, pre_nodes_join] 101 | - Join control plane nodes 102 | - [post_cp_join, post_nodes_join] 103 | 104 | Then join missing worker nodes: 105 | 106 | - [pre_workers_join, pre_nodes_join] 107 | - Join worker nodes 108 | - [post_workers_join, post_nodes_join] 109 | 110 | Finally, execute the last hook: 111 | 112 | - [post_run] 113 | -------------------------------------------------------------------------------- /docs/quickstart.md: -------------------------------------------------------------------------------- 1 | ### Install ansible-kubeadm 2 | 3 | ``` 4 | ansible-galaxy collection install git+ssh://git@github.com/enix/ansible-kubeadm 5 | ``` 6 | 7 | Also install the docker role so we can have containers running ^^ 8 | ``` 9 | ansible-galaxy install geerlingguy.docker 10 | ``` 11 | 12 | ### Prepare inventory 13 | 14 | Create an inventory like this: 15 | 16 | ``` 17 | [kube_control_plane] 18 | kubeadm-cp-01 ansible_host=ip-cp1 19 | kubeadm-cp-02 ansible_host=ip-cp2 20 | kubeadm-cp-03 ansible_host=ip-cp3
22 | [kube_workers] 23 | kubeadm-node-01 ansible_host=ip-no1 24 | # ... more nodes 25 | 26 | [all:vars] 27 | ansible_user=ubuntu 28 | ansible_become=true 29 | ``` 30 | 31 | ### Run 32 | 33 | ``` 34 | ansible -i hosts -m include_role -a"name=geerlingguy.docker" -e 'docker_daemon_options={"exec-opts"=["native.cgroupdriver=systemd"]}' 35 | ansible-playbook -i hosts enix.kubeadm.00_apiserver_proxy.yml enix.kubeadm.01_site.yml 36 | ``` 37 | 38 | You can customize install by adding group_vars, following [Variables references](variables.md) -------------------------------------------------------------------------------- /docs/tips_tricks.md: -------------------------------------------------------------------------------- 1 | 2 | Hidden (and less stable feature), tips and tricks 3 | 4 | ### No inventory ? No problem ! 5 | 6 | ``` 7 | ansible-playbook -i , -b -k -u admin playbooks/00_inventory.yml playbooks/01_site.yml 8 | ``` 9 | 10 | 11 | ### You have openstack, you want a cluster real quick ? 12 | 13 | ``` 14 | export OS_CLOUD=openstack # and any other openstack environment variable 15 | docker compose run dev -e kube_version=1.25 16 | ``` 17 | 18 | A bit of explanations: 19 | 20 | - standard pytest argument `-k` to select the `install` test that does only install ansible-kubeadm 21 | - `--keep-servers` custom argument to keep servers after the test (default to False) 22 | - `-A` custom argument to pass any remaining arguments to ansible (here customize the version of kubernetes to install) 23 | 24 | To tear down manually when you finished 25 | 26 | ``` 27 | export OS_CLOUD=openstack 28 | docker compose run terraform destroy 29 | ``` 30 | 31 | **NOTE**: for mac os users, please add `_SSH_AUTH_SOCK=/run/host-services/ssh-auth.sock` in a `.env` file at the top level directory of the repository 32 | -------------------------------------------------------------------------------- /docs/variables.md: -------------------------------------------------------------------------------- 1 | User facing variables: 2 | 3 | For hooks where a variable-per-hook is exposed, see [hooks && plugins](hooks_and_plugins.md) 4 | 5 | | name | scope | default | usage | 6 | | --------------------------------- | ------------------- | ----------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 7 | | any_errors_fatal | playbook invocation | true | Restore the default value to keep continue despite of some host's tasks failure. Due to the nature of a linked distribitued system, this has been set as default | 8 | | kube_cp_group | playbook invocation | "kube_control_plane" | name of the ansible group for install control plane nodes | 9 | | kube_worker_group | playbook invocation | "kube_workers" | name of the ansible group for installing pure worker nodes | 10 | | upgrade_cp_serial | playbook invocation | "1" | Specify ansible batch size (https://docs.ansible.com/ansible/latest/user_guide/playbooks_strategies.html#setting-the-batch-size-with-serial) during control plane nodes upgrade phase. Default to 1 (1 node at a time) | 11 | | upgrade_worker_serial | playbook invocation | "1" | Specify ansible batch size (https://docs.ansible.com/ansible/latest/user_guide/playbooks_strategies.html#setting-the-batch-size-with-serial) during pure worker nodes upgrade phase. 
Defaul to 1 (1 node at a time ) | 12 | | apiserver_manifest | control plane | "/etc/kubernetes/manifests/kube-apiserver.yaml" | filename to stat for presence in the process to discover already running control-plane | 13 | | cluster_config | control plane | {} | config to be used by kubeadm for the `kind: CluserConfiguration` | 14 | | control_plane_endpoint | control plane | "" (let kubeadm default) | control the "controlPlaneEndpoint" entry of the cluster_config. Could also be set as part of the cluster_config. Default to nothing but ansible-kubeadm will fail if not set in case of multi-control-plane nodes cluster | 15 | | cp_health_check_bypass | control_plane | false | Bypass check on control-plane health | 16 | | enable_kubeadm_patches | control plane | true | Deploy patches and pass `kubeadm_patch_dir` to kubeadm so that patch are applied | 17 | | kube_control_plane_cidr | control plane | "" (let kubeadm default) | CIDR (eg "192.168.99.0/24") filter addresses for `_etcd_metrics_bind_address`, `_kube_apiserver_advertise_address`, `_kube_controller_manager_bind_address`, `_kube_scheduler_bind_address` | 18 | | kube_apiserver_advertise_cidr | control plane | "" (let kubeadm default) | CIDR (eg "192.168.99.0/24") filter the advertise address to `_kube_apiserver_advertise_address` (override `kube_control_plane_cidr`) | 19 | | kube_controller_manager_bind_cidr | control plane | "" (let kubeadm default) | CIDR (eg "192.168.99.0/24") filter the bind address for `_kube_controller_manager_bind_address` (override `kube_control_plane_cidr`) | 20 | | kube_scheduler_bind_cidr | control plane | "" (let kubeadm default) | CIDR (eg "192.168.99.0/24") filter the bind address for `_kube_scheduler_bind_address` (override `kube_control_plane_cidr`) | 21 | | kube_version | control plane | "{{ default_kube_version }}" | desired version of kubernetes cluster, and tooling | 22 | | kubeadm_extra_patches | control plane | {} | dictionnary containing extra kubeadm patches to deploy (key = "filename", value = "patch to template") | 23 | | kubeadm_ignore_preflight_errors | control plane | [] | list of errors passed to kubeadm during init, each element generate a `--ignore-preflight-errors={{error}}` command argument | 24 | | kubeadm_patch_dir | control plane | "/etc/kubeadm/directory" | directory containing patch for kubeadm | 25 | | kubeadm_patch_owner | control plane | "root" | owner of the patches created in `kubeadm_patch_dir` | 26 | | kubeadm_patch_group | control plane | "root" | group of the patched created in `kubeadm_patch_dir` | 27 | | kubeadm_patch_mode | control plane | "0750" | permission mode of the patches created in `kubeadm_patch_dir` | 28 | | kubeadm_patch_dir_owner | control plane | "{{ kubeadm_patch_owner }}" | owner of the directory `kubeadm_patch_dir` | 29 | | kubeadm_patch_dir_group | control plane | "{{ kubeadm_patch_group }}" | group of the directory `kubeadm_patch_dir` | 30 | | kubeadm_patch_dir_mode | control plane | "0750" | permission mode of the directory `kubeadm_patch_dir` | 31 | | kubeconfig_admin | control plane | "/etc/kubernetes/admin.conf" | filename of the kubeconfig used for interacting with kubernetes API | 32 | | kubelet_config | control plane | {} | config to be used by kubeadm for the `kind: KubeletConfiguration` | 33 | | kubeproxy_config | control plane | {} | config to be user by kubeadm for the `kind: KubeProxyConfiguration` | 34 | | kubelet_node_cidr | control plane | "" (let kubeadm default) | CIDR (eg "192.168.99.0/24") filter the address for `_kubelet_node_ip` | 35 
| | action_reasons_review_skip | nodes | false | skip the ansible `pause` task that occurs when changes are required to a running cluster | 36 | | apiserver_proxy_stack_dir | nodes | "/etc/docker-compose/apiserver-proxy" | directory of docker-compose stack for apiserver_proxy | 37 | | apiserver_proxy_port | nodes | 7443 | listen port for apiserver_proxy | 38 | | apiserver_proxy_apiserver_port | nodes | 6443 | apiserver port targeted by apiserver_proxy | 39 | | apiserver_proxy_use_docker | nodes | false | deploy apiserver_proxy via Docker. When false use haproxy for loadbalancer | 40 | | force_apt_update | nodes | false | force source list refresh | 41 | | kubeadm_plugins_dir | nodes | "{{ inventory_dir }}" | directory where to look for hooks. (Not directly, in a `kubeadm..d` subfolder | 42 | | pause_before_drain | nodes | false | Pause before the drain of the node | 43 | | pause_before_upgrade | nodes | false | Pause before the upgrade of the node | 44 | | pause_after_drain_and_node_upgrade| nodes | false | Pause after the upgrade of the node in case of a drain. Use hook if you need to run pause after upgrade unconditionnaly | 45 | | repo_refresh_time | nodes | 86400 (1 day) | Number of second after which the apt/yum cache is not considered up to date, and forced to be refresh. Set to -1 to not update the repo automatically. Set to 0 for forcing refresh (same as force_apt_update). | 46 | 47 | Internal variables: 48 | 49 | | name | scope | default | usage | 50 | |---------------------------------------|---------------------|--------------------------|----------------------------------------------------| 51 | | _control_plane | roles | false | trigger control_plane fonction of various roles (join_nodes, find_ip, packages) | 52 | | _etcd_metrics_bind_address | roles | | Make etcd bind the `_etcd_metrics_bind_address` to expose prometheus metrics | 53 | | _kube_apiserver_advertise_address | roles | | Interface object| 54 | | _kube_controller_manager_bind_address | roles | | Interface object| 55 | | _kube_scheduler_bind_address | roles | | Interface object| 56 | | _kubelet_node_ip | roles | | Interface object| 57 | | _apiserver_proxy_haproxy_version | nodes | "2.6.*" | HAProxy version to install via package for apiserver_proxy | 58 | -------------------------------------------------------------------------------- /galaxy.yml: -------------------------------------------------------------------------------- 1 | namespace: "enix" 2 | name: "kubeadm" 3 | version: "1.0.12" 4 | readme: "README.md" 5 | authors: 6 | - "Zempashi " 7 | license: 8 | - "MIT" 9 | tags: 10 | - kubeadm 11 | - kubernetes 12 | repository: "https://gitlab.enix.io/kubernetes/ansible-kubeadm" 13 | -------------------------------------------------------------------------------- /playbooks/00-apiserver-proxy.yml: -------------------------------------------------------------------------------- 1 | 00_apiserver_proxy.yml -------------------------------------------------------------------------------- /playbooks/00-inventory.yml: -------------------------------------------------------------------------------- 1 | 00_inventory.yml -------------------------------------------------------------------------------- /playbooks/00_apiserver_proxy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: '{{ kube_cp_group|default("kube_control_plane") }}:{{ kube_worker_group|default("kube_workers") }}' 3 | gather_facts: false 4 | any_errors_fatal: '{{ any_errors_fatal|default(true) }}' 5 | tags: 
['always', 'plugins'] 6 | roles: 7 | - role: hooks_plugins_register 8 | 9 | - hosts: '{{ kube_cp_group|default("kube_control_plane") }}' 10 | any_errors_fatal: '{{ any_errors_fatal|default(true) }}' 11 | vars: 12 | _control_plane: true 13 | roles: 14 | - role: find_ip 15 | 16 | - hosts: '{{ kube_cp_group|default("kube_control_plane") }}:{{ kube_worker_group|default("kube_workers") }}' 17 | any_errors_fatal: '{{ any_errors_fatal|default(true) }}' 18 | pre_tasks: 19 | - include_role: 20 | name: hooks_call 21 | vars: 22 | kubeadm_hook_list: ['pre_apiserver_proxy'] 23 | roles: 24 | - role: apiserver_haproxy 25 | when: apiserver_proxy_use_docker|bool in [none, false] 26 | - role: apiserver_docker 27 | when: apiserver_proxy_use_docker|bool == true 28 | tasks: 29 | - include_role: 30 | name: hooks_call 31 | vars: 32 | kubeadm_hook_list: ['post_apiserver_proxy'] 33 | 34 | - hosts: 'haproxy_upgrade_group:&{{ kube_cp_group|default("kube_control_plane") }}' 35 | serial: '{{ upgrade_cp_serial|default(1) }}' 36 | any_errors_fatal: '{{ any_errors_fatal|default(true) }}' 37 | pre_tasks: 38 | - include_role: 39 | name: hooks_call 40 | vars: 41 | kubeadm_hook_list: ['pre_proxy_upgrade_haproxy'] 42 | roles: 43 | - role: apiserver_haproxy_upgrade 44 | tasks: 45 | - include_role: 46 | name: hooks_call 47 | vars: 48 | kubeadm_hook_list: ['post_proxy_upgrade_haproxy'] 49 | 50 | - hosts: 'haproxy_upgrade_group:&{{ kube_worker_group|default("kube_workers") }}' 51 | serial: '{{ upgrade_worker_serial|default(1) }}' 52 | any_errors_fatal: '{{ any_errors_fatal|default(true) }}' 53 | pre_tasks: 54 | - include_role: 55 | name: hooks_call 56 | vars: 57 | kubeadm_hook_list: ['pre_proxy_upgrade_haproxy'] 58 | roles: 59 | - role: apiserver_haproxy_upgrade 60 | tasks: 61 | - include_role: 62 | name: hooks_call 63 | vars: 64 | kubeadm_hook_list: ['post_proxy_upgrade_haproxy'] 65 | -------------------------------------------------------------------------------- /playbooks/00_inventory.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | any_errors_fatal: '{{ any_errors_fatal|default(true) }}' 4 | gather_facts: false 5 | roles: 6 | - role: discover 7 | -------------------------------------------------------------------------------- /playbooks/01-site.yml: -------------------------------------------------------------------------------- 1 | 01_site.yml -------------------------------------------------------------------------------- /playbooks/01_site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: '{{ kube_cp_group|default("kube_control_plane") }}:{{ kube_worker_group|default("kube_workers") }}' 3 | any_errors_fatal: '{{ any_errors_fatal|default(true) }}' 4 | gather_facts: false 5 | tags: ['always', 'plugins'] 6 | roles: 7 | - role: hooks_plugins_register 8 | 9 | - hosts: '{{ kube_cp_group|default("kube_control_plane") }}' 10 | any_errors_fatal: '{{ any_errors_fatal|default(true) }}' 11 | pre_tasks: 12 | - include_role: 13 | name: hooks_call 14 | vars: 15 | kubeadm_hook_list: ['pre_run'] 16 | roles: 17 | - role: find_ip 18 | vars: 19 | _control_plane: true 20 | - role: kubeadm_configs_compute 21 | - role: preflight_check_cp 22 | tasks: 23 | - include_role: 24 | name: hooks_call 25 | vars: 26 | kubeadm_hook_list: ['post_preflight_cp'] 27 | 28 | - hosts: '{{ kube_cp_group|default("kube_control_plane") }}:{{ kube_worker_group|default("kube_workers") }}' 29 | any_errors_fatal: '{{ any_errors_fatal|default(true) 
}}' 30 | roles: 31 | - role: find_ip 32 | - role: preflight_check_nodes 33 | tasks: 34 | - include_role: 35 | name: hooks_call 36 | vars: 37 | kubeadm_hook_list: ['post_preflight_nodes'] 38 | 39 | - hosts: '{{ kube_cp_group|default("kube_control_plane") }}:{{ kube_worker_group|default("kube_workers") }}' 40 | any_errors_fatal: '{{ any_errors_fatal|default(true) }}' 41 | gather_facts: false 42 | roles: 43 | - role: process_reasons 44 | 45 | - hosts: '{{ kube_cp_group|default("kube_control_plane") }}' 46 | any_errors_fatal: '{{ any_errors_fatal|default(true) }}' 47 | gather_facts: false 48 | roles: 49 | - role: control_plane_patches 50 | tasks: 51 | - include_role: 52 | name: hooks_call 53 | vars: 54 | kubeadm_hook_list: ['post_first_tasks_cp'] 55 | 56 | - hosts: cp_init 57 | any_errors_fatal: '{{ any_errors_fatal|default(true) }}' 58 | gather_facts: false 59 | vars: 60 | _control_plane: true 61 | pre_tasks: 62 | - include_role: 63 | name: hooks_call 64 | vars: 65 | kubeadm_hook_list: ['pre_init'] 66 | roles: 67 | - role: packages 68 | - role: init_cp 69 | tasks: 70 | - include_role: 71 | name: hooks_call 72 | vars: 73 | kubeadm_hook_list: ['post_init'] 74 | 75 | - hosts: '{{ kube_cp_group|default("kube_control_plane") }}' 76 | any_errors_fatal: '{{ any_errors_fatal|default(true) }}' 77 | gather_facts: false 78 | pre_tasks: 79 | - include_role: 80 | name: hooks_call 81 | vars: 82 | kubeadm_hook_list: ['pre_config_update'] 83 | roles: 84 | - role: bootstrap_token 85 | - role: kubeadm_configs_update 86 | tasks: 87 | - include_role: 88 | name: hooks_call 89 | vars: 90 | kubeadm_hook_list: ['post_config_update'] 91 | 92 | # This has to be overly cautious on package upgade 93 | - hosts: cp_upgrade 94 | any_errors_fatal: '{{ any_errors_fatal|default(true) }}' 95 | gather_facts: false 96 | pre_tasks: 97 | - include_role: 98 | name: hooks_call 99 | vars: 100 | kubeadm_hook_list: ['pre_kube_upgrade'] 101 | roles: 102 | - role: packages 103 | vars: 104 | kubeadm_pkgs: true 105 | node_pkgs: false 106 | - role: upgrade_cp 107 | - role: packages 108 | vars: 109 | kubeadm_pkgs: false 110 | node_pkgs: true 111 | tasks: 112 | - include_role: 113 | name: hooks_call 114 | vars: 115 | kubeadm_hook_list: ['post_kube_upgrade'] 116 | 117 | # Upgrade conrol-plane nodes 118 | - name: 'Upgrade to control plane nodes' 119 | hosts: '{{ kube_cp_group|default("kube_control_plane") }}:&nodes_upgrade' 120 | any_errors_fatal: '{{ any_errors_fatal|default(true) }}' 121 | serial: '{{ upgrade_cp_serial|default(1) }}' 122 | gather_facts: false 123 | vars: 124 | _control_plane: true 125 | pre_tasks: 126 | - include_role: 127 | name: hooks_call 128 | vars: 129 | kubeadm_hook_list: ['early_cp_upgrade', 'early_nodes_upgrade'] 130 | roles: 131 | - role: packages 132 | vars: 133 | kubeadm_pkgs: true 134 | node_pkgs: false 135 | - role: upgrade_nodes 136 | - role: packages 137 | vars: 138 | kubeadm_pkgs: false 139 | node_pkgs: true 140 | tasks: 141 | - include_role: 142 | name: hooks_call 143 | vars: 144 | kubeadm_hook_list: ['post_cp_upgrade', 'post_nodes_upgrade'] 145 | 146 | # Upgrade worker nodes 147 | - name: 'Upgrade to workers nodes' 148 | hosts: '{{ kube_worker_group|default("kube_workers") }}:&nodes_upgrade' 149 | any_errors_fatal: '{{ any_errors_fatal|default(true) }}' 150 | serial: '{{ upgrade_worker_serial|default(1) }}' 151 | gather_facts: false 152 | pre_tasks: 153 | - include_role: 154 | name: hooks_call 155 | vars: 156 | kubeadm_hook_list: ['early_workers_upgrade', 'early_nodes_upgrade'] 157 | roles: 158 | - role: 
packages 159 | vars: 160 | kubeadm_pkgs: true 161 | node_pkgs: false 162 | - role: upgrade_nodes 163 | - role: packages 164 | vars: 165 | kubeadm_pkgs: false 166 | node_pkgs: true 167 | tasks: 168 | - include_role: 169 | name: hooks_call 170 | vars: 171 | kubeadm_hook_list: ['post_workers_upgrade', 'post_nodes_upgrade'] 172 | 173 | # Join control-plane nodes 174 | - name: 'Join new control plane nodes' 175 | hosts: '{{ kube_cp_group|default("kube_control_plane") }}' 176 | any_errors_fatal: '{{ any_errors_fatal|default(true) }}' 177 | gather_facts: false 178 | vars: 179 | _control_plane: true 180 | pre_tasks: 181 | - include_role: 182 | name: hooks_call 183 | vars: 184 | kubeadm_hook_list: ['pre_cp_join', 'pre_nodes_join'] 185 | roles: 186 | - role: find_ip 187 | - role: packages 188 | - role: control_plane_patches 189 | - role: join_nodes 190 | - role: user_kubeconfig 191 | tasks: 192 | - include_role: 193 | name: hooks_call 194 | vars: 195 | kubeadm_hook_list: ['post_cp_join', 'post_nodes_join'] 196 | 197 | # Join worker nodes 198 | - name: 'Join new workers nodes' 199 | hosts: '{{ kube_worker_group|default("kube_workers") }}' 200 | any_errors_fatal: '{{ any_errors_fatal|default(true) }}' 201 | gather_facts: false 202 | pre_tasks: 203 | - include_role: 204 | name: hooks_call 205 | vars: 206 | kubeadm_hook_list: ['pre_workers_join', 'pre_nodes_join'] 207 | roles: 208 | - role: find_ip 209 | - role: packages 210 | - role: join_nodes 211 | tasks: 212 | - include_role: 213 | name: hooks_call 214 | vars: 215 | kubeadm_hook_list: ['post_workers_join', 'post_nodes_join'] 216 | 217 | - name: 'Finally executing post_run hook on all hosts' 218 | hosts: '{{ kube_cp_group|default("kube_control_plane") }}:{{ kube_worker_group|default("kube_workers") }}' 219 | any_errors_fatal: '{{ any_errors_fatal|default(true) }}' 220 | gather_facts: false 221 | tasks: 222 | - include_role: 223 | name: hooks_call 224 | vars: 225 | kubeadm_hook_list: ['post_run'] 226 | -------------------------------------------------------------------------------- /playbooks/02-node-upgrade.yml: -------------------------------------------------------------------------------- 1 | 02_node_upgrade.yml -------------------------------------------------------------------------------- /playbooks/02_node_upgrade.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: '{{ _nodes_upgrade|default("nodes_upgrade") }}' 3 | serial: '{{ _upgrade_serial|default(1) }}' 4 | gather_facts: false 5 | roles: 6 | - role: find_ip 7 | - role: packages 8 | vars: 9 | kubeadm_pkgs: true 10 | node_pkgs: false 11 | - role: control_plane_patches 12 | - role: upgrade_nodes 13 | - role: packages 14 | vars: 15 | kubeadm_pkgs: false 16 | node_pkgs: true 17 | -------------------------------------------------------------------------------- /playbooks/99-change-cgroup-driver.yml: -------------------------------------------------------------------------------- 1 | 99_change_cgroup_driver.yml -------------------------------------------------------------------------------- /playbooks/99_change_cgroup_driver.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Before running this playbook, please: 3 | # 1/ Adjust kube-proxy taint to NOT tolerate "foo" taint 4 | # Don't forget to at least tolerate master or you will loose service connectivity on master nodes 5 | # ``` 6 | # - effect: NoSchedule 7 | # key: node-role.kubernetes.io/master 8 | # ``` 9 | # 10 | # 2/ Modify 
kube-system/kubelet-config-1.x configmap to change "cgroupDriver" to the value "systemd" 11 | 12 | 13 | - hosts: '{{ kube_cp_group|default("kube_control_plane") }}:{{ kube_worker_group|default("kube_workers") }}' 14 | serial: '{{ _upgrade_serial|default(1) }}' 15 | vars: 16 | reboot_node: true 17 | delete_local_data: true 18 | pre_tasks: 19 | 20 | - name: 'create backup dir' 21 | command: mkdir -p /etc/kubernetes/manifests_backup/ # noqa 302 22 | args: 23 | removes: /etc/kubernetes/manifests/kube-apiserver.yaml 24 | 25 | - name: 'move static file' 26 | shell: mv /etc/kubernetes/manifests/* /etc/kubernetes/manifests_backup/ 27 | args: 28 | removes: /etc/kubernetes/manifests/kube-apiserver.yaml 29 | 30 | - name: 'taint node' 31 | command: kubectl taint node {{ inventory_hostname }} foo=bar:NoExecute --overwrite=true 32 | delegate_to: '{{ groups[kube_cp_group]|first }}' 33 | roles: 34 | - role: drain_nodes 35 | tasks: 36 | - name: 'verify docker list' 37 | command: docker ps -q 38 | register: docker_ps 39 | check_mode: false 40 | changed_when: false 41 | failed_when: docker_ps.stdout|length != 0 42 | retries: 30 43 | until: docker_ps is not failed 44 | 45 | - name: 'upgrade kubelet config' 46 | command: kubeadm upgrade node 47 | 48 | - name: 'stop kubelet' 49 | service: 50 | name: kubelet 51 | state: stopped 52 | 53 | - name: 'get /etc/docker/daemon.json' 54 | slurp: 55 | src: /etc/docker/daemon.json 56 | register: docker_daemon_json 57 | 58 | - name: 'add croup-driver' 59 | copy: 60 | dest: /etc/docker/daemon.json 61 | content: | 62 | {{ daemon_json|to_nice_json(indent=2) }} 63 | mode: 0644 64 | owner: root 65 | group: root 66 | vars: 67 | daemon_json: >- 68 | {%- set daemon_json = docker_daemon_json.content|b64decode|from_json -%} 69 | {%- set _ = daemon_json.update({ 70 | "exec-opts": ["native.cgroupdriver=systemd"] 71 | }) -%} 72 | {{ daemon_json }} 73 | 74 | - name: 'remove old pods informations' 75 | shell: rm -rf /var/lib/kubelet/pods/* # noqa 302 76 | 77 | - name: 'move back static pods to run when kubelet will restart' 78 | shell: mv /etc/kubernetes/manifests_backup/* /etc/kubernetes/manifests/ 79 | args: 80 | removes: /etc/kubernetes/manifests_backup 81 | 82 | - name: 'untaint node' 83 | command: kubectl taint node {{ inventory_hostname }} foo:NoExecute- 84 | delegate_to: '{{ groups[kube_cp_group]|first }}' 85 | -------------------------------------------------------------------------------- /playbooks/roles: -------------------------------------------------------------------------------- 1 | ../roles -------------------------------------------------------------------------------- /plugins/filter/bootstrap_token.py: -------------------------------------------------------------------------------- 1 | import base64 2 | 3 | try: 4 | import arrow 5 | 6 | HAS_ARROW = True 7 | except ImportError: 8 | HAS_ARROW = False 9 | 10 | 11 | class FilterModule(object): 12 | def filters(self): 13 | return {"bootstrap_token_valid": self.bootstrap_token_valid} 14 | 15 | def bootstrap_token_valid(self, token_list): 16 | if not HAS_ARROW: 17 | raise ValueError("You need to install python-arrow on deployer host") 18 | return list(self._token_filter(token_list)) 19 | 20 | def _token_filter(self, token_list, now=None): 21 | """Return valid token in a token list 22 | 23 | >>> f = FilterModule() 24 | >>> list(f._token_filter([{'data': {'expiration': 'MjAxNy0wMy0xMFQwMzoyMjoxMVoK'}}], '2017-03-10T02:22:11Z')) 25 | [{'data': {'expiration': 'MjAxNy0wMy0xMFQwMzoyMjoxMVoK'}}] 26 | >>> 
f.bootstrap_token_valid([{'data': {'expiration': 'MjAxNy0wMy0xMFQwMzoyMjoxMVoK'}}]) 27 | [] 28 | """ 29 | if now: 30 | threshold = arrow.get(now) 31 | else: 32 | threshold = arrow.utcnow() 33 | for token in token_list: 34 | try: 35 | if ( 36 | arrow.get(base64.b64decode(token["data"]["expiration"]).decode("utf-8")) 37 | >= threshold 38 | ): 39 | yield token 40 | except KeyError: 41 | continue 42 | 43 | 44 | if __name__ == "__main__": 45 | import doctest 46 | 47 | doctest.testmod() 48 | -------------------------------------------------------------------------------- /plugins/modules/kubectl.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import json 4 | 5 | from ansible.module_utils.basic import AnsibleModule 6 | 7 | 8 | class Kubectl(object): 9 | def __init__( 10 | self, 11 | namespace, 12 | resource_type, 13 | resource_definition, 14 | state, 15 | name, 16 | extra_args=None, 17 | kubeconfig=None, 18 | verify_ssl=True, 19 | ): 20 | self.namespace = namespace 21 | self.resource_type = resource_type 22 | self.name = name 23 | self.resource_definition = resource_definition 24 | self.state = state 25 | self.kubeconfig = kubeconfig 26 | self.extra_args = extra_args 27 | self.verify_ssl = verify_ssl 28 | self._kubectl = None 29 | 30 | @property 31 | def kubectl(self): 32 | if not self._kubectl: 33 | raise KubectlError("No kubectl binary has been found !") 34 | return self._kubectl 35 | 36 | @kubectl.setter 37 | def kubectl(self, binary): 38 | self._kubectl = binary 39 | 40 | def execute(self): 41 | if self.state in ["get", "facts"]: 42 | return self.get() 43 | 44 | def get(self): 45 | if not self.resource_type: 46 | self.module.fail_json(msg="resource_type is required to query cluster") 47 | cmd = ["get", self.resource_type, "-o", "json"] 48 | if self.name: 49 | cmd.append(self.name) 50 | out = self._run_kubectl(cmd) 51 | if self.name: 52 | return {"item": json.loads(out)} 53 | else: 54 | return {"items": json.loads(out)["items"]} 55 | 56 | def _run_kubectl(self, cmd, stdin=None): 57 | args = [self.kubectl] 58 | if self.kubeconfig: 59 | args.append("--kubeconfig=" + self.kubeconfig) 60 | if not self.verify_ssl: 61 | args.append("--insecure-skip-tls-verify=true") 62 | if self.namespace: 63 | args.append("--namespace=" + self.namespace) 64 | args.extend(cmd) 65 | if self.extra_args: 66 | args.extend(self.extra_args) 67 | try: 68 | rc, out, err = self.run_command(args, data=stdin) 69 | if rc != 0: 70 | self.fail_json( 71 | msg="error running kubectl (%s) command (rc=%d), out= " 72 | "'%s', err='%s'" % (" ".join(args), rc, out, err) 73 | ) 74 | except Exception as exc: 75 | self.fail_json( 76 | msg="error running kubectl (%s) command: %s" 77 | % (" ".join(args), str(exc)) 78 | ) 79 | return out 80 | 81 | 82 | class KubectlError(Exception): 83 | """Error from Kubectl Module""" 84 | 85 | 86 | class KubectlAnsible(Kubectl, AnsibleModule): 87 | def __init__(self): 88 | AnsibleModule.__init__( 89 | self, 90 | argument_spec=dict( 91 | namespace=dict(type="str"), 92 | resource_type=dict(type="str"), 93 | name=dict(type="str"), 94 | resource_definition=dict(type="list"), 95 | state=dict( 96 | default="present", choice=["present", "facts", "get", "absent"] 97 | ), 98 | binary=dict(type="str"), 99 | kubeconfig=dict(type="str"), 100 | extra_args=dict(type="list"), 101 | verify_ssl=dict(type="bool", default=True), 102 | ), 103 | supports_check_mode=True, 104 | ) 105 | binary = self.params.pop("binary") 106 | Kubectl.__init__(self, 
**self.params) 107 | if binary is None: 108 | self.kubectl = self.get_bin_path("kubectl", True) 109 | else: 110 | self.kubectl = binary 111 | 112 | 113 | def main(): 114 | module = KubectlAnsible() 115 | try: 116 | res_dict = module.execute() 117 | except KubectlError as exc: 118 | module.fail_json(msg=exc.args[0]) 119 | else: 120 | module.exit_json(**res_dict) 121 | 122 | 123 | if __name__ == "__main__": 124 | main() 125 | -------------------------------------------------------------------------------- /plugins/modules/pkg_version_match.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import fnmatch 4 | 5 | from ansible.module_utils.basic import AnsibleModule 6 | 7 | try: 8 | import apt 9 | 10 | HAS_APT = True 11 | except ImportError: 12 | HAS_APT = False 13 | 14 | 15 | class PkgVersionMatch(object): 16 | def __init__(self, name, version=None): 17 | self.name = name 18 | self.version = version 19 | 20 | def execute(self): 21 | if HAS_APT: 22 | version = self.find_apt() 23 | return {"version": version} 24 | 25 | def find_apt(self): 26 | cache = apt.Cache() 27 | try: 28 | pkg = cache[self.name] 29 | except KeyError: 30 | raise PkgVersionMatchError( 31 | "package '{}' cannot be found in database".format(self.name) 32 | ) 33 | if not self.version: 34 | return pkg.candidate.version 35 | if "*" in self.version: 36 | match_version = self.version 37 | else: 38 | match_version = "{}*".format(self.version) 39 | for version in pkg.versions: 40 | if fnmatch.fnmatch(version.version, match_version): 41 | return version.version 42 | raise PkgVersionMatchError( 43 | "Can't found matching version '{}' of package '{}' in database".format( 44 | self.name, self.version 45 | ) 46 | ) 47 | 48 | 49 | class PkgVersionMatchError(Exception): 50 | """Error from PkgVersionMatch Module""" 51 | 52 | 53 | class PkgVersionMatchAnsible(AnsibleModule, PkgVersionMatch): 54 | def __init__(self, *args, **kwargs): 55 | AnsibleModule.__init__( 56 | self, 57 | argument_spec=dict( 58 | name=dict(required=True, type="str"), 59 | version=dict(required=False, type="str", default=None), 60 | ), 61 | supports_check_mode=True, 62 | ) 63 | if not HAS_APT: 64 | self.fail_json(msg="Install python-apt") 65 | PkgVersionMatch.__init__(self, **self.params) 66 | 67 | 68 | def main(): 69 | module = PkgVersionMatchAnsible() 70 | try: 71 | res_dict = module.execute() 72 | except PkgVersionMatchError as exc: 73 | module.fail_json(msg=exc.args[0]) 74 | else: 75 | module.exit_json(**res_dict) 76 | 77 | 78 | if __name__ == "__main__": 79 | main() 80 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "ansible-kubeadm" 3 | version = "0.1.0" 4 | description = "Aims to manage kubeadm based cluster via ansible" 5 | authors = ["Enix Dev "] 6 | readme = "README.md" 7 | 8 | [tool.poetry.dependencies] 9 | python = "^3.8.1" 10 | arrow = "^1.2.3" 11 | netaddr = "^0.9.0" 12 | 13 | [tool.poetry.group.tests.dependencies] 14 | pytest = "^7.2.0" 15 | pytest-bdd = "^6.1.1" 16 | ansible-runner = "^2.3.1" 17 | pytest-virtualenv = {git = "https://github.com/man-group/pytest-plugins.git", subdirectory = "pytest-virtualenv"} 18 | python-vagrant = "^1.0.0" 19 | pyyaml = "^6.0" 20 | tenacity = "^8.2.2" 21 | 22 | [tool.poetry.group.lint.dependencies] 23 | yamllint = "^1.29.0" 24 | black = "^23.1.0" 25 | isort = "^5.12.0" 26 | flake8 = "^6.0.0" 27 
| 28 | [tool.poetry.group.ansible.dependencies] 29 | ansible = ">=6" 30 | 31 | [build-system] 32 | requires = ["poetry-core"] 33 | build-backend = "poetry.core.masonry.api" 34 | 35 | [tool.isort] 36 | profile = "black" 37 | -------------------------------------------------------------------------------- /roles/apiserver_docker/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiserver_proxy_stack_dir: '/etc/docker-compose/apiserver-proxy' 3 | apiserver_proxy_port: 7443 4 | apiserver_proxy_apiserver_port: 6443 5 | -------------------------------------------------------------------------------- /roles/apiserver_docker/files/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '3' 3 | services: 4 | loadbalancer: 5 | image: enix/kubernetes-api-loadbalancer:master 6 | network_mode: host 7 | restart: always 8 | env_file: 9 | - ./apiserver-proxy-endpoints.env 10 | -------------------------------------------------------------------------------- /roles/apiserver_docker/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: apiserver_docker_common 4 | - role: common_vars 5 | -------------------------------------------------------------------------------- /roles/apiserver_docker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Include OS specific variables' 3 | include_vars: '{{ item }}' 4 | with_fileglob: 5 | - 'vars/os_{{ ansible_distribution }}_{{ ansible_distribution_release }}.yml' 6 | - 'vars/os_{{ ansible_distribution }}.yml' 7 | - 'vars/os_{{ ansible_os_family }}.yml' 8 | 9 | - name: 'install docker-python binding' 10 | package: 11 | name: 12 | - docker-compose 13 | - > 14 | {%- if ansible_python.version.major > 2 -%} 15 | {{ python3_docker }} 16 | {%- else -%} 17 | {{ python2_docker }} 18 | {%- endif -%} 19 | install_recommends: >- # docker-compose breaks docker as it recommends docker.io on debian 11 (at least) 20 | {%- if ansible_pkg_mgr == "apt" -%} 21 | false 22 | {%- else -%} 23 | {{ omit }} 24 | {%- endif -%} 25 | state: present 26 | register: compose_installed 27 | 28 | - name: 'create stack directory' 29 | file: 30 | dest: '{{ apiserver_proxy_stack_dir }}' 31 | state: directory 32 | owner: root 33 | group: root 34 | mode: 0700 35 | 36 | - name: 'Install apiserver-proxy' 37 | copy: 38 | src: docker-compose.yaml 39 | dest: '{{ apiserver_proxy_stack_dir }}/docker-compose.yaml' 40 | owner: root 41 | group: root 42 | mode: 0600 43 | 44 | - name: 'Template environement variable' 45 | template: 46 | src: apiserver-proxy-endpoints.env.j2 47 | dest: '{{ apiserver_proxy_stack_dir }}/apiserver-proxy-endpoints.env' 48 | owner: root 49 | group: root 50 | mode: 0600 51 | 52 | - name: 'Start compose stack' 53 | community.docker.docker_compose_v2: 54 | project_src: '{{ apiserver_proxy_stack_dir }}' 55 | state: present 56 | when: >- 57 | not(compose_installed is changed and ansible_check_mode) 58 | vars: 59 | ansible_python_interpreter: >- 60 | {%- if ansible_distribution == "Ubuntu" and ansible_distribution_version == "18.04" -%} 61 | /usr/bin/python2 62 | {%- else -%} 63 | {{ ansible_python.executable }} 64 | {%- endif -%} 65 | 66 | - name: 'export vars' 67 | set_fact: 68 | control_plane_endpoint: "127.0.0.1:{{ apiserver_proxy_port }}" 69 | -------------------------------------------------------------------------------- 
/roles/apiserver_docker/templates/apiserver-proxy-endpoints.env.j2: -------------------------------------------------------------------------------- 1 | {%- macro apiserver_ips() -%} 2 | {%- for server in groups[kube_cp_group] -%} 3 | {%- if hostvars[server]._kube_apiserver_advertise_address is defined -%} 4 | {{ hostvars[server]._kube_apiserver_advertise_address.ipv4.address }} 5 | {%- else -%} 6 | {{ hostvars[server].ansible_default_ipv4.address }} 7 | {%- endif -%} 8 | {%- if not loop.last %}{{" "}}{% endif -%} 9 | {%- endfor %} 10 | {%- endmacro -%} 11 | K8S_LOADBALANCER_PORT={{ apiserver_proxy_port }} 12 | K8S_MASTERS_IPS={{ apiserver_ips() }} 13 | -------------------------------------------------------------------------------- /roles/apiserver_docker/vars/os_Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | python2_docker: python-docker 3 | python3_docker: python3-docker 4 | docker_compose: docker-compose 5 | -------------------------------------------------------------------------------- /roles/apiserver_docker_common/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiserver_proxy_stack_dir: '/etc/docker-compose/apiserver-proxy' 3 | apiserver_proxy_use_docker: # Not true, nor false to force user to make a choice 4 | -------------------------------------------------------------------------------- /roles/apiserver_haproxy/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiserver_proxy_apiserver_port: 6443 3 | _apiserver_proxy_haproxy_version: '2.6.*' 4 | force_apt_update: false 5 | haproxy_repo_url: '{{ haproxy_upstream_repo_url }}' 6 | haproxy_gpg_url: '{{ haproxy_upstream_gpg_url }}' 7 | 8 | # From apiserver_docker 9 | apiserver_proxy_stack_dir: '/etc/docker-compose/apiserver-proxy' 10 | -------------------------------------------------------------------------------- /roles/apiserver_haproxy/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Reload HAProxy' 3 | service: 4 | name: haproxy 5 | state: reloaded 6 | when: 7 | - apiserver_docker_compose.stat.exists != True 8 | - not(_haproxy_repo_just_added is changed and ansible_check_mode) 9 | -------------------------------------------------------------------------------- /roles/apiserver_haproxy/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: packages_common 4 | - role: apiserver_docker_common 5 | - role: common_vars 6 | -------------------------------------------------------------------------------- /roles/apiserver_haproxy/tasks/haproxy_repo_Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Install software-properties-common if installing ppa' 3 | apt: 4 | name: software-properties-common 5 | when: haproxy_repo_url.startswith('ppa:') 6 | 7 | - name: 'Create directory to store keys' 8 | file: 9 | dest: /etc/apt/keyrings 10 | state: directory 11 | register: _apt_keyring_directory 12 | when: haproxy_gpg_url is not none and haproxy_gpg_url|length > 0 13 | 14 | - name: 'Add HAProxy repo signing key' 15 | ansible.builtin.get_url: 16 | url: '{{ haproxy_gpg_url }}' 17 | dest: /etc/apt/keyrings/haproxy.asc 18 | owner: root 19 | group: root 20 | mode: 0644 21 | force: true 22 | when: 23 | - haproxy_gpg_url is not none and haproxy_gpg_url|length > 
0 24 | - not(_apt_keyring_directory is changed and ansible_check_mode) 25 | 26 | - name: 'Add the HAProxy repository (in dry-run to check change)' 27 | apt_repository: 28 | repo: '{{ haproxy_repo_url }}' 29 | filename: haproxy 30 | state: present 31 | update_cache: false 32 | check_mode: true 33 | register: _haproxy_repo_dry_run 34 | 35 | - name: 'Remove repository file if modification exists.' 36 | file: 37 | dest: /etc/apt/sources.list.d/haproxy.list 38 | state: absent 39 | when: _haproxy_repo_dry_run is changed 40 | 41 | - name: 'Add the HAProxy repository' 42 | apt_repository: 43 | repo: '{{ haproxy_repo_url }}' 44 | filename: haproxy 45 | state: present 46 | update_cache: false 47 | register: _haproxy_repo_just_added 48 | 49 | - name: 'refresh source list' 50 | apt: 51 | update_cache: true 52 | when: >- 53 | _haproxy_repo_just_added is changed 54 | or force_apt_update|bool 55 | -------------------------------------------------------------------------------- /roles/apiserver_haproxy/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Detect if compose-based apiserver proxy exists' 3 | include_tasks: upgrade_from_docker.yml 4 | 5 | - name: 'Include HAproxy OS variables' 6 | include_vars: '{{ file_vars }}' 7 | loop_control: 8 | loop_var: file_vars 9 | with_fileglob: 10 | - 'vars/os_{{ ansible_os_family }}.yml' 11 | - 'vars/os_{{ ansible_distribution }}.yml' 12 | - 'vars/os_{{ ansible_distribution }}_{{ ansible_distribution_release }}.yml' 13 | 14 | - name: 'Add HAProxy repository' 15 | include_tasks: '{{ file_tasks }}' 16 | loop_control: 17 | loop_var: file_tasks 18 | with_first_found: 19 | - 'haproxy_repo_{{ ansible_distribution }}_{{ ansible_distribution_release }}.yml' 20 | - 'haproxy_repo_{{ ansible_distribution }}.yml' 21 | - 'haproxy_repo_{{ ansible_os_family }}.yml' 22 | 23 | - name: 'Install HAProxy package' 24 | include_tasks: '{{ file_tasks }}' 25 | loop_control: 26 | loop_var: file_tasks 27 | with_first_found: 28 | - 'pkg_{{ ansible_distribution }}_{{ ansible_distribution_release }}.yml' 29 | - 'pkg_{{ ansible_distribution }}.yml' 30 | - 'pkg_{{ ansible_os_family }}.yml' 31 | 32 | - name: 'Template HAProxy config' 33 | template: 34 | src: haproxy.cfg.j2 35 | dest: /etc/haproxy/haproxy.cfg 36 | owner: haproxy 37 | group: haproxy 38 | mode: 0600 39 | notify: 'Reload HAProxy' 40 | when: 41 | - not(_haproxy_repo_just_added is changed and ansible_check_mode) 42 | 43 | - name: 'export vars' 44 | set_fact: 45 | control_plane_endpoint: "127.0.0.1:{{ apiserver_proxy_port }}" 46 | -------------------------------------------------------------------------------- /roles/apiserver_haproxy/tasks/pkg_Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Get version of haproxy package' 3 | package_facts: 4 | 5 | - name: 'find version of haproxy to install' 6 | pkg_version_match: 7 | name: haproxy 8 | version: '{{ _apiserver_proxy_haproxy_version }}' 9 | when: 10 | - not(_haproxy_repo_just_added is changed and ansible_check_mode) 11 | - packages.haproxy|default([])|select('match', _apiserver_proxy_haproxy_version)|list|length == 0 12 | register: haproxy_version 13 | 14 | - name: "Create pinning files" 15 | copy: 16 | dest: /etc/apt/preferences.d/50-haproxy 17 | content: | 18 | package: haproxy 19 | Pin: version {% if haproxy_version is not skipped -%} 20 | {{ haproxy_version.version }} 21 | {%- else -%} 22 | {{ packages.haproxy.0.version|default('dry-run-unknown') }} 23 
| {%- endif %} 24 | 25 | Pin-Priority: 1001 26 | 27 | - name: 'Install HAProxy package' 28 | apt: 29 | name: 'haproxy' 30 | state: 'present' 31 | when: not(_haproxy_repo_just_added is changed and ansible_check_mode) 32 | -------------------------------------------------------------------------------- /roles/apiserver_haproxy/tasks/upgrade_from_docker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Detect if compose stack is present" 3 | stat: 4 | name: '{{ apiserver_proxy_stack_dir }}/docker-compose.yaml' 5 | register: apiserver_docker_compose 6 | 7 | - name: "Show error when migration is needed and apiserver_proxy_use_docker is undefined" 8 | fail: 9 | msg: > 10 | An apiserver proxy based on docker-compose has been detected. Docker has been deprecated in kubernetes 1.24+, 11 | so the docker-based apiserver proxy is deprecated in ansible-kubeadm as well. 12 | If you want to keep the old docker-compose apiserver proxy, set "apiserver_proxy_use_docker: true"; 13 | if you want to migrate to the default haproxy apiserver proxy, set "apiserver_proxy_use_docker: false" (the install default) 14 | to trigger the migration. 15 | when: 16 | - apiserver_docker_compose.stat.exists 17 | - apiserver_proxy_use_docker is none 18 | 19 | - name: "Add host to upgrade group" 20 | add_host: 21 | name: "{{ item }}" 22 | group: haproxy_upgrade_group 23 | when: apiserver_docker_compose.stat.exists 24 | loop: "{{ ansible_play_hosts_all }}" 25 | -------------------------------------------------------------------------------- /roles/apiserver_haproxy/templates/haproxy.cfg.j2: -------------------------------------------------------------------------------- 1 | global 2 | nbthread 1 3 | maxconn 10000 4 | 5 | defaults 6 | mode tcp 7 | option tcpka 8 | clitcpka-idle 30s 9 | clitcpka-intvl 2s 10 | clitcpka-cnt 5 11 | srvtcpka-idle 30s 12 | srvtcpka-intvl 2s 13 | srvtcpka-cnt 5 14 | timeout queue 250ms 15 | timeout connect 2s 16 | timeout tunnel 24h 17 | timeout client 5s 18 | timeout server 5s 19 | timeout client-fin 5s 20 | timeout server-fin 5s 21 | timeout check 2s 22 | 23 | frontend api 24 | bind 127.0.0.1:{{ apiserver_proxy_port }} 25 | default_backend api 26 | 27 | backend api 28 | {% for server in groups[kube_cp_group] -%} 29 | {%- if hostvars[server]._kube_apiserver_advertise_address is defined -%} 30 | {%- set host_ip = hostvars[server]._kube_apiserver_advertise_address.ipv4.address -%} 31 | {%- else -%} 32 | {%- set host_ip = hostvars[server].ansible_default_ipv4.address -%} 33 | {%- endif %} 34 | server node{{ loop.index }} "{{ host_ip }}:{{ apiserver_proxy_apiserver_port }}" check inter 10s fastinter 2s downinter 2s 35 | {% endfor %} 36 | -------------------------------------------------------------------------------- /roles/apiserver_haproxy/vars/os_Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | _haproxy_version: '{{ _apiserver_proxy_haproxy_version | regex_replace("^(\d+[.]\d+)[.].+", "\1") }}' 3 | haproxy_upstream_repo_url: 'deb [signed-by=/etc/apt/keyrings/haproxy.asc] http://haproxy.debian.net {{ ansible_distribution_release }}-backports-{{ _haproxy_version }} main' 4 | haproxy_upstream_gpg_url: 'https://haproxy.debian.net/bernat.debian.org.gpg' 5 | -------------------------------------------------------------------------------- /roles/apiserver_haproxy/vars/os_Ubuntu.yml: -------------------------------------------------------------------------------- 1 | --- 2 |
haproxy_upstream_repo_url: 'ppa:vbernat/haproxy-{{ _haproxy_version }}' 3 | haproxy_upstream_gpg_url: 4 | -------------------------------------------------------------------------------- /roles/apiserver_haproxy_upgrade/defaults/main.yml: -------------------------------------------------------------------------------- 1 | has_docker_compose: true 2 | -------------------------------------------------------------------------------- /roles/apiserver_haproxy_upgrade/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: apiserver_docker_common 4 | -------------------------------------------------------------------------------- /roles/apiserver_haproxy_upgrade/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Upgrade to haproxy" 3 | block: 4 | - name: "Stop compose stack" 5 | community.docker.docker_compose_v2: 6 | project_src: '{{ apiserver_proxy_stack_dir }}' 7 | state: absent 8 | vars: 9 | ansible_python_interpreter: >- 10 | {%- if ansible_distribution == "Ubuntu" and ansible_distribution_version == "18.04" -%} 11 | /usr/bin/python2 12 | {%- else -%} 13 | {{ ansible_python.executable }} 14 | {%- endif -%} 15 | when: has_docker_compose|bool 16 | 17 | - name: "Reload haproxy" 18 | service: 19 | name: haproxy 20 | state: reloaded 21 | when: not(_haproxy_repo_just_added is changed and ansible_check_mode) 22 | 23 | rescue: 24 | - name: "Restart compose stack if error occured" 25 | community.docker.docker_compose_v2: 26 | project_src: '{{ apiserver_proxy_stack_dir }}' 27 | state: present 28 | vars: 29 | ansible_python_interpreter: >- 30 | {%- if ansible_distribution == "Ubuntu" and ansible_distribution_version == "18.04" -%} 31 | /usr/bin/python2 32 | {%- else -%} 33 | {{ ansible_python.executable }} 34 | {%- endif -%} 35 | when: has_docker_compose|bool 36 | 37 | - name: "Remove compose stack to finish upgrade" 38 | file: 39 | dest: "{{ apiserver_proxy_stack_dir }}" 40 | state: absent 41 | -------------------------------------------------------------------------------- /roles/bootstrap_token/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | sensitive_debug: false 3 | cluster_config: {} 4 | 5 | kubeadm_config_yaml: '/tmp/kubeadm-config-{{ansible_date_time.iso8601 }}.yaml' 6 | 7 | python2_openssl: python-openssl 8 | python3_openssl: python3-openssl 9 | -------------------------------------------------------------------------------- /roles/bootstrap_token/filter_plugins/bootstrap_token.py: -------------------------------------------------------------------------------- 1 | ../../../plugins/filter/bootstrap_token.py -------------------------------------------------------------------------------- /roles/bootstrap_token/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: kubectl_module 4 | galaxy_info: 5 | author: Julien Girardin 6 | description: Create kubeadm config and init kubernetes cluster 7 | company: Enix 8 | license: Apache 9 | min_ansible_version: 2.7 10 | platforms: 11 | - name: Ubuntu 12 | versions: 13 | - 18.04 14 | - 20.04 15 | galaxy_tags: 16 | - kubernetes 17 | - kubeadm 18 | -------------------------------------------------------------------------------- /roles/bootstrap_token/tasks/bootstrap_token.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - 
name: 'Get list of bootstrap token' 3 | kubectl: 4 | state: get 5 | resource_type: secret 6 | namespace: kube-system 7 | extra_args: '--field-selector=type=bootstrap.kubernetes.io/token' 8 | kubeconfig: /etc/kubernetes/admin.conf 9 | run_once: true 10 | delegate_to: '{{ kubeadm_host }}' 11 | register: bootstrap_tokens 12 | when: 13 | - not(found_kubectl.rc == 1 and ansible_check_mode) 14 | 15 | - name: 'Display all bootstrap tokens' 16 | debug: 17 | var: bootstrap_tokens 18 | verbosity: 1 19 | run_once: true 20 | 21 | - name: 'Filter expire token' 22 | set_fact: 23 | valid_bootstrap_tokens: >- 24 | {%- if ansible_collection_name is defined and ansible_collection_name is not none -%} 25 | {%- set filter_name = "enix.kubeadm.bootstrap_token_valid" -%} 26 | {%- else -%} 27 | {%- set filter_name = "bootstrap_token_valid" -%} 28 | {%- endif -%} 29 | {{ [bootstrap_tokens_dry_run["items"] 30 | |selectattr('data.usage-bootstrap-authentication', 'defined')|list] 31 | |map(filter_name)|first }} 32 | run_once: true 33 | vars: 34 | # "items" cannot be defaulted easily as jinja fallback on using method instead 35 | bootstrap_tokens_dry_run: "{{ {'items': []}|combine(bootstrap_tokens) }}" 36 | 37 | - name: 'Display valid bootstrap tokens' 38 | debug: 39 | var: valid_bootstrap_tokens 40 | verbosity: 1 41 | run_once: true 42 | -------------------------------------------------------------------------------- /roles/bootstrap_token/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Select candidate host to run init' 3 | set_fact: 4 | kubeadm_host: '{{ groups.cp_running|default(ansible_play_hosts, true)|first }}' 5 | 6 | - name: 'Retrieve a valid bootstrap token' 7 | import_tasks: bootstrap_token.yml 8 | 9 | - name: 'Create bootstrap token if no valid found' 10 | command: kubeadm token create 11 | run_once: true 12 | delegate_to: '{{ kubeadm_host }}' 13 | when: valid_bootstrap_tokens|length == 0 14 | 15 | - name: 'Retrieve a valid bootstrap token' 16 | import_tasks: bootstrap_token.yml 17 | when: valid_bootstrap_tokens|length == 0 18 | 19 | # TODO: fix two following tasks to be more platform dependent 20 | - name: 'Install python-openssl' 21 | package: 22 | name: >- 23 | {%- if ansible_python.version.major > 2 -%} 24 | {{ python3_openssl }} 25 | {%- else -%} 26 | {{ python2_openssl }} 27 | {%- endif -%} 28 | state: present 29 | run_once: true 30 | delegate_to: '{{ kubeadm_host }}' 31 | 32 | - name: 'Get info from ca' 33 | openssl_certificate_info: 34 | path: /etc/kubernetes/pki/ca.crt 35 | run_once: true 36 | delegate_to: '{{ kubeadm_host }}' 37 | register: ca_info 38 | when: not(groups.cp_init is defined and ansible_check_mode) 39 | 40 | - name: 'Display Kubernetes CA(cert) properties' 41 | debug: 42 | var: ca_info 43 | verbosity: 1 44 | run_once: true 45 | 46 | - name: 'List current nodes' 47 | kubectl: 48 | state: get 49 | resource_type: nodes 50 | kubeconfig: /etc/kubernetes/admin.conf 51 | run_once: true 52 | delegate_to: '{{ kubeadm_host }}' 53 | register: current_nodes 54 | when: 55 | - not(found_kubectl.rc == 1 and ansible_check_mode) 56 | 57 | - name: 'Compute list of "to-join" nodes' 58 | set_fact: 59 | # "items" cannot be defaulted easily as jinja fallback on using method instead 60 | to_join_cp: >- 61 | {{ ansible_play_hosts|difference( 62 | ({"items": []}|combine(current_nodes))["items"]|map(attribute="metadata.name")) }} 63 | cert_encryption_key: >- 64 | {{ lookup('password', '/dev/null length=64 chars=hexdigits') }} 65 | 
run_once: true 66 | 67 | - name: 'Display list of node that need to be joined' 68 | debug: 69 | var: to_join_cp 70 | verbosity: 1 71 | run_once: true 72 | 73 | - name: 'Upload certificates if control-plane node need to be joined' 74 | command: >- 75 | kubeadm init phase upload-certs 76 | --upload-certs 77 | --certificate-key {{ cert_encryption_key }} 78 | environment: 79 | KUBECONFIG: '/etc/kubernetes/admin.conf' 80 | no_log: '{{ sensitive_debug|bool }}' 81 | run_once: true 82 | delegate_to: '{{ kubeadm_host }}' 83 | when: to_join_cp|length > 0 84 | -------------------------------------------------------------------------------- /roles/bootstrap_token/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | default_kubeadm_config: 3 | apiVersion: kubeadm.k8s.io/v1beta2 4 | kind: ClusterConfiguration 5 | -------------------------------------------------------------------------------- /roles/common_vars/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | enable_kubeadm_patches: true 3 | kubeadm_ignore_preflight_errors: [] 4 | kubeadm_patch_dir: /etc/kubernetes/patches 5 | kube_cp_group: kube_control_plane 6 | kube_worker_group: kube_workers 7 | 8 | cp_node: '{{ (groups.cp_running|default(groups[kube_cp_group]))|first }}' 9 | -------------------------------------------------------------------------------- /roles/common_vars/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: [] 3 | galaxy_info: 4 | author: Julien Girardin 5 | description: Common variable for ansible-kubeadm roles 6 | company: Enix 7 | license: Apache 8 | min_ansible_version: 2.7 9 | platforms: 10 | - name: Ubuntu 11 | versions: 12 | - 18.04 13 | - 20.04 14 | galaxy_tags: 15 | - kubernetes 16 | - kubeadm 17 | -------------------------------------------------------------------------------- /roles/control_plane_patches/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kubeadm_patch_dir: /etc/kubernetes/patches 3 | kubeadm_patch_owner: root 4 | kubeadm_patch_group: root 5 | kubeadm_patch_mode: '0750' 6 | kubeadm_patch_dir_owner: '{{ kubeadm_patch_owner }}' 7 | kubeadm_patch_dir_group: '{{ kubeadm_patch_group }}' 8 | kubeadm_patch_dir_mode: '0750' 9 | 10 | kubeadm_extra_patches: {} 11 | -------------------------------------------------------------------------------- /roles/control_plane_patches/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: common_vars 4 | galaxy_info: 5 | author: Julien Girardin 6 | description: Deploy patches for kubeadm to apply 7 | company: Enix 8 | license: Apache 9 | min_ansible_version: 2.7 10 | platforms: 11 | - name: Ubuntu 12 | versions: 13 | - 18.04 14 | - 20.04 15 | galaxy_tags: 16 | - kubernetes 17 | - kubeadm 18 | -------------------------------------------------------------------------------- /roles/control_plane_patches/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_tasks: patch.yml 3 | when: enable_kubeadm_patches|bool 4 | -------------------------------------------------------------------------------- /roles/control_plane_patches/tasks/patch.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Create patch directory' 3 | file: 4 | name: '{{ 
kubeadm_patch_dir }}' 5 | state: directory 6 | owner: '{{ kubeadm_patch_dir_owner }}' 7 | group: '{{ kubeadm_patch_dir_group }}' 8 | mode: '{{ kubeadm_patch_dir_mode }}' 9 | 10 | - name: 'Deploy patches' 11 | template: 12 | src: '{{ item.value }}' 13 | dest: '{{ kubeadm_patch_dir }}/{{ item.key.rstrip(".j2") }}' 14 | owner: '{{ kubeadm_patch_owner }}' 15 | group: '{{ kubeadm_patch_group }}' 16 | mode: '{{ kubeadm_patch_mode }}' 17 | with_dict: '{{ kubeadm_default_patches|combine(kubeadm_extra_patches) }}' 18 | register: kubeadm_deployed_patches 19 | vars: 20 | kubeadm_default_patches: >- 21 | { {%- if _kube_controller_manager_bind_address is defined -%} 22 | 'kube-controller-manager+json.yaml.j2': 'kube-controller-manager+json.yaml.j2', 23 | {%- endif -%} 24 | {%- if _kube_scheduler_bind_address is defined -%} 25 | 'kube-scheduler+json.yaml.j2': 'kube-scheduler+json.yaml.j2', 26 | {%- endif -%} 27 | {%- if _etcd_metrics_bind_address is defined -%} 28 | 'etcd+json.yaml.j2': 'etcd+json.yaml.j2', 29 | {%- endif -%} 30 | } 31 | 32 | - name: 'Listing patches' 33 | find: 34 | paths: '{{ kubeadm_patch_dir }}' 35 | recurse: false 36 | register: kubeadm_current_patches 37 | 38 | - name: 'Remove uneeded patches' 39 | file: 40 | path: '{{ item }}' 41 | state: 'absent' 42 | register: kubeadm_removed_patches 43 | when: not ansible_check_mode 44 | with_items: >- 45 | {{ kubeadm_current_patches.files|map(attribute="path")|list 46 | |difference(kubeadm_deployed_patches.results|map(attribute="dest")|list) }} 47 | 48 | - name: 'Add to upgrade group if difference found' 49 | add_host: 50 | groups: nodes_upgrade 51 | name: '{{ item }}' 52 | with_items: '{{ ansible_play_batch }}' 53 | check_mode: false 54 | changed_when: false 55 | when: 56 | - >- 57 | hostvars[item].kubeadm_deployed_patches is changed or 58 | hostvars[item].kubeadm_removed_patches is changed 59 | - groups.cp_running|default([])|length > 0 60 | -------------------------------------------------------------------------------- /roles/control_plane_patches/templates/etcd+json.yaml.j2: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /spec/containers/0/command/- 3 | value: --listen-metrics-urls=http://{{ _etcd_metrics_bind_address.ipv4.address }}:2381 4 | - op: replace 5 | path: /spec/containers/0/livenessProbe/httpGet/host 6 | value: {{ _etcd_metrics_bind_address.ipv4.address }} 7 | - op: replace 8 | path: /spec/containers/0/startupProbe/httpGet/host 9 | value: {{ _etcd_metrics_bind_address.ipv4.address }} 10 | -------------------------------------------------------------------------------- /roles/control_plane_patches/templates/kube-controller-manager+json.yaml.j2: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /spec/containers/0/command/- 3 | value: --bind-address={{ _kube_controller_manager_bind_address.ipv4.address }} 4 | - op: replace 5 | path: /spec/containers/0/livenessProbe/httpGet/host 6 | value: {{ _kube_controller_manager_bind_address.ipv4.address }} 7 | - op: replace 8 | path: /spec/containers/0/startupProbe/httpGet/host 9 | value: {{ _kube_controller_manager_bind_address.ipv4.address }} 10 | -------------------------------------------------------------------------------- /roles/control_plane_patches/templates/kube-scheduler+json.yaml.j2: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /spec/containers/0/command/- 3 | value: --bind-address={{ 
_kube_scheduler_bind_address.ipv4.address }} 4 | - op: replace 5 | path: /spec/containers/0/livenessProbe/httpGet/host 6 | value: {{ _kube_scheduler_bind_address.ipv4.address }} 7 | - op: replace 8 | path: /spec/containers/0/startupProbe/httpGet/host 9 | value: {{ _kube_scheduler_bind_address.ipv4.address }} 10 | -------------------------------------------------------------------------------- /roles/discover/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: [] 3 | galaxy_info: 4 | author: Julien Girardin 5 | description: Make groups of masters and nodes 6 | company: Enix 7 | license: Apache 8 | min_ansible_version: 2.7 9 | platforms: 10 | - name: Ubuntu 11 | versions: 12 | - 18.04 13 | - 20.04 14 | galaxy_tags: 15 | - kubernetes 16 | -------------------------------------------------------------------------------- /roles/discover/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'List all nodes' 3 | command: kubectl get nodes -o yaml 4 | environment: 5 | KUBECONFIG: '/etc/kubernetes/admin.conf' 6 | changed_when: false 7 | check_mode: false 8 | register: all_nodes_cmd 9 | 10 | 11 | 12 | - name: 'Display output of "kubectl get nodes"' 13 | debug: 14 | var: all_nodes_cmd 15 | verbosity: 1 16 | 17 | - name: 'Dispatch nodes into groups' 18 | add_host: 19 | name: '{{ item.metadata.name }}' 20 | groups: >- 21 | {%- set roles = item.metadata.labels 22 | |select('match', 'node-role.kubernetes.io/.*') 23 | |map('regex_replace', 'node-role.kubernetes.io/', '')|list -%} 24 | {%- if 'master' in roles -%} 25 | kube_control_plane 26 | {%- else -%} 27 | kube_workers 28 | {%- endif -%} 29 | ansible_host: >- 30 | {{ (item.status.addresses 31 | |selectattr('type', 'eq', 'InternalIP') 32 | |first).address }} 33 | loop_control: 34 | label: '{{ item.metadata.name }}' 35 | changed_when: false 36 | check_mode: false 37 | with_items: '{{ node_list }}' 38 | vars: 39 | node_list: '{{ (all_nodes_cmd.stdout|from_yaml)["items"] }}' 40 | 41 | - name: 'Display groups (displaying discovered nodes)' 42 | debug: 43 | var: groups 44 | verbosity: 1 45 | -------------------------------------------------------------------------------- /roles/drain_nodes/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | drain_node: true 3 | reboot_node: false 4 | delete_local_data: false 5 | pause_before_drain: false 6 | pause_after_drain_and_node_upgrade: false 7 | drain_force: false 8 | -------------------------------------------------------------------------------- /roles/drain_nodes/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Pause after upgrade' 3 | pause: 4 | when: pause_after_drain_and_node_upgrade|bool 5 | 6 | - name: 'Restart kubelet' 7 | service: 8 | name: kubelet 9 | state: restarted 10 | when: not reboot_node|bool 11 | 12 | - name: 'Reboot node' 13 | reboot: 14 | when: reboot_node|bool 15 | 16 | - name: 'Wait for node' 17 | wait_for_connection: 18 | when: reboot_node|bool 19 | 20 | - name: 'Uncordon node' 21 | command: kubectl uncordon {{ ansible_nodename }} 22 | environment: 23 | KUBECONFIG: '/etc/kubernetes/admin.conf' 24 | retries: 4 25 | register: uncordon_nodes 26 | until: uncordon_nodes is not failed 27 | delegate_to: '{{ cp_node }}' 28 | when: drain_node|bool 29 |
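# Added note (comment not in the original handlers file): these handlers are notified together by the
# 'Drain node if needed' task in tasks/main.yml and run in the order defined above once the tasks
# complete: optional pause, then either a kubelet restart or a reboot plus wait (depending on
# reboot_node), and finally an uncordon executed from the control-plane host '{{ cp_node }}'.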
-------------------------------------------------------------------------------- /roles/drain_nodes/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: common_vars 4 | galaxy_info: 5 | author: Julien Girardin 6 | description: Manage node draining and return to service 7 | company: Enix 8 | license: Apache 9 | min_ansible_version: 2.7 10 | platforms: 11 | - name: Ubuntu 12 | versions: 13 | - 18.04 14 | - 20.04 15 | galaxy_tags: 16 | - kubernetes 17 | - kubeadm 18 | -------------------------------------------------------------------------------- /roles/drain_nodes/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Pause before drain' 3 | pause: 4 | when: pause_before_drain|bool 5 | 6 | - name: 'Drain node if needed' 7 | command: >- 8 | kubectl drain 9 | --ignore-daemonsets 10 | {% if delete_local_data|bool %} 11 | --delete-local-data 12 | {% endif %} 13 | {% if drain_force|bool %} 14 | --force 15 | {% endif %} 16 | {{ ansible_nodename }} 17 | delegate_to: '{{ cp_node }}' 18 | when: drain_node|bool 19 | notify: 20 | - Pause after upgrade 21 | - Restart kubelet 22 | - Reboot node 23 | - Wait for node 24 | - Uncordon node 25 | 26 | - include_role: 27 | name: hooks_call 28 | vars: 29 | kubeadm_hook_list: ['pre_nodes_upgrade'] 30 | -------------------------------------------------------------------------------- /roles/find_ip/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | _control_plane: false 3 | _skip_node_ip: false 4 | 5 | kube_apiserver_advertise_cidr: '' 6 | kubelet_node_ip_cidr: '' 7 | kube_control_plane_cidr: '' 8 | -------------------------------------------------------------------------------- /roles/find_ip/tasks/find_ip.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Find {{ found_address_var }}' 3 | set_fact: 4 | "{{ found_address_var }}": >- 5 | {%- if matched_iface|length == 1 -%} 6 | {{ matched_iface|first }} 7 | {%- elif matched_iface|length == 0 -%} 8 | {{ _error|mandatory('No match found for ' ~ found_address_var) }} 9 | {%- else -%} 10 | {{ _error|mandatory('Multiple matches for ' ~ found_address_var ~ ': ' ~ matched_iface) }} 11 | {%- endif -%} 12 | when: 13 | - network_cidr is defined 14 | - network_cidr|length > 0 15 | vars: 16 | matched_iface: >- 17 | [ {%- for iface_name in ansible_interfaces -%} 18 | {%- set iface = hostvars[inventory_hostname]["ansible_" ~ iface_name] -%} 19 | {%- if iface.ipv4 is defined and network_cidr|ansible.netcommon.network_in_network(iface.ipv4.address) -%} 20 | {{ iface }}, 21 | {%- endif -%} 22 | {%- endfor -%} ] 23 | -------------------------------------------------------------------------------- /roles/find_ip/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Find kube_apiserver_advertise_address' 3 | import_tasks: find_ip.yml 4 | when: 5 | - _control_plane|bool 6 | vars: 7 | found_address_var: "_kube_apiserver_advertise_address" 8 | network_cidr: "{{ kube_apiserver_advertise_cidr|default(kube_control_plane_cidr, true) }}" 9 | 10 | - name: 'Find etcd_metrics_bind_address' 11 | import_tasks: find_ip.yml 12 | when: 13 | - _control_plane|bool 14 | vars: 15 | found_address_var: "_etcd_metrics_bind_address" 16 | network_cidr: "{{ etcd_metrics_bind_cidr|default(kube_control_plane_cidr, true) }}" 17 | 18 | - name: 'Find
kube_controller_manager_bind_address' 19 | import_tasks: find_ip.yml 20 | when: 21 | - _control_plane|bool 22 | vars: 23 | found_address_var: "_kube_controller_manager_bind_address" 24 | network_cidr: "{{ kube_controller_manager_bind_cidr|default(kube_control_plane_cidr, true) }}" 25 | 26 | - name: 'Find kube_scheduler_bind_address' 27 | import_tasks: find_ip.yml 28 | when: 29 | - _control_plane|bool 30 | vars: 31 | found_address_var: "_kube_scheduler_bind_address" 32 | network_cidr: "{{ kube_scheduler_bind_cidr|default(kube_control_plane_cidr, true) }}" 33 | 34 | - name: 'Find kubelet_node_ip' 35 | import_tasks: find_ip.yml 36 | when: 37 | - not _skip_node_ip|bool 38 | vars: 39 | found_address_var: "_kubelet_node_ip" 40 | network_cidr: "{{ kubelet_node_ip_cidr }}" 41 | -------------------------------------------------------------------------------- /roles/find_running_cp/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiserver_manifest: '/etc/kubernetes/manifests/kube-apiserver.yaml' 3 | kubeconfig_admin: '/etc/kubernetes/admin.conf' 4 | -------------------------------------------------------------------------------- /roles/find_running_cp/meta/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: [] 3 | galaxy_info: 4 | author: Julien Girardin 5 | description: Find in early stage if a cluster already exist or not 6 | company: Enix 7 | license: Apache 8 | min_ansible_version: 2.7 9 | platforms: 10 | - name: Ubuntu 11 | versions: 12 | - 18.04 13 | - 20.04 14 | galaxy_tags: 15 | - kubernetes 16 | - kubeadm 17 | -------------------------------------------------------------------------------- /roles/find_running_cp/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Find if kube-apiserver manifest is present' 3 | stat: 4 | path: '{{ apiserver_manifest }}' 5 | register: apiserver_manifest_stat 6 | 7 | - name: 'Find if kubeconfig admin is present' 8 | stat: 9 | path: '{{ kubeconfig_admin }}' 10 | register: kubeconfig_admin_stat 11 | 12 | - name: 'Find if kubectl is present' 13 | command: which kubectl 14 | register: found_kubectl 15 | changed_when: false 16 | check_mode: false 17 | failed_when: found_kubectl.rc not in [0, 1] 18 | 19 | - name: 'Find node with manifests (more likely to be running)' 20 | add_host: 21 | name: '{{ item }}' 22 | group: 'cp_running' 23 | changed_when: false 24 | check_mode: false 25 | run_once: true 26 | with_items: >- 27 | {{ ansible_play_hosts|zip( 28 | ansible_play_hosts|map("extract", hostvars, ['apiserver_manifest_stat', 'stat', 'exists']), 29 | ansible_play_hosts|map("extract", hostvars, ['kubeconfig_admin_stat', 'stat', 'exists']), 30 | ansible_play_hosts|map("extract", hostvars, ['found_kubectl', 'rc']), 31 | ) |selectattr(1) 32 | |selectattr(2) 33 | |selectattr(3, "equalto", 0) 34 | |map(attribute='0')|list }} 35 | 36 | - name: 'Display list of machine with a potentially running control-plane' 37 | debug: 38 | var: groups.cp_running 39 | verbosity: 1 40 | -------------------------------------------------------------------------------- /roles/hooks_call/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kubeadm_hooks_dir: "{{ inventory_dir }}" 3 | -------------------------------------------------------------------------------- /roles/hooks_call/meta/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | dependencies: [] 3 | allow_duplicates: true 4 | -------------------------------------------------------------------------------- /roles/hooks_call/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Prepend '_kubeadm_hooks_' to each hook name, 3 | # look up the value of each resulting variable, 4 | # ignore the ones that don't exist, 5 | # flatten the resulting list of lists, 6 | # then include each hook file. 7 | # 8 | - name: "Call hooks for {{ kubeadm_hook_list }}" 9 | include_tasks: "{{ item }}" 10 | loop: >- 11 | {{ ['_kubeadm_hooks_'] | product(kubeadm_hook_list)|map('join')|map('extract', vars)|map('default', [])|list|flatten }} 12 | -------------------------------------------------------------------------------- /roles/hooks_plugins_register/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kubeadm_plugins_dir: '{{ inventory_dir }}/kubeadm.plugins.d' 3 | -------------------------------------------------------------------------------- /roles/hooks_plugins_register/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Layout 3 | # {{ kubeadm_plugins_dir }}/ 4 | # plugin_name/ # plugins_names 5 | # hooks1/ # hooks_names 6 | # my_hook1.yml # hooks_filenames 7 | # hooks2/ 8 | # subdir/ 9 | # my_hook2.yml # will not be discovered, but could be included 10 | # .plugin_disabled/ # hidden files and directories are skipped 11 | # hooks/ 12 | # file.yml 13 | # 14 | # {{kubeadm_plugins_dir}}/plugins_name/hooks_name/yaml_file.yml # hooks_files 15 | # 16 | # sorted_hooks 17 | # [0] => hooks_basename => for sorting, highest weight 18 | # [1] => plugins_name => for sorting, lowest weight 19 | # [2] => hooks_name => for filtering/selecting in the next task 20 | # [3] => hooks_absolute_path => the final result of the association for the next task 21 | #([] => hooks_file is transient, only used to compute the other values) 22 | # 23 | # This means 24 | # - hooks with lexicographically "lower" filenames will be loaded first, whatever the plugin name 25 | # - if the filenames are the same, hooks belonging to the lexicographically "lower" plugin will be loaded first 26 | # 27 | 28 | - name: 'Sort hooks' 29 | set_fact: 30 | hooks_list: "{{ hooks_name|unique }}" 31 | sorted_hooks: "{{ sorted_hooks|sort }}" 32 | vars: 33 | hooks_file: >- 34 | {{ q('community.general.filetree', kubeadm_plugins_dir) 35 | |selectattr('state', 'eq', 'file') 36 | |selectattr('path', 'match', '[^./][^/]*/[^./][^/]*/[^./][^/]*.ya?ml') 37 | |map(attribute='path')|list 38 | }} 39 | # prepend the plugins directory to each relative hook path 40 | hooks_absolute_path: '{{ [kubeadm_plugins_dir]|product(hooks_file)|map("join", "/")|list }}' 41 | plugins_name: >- 42 | {{ hooks_file|map("regex_replace", "([^/]*)/.*", "\g<1>")|list }} 43 | hooks_name: >- 44 | {{ hooks_file|map("regex_replace", "[^/]*/([^/]*)/.*", "\g<1>")|list }} 45 | hooks_basename: "{{ hooks_file|map('basename')|list }}" 46 | sorted_hooks: "{{ hooks_basename|zip(plugins_name, hooks_name, hooks_absolute_path)|list }}" 47 | 48 | - name: 'Set hook names' 49 | set_fact: 50 | '_kubeadm_hooks_{{ item }}': '{{ sorted_hooks|selectattr(2, "eq", item)|map(attribute=3)|list }}' 51 | loop: '{{ hooks_list }}' 52 | -------------------------------------------------------------------------------- /roles/init_cp/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 |
kubeadm_api_advertise_cidr: '' 3 | kubeadm_node_ip_cidr: '' 4 | -------------------------------------------------------------------------------- /roles/init_cp/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: common_vars 4 | galaxy_info: 5 | author: Julien Girardin 6 | description: Manage kubernetes upgrade via kubeadm 7 | company: Enix 8 | license: Apache 9 | min_ansible_version: 2.7 10 | platforms: 11 | - name: Ubuntu 12 | versions: 13 | - 18.04 14 | - 20.04 15 | galaxy_tags: 16 | - kubernetes 17 | - kubeadm 18 | -------------------------------------------------------------------------------- /roles/init_cp/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Compute init-config" 3 | set_fact: 4 | _init_config: '{{ lookup("template", role_path ~ "/templates/init_config.j2") | from_yaml }}' 5 | 6 | - name: "Write kubeadm config" 7 | import_role: 8 | name: kubeadm_configs_update 9 | vars: 10 | init_config: '{{ _init_config }}' 11 | 12 | - name: 'Kubeadm init control plane' 13 | command: >- 14 | kubeadm init --config {{ kubeadm_config_yaml }} 15 | {% if enable_kubeadm_patches -%} 16 | {% if _target_kube_version is version("1.22", ">=") -%} 17 | {% elif _target_kube_version is version("1.19", ">=") -%} 18 | --experimental-patches {{ kubeadm_patch_dir }} 19 | {%- endif %} 20 | {% endif %} 21 | {% for error in kubeadm_ignore_preflight_errors %} 22 | --ignore-preflight-errors={{ error }} 23 | {% endfor %} 24 | -------------------------------------------------------------------------------- /roles/init_cp/templates/init_config.j2: -------------------------------------------------------------------------------- 1 | --- 2 | {% if _target_kube_version is version("1.22", ">=") -%} 3 | apiVersion: kubeadm.k8s.io/v1beta3 4 | {% else %} 5 | apiVersion: kubeadm.k8s.io/v1beta2 6 | {% endif %} 7 | kind: InitConfiguration 8 | {% if _kube_apiserver_advertise_address is defined %} 9 | localAPIEndpoint: 10 | advertiseAddress: "{{ _kube_apiserver_advertise_address.ipv4.address }}" 11 | {% endif %} 12 | {% if _kubelet_node_ip is defined %} 13 | nodeRegistration: 14 | kubeletExtraArgs: 15 | node-ip: {{ _kubelet_node_ip.ipv4.address }} 16 | {% endif %} 17 | {% if enable_kubeadm_patches and _target_kube_version is version("1.22", ">=") -%} 18 | patches: 19 | directory: {{ kubeadm_patch_dir }} 20 | {%- endif %} 21 | -------------------------------------------------------------------------------- /roles/join_nodes/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | _control_plane: false 3 | -------------------------------------------------------------------------------- /roles/join_nodes/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: kubectl_module 4 | - role: common_vars 5 | galaxy_info: 6 | author: Julien Girardin 7 | description: Join node to a kubernetes cluster 8 | company: Enix 9 | license: Apache 10 | min_ansible_version: 2.7 11 | platforms: 12 | - name: Ubuntu 13 | versions: 14 | - 18.04 15 | - 20.04 16 | galaxy_tags: 17 | - kubernetes 18 | - kubeadm 19 | -------------------------------------------------------------------------------- /roles/join_nodes/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Retrieve variable from control-plane' 3 | 
set_fact: 4 | ca_info: '{{ hostvars[cp_node].ca_info }}' 5 | valid_bootstrap_tokens: '{{ hostvars[cp_node].valid_bootstrap_tokens }}' 6 | control_plane_endpoint: '{{ hostvars[cp_node].control_plane_endpoint }}' 7 | _control_plane: '{{ _control_plane }}' 8 | 9 | - name: "Find node related IP" 10 | include_role: 11 | name: find_ip 12 | 13 | - name: 'List all node' 14 | kubectl: 15 | state: get 16 | resource_type: nodes 17 | kubeconfig: /etc/kubernetes/admin.conf 18 | run_once: true 19 | delegate_to: '{{ cp_node }}' 20 | register: current_nodes 21 | when: 22 | - not(hostvars[cp_node].found_kubectl.rc == 1 and ansible_check_mode) 23 | 24 | - name: 'Display current node' 25 | debug: 26 | var: current_nodes 27 | verbosity: 1 28 | 29 | - name: 'Display JoinConfig' 30 | debug: 31 | msg: '{{ lookup("template", role_path ~ "/templates/join_config.j2").splitlines() }}' 32 | verbosity: 1 33 | 34 | - name: 'Join node that are not already joined' 35 | command: >- 36 | kubeadm join --config=/dev/stdin 37 | {% if _control_plane and enable_kubeadm_patches|bool -%} 38 | {% if _target_kube_version is version("1.22", ">=") -%} 39 | {% elif _target_kube_version is version("1.19", ">=") -%} 40 | --experimental-patches {{ kubeadm_patch_dir }} 41 | {%- endif %} 42 | {%- endif -%} 43 | args: 44 | stdin: '{{ lookup("template", role_path ~ "/templates/join_config.j2") }}' 45 | register: kubeadm_node_join 46 | when: ansible_nodename not in nodes_list 47 | vars: 48 | # "items" cannot be defaulted easily as jinja fallback on using method instead 49 | nodes_list: "{{ ({'items': []}|combine(current_nodes))['items']|map(attribute='metadata.name')|list }}" 50 | 51 | - name: 'Display output of "kubeadm join"' 52 | debug: 53 | var: kubeadm_node_join 54 | verbosity: 1 55 | -------------------------------------------------------------------------------- /roles/join_nodes/templates/join_config.j2: -------------------------------------------------------------------------------- 1 | {%- if valid_bootstrap_tokens|length == 0 -%} 2 | {#- Use a dummy token for dry-run if it didn't exist -#} 3 | {%- set token_data = {"token-id": "ZHJ5LXJ1biB0b2tlbi1pZAo=", "token-secret": "ZHJ5LXJ1biB0b2tlbi1zZWNyZXQK"} -%} 4 | {%- else -%} 5 | {%- set token_data = (valid_bootstrap_tokens|first).data -%} 6 | {%- endif -%} 7 | --- 8 | {% if _target_kube_version is version("1.22", ">=") -%} 9 | apiVersion: kubeadm.k8s.io/v1beta3 10 | {% else %} 11 | apiVersion: kubeadm.k8s.io/v1beta2 12 | {% endif %} 13 | kind: JoinConfiguration 14 | discovery: 15 | bootstrapToken: 16 | token: "{{ token_data['token-id']|b64decode }}.{{ token_data['token-secret']|b64decode }}" 17 | caCertHashes: 18 | - "sha256:{{ (ca_info.public_key_fingerprints.sha256|default('dry-run-sha256')).replace(':', '') }}" 19 | {% if control_plane_endpoint %} 20 | apiServerEndpoint: "{{ control_plane_endpoint }}" 21 | {% else %} 22 | apiServerEndpoint: "{{ hostvars[cp_node].ansible_default_ipv4.address }}:6443" 23 | {% endif %} 24 | {% if _kubelet_node_ip is defined %} 25 | nodeRegistration: 26 | kubeletExtraArgs: 27 | node-ip: "{{ _kubelet_node_ip.ipv4.address }}" 28 | {% endif %} 29 | {% if _control_plane %} 30 | controlPlane: 31 | certificateKey: "{{ cert_encryption_key }}" 32 | {% if _kube_apiserver_advertise_address is defined %} 33 | localAPIEndpoint: 34 | advertiseAddress: "{{ _kube_apiserver_advertise_address.ipv4.address }}" 35 | {% endif %} 36 | {% endif %} 37 | {% if _control_plane and enable_kubeadm_patches|bool and _target_kube_version is version("1.22", ">=") -%} 38 | patches: 39 
| directory: {{ kubeadm_patch_dir }} 40 | {%- endif %} 41 | -------------------------------------------------------------------------------- /roles/kubeadm_configs_compute/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | _config_upgrade_reasons: {} 3 | 4 | cluster_config: {} 5 | kubelet_config: {} 6 | kubeproxy_config: {} 7 | -------------------------------------------------------------------------------- /roles/kubeadm_configs_compute/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: find_running_cp 4 | - role: common_vars 5 | galaxy_info: 6 | author: Julien Girardin 7 | description: Fetch and update kubeadm configs in memory 8 | company: Enix 9 | license: Apache 10 | min_ansible_version: 2.7 11 | platforms: 12 | - name: Ubuntu 13 | versions: 14 | - 18.04 15 | - 20.04 16 | galaxy_tags: 17 | - kubernetes 18 | - kubeadm 19 | -------------------------------------------------------------------------------- /roles/kubeadm_configs_compute/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Kubeadm config compute' 3 | run_once: true 4 | delegate_to: '{{ cp_node }}' 5 | block: 6 | - name: 'get kubeadm configmap if cluster running' 7 | command: kubectl get cm -o jsonpath='{.data.ClusterConfiguration}' -n kube-system kubeadm-config 8 | changed_when: false 9 | check_mode: false 10 | register: _in_cluster_kubeadm_config 11 | when: 12 | - groups.cp_running|default([])|length > 0 13 | environment: 14 | KUBECONFIG: '{{ kubeconfig_admin }}' 15 | 16 | - name: 'Display current kubeadm-config raw output' 17 | debug: 18 | var: _in_cluster_kubeadm_config 19 | verbosity: 1 20 | 21 | - name: 'Find kubeadm config' 22 | set_fact: 23 | _cluster_config_orig: '{{ _cluster_config_orig }}' 24 | _current_cp_version: >- 25 | {{ _cluster_config_orig.get("kubernetesVersion", "") 26 | |regex_replace("v(.*)", "\g<1>") }} 27 | vars: 28 | _cluster_config_orig: >- 29 | {%- if _in_cluster_kubeadm_config is skipped -%} 30 | {} 31 | {%- else -%} 32 | {{ _in_cluster_kubeadm_config.stdout|from_yaml }} 33 | {%- endif -%} 34 | 35 | - name: 'Display current extracted ClusterConfig' 36 | debug: 37 | var: _cluster_config_orig 38 | verbosity: 1 39 | 40 | - name: 'Get unversioned kubelet configmap if cluster running' 41 | command: >- 42 | kubectl -n kube-system get cm 43 | --ignore-not-found=true 44 | -o jsonpath='{.data.kubelet}' 45 | kubelet-config 46 | changed_when: false 47 | check_mode: false 48 | register: _in_cluster_kubelet_config_unversioned 49 | when: 50 | - groups.cp_running|default([])|length > 0 51 | environment: 52 | KUBECONFIG: '{{ kubeconfig_admin }}' 53 | 54 | - name: 'Get versioned kubelet configmap if cluster running' 55 | command: >- 56 | kubectl -n kube-system get cm 57 | -o jsonpath='{.data.kubelet}' 58 | kubelet-config-{{ (_current_cp_version|regex_findall('(?:\d+)'))[0:2]|join('.') }} 59 | changed_when: false 60 | check_mode: false 61 | register: _in_cluster_kubelet_config_versioned 62 | when: 63 | - groups.cp_running|default([])|length > 0 64 | - _in_cluster_kubelet_config_unversioned.stdout|length == 0 65 | environment: 66 | KUBECONFIG: '{{ kubeconfig_admin }}' 67 | 68 | - name: 'Get kubelet configmap if cluster running' 69 | set_fact: 70 | _in_cluster_kubelet_config: >- 71 | {%- if _in_cluster_kubelet_config_unversioned.stdout|default("")|length > 0 -%} 72 | {{ _in_cluster_kubelet_config_unversioned }} 
73 | {%- else -%} 74 | {{ _in_cluster_kubelet_config_versioned }} 75 | {%- endif -%} 76 | 77 | - name: 'Get kubeproxy configmap if cluster running' 78 | command: >- 79 | kubectl -n kube-system get cm 80 | -o jsonpath='{.data.config\.conf}' 81 | kube-proxy 82 | changed_when: false 83 | check_mode: false 84 | register: _in_cluster_kubeproxy_config 85 | when: 86 | - groups.cp_running|default([])|length > 0 87 | environment: 88 | KUBECONFIG: '{{ kubeconfig_admin }}' 89 | 90 | - name: 'Set default kubeadm-config if empty set' 91 | set_fact: 92 | control_plane_endpoint: >- 93 | {% if control_plane_endpoint is defined -%} 94 | {{ control_plane_endpoint }} 95 | {%- elif _cluster_config_orig.controlPlaneEndpoint is defined -%} 96 | {{ _cluster_config_orig.controlPlaneEndpoint }} 97 | {%- endif -%} 98 | _kubelet_config_orig: >- 99 | {%- if _in_cluster_kubelet_config is skipped -%} 100 | {} 101 | {%- else -%} 102 | {{ _in_cluster_kubelet_config.stdout|from_yaml }} 103 | {%- endif -%} 104 | _kubeproxy_config_orig: >- 105 | {%- if _in_cluster_kubeproxy_config is skipped -%} 106 | {} 107 | {%- else -%} 108 | {{ _in_cluster_kubeproxy_config.stdout|from_yaml }} 109 | {%- endif -%} 110 | 111 | - name: 'Display wanted version of kubernetes' 112 | debug: 113 | var: _target_kube_version 114 | 115 | - name: 'Display computed "control_plane_endpoint"' 116 | debug: 117 | var: control_plane_endpoint 118 | verbosity: 1 119 | 120 | - name: 'Display current Kubelet config' 121 | debug: 122 | var: _kubelet_config_orig 123 | verbosity: 1 124 | 125 | - name: 'Display current KubeProxy config' 126 | debug: 127 | var: _kubeproxy_config_orig 128 | verbosity: 1 129 | 130 | - name: 'Merge requested kubeadm config with existing one' 131 | set_fact: 132 | _cluster_config_final: >- 133 | {%- if control_plane_endpoint|string|lower == 'false' -%} 134 | {%- set _ = _cluster_config_orig.pop('controlPlaneEndpoint', false) -%} 135 | {%- elif control_plane_endpoint -%} 136 | {%- set cluster_config = cluster_config|combine({'controlPlaneEndpoint': control_plane_endpoint}) -%} 137 | {%- endif -%} 138 | {{ _cluster_config_orig|default(default_kubeadm_config, true)|combine(cluster_config, recursive=true) }} 139 | _kubelet_config_final: >- 140 | {{ _kubelet_config_orig|default(default_kubelet_config, true)|combine(kubelet_config, recursive=true) }} 141 | _kubeproxy_config_final: >- 142 | {{ _kubeproxy_config_orig|default(default_kubeproxy_config, true)|combine(kubeproxy_config, recursive=true) }} 143 | _target_kube_version: '{{ _target_kube_version }}' 144 | vars: 145 | _target_kube_version: >- 146 | {%- if _current_cp_version -%} 147 | {%- if _current_cp_version|regex_search(kube_version ~ '.*') or not kube_version -%} 148 | {{ _current_cp_version }} 149 | {%- else -%} 150 | {{ kube_version|default(default_kube_version, true) }} 151 | {%- endif -%} 152 | {%- else -%} 153 | {{ kube_version|default(default_kube_version, true) }} 154 | {%- endif -%} 155 | 156 | - name: 'Make diff on cluster_config' 157 | ansible.utils.fact_diff: 158 | before: '{{ _cluster_config_orig }}' 159 | after: '{{ _cluster_config_final }}' 160 | register: _cluster_config_diff 161 | 162 | - name: 'Make diff on kubelet_config' 163 | ansible.utils.fact_diff: 164 | before: '{{ _kubelet_config_orig }}' 165 | after: '{{ _kubelet_config_final }}' 166 | register: _kubelet_config_diff 167 | 168 | - name: 'Make diff on kubeproxy_config' 169 | ansible.utils.fact_diff: 170 | before: '{{ _kubeproxy_config_orig }}' 171 | after: '{{ _kubeproxy_config_final }}' 172 | register:
_kubeproxy_config_diff 173 | 174 | - name: "Register diffs as upgrade reasons" 175 | set_fact: 176 | _config_upgrade_reasons: >- 177 | {%- if _cluster_config_diff.diff_lines|length > 0 -%} 178 | {%- set _config_upgrade_reasons = _config_upgrade_reasons|combine( 179 | dict(cluster_config_diff=_cluster_config_diff.diff_lines)) -%} 180 | {%- endif -%} 181 | {%- if _kubelet_config_diff.diff_lines|length > 0 -%} 182 | {%- set _config_upgrade_reasons = _config_upgrade_reasons|combine( 183 | dict(kubelet_config_diff=_kubelet_config_diff.diff_lines)) -%} 184 | {%- endif -%} 185 | {%- if _kubeproxy_config_diff.diff_lines|length > 0 -%} 186 | {%- set _config_upgrade_reasons = _config_upgrade_reasons|combine( 187 | dict(kubeproxy_config_diff=_kubeproxy_config_diff.diff_lines)) -%} 188 | {%- endif -%} 189 | {{ _config_upgrade_reasons }} 190 | -------------------------------------------------------------------------------- /roles/kubeadm_configs_compute/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | default_kubeadm_config: 3 | apiVersion: >- 4 | {%- if _target_kube_version is version("1.22", ">=") -%} 5 | kubeadm.k8s.io/v1beta3 6 | {%- else -%} 7 | kubeadm.k8s.io/v1beta2 8 | {%- endif -%} 9 | kind: ClusterConfiguration 10 | 11 | default_kubelet_config: 12 | apiVersion: kubelet.config.k8s.io/v1beta1 13 | kind: KubeletConfiguration 14 | 15 | default_kubeproxy_config: 16 | apiVersion: kubeproxy.config.k8s.io/v1alpha1 17 | kind: KubeProxyConfiguration 18 | -------------------------------------------------------------------------------- /roles/kubeadm_configs_update/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | cluster_config: {} 3 | 4 | kubeadm_config_yaml: '/tmp/kubeadm-config-{{ansible_date_time.iso8601 }}.yaml' 5 | 6 | init_config: {} 7 | -------------------------------------------------------------------------------- /roles/kubeadm_configs_update/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: common_vars 4 | galaxy_info: 5 | author: Julien Girardin 6 | description: Persist updates kubeadm configs in cluster configMaps 7 | company: Enix 8 | license: Apache 9 | min_ansible_version: 2.7 10 | platforms: 11 | - name: Ubuntu 12 | versions: 13 | - 18.04 14 | - 20.04 15 | galaxy_tags: 16 | - kubernetes 17 | - kubeadm 18 | -------------------------------------------------------------------------------- /roles/kubeadm_configs_update/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Update kubeadm config' 3 | run_once: true 4 | delegate_to: '{{ cp_node }}' 5 | block: 6 | - name: 'set some facts' 7 | set_fact: 8 | kubeadm_config_yaml: '{{ kubeadm_config_yaml }}' 9 | 10 | - name: 'write kubeadm config if modification needed' 11 | template: 12 | src: 'kubeadm_config.yaml.j2' 13 | dest: '{{ kubeadm_config_yaml }}' 14 | owner: root 15 | group: root 16 | mode: 0600 17 | register: _kubeadm_configs_write 18 | when: >- 19 | _cluster_config_diff.diff_lines|length > 0 20 | or _kubelet_config_diff.diff_lines|length > 0 21 | or _kubeproxy_config_diff.diff_lines|length > 0 22 | or groups.cp_running|default([])|length == 0 23 | 24 | - name: 'reupload KUBEADM/KUBELET config if cluster running' 25 | command: kubeadm init phase upload-config all --config {{ kubeadm_config_yaml }} 26 | environment: 27 | KUBECONFIG: '/etc/kubernetes/admin.conf' 28 | when: 
29 | - _kubeadm_configs_write is changed 30 | - _cluster_config_diff.diff_lines|length > 0 31 | or _kubelet_config_diff.diff_lines|length > 0 32 | - groups.cp_running|default([])|length > 0 33 | 34 | - name: 'reupload KUBEPROXY config if cluster running' 35 | command: kubeadm init phase addon kube-proxy --config {{ kubeadm_config_yaml }} 36 | environment: 37 | KUBECONFIG: '/etc/kubernetes/admin.conf' 38 | when: 39 | - _kubeadm_configs_write is changed 40 | - _kubeproxy_config_diff.diff_lines|length > 0 41 | - groups.cp_running|default([])|length > 0 42 | -------------------------------------------------------------------------------- /roles/kubeadm_configs_update/templates/kubeadm_config.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | {{ _cluster_config_final|to_nice_yaml(indent=2) }} 3 | {% if _kubelet_config_diff.diff_lines|length > 0 4 | or groups.cp_running|default([])|length == 0 %} 5 | --- 6 | {{ _kubelet_config_final|to_nice_yaml(indent=2) }} 7 | {% endif %} 8 | {% if _kubeproxy_config_diff.diff_lines|length > 0 9 | or groups.cp_running|default([])|length == 0 %} 10 | --- 11 | {{ _kubeproxy_config_final|to_nice_yaml(indent=2) }} 12 | {% endif %} 13 | {% if init_config != {} %} 14 | --- 15 | {{ init_config|to_nice_yaml(indent=2) }} 16 | {% endif %} 17 | -------------------------------------------------------------------------------- /roles/kubectl_module/library/kubectl.py: -------------------------------------------------------------------------------- 1 | ../../../plugins/modules/kubectl.py -------------------------------------------------------------------------------- /roles/kubectl_module/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: [] 3 | galaxy_info: 4 | author: Julien Girardin 5 | description: Load module for interacting with kubectl 6 | company: Enix 7 | license: Apache 8 | min_ansible_version: 2.7 9 | platforms: 10 | - name: Ubuntu 11 | versions: 12 | - 18.04 13 | - 20.04 14 | galaxy_tags: 15 | - kubernetes 16 | - kubeadm 17 | - kubectl 18 | -------------------------------------------------------------------------------- /roles/kubectl_module/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /roles/packages/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | _control_plane: false 3 | 4 | kubernetes_repository: '{{ kubernetes_upstream_repository }}' 5 | kubeadm_pkgs: true 6 | node_pkgs: true 7 | 8 | enforce_version: true 9 | 10 | force_apt_update: false 11 | pause_before_kube_pkg_install: false 12 | repo_refresh_time: 86400 # 1 day 13 | -------------------------------------------------------------------------------- /roles/packages/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: packages_common 4 | galaxy_info: 5 | author: Julien Girardin 6 | description: Install kubernetes related packages 7 | company: Enix 8 | license: Apache 9 | min_ansible_version: 2.7 10 | platforms: 11 | - name: Ubuntu 12 | versions: 13 | - 18.04 14 | - 20.04 15 | galaxy_tags: 16 | - kubernetes 17 | - kubeadm 18 | - kubelet 19 | - kubectl 20 | -------------------------------------------------------------------------------- /roles/packages/tasks/find_version_Debian.yml: 
-------------------------------------------------------------------------------- 1 | - name: 'find version of kubeadm to install' 2 | pkg_version_match: 3 | name: kubeadm 4 | version: '{{ _target_kubeadm_version }}' 5 | when: 6 | - _target_kubeadm_version is defined 7 | - not(_kube_repo_just_added is changed and ansible_check_mode) 8 | register: kubeadm_version 9 | 10 | - name: 'Display of complete version of kubeadm' 11 | debug: 12 | var: kubeadm_version 13 | verbosity: 1 14 | 15 | - name: 'Set the same version for all package of Debian' 16 | set_fact: 17 | kubeadm_version: '{{ kubeadm_dry_run_version }}' 18 | kubelet_version: '{{ kubeadm_dry_run_version }}' 19 | kubectl_version: '{{ kubeadm_dry_run_version }}' 20 | vars: 21 | kubeadm_dry_run_version: '{{ {"version": "unknown-version"}|combine(kubeadm_version) }}' 22 | -------------------------------------------------------------------------------- /roles/packages/tasks/kube_repo_Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Create directory to store keys' 3 | file: 4 | dest: /etc/apt/keyrings 5 | state: directory 6 | register: _apt_keyring_directory 7 | 8 | - name: 'Add kubernetes repo signing key' 9 | ansible.builtin.get_url: 10 | url: '{{ kubernetes_repository.gpg_url }}' 11 | dest: '/etc/apt/keyrings/kubernetes-apt-keyring.asc' 12 | owner: root 13 | group: root 14 | mode: 0644 15 | force: true 16 | when: not(_apt_keyring_directory is changed and ansible_check_mode) 17 | 18 | - name: 'Add the kubernetes repository' 19 | apt_repository: 20 | repo: '{{ kubernetes_repository.repo_url }}' 21 | state: present 22 | update_cache: false 23 | register: _kube_repo_just_added 24 | 25 | - name: 'Cleanup old kubernetes repository' 26 | apt_repository: 27 | repo: '{{ kubernetes_old_repository.repo_url }}' 28 | state: absent 29 | update_cache: false 30 | when: kubernetes_repository.repo_url != kubernetes_old_repository.repo_url 31 | 32 | - name: 'refresh source list' 33 | apt: 34 | update_cache: true 35 | cache_valid_time: >- 36 | {{ omit 37 | if force_update or repo_refresh_time|int < 0 38 | else 39 | repo_refresh_time|int }} 40 | when: >- 41 | repo_refresh_time|int >= 0 or force_update 42 | vars: 43 | force_update: '{{ force_apt_update|bool or _kube_repo_just_added is changed }}' 44 | -------------------------------------------------------------------------------- /roles/packages/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Assert required values are set' 3 | assert: 4 | that: 5 | # enforce_version implies that _kube_version is defined 6 | - enforce_version|bool <= (_target_kubeadm_version is defined) 7 | 8 | - name: 'Include OS specific variables' 9 | include_vars: '{{ item }}' 10 | with_fileglob: 11 | - 'vars/os_{{ ansible_distribution }}_{{ ansible_distribution_release }}.yml' 12 | - 'vars/os_{{ ansible_distribution }}.yml' 13 | - 'vars/os_{{ ansible_os_family }}.yml' 14 | 15 | - name: 'Install repository dependencies' 16 | package: 17 | name: '{{ repository_dependencies }}' 18 | state: present 19 | 20 | - name: 'Add kubernetes repository' 21 | include_tasks: '{{ item }}' 22 | with_first_found: 23 | - 'kube_repo_{{ ansible_distribution }}_{{ ansible_distribution_release }}.yml' 24 | - 'kube_repo_{{ ansible_distribution }}.yml' 25 | - 'kube_repo_{{ ansible_os_family }}.yml' 26 | 27 | - name: 'Find package version' 28 | include_tasks: '{{ item }}' 29 | with_first_found: 30 | - 'find_version_{{ 
ansible_distribution }}_{{ ansible_distribution_release }}.yml' 31 | - 'find_version_{{ ansible_distribution }}.yml' 32 | - 'find_version_{{ ansible_os_family }}.yml' 33 | 34 | - name: 'Compute list of packages' 35 | set_fact: 36 | kube_package_dict: >- 37 | { {%- for pkg in pkg_list -%} 38 | '{{ pkg }}': 39 | '{{ enforce_version|ternary( 40 | packages_force_version[pkg]|default(vars[pkg ~ '_version'].version), 41 | '' 42 | ) }}', 43 | {%- endfor -%} } 44 | vars: 45 | pkg_list: >- 46 | {%- set pkg_list = [] -%} 47 | {%- if kubeadm_pkgs|bool -%} 48 | {%- set _ = pkg_list.extend(kubeadm_packages) -%} 49 | {%- endif -%} 50 | {%- if node_pkgs|bool -%} 51 | {%- set _ = pkg_list.extend(node_packages) -%} 52 | {%- endif -%} 53 | {%- if _control_plane|bool -%} 54 | {%- set _ = pkg_list.extend(control_plane_packages) -%} 55 | {%- endif -%} 56 | {{ pkg_list|unique }} 57 | 58 | - name: 'Display package to install' 59 | debug: 60 | var: kube_package_dict 61 | 62 | - name: 'Pause before installing package' 63 | pause: 64 | when: pause_before_kube_pkg_install|bool 65 | 66 | - name: 'Install kubernetes packages' 67 | include_tasks: '{{ file_tasks }}' 68 | loop_control: 69 | loop_var: file_tasks 70 | with_first_found: 71 | - 'pkg_{{ ansible_distribution }}_{{ ansible_distribution_release }}.yml' 72 | - 'pkg_{{ ansible_distribution }}.yml' 73 | - 'pkg_{{ ansible_os_family }}.yml' 74 | -------------------------------------------------------------------------------- /roles/packages/tasks/pkg_Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Create pinning files" 3 | copy: 4 | dest: /etc/apt/preferences.d/50-{{ item.pkg }} 5 | content: | 6 | package: {{ item.pkg }} 7 | Pin: version {{ item.version }} 8 | Pin-Priority: 1001 9 | loop: "{{ kube_package_dict|dict2items('pkg', 'version')|selectattr('version') }}" 10 | 11 | - name: "Remove pinning files" 12 | file: 13 | dest: /etc/apt/preferences.d/{{ item.pkg }} 14 | state: absent 15 | loop: "{{ kube_package_dict|dict2items('pkg', 'version')|rejectattr('version') }}" 16 | 17 | - name: 'Unhold package before upgrade' 18 | dpkg_selections: 19 | selection: 'install' 20 | name: '{{ item }}' 21 | loop: "{{ kube_package_dict.keys() }}" 22 | ignore_errors: "{{ _kube_repo_just_added is changed or ansible_check_mode }}" 23 | 24 | - name: 'Install kubernetes packages' 25 | apt: 26 | name: "{{ kube_package_dict.keys() }}" 27 | state: "latest" 28 | when: not(_kube_repo_just_added is changed and ansible_check_mode) 29 | register: _kube_pkg_installed 30 | -------------------------------------------------------------------------------- /roles/packages/vars/os_Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | _repo_version: '{{ (_target_kubeadm_version|regex_findall("(?:\d+)"))[0:2]|join(".") }}' 3 | kubernetes_upstream_repository: >- 4 | {%- if _repo_version is version("1.24", ">=") -%} 5 | {{ kubernetes_new_repository }} 6 | {%- else -%} 7 | {{ kubernetes_old_repository }} 8 | {%- endif -%} 9 | 10 | kubernetes_new_repository: 11 | gpg_url: 'https://pkgs.k8s.io/core:/stable:/v{{ _repo_version }}/deb/Release.key' 12 | repo_url: 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.asc] https://pkgs.k8s.io/core:/stable:/v{{ _repo_version }}/deb/ /' 13 | 14 | kubernetes_old_repository: 15 | gpg_url: 'https://packages.cloud.google.com/apt/doc/apt-key.gpg' 16 | repo_url: 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.asc] https://apt.kubernetes.io/ 
kubernetes-xenial main' 17 | 18 | repository_dependencies: 19 | - apt-transport-https 20 | - ca-certificates 21 | - gnupg 22 | 23 | kubeadm_packages: 24 | - kubeadm 25 | - kubectl # As a dependency but without version specification 26 | - kubernetes-cni 27 | 28 | node_packages: 29 | - kubelet 30 | - kubernetes-cni 31 | 32 | control_plane_packages: 33 | - kubectl 34 | 35 | packages_force_version: 36 | kubernetes-cni: 37 | 38 | python2_openssl: python-openssl 39 | python3_openssl: python3-openssl 40 | -------------------------------------------------------------------------------- /roles/packages_common/library/pkg_version_match.py: -------------------------------------------------------------------------------- 1 | ../../../plugins/modules/pkg_version_match.py -------------------------------------------------------------------------------- /roles/preflight_check_cp/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | _config_upgrade_reasons: {} 3 | _failure_reasons: {} 4 | _upgrade_reasons: {} 5 | cp_health_check_bypass: false 6 | 7 | kube_version: 8 | default_kube_version: '1.19' 9 | -------------------------------------------------------------------------------- /roles/preflight_check_cp/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: kubectl_module 4 | galaxy_info: 5 | author: Julien Girardin 6 | description: Check and vars initialization for kubeadm 7 | company: Enix 8 | license: Apache 9 | min_ansible_version: 2.7 10 | platforms: 11 | - name: Ubuntu 12 | versions: 13 | - 18.04 14 | - 20.04 15 | galaxy_tags: 16 | - kubernetes 17 | - kubeadm 18 | -------------------------------------------------------------------------------- /roles/preflight_check_cp/tasks/check_control_plane_endpoint.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Check if control_plane_endpoint is set for multi-nodes control plane cluster' 3 | set_fact: 4 | _failure_reasons: >- 5 | {{ _failure_reasons|combine(dict( 6 | cp_endpoint_multi_nodes = "control_plane_endpoint is not set and you provided multi-nodes control plane. 7 | You need a reliable way to contact all the apiserver with a single endpoint." 
8 | )) }} 9 | when: not control_plane_endpoint and ansible_play_hosts | length > 1 10 | -------------------------------------------------------------------------------- /roles/preflight_check_cp/tasks/check_control_plane_health.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'get kubeadm configmap if cluster running' 3 | command: kubectl get nodes -o yaml 4 | changed_when: false 5 | check_mode: false 6 | register: _all_nodes_yaml 7 | environment: 8 | KUBECONFIG: '{{ kubeconfig_admin }}' 9 | 10 | - name: 'Check control-plane health' 11 | set_fact: 12 | _failure_reasons: >- 13 | {%- set cp_unhealthy = [] -%} 14 | {%- for node in (_all_nodes_yaml.stdout|from_yaml)['items'] 15 | if 'node-role.kubernetes.io/control-plane' in node.metadata.labels -%} 16 | {%- if node.status.conditions|selectattr("type", "eq", "Ready") 17 | |rejectattr("status", "eq", "True")|list|length > 0 -%} 18 | 19 | {%- set _ = cp_unhealthy.append(node.metadata.name) -%} 20 | {%- endif -%} 21 | {%- endfor -%} 22 | {%- if cp_unhealthy|length > 0 -%} 23 | {%- set _ = _failure_reasons.update(dict( 24 | control_plane_unhealthy=cp_unhealthy 25 | )) -%} 26 | {%- endif -%} 27 | {{ _failure_reasons }} 28 | when: not cp_health_check_bypass|bool 29 | -------------------------------------------------------------------------------- /roles/preflight_check_cp/tasks/check_version.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Check that version upgrade is one step' 3 | set_fact: 4 | _failure_reasons: >- 5 | {{ _failure_reasons|combine(dict( 6 | version_skew_error = "Version upgrade is not allowed: " ~ _current_cp_version ~ " => " ~ _target_kube_version 7 | )) }} 8 | when: 9 | - _current_cp_version|length > 0 10 | - (_current_major != _target_major) or (_current_minor|int - _target_minor|int)|abs > 1 11 | vars: 12 | _current_version_split: '{{ (_current_cp_version|regex_findall("(?:\d+)"))[0:2] }}' 13 | _current_major: '{{ _current_version_split|first }}' 14 | _current_minor: '{{ _current_version_split|last }}' 15 | _target_version_split: '{{ (_target_kube_version|regex_findall("(?:\d+)"))[0:2] }}' 16 | _target_major: '{{ _target_version_split|first }}' 17 | _target_minor: '{{ _target_version_split|last }}' 18 | 19 | - name: 'Trigger upgrade if targeted version is different from current version' 20 | set_fact: 21 | _config_upgrade_reasons: >- 22 | {{ _config_upgrade_reasons|combine(dict( 23 | upgrade_version = _current_cp_version ~ " != " ~ _target_kube_version 24 | )) }} 25 | when: 26 | - _current_cp_version|length > 0 27 | - not(_current_cp_version|regex_search(_target_kube_version ~ '.*')) 28 | -------------------------------------------------------------------------------- /roles/preflight_check_cp/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Get version of kubeadm package' 3 | package_facts: 4 | 5 | - name: 'Display information about all packages' 6 | debug: 7 | var: packages 8 | verbosity: 1 9 | 10 | # Look at the current version of kubernetes (ex: 1.17.8) 11 | # Then look at possible version of kubeadm that match this version (ex: 1.17.8-01) 12 | # Retain the kubeadm version if match found, fallback on kubernetes otherwise 13 | # Last fallback is the default version, if no cluster running 14 | - name: 'Export variable for other hosts' 15 | set_fact: 16 | _target_kubeadm_version: '{{ _kubeadm_version }}' 17 | vars: 18 | _kubeadm_version: >- 19 | 
{%- set kubeadm_match = ansible_play_hosts 20 | |map('extract', hostvars, ['packages', 'kubeadm', 0, 'version']) 21 | |map('default')|select('match', _target_kube_version ~ '.*')|list -%} 22 | {%- if kubeadm_match|length > 0 -%} 23 | {{ kubeadm_match|first }} 24 | {%- else -%} 25 | {{ _target_kube_version }} 26 | {%- endif -%} 27 | 28 | - name: 'Display wanted version of kubeadm' 29 | debug: 30 | var: _target_kubeadm_version 31 | 32 | - import_tasks: check_version.yml 33 | 34 | - import_tasks: check_control_plane_endpoint.yml 35 | 36 | - import_tasks: check_control_plane_health.yml 37 | run_once: true 38 | delegate_to: '{{ groups.cp_running|first }}' 39 | when: 40 | - groups.cp_running|default([])|length > 0 41 | - not cp_health_check_bypass|bool 42 | -------------------------------------------------------------------------------- /roles/preflight_check_nodes/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | _upgrade_reasons: {} 3 | _failure_reasons: {} 4 | 5 | kubelet_config_path: /var/lib/kubelet/config.yaml 6 | -------------------------------------------------------------------------------- /roles/preflight_check_nodes/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: common_vars 4 | galaxy_info: 5 | author: Julien Girardin 6 | description: Check and init vars for kubeadm on nodes 7 | company: Enix 8 | license: Apache 9 | min_ansible_version: 2.7 10 | platforms: 11 | - name: Ubuntu 12 | versions: 13 | - 18.04 14 | - 20.04 15 | galaxy_tags: 16 | - kubernetes 17 | - kubeadm 18 | -------------------------------------------------------------------------------- /roles/preflight_check_nodes/tasks/check_kubelet_config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Test if local kubelet config exists' 3 | stat: 4 | path: '{{ kubelet_config_path }}' 5 | register: _kubelet_config_stat 6 | 7 | - name: 'Fetch local kubelet config if exist' 8 | slurp: 9 | path: '{{ kubelet_config_path }}' 10 | register: _kubelet_config_fetch 11 | when: _kubelet_config_stat.stat.exists 12 | 13 | - name: 'Display kubelet config diff' 14 | ansible.utils.fact_diff: 15 | # "e30K" means "{}" in base64 16 | before: '{{ (_kubelet_config_fetch.content|default("e30K")|b64decode)|from_yaml }}' 17 | after: '{{ _kubelet_config_final }}' 18 | register: _kubelet_configfile_diff 19 | when: _kubelet_config_fetch.content is defined 20 | 21 | - name: 'Select node for upgrade if diff is found' 22 | set_fact: 23 | _upgrade_reasons: >- 24 | {{ _upgrade_reasons|combine(dict( 25 | kubelet_configfile_diff = _kubelet_configfile_diff.diff_lines 26 | )) }} 27 | when: _kubelet_configfile_diff.diff_lines|default([])|length > 0 28 | -------------------------------------------------------------------------------- /roles/preflight_check_nodes/tasks/check_kubelet_version.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Get version of kubelet package' 3 | package_facts: 4 | 5 | - name: 'Display wanted version of kubeadm' 6 | debug: 7 | var: _target_kubeadm_version 8 | 9 | - name: 'Select node for upgrade if kubelet version mismatch' 10 | set_fact: 11 | _upgrade_reasons: >- 12 | {{ _upgrade_reasons|combine(dict( 13 | kubelet_version_mismatch = "An upgrade is required for kubelet" 14 | )) }} 15 | when: packages.kubelet|default([])|rejectattr('version', 'eq', 
_target_kubeadm_version)|list|length > 0 16 | -------------------------------------------------------------------------------- /roles/preflight_check_nodes/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Check deprecated variable' 3 | fail: 4 | msg: 'do not use "pause_after_drain" variable, use "pause_after_drain_and_node_upgrade"' 5 | when: pause_after_drain is defined 6 | 7 | - name: 'Retrieve variable from control-plane' 8 | set_fact: 9 | _target_kube_version: '{{ hostvars[cp_node]._target_kube_version }}' 10 | _target_kubeadm_version: '{{ hostvars[cp_node]._target_kubeadm_version }}' 11 | _kubelet_config_final: '{{ hostvars[cp_node]._kubelet_config_final }}' 12 | 13 | - import_tasks: check_kubelet_version.yml 14 | 15 | - import_tasks: check_kubelet_config.yml 16 | -------------------------------------------------------------------------------- /roles/process_reasons/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | _config_upgrade_reasons: {} 3 | _failure_reasons: {} 4 | _upgrade_reasons: {} 5 | 6 | action_reasons_review_skip: false 7 | -------------------------------------------------------------------------------- /roles/process_reasons/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: common_vars 4 | -------------------------------------------------------------------------------- /roles/process_reasons/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - debug: 3 | var: _failure_reasons 4 | changed_when: _failure_reasons|length > 0 5 | 6 | - name: 'Fail if failure reasons found' 7 | fail: 8 | msg: 'Look at previous debug for reasons.' 
9 | when: _failure_reasons|length > 0 10 | 11 | - name: 'Display config upgrade reasons' 12 | debug: 13 | var: _config_upgrade_reasons 14 | changed_when: _config_upgrade_reasons|length > 0 15 | 16 | - name: 'Display upgrade reasons' 17 | debug: 18 | var: _upgrade_reasons 19 | changed_when: _upgrade_reasons|length > 0 20 | 21 | - name: 'Pause to review diff and upgrade reasons' 22 | pause: 23 | when: 24 | - ansible_play_hosts_all|map('extract', hostvars, '_upgrade_reasons')|map('default', [])|map('length')|select|list|length > 0 25 | or ansible_play_hosts_all|map('extract', hostvars, '_config_upgrade_reasons')|map('default', [])|map('length')|select|list|length > 0 26 | - groups.cp_running|default([])|length > 0 27 | - not action_reasons_review_skip|bool 28 | 29 | - name: 'Add control_plane to cp_init, if no cluster found' 30 | add_host: 31 | name: '{{ groups[kube_cp_group]|first }}' 32 | group: cp_init 33 | changed_when: false 34 | check_mode: false 35 | run_once: true 36 | when: 37 | - groups.cp_running|default([])|length == 0 38 | 39 | - name: 'Add control_plane to cp_upgrade, if upgrade needed' 40 | add_host: 41 | name: '{{ (groups.cp_running|default([0], true))|first }}' 42 | group: cp_upgrade 43 | changed_when: false 44 | check_mode: false 45 | when: 46 | - "'upgrade_version' in _config_upgrade_reasons" 47 | 48 | - name: 'Select node for upgrade if upgrade reasons found' 49 | add_host: 50 | groups: nodes_upgrade 51 | name: '{{ item }}' 52 | run_once: true 53 | check_mode: false 54 | changed_when: false 55 | loop: >- 56 | {{ ansible_play_hosts|zip( 57 | ansible_play_hosts|map('extract', hostvars, '_upgrade_reasons')|map('default', {})) 58 | |selectattr(1)|map(attribute=0) }} 59 | -------------------------------------------------------------------------------- /roles/upgrade_cp/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | drain: true 3 | kubeadm_upgrade_apply_force: false 4 | -------------------------------------------------------------------------------- /roles/upgrade_cp/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: drain_nodes 4 | galaxy_info: 5 | author: Julien Girardin 6 | description: Manage kubernetes control-plane upgrade via kubeadm 7 | company: Enix 8 | license: Apache 9 | min_ansible_version: 2.7 10 | platforms: 11 | - name: Ubuntu 12 | versions: 13 | - 18.04 14 | - 20.04 15 | galaxy_tags: 16 | - kubernetes 17 | - kubeadm 18 | -------------------------------------------------------------------------------- /roles/upgrade_cp/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Plan upgrade' 3 | command: >- 4 | kubeadm upgrade plan {{ kubeadm_version.version.split( '-' )|first }} 5 | {% for error in kubeadm_ignore_preflight_errors %} 6 | --ignore-preflight-errors={{ error }} 7 | {% endfor %} 8 | environment: 9 | KUBECONFIG: '/etc/kubernetes/admin.conf' 10 | register: plan_upgrade 11 | 12 | - name: 'Display result of "kubectl upgrade plan"' 13 | debug: 14 | msg: >- 15 | {{ dict(stderr=plan_upgrade.stderr_lines, 16 | stdout=plan_upgrade.stdout_lines) }} 17 | 18 | - name: 'Apply upgrade' 19 | command: >- 20 | kubeadm upgrade apply {{ kubeadm_version.version.split( '-' )|first }} -y 21 | {% for error in kubeadm_ignore_preflight_errors %} 22 | --ignore-preflight-errors={{ error }} 23 | {% endfor %} 24 | {% if kubeadm_upgrade_apply_force|bool %} 25 | --force 26 | {% 
endif %} 27 | register: apply_upgrade 28 | 29 | - name: 'Display result of "kubectl upgrade apply"' 30 | debug: 31 | msg: >- 32 | {{ dict(stderr=apply_upgrade.stderr_lines, 33 | stdout=apply_upgrade.stdout_lines) }} 34 | -------------------------------------------------------------------------------- /roles/upgrade_nodes/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | pause_before_upgrade: false 3 | -------------------------------------------------------------------------------- /roles/upgrade_nodes/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: common_vars 4 | - role: drain_nodes 5 | galaxy_info: 6 | author: Julien Girardin 7 | description: Manage kubernetes upgrade via kubeadm 8 | company: Enix 9 | license: Apache 10 | min_ansible_version: 2.7 11 | platforms: 12 | - name: Ubuntu 13 | versions: 14 | - 18.04 15 | - 20.04 16 | galaxy_tags: 17 | - kubernetes 18 | - kubeadm 19 | -------------------------------------------------------------------------------- /roles/upgrade_nodes/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Pause before upgrade' 3 | pause: 4 | when: pause_before_upgrade|bool 5 | 6 | - name: 'Apply upgrade' 7 | command: >- 8 | kubeadm upgrade node 9 | {% if _control_plane and enable_kubeadm_patches|bool -%} 10 | {% if _target_kube_version is version("1.23", "<") -%} 11 | {% if _target_kube_version is version("1.22", ">=") -%} 12 | --patches 13 | {%- else -%} 14 | --experimental-patches 15 | {%- endif %} 16 | {{ kubeadm_patch_dir }} 17 | {%- endif %} 18 | {%- endif -%} 19 | {% for error in kubeadm_ignore_preflight_errors %} 20 | --ignore-preflight-errors={{ error }} 21 | {% endfor %} 22 | register: upgrade_node 23 | notify: 24 | - Restart kubelet 25 | 26 | - name: 'Display result of "kubeadm upgrade node"' 27 | debug: 28 | msg: >- 29 | {{ dict(stderr=upgrade_node.stderr_lines, 30 | stdout=upgrade_node.stdout_lines) }} 31 | -------------------------------------------------------------------------------- /roles/user_kubeconfig/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | user_has_kubeconfig: true 3 | -------------------------------------------------------------------------------- /roles/user_kubeconfig/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: [] 3 | galaxy_info: 4 | author: Julien Girardin 5 | description: Copy admin kubeconfig for user 6 | company: Enix 7 | license: Apache 8 | min_ansible_version: 2.7 9 | platforms: 10 | - name: Ubuntu 11 | versions: 12 | - 19.04 13 | - 20.04 14 | galaxy_tags: 15 | - kubernetes 16 | - kubeadm 17 | - kubectl 18 | -------------------------------------------------------------------------------- /roles/user_kubeconfig/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'create .kube directory' 3 | file: # noqa 208 4 | dest: '~/.kube' 5 | state: directory 6 | register: kubeconfig_dir 7 | 8 | - name: 'copy admin kubeconfig for user' 9 | copy: # noqa 208 10 | remote_src: true 11 | mode: preserve 12 | src: "/etc/kubernetes/admin.conf" 13 | dest: "~/.kube/config" 14 | when: 15 | - user_has_kubeconfig|bool 16 | - not (kubeconfig_dir is changed and ansible_check_mode) 17 | 
-------------------------------------------------------------------------------- /scripts/detect-user.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | 3 | set -e 4 | 5 | PWD_UID=$(stat . -c "%u") 6 | if command -v useradd > /dev/null; then 7 | USERADD="useradd -d ${HOME}" 8 | GROUPADD="groupadd" 9 | else 10 | USERADD="adduser -D -H -h ${HOME}" 11 | GROUPADD="addgroup" 12 | fi 13 | 14 | if [ -S /var/run/docker.sock ]; then 15 | DOCKER_GID=$(stat /var/run/docker.sock -c "%g") 16 | if getent group ${DOCKER_GID} > /dev/null ; then 17 | USERADD="${USERADD} -G $(stat /var/run/docker.sock -c "%G")" 18 | else 19 | ${GROUPADD} -g ${DOCKER_GID} docker 20 | USERADD="${USERADD} -G docker" 21 | fi; 22 | fi 23 | 24 | if [ "$(id -u)" -ne "${PWD_UID}" ] ; then 25 | getent passwd ${PWD_UID} || ${USERADD} -u ${PWD_UID} enix 26 | PWD_UNAME=$(stat . -c "%U") 27 | if command -v sudo > /dev/null; then 28 | sudo -HEu ${PWD_UNAME} "$@" 29 | else 30 | exec su ${PWD_UNAME} "$@" 31 | fi; 32 | else 33 | "$@" 34 | fi 35 | -------------------------------------------------------------------------------- /scripts/lint.sh: -------------------------------------------------------------------------------- 1 | 2 | set -e 3 | 4 | poetry install --only=lint 5 | #if ! poetry run sh -c "command -v ansible-lint --version > /dev/null"; then 6 | # poetry run pip install ansible-lint==6.14.3 7 | #fi 8 | 9 | poetry run yamllint . 10 | poetry run isort --check . 11 | poetry run black --check . 12 | poetry run flake8 . 13 | #poetry run ansible-lint 14 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/enix/ansible-kubeadm/be8c956619739699109a13103e220319264fd83a/tests/__init__.py -------------------------------------------------------------------------------- /tests/ansible.requirements.yml: -------------------------------------------------------------------------------- 1 | --- 2 | roles: 3 | - src: git+https://gitlab.enix.io/ansible/fix-dns 4 | name: fix_dns 5 | - geerlingguy.containerd 6 | - geerlingguy.docker 7 | - geerlingguy.ntp 8 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa: F811 2 | import argparse 3 | import os 4 | import re 5 | 6 | import pytest 7 | import tenacity 8 | import yaml 9 | from pytest_bdd import given, parsers, then, when 10 | 11 | from tests.helpers.ansible import ( 12 | assert_ansible_error, 13 | install_ansible, 14 | install_galaxy_deps, 15 | run_ansible_playbook, 16 | ) 17 | from tests.helpers.terraform import TerraformCompose 18 | from tests.helpers.vagrant import LocalVagrant 19 | 20 | DEFAULT_OS = ["Ubuntu22.04"] 21 | ALL_OS = ["Ubuntu20.04", "Ubuntu22.04", "Debian11"] 22 | 23 | 24 | pytest_plugins = ["tests.helpers.provider"] 25 | 26 | 27 | def pytest_addoption(parser): 28 | TRUE_VALUES = ["true", "yes", "y", "1", True] 29 | parser.addoption( 30 | "--keep-servers", 31 | dest="keep_servers", 32 | nargs="?", 33 | type=bool, 34 | const=True, 35 | default=os.environ.get("KEEP_SERVERS", "false").lower() in TRUE_VALUES, 36 | ) 37 | parser.addoption( 38 | "--keep-servers-after-fail", 39 | dest="keep_servers_after_fail", 40 | nargs="?", 41 | type=lambda arg: arg in TRUE_VALUES, 42 | const=True, 43 | 
default=os.environ.get("KEEP_SERVERS_AFTER_FAIL", "true").lower() 44 | in TRUE_VALUES, 45 | ) 46 | parser.addoption( 47 | "--all-os", 48 | dest="os_list", 49 | action="store_const", 50 | const=ALL_OS, 51 | help="Run tests on all known OS", 52 | ) 53 | parser.addoption( 54 | "-O", 55 | "--os", 56 | dest="os_list", 57 | nargs="+", 58 | action="extend", 59 | default=[], 60 | help="Select OS to run tests on", 61 | ) 62 | parser.addoption( 63 | "-A", 64 | "--ansible", 65 | dest="ansible_extra_args", 66 | nargs=argparse.REMAINDER, 67 | help="Ansible extra args", 68 | ) 69 | 70 | 71 | def pytest_generate_tests(metafunc): 72 | if "operating_system" in metafunc.fixturenames: 73 | metafunc.parametrize( 74 | "operating_system", 75 | metafunc.config.getoption("os_list") or DEFAULT_OS, 76 | indirect=True, 77 | ) 78 | 79 | 80 | @pytest.fixture 81 | def operating_system(request, provider): 82 | provider.operating_system = request.param 83 | 84 | 85 | @pytest.fixture 86 | def openstack(tmp_path): 87 | return TerraformCompose( 88 | envs={"TF_VAR_inventory_dir": tmp_path}, 89 | mounts={tmp_path: tmp_path}, 90 | ) 91 | 92 | 93 | @pytest.fixture 94 | def vagrant(tmpdir): 95 | return LocalVagrant(inventory_dir_copy=tmpdir) 96 | 97 | 98 | @then("Set cluster {variable}={value}") 99 | @given("The cluster {variable)={value}") 100 | def cluster_set_param(provider, variable, value): 101 | provider.vars[variable] = value 102 | # Refresh infrastructure 103 | provider.apply() 104 | 105 | 106 | @pytest.fixture 107 | def ansible(virtualenv): 108 | install_ansible(virtualenv) 109 | 110 | 111 | @given(parsers.parse("I want ansible {version}"), target_fixture="ansible") 112 | def ansible_with_version(virtualenv, version): 113 | install_ansible(virtualenv, version) 114 | 115 | 116 | @pytest.fixture 117 | def galaxy_deps(ansible, virtualenv): 118 | install_galaxy_deps(virtualenv) 119 | 120 | 121 | @given("Some running VMs", target_fixture="inventory") 122 | def inventory(cluster): 123 | return cluster.inventory 124 | 125 | 126 | @when( 127 | parsers.re( 128 | r"With those group_vars on group (?P[\w-]+):\s*(?P.*)", 129 | re.DOTALL, 130 | ) 131 | ) 132 | def group_vars(inventory, group, vars_snippet): 133 | group_vars_dir = os.path.join(os.path.dirname(inventory), "group_vars") 134 | try: 135 | os.makedirs(group_vars_dir) 136 | except FileExistsError: 137 | if not os.path.isdir(group_vars_dir): 138 | raise 139 | group_vars_file = os.path.join(group_vars_dir, "{}.yml".format(group)) 140 | try: 141 | with open(group_vars_file) as fd: 142 | group_vars = yaml.safe_load(fd) 143 | except FileNotFoundError: 144 | group_vars = {} 145 | vars_dict = yaml.safe_load(vars_snippet) 146 | group_vars.update(vars_dict) 147 | with open(group_vars_file, "w+") as fd: 148 | fd.write(yaml.dump(group_vars)) 149 | 150 | 151 | @pytest.fixture() 152 | def results(): 153 | return {} 154 | 155 | 156 | @pytest.fixture 157 | def ansible_extra_args(request): 158 | return request.config.getoption("ansible_extra_args") 159 | 160 | 161 | @when( 162 | parsers.re( 163 | r"I (?Pdry-)?run the playbooks?:?\s+(?P.+?)(?P\s+with error:?\s+)?(?(with_err)(?P.+)|\Z)", 164 | re.DOTALL, 165 | ) 166 | ) 167 | @tenacity.retry(reraise=True, stop=tenacity.stop_after_attempt(2)) 168 | def ansible_playbook( 169 | inventory, 170 | virtualenv, 171 | galaxy_deps, 172 | ansible_extra_args, 173 | results, 174 | playbooks, 175 | dry_run, 176 | error, 177 | ): 178 | if dry_run == "dry-": 179 | dry_run = True 180 | else: 181 | dry_run = False 182 | playbook_list = re.findall(r"[\w./]+", 
playbooks) 183 | if not all(os.path.exists(p) for p in playbook_list): 184 | playbook_list_subdir = [os.path.join("playbooks", p) for p in playbook_list] 185 | if all(os.path.exists(p) for p in playbook_list_subdir): 186 | playbook_list = playbook_list_subdir 187 | else: 188 | raise ValueError("All playbooks could not be found") 189 | result = run_ansible_playbook( 190 | virtualenv, 191 | playbook_list, 192 | ansible_extra_args=ansible_extra_args, 193 | inventory=inventory, 194 | dry_run=dry_run, 195 | ) 196 | if error: 197 | assert result.status == "failed" 198 | assert error.strip() in result.stdout.read() 199 | else: 200 | assert_ansible_error(result) 201 | results.setdefault("ansible_run", []).append(result) 202 | 203 | 204 | @then("I should have a working cluster") 205 | @tenacity.retry(reraise=True, stop=tenacity.stop_after_attempt(2)) 206 | def ansible_kubeadm(inventory, virtualenv, galaxy_deps, ansible_extra_args, results): 207 | result = run_ansible_playbook( 208 | virtualenv, 209 | ["tests/playbooks/verify.yml"], 210 | ansible_extra_args=ansible_extra_args, 211 | inventory=inventory, 212 | ) 213 | assert_ansible_error(result) 214 | 215 | 216 | @when("I reset tasks counters") 217 | def reset_counter(results): 218 | results["ansible_run"] = [] 219 | 220 | 221 | @then("I should see no orange/yellow changed tasks") 222 | def check_changed_tasks(results): 223 | for run in results["ansible_run"]: 224 | for host_changed, number_changed in run.stats.get("changed", {}).items(): 225 | assert number_changed == 0 226 | -------------------------------------------------------------------------------- /tests/dev/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | -------------------------------------------------------------------------------- /tests/dev/group_vars/all/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | cluster_config: 3 | networking: 4 | podSubnet: 10.95.0.0/16 5 | controllerManager: 6 | extraArgs: 7 | "allocate-node-cidrs": "true" 8 | -------------------------------------------------------------------------------- /tests/features/haproxy.feature: -------------------------------------------------------------------------------- 1 | Feature: Haproxy 2 | A test to migrate from compose to haproxy 3 | 4 | Scenario: Test upgrade to haproxy pkg 5 | Given I want ansible 3 6 | Given Some running VMs 7 | 8 | When With those group_vars on group all: 9 | cri_name: docker 10 | cluster_config: 11 | networking: 12 | podSubnet: 10.95.0.0/16 13 | controllerManager: 14 | extraArgs: 15 | "allocate-node-cidrs": "true" 16 | kubelet_config: 17 | cgroupDriver: "systemd" 18 | apiserver_proxy_use_docker: true 19 | kube_version: 1.23 20 | When I run the playbook tests/playbooks/prepare.yml 21 | When I run the playbooks 00_apiserver_proxy.yml 22 | 01_site.yml 23 | When I run the playbook tests/playbooks/cni.yml 24 | Then I should have a working cluster 25 | 26 | 27 | When With those group_vars on group all: 28 | apiserver_proxy_use_docker: 29 | When I reset tasks counters 30 | When I run the playbooks 00_apiserver_proxy.yml 31 | 01_site.yml 32 | with error: 33 | As docker has been deprecated 34 | 35 | When With those group_vars on group all: 36 | apiserver_proxy_use_docker: false 37 | When I reset tasks counters 38 | When I dry-run the playbooks 00_apiserver_proxy.yml 39 | When I run the playbooks 00_apiserver_proxy.yml 40 | 01_site.yml 41 | Then I should have a working cluster 42 | 
-------------------------------------------------------------------------------- /tests/features/install.feature: -------------------------------------------------------------------------------- 1 | Feature: Install 2 | A test to install a kubeadm cluster from scratch 3 | 4 | Scenario Outline: Install via ansible-kubeadm 5 | Given I want ansible 3 6 | Given Some running VMs 7 | 8 | When With those group_vars on group all: 9 | cluster_config: 10 | networking: 11 | podSubnet: 10.95.0.0/16 12 | controllerManager: 13 | extraArgs: 14 | "allocate-node-cidrs": "true" 15 | cni: "kube-router" 16 | kubelet_config: 17 | cgroupDriver: "systemd" 18 | kube_version: <version> 19 | When I run the playbook tests/playbooks/prepare.yml 20 | When I dry-run the playbooks 00_apiserver_proxy.yml 21 | 01_site.yml 22 | When I run the playbooks 00_apiserver_proxy.yml 23 | 01_site.yml 24 | When I run the playbook tests/playbooks/cni.yml 25 | Then I should have a working cluster 26 | 27 | When I reset tasks counters 28 | And I run the playbooks 00_apiserver_proxy.yml 29 | 01_site.yml 30 | Then I should see no orange/yellow changed tasks 31 | 32 | Examples: 33 | | version | 34 | | 1.21 | 35 | | 1.27 | 36 | -------------------------------------------------------------------------------- /tests/features/upgrade.feature: -------------------------------------------------------------------------------- 1 | Feature: Upgrade 2 | A test to upgrade a kubeadm cluster 3 | 4 | Scenario Outline: Upgrade via ansible-kubeadm 5 | Given I want ansible 3 6 | Given Some running VMs 7 | 8 | When With those group_vars on group all: 9 | cluster_config: 10 | networking: 11 | podSubnet: 10.95.0.0/16 12 | controllerManager: 13 | extraArgs: 14 | "allocate-node-cidrs": "true" 15 | kubelet_config: 16 | cgroupDriver: "systemd" 17 | apiserver_proxy_use_docker: false 18 | kube_version: <from_version> 19 | action_reasons_review_skip: true 20 | When I run the playbook tests/playbooks/prepare.yml 21 | When I run the playbooks 00_apiserver_proxy.yml 22 | 01_site.yml 23 | When I run the playbook tests/playbooks/cni.yml 24 | 25 | When With those group_vars on group all: kube_version: <to_version> 26 | When I run the playbooks 00_apiserver_proxy.yml 27 | 01_site.yml 28 | 29 | Then I should have a working cluster 30 | 31 | Examples: 32 | | from_version | to_version | 33 | | 1.21 | 1.22 | 34 | | 1.23 | 1.24 | 35 | -------------------------------------------------------------------------------- /tests/helpers/ansible.py: -------------------------------------------------------------------------------- 1 | import itertools 2 | import os 3 | import re 4 | 5 | import ansible_runner 6 | 7 | 8 | def install_ansible(virtualenv, version=None): 9 | virtualenv.debug = True 10 | requirements_txt = os.path.join(os.path.dirname(__file__), "../requirements.txt") 11 | virtualenv.run(["pip", "install", "-r", requirements_txt]) 12 | if version is None: 13 | virtualenv.install_package("ansible") 14 | else: 15 | virtualenv.install_package("ansible", version=version) 16 | 17 | 18 | def install_galaxy_deps(virtualenv): 19 | test_dir = os.path.join(os.path.dirname(__file__), "..") 20 | virtualenv.run( 21 | [ 22 | "ansible-galaxy", 23 | "install", 24 | "-r", 25 | os.path.join(test_dir, "ansible.requirements.yml"), 26 | "-p", 27 | os.path.join(test_dir, "playbooks/roles"), 28 | ] 29 | ) 30 | 31 | 32 | def run_ansible_playbook( 33 | virtualenv, playbooks, dry_run=False, ansible_extra_args=None, **kwargs 34 | ): 35 | if isinstance(playbooks, str): 36 | playbooks = [playbooks] 37 | playbooks = [ 38 | 
os.path.join(os.path.dirname(__file__), "../..", pbk) for pbk in playbooks 39 | ] 40 | # ansible_runner has several "bugs": 41 | # - Don't accept multiple playbooks on the parameter "playbook" (which is supposed to accept list) 42 | # - If you pass custom binary it cannot say if ansible or ansible-playbook so doesn't inject playbook anymore 43 | # => thus, pass playbooks as cmdline 44 | envvars = dict(os.environ) 45 | envvars.setdefault("ANSIBLE_HOST_KEY_CHECKING", "false") 46 | envvars.setdefault("ANSIBLE_FORCE_COLOR", "true") 47 | cmdline = " ".join(itertools.chain(ansible_extra_args or [], playbooks)) 48 | if dry_run: 49 | cmdline += " -C" 50 | return ansible_runner.run( 51 | binary=os.path.join(virtualenv.virtualenv, "bin/ansible-playbook"), 52 | cmdline=cmdline, 53 | envvars=envvars, 54 | **kwargs 55 | ) 56 | 57 | 58 | def assert_ansible_error(run): 59 | assert run.status == "successful" 60 | assert len(re.findall(r".*fatal: .*", run.stdout.read())) == 0 61 | for host_failed, number_failed in run.stats.get("failures", {}).items(): 62 | assert number_failed == 0 63 | -------------------------------------------------------------------------------- /tests/helpers/provider.py: -------------------------------------------------------------------------------- 1 | import os 2 | from typing import Dict 3 | 4 | import pytest 5 | import tenacity 6 | from pytest import CollectReport, StashKey 7 | 8 | # https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures 9 | phase_report_key = StashKey[Dict[str, CollectReport]]() 10 | 11 | 12 | @pytest.hookimpl(tryfirst=True, hookwrapper=True) 13 | def pytest_runtest_makereport(item, call): 14 | # execute all other hooks to obtain the report object 15 | outcome = yield 16 | rep = outcome.get_result() 17 | 18 | # store test results for each phase of a call, which can 19 | # be "setup", "call", "teardown" 20 | item.stash.setdefault(phase_report_key, {})[rep.when] = rep 21 | 22 | 23 | @pytest.fixture 24 | def provider(request): 25 | if os.environ.get("OS_CLOUD") is not None: 26 | provider = "openstack" 27 | else: 28 | provider = "vagrant" 29 | # raise RuntimeError("Openstack EnvVar cannot be found") 30 | return request.getfixturevalue(provider) 31 | 32 | 33 | @tenacity.retry(reraise=True, stop=tenacity.stop_after_attempt(2)) 34 | def cluster_spawn(provider, keep_servers): 35 | provider.init() 36 | if not keep_servers: 37 | provider.destroy() 38 | provider.apply() 39 | 40 | 41 | @pytest.fixture 42 | def cluster(request, provider, operating_system): 43 | keep_servers = request.config.getoption("keep_servers") 44 | keep_after_fail = request.config.getoption("keep_servers_after_fail") 45 | try: 46 | cluster_spawn(provider, keep_servers) 47 | yield provider.cluster() 48 | report = request.node.stash[phase_report_key] 49 | if "call" in report and report["call"].failed: 50 | if not keep_after_fail: 51 | provider.destroy() 52 | elif not keep_servers: 53 | provider.destroy() 54 | except Exception: 55 | if not keep_after_fail: 56 | provider.destroy() 57 | -------------------------------------------------------------------------------- /tests/helpers/terraform.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | import subprocess 4 | import typing 5 | 6 | 7 | class TerraformCompose: 8 | KNOWN_OS = {} 9 | 10 | def __init__( 11 | self, 12 | tfdata_dir: str = ".", 13 | service_name: str = "terraform", 14 | compose_file: typing.Optional[str] = None, 15 | envs: 
dict = {}, 16 | mounts: dict = {}, 17 | version: typing.Optional[str] = None, 18 | ): 19 | self.tfdata_dir = tfdata_dir 20 | self.service_name = service_name 21 | self.compose_file = compose_file 22 | self.envs = envs 23 | self.mounts = mounts 24 | self.version = version 25 | self._operating_system = None 26 | 27 | @property 28 | def base_command(self): 29 | command = ["docker", "compose"] 30 | if self.compose_file: 31 | command += ["-f", self.compose_file] 32 | command += ["run"] 33 | for key, value in self.envs.items(): 34 | command += ["-e", "{}={}".format(key, value)] 35 | for src, dest in self.mounts.items(): 36 | command += ["-v", "{}:{}".format(src, dest)] 37 | if self.version: 38 | command += ["-e", "TERRAFORM_VERSION={}".format(self.version)] 39 | if self.tfdata_dir != ".": 40 | command += ["-w", self.tfdata_dir] 41 | command += [self.service_name] 42 | return command 43 | 44 | @property 45 | def vars(self): 46 | return VarSetter(self.envs) 47 | 48 | @property 49 | def operating_system(self): 50 | return self._operating_system 51 | 52 | @operating_system.setter 53 | def operating_system(self, operating_system): 54 | formated_name = " ".join(re.findall(r"([a-zA-Z]+|[-\d.]+)", operating_system)) 55 | self.vars["image_name"] = self.KNOWN_OS.get(operating_system, formated_name) 56 | self._operating_system = operating_system 57 | 58 | def init(self): 59 | return subprocess.check_call(self.base_command + ["init"]) 60 | 61 | def apply(self): 62 | return subprocess.check_call(self.base_command + ["apply", "-auto-approve"]) 63 | 64 | def cluster(self): 65 | return self 66 | 67 | def output(self): 68 | return json.loads( 69 | subprocess.check_output(self.base_command + ["output", "-json"]) 70 | ) 71 | 72 | @property 73 | def inventory(self): 74 | return self.output()["inventory"]["value"] 75 | 76 | def destroy(self): 77 | return subprocess.check_call(self.base_command + ["destroy", "-auto-approve"]) 78 | 79 | 80 | class VarSetter: 81 | def __init__(self, envs): 82 | self.envs = envs 83 | 84 | def __setitem__(self, variable, value): 85 | self.envs["TF_VAR_{}".format(variable)] = value 86 | -------------------------------------------------------------------------------- /tests/helpers/vagrant.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | 4 | import vagrant 5 | 6 | 7 | class LocalVagrant: 8 | KNOWN_OS = {} 9 | 10 | def __init__(self, envs={}, inventory_dir_copy=None): 11 | self.vagrant = vagrant.Vagrant(quiet_stdout=False, quiet_stderr=False) 12 | std_envs = dict(os.environ) # Inherit from current env, not reset it. 
13 | std_envs.update(envs) 14 | std_envs.setdefault("SKIP_ANSIBLE", "true") 15 | self.inventory_dir_copy = inventory_dir_copy 16 | self.vagrant.env = std_envs 17 | self._operating_system = None 18 | 19 | @property 20 | def operating_system(self): 21 | return self._operating_system 22 | 23 | @operating_system.setter 24 | def operating_system(self, operating_system): 25 | formated_name = "generic/{}".format(operating_system.replace(".", "").lower()) 26 | self.vars["BOX_IMAGE"] = self.KNOWN_OS.get(operating_system, formated_name) 27 | self._operating_system = operating_system 28 | 29 | @property 30 | def vars(self): 31 | return VagrantVarSetter(self.vagrant) 32 | 33 | def init(self): 34 | pass 35 | 36 | def apply(self): 37 | self.vagrant.up() 38 | 39 | def cluster(self): 40 | return self 41 | 42 | @property 43 | def vagrant_inventory(self): 44 | return os.path.join( 45 | os.path.dirname(__file__), 46 | "..", 47 | "..", 48 | ".vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory", 49 | ) 50 | 51 | @property 52 | def inventory(self): 53 | # give a fresh dir each time, to add group vars 54 | if self.inventory_dir_copy: 55 | new_inventory = os.path.join(self.inventory_dir_copy, "vagrant.cfg") 56 | shutil.copyfile(self.vagrant_inventory, new_inventory) 57 | return new_inventory 58 | else: 59 | return self.vagrant_inventory 60 | 61 | def destroy(self): 62 | self.vagrant.destroy() 63 | 64 | 65 | class VagrantVarSetter: 66 | def __init__(self, vagrant): 67 | self.vagrant = vagrant 68 | 69 | def __setitem__(self, variable, value): 70 | self.vagrant.env[variable.upper()] = value 71 | -------------------------------------------------------------------------------- /tests/playbooks/cni.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: '{{ kube_cp_group|default("kube_control_plane") }}' 3 | vars: 4 | kube_router: >- 5 | https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml 6 | calico: >- 7 | https://raw.githubusercontent.com/projectcalico/calico/v3.25.1/manifests/calico-vxlan.yaml 8 | _cni: "{{ cni|default('kube_router') }}" 9 | tasks: 10 | - name: "Install CNI" 11 | command: >- 12 | kubectl apply -f {{ vars[_cni.replace('-', '_')] }} 13 | when: _cni is not false and _cni|trim|length > 0 14 | run_once: true 15 | register: cni_install 16 | 17 | - debug: 18 | var: cni_install 19 | run_once: true 20 | -------------------------------------------------------------------------------- /tests/playbooks/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | gather_facts: false 4 | tasks: 5 | - wait_for_connection: 6 | - command: swapoff -a 7 | 8 | - hosts: all 9 | roles: 10 | - role: fix_dns 11 | when: fix_dns|default(false)|bool == true 12 | 13 | - hosts: all 14 | gather_facts: false 15 | tasks: 16 | - name: "Update cache" 17 | apt: 18 | update_cache: yes 19 | when: ansible_os_family == 'Debian' 20 | 21 | - hosts: all 22 | gather_facts: false 23 | vars: 24 | _cri_name: "{{ cri_name | default('containerd') }}" 25 | tasks: 26 | - group_by: 27 | key: "cri_{{ cri_name | default('containerd') }}" 28 | changed_when: false 29 | 30 | - hosts: cri_containerd 31 | gather_facts: false 32 | vars: 33 | containerd_sysctl_params: 34 | net.bridge.bridge-nf-call-iptables: 1 35 | net.ipv4.ip_forward: 1 36 | net.bridge.bridge-nf-call-ip6tables: 1 37 | containerd_module_load: 38 | - overlay 39 | - br_netfilter 40 | pre_tasks: 41 | - name: 'Persist 
module load' 42 | copy: 43 | content: | 44 | {{ containerd_module_load|join('\n') }} 45 | dest: /etc/modules-load.d/containerd.conf 46 | 47 | - name: 'load modules for current runtime' 48 | modprobe: 49 | name: '{{ item }}' 50 | state: present 51 | with_items: '{{ containerd_module_load }}' 52 | 53 | - name: 'Set sysctl parameters' 54 | sysctl: 55 | name: '{{ item.key }}' 56 | value: '{{ item.value }}' 57 | sysctl_set: true 58 | sysctl_file: /etc/sysctl.d/99-kubernetes-cri.conf 59 | with_dict: '{{ containerd_sysctl_params }}' 60 | roles: 61 | - role: geerlingguy.containerd 62 | tasks: 63 | - name: "Set SytemdCgroup = true" 64 | lineinfile: 65 | dest: /etc/containerd/config.toml 66 | regexp: '(\s+)SystemdCgroup.*' 67 | backrefs: true 68 | line: '\g<1>SystemdCgroup = true' 69 | notify: ['restart containerd'] 70 | 71 | - hosts: cri_docker 72 | gather_facts: false 73 | roles: 74 | - role: geerlingguy.docker 75 | vars: 76 | docker_install_compose: false 77 | docker_daemon_options: 78 | exec-opts: ["native.cgroupdriver=systemd"] 79 | log-driver: "json-file" 80 | log-opts: 81 | max-size: "100m" 82 | -------------------------------------------------------------------------------- /tests/playbooks/roles/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !.gitignore 3 | -------------------------------------------------------------------------------- /tests/playbooks/verify.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: '{{ kube_cp_group|default("kube_control_plane") }}:{{ kube_worker_group|default("kube_workers") }}' 3 | tasks: 4 | - package_facts: 5 | 6 | - debug: 7 | msg: 8 | - "{{ packages.kubectl }}" 9 | - "{{ packages.kubelet }}" 10 | - assert: 11 | that: 12 | - (packages.kubectl|first).version is match(kube_version ~ '.*') 13 | - (packages.kubelet|first).version is match(kube_version ~ '.*') 14 | 15 | - hosts: '{{ kube_cp_group|default("kube_control_plane") }}' 16 | tasks: 17 | - package_facts: 18 | 19 | - debug: 20 | msg: 21 | - "{{ packages.kubeadm }}" 22 | - assert: 23 | that: 24 | - (packages.kubeadm|first).version is match(kube_version ~ '.*') 25 | 26 | - name: 'Check kubeadm status' 27 | command: kubectl get nodes -o yaml 28 | changed_when: false 29 | check_mode: false 30 | register: all_nodes_yaml 31 | failed_when: >- 32 | dict(kubelet_ready).values()|rejectattr("status", "eq", "True")|list|length != 0 33 | until: all_nodes_yaml is not failed 34 | retries: 10 35 | vars: 36 | all_nodes: "{{ (all_nodes_yaml.stdout|from_yaml) }}" 37 | kubelet_ready: >- 38 | {{ all_nodes['items']|map(attribute="metadata.name")|zip( 39 | all_nodes['items']|map(attribute="status.conditions") 40 | |map("selectattr", "type", "eq", "Ready") 41 | |map("first") 42 | |list 43 | ) }} 44 | -------------------------------------------------------------------------------- /tests/requirements.txt: -------------------------------------------------------------------------------- 1 | arrow 2 | netaddr 3 | -------------------------------------------------------------------------------- /tests/terraform/openstack/.gitignore: -------------------------------------------------------------------------------- 1 | .terraform 2 | terraform.tfstate 3 | terraform.tfstate.backup 4 | *.cfg 5 | group_vars 6 | host_vars 7 | -------------------------------------------------------------------------------- /tests/terraform/openstack/.terraform.lock.hcl: 
-------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/hashicorp/local" { 5 | version = "2.4.0" 6 | hashes = [ 7 | "h1:R97FTYETo88sT2VHfMgkPU3lzCsZLunPftjSI5vfKe8=", 8 | "zh:53604cd29cb92538668fe09565c739358dc53ca56f9f11312b9d7de81e48fab9", 9 | "zh:66a46e9c508716a1c98efbf793092f03d50049fa4a83cd6b2251e9a06aca2acf", 10 | "zh:70a6f6a852dd83768d0778ce9817d81d4b3f073fab8fa570bff92dcb0824f732", 11 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 12 | "zh:82a803f2f484c8b766e2e9c32343e9c89b91997b9f8d2697f9f3837f62926b35", 13 | "zh:9708a4e40d6cc4b8afd1352e5186e6e1502f6ae599867c120967aebe9d90ed04", 14 | "zh:973f65ce0d67c585f4ec250c1e634c9b22d9c4288b484ee2a871d7fa1e317406", 15 | "zh:c8fa0f98f9316e4cfef082aa9b785ba16e36ff754d6aba8b456dab9500e671c6", 16 | "zh:cfa5342a5f5188b20db246c73ac823918c189468e1382cb3c48a9c0c08fc5bf7", 17 | "zh:e0e2b477c7e899c63b06b38cd8684a893d834d6d0b5e9b033cedc06dd7ffe9e2", 18 | "zh:f62d7d05ea1ee566f732505200ab38d94315a4add27947a60afa29860822d3fc", 19 | "zh:fa7ce69dde358e172bd719014ad637634bbdabc49363104f4fca759b4b73f2ce", 20 | ] 21 | } 22 | 23 | provider "registry.terraform.io/terraform-provider-openstack/openstack" { 24 | version = "1.52.1" 25 | constraints = "~> 1.52.1" 26 | hashes = [ 27 | "h1:tzawotEtjBcVWnzA+wAqcbkxW7XnJCfXqod4SBts9vI=", 28 | "zh:037f7ab5a0942daee00d23402e7ccab472380864e13013284910fa7841a6e37c", 29 | "zh:52ac973e6c5cd584c5086494218e9b49d93217f5fbc34fc76fa8a9ddd635447a", 30 | "zh:5acad7b8c7a493fd0b659271743e2853859a4b2669df26f21aecf1b2f60fa706", 31 | "zh:5d9218a7f10849f2227fc11df19f78b3b11cccade6b674c314e804f0e98d4368", 32 | "zh:91ea6bf80ff706e734300041cf22e946c049abf8dcf1bed899f93f20f7779121", 33 | "zh:961d67ebf1116bd539b726ef483f7d67c95351efd09e55fbeb30cd2ca7946a12", 34 | "zh:9d3d8ee11cda45804e9b759064fbc9f47d6f54203bd17654236f2f601424b460", 35 | "zh:a0af7e5bad6114a7a0ac88cee63e2c14558572e293bebcf651ed8d8d9c20dfda", 36 | "zh:a1fd5609f61a43c9c2a403e024042afc3a45fde39935a388009d05105e2d39d3", 37 | "zh:bd84aae9f2ac6eb978837ea5994bb24be221e2e4d69a3e8842eef3fcf62594f0", 38 | "zh:be690e77aa497ab8bb8ed59f7e03018e96805e2e13df334086a8c5ac4290db09", 39 | "zh:c4ee17773e7295b0598e36148ac49b2c61caa6da3f7b02e439aa61ca6486da07", 40 | "zh:c871d03abf9c916584dd8fc6b63ed85bbe41208eba684b2175ac741003bf9d25", 41 | "zh:f1e5c4a5740ad75b9b37376db4ea0e3067b0c2b6871521bbc6a1625bef137abf", 42 | ] 43 | } 44 | -------------------------------------------------------------------------------- /tests/terraform/openstack/common.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | ssh_key_glob = tolist(fileset("/", pathexpand(var.ssh_key_path))) 3 | ssh_key_path = "/${length(local.ssh_key_glob) >= 1 ? 
element(local.ssh_key_glob, 0) : pathexpand(var.ssh_key_path)}" 4 | } 5 | 6 | data "local_file" "ssh_key" { 7 | filename = local.ssh_key_path 8 | } 9 | 10 | resource "openstack_compute_keypair_v2" "ssh_deploy" { 11 | name = var.stem 12 | public_key = data.local_file.ssh_key.content 13 | } 14 | 15 | data "openstack_images_image_v2" "image_name" { 16 | name_regex = "${var.image_name}.*" 17 | most_recent = true 18 | } 19 | -------------------------------------------------------------------------------- /tests/terraform/openstack/control_plane.tf: -------------------------------------------------------------------------------- 1 | resource "openstack_compute_servergroup_v2" "cp_group" { 2 | name = "${var.stem}-cp" 3 | policies = ["soft-anti-affinity"] 4 | } 5 | 6 | resource "openstack_compute_instance_v2" "control_plane" { 7 | name = "${var.stem}-cp-${count.index + 1}" 8 | image_id = data.openstack_images_image_v2.image_name.id 9 | flavor_name = "GP2.2" 10 | key_pair = openstack_compute_keypair_v2.ssh_deploy.name 11 | security_groups = ["default", openstack_compute_secgroup_v2.kubeadm.name] 12 | 13 | dynamic "network" { 14 | for_each = local.network_id_list 15 | content { 16 | uuid = network.value 17 | } 18 | } 19 | 20 | scheduler_hints { 21 | group = openstack_compute_servergroup_v2.cp_group.id 22 | } 23 | 24 | metadata = { 25 | "groups.enix.io" = "kube_control_plane" 26 | } 27 | 28 | count = var.control_plane_count 29 | } 30 | 31 | resource "openstack_networking_floatingip_v2" "floatip_cp" { 32 | pool = var.floating_pool 33 | 34 | count = var.control_plane_count 35 | } 36 | 37 | resource "openstack_compute_floatingip_associate_v2" "cp_pub_ip" { 38 | floating_ip = openstack_networking_floatingip_v2.floatip_cp[count.index].address 39 | instance_id = openstack_compute_instance_v2.control_plane[count.index].id 40 | 41 | count = var.control_plane_count 42 | } 43 | -------------------------------------------------------------------------------- /tests/terraform/openstack/inventory.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | image_match_user = [ 3 | ["U|ubuntu.*", "ubuntu"], 4 | ["D|debian.*", "debian"] 5 | ] 6 | login_user = element([for match in local.image_match_user: match[1] if length(regexall(match[0], var.image_name)) > 0], 1) 7 | } 8 | 9 | resource "local_file" "inventory" { 10 | content = templatefile("${path.module}/inventory.tpl", { 11 | kube_control_plane = zipmap( 12 | openstack_compute_instance_v2.control_plane.*.name, 13 | openstack_networking_floatingip_v2.floatip_cp.*.address 14 | ), 15 | kube_workers = zipmap( 16 | openstack_compute_instance_v2.workers.*.name, 17 | openstack_networking_floatingip_v2.floatip_workers.*.address 18 | ) 19 | allocate_private_net = var.allocate_private_net 20 | private_subnet = var.private_subnet 21 | login_user = local.login_user 22 | }) 23 | filename = "${var.inventory_dir}/${var.stem}-hosts.cfg" 24 | } 25 | 26 | 27 | output "inventory" { 28 | value = local_file.inventory.filename 29 | } 30 | -------------------------------------------------------------------------------- /tests/terraform/openstack/inventory.tpl: -------------------------------------------------------------------------------- 1 | [kube_control_plane] 2 | %{ for server, ip in kube_control_plane ~} 3 | ${server} ansible_host=${ip} 4 | %{ endfor ~} 5 | 6 | [kube_workers] 7 | %{ for server, ip in kube_workers ~} 8 | ${server} ansible_host=${ip} 9 | %{ endfor ~} 10 | 11 | [kube:children] 12 | kube_control_plane 13 | kube_workers 14 
/tests/terraform/openstack/network.tf:
--------------------------------------------------------------------------------
data "openstack_networking_network_v2" "network" {
  name = var.network_name
}

resource "openstack_compute_secgroup_v2" "kubeadm" {
  name        = var.stem
  description = "Interconnection for cluster + ssh"

  rule {
    from_port   = 22
    to_port     = 22
    ip_protocol = "tcp"
    cidr        = "0.0.0.0/0"
  }

  rule {
    from_port   = 1
    to_port     = 65535
    ip_protocol = "tcp"
    self        = true
  }

  rule {
    from_port   = 1
    to_port     = 65535
    ip_protocol = "udp"
    self        = true
  }
}

resource "openstack_networking_network_v2" "private_net" {
  name        = var.stem
  description = "private network"
  count       = var.allocate_private_net == true ? 1 : 0
}

resource "openstack_networking_subnet_v2" "private_subnet" {
  name       = var.stem
  network_id = openstack_networking_network_v2.private_net[0].id
  cidr       = var.private_subnet
  no_gateway = true
  ip_version = 4
  count      = var.allocate_private_net == true ? 1 : 0
}

locals {
  network_id_list = compact([
    data.openstack_networking_network_v2.network.id,
    var.allocate_private_net == true ? openstack_networking_network_v2.private_net[0].id : null
  ])
}
--------------------------------------------------------------------------------
/tests/terraform/openstack/nodes.tf:
--------------------------------------------------------------------------------
resource "openstack_compute_servergroup_v2" "workers_group" {
  name     = "${var.stem}-workers"
  policies = ["soft-anti-affinity"]
}

resource "openstack_compute_instance_v2" "workers" {
  name            = "${var.stem}-node-${count.index + 1}"
  image_id        = data.openstack_images_image_v2.image_name.id
  flavor_name     = "GP2.2"
  key_pair        = openstack_compute_keypair_v2.ssh_deploy.name
  security_groups = ["default", openstack_compute_secgroup_v2.kubeadm.name]

  dynamic "network" {
    for_each = local.network_id_list
    content {
      uuid = network.value
    }
  }

  scheduler_hints {
    group = openstack_compute_servergroup_v2.workers_group.id
  }

  metadata = {
    "groups.enix.io" = "kube_workers"
  }

  count = var.worker_count
}

resource "openstack_networking_floatingip_v2" "floatip_workers" {
  pool = var.floating_pool

  count = var.worker_count
}

resource "openstack_compute_floatingip_associate_v2" "workers_pub_ip" {
  floating_ip = openstack_networking_floatingip_v2.floatip_workers[count.index].address
  instance_id = openstack_compute_instance_v2.workers[count.index].id

  count = var.worker_count
}
--------------------------------------------------------------------------------
/tests/terraform/openstack/openstack.tf:
--------------------------------------------------------------------------------
provider "openstack" {}
--------------------------------------------------------------------------------
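The inventory output declared in inventory.tf is how a test harness can locate the generated hosts file. The repository's actual glue code lives in tests/helpers/ and is not part of this excerpt; the following is only a hypothetical Python sketch of how that output could be consumed after terraform apply (the TF_DIR path and function names are assumptions):

    import json
    import subprocess

    TF_DIR = "tests/terraform/openstack"  # assumed to be run from the repo root

    def inventory_path() -> str:
        # "terraform output -json inventory" prints the output value as JSON,
        # which for this output is simply the path of the generated hosts file.
        result = subprocess.run(
            ["terraform", "output", "-json", "inventory"],
            cwd=TF_DIR, check=True, capture_output=True, text=True,
        )
        return json.loads(result.stdout)

    def run_playbook(playbook: str) -> None:
        # Point ansible-playbook at the generated inventory.
        subprocess.run(["ansible-playbook", "-i", inventory_path(), playbook], check=True)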
/tests/terraform/openstack/variable.tf:
--------------------------------------------------------------------------------
variable "worker_count" {
  default = 1
}

variable "control_plane_count" {
  default = 2
}

variable "image_name" {
  default = "Ubuntu 22.04"
}

variable "floating_pool" {
  default = "Public Floating"
}

variable "network_name" {
  default = "internal"
}

variable "ssh_key_path" {
  default = "~/.ssh/id_rsa.pub"
}

variable "stem" {
  default = "kubeadm"
}

variable "inventory_dir" {
  default = "."
}

variable "allocate_private_net" {
  default = true
}

variable "private_subnet" {
  default = "192.168.199.0/24"
}
--------------------------------------------------------------------------------
/tests/terraform/openstack/versions.tf:
--------------------------------------------------------------------------------
terraform {
  required_version = ">= 0.14.0"
  required_providers {
    openstack = {
      source  = "terraform-provider-openstack/openstack"
      version = "~> 1.52.1"
    }
  }
}
--------------------------------------------------------------------------------
/tests/test_basic.py:
--------------------------------------------------------------------------------
from pytest_bdd import scenario

# For parametrization on operating_system to work, the fixture must be
# requested directly by the test function.


@scenario("features/install.feature", "Install via ansible-kubeadm")
def test_install(operating_system):
    pass


@scenario("features/upgrade.feature", "Upgrade via ansible-kubeadm")
def test_upgrade(operating_system):
    pass


@scenario("features/haproxy.feature", "Test upgrade to haproxy pkg")
def test_haproxy(operating_system):
    pass
--------------------------------------------------------------------------------
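The comment in test_basic.py refers to a pytest behaviour that is easy to miss: parametrization attached to a fixture only multiplies the tests that actually request that fixture. The repository's real operating_system fixture is defined in tests/conftest.py, which is not shown in this excerpt; a minimal, hypothetical sketch of the idea looks like this (the params values are made up):

    import pytest

    # Hypothetical fixture: the real one lives in tests/conftest.py.
    @pytest.fixture(params=["ubuntu-22.04", "debian-11"])
    def operating_system(request):
        # request.param is the OS image for the current test run.
        return request.param

Because test_install, test_upgrade and test_haproxy each take operating_system as an argument, pytest collects one test item per value in params.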