├── .github
│   └── workflows
│       └── main.yaml
├── LICENSE
├── README.md
├── defaults
│   └── main.yml
├── files
│   ├── calico-net.yaml
│   ├── cert-manager-values.yaml
│   ├── cillium-default-priorityclass.yaml
│   ├── config.toml.tmpl
│   ├── helm.sh
│   ├── is_cluster_ready
│   ├── kubernetes-dashboard.yaml
│   ├── kuberouter-net.yaml
│   ├── weave-net.yaml
│   └── yunikorn-values.yaml
├── handlers
│   └── main.yml
├── meta
│   └── main.yml
├── tasks
│   ├── Debian.yaml
│   ├── RedHat.yaml
│   ├── cert-manager.yaml
│   ├── cri-dockerd.yaml
│   ├── delete_wns.yaml
│   ├── front.yaml
│   ├── helm.yaml
│   ├── helm_chart.yaml
│   ├── ingress.yaml
│   ├── k3s.yaml
│   ├── kube_nets.yaml
│   ├── kubeadm.yaml
│   ├── kubeapps.yaml
│   ├── kyverno.yaml
│   ├── main.yaml
│   ├── nfs-client.yaml
│   ├── wn.yaml
│   └── yunikorn.yaml
├── templates
│   ├── 10-containerd-net.conflist.j2
│   ├── cilium-values-1.11.6.yaml.j2
│   ├── cilium-values-1.12.0.yaml.j2
│   ├── cilium-values-1.12.3.yaml.j2
│   ├── cilium-values-1.15.6.yaml.j2
│   ├── dns01_secret.j2
│   ├── flannel-net.j2
│   ├── ingress-values.j2
│   ├── kubeadm-config-join.j2
│   ├── kubeadm-config.j2
│   ├── kubeapps-ingress.j2
│   ├── kubernetes-dashboard-ingress.j2
│   ├── kyverno-values.j2
│   ├── longhorn.j2
│   ├── nvidia-device-plugin.j2
│   ├── persistent-volumes.j2
│   ├── prod_issuer.j2
│   └── wildcard_cert.j2
└── tests
    ├── inventory
    ├── test-crio.yml
    ├── test-docker.yml
    └── test.yml
/.github/workflows/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | name: CI
3 |
4 | on:
5 |
6 | pull_request:
7 | branches: [ master ]
8 |
9 | # Allows you to run this workflow manually from the Actions tab
10 | workflow_dispatch:
11 |
12 | jobs:
13 | test-kubeadm:
14 |
15 | runs-on: ubuntu-latest
16 |
17 | steps:
18 | - name: checkout
19 | uses: actions/checkout@v4
20 |
21 | - name: Install python
22 | run: sudo apt update && sudo apt install -y python3 python3-pip python3-setuptools
23 |
24 | - name: Remove pre-installed kubectl to avoid errors
25 | run: |
26 | sudo apt-get purge kube* buildah podman -y
27 | sudo apt autoremove
28 | sudo rm -rf ~/.kube
29 | sudo rm -rf /etc/cni /etc/kubernetes
30 |
31 | - name: Install Ansible
32 | run: sudo pip3 install dnspython ansible==8.7.0
33 |
34 | - name: Create ansible.cfg with correct roles_path
35 | run: sudo printf '[defaults]\nhost_key_checking = False\nroles_path=../' > ansible.cfg
36 |
37 | - name: Install required Ansible Galaxy roles
38 | run: sudo ansible-galaxy install geerlingguy.ntp grycap.docker grycap.cri_o
39 |
40 | - name: Basic role syntax check
41 | run: sudo ansible-playbook tests/test.yml -i tests/inventory --syntax-check
42 |
43 | - name: Basic role check in front
44 | run: sudo ansible-playbook tests/test.yml -i tests/inventory
45 |
46 | - name: Basic role check in wn
47 | run: sudo ansible-playbook tests/test.yml -i tests/inventory -e kube_type_of_node=wn -e kube_server=localhost
48 |
49 | - name: Test nodes
50 | run: sudo kubectl -s https://localhost:6443 --insecure-skip-tls-verify --kubeconfig /etc/kubernetes/admin.conf get nodes
51 |
52 | - name: Test ingress
53 | run: curl -k https://localhost
54 |
55 | test-k3s:
56 |
57 | runs-on: ubuntu-latest
58 |
59 | steps:
60 | - name: checkout
61 | uses: actions/checkout@v4
62 |
63 | - name: Install python
64 | run: sudo apt update && sudo apt install -y python3 python3-pip python3-setuptools
65 |
66 | - name: Remove pre-installed kubectl to avoid errors
67 | run: sudo apt remove kubectl buildah podman -y
68 |
69 | - name: Install Ansible
70 | run: sudo pip3 install ansible==8.7.0
71 |
72 | - name: Create ansible.cfg with correct roles_path
73 | run: sudo printf '[defaults]\nhost_key_checking = False\nroles_path=../' > ansible.cfg
74 |
75 | - name: Install required Ansible Galaxy roles
76 | run: sudo ansible-galaxy install geerlingguy.ntp grycap.docker grycap.cri_o
77 |
78 | - name: Basic role syntax check
79 | run: sudo ansible-playbook tests/test.yml -i tests/inventory --syntax-check
80 |
81 | - name: Basic role check in front
82 | run: sudo ansible-playbook tests/test.yml -i tests/inventory -e kube_install_metrics=false -e kube_install_method=k3s -e kube_version=v1.25.2+k3s1
83 |
84 | # - name: Basic role check in wn
85 | # run: sudo ansible-playbook tests/test.yml -i tests/inventory -e kube_type_of_node=wn -e kube_server=localhost -e kube_install_method=k3s -e kube_version=v1.25.2+k3s1 -e kube_k3_exec='--lb-server-port=6445'
86 |
87 | - name: Test nodes
88 | run: sudo kubectl -s https://localhost:6443 --insecure-skip-tls-verify --kubeconfig /etc/rancher/k3s/k3s.yaml get nodes
89 |
90 |
91 | test-crio:
92 |
93 | runs-on: ubuntu-latest
94 | if: false
95 | steps:
96 | - name: checkout
97 | uses: actions/checkout@v4
98 |
99 | - name: Install python
100 | run: sudo apt update && sudo apt install -y python3 python3-pip python3-setuptools
101 |
102 | - name: Remove pre-installed kubectl to avoid errors
103 | run: sudo apt remove kubectl buildah podman -y
104 |
105 | - name: Stop containerd
106 | run: sudo systemctl stop containerd; sudo rm -f /var/run/containerd/containerd.sock
107 |
108 | - name: Install Ansible
109 | run: sudo pip3 install ansible==8.7.0
110 |
111 | - name: Create ansible.cfg with correct roles_path
112 | run: sudo printf '[defaults]\nhost_key_checking = False\nroles_path=../' > ansible.cfg
113 |
114 | - name: Install required Ansible Galaxy roles
115 | run: sudo ansible-galaxy install geerlingguy.ntp grycap.docker grycap.cri_o
116 |
117 | - name: Basic role syntax check
118 | run: sudo ansible-playbook tests/test-crio.yml -i tests/inventory --syntax-check
119 |
120 | - name: Basic role check in front
121 | run: sudo ansible-playbook tests/test-crio.yml -i tests/inventory
122 |
123 | - name: Basic role check in wn
124 | run: sudo ansible-playbook tests/test-crio.yml -i tests/inventory -e kube_type_of_node=wn -e kube_server=localhost
125 |
126 | - name: Test nodes
127 | run: sudo kubectl -s https://localhost:6443 --insecure-skip-tls-verify --kubeconfig /etc/kubernetes/admin.conf get nodes
128 |
129 | test-docker:
130 |
131 | runs-on: ubuntu-latest
132 |
133 | steps:
134 | - name: checkout
135 | uses: actions/checkout@v4
136 |
137 | - name: Install python
138 | run: sudo apt update && sudo apt install -y python3 python3-pip python3-setuptools
139 |
140 | - name: Remove pre-installed kubectl to avoid errors
141 | run: |
142 | sudo apt-get purge kube* buildah podman -y
143 | sudo apt autoremove
144 | sudo rm -rf ~/.kube
145 | sudo rm -rf /etc/cni /etc/kubernetes
146 |
147 | - name: Install Ansible
148 | run: sudo pip3 install dnspython ansible==8.7.0
149 |
150 | - name: Create ansible.cfg with correct roles_path
151 | run: sudo printf '[defaults]\nhost_key_checking = False\nroles_path=../' > ansible.cfg
152 |
153 | - name: Install required Ansible Galaxy roles
154 | run: sudo ansible-galaxy install geerlingguy.ntp grycap.docker grycap.cri_o
155 |
156 | - name: Basic role syntax check
157 | run: sudo ansible-playbook tests/test-docker.yml -i tests/inventory --syntax-check
158 |
159 | - name: Basic role check in front
160 | run: sudo ansible-playbook tests/test-docker.yml -i tests/inventory
161 |
162 | - name: Basic role check in wn
163 | run: sudo ansible-playbook tests/test-docker.yml -i tests/inventory -e kube_type_of_node=wn -e kube_server=localhost
164 |
165 | - name: Test nodes
166 | run: sudo kubectl -s https://localhost:6443 --insecure-skip-tls-verify --kubeconfig /etc/kubernetes/admin.conf get nodes
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright {yyyy} {name of copyright owner}
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 | Kubernetes Role
3 | =======================
4 |
5 | This Ansible role installs a [Kubernetes](https://kubernetes.io/) cluster.
6 |
7 | This work is co-funded by the [EOSC-hub project](http://eosc-hub.eu/) (Horizon 2020) under Grant number 777536.
8 |
9 |
10 |
11 | Role Variables
12 | ----------------
13 |
14 | The variables that can be passed to this role, with a brief description of each, are as follows.
15 |
16 | # Version to install or latest (1.24 or higher)
17 | kube_version: 1.24.17
18 | # Type of node front or wn
19 | kube_type_of_node: front
20 | # IP address or name of the Kube front node
21 | kube_server: "{{ ansible_default_ipv4.address }}"
22 | # Security Token
23 | kube_token: "kube01.{{ lookup('password', '/var/tmp/tokenpass chars=ascii_lowercase,digits length=16') }}"
24 | # Token TTL duration (0 do not expire)
25 | kube_token_ttl: 0
26 | # POD network cidr
27 | kube_pod_network_cidr: 10.244.0.0/16
28 | # Type of network to install (currently supported: flannel, kube-router, calico, weave)
29 | kube_network: flannel
30 | # Kubelet extra args
31 | kubelet_extra_args: ''
32 | # Kube API server options
33 | kube_apiserver_options: []
34 | # Helm version
35 | kube_install_helm_version: "v3.17.2"
36 | # Deploy the Dashboard
37 | kube_deploy_dashboard: false
38 | # value to pass to the kubeadm init --apiserver-advertise-address option
39 | kube_api_server: 0.0.0.0
40 | # A set of git repos and paths to be applied in the cluster. Following this format:
41 | # kube_apply_repos: [{repo: "https://github.com/kubernetes-incubator/metrics-server", version: "master", path: "deploy/1.8+/"}]
42 | kube_apply_repos: []
43 | # Flag to set Metrics-Server to be installed
44 | kube_install_metrics: false
45 | # Metrics-Server Helm chart version to install
46 | kube_metrics_chart_version: "3.12.2"
47 | # Flag to set the nginx ingress controller to be installed
48 | kube_install_ingress: false
49 | # Nginx ingress controller Helm chart version to install
50 | kube_ingress_chart_version: "4.12.1"
51 | # Flag to set the kubeapps UI to be installed
52 | kube_install_kubeapps: false
53 | # KubeApps chart version to install (or latest)
54 | kube_kubeapps_chart_version: "12.2.10"
55 | # Flag to set nfs-client-provisioner to be installed
56 | kube_install_nfs_client: false
57 | # NFS path used by nfs-client-provisioner
58 | kube_nfs_path: /pv
59 | # NFS server used by nfs-client-provisioner
60 | kube_nfs_server: kubeserver.localdomain
61 | # Set reclaimPolicy of NFS StorageClass Delete or Retain
62 | kube_nfs_reclaim_policy: Delete
63 | # NFS client Helm chart version to install
64 | kube_nfs_chart_version: "4.0.18"
65 | # Extra options for the flannel plugin
66 | kube_flanneld_extra_args: []
67 | # Enable to install and manage Certificates with Cert-manager
68 | kube_cert_manager: false
69 | # Public IP to use by the cert-manager (not needed if kube_public_dns_name is set)
70 | kube_cert_public_ip: "{{ ansible_default_ipv4.address }}"
71 | # Public DNS name to use in the dashboard tls certificate
72 | kube_public_dns_name: ""
73 | # Email to be used in the Let's Encrypt issuer
74 | kube_cert_user_email: johndoe@server.com
75 | # Override default docker version
76 | kube_docker_version: ""
77 | # Options to add in the docker.json file
78 | kube_docker_options: {}
79 | # Install docker with pip
80 | kube_install_docker_pip: false
81 | # Command flags to use for launching k3s in the systemd service
82 | kube_k3_exec: ""
83 | # How to install K8s: kubeadm or k3s
84 | kube_install_method: kubeadm
85 | # NTP servers to install and configure. If set to [], NTP will not be configured
86 | kube_ntp_servers: [ntp.upv.es, ntp.uv.es]
87 |
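These variables can also be grouped in a vars file applied to the hosts; a minimal sketch (the values below are illustrative):

    kube_version: 1.24.17
    kube_network: flannel
    kube_install_ingress: true
    kube_cert_manager: true
    kube_public_dns_name: "kube.example.com"
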
88 | Example Playbook
89 | ----------------
90 |
91 | This is an example of how to use this role on the front-end node:
92 |
93 | - hosts: server
94 | roles:
95 | - { role: 'grycap.kubernetes', kube_apiserver_options: [{option: "--service-node-port-range", value: "30000-35000"}] }
96 |
97 | And in the WNs:
98 |
99 | - hosts: wn
100 | roles:
101 | - { role: 'grycap.kubernetes', kube_type_of_node: 'wn', kube_server: '10.0.0.1' }
102 |
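To use the k3s install method instead of kubeadm, a playbook along these lines should work (a sketch; the version value mirrors the one used in this role's CI tests):

    - hosts: server
      roles:
        - { role: 'grycap.kubernetes', kube_install_method: 'k3s', kube_version: 'v1.25.2+k3s1' }
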
103 | Contributing to the role
104 | ========================
105 | In order to keep the code clean, pushing changes directly to the master branch has been disabled.
106 | If you want to contribute, create a branch, push your changes, and then open a pull request.
107 | Thanks!
108 |
--------------------------------------------------------------------------------
/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # Version to install or latest
2 | kube_version: 1.24.17
3 | # Type of node front or wn
4 | kube_type_of_node: front
5 | # How to install K8s: kubeadm or k3s
6 | kube_install_method: kubeadm
7 | # NTP servers to install and configure. If set to [], NTP will not be configured
8 | kube_ntp_servers: [ntp.upv.es, ntp.uv.es]
9 | # IP address or name of the Kube front node
10 | kube_server: "{{ ansible_default_ipv4.address }}"
11 | # Token
12 | kube_token: "kube01.{{ lookup('password', '/var/tmp/tokenpass chars=ascii_lowercase,digits length=16') }}"
13 | # Token TTL duration (0 do not expire)
14 | kube_token_ttl: 0
15 | # POD network cidr
16 | kube_pod_network_cidr: 10.244.0.0/16
17 | # Type of network to install (currently supported: flannel, kube-router, calico, weave)
18 | kube_network: flannel
19 | # Kubelet extra args
20 | kubelet_extra_args: '' # deprecated, use kubelet_extra_args_dict instead
21 | # dict of kubelet extra args, if set kubelet_extra_args is ignored
22 | # A key in this map is the flag name as it appears on the command line, but without the leading dash(es).
23 | kubelet_extra_args_dict: {}
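# Example (hypothetical values; each key is a kubelet flag without leading dashes):
# kubelet_extra_args_dict:
#   max-pods: "110"
#   node-labels: "role=worker"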
24 | # Kube API server options
25 | kube_apiserver_options: []
26 | # CRI runtime
27 | kube_cri_runtime: docker # docker, containerd or crio
28 | # Install CRI runtime
29 | kube_cri_runtime_install: true
30 | # CRI dockerd version
31 | kube_cri_dockerd_version: "0.3.16"
32 | # Helm version
33 | kube_install_helm_version: "v3.17.2"
34 | # Deploy the Dashboard
35 | kube_deploy_dashboard: false
36 | # Value to pass to the kubeadm init --apiserver-advertise-address option
37 | kube_api_server: 0.0.0.0
38 | # IP on which to wait for the API server
39 | kube_wait_api_server_ip: 127.0.0.1
40 | # List of values with the information for creating persistent volumes
41 | # Array variables needed: [ namespace : "", name : "", label : "", capacity_storage : "", nfs_path : "" ]
42 | kube_persistent_volumes: []
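# Example (illustrative values, using the keys listed above):
# kube_persistent_volumes:
#   - namespace: "default"
#     name: "nfs-pv"
#     label: "nfs-pv"
#     capacity_storage: "10Gi"
#     nfs_path: "/pv/nfs-pv"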
43 | # A set of git repos and paths to be applied in the cluster. Following this format:
44 | # kube_apply_repos: [{repo: "https://github.com/kubernetes-incubator/metrics-server", version: "master", path: "deploy/1.8+/"}]
45 | kube_apply_repos: []
46 | # Flag to set Metrics-Server to be installed
47 | kube_install_metrics: false
48 | # Metrics-Server Helm chart version to install
49 | kube_metrics_chart_version: "3.12.2"
50 | # Flag to set the nginx ingress controller to be installed
51 | kube_install_ingress: false
52 | # Nginx ingress controller Helm chart version to install
53 | kube_ingress_chart_version: "4.12.1"
54 | # Flag to deploy the ingress controller in the master node
55 | kube_ingress_in_master: false
56 | # Flag to add NVIDIA support to the cluster
57 | kube_nvidia_support: false
58 | # Driver version to install: specific version or "latest"
59 | kube_nvidia_driver_version: "515"
60 | # Tag of the nvidia device plugin docker container
61 | kube_nvidia_device_plugin_version: "v0.17.1"
62 | # Flag to set the kubeapps UI to be installed
63 | kube_install_kubeapps: false
64 | # KubeApps chart version to install (or latest)
65 | kube_kubeapps_chart_version: "12.2.10"
66 | # Flag to set nfs-client-provisioner to be installed
67 | kube_install_nfs_client: false
68 | # NFS path used by nfs-client-provisioner
69 | kube_nfs_path: /pv
70 | # NFS server used by nfs-client-provisioner
71 | kube_nfs_server: kubeserver.localdomain
72 | # Set reclaimPolicy of NFS StorageClass Delete or Retain
73 | kube_nfs_reclaim_policy: Delete
74 | # NFS client Helm chart version to install
75 | kube_nfs_chart_version: "4.0.18"
76 | # Extra options for the flannel plugin
77 | kube_flanneld_extra_args: []
78 | # Enable to install and manage Certificates with Cert-manager
79 | kube_cert_manager: false
80 | # Cert Manager Helm chart version to install
81 | kube_cert_manager_chart_version: "1.17.1"
82 | # Public IP to use by the cert-manager (not needed if kube_public_dns_name is set)
83 | kube_cert_public_ip: "{{ ansible_default_ipv4.address }}"
84 | # Public DNS name to use in the dashboard tls certificate
85 | kube_public_dns_name: ""
86 | # Email to be used in the Let's Encrypt issuer
87 | kube_cert_user_email: johndoe@server.com
88 | # Cert Manager type of challenge provider: http01 or dns01
89 | kube_cert_manager_challenge: 'http01'
90 | # In case of dns01 only Route53 is supported and this data is required
91 | kube_cert_manager_challenge_dns01_domain: ''
92 | kube_cert_manager_challenge_dns01_ak: ''
93 | kube_cert_manager_challenge_dns01_sk: ''
94 | # Optionally a wildcard dns certificate name can be set
95 | kube_cert_manager_wildcard_cert_dns_name: ''
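# Example dns01 configuration (hypothetical values; Route53 credentials are required):
# kube_cert_manager_challenge: 'dns01'
# kube_cert_manager_challenge_dns01_domain: 'example.com'
# kube_cert_manager_challenge_dns01_ak: 'AWS_ACCESS_KEY_ID'
# kube_cert_manager_challenge_dns01_sk: 'AWS_SECRET_ACCESS_KEY'
# kube_cert_manager_wildcard_cert_dns_name: '*.example.com'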
96 | # Override default docker version
97 | kube_docker_version: ""
98 | # Options to add in the docker.json file
99 | kube_docker_options: {}
100 | # Nvidia docker options to add in the docker.json file
101 | docker_nvidia_options:
102 | default-runtime: nvidia
103 | runtimes:
104 | nvidia:
105 | path: /usr/bin/nvidia-container-runtime
106 | runtimeArgs: []
107 | # Install docker with pip
108 | kube_install_docker_pip: false
109 | # Command flags to use for launching k3s in the systemd service
110 | kube_k3_exec: ""
111 |
112 | kube_install_kyverno: false
113 | kyverno_crds_helm_chart_version: "2.0.3"
114 | kyverno_helm_chart_version: "2.0.3"
115 | kyverno_image: ghcr.io/kyverno/kyverno
116 | kyverno_image_tag: "v1.4.3"
117 | kyverno_initContainer_image: ghcr.io/kyverno/kyvernopre
118 | kyverno_initContainer_image_tag: "v1.4.3"
119 |
120 | # List of resource types to be skipped by kyverno policy engine. See: https://kyverno.io/docs/installation/#resource-filters
121 | kyverno_config_notapplypolicies: '[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][SelfSubjectAccessReview,*,*][*,kyverno,*][Binding,*,*][ReportChangeRequest,*,*][ClusterReportChangeRequest,*,*]'
122 |
123 | # Desired pod security level: privileged, baseline, restricted or custom. Set to restricted for maximum security. See: https://kyverno.io/policies/pod-security/
124 | kyverno_podSecurityStandard: baseline
125 | # Desired pod security severity: low, medium or high. Severity level used in PolicyReportResults for the selected pod security policies
126 | kyverno_podSecuritySeverity: high
127 | # Action to take on a failed validation check. Supported values are audit and enforce. See: https://kyverno.io/docs/writing-policies/validate/
128 | kyverno_validationFailureAction: enforce
129 |
130 | # Option to install Apache Yunikorn
131 | kube_install_yunikorn: false
132 | # Apache Yunikorn version to install
133 | kube_yunikorn_version: 1.1.0
134 |
135 |
136 | cilium_debug_log: true
137 | cilium_k8s_endpoint: "{{ ansible_default_ipv4.address }}"
138 | # https://docs.cilium.io/en/stable/policy/intro/#policy-enforcement-modes. Values: default, always and never
139 | cilium_policyEnforcementMode: "default"
140 | cilium_nodeEncryption: true
141 | cilium_nodeport_enable: true
142 | cilium_nodeport_range: "30000,32767"
143 | cilium_priorityClassName: "network"
144 | cilium_prometheus_enable: false
145 | cilium_hubble_certValidityDuration: 365
146 | cilium_hubble_enable: false
147 | cilium_hubble_ui_enable: false
148 | cilium_hubble_priorityClassName: ""
149 | cilium_hubble_ui_ingress_enable: true
150 | cilium_hubble_ui_ingress_host: ""
151 | cilium_hubble_ui_ingress_tls_host: ""
152 | cilium_hubble_ui_ingress_path: "/"
153 | cilium_hubble_ui_ingress_annotations: {}
154 |
155 | # Longhorn support; if activated, the NFS client will not be installed
156 | kube_install_longhorn: false
157 | # Install ingress for Longhorn UI
158 | kube_install_longhorn_ingress: false
159 | # Default pass: longhorn_pass (openssl passwd -stdin -apr1)
160 | kube_longhorn_ingress_auth: "longhorn:$apr1$e6BbrO3Q$llbCJ6cWJS/RWnLGYQhxX."
161 | # Number of replicas for volumes in Longhorn
162 | longhorn_num_replicas: 3
163 | # Set reclaimPolicy of Longhorn StorageClass Delete or Retain
164 | longhorn_reclaim_policy: Delete
165 |
--------------------------------------------------------------------------------
/files/cert-manager-values.yaml:
--------------------------------------------------------------------------------
1 | crds:
2 | enabled: true
3 | tolerations:
4 | - key: node-role.kubernetes.io/master
5 | effect: NoSchedule
6 | - key: node-role.kubernetes.io/control-plane
7 | effect: NoSchedule
8 | webhook:
9 | tolerations:
10 | - key: node-role.kubernetes.io/master
11 | effect: NoSchedule
12 | - key: node-role.kubernetes.io/control-plane
13 | effect: NoSchedule
14 | cainjector:
15 | tolerations:
16 | - key: node-role.kubernetes.io/master
17 | effect: NoSchedule
18 | - key: node-role.kubernetes.io/control-plane
19 | effect: NoSchedule
20 | startupapicheck:
21 | tolerations:
22 | - key: node-role.kubernetes.io/master
23 | effect: NoSchedule
24 | - key: node-role.kubernetes.io/control-plane
25 | effect: NoSchedule
--------------------------------------------------------------------------------
/files/cillium-default-priorityclass.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: scheduling.k8s.io/v1
3 | kind: PriorityClass
4 | metadata:
5 | name: "network"
6 | value: 1000000000
7 | preemptionPolicy: PreemptLowerPriority
8 | globalDefault: false
9 | description: "Ensure Cilium pods have high priority to prevent any unexpected eviction by the Kubernetes scheduler under node pressure"
10 |
--------------------------------------------------------------------------------
/files/config.toml.tmpl:
--------------------------------------------------------------------------------
1 | [plugins.opt]
2 | path = "{{ .NodeConfig.Containerd.Opt }}"
3 |
4 | [plugins.cri]
5 | stream_server_address = "127.0.0.1"
6 | stream_server_port = "10010"
7 |
8 | {{- if .IsRunningInUserNS }}
9 | disable_cgroup = true
10 | disable_apparmor = true
11 | restrict_oom_score_adj = true
12 | {{end}}
13 |
14 | {{- if .NodeConfig.AgentConfig.PauseImage }}
15 | sandbox_image = "{{ .NodeConfig.AgentConfig.PauseImage }}"
16 | {{end}}
17 |
18 | {{- if not .NodeConfig.NoFlannel }}
19 | [plugins.cri.cni]
20 | bin_dir = "{{ .NodeConfig.AgentConfig.CNIBinDir }}"
21 | conf_dir = "{{ .NodeConfig.AgentConfig.CNIConfDir }}"
22 | {{end}}
23 |
24 | [plugins.cri.containerd.runtimes.runc]
25 | # ---- changed from 'io.containerd.runc.v2' for GPU support
26 | runtime_type = "io.containerd.runtime.v1.linux"
27 |
28 | # ---- added for GPU support
29 | [plugins.linux]
30 | runtime = "nvidia-container-runtime"
31 |
32 | {{ if .PrivateRegistryConfig }}
33 | {{ if .PrivateRegistryConfig.Mirrors }}
34 | [plugins.cri.registry.mirrors]{{end}}
35 | {{range $k, $v := .PrivateRegistryConfig.Mirrors }}
36 | [plugins.cri.registry.mirrors."{{$k}}"]
37 | endpoint = [{{range $i, $j := $v.Endpoints}}{{if $i}}, {{end}}{{printf "%q" .}}{{end}}]
38 | {{end}}
39 |
40 | {{range $k, $v := .PrivateRegistryConfig.Configs }}
41 | {{ if $v.Auth }}
42 | [plugins.cri.registry.configs."{{$k}}".auth]
43 | {{ if $v.Auth.Username }}username = "{{ $v.Auth.Username }}"{{end}}
44 | {{ if $v.Auth.Password }}password = "{{ $v.Auth.Password }}"{{end}}
45 | {{ if $v.Auth.Auth }}auth = "{{ $v.Auth.Auth }}"{{end}}
46 | {{ if $v.Auth.IdentityToken }}identitytoken = "{{ $v.Auth.IdentityToken }}"{{end}}
47 | {{end}}
48 | {{ if $v.TLS }}
49 | [plugins.cri.registry.configs."{{$k}}".tls]
50 | {{ if $v.TLS.CAFile }}ca_file = "{{ $v.TLS.CAFile }}"{{end}}
51 | {{ if $v.TLS.CertFile }}cert_file = "{{ $v.TLS.CertFile }}"{{end}}
52 | {{ if $v.TLS.KeyFile }}key_file = "{{ $v.TLS.KeyFile }}"{{end}}
53 | {{end}}
54 | {{end}}
55 | {{end}}
56 |
--------------------------------------------------------------------------------
/files/helm.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Copyright The Helm Authors.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | # The install script is based off of the MIT-licensed script from glide,
18 | # the package manager for Go: https://github.com/Masterminds/glide.sh/blob/master/get
19 |
20 | : ${BINARY_NAME:="helm"}
21 | : ${USE_SUDO:="true"}
22 | : ${HELM_INSTALL_DIR:="/usr/bin"}
23 |
24 | # initArch discovers the architecture for this system.
25 | initArch() {
26 | ARCH=$(uname -m)
27 | case $ARCH in
28 | armv5*) ARCH="armv5";;
29 | armv6*) ARCH="armv6";;
30 | armv7*) ARCH="arm";;
31 | aarch64) ARCH="arm64";;
32 | x86) ARCH="386";;
33 | x86_64) ARCH="amd64";;
34 | i686) ARCH="386";;
35 | i386) ARCH="386";;
36 | esac
37 | }
38 |
39 | # initOS discovers the operating system for this system.
40 | initOS() {
41 | OS=$(echo `uname`|tr '[:upper:]' '[:lower:]')
42 |
43 | case "$OS" in
44 | # Minimalist GNU for Windows
45 | mingw*) OS='windows';;
46 | esac
47 | }
48 |
49 | # runs the given command as root (detects if we are root already)
50 | runAsRoot() {
51 | local CMD="$*"
52 |
53 | if [ $EUID -ne 0 -a $USE_SUDO = "true" ]; then
54 | CMD="sudo $CMD"
55 | fi
56 |
57 | $CMD
58 | }
59 |
60 | # verifySupported checks that the os/arch combination is supported for
61 | # binary builds.
62 | verifySupported() {
63 | local supported="darwin-386\ndarwin-amd64\nlinux-386\nlinux-amd64\nlinux-arm\nlinux-arm64\nlinux-ppc64le\nwindows-386\nwindows-amd64"
64 | if ! echo "${supported}" | grep -q "${OS}-${ARCH}"; then
65 | echo "No prebuilt binary for ${OS}-${ARCH}."
66 | echo "To build from source, go to https://github.com/helm/helm"
67 | exit 1
68 | fi
69 |
70 | if ! type "curl" > /dev/null && ! type "wget" > /dev/null; then
71 | echo "Either curl or wget is required"
72 | exit 1
73 | fi
74 | }
75 |
76 | # checkDesiredVersion checks if the desired version is available.
77 | checkDesiredVersion() {
78 | if [ "x$DESIRED_VERSION" == "x" ]; then
79 | # Get tag from release URL
80 | local latest_release_url="https://github.com/helm/helm/releases"
81 | if type "curl" > /dev/null; then
82 | TAG=$(curl -Ls $latest_release_url | grep 'href="/helm/helm/releases/tag/v3.[0-9]*.[0-9]*\"' | grep -v no-underline | head -n 1 | cut -d '"' -f 2 | awk '{n=split($NF,a,"/");print a[n]}' | awk 'a !~ $0{print}; {a=$0}')
83 | elif type "wget" > /dev/null; then
84 | TAG=$(wget $latest_release_url -O - 2>&1 | grep 'href="/helm/helm/releases/tag/v3.[0-9]*.[0-9]*\"' | grep -v no-underline | head -n 1 | cut -d '"' -f 2 | awk '{n=split($NF,a,"/");print a[n]}' | awk 'a !~ $0{print}; {a=$0}')
85 | fi
86 | else
87 | TAG=$DESIRED_VERSION
88 | fi
89 | }
90 |
91 | # checkHelmInstalledVersion checks which version of helm is installed and
92 | # if it needs to be changed.
93 | checkHelmInstalledVersion() {
94 | if [[ -f "${HELM_INSTALL_DIR}/${BINARY_NAME}" ]]; then
95 | local version=$("${HELM_INSTALL_DIR}/${BINARY_NAME}" version --template="{{ .Version }}")
96 | if [[ "$version" == "$TAG" ]]; then
97 | echo "Helm ${version} is already ${DESIRED_VERSION:-latest}"
98 | return 0
99 | else
100 | echo "Helm ${TAG} is available. Changing from version ${version}."
101 | return 1
102 | fi
103 | else
104 | return 1
105 | fi
106 | }
107 |
108 | # downloadFile downloads the latest binary package and also the checksum
109 | # for that binary.
110 | downloadFile() {
111 | HELM_DIST="helm-$TAG-$OS-$ARCH.tar.gz"
112 | DOWNLOAD_URL="https://get.helm.sh/$HELM_DIST"
113 | CHECKSUM_URL="$DOWNLOAD_URL.sha256"
114 | HELM_TMP_ROOT="$(mktemp -dt helm-installer-XXXXXX)"
115 | HELM_TMP_FILE="$HELM_TMP_ROOT/$HELM_DIST"
116 | HELM_SUM_FILE="$HELM_TMP_ROOT/$HELM_DIST.sha256"
117 | echo "Downloading $DOWNLOAD_URL"
118 | if type "curl" > /dev/null; then
119 | curl -SsL "$CHECKSUM_URL" -o "$HELM_SUM_FILE"
120 | elif type "wget" > /dev/null; then
121 | wget -q -O "$HELM_SUM_FILE" "$CHECKSUM_URL"
122 | fi
123 | if type "curl" > /dev/null; then
124 | curl -SsL "$DOWNLOAD_URL" -o "$HELM_TMP_FILE"
125 | elif type "wget" > /dev/null; then
126 | wget -q -O "$HELM_TMP_FILE" "$DOWNLOAD_URL"
127 | fi
128 | }
129 |
130 | # installFile verifies the SHA256 for the file, then unpacks and
131 | # installs it.
132 | installFile() {
133 | HELM_TMP="$HELM_TMP_ROOT/$BINARY_NAME"
134 | local sum=$(openssl sha1 -sha256 ${HELM_TMP_FILE} | awk '{print $2}')
135 | local expected_sum=$(cat ${HELM_SUM_FILE})
136 | if [ "$sum" != "$expected_sum" ]; then
137 | echo "SHA sum of ${HELM_TMP_FILE} does not match. Aborting."
138 | exit 1
139 | fi
140 |
141 | mkdir -p "$HELM_TMP"
142 | tar xf "$HELM_TMP_FILE" -C "$HELM_TMP"
143 | HELM_TMP_BIN="$HELM_TMP/$OS-$ARCH/helm"
144 | echo "Preparing to install $BINARY_NAME into ${HELM_INSTALL_DIR}"
145 | runAsRoot cp "$HELM_TMP_BIN" "$HELM_INSTALL_DIR/$BINARY_NAME"
146 | echo "$BINARY_NAME installed into $HELM_INSTALL_DIR/$BINARY_NAME"
147 | }
148 |
149 | # fail_trap is executed if an error occurs.
150 | fail_trap() {
151 | result=$?
152 | if [ "$result" != "0" ]; then
153 | if [[ -n "$INPUT_ARGUMENTS" ]]; then
154 | echo "Failed to install $BINARY_NAME with the arguments provided: $INPUT_ARGUMENTS"
155 | help
156 | else
157 | echo "Failed to install $BINARY_NAME"
158 | fi
159 | echo -e "\tFor support, go to https://github.com/helm/helm."
160 | fi
161 | cleanup
162 | exit $result
163 | }
164 |
165 | # testVersion tests the installed client to make sure it is working.
166 | testVersion() {
167 | set +e
168 | HELM="$(command -v $BINARY_NAME)"
169 | if [ "$?" = "1" ]; then
170 | echo "$BINARY_NAME not found. Is $HELM_INSTALL_DIR on your "'$PATH?'
171 | exit 1
172 | fi
173 | set -e
174 | }
175 |
176 | # help provides possible cli installation arguments
177 | help () {
178 | echo "Accepted cli arguments are:"
179 | echo -e "\t[--help|-h ] ->> prints this help"
180 | echo -e "\t[--version|-v ] . When not defined it fetches the latest release from GitHub"
181 | echo -e "\te.g. --version v3.0.0 or -v canary"
182 | echo -e "\t[--no-sudo] ->> install without sudo"
183 | }
184 |
185 | # cleanup temporary files to avoid https://github.com/helm/helm/issues/2977
186 | cleanup() {
187 | if [[ -d "${HELM_TMP_ROOT:-}" ]]; then
188 | rm -rf "$HELM_TMP_ROOT"
189 | fi
190 | }
191 |
192 | # Execution
193 |
194 | #Stop execution on any error
195 | trap "fail_trap" EXIT
196 | set -e
197 |
198 | # Parsing input arguments (if any)
199 | export INPUT_ARGUMENTS="${@}"
200 | set -u
201 | while [[ $# -gt 0 ]]; do
202 | case $1 in
203 | '--version'|-v)
204 | shift
205 | if [[ $# -ne 0 ]]; then
206 | export DESIRED_VERSION="${1}"
207 | else
208 | echo -e "Please provide the desired version. e.g. --version v3.0.0 or -v canary"
209 | exit 0
210 | fi
211 | ;;
212 | '--no-sudo')
213 | USE_SUDO="false"
214 | ;;
215 | '--help'|-h)
216 | help
217 | exit 0
218 | ;;
219 | *) exit 1
220 | ;;
221 | esac
222 | shift
223 | done
224 | set +u
225 |
226 | initArch
227 | initOS
228 | verifySupported
229 | checkDesiredVersion
230 | if ! checkHelmInstalledVersion; then
231 | downloadFile
232 | installFile
233 | fi
234 | testVersion
235 | cleanup
236 |
--------------------------------------------------------------------------------
/files/is_cluster_ready:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | LINES=`ps aux | grep "ansible" | grep -v "\[mux\]" | wc -l`  # count ansible processes (the grep matches itself, hence the -gt 1 test below)
4 | docker inspect im > /dev/null 2> /dev/null  # succeeds only if a Docker object named 'im' exists; its exit status is tested below via $?
5 |
6 | if [ $LINES -gt 1 -o "$?" -ne "0" ]  # still configuring if ansible is running or 'im' is missing
7 | then
8 | echo "Cluster is still configuring."
9 | else
10 | echo "Cluster configured!"
11 | fi
12 |
--------------------------------------------------------------------------------
/files/kubernetes-dashboard.yaml:
--------------------------------------------------------------------------------
1 | # Copyright 2017 The Kubernetes Authors.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | apiVersion: v1
16 | kind: Namespace
17 | metadata:
18 | name: kubernetes-dashboard
19 |
20 | ---
21 |
22 | apiVersion: v1
23 | kind: ServiceAccount
24 | metadata:
25 | labels:
26 | k8s-app: kubernetes-dashboard
27 | name: kubernetes-dashboard
28 | namespace: kubernetes-dashboard
29 |
30 | ---
31 |
32 | kind: Service
33 | apiVersion: v1
34 | metadata:
35 | labels:
36 | k8s-app: kubernetes-dashboard
37 | name: kubernetes-dashboard
38 | namespace: kubernetes-dashboard
39 | spec:
40 | ports:
41 | - port: 443
42 | targetPort: 8443
43 | selector:
44 | k8s-app: kubernetes-dashboard
45 |
46 | ---
47 |
48 | apiVersion: v1
49 | kind: Secret
50 | metadata:
51 | labels:
52 | k8s-app: kubernetes-dashboard
53 | name: kubernetes-dashboard-certs
54 | namespace: kubernetes-dashboard
55 | type: Opaque
56 |
57 | ---
58 |
59 | apiVersion: v1
60 | kind: Secret
61 | metadata:
62 | labels:
63 | k8s-app: kubernetes-dashboard
64 | name: kubernetes-dashboard-csrf
65 | namespace: kubernetes-dashboard
66 | type: Opaque
67 | data:
68 | csrf: ""
69 |
70 | ---
71 |
72 | apiVersion: v1
73 | kind: Secret
74 | metadata:
75 | labels:
76 | k8s-app: kubernetes-dashboard
77 | name: kubernetes-dashboard-key-holder
78 | namespace: kubernetes-dashboard
79 | type: Opaque
80 |
81 | ---
82 |
83 | kind: ConfigMap
84 | apiVersion: v1
85 | metadata:
86 | labels:
87 | k8s-app: kubernetes-dashboard
88 | name: kubernetes-dashboard-settings
89 | namespace: kubernetes-dashboard
90 |
91 | ---
92 |
93 | kind: Role
94 | apiVersion: rbac.authorization.k8s.io/v1
95 | metadata:
96 | labels:
97 | k8s-app: kubernetes-dashboard
98 | name: kubernetes-dashboard
99 | namespace: kubernetes-dashboard
100 | rules:
101 | # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
102 | - apiGroups: [""]
103 | resources: ["secrets"]
104 | resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
105 | verbs: ["get", "update", "delete"]
106 | # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
107 | - apiGroups: [""]
108 | resources: ["configmaps"]
109 | resourceNames: ["kubernetes-dashboard-settings"]
110 | verbs: ["get", "update"]
111 | # Allow Dashboard to get metrics.
112 | - apiGroups: [""]
113 | resources: ["services"]
114 | resourceNames: ["heapster", "dashboard-metrics-scraper"]
115 | verbs: ["proxy"]
116 | - apiGroups: [""]
117 | resources: ["services/proxy"]
118 | resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
119 | verbs: ["get"]
120 |
121 | ---
122 |
123 | kind: ClusterRole
124 | apiVersion: rbac.authorization.k8s.io/v1
125 | metadata:
126 | labels:
127 | k8s-app: kubernetes-dashboard
128 | name: kubernetes-dashboard
129 | rules:
130 | # Allow Metrics Scraper to get metrics from the Metrics server
131 | - apiGroups: ["metrics.k8s.io"]
132 | resources: ["pods", "nodes"]
133 | verbs: ["get", "list", "watch"]
134 |
135 | ---
136 |
137 | apiVersion: rbac.authorization.k8s.io/v1
138 | kind: RoleBinding
139 | metadata:
140 | labels:
141 | k8s-app: kubernetes-dashboard
142 | name: kubernetes-dashboard
143 | namespace: kubernetes-dashboard
144 | roleRef:
145 | apiGroup: rbac.authorization.k8s.io
146 | kind: Role
147 | name: kubernetes-dashboard
148 | subjects:
149 | - kind: ServiceAccount
150 | name: kubernetes-dashboard
151 | namespace: kubernetes-dashboard
152 |
153 | ---
154 |
155 | apiVersion: rbac.authorization.k8s.io/v1
156 | kind: ClusterRoleBinding
157 | metadata:
158 | name: kubernetes-dashboard
159 | roleRef:
160 | apiGroup: rbac.authorization.k8s.io
161 | kind: ClusterRole
162 | name: kubernetes-dashboard
163 | subjects:
164 | - kind: ServiceAccount
165 | name: kubernetes-dashboard
166 | namespace: kubernetes-dashboard
167 |
168 | ---
169 |
170 | kind: Deployment
171 | apiVersion: apps/v1
172 | metadata:
173 | labels:
174 | k8s-app: kubernetes-dashboard
175 | name: kubernetes-dashboard
176 | namespace: kubernetes-dashboard
177 | spec:
178 | replicas: 1
179 | revisionHistoryLimit: 10
180 | selector:
181 | matchLabels:
182 | k8s-app: kubernetes-dashboard
183 | template:
184 | metadata:
185 | labels:
186 | k8s-app: kubernetes-dashboard
187 | spec:
188 | securityContext:
189 | seccompProfile:
190 | type: RuntimeDefault
191 | containers:
192 | - name: kubernetes-dashboard
193 | image: kubernetesui/dashboard:v2.7.0
194 | imagePullPolicy: Always
195 | ports:
196 | - containerPort: 8443
197 | protocol: TCP
198 | args:
199 | - --auto-generate-certificates
200 | - --namespace=kubernetes-dashboard
201 | # Uncomment the following line to manually specify Kubernetes API server Host
202 | # If not specified, Dashboard will attempt to auto discover the API server and connect
203 | # to it. Uncomment only if the default does not work.
204 | # - --apiserver-host=http://my-address:port
205 | volumeMounts:
206 | - name: kubernetes-dashboard-certs
207 | mountPath: /certs
208 | # Create on-disk volume to store exec logs
209 | - mountPath: /tmp
210 | name: tmp-volume
211 | livenessProbe:
212 | httpGet:
213 | scheme: HTTPS
214 | path: /
215 | port: 8443
216 | initialDelaySeconds: 30
217 | timeoutSeconds: 30
218 | securityContext:
219 | allowPrivilegeEscalation: false
220 | readOnlyRootFilesystem: true
221 | runAsUser: 1001
222 | runAsGroup: 2001
223 | volumes:
224 | - name: kubernetes-dashboard-certs
225 | secret:
226 | secretName: kubernetes-dashboard-certs
227 | - name: tmp-volume
228 | emptyDir: {}
229 | serviceAccountName: kubernetes-dashboard
230 | nodeSelector:
231 | "kubernetes.io/os": linux
232 | # Comment the following tolerations if Dashboard must not be deployed on master
233 | tolerations:
234 | - key: node-role.kubernetes.io/master
235 | effect: NoSchedule
236 | - key: node-role.kubernetes.io/control-plane
237 | effect: NoSchedule
238 | ---
239 |
240 | kind: Service
241 | apiVersion: v1
242 | metadata:
243 | labels:
244 | k8s-app: dashboard-metrics-scraper
245 | name: dashboard-metrics-scraper
246 | namespace: kubernetes-dashboard
247 | spec:
248 | ports:
249 | - port: 8000
250 | targetPort: 8000
251 | selector:
252 | k8s-app: dashboard-metrics-scraper
253 |
254 | ---
255 |
256 | kind: Deployment
257 | apiVersion: apps/v1
258 | metadata:
259 | labels:
260 | k8s-app: dashboard-metrics-scraper
261 | name: dashboard-metrics-scraper
262 | namespace: kubernetes-dashboard
263 | spec:
264 | replicas: 1
265 | revisionHistoryLimit: 10
266 | selector:
267 | matchLabels:
268 | k8s-app: dashboard-metrics-scraper
269 | template:
270 | metadata:
271 | labels:
272 | k8s-app: dashboard-metrics-scraper
273 | spec:
274 | securityContext:
275 | seccompProfile:
276 | type: RuntimeDefault
277 | containers:
278 | - name: dashboard-metrics-scraper
279 | image: kubernetesui/metrics-scraper:v1.0.8
280 | ports:
281 | - containerPort: 8000
282 | protocol: TCP
283 | livenessProbe:
284 | httpGet:
285 | scheme: HTTP
286 | path: /
287 | port: 8000
288 | initialDelaySeconds: 30
289 | timeoutSeconds: 30
290 | volumeMounts:
291 | - mountPath: /tmp
292 | name: tmp-volume
293 | securityContext:
294 | allowPrivilegeEscalation: false
295 | readOnlyRootFilesystem: true
296 | runAsUser: 1001
297 | runAsGroup: 2001
298 | serviceAccountName: kubernetes-dashboard
299 | nodeSelector:
300 | "kubernetes.io/os": linux
301 | # Comment the following tolerations if Dashboard must not be deployed on master
302 | tolerations:
303 | - key: node-role.kubernetes.io/master
304 | effect: NoSchedule
305 | - key: node-role.kubernetes.io/control-plane
306 | effect: NoSchedule
307 | volumes:
308 | - name: tmp-volume
309 | emptyDir: {}
310 |
311 | ---
312 | # ------------------- Dashboard Service ------------------- #
313 |
314 | apiVersion: v1
315 | kind: Service
316 | metadata:
317 | namespace: kubernetes-dashboard
318 | name: kubernetes-dashboard
319 | labels:
320 | k8s-app: kubernetes-dashboard
321 | spec:
322 | type: NodePort
323 | ports:
324 | - name: https
325 | port: 8443
326 | nodePort: 30443
327 | selector:
328 | k8s-app: kubernetes-dashboard
329 |
--------------------------------------------------------------------------------
/files/kuberouter-net.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: kube-router-cfg
5 | namespace: kube-system
6 | labels:
7 | tier: node
8 | k8s-app: kube-router
9 | data:
10 | cni-conf.json: |
11 | {
12 | "cniVersion":"0.3.0",
13 | "name":"mynet",
14 | "plugins":[
15 | {
16 | "name":"kubernetes",
17 | "type":"bridge",
18 | "bridge":"kube-bridge",
19 | "isDefaultGateway":true,
20 | "ipam":{
21 | "type":"host-local"
22 | }
23 | }
24 | ]
25 | }
26 | ---
27 | apiVersion: apps/v1
28 | kind: DaemonSet
29 | metadata:
30 | labels:
31 | k8s-app: kube-router
32 | tier: node
33 | name: kube-router
34 | namespace: kube-system
35 | spec:
36 | selector:
37 | matchLabels:
38 | k8s-app: kube-router
39 | tier: node
40 | template:
41 | metadata:
42 | labels:
43 | k8s-app: kube-router
44 | tier: node
45 | spec:
46 | priorityClassName: system-node-critical
47 | serviceAccountName: kube-router
48 | serviceAccount: kube-router
49 | containers:
50 | - name: kube-router
51 | image: docker.io/cloudnativelabs/kube-router
52 | imagePullPolicy: Always
53 | args:
54 | - --run-router=true
55 | - --run-firewall=true
56 | - --run-service-proxy=true
57 | - --bgp-graceful-restart=true
58 | - --kubeconfig=/var/lib/kube-router/kubeconfig
59 | env:
60 | - name: NODE_NAME
61 | valueFrom:
62 | fieldRef:
63 | fieldPath: spec.nodeName
64 | - name: KUBE_ROUTER_CNI_CONF_FILE
65 | value: /etc/cni/net.d/10-kuberouter.conflist
66 | livenessProbe:
67 | httpGet:
68 | path: /healthz
69 | port: 20244
70 | initialDelaySeconds: 10
71 | periodSeconds: 3
72 | resources:
73 | requests:
74 | cpu: 250m
75 | memory: 250Mi
76 | securityContext:
77 | privileged: true
78 | volumeMounts:
79 | - name: lib-modules
80 | mountPath: /lib/modules
81 | readOnly: true
82 | - name: cni-conf-dir
83 | mountPath: /etc/cni/net.d
84 | - name: kubeconfig
85 | mountPath: /var/lib/kube-router
86 | readOnly: true
87 | - name: xtables-lock
88 | mountPath: /run/xtables.lock
89 | readOnly: false
90 | initContainers:
91 | - name: install-cni
92 | image: docker.io/cloudnativelabs/kube-router
93 | imagePullPolicy: Always
94 | command:
95 | - /bin/sh
96 | - -c
97 | - set -e -x;
98 | if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then
99 | if [ -f /etc/cni/net.d/*.conf ]; then
100 | rm -f /etc/cni/net.d/*.conf;
101 | fi;
102 | TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
103 | cp /etc/kube-router/cni-conf.json ${TMP};
104 | mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
105 | fi
106 | volumeMounts:
107 | - name: cni-conf-dir
108 | mountPath: /etc/cni/net.d
109 | - name: kube-router-cfg
110 | mountPath: /etc/kube-router
111 | hostNetwork: true
112 | tolerations:
113 | - effect: NoSchedule
114 | operator: Exists
115 | - key: CriticalAddonsOnly
116 | operator: Exists
117 | - effect: NoExecute
118 | operator: Exists
119 | volumes:
120 | - name: lib-modules
121 | hostPath:
122 | path: /lib/modules
123 | - name: cni-conf-dir
124 | hostPath:
125 | path: /etc/cni/net.d
126 | - name: kube-router-cfg
127 | configMap:
128 | name: kube-router-cfg
129 | - name: kubeconfig
130 | configMap:
131 | name: kube-proxy
132 | items:
133 | - key: kubeconfig.conf
134 | path: kubeconfig
135 | - name: xtables-lock
136 | hostPath:
137 | path: /run/xtables.lock
138 | type: FileOrCreate
139 | ---
140 | apiVersion: v1
141 | kind: ServiceAccount
142 | metadata:
143 | name: kube-router
144 | namespace: kube-system
145 | ---
146 | kind: ClusterRole
147 | apiVersion: rbac.authorization.k8s.io/v1
148 | metadata:
149 | name: kube-router
150 | namespace: kube-system
151 | rules:
152 | - apiGroups:
153 | - ""
154 | resources:
155 | - namespaces
156 | - pods
157 | - services
158 | - nodes
159 | - endpoints
160 | verbs:
161 | - list
162 | - get
163 | - watch
164 | - apiGroups:
165 | - "networking.k8s.io"
166 | resources:
167 | - networkpolicies
168 | verbs:
169 | - list
170 | - get
171 | - watch
172 | - apiGroups:
173 | - extensions
174 | resources:
175 | - networkpolicies
176 | verbs:
177 | - get
178 | - list
179 | - watch
180 | ---
181 | kind: ClusterRoleBinding
182 | apiVersion: rbac.authorization.k8s.io/v1
183 | metadata:
184 | name: kube-router
185 | roleRef:
186 | apiGroup: rbac.authorization.k8s.io
187 | kind: ClusterRole
188 | name: kube-router
189 | subjects:
190 | - kind: ServiceAccount
191 | name: kube-router
192 | namespace: kube-system
193 |
--------------------------------------------------------------------------------
/files/weave-net.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: List
3 | items:
4 | - apiVersion: v1
5 | kind: ServiceAccount
6 | metadata:
7 | name: weave-net
8 | annotations:
9 | cloud.weave.works/launcher-info: |-
10 | {
11 | "original-request": {
12 | "url": "/k8s/v1.16/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiIxOCIsIEdpdFZlcnNpb246InYxLjE4LjYiLCBHaXRDb21taXQ6ImRmZjgyZGMwZGU0NzI5OWFiNjZjODNjNjI2ZTA4YjI0NWFiMTkwMzciLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDIwLTA3LTE1VDE2OjU4OjUzWiIsIEdvVmVyc2lvbjoiZ28xLjEzLjkiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQo=",
13 | "date": "Fri Sep 11 2020 11:13:33 GMT+0000 (UTC)"
14 | },
15 | "email-address": "support@weave.works"
16 | }
17 | labels:
18 | name: weave-net
19 | namespace: kube-system
20 | - apiVersion: rbac.authorization.k8s.io/v1
21 | kind: ClusterRole
22 | metadata:
23 | name: weave-net
24 | annotations:
25 | cloud.weave.works/launcher-info: |-
26 | {
27 | "original-request": {
28 | "url": "/k8s/v1.16/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiIxOCIsIEdpdFZlcnNpb246InYxLjE4LjYiLCBHaXRDb21taXQ6ImRmZjgyZGMwZGU0NzI5OWFiNjZjODNjNjI2ZTA4YjI0NWFiMTkwMzciLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDIwLTA3LTE1VDE2OjU4OjUzWiIsIEdvVmVyc2lvbjoiZ28xLjEzLjkiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQo=",
29 | "date": "Fri Sep 11 2020 11:13:33 GMT+0000 (UTC)"
30 | },
31 | "email-address": "support@weave.works"
32 | }
33 | labels:
34 | name: weave-net
35 | rules:
36 | - apiGroups:
37 | - ''
38 | resources:
39 | - pods
40 | - namespaces
41 | - nodes
42 | verbs:
43 | - get
44 | - list
45 | - watch
46 | - apiGroups:
47 | - networking.k8s.io
48 | resources:
49 | - networkpolicies
50 | verbs:
51 | - get
52 | - list
53 | - watch
54 | - apiGroups:
55 | - ''
56 | resources:
57 | - nodes/status
58 | verbs:
59 | - patch
60 | - update
61 | - apiVersion: rbac.authorization.k8s.io/v1
62 | kind: ClusterRoleBinding
63 | metadata:
64 | name: weave-net
65 | annotations:
66 | cloud.weave.works/launcher-info: |-
67 | {
68 | "original-request": {
69 | "url": "/k8s/v1.16/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiIxOCIsIEdpdFZlcnNpb246InYxLjE4LjYiLCBHaXRDb21taXQ6ImRmZjgyZGMwZGU0NzI5OWFiNjZjODNjNjI2ZTA4YjI0NWFiMTkwMzciLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDIwLTA3LTE1VDE2OjU4OjUzWiIsIEdvVmVyc2lvbjoiZ28xLjEzLjkiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQo=",
70 | "date": "Fri Sep 11 2020 11:13:33 GMT+0000 (UTC)"
71 | },
72 | "email-address": "support@weave.works"
73 | }
74 | labels:
75 | name: weave-net
76 | roleRef:
77 | kind: ClusterRole
78 | name: weave-net
79 | apiGroup: rbac.authorization.k8s.io
80 | subjects:
81 | - kind: ServiceAccount
82 | name: weave-net
83 | namespace: kube-system
84 | - apiVersion: rbac.authorization.k8s.io/v1
85 | kind: Role
86 | metadata:
87 | name: weave-net
88 | annotations:
89 | cloud.weave.works/launcher-info: |-
90 | {
91 | "original-request": {
92 | "url": "/k8s/v1.16/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiIxOCIsIEdpdFZlcnNpb246InYxLjE4LjYiLCBHaXRDb21taXQ6ImRmZjgyZGMwZGU0NzI5OWFiNjZjODNjNjI2ZTA4YjI0NWFiMTkwMzciLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDIwLTA3LTE1VDE2OjU4OjUzWiIsIEdvVmVyc2lvbjoiZ28xLjEzLjkiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQo=",
93 | "date": "Fri Sep 11 2020 11:13:33 GMT+0000 (UTC)"
94 | },
95 | "email-address": "support@weave.works"
96 | }
97 | labels:
98 | name: weave-net
99 | namespace: kube-system
100 | rules:
101 | - apiGroups:
102 | - ''
103 | resourceNames:
104 | - weave-net
105 | resources:
106 | - configmaps
107 | verbs:
108 | - get
109 | - update
110 | - apiGroups:
111 | - ''
112 | resources:
113 | - configmaps
114 | verbs:
115 | - create
116 | - apiVersion: rbac.authorization.k8s.io/v1
117 | kind: RoleBinding
118 | metadata:
119 | name: weave-net
120 | annotations:
121 | cloud.weave.works/launcher-info: |-
122 | {
123 | "original-request": {
124 | "url": "/k8s/v1.16/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiIxOCIsIEdpdFZlcnNpb246InYxLjE4LjYiLCBHaXRDb21taXQ6ImRmZjgyZGMwZGU0NzI5OWFiNjZjODNjNjI2ZTA4YjI0NWFiMTkwMzciLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDIwLTA3LTE1VDE2OjU4OjUzWiIsIEdvVmVyc2lvbjoiZ28xLjEzLjkiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQo=",
125 | "date": "Fri Sep 11 2020 11:13:33 GMT+0000 (UTC)"
126 | },
127 | "email-address": "support@weave.works"
128 | }
129 | labels:
130 | name: weave-net
131 | namespace: kube-system
132 | roleRef:
133 | kind: Role
134 | name: weave-net
135 | apiGroup: rbac.authorization.k8s.io
136 | subjects:
137 | - kind: ServiceAccount
138 | name: weave-net
139 | namespace: kube-system
140 | - apiVersion: apps/v1
141 | kind: DaemonSet
142 | metadata:
143 | name: weave-net
144 | annotations:
145 | cloud.weave.works/launcher-info: |-
146 | {
147 | "original-request": {
148 | "url": "/k8s/v1.16/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiIxOCIsIEdpdFZlcnNpb246InYxLjE4LjYiLCBHaXRDb21taXQ6ImRmZjgyZGMwZGU0NzI5OWFiNjZjODNjNjI2ZTA4YjI0NWFiMTkwMzciLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDIwLTA3LTE1VDE2OjU4OjUzWiIsIEdvVmVyc2lvbjoiZ28xLjEzLjkiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQo=",
149 | "date": "Fri Sep 11 2020 11:13:33 GMT+0000 (UTC)"
150 | },
151 | "email-address": "support@weave.works"
152 | }
153 | labels:
154 | name: weave-net
155 | namespace: kube-system
156 | spec:
157 | minReadySeconds: 5
158 | selector:
159 | matchLabels:
160 | name: weave-net
161 | template:
162 | metadata:
163 | labels:
164 | name: weave-net
165 | spec:
166 | containers:
167 | - name: weave
168 | command:
169 | - /home/weave/launch.sh
170 | env:
171 | - name: HOSTNAME
172 | valueFrom:
173 | fieldRef:
174 | apiVersion: v1
175 | fieldPath: spec.nodeName
176 | image: 'docker.io/weaveworks/weave-kube:2.7.0'
177 | readinessProbe:
178 | httpGet:
179 | host: 127.0.0.1
180 | path: /status
181 | port: 6784
182 | resources:
183 | requests:
184 | cpu: 50m
185 | memory: 100Mi
186 | securityContext:
187 | privileged: true
188 | volumeMounts:
189 | - name: weavedb
190 | mountPath: /weavedb
191 | - name: cni-bin
192 | mountPath: /host/opt
193 | - name: cni-bin2
194 | mountPath: /host/home
195 | - name: cni-conf
196 | mountPath: /host/etc
197 | - name: dbus
198 | mountPath: /host/var/lib/dbus
199 | - name: lib-modules
200 | mountPath: /lib/modules
201 | - name: xtables-lock
202 | mountPath: /run/xtables.lock
203 | - name: weave-npc
204 | env:
205 | - name: HOSTNAME
206 | valueFrom:
207 | fieldRef:
208 | apiVersion: v1
209 | fieldPath: spec.nodeName
210 | image: 'docker.io/weaveworks/weave-npc:2.7.0'
211 | resources:
212 | requests:
213 | cpu: 50m
214 | memory: 100Mi
215 | securityContext:
216 | privileged: true
217 | volumeMounts:
218 | - name: xtables-lock
219 | mountPath: /run/xtables.lock
220 | dnsPolicy: ClusterFirstWithHostNet
221 | hostNetwork: true
222 | hostPID: true
223 | priorityClassName: system-node-critical
224 | restartPolicy: Always
225 | securityContext:
226 | seLinuxOptions: {}
227 | serviceAccountName: weave-net
228 | tolerations:
229 | - effect: NoSchedule
230 | operator: Exists
231 | - effect: NoExecute
232 | operator: Exists
233 | volumes:
234 | - name: weavedb
235 | hostPath:
236 | path: /var/lib/weave
237 | - name: cni-bin
238 | hostPath:
239 | path: /opt
240 | - name: cni-bin2
241 | hostPath:
242 | path: /home
243 | - name: cni-conf
244 | hostPath:
245 | path: /etc
246 | - name: dbus
247 | hostPath:
248 | path: /var/lib/dbus
249 | - name: lib-modules
250 | hostPath:
251 | path: /lib/modules
252 | - name: xtables-lock
253 | hostPath:
254 | path: /run/xtables.lock
255 | type: FileOrCreate
256 | updateStrategy:
257 | type: RollingUpdate
258 |
--------------------------------------------------------------------------------
/files/yunikorn-values.yaml:
--------------------------------------------------------------------------------
1 | service:
2 | type: ClusterIP
3 |
4 | configuration: |
5 | partitions:
6 | - name: default
7 | placementrules:
8 | - name: provided
9 | - name: tag
10 | value: namespace
11 | create: true
12 | queues:
13 | - name: root
14 | submitacl: '*'
15 |
16 | tolerations:
17 | - key: node-role.kubernetes.io/master
18 | effect: NoSchedule
19 | - key: node-role.kubernetes.io/control-plane
20 | effect: NoSchedule
21 |
22 | nodeSelector:
23 | node-role.kubernetes.io/master: ''
24 | node-role.kubernetes.io/control-plane: ''
25 |
26 | admissionController:
27 | tolerations:
28 | - key: node-role.kubernetes.io/master
29 | effect: NoSchedule
30 | - key: node-role.kubernetes.io/control-plane
31 | effect: NoSchedule
32 | nodeSelector:
33 | node-role.kubernetes.io/master: ''
34 | node-role.kubernetes.io/control-plane: ''
--------------------------------------------------------------------------------
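
The YuniKorn values above pin the scheduler and its admission controller to the control-plane nodes and expose a single 'root' queue whose children are created per namespace. Purely as an illustrative sketch (names are hypothetical, and the admission controller will typically rewrite schedulerName on its own), a pod that targets YuniKorn explicitly looks like:

    apiVersion: v1
    kind: Pod
    metadata:
      name: sleep-test                  # hypothetical name
      labels:
        applicationId: sleep-test-001   # groups pods into one YuniKorn application
        queue: root.default             # honoured by the 'provided' placement rule above
    spec:
      schedulerName: yunikorn           # hand the pod to the YuniKorn scheduler
      containers:
        - name: sleep
          image: alpine:3.19
          command: ["sleep", "3600"]
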
/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: sysctl-system
2 | command: sysctl --system
3 |
4 | - name: restart kubelet
5 | service:
6 | name: kubelet
7 | state: restarted
--------------------------------------------------------------------------------
/meta/main.yml:
--------------------------------------------------------------------------------
1 | galaxy_info:
2 | role_name: kubernetes
3 | author: micafer1@upv.es
4 | description: Install Kubernetes cluster
5 | company: GRyCAP
6 | license: Apache
7 | min_ansible_version: 2.0
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 7
12 | - name: Ubuntu
13 | versions:
14 | - trusty
15 | galaxy_tags:
16 | - kubernetes
17 |
18 | dependencies:
19 | - role: 'geerlingguy.ntp'
20 | ntp_servers: "{{ kube_ntp_servers }}"
21 | when: kube_ntp_servers != []
22 | - role: 'grycap.docker'
23 | docker_version: "{{ kube_docker_version | default('latest', true) }}"
24 | docker_config_values: "{{ {'exec-opts': ['native.cgroupdriver=systemd'], 'log-driver': 'json-file', 'log-opts': {'max-size': '100m'}, 'storage-driver': 'devicemapper'} | combine(kube_docker_options) }}"
25 | docker_nvidia_support: '{{ kube_nvidia_support and kube_type_of_node == "wn" }}'
26 | docker_install_criu: false
27 | docker_install_pip: "{{kube_install_docker_pip}}"
28 | docker_compose_version: ""
29 | docker_containerd_only: "{{ (kube_cri_runtime == 'containerd') | bool }}"
30 | docker_nvidia_driver_version: "{{ kube_nvidia_driver_version }}"
31 | when: ansible_os_family == "RedHat" and kube_install_method == 'kubeadm' and kube_cri_runtime != 'crio' and kube_cri_runtime_install
32 | - role: 'grycap.docker'
33 | docker_version: "{{ kube_docker_version | default('latest', true) }}"
34 | docker_config_values: "{{ {'exec-opts': ['native.cgroupdriver=systemd'], 'log-driver': 'json-file', 'log-opts': {'max-size': '100m'}, 'storage-driver': 'overlay2'} | combine(kube_docker_options) }}"
35 | docker_nvidia_support: '{{ kube_nvidia_support and kube_type_of_node == "wn" }}'
36 | docker_install_criu: false
37 | docker_install_pip: "{{kube_install_docker_pip}}"
38 | docker_compose_version: ""
39 | docker_containerd_only: "{{ (kube_cri_runtime == 'containerd') | bool }}"
40 | docker_nvidia_driver_version: "{{ kube_nvidia_driver_version }}"
41 | when: ansible_os_family == "Debian" and kube_install_method == 'kubeadm' and kube_cri_runtime != 'crio' and kube_cri_runtime_install
42 | - role: 'grycap.cri_o'
43 | when: ansible_os_family == "Debian" and kube_install_method == 'kubeadm' and kube_cri_runtime == 'crio' and kube_cri_runtime_install
44 |
--------------------------------------------------------------------------------
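
The dependency list above is driven entirely by role variables: NTP is skipped when kube_ntp_servers is empty, grycap.docker is pulled in per OS family (in containerd-only mode when kube_cri_runtime is 'containerd'), and grycap.cri_o replaces it for CRI-O. A minimal sketch of a playbook exercising these switches (hosts and values are placeholders):

    - hosts: front
      become: true
      roles:
        - role: grycap.kubernetes
          kube_type_of_node: front
          kube_install_method: kubeadm   # 'k3s' skips the CRI runtime dependencies entirely
          kube_cri_runtime: containerd   # pulls in grycap.docker in containerd-only mode
          kube_ntp_servers: []           # an empty list skips the geerlingguy.ntp dependency
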
/tasks/Debian.yaml:
--------------------------------------------------------------------------------
1 | - fail:
2 | msg: "Kubernetes version {{ kube_version }} is not supported"
3 | when: "kube_version is version('1.24.0', '<')"
4 |
5 | - name: Set kube_minor_version
6 | set_fact:
7 | kube_minor_version: "{{ kube_version.split('.')[0] }}.{{ kube_version.split('.')[1] }}"
8 |
9 | - name: Set kube_key_version
10 | set_fact:
11 | kube_key_version: "{{ kube_minor_version }}"
12 |
13 | # For some reason, the repo key for 1.27.x or lower does not work
14 | - name: Override kube_key_version for versions older than 1.28
15 | set_fact:
16 | kube_key_version: "1.30"
17 | when: "kube_version is version('1.28.0', '<')"
18 |
19 | - name: Add repo key
20 | get_url:
21 | url: "https://pkgs.k8s.io/core:/stable:/v{{ kube_key_version }}/deb/Release.key"
22 | dest: /etc/apt/trusted.gpg.d/kubernetes.asc
23 | mode: '0644'
24 |
25 | - name: Add kubernetes repo
26 | apt_repository: repo='deb https://pkgs.k8s.io/core:/stable:/v{{ kube_minor_version }}/deb/ /' state=present update_cache=yes
27 |
28 | - name: Install kubernetes packages
29 | apt: name=kubelet={{ kube_version }}*,kubeadm={{ kube_version }}*,kubectl={{ kube_version }}*
30 | when: kube_version != "latest" and (kubeadm_output.stdout == '' or kubeadm_output.stdout is version(kube_version, '<'))
31 |
32 | - name: Install kubernetes packages
33 | apt: name=kubelet,kubeadm,kubectl state=latest
34 | when: kube_version == "latest"
35 |
36 | - name: Hold "{{ item }}" package
37 | dpkg_selections:
38 | name: "{{ item }}"
39 | selection: hold
40 | with_items:
41 | - kubeadm
42 | - kubelet
43 | - kubectl
44 | ignore_errors: yes
45 |
--------------------------------------------------------------------------------
/tasks/RedHat.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - fail:
3 | msg: "Kubernetes version {{ kube_version }} is not supported"
4 | when: "kube_version is version('1.24', '<')"
5 |
6 | - name: Set kube_minor_version
7 | set_fact:
8 | kube_minor_version: "{{ kube_version.split('.')[0] }}.{{ kube_version.split('.')[1] }}"
9 |
10 | - name: Set repo file
11 | copy:
12 | dest: /etc/yum.repos.d/kubernetes.repo
13 | content: |
14 | [kubernetes]
15 | name=Kubernetes
16 | baseurl=https://pkgs.k8s.io/core:/stable:/v{{ kube_minor_version }}/rpm/
17 | enabled=1
18 | gpgcheck=0
19 | repo_gpgcheck=0
20 | gpgkey=https://pkgs.k8s.io/core:/stable:/v{{ kube_minor_version }}/rpm/repodata/repomd.xml.key
21 | exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
22 |
23 | - name: import gpg key
24 | rpm_key:
25 | key: https://pkgs.k8s.io/core:/stable:/v{{ kube_minor_version }}/rpm/repodata/repomd.xml.key
26 | state: present
27 |
28 |
29 | - name: Install kubernetes packages
30 | package: name=psmisc,kubelet-{{kube_version}},kubeadm-{{kube_version}},kubectl-{{kube_version}},yum-plugin-versionlock state=present update_cache=yes
31 | when: kube_version != "latest" and (kubeadm_output.stdout == '' or kubeadm_output.stdout is version(kube_version, '<='))
32 |
33 | - name: Install kubernetes packages
34 | package: name=psmisc,kubelet,kubeadm,kubectl,yum-plugin-versionlock state=latest
35 | when: kube_version == "latest"
36 |
37 | - name: Version lock kubernetes packages
38 | command: yum versionlock add kubelet kubeadm kubectl cri-tools kubernetes-cni
39 | register: yum_versionlock
40 | changed_when: "'versionlock added: 1' in yum_versionlock.stdout_lines"
41 | ignore_errors: yes
42 |
43 | - name: Start kubelet service
44 | service: name=kubelet state=started enabled=yes
45 |
46 | - lineinfile:
47 | path: /etc/sysctl.d/k8s.conf
48 | regexp: '^net.bridge.bridge-nf-call-ip6tables'
49 | line: 'net.bridge.bridge-nf-call-ip6tables = 1'
50 | create: yes
51 | notify:
52 | - sysctl-system
53 |
54 | - lineinfile:
55 | path: /etc/sysctl.d/k8s.conf
56 | regexp: '^net.bridge.bridge-nf-call-iptables'
57 | line: 'net.bridge.bridge-nf-call-iptables = 1'
58 | create: yes
59 | notify:
60 | - sysctl-system
61 |
62 | - name: sysctl-system
63 | command: sysctl --system
64 |
65 | - name: Stop and disable firewalld
66 | service:
67 | name: firewalld
68 | state: stopped
69 | enabled: False
70 | ignore_errors: yes
71 |
72 |
--------------------------------------------------------------------------------
/tasks/cert-manager.yaml:
--------------------------------------------------------------------------------
1 | - block:
2 | - pip:
3 | name: dnspython
4 | ignore_errors: yes
5 | - name: Try to get the DNS name of the node
6 | set_fact:
7 | public_hostname: "{{ lookup('dig', kube_cert_public_ip + '/PTR', '@8.8.8.8')[:-1] }}"
8 | ignore_errors: yes
9 | when: kube_cert_manager | bool and kube_public_dns_name == ""
10 |
11 | - block:
12 | - debug:
13 | msg: "DNS name = {{ public_hostname }} - Disabling cert_manager."
14 | ignore_errors: yes
15 | - name: Disabling cert_manager
16 | set_fact:
17 | kube_cert_manager: false
18 |   when: kube_cert_manager | bool and kube_public_dns_name == "" and (public_hostname is undefined or public_hostname == "NXDOMAI" or public_hostname == "")  # the [:-1] slice above trims a failed lookup's 'NXDOMAIN' to 'NXDOMAI'
19 |
20 | - set_fact:
21 | public_hostname: "{{ kube_public_dns_name }}"
22 | when: kube_cert_manager | bool and kube_public_dns_name != ""
23 |
24 | - block:
25 |     - name: Create cert-manager-values.yaml
26 | copy:
27 | src: cert-manager-values.yaml
28 | dest: /var/tmp/cert-manager-values.yaml
29 |
30 | - name: Install cert-manager helm chart
31 | import_tasks: helm_chart.yaml
32 | vars:
33 | helm_chart_name: cert-manager
34 | helm_chart_namespace: cert-manager
35 | helm_repo_name: jetstack
36 | helm_repo_url: https://charts.jetstack.io
37 | helm_chart_version: "{{ kube_cert_manager_chart_version }}"
38 | helm_values_file: /var/tmp/cert-manager-values.yaml
39 | helm_wait: true
40 | helm_timeout: 600s
41 |
42 | - block:
43 | - template: src=dns01_secret.j2 dest=/tmp/dns01_secret.yaml
44 | - name: Create DNS01 secret
45 | command: kubectl apply -f /tmp/dns01_secret.yaml
46 | environment:
47 | KUBECONFIG: "{{KUBECONFIG}}"
48 | when: kube_cert_manager_challenge == "dns01"
49 |
50 | - template: src=prod_issuer.j2 dest=/tmp/prod_issuer.yaml
51 |   - name: Create Let's Encrypt certificate issuer
52 | command: kubectl apply -f /tmp/prod_issuer.yaml
53 | environment:
54 | KUBECONFIG: "{{KUBECONFIG}}"
55 |
56 | - block:
57 | - template: src=wildcard_cert.j2 dest=/tmp/wildcard_cert.yaml
58 |     - name: Create Let's Encrypt wildcard certificate for '{{ kube_cert_manager_wildcard_cert_dns_name }}'
59 | command: kubectl apply -f /tmp/wildcard_cert.yaml
60 | environment:
61 | KUBECONFIG: "{{KUBECONFIG}}"
62 | when: kube_cert_manager_wildcard_cert_dns_name != '' and kube_cert_manager_challenge == 'dns01'
63 |
64 | when: kube_cert_manager | bool
65 |
--------------------------------------------------------------------------------
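
The prod_issuer.j2 and wildcard_cert.j2 templates are not reproduced in this dump; for orientation only, a ClusterIssuer of the kind the task above applies looks roughly like this (an illustrative sketch with placeholder values, not the role's actual template):

    apiVersion: cert-manager.io/v1
    kind: ClusterIssuer
    metadata:
      name: letsencrypt-prod              # placeholder name
    spec:
      acme:
        server: https://acme-v02.api.letsencrypt.org/directory
        email: admin@example.com          # placeholder contact address
        privateKeySecretRef:
          name: letsencrypt-prod          # secret that stores the ACME account key
        solvers:
          - http01:
              ingress:
                class: nginx              # served by the ingress-nginx chart this role installs
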
/tasks/cri-dockerd.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Download cri-dockerd tarball
3 | get_url:
4 | url: https://github.com/Mirantis/cri-dockerd/releases/download/v{{ kube_cri_dockerd_version }}/cri-dockerd-{{ kube_cri_dockerd_version }}.amd64.tgz
5 | dest: /tmp/cri-dockerd-{{ kube_cri_dockerd_version }}.amd64.tgz
6 |
7 | - name: Extract cri-dockerd tarball
8 | unarchive:
9 | src: /tmp/cri-dockerd-{{ kube_cri_dockerd_version }}.amd64.tgz
10 | dest: /tmp
11 | remote_src: yes
12 |
13 | - name: Copy cri-dockerd binary
14 | copy:
15 | src: /tmp/cri-dockerd/cri-dockerd
16 | dest: /usr/bin/cri-dockerd
17 | mode: '0755'
18 | remote_src: true
19 |
20 | - name: Download cri-docker service and socket
21 | get_url:
22 | url: https://raw.githubusercontent.com/Mirantis/cri-dockerd/master/packaging/systemd/{{ item }}
23 | dest: /etc/systemd/system/{{ item }}
24 | mode: '0644'
25 | loop:
26 | - cri-docker.service
27 | - cri-docker.socket
28 |
29 | - name: Enable cri-dockerd service
30 | systemd:
31 | name: "{{ item }}"
32 | enabled: yes
33 | daemon_reload: yes
34 | state: started
35 | loop:
36 | - cri-docker.service
37 | - cri-docker.socket
38 |
--------------------------------------------------------------------------------
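
A quick way to confirm the shim answers CRI calls once both units are up is a crictl probe against the cri-dockerd socket; a minimal sketch (assumes crictl is installed; /run/cri-dockerd.sock is the upstream default socket path):

    - name: Verify that cri-dockerd answers CRI requests   # illustrative check, not part of the role
      command: crictl --runtime-endpoint unix:///run/cri-dockerd.sock version
      changed_when: false
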
/tasks/delete_wns.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - set_fact:
3 | all_nodes: []
4 |
5 | - name: Get the FQDN of all nodes
6 | set_fact:
7 | all_nodes: "{{ groups['all']|map('extract', hostvars,'IM_NODE_FQDN')|list }}"
8 | when: IM_NODE_FQDN is defined
9 |
10 | - name: Get list of NotReady nodes
11 |   shell: kubectl get nodes | grep NotReady | awk '{print $1}'
12 |   environment:
13 |     KUBECONFIG: "{{KUBECONFIG}}"
14 |   register: kube_nodes
15 |   changed_when: false
16 |   ignore_errors: true
17 | 
18 | - name: Delete removed nodes
19 |   command: kubectl delete node {{item}}
20 |   environment:
21 |     KUBECONFIG: "{{KUBECONFIG}}"
22 |   when: all_nodes and item not in all_nodes
23 |   with_items: "{{ kube_nodes.stdout_lines }}"
24 |   ignore_errors: true
25 | 
--------------------------------------------------------------------------------
/tasks/front.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Copy 'is_cluster_ready' file
3 | copy: dest=/bin/is_cluster_ready src=is_cluster_ready mode=0755
4 |
5 | - block:
6 |
7 | - name: Create kubeadm-config file
8 | template: src=kubeadm-config.j2 dest=/tmp/kubeadm-config.yml
9 |
10 | - name: Kubeadm init
11 | command: kubeadm init --config /tmp/kubeadm-config.yml creates=/etc/kubernetes/admin.conf
12 |
13 | - name: Set kube_wait_api_server_ip
14 | set_fact:
15 | kube_wait_api_server_ip: "{{kube_api_server}}"
16 | when: kube_api_server != "0.0.0.0"
17 |
18 | - set_fact:
19 | node_port_exists: false
20 |
21 | - set_fact:
22 | node_port_exists: true
23 |     when: item.option == '--service-node-port-range'
24 | loop: "{{ kube_apiserver_options }}"
25 |
26 | - name: Add Kube API server --service-node-port-range option
27 | set_fact:
28 | kube_apiserver_options: "{{ [{'option': '--service-node-port-range', 'value': '80-35000'}] + kube_apiserver_options }}"
29 | when: kube_install_ingress | bool and not node_port_exists | bool
30 |
31 | - name: Add Kube API server options
32 | lineinfile:
33 | dest: /etc/kubernetes/manifests/kube-apiserver.yaml
34 | line: ' - {{item.option}}={{item.value}}'
35 | regexp: '^ - {{item.option}}='
36 | insertafter: ' - kube-apiserver'
37 | register: add_kube_api_server_options
38 | with_items: "{{ kube_apiserver_options }}"
39 |
40 | - name: Restart Kube API server
41 | block:
42 |
43 | - name: kill kubeapi
44 | command: killall kube-apiserver
45 | ignore_errors: true
46 |
47 | - name: restart kubelet to restart kubeapi
48 | service:
49 | name: kubelet
50 | state: restarted
51 |
52 |     - name: Pause for 5 seconds to wait for kubelet to restart
53 | pause:
54 | seconds: 5
55 |
56 | when: add_kube_api_server_options is changed
57 |
58 |   - name: Wait for the Kube API server to start on "{{kube_wait_api_server_ip}}"
59 | uri:
60 | url: "https://{{kube_wait_api_server_ip}}:6443/livez"
61 | status_code: 200
62 | validate_certs: false
63 | register: result
64 | until: result.status == 200
65 | retries: 30
66 | delay: 2
67 |
68 | - set_fact:
69 | KUBECONFIG: /etc/kubernetes/admin.conf
70 |
71 | - import_tasks: kube_nets.yaml
72 | environment:
73 | KUBECONFIG: "{{KUBECONFIG}}"
74 |
75 | when: kube_install_method == "kubeadm"
76 |
77 |
78 | - block:
79 |
80 | - set_fact:
81 | k3s_args: "--disable traefik --disable servicelb --disable local-storage {{kube_k3_exec}}"
82 |
83 | - name: Add Kube API server options to K3s
84 | set_fact:
85 | k3s_args: "{{k3s_args}} --kube-apiserver-arg={{item.option}}={{item.value}}"
86 | with_items: "{{ kube_apiserver_options }}"
87 |
88 | - set_fact:
89 | k3s_args: "{{k3s_args}} --service-node-port-range=80-32000"
90 | when: kube_install_ingress | bool
91 |
92 | - name: Install k3s server
93 | command: sh /tmp/k3s.sh {{k3s_args}} creates=/etc/rancher/k3s/k3s.yaml
94 | environment:
95 | INSTALL_K3S_VERSION: "{{ kube_version }}"
96 |
97 | - name: Link kubectl to /usr/bin/kubectl
98 | file:
99 | src: /usr/local/bin/kubectl
100 | dest: /usr/bin/kubectl
101 | state: link
102 |
103 | - name: Update master node label
104 | shell: kubectl get node --selector='node-role.kubernetes.io/master' -o=name | xargs -I ARG kubectl label ARG node-role.kubernetes.io/master= --overwrite
105 |
106 | - set_fact:
107 | KUBECONFIG: /etc/rancher/k3s/k3s.yaml
108 |
109 | when: kube_install_method == "k3s"
110 |
111 | - name: Set KUBECONFIG environment variable
112 | lineinfile:
113 | dest: /etc/environment
114 | line: "KUBECONFIG={{KUBECONFIG}}"
115 |
116 | - import_tasks: helm.yaml
117 | environment:
118 | KUBECONFIG: "{{KUBECONFIG}}"
119 |
120 | - include_tasks: cert-manager.yaml
121 |
122 | - block:
123 | - name: Create kubernetes-dashboard.yaml
124 | copy: src=kubernetes-dashboard.yaml dest=/tmp/kubernetes-dashboard.yaml
125 | - name: Apply kubernetes-dashboard.yaml
126 | command: kubectl apply -f /tmp/kubernetes-dashboard.yaml
127 | environment:
128 | KUBECONFIG: "{{KUBECONFIG}}"
129 | when: kube_deploy_dashboard | bool
130 |
131 | - name: Install metrics-server helm chart
132 | import_tasks: helm_chart.yaml
133 | vars:
134 | helm_chart_name: metrics-server
135 | helm_chart_namespace: kube-system
136 | helm_repo_name: metrics-server
137 | helm_repo_url: https://kubernetes-sigs.github.io/metrics-server
138 | helm_chart_version: "{{ kube_metrics_chart_version }}"
139 | helm_wait: false
140 | when: kube_install_metrics | bool
141 |
142 | - name: Add persistent volumes to kube cluster
143 | block:
144 | - template: src=persistent-volumes.j2 dest=/tmp/kubernetes-persistent-volumes.yml
145 | - command: kubectl apply -f /tmp/kubernetes-persistent-volumes.yml
146 | environment:
147 | KUBECONFIG: "{{KUBECONFIG}}"
148 | when: kube_persistent_volumes != []
149 |
150 | - name: Install Git package
151 | package: name=git
152 | when: kube_apply_repos != []
153 |
154 | - name: Download git repo "{{ item.repo }}"
155 | git:
156 | repo: "{{ item.repo }}"
157 | dest: "/tmp/{{ item.repo | basename }}"
158 | version: "{{ item.version }}"
159 | with_items: "{{ kube_apply_repos }}"
160 |
161 | - name: apply path "{{ item.path }}"
162 | command: kubectl apply -f "{{ item.path }}" chdir="/tmp/{{ item.repo | basename }}"
163 | environment:
164 | KUBECONFIG: "{{KUBECONFIG}}"
165 | with_items: "{{ kube_apply_repos }}"
166 |
167 | - import_tasks: nfs-client.yaml
168 | when: kube_install_nfs_client | bool and not kube_install_longhorn | bool
169 |
170 | - block:
171 | - name: Create longhorn.yaml
172 | template: src=longhorn.j2 dest=/tmp/longhorn.yaml
173 | - name: Apply longhorn.yaml
174 | command: kubectl apply -f /tmp/longhorn.yaml
175 | environment:
176 | KUBECONFIG: "{{KUBECONFIG}}"
177 | when: kube_install_longhorn | bool
178 |
179 | - import_tasks: ingress.yaml
180 | when: kube_install_ingress | bool
181 |
182 | - block:
183 | - name: Create nvidia-device-plugin.yaml
184 | template: src=nvidia-device-plugin.j2 dest=/tmp/nvidia-device-plugin.yml
185 | - name: Apply nvidia-device-plugin.yaml
186 | command: kubectl apply -f /tmp/nvidia-device-plugin.yml
187 | environment:
188 | KUBECONFIG: "{{KUBECONFIG}}"
189 | when: kube_nvidia_support | bool
190 |
191 | - block:
192 | - name: Create kubernetes-dashboard-ingress.yaml
193 | template: src=kubernetes-dashboard-ingress.j2 dest=/tmp/kubernetes-dashboard-ingress.yaml
194 | - name: Apply kubernetes-dashboard-ingress.yaml
195 | command: kubectl apply -f /tmp/kubernetes-dashboard-ingress.yaml
196 | environment:
197 | KUBECONFIG: "{{KUBECONFIG}}"
198 | when: kube_install_ingress | bool and kube_deploy_dashboard | bool
199 |
200 | - import_tasks: kubeapps.yaml
201 | when: kube_install_kubeapps | bool
202 |
203 | - import_tasks: kyverno.yaml
204 | when: kube_install_kyverno | bool
205 |
206 | - import_tasks: yunikorn.yaml
207 | when: kube_install_yunikorn | bool
208 |
209 | - import_tasks: delete_wns.yaml
--------------------------------------------------------------------------------
/tasks/helm.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - stat:
3 | path: /usr/bin/helm
4 | register: helm
5 |
6 | # Install HELM
7 | - block:
8 | - name: Copy helm install script
9 | copy:
10 | src: helm.sh
11 | dest: /tmp/helm.sh
12 | mode: 0755
13 |
14 | - name: Execute helm install script
15 | command: bash /tmp/helm.sh -v {{kube_install_helm_version}}
16 |
17 | when: not helm.stat.exists
18 |
--------------------------------------------------------------------------------
/tasks/helm_chart.yaml:
--------------------------------------------------------------------------------
1 | - name: Add Helm repo
2 | command: helm repo add {{ helm_repo_name }} {{ helm_repo_url }}
3 | environment:
4 | KUBECONFIG: "{{KUBECONFIG}}"
5 |
6 | - name: Set empty extra args
7 | set_fact:
8 | extra_args: ""
9 |
10 | - name: Set values file arg
11 | set_fact:
12 | extra_args: "{{ extra_args }} -f {{ helm_values_file }}"
13 | when: helm_values_file is defined and helm_values_file != ""
14 |
15 | - name: Set chart version arg {{ helm_chart_version }}
16 | set_fact:
17 | extra_args: "{{ extra_args }} --version={{ helm_chart_version }}"
18 | when: helm_chart_version is defined and helm_chart_version != "latest"
19 |
20 | - name: Set wait arg
21 | set_fact:
22 | extra_args: "{{ extra_args }} --wait"
23 | when: helm_wait is defined and helm_wait
24 |
25 | - name: Set timeout arg
26 | set_fact:
27 | extra_args: "{{ extra_args }} --timeout {{ helm_timeout }}"
28 | when: helm_timeout is defined
29 |
30 | - name: Check if {{ helm_chart_name }} is installed
31 | command: helm status {{ helm_chart_name }} -n {{ helm_chart_namespace }}
32 | environment:
33 | KUBECONFIG: "{{KUBECONFIG}}"
34 | register: helm_status
35 | ignore_errors: yes
36 | changed_when: false
37 |
38 | - block:
39 |
40 | - name: Install {{ helm_chart_name }} {{ helm_chart_version }}
41 | command: helm upgrade {{ helm_chart_name }} --install --namespace {{ helm_chart_namespace }} --create-namespace {{ helm_repo_name }}/{{ helm_chart_name }} {{ extra_args }}
42 | environment:
43 | KUBECONFIG: "{{KUBECONFIG}}"
44 | async: 900
45 | poll: 5
46 | ignore_errors: true
47 | register: helm_install
48 |
49 | - name: Undeploy {{ helm_chart_name }} app
50 | command: helm uninstall --namespace {{ helm_chart_namespace }} {{ helm_chart_name }}
51 | environment:
52 | KUBECONFIG: "{{KUBECONFIG}}"
53 | when: helm_install is failed
54 |
55 |   - name: Fail if the {{ helm_chart_name }} chart install failed
56 |     fail:
57 |       msg: "Error installing {{ helm_chart_name }} helm chart: {{ helm_install.stderr }}"
58 | when: helm_install is failed
59 |
60 | when: helm_status.rc != 0
61 |
--------------------------------------------------------------------------------
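
helm_chart.yaml is effectively a parameterized installer: the caller imports it with a set of helm_* variables, the chart is installed only when `helm status` reports it absent, and a failed install is uninstalled before the play is failed. A sketch of an invocation with hypothetical chart and repo names:

    - name: Install some-chart helm chart
      import_tasks: helm_chart.yaml
      vars:
        helm_chart_name: some-chart              # hypothetical chart name
        helm_chart_namespace: some-namespace     # hypothetical namespace
        helm_repo_name: some-repo                # hypothetical repo alias
        helm_repo_url: https://charts.example.com
        helm_chart_version: latest               # 'latest' skips the --version flag
        helm_wait: true                          # adds --wait
        helm_timeout: 300s                       # adds --timeout 300s
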
/tasks/ingress.yaml:
--------------------------------------------------------------------------------
1 | - name: Create ingress-values.yaml
2 | template:
3 | src: ingress-values.j2
4 | dest: /var/tmp/ingress-values.yaml
5 |
6 | - name: Install ingress-nginx helm chart
7 | import_tasks: helm_chart.yaml
8 | vars:
9 | helm_chart_name: ingress-nginx
10 | helm_chart_namespace: ingress-nginx
11 | helm_repo_name: ingress-nginx
12 | helm_repo_url: https://kubernetes.github.io/ingress-nginx
13 | helm_chart_version: "{{ kube_ingress_chart_version }}"
14 | helm_wait: true
15 | helm_timeout: 300s
16 | helm_values_file: /var/tmp/ingress-values.yaml
17 |
--------------------------------------------------------------------------------
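
With the chart installed (and, under kubeadm, the widened --service-node-port-range applied in front.yaml), workloads are exposed through ordinary Ingress objects; an illustrative example with placeholder names:

    apiVersion: networking.k8s.io/v1
    kind: Ingress
    metadata:
      name: demo-ingress                 # placeholder name
    spec:
      ingressClassName: nginx            # class served by the ingress-nginx controller
      rules:
        - host: demo.example.com         # placeholder host
          http:
            paths:
              - path: /
                pathType: Prefix
                backend:
                  service:
                    name: demo-service   # placeholder backend Service
                    port:
                      number: 80
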
/tasks/k3s.yaml:
--------------------------------------------------------------------------------
1 | - name: get k3s bin
2 | get_url:
3 | url: https://get.k3s.io
4 | dest: /tmp/k3s.sh
5 |
6 |
--------------------------------------------------------------------------------
/tasks/kube_nets.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Set net.bridge.bridge-nf-call-iptables to 1
3 | lineinfile:
4 | dest: /etc/sysctl.d/k8s.conf
5 | regexp: '^net.bridge.bridge-nf-call-iptables'
6 | line: 'net.bridge.bridge-nf-call-iptables = 1'
7 | create: yes
8 | notify: sysctl-system
9 | when: kube_network == 'flannel' or kube_network == 'kube-router'
10 |
11 | - block:
12 | - name: Create flannel-net.yaml
13 | template: src=flannel-net.j2 dest=/tmp/flannel-net.yml
14 | - name: Apply flannel-net.yaml
15 | command: kubectl apply -f /tmp/flannel-net.yml creates=/etc/cni/net.d/10-flannel.conflist
16 | environment:
17 | KUBECONFIG: /etc/kubernetes/admin.conf
18 | when: kube_network == 'flannel'
19 |
20 | - block:
21 | - name: Create calico-net.yaml
22 | copy: src=calico-net.yaml dest=/tmp/calico-net.yaml
23 | - name: Apply calico-net.yaml
24 | command: kubectl apply -f /tmp/calico-net.yaml creates=/var/etcd/calico-data
25 | environment:
26 | KUBECONFIG: /etc/kubernetes/admin.conf
27 | when: kube_network == 'calico'
28 |
29 | - block:
30 | - name: Create kuberouter-net.yaml
31 | copy: src=kuberouter-net.yaml dest=/tmp/kuberouter-net.yaml
32 | - name: Apply kuberouter-net.yaml
33 | command: kubectl apply -f /tmp/kuberouter-net.yaml creates=/etc/cni/net.d/10-kuberouter.conf
34 | environment:
35 | KUBECONFIG: /etc/kubernetes/admin.conf
36 | when: kube_network == 'kube-router'
37 |
38 | - block:
39 | - name: Create weave-net.yaml
40 | copy: src=weave-net.yaml dest=/tmp/weave-net.yaml
41 | - name: Apply weave-net.yaml
42 | command: kubectl apply -f /tmp/weave-net.yaml creates=/etc/cni/net.d/10-weave.conf
43 | environment:
44 | KUBECONFIG: /etc/kubernetes/admin.conf
45 | when: kube_network == 'weave'
46 |
47 | - block:
48 |     # Helm is the installation method for Cilium
49 | - import_tasks: helm.yaml
50 | environment:
51 | KUBECONFIG: "{{KUBECONFIG}}"
52 |
53 | # Cert-manager is used for creating the certificates
54 | - name: Add cert-manager CRDs
55 | shell: kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.8.2/cert-manager.crds.yaml
56 | when: kube_cert_manager | bool
57 |
58 | # Prometheus
59 |   - name: Add the Prometheus ServiceMonitor CRD
60 |     shell: kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
61 | when: cilium_prometheus_enable | bool
62 |
63 | - name: Edit /etc/sysctl.d/99-override_cilium_rp_filter.conf
64 | lineinfile:
65 | dest: /etc/sysctl.d/99-override_cilium_rp_filter.conf
66 | regexp: '^net.ipv4.conf.lxc*.rp_filter'
67 | line: 'net.ipv4.conf.lxc*.rp_filter = 0'
68 | create: yes
69 | notify: sysctl-system
70 |
71 | - block:
72 | # IPSec secret
73 | - name: Check if cilium-ipsec-keys secret exists
74 | command: kubectl -n kube-system get secret cilium-ipsec-keys
75 | register: k_status
76 | ignore_errors: yes
77 | changed_when: false
78 |
79 | - name: Create cilium-ipsec-keys secret
80 | shell: kubectl create -n kube-system secret generic cilium-ipsec-keys --from-literal=keys="3 rfc4106(gcm(aes)) $(echo $(dd if=/dev/urandom count=20 bs=1 2> /dev/null | xxd -p -c 64)) 128"
81 | environment:
82 | KUBECONFIG: /etc/kubernetes/admin.conf
83 | when: k_status.rc != 0
84 |
85 | when: cilium_nodeEncryption | bool
86 |
87 | - name: Check if Cilium cli is installed
88 | command: ls /usr/local/bin/cilium
89 | register: cilium_cli_status
90 | ignore_errors: yes
91 | changed_when: false
92 |
93 | - block:
94 | - name: Download cilium cli
95 | command: curl -L --remote-name-all https://github.com/cilium/cilium-cli/releases/latest/download/cilium-linux-amd64.tar.gz{,.sha256sum}
96 |
97 | - name: Install cilium cli
98 | command: tar xzvfC cilium-linux-amd64.tar.gz /usr/local/bin
99 |
100 | when: cilium_cli_status.rc != 0
101 |
102 | - name: Check if Cilium is installed
103 | command: helm status cilium -n kube-system
104 | register: helm_status
105 | ignore_errors: yes
106 | changed_when: false
107 |
108 | - block:
109 | - name: Add Cilium Helm repo
110 | command: helm repo add cilium https://helm.cilium.io/
111 | environment:
112 | KUBECONFIG: "{{KUBECONFIG}}"
113 |
114 | - name: Helm update
115 | command: helm repo update
116 | environment:
117 | KUBECONFIG: "{{KUBECONFIG}}"
118 |
119 | - block:
120 | - copy: src=cillium-default-priorityclass.yaml dest=/tmp/cillium-default-priorityclass.yaml
121 | - name: Apply cillium-default-priorityclass.yaml
122 | command: kubectl apply -f /tmp/cillium-default-priorityclass.yaml
123 | environment:
124 | KUBECONFIG: "{{KUBECONFIG}}"
125 | when: cilium_priorityClassName == 'network'
126 |
127 | - name: Create file with Cilium values
128 | template: src=cilium-values-1.15.6.yaml.j2 dest=/tmp/cilium-values-1.15.6.yaml
129 |
130 | - name: Deploy cilium
131 | command: helm install cilium cilium/cilium --version 1.15.6 --namespace kube-system -f /tmp/cilium-values-1.15.6.yaml
132 | environment:
133 | KUBECONFIG: "{{KUBECONFIG}}"
134 |
135 | when: helm_status.rc != 0
136 |
137 |   - name: Wait until Cilium is deployed
138 |     command: cilium status --wait
139 |     environment:
140 |       KUBECONFIG: "{{KUBECONFIG}}"
141 |     # Hubble is deployed in the WNs, so do not wait here
142 |     when: not cilium_hubble_enable
143 |
144 | when: kube_network == 'cilium'
145 |
--------------------------------------------------------------------------------
/tasks/kubeadm.yaml:
--------------------------------------------------------------------------------
1 | - name: Include cri-docker tasks
2 | include_tasks: "cri-dockerd.yaml"
3 | when: kube_cri_runtime == "docker"
4 |
5 | - name: Check kube version
6 | shell: kubeadm version -o short | cut -d 'v' -f 2
7 | register: kubeadm_output
8 | changed_when: false
9 |
10 | - debug: msg="Kubeadm version installed = {{ kubeadm_output.stdout }}"
11 |
12 | - name: Create /etc/modules-load.d/containerd.conf
13 | register: containerd_json
14 | copy:
15 | content: |
16 | overlay
17 | br_netfilter
18 | dest: /etc/modules-load.d/containerd.conf
19 |
20 | - name: Make modprobes
21 | command: modprobe {{item}}
22 | with_items:
23 | - overlay
24 | - br_netfilter
25 | when: containerd_json is changed
26 |
27 | - name: Create /etc/sysctl.d/99-kubernetes-cri.conf
28 | register: containerd_sysctl
29 | copy:
30 | content: |
31 | net.bridge.bridge-nf-call-iptables = 1
32 | net.ipv4.ip_forward = 1
33 | net.bridge.bridge-nf-call-ip6tables = 1
34 | dest: /etc/sysctl.d/99-kubernetes-cri.conf
35 |
36 | #- name: Containerd CNI for Cilium network
37 | # template: src=10-containerd-net.conflist.j2 dest=/etc/cni/net.d/10-containerd-net.conflist
38 | # notify: sysctl-system
39 | #  when: kube_network == 'cilium'
40 |
41 | - name: Apply sysctl params
42 | command: sysctl --system
43 | when: containerd_sysctl is changed
44 |
45 | - name: Include "{{ansible_os_family}}" Kubernetes recipe
46 | include_tasks: "{{ansible_os_family}}.yaml"
--------------------------------------------------------------------------------
/tasks/kubeapps.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install kubeapps helm chart
3 | import_tasks: helm_chart.yaml
4 | vars:
5 | helm_chart_name: kubeapps
6 | helm_chart_namespace: kubeapps
7 | helm_repo_name: bitnami
8 | helm_repo_url: https://charts.bitnami.com/bitnami
9 | helm_chart_version: "{{ kube_kubeapps_chart_version }}"
10 |
11 | - template: src=kubeapps-ingress.j2 dest=/tmp/kubeapps-ingress.yaml
12 | - command: kubectl apply -f /tmp/kubeapps-ingress.yaml
13 | environment:
14 | KUBECONFIG: "{{KUBECONFIG}}"
15 |
--------------------------------------------------------------------------------
/tasks/kyverno.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Add Kyverno Helm repo
3 | command: helm repo add kyverno https://kyverno.github.io/kyverno/
4 | environment:
5 | KUBECONFIG: "{{KUBECONFIG}}"
6 |
7 | - name: Check if kyverno is installed
8 | command: helm status kyverno -n kyverno
9 | register: helm_status
10 | ignore_errors: yes
11 | changed_when: false
12 |
13 | - name: Check if kyverno-crds is installed
14 | command: helm status kyverno-crds -n kyverno
15 | register: helm_status_kyverno_crds
16 | ignore_errors: yes
17 | changed_when: false
18 |
19 | - name: Helm update
20 | command: helm repo update
21 | environment:
22 | KUBECONFIG: "{{KUBECONFIG}}"
23 |
24 | - name: Create namespace kyverno
25 | command: kubectl create namespace kyverno
26 | environment:
27 | KUBECONFIG: "{{KUBECONFIG}}"
28 | when: helm_status.rc != 0
29 |
30 | - name: Install kyverno-crds {{kyverno_crds_helm_chart_version}}
31 | command: helm install kyverno-crds kyverno/kyverno-crds --namespace kyverno --version={{kyverno_crds_helm_chart_version}}
32 | environment:
33 | KUBECONFIG: "{{KUBECONFIG}}"
34 | when: helm_status_kyverno_crds.rc != 0
35 |
36 | - template: src=kyverno-values.j2 dest=/tmp/kyverno-values.yml
37 |
38 | - name: Install kyverno {{kyverno_helm_chart_version}}
39 | command: helm install kyverno --namespace kyverno -f /tmp/kyverno-values.yml kyverno/kyverno --version={{kyverno_helm_chart_version}}
40 | environment:
41 | KUBECONFIG: "{{KUBECONFIG}}"
42 | when: helm_status.rc != 0
--------------------------------------------------------------------------------
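
Once Kyverno is running, policies are ordinary cluster resources; a small illustrative ClusterPolicy (not shipped by this role) that audits pods missing an 'app' label:

    apiVersion: kyverno.io/v1
    kind: ClusterPolicy
    metadata:
      name: require-app-label            # hypothetical policy name
    spec:
      validationFailureAction: audit     # report violations instead of blocking admission
      rules:
        - name: check-app-label
          match:
            resources:
              kinds:
                - Pod
          validate:
            message: "The label 'app' is required."
            pattern:
              metadata:
                labels:
                  app: "?*"              # any non-empty value
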
/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Disable swap
3 | command: swapoff -a
4 |
5 | - name: Disable SWAP in fstab
6 | replace:
7 | path: /etc/fstab
8 | regexp: '^([^#].*?\sswap\s+sw.*)$'
9 | replace: '# \1'
10 |
11 | - name: Include "{{kube_install_method}}" tasks
12 | include_tasks: "{{kube_install_method}}.yaml"
13 |
14 | - name: force handlers
15 | meta: flush_handlers
16 |
17 | - name: Include "{{kube_type_of_node}}" tasks
18 | include_tasks: "{{kube_type_of_node}}.yaml"
19 |
--------------------------------------------------------------------------------
/tasks/nfs-client.yaml:
--------------------------------------------------------------------------------
1 | - name: Create nfs-client-values.yaml
2 | copy:
3 | content: |
4 | nfs:
5 | server: {{ kube_nfs_server }}
6 | path: {{ kube_nfs_path }}
7 | reclaimPolicy: {{ kube_nfs_reclaim_policy }}
8 | storageClass:
9 | name: managed-nfs-storage
10 | defaultClass: true
11 | reclaimPolicy: {{ kube_nfs_reclaim_policy }}
12 | dest: /var/tmp/nfs-client-values.yaml
13 |
14 | - name: Install nfs-subdir-external-provisioner helm chart
15 | import_tasks: helm_chart.yaml
16 | vars:
17 | helm_chart_name: nfs-subdir-external-provisioner
18 | helm_chart_namespace: nfs-client
19 | helm_repo_name: nfs-subdir-external-provisioner
20 | helm_repo_url: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner
21 | helm_chart_version: "{{ kube_nfs_chart_version }}"
22 | helm_wait: false
23 | helm_values_file: /var/tmp/nfs-client-values.yaml
24 |
--------------------------------------------------------------------------------
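
Because the chart above marks its storage class as the cluster default, any PVC without an explicit class lands on the NFS share; spelled out, a claim would look like this illustrative sketch:

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: data-claim                        # placeholder name
    spec:
      storageClassName: managed-nfs-storage   # the class defined in the values above
      accessModes:
        - ReadWriteMany                       # NFS allows shared read-write mounts
      resources:
        requests:
          storage: 1Gi
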
/tasks/wn.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Edit /etc/sysctl.d/99-override_cilium_rp_filter.conf
3 | lineinfile:
4 | dest: /etc/sysctl.d/99-override_cilium_rp_filter.conf
5 | regexp: '^net.ipv4.conf.lxc*.rp_filter'
6 | line: 'net.ipv4.conf.lxc*.rp_filter = 0'
7 | create: yes
8 | notify: sysctl-system
9 | when: kube_network == 'cilium'
10 |
11 | - name: Wait for Kube master
12 | wait_for:
13 | path: /etc/environment
14 | search_regex: "KUBECONFIG="
15 | delegate_to: "{{kube_server}}"
16 |
17 | - block:
18 |
19 | - name: Read node-token from FE
20 | block:
21 | - command: 'cat /var/lib/rancher/k3s/server/node-token'
22 | register: token_read
23 | delegate_to: '{{ kube_server }}'
24 | delegate_facts: true
25 | - set_fact:
26 | node_token: "{{ token_read.stdout }}"
27 |
28 | - name: Install k3s node
29 | command: sh /tmp/k3s.sh creates=/var/lib/rancher/k3s/agent/etc/k3s-agent-load-balancer.json
30 | environment:
31 | INSTALL_K3S_EXEC: "{{kube_k3_exec}}"
32 | K3S_URL: https://{{ kube_server }}:6443
33 | K3S_TOKEN: "{{ node_token }}"
34 |
35 |   - name: Copy config.toml.tmpl for NVIDIA support
36 | copy:
37 | src: config.toml.tmpl
38 | dest: /var/lib/rancher/k3s/agent/etc/containerd/config.toml.tmpl
39 | when: kube_nvidia_support | bool
40 |
41 | when: kube_install_method == "k3s"
42 |
43 | - block:
44 |
45 |   # To be deprecated and moved to kubelet_extra_args_dict
46 | - name: Add KUBELET_EXTRA_ARGS
47 | lineinfile:
48 | dest: "{{item}}/kubelet"
49 | line: 'KUBELET_EXTRA_ARGS=--cgroup-driver=systemd {{kubelet_extra_args}}'
50 | regexp: 'KUBELET_EXTRA_ARGS='
51 | create: yes
52 | notify: restart kubelet
53 | with_first_found:
54 | - files:
55 | - /etc/sysconfig/
56 | - /etc/default/
57 | ignore_errors: true
58 | when: kubelet_extra_args != "" and kubelet_extra_args_dict == {}
59 |
60 | - name: Create kubeadm-config file
61 | template: src=kubeadm-config-join.j2 dest=/tmp/kubeadm-config.yml
62 |
63 | - name: Add node to kube cluster
64 | command: kubeadm join --config /tmp/kubeadm-config.yml creates=/etc/kubernetes/kubelet.conf
65 |
66 | when: kube_install_method == "kubeadm"
67 |
68 |
69 |
--------------------------------------------------------------------------------
/tasks/yunikorn.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Add Apache Yunikorn Helm repo
3 | command: helm repo add yunikorn https://apache.github.io/yunikorn-release
4 | environment:
5 | KUBECONFIG: "{{KUBECONFIG}}"
6 |
7 | - name: Check if Apache Yunikorn is installed
8 | command: helm status yunikorn -n yunikorn
9 | register: helm_status
10 | ignore_errors: yes
11 | changed_when: false
12 |
13 | - block:
14 | - name: Helm update
15 | command: helm repo update
16 | environment:
17 | KUBECONFIG: "{{KUBECONFIG}}"
18 | - name: Create namespace
19 | command: kubectl create namespace yunikorn
20 | environment:
21 | KUBECONFIG: "{{KUBECONFIG}}"
22 | - name: Copy Yunikorn values
23 | copy: src=yunikorn-values.yaml dest=/tmp/yunikorn-values.yaml
24 | - name: Install Apache Yunikorn
25 | command: helm install yunikorn yunikorn/yunikorn --namespace yunikorn --version={{kube_yunikorn_version}} --values /tmp/yunikorn-values.yaml
26 | environment:
27 | KUBECONFIG: "{{KUBECONFIG}}"
28 | when: helm_status.rc != 0
29 |
--------------------------------------------------------------------------------
/templates/10-containerd-net.conflist.j2:
--------------------------------------------------------------------------------
1 | {
2 | "cniVersion": "1.0.0",
3 | "name": "containerd-net",
4 | "plugins": [
5 | {
6 | "type": "bridge",
7 | "bridge": "cni0",
8 | "isGateway": true,
9 | "ipMasq": true,
10 | "promiscMode": true,
11 | "ipam": {
12 | "type": "host-local",
13 | "ranges": [
14 | [{
15 | "subnet": "{{ kube_pod_network_cidr }}"
16 | }],
17 | [{
18 | "subnet": "2001:4860:4860::/64"
19 | }]
20 | ],
21 | "routes": [
22 | { "dst": "0.0.0.0/0" },
23 | { "dst": "::/0" }
24 | ]
25 | }
26 | },
27 | {
28 | "type": "portmap",
29 | "capabilities": {"portMappings": true}
30 | }
31 | ]
32 | }
--------------------------------------------------------------------------------
/templates/cilium-values-1.11.6.yaml.j2:
--------------------------------------------------------------------------------
1 | debug:
2 | # -- Enable debug logging
3 | enabled: {{ cilium_debug_log }}
4 | # verbose:
5 |
6 | rbac:
7 | # -- Enable creation of Resource-Based Access Control configuration.
8 | create: true
9 |
10 | # -- Configure image pull secrets for pulling container images
11 | imagePullSecrets:
12 | # - name: "image-pull-secret"
13 |
14 | # kubeConfigPath: ~/.kube/config
15 | k8sServiceHost: {{ cilium_k8s_endpoint }}
16 | k8sServicePort: 6443
17 |
18 | cluster:
19 | # -- Name of the cluster. Only required for Cluster Mesh.
20 | name: default
21 | # -- (int) Unique ID of the cluster. Must be unique across all connected
22 | # clusters and in the range of 1 to 255. Only required for Cluster Mesh.
23 | id:
24 |
25 | # -- Define serviceAccount names for components.
26 | # @default -- Component's fully qualified name.
27 | serviceAccounts:
28 | cilium:
29 | create: true
30 | name: cilium
31 | annotations: {}
32 | etcd:
33 | create: true
34 | name: cilium-etcd-operator
35 | annotations: {}
36 | operator:
37 | create: true
38 | name: cilium-operator
39 | annotations: {}
40 | preflight:
41 | create: true
42 | name: cilium-pre-flight
43 | annotations: {}
44 | relay:
45 | create: true
46 | name: hubble-relay
47 | annotations: {}
48 | ui:
49 | create: true
50 | name: hubble-ui
51 | annotations: {}
52 | clustermeshApiserver:
53 | create: true
54 | name: clustermesh-apiserver
55 | annotations: {}
56 | # -- Clustermeshcertgen is used if clustermesh.apiserver.tls.auto.method=cronJob
57 | clustermeshcertgen:
58 | create: true
59 | name: clustermesh-apiserver-generate-certs
60 | annotations: {}
61 | # -- Hubblecertgen is used if hubble.tls.auto.method=cronJob
62 | hubblecertgen:
63 | create: true
64 | name: hubble-generate-certs
65 | annotations: {}
66 |
67 | # -- Install the cilium agent resources.
68 | agent: true
69 |
70 | # -- Agent container name.
71 | name: cilium
72 |
73 | # -- Roll out cilium agent pods automatically when configmap is updated.
74 | rollOutCiliumPods: true
75 |
76 | # -- Agent container image.
77 | image:
78 | override: ~
79 | repository: "quay.io/cilium/cilium"
80 | tag: "v1.11.6"
81 | pullPolicy: "IfNotPresent"
82 | # cilium-digest
83 | digest: ""
84 | useDigest: false
85 |
86 | # -- Pod affinity for cilium-agent.
87 | affinity:
88 | nodeAffinity:
89 | requiredDuringSchedulingIgnoredDuringExecution:
90 | nodeSelectorTerms:
91 | - matchExpressions:
92 | - key: kubernetes.io/os
93 | operator: In
94 | values:
95 | - linux
96 | # Compatible with Kubernetes 1.12.x and 1.13.x
97 | - matchExpressions:
98 | - key: beta.kubernetes.io/os
99 | operator: In
100 | values:
101 | - linux
102 | podAntiAffinity:
103 | requiredDuringSchedulingIgnoredDuringExecution:
104 | - labelSelector:
105 | matchExpressions:
106 | - key: k8s-app
107 | operator: In
108 | values:
109 | - cilium
110 | topologyKey: kubernetes.io/hostname
111 |
112 | # -- The priority class to use for cilium-agent.
113 | priorityClassName: "{{ cilium_priorityClassName }}"
114 |
115 | # -- Additional agent container arguments.
116 | extraArgs: []
117 |
118 | # -- Additional agent container environment variables.
119 | extraEnv: {}
120 |
121 | # -- Additional InitContainers to initialize the pod.
122 | extraInitContainers: []
123 |
124 | # -- Additional agent hostPath mounts.
125 | extraHostPathMounts: []
126 | # - name: host-mnt-data
127 | # mountPath: /host/mnt/data
128 | # hostPath: /mnt/data
129 | # hostPathType: Directory
130 | # readOnly: true
131 | # mountPropagation: HostToContainer
132 |
133 | # -- Additional agent ConfigMap mounts.
134 | extraConfigmapMounts: []
135 | # - name: certs-configmap
136 | # mountPath: /certs
137 | # configMap: certs-configmap
138 | # readOnly: true
139 |
140 | # -- extraConfig allows you to specify additional configuration parameters to be
141 | # included in the cilium-config configmap.
142 | extraConfig: {}
143 | # my-config-a: "1234"
144 | # my-config-b: |-
145 | # test 1
146 | # test 2
147 | # test 3
148 |
149 | # -- Node tolerations for agent scheduling to nodes with taints
150 | # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
151 | tolerations:
152 | - operator: Exists
153 | # - key: "key"
154 | # operator: "Equal|Exists"
155 | # value: "value"
156 | # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
157 |
158 | # -- Annotations to be added to agent pods
159 | podAnnotations: {}
160 |
161 | # -- Labels to be added to agent pods
162 | podLabels: {}
163 |
164 | # -- PodDisruptionBudget settings
165 | # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
166 | podDisruptionBudget:
167 | enabled: true
168 | maxUnavailable: 2
169 |
170 | # -- Agent resource limits & requests
171 | # ref: https://kubernetes.io/docs/user-guide/compute-resources/
172 | resources: {}
173 | # limits:
174 | # cpu: 4000m
175 | # memory: 4Gi
176 | # requests:
177 | # cpu: 100m
178 | # memory: 512Mi
179 |
180 | # -- Security context to be added to agent pods
181 | securityContext: {}
182 | # runAsUser: 0
183 |
184 | # -- Cilium agent update strategy
185 | updateStrategy:
186 | rollingUpdate:
187 | maxUnavailable: 2
188 | type: RollingUpdate
189 |
190 | # Configuration Values for cilium-agent
191 |
192 | # -- Enable installation of PodCIDR routes between worker
193 | # nodes if worker nodes share a common L2 network segment.
194 | autoDirectNodeRoutes: false
195 |
196 | # -- Annotate k8s node upon initialization with Cilium's metadata.
197 | annotateK8sNode: true
198 |
199 | azure:
200 | # -- Enable Azure integration
201 | enabled: false
202 | # resourceGroup: group1
203 | # subscriptionID: 00000000-0000-0000-0000-000000000000
204 | # tenantID: 00000000-0000-0000-0000-000000000000
205 | # clientID: 00000000-0000-0000-0000-000000000000
206 | # clientSecret: 00000000-0000-0000-0000-000000000000
207 | # userAssignedIdentityID: 00000000-0000-0000-0000-000000000000
208 |
209 | alibabacloud:
210 | # -- Enable AlibabaCloud ENI integration
211 | enabled: false
212 |
213 | # -- Optimize TCP and UDP workloads and enable rate-limiting traffic from
214 | # individual Pods with EDT (Earliest Departure Time)
215 | # through the "kubernetes.io/egress-bandwidth" Pod annotation.
216 | bandwidthManager: false
217 |
218 | # -- Configure BGP
219 | bgp:
220 | # -- Enable BGP support inside Cilium; embeds a new ConfigMap for BGP inside
221 | # cilium-agent and cilium-operator
222 | enabled: false
223 | announce:
224 | # -- Enable allocation and announcement of service LoadBalancer IPs
225 | loadbalancerIP: false
226 | # -- Enable announcement of node pod CIDR
227 | podCIDR: false
228 |
229 | bpf:
230 | # -- Enable BPF clock source probing for more efficient tick retrieval.
231 | clockProbe: false
232 |
233 | # -- Enables pre-allocation of eBPF map values. This increases
234 | # memory usage but can reduce latency.
235 | preallocateMaps: false
236 |
237 | # -- Configure the maximum number of entries in the TCP connection tracking
238 | # table.
239 | # ctTcpMax: '524288'
240 |
241 | # -- Configure the maximum number of entries for the non-TCP connection
242 | # tracking table.
243 | # ctAnyMax: '262144'
244 |
245 | # -- Configure the maximum number of service entries in the
246 | # load balancer maps.
247 | lbMapMax: 65536
248 |
249 | # -- Configure the maximum number of entries for the NAT table.
250 | # natMax: 524288
251 |
252 | # -- Configure the maximum number of entries for the neighbor table.
253 | # neighMax: 524288
254 |
255 | # -- Configure the maximum number of entries in endpoint policy map (per endpoint).
256 | policyMapMax: 16384
257 |
258 | # -- Configure auto-sizing for all BPF maps based on available memory.
259 | # ref: https://docs.cilium.io/en/stable/concepts/ebpf/maps/#ebpf-maps
260 | #mapDynamicSizeRatio: 0.0025
261 |
262 | # -- Configure the level of aggregation for monitor notifications.
263 | # Valid options are none, low, medium, maximum.
264 | monitorAggregation: medium
265 |
266 | # -- Configure the typical time between monitor notifications for
267 | # active connections.
268 | monitorInterval: "5s"
269 |
270 | # -- Configure which TCP flags trigger notifications when seen for the
271 | # first time in a connection.
272 | monitorFlags: "all"
273 |
274 | # -- Allow cluster external access to ClusterIP services.
275 | lbExternalClusterIP: false
276 |
277 | # -- Enable native IP masquerade support in eBPF
278 | masquerade: true
279 |
280 | # -- Configure whether direct routing mode should route traffic via
281 | # host stack (true) or directly and more efficiently out of BPF (false) if
282 | # the kernel supports it. The latter has the implication that it will also
283 | # bypass netfilter in the host namespace.
284 | #hostRouting: true
285 |
286 | # -- Configure the eBPF-based TPROXY to reduce reliance on iptables rules
287 | # for implementing Layer 7 policy.
288 | # tproxy: true
289 |
290 | # -- Configure the FIB lookup bypass optimization for nodeport reverse
291 | # NAT handling.
292 | # lbBypassFIBLookup: true
293 |
294 | # -- Configure explicitly allowed VLAN id's for bpf logic bypass.
295 | # [0] will allow all VLAN id's without any filtering.
296 | # vlan-bpf-bypass: []
297 |
298 | # -- Clean all eBPF datapath state from the initContainer of the cilium-agent
299 | # DaemonSet.
300 | #
301 | # WARNING: Use with care!
302 | cleanBpfState: false
303 |
304 | # -- Clean all local Cilium state from the initContainer of the cilium-agent
305 | # DaemonSet. Implies cleanBpfState: true.
306 | #
307 | # WARNING: Use with care!
308 | cleanState: false
309 |
310 | cni:
311 | # -- Install the CNI configuration and binary files into the filesystem.
312 | install: true
313 |
314 | # -- Configure chaining on top of other CNI plugins. Possible values:
315 | # - none
316 | # - generic-veth
317 | # - aws-cni
318 | # - portmap
319 | chainingMode: none
320 |
321 | # -- Make Cilium take ownership over the `/etc/cni/net.d` directory on the
322 | # node, renaming all non-Cilium CNI configurations to `*.cilium_bak`.
323 | # This ensures no Pods can be scheduled using other CNI plugins during Cilium
324 | # agent downtime.
325 | exclusive: true
326 |
327 | # -- Skip writing of the CNI configuration. This can be used if
328 | # writing of the CNI configuration is performed by external automation.
329 | customConf: false
330 |
331 | # -- Configure the path to the CNI configuration directory on the host.
332 | confPath: /etc/cni/net.d
333 |
334 | # -- Configure the path to the CNI binary directory on the host.
335 | binPath: /opt/cni/bin
336 |
337 | # -- Specify the path to a CNI config to read from on agent start.
338 | # This can be useful if you want to manage your CNI
339 | # configuration outside of a Kubernetes environment. This parameter is
340 | # mutually exclusive with the 'cni.configMap' parameter.
341 | # readCniConf: /host/etc/cni/net.d/05-cilium.conf
342 |
343 | # -- When defined, configMap will mount the provided value as ConfigMap and
344 | # interpret the cniConf variable as CNI configuration file and write it
345 | # when the agent starts up
346 | # configMap: cni-configuration
347 |
348 | # -- Configure the key in the CNI ConfigMap to read the contents of
349 | # the CNI configuration from.
350 | configMapKey: cni-config
351 |
352 | # -- Configure the path to where to mount the ConfigMap inside the agent pod.
353 | confFileMountPath: /tmp/cni-configuration
354 |
355 | # -- Configure the path to where the CNI configuration directory is mounted
356 | # inside the agent pod.
357 | hostConfDirMountPath: /host/etc/cni/net.d
358 |
359 | # -- Configure how frequently garbage collection should occur for the datapath
360 | # connection tracking table.
361 | # conntrackGCInterval: "0s"
362 |
363 | # -- Configure container runtime specific integration.
364 | containerRuntime:
365 | # -- Enables specific integrations for container runtimes.
366 | # Supported values:
367 | # - containerd
368 | # - crio
369 | # - docker
370 | # - none
371 | # - auto (automatically detect the container runtime)
372 | integration: auto
373 | # -- Configure the path to the container runtime control socket.
374 | # socketPath: /path/to/runtime.sock
375 |
376 | # crdWaitTimeout: ""
377 |
378 | # -- Tail call hooks for custom eBPF programs.
379 | customCalls:
380 | # -- Enable tail call hooks for custom eBPF programs.
381 | enabled: false
382 |
383 | # -- Configure which datapath mode should be used for configuring container
384 | # connectivity. Valid options are "veth" or "ipvlan". Deprecated, to be removed
385 | # in v1.12.
386 | datapathMode: veth
387 |
388 | daemon:
389 | # -- Configure where Cilium runtime state should be stored.
390 | runPath: "/var/run/cilium"
391 |
392 | # -- Specify which network interfaces can run the eBPF datapath. This means
393 | # that a packet sent from a pod to a destination outside the cluster will be
394 | # masqueraded (to an output device IPv4 address), if the output device runs the
395 | # program. When not specified, probing will automatically detect devices.
396 | # devices: ""
397 |
398 | # -- Chains to ignore when installing feeder rules.
399 | # disableIptablesFeederRules: ""
400 |
401 | # -- Limit egress masquerading to interface selector.
402 | # egressMasqueradeInterfaces: ""
403 |
404 | # -- Whether to enable CNP status updates.
405 | enableCnpStatusUpdates: false
406 |
407 | # -- Configures the use of the KVStore to optimize Kubernetes event handling by
408 | # mirroring it into the KVstore for reduced overhead in large clusters.
409 | enableK8sEventHandover: false
410 |
411 | # TODO: Add documentation
412 | # enableIdentityMark: false
413 |
414 | # enableK8sEndpointSlice: false
415 |
416 | # -- Enable CiliumEndpointSlice feature.
417 | enableCiliumEndpointSlice: false
418 |
419 | # -- Enables the fallback compatibility solution for when the xt_socket kernel
420 | # module is missing and it is needed for the datapath L7 redirection to work
421 | # properly. See documentation for details on when this can be disabled:
422 | # https://docs.cilium.io/en/stable/operations/system_requirements/#linux-kernel.
423 | enableXTSocketFallback: true
424 |
425 | encryption:
426 | # -- Enable transparent network encryption.
427 | enabled: true
428 |
429 | # -- Encryption method. Can be either ipsec or wireguard.
430 | type: "ipsec"
431 |
432 | # -- Enable encryption for pure node to node traffic.
433 | # This option is only effective when encryption.type is set to ipsec.
434 | nodeEncryption: false
435 |
436 | ipsec:
437 | # -- Name of the key file inside the Kubernetes secret configured via secretName.
438 | keyFile: ""
439 |
440 | # -- Path to mount the secret inside the Cilium pod.
441 | mountPath: ""
442 |
443 | # -- Name of the Kubernetes secret containing the encryption keys.
444 | secretName: "cilium-ipsec-keys"
445 |
446 | # -- The interface to use for encrypted traffic.
447 | interface: ""
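    | # [Hedged example, not part of the chart] The "cilium-ipsec-keys" secret
    | # named above must exist before the agents start; per the Cilium IPsec
    | # guide it can be created along these lines:
    | #   kubectl -n kube-system create secret generic cilium-ipsec-keys \
    | #     --from-literal=keys="3 rfc4106(gcm(aes)) $(dd if=/dev/urandom count=20 bs=1 2>/dev/null | xxd -p -c 64) 128"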
448 |
449 | wireguard:
450 | # -- Enables the fallback to the user-space implementation.
451 | userspaceFallback: false
452 |
453 | # -- Deprecated in favor of encryption.ipsec.keyFile.
454 | # Name of the key file inside the Kubernetes secret configured via secretName.
455 | # This option is only effective when encryption.type is set to ipsec.
456 | keyFile: keys
457 |
458 | # -- Deprecated in favor of encryption.ipsec.mountPath.
459 | # Path to mount the secret inside the Cilium pod.
460 | # This option is only effective when encryption.type is set to ipsec.
461 | mountPath: /etc/ipsec
462 |
463 | # -- Deprecated in favor of encryption.ipsec.secretName.
464 | # Name of the Kubernetes secret containing the encryption keys.
465 | # This option is only effective when encryption.type is set to ipsec.
466 | secretName: ""
467 |
468 | # -- Deprecated in favor of encryption.ipsec.interface.
469 | # The interface to use for encrypted traffic.
470 | # This option is only effective when encryption.type is set to ipsec.
471 | interface: ""
472 |
473 | endpointHealthChecking:
474 | # -- Enable connectivity health checking between virtual endpoints.
475 | enabled: true
476 |
477 | # -- Enable endpoint status.
478 | # Status can be: policy, health, controllers, logs and/or state. For two or more options, use a comma-separated list.
479 | endpointStatus:
480 | enabled: false
481 | status: ""
482 |
483 | endpointRoutes:
484 | # -- Enable use of per endpoint routes instead of routing via
485 | # the cilium_host interface.
486 | enabled: true
487 |
488 | eni:
489 | # -- Enable Elastic Network Interface (ENI) integration.
490 | enabled: false
491 | # -- Update ENI Adapter limits from the EC2 API
492 | updateEC2AdapterLimitViaAPI: false
493 | # -- Release IPs not used from the ENI
494 | awsReleaseExcessIPs: false
495 | # -- EC2 API endpoint to use
496 | ec2APIEndpoint: ""
497 | # -- Tags to apply to the newly created ENIs
498 | eniTags: {}
499 | # -- If using IAM roles for Service Accounts, Cilium will not try to
500 | # inject identity values from the cilium-aws Kubernetes secret.
501 | # Adds an annotation to the service account if it is managed by Helm.
502 | # See https://github.com/aws/amazon-eks-pod-identity-webhook
503 | iamRole: ""
504 | # -- Filter via subnet IDs which will dictate which subnets are going to be used to create new ENIs
505 | # Important note: This requires that each instance has an ENI with a matching subnet attached
506 | # when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium,
507 | # use the CNI configuration file settings (cni.customConf) instead.
508 | subnetIDsFilter: ""
509 | # -- Filter via tags (k=v) which will dictate which subnets are going to be used to create new ENIs
510 | # Important note: This requires that each instance has an ENI with a matching subnet attached
511 | # when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium,
512 | # use the CNI configuration file settings (cni.customConf) instead.
513 | subnetTagsFilter: ""
514 |
515 | externalIPs:
516 | # -- Enable ExternalIPs service support.
517 | enabled: false
518 |
519 | # fragmentTracking enables IPv4 fragment tracking support in the datapath.
520 | # fragmentTracking: true
521 |
522 | gke:
523 | # -- Enable Google Kubernetes Engine integration
524 | enabled: false
525 |
526 | # -- Enable connectivity health checking.
527 | healthChecking: true
528 |
529 | # -- TCP port for the agent health API. This is not the port for cilium-health.
530 | healthPort: 9879
531 |
532 | # -- Configure the host firewall.
533 | hostFirewall:
534 | # -- Enables the enforcement of host policies in the eBPF datapath.
535 | enabled: false
536 |
537 | hostPort:
538 | # -- Enable hostPort service support.
539 | enabled: false
540 |
541 | # -- Configure ClusterIP service handling in the host namespace (the node).
542 | hostServices:
543 | # -- Enable host reachable services.
544 | enabled: false
545 |
546 | # -- Supported list of protocols to apply ClusterIP translation to.
547 | protocols: tcp,udp
548 |
549 | # -- Disable socket LB for non-root namespaces. This is used to enable Istio routing rules.
550 | # hostNamespaceOnly: false
551 |
552 | # -- Configure certificate generation for Hubble integration.
553 | # If hubble.tls.auto.method=cronJob, these values are used
554 | # for the Kubernetes CronJob which will be scheduled regularly to
555 | # (re)generate any certificates not provided manually.
556 | certgen:
557 | image:
558 | override: ~
559 | repository: "quay.io/cilium/certgen"
560 | tag: "v0.1.5@sha256:0c2b71bb3469990e7990e7e26243617aa344b5a69a4ce465740b8577f9d48ab9"
561 | pullPolicy: "IfNotPresent"
562 | # -- Seconds after which the completed job pod will be deleted
563 | ttlSecondsAfterFinished: 1800
564 | # -- Labels to be added to hubble-certgen pods
565 | podLabels: {}
566 |
567 | hubble:
568 | # -- Enable Hubble (true by default).
569 | enabled: {{ cilium_hubble_enable }}
570 |
571 | # -- Buffer size of the channel Hubble uses to receive monitor events. If this
572 | # value is not set, the queue size is set to the default monitor queue size.
573 | # eventQueueSize: ""
574 |
575 | # -- Number of recent flows for Hubble to cache. Defaults to 4095.
576 | # Possible values are:
577 | # 1, 3, 7, 15, 31, 63, 127, 255, 511, 1023,
578 | # 2047, 4095, 8191, 16383, 32767, 65535
579 | # eventBufferCapacity: "4095"
580 |
581 | # -- Hubble metrics configuration.
582 | # See https://docs.cilium.io/en/stable/operations/metrics/#hubble-metrics
583 | # for more comprehensive documentation about Hubble metrics.
584 | metrics:
585 | # -- Configures the list of metrics to collect. If empty or null, metrics
586 | # are disabled.
587 | # Example:
588 | #
589 | # enabled:
590 | # - dns:query;ignoreAAAA
591 | # - drop
592 | # - tcp
593 | # - flow
594 | # - icmp
595 | # - http
596 | #
597 | # You can specify the list of metrics from the helm CLI:
598 | #
599 | # --set metrics.enabled="{dns:query;ignoreAAAA,drop,tcp,flow,icmp,http}"
600 | #
601 | enabled:
602 | - dns
603 | - drop
604 | - flow
605 | - http
606 | - icmp
607 | - port-distribution
608 | - tcp
609 | # -- Configure the port the hubble metric server listens on.
610 | port: 9091
611 | # -- Annotations to be added to hubble-metrics service.
612 | serviceAnnotations: {}
613 | serviceMonitor:
614 | # -- Create ServiceMonitor resources for Prometheus Operator.
615 | # This requires the prometheus CRDs to be available.
616 | # ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
617 | enabled: false
618 | # -- Labels to add to ServiceMonitor hubble
619 | labels: {}
620 | # -- Annotations to add to ServiceMonitor hubble
621 | annotations: {}
622 |
623 | # -- Unix domain socket path to listen to when Hubble is enabled.
624 | socketPath: /var/run/cilium/hubble.sock
625 |
626 | # -- An additional address for Hubble to listen to.
627 | # Set this field to ":4244" if you are enabling Hubble Relay, as it assumes that
628 | # Hubble is listening on port 4244.
629 | listenAddress: ":4244"
630 | peerService:
631 | # -- Enable a K8s Service for the Peer service, so that it can be accessed
632 | # by a non-local client
633 | enabled: true
634 | # -- Service Port for the Peer service.
635 | # If not set, it is dynamically assigned to port 443 if TLS is enabled and to
636 | # port 80 if not.
637 | # servicePort: 80
638 | # -- Target Port for the Peer service.
639 | targetPort: 4244
640 | # -- The cluster domain to use to query the Hubble Peer service. It should
641 | # be the local cluster.
642 | clusterDomain: cluster.local
643 | # -- TLS configuration for Hubble
644 | tls:
645 | # -- Enable mutual TLS for listenAddress. Setting this value to false is
646 | # highly discouraged as the Hubble API provides access to potentially
647 | # sensitive network flow metadata and is exposed on the host network.
648 | enabled: true
649 | # -- Configure automatic TLS certificates generation.
650 | auto:
651 | # -- Auto-generate certificates.
652 | # When set to true, automatically generate a CA and certificates to
653 | # enable mTLS between Hubble server and Hubble Relay instances. If set to
654 | # false, the certs for Hubble server need to be provided by setting
655 | # appropriate values below.
656 | enabled: true
657 | # -- Set the method to auto-generate certificates. Supported values:
658 | # - helm: This method uses Helm to generate all certificates.
659 | # - cronJob: This method uses a Kubernetes CronJob to generate any
660 | # certificates not provided by the user at installation
661 | # time.
662 | # - certmanager: This method uses cert-manager to generate & rotate certificates.
663 | # method: helm
664 | {% if kube_cert_manager %}
665 | method: certmanager
666 | {% else %}
667 | method: helm
668 | {% endif %}
669 | # -- Generated certificates validity duration in days.
670 | certValidityDuration: 1095
671 | # -- Schedule for certificates regeneration (regardless of their expiration date).
672 | # Only used if method is "cronJob". If nil, then no recurring job will be created.
673 | # Instead, only the one-shot job is deployed to generate the certificates at
674 | # installation time.
675 | #
676 | # Defaults to midnight of the first day of every fourth month. For syntax, see
677 | # https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#schedule
678 | schedule: "0 0 1 */4 *"
679 |
680 | # [Example]
681 | # certManagerIssuerRef:
682 | # group: cert-manager.io
683 | # kind: ClusterIssuer
684 | # name: ca-issuer
685 | # -- certmanager issuer used when hubble.tls.auto.method=certmanager.
686 | # If not specified, a CA issuer will be created.
687 | {% if kube_cert_manager %}
688 | certManagerIssuerRef:
689 | group: cert-manager.io
690 | kind: ClusterIssuer
691 | name: letsencrypt-prod
692 | {% else %}
693 | certManagerIssuerRef: {}
694 | {% endif %}
696 | # -- base64 encoded PEM values for the Hubble CA certificate and private key.
697 | ca:
698 | cert: ""
699 | # -- The CA private key (optional). If it is provided, then it will be
700 | # used by hubble.tls.auto.method=cronJob to generate all other certificates.
701 | # Otherwise, an ephemeral CA is generated if hubble.tls.auto.enabled=true.
702 | key: ""
703 | # -- base64 encoded PEM values for the Hubble server certificate and private key
704 | server:
705 | cert: ""
706 | key: ""
707 | # -- Extra DNS names added to certificate when it's auto generated
708 | extraDnsNames: []
709 | # -- Extra IP addresses added to certificate when it's auto generated
710 | extraIpAddresses: []
711 |
712 | relay:
713 | # -- Enable Hubble Relay (requires hubble.enabled=true)
714 | enabled: {{ cilium_hubble_enable }}
715 |
716 | # -- Roll out Hubble Relay pods automatically when configmap is updated.
717 | rollOutPods: true
718 |
719 | # -- Hubble-relay container image.
720 | image:
721 | override: ~
722 | repository: "quay.io/cilium/hubble-relay"
723 | tag: "v1.11.6"
724 | # hubble-relay-digest
725 | digest: ""
726 | useDigest: false
727 | pullPolicy: "IfNotPresent"
728 |
729 | # -- Specifies the resources for the hubble-relay pods
730 | resources: {}
731 |
732 | # -- Number of replicas run for the hubble-relay deployment.
733 | replicas: 1
734 |
735 | # -- Node labels for pod assignment
736 | # ref: https://kubernetes.io/docs/user-guide/node-selection/
737 | nodeSelector: {}
738 |
739 | # -- Annotations to be added to hubble-relay pods
740 | podAnnotations: {}
741 |
742 | # -- Labels to be added to hubble-relay pods
743 | podLabels: {}
744 |
745 | # -- Node tolerations for pod assignment on nodes with taints
746 | # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
747 | tolerations: []
748 |
749 | # -- The priority class to use for hubble-relay
750 | priorityClassName: {{ cilium_hubble_priorityClassName }}
751 |
752 | # -- hubble-relay update strategy
753 | updateStrategy:
754 | rollingUpdate:
755 | maxUnavailable: 1
756 | type: RollingUpdate
757 |
758 | # -- Host to listen to. Specify an empty string to bind to all the interfaces.
759 | listenHost: ""
760 |
761 | # -- Port to listen to.
762 | listenPort: "4245"
763 |
764 | # -- TLS configuration for Hubble Relay
765 | tls:
766 | # -- base64 encoded PEM values for the hubble-relay client certificate and private key
767 | # This keypair is presented to Hubble server instances for mTLS
768 | # authentication and is required when hubble.tls.enabled is true.
769 | # These values need to be set manually if hubble.tls.auto.enabled is false.
770 | client:
771 | cert: ""
772 | key: ""
773 | # -- base64 encoded PEM values for the hubble-relay server certificate and private key
774 | server:
775 | # When set to true, enable TLS for the Hubble Relay server
776 | # (i.e. for clients connecting to the Hubble Relay API).
777 | enabled: true
778 | # These values need to be set manually if hubble.tls.auto.enabled is false.
779 | cert: ""
780 | key: ""
781 | # -- Extra DNS names added to the certificate when it is auto-generated
782 | extraDnsNames: []
783 | # -- Extra IP addresses added to the certificate when it is auto-generated
784 | extraIpAddresses: []
785 |
786 | # -- Dial timeout to connect to the local hubble instance to receive peer information (e.g. "30s").
787 | dialTimeout: ~
788 |
789 | # -- Backoff duration to retry connecting to the local hubble instance in case of failure (e.g. "30s").
790 | retryTimeout: ~
791 |
792 | # -- Max number of flows that can be buffered for sorting before being sent to the
793 | # client (per request) (e.g. 100).
794 | sortBufferLenMax: ~
795 |
796 | # -- When the per-request flows sort buffer is not full, a flow is drained every
797 | # time this timeout is reached (only affects requests in follow-mode) (e.g. "1s").
798 | sortBufferDrainTimeout: ~
799 |
800 | # -- Port to use for the k8s service backed by hubble-relay pods.
801 | # If not set, it is dynamically assigned to port 443 if TLS is enabled and to
802 | # port 80 if not.
803 | # servicePort: 80
804 |
805 | ui:
806 | # -- Whether to enable the Hubble UI.
807 | enabled: {{ cilium_hubble_ui_enable }}
808 |
809 | standalone:
810 | # -- When true, it will allow installing the Hubble UI only, without checking dependencies.
811 | # It is useful if a cluster already has Cilium and Hubble Relay installed and you just
812 | # want Hubble UI to be deployed.
813 | # When installing via Helm, deploy the UI with `helm upgrade`; with the cilium CLI, use `cilium hubble enable --ui`.
814 | enabled: false
815 |
816 | tls:
817 | # -- When deploying Hubble UI in standalone, with tls enabled for Hubble relay, it is required
818 | # to provide a volume for mounting the client certificates.
819 | certsVolume: {}
820 | # projected:
821 | # defaultMode: 0400
822 | # sources:
823 | # - secret:
824 | # name: hubble-ui-client-certs
825 | # items:
826 | # - key: tls.crt
827 | # path: client.crt
828 | # - key: tls.key
829 | # path: client.key
830 | # - key: ca.crt
831 | # path: hubble-relay-ca.crt
832 |
833 | # -- Roll out Hubble-ui pods automatically when configmap is updated.
834 | rollOutPods: true
835 |
836 | tls:
837 | # -- base64 encoded PEM values used to connect to hubble-relay
838 | # This keypair is presented to Hubble Relay instances for mTLS
839 | # authentication and is required when hubble.relay.tls.server.enabled is true.
840 | # These values need to be set manually if hubble.tls.auto.enabled is false.
841 | client:
842 | cert: ""
843 | key: ""
844 |
845 | backend:
846 | # -- Hubble-ui backend image.
847 | image:
848 | override: ~
849 | repository: "quay.io/cilium/hubble-ui-backend"
850 | tag: "v0.9.0@sha256:000df6b76719f607a9edefb9af94dfd1811a6f1b6a8a9c537cba90bf12df474b"
851 | pullPolicy: "IfNotPresent"
852 | # [Example]
853 | # resources:
854 | # limits:
855 | # cpu: 1000m
856 | # memory: 1024M
857 | # requests:
858 | # cpu: 100m
859 | # memory: 64Mi
860 | # -- Resource requests and limits for the 'backend' container of the 'hubble-ui' deployment.
861 | resources: {}
862 |
863 | frontend:
864 | # -- Hubble-ui frontend image.
865 | image:
866 | override: ~
867 | repository: "quay.io/cilium/hubble-ui"
868 | tag: "v0.9.0@sha256:0ef04e9a29212925da6bdfd0ba5b581765e41a01f1cc30563cef9b30b457fea0"
869 | pullPolicy: "IfNotPresent"
870 | # [Example]
871 | # resources:
872 | # limits:
873 | # cpu: 1000m
874 | # memory: 1024M
875 | # requests:
876 | # cpu: 100m
877 | # memory: 64Mi
878 | # -- Resource requests and limits for the 'frontend' container of the 'hubble-ui' deployment.
879 | resources: {}
880 |
881 | # -- The number of replicas of Hubble UI to deploy.
882 | replicas: 1
883 |
884 | # -- Annotations to be added to hubble-ui pods
885 | podAnnotations: {}
886 |
887 | # -- Labels to be added to hubble-ui pods
888 | podLabels: {}
889 |
890 | # -- Node labels for pod assignment
891 | # ref: https://kubernetes.io/docs/user-guide/node-selection/
892 | nodeSelector: {}
893 |
894 | # -- Node tolerations for pod assignment on nodes with taints
895 | # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
896 | #
897 | tolerations: []
898 |
899 | # -- The priority class to use for hubble-ui
900 | priorityClassName: {{ cilium_hubble_priorityClassName }}
901 |
902 | # -- hubble-ui update strategy.
903 | updateStrategy:
904 | rollingUpdate:
905 | maxUnavailable: 1
906 | type: RollingUpdate
907 |
908 | securityContext:
909 | # -- Whether to set the security context on the Hubble UI pods.
910 | enabled: true
911 |
912 | # -- hubble-ui ingress configuration
913 | ingress:
914 | enabled: {{ cilium_hubble_ui_enable }}
915 | annotations: {}
916 | # kubernetes.io/ingress.class: nginx
917 | # kubernetes.io/tls-acme: "true"
918 | hosts:
919 | - {{ kube_public_dns_name }}
920 | {% if kube_cert_manager %}
921 | tls:
922 | - secretName: {{ kube_public_dns_name }}
923 | hosts:
924 | - {{ kube_public_dns_name }}
925 | {% else %}
926 | tls: []
927 | # - secretName: chart-example-tls
928 | # hosts:
929 | # - chart-example.local
930 | {% endif %}
931 |
932 |
933 |
934 | # -- Method to use for identity allocation (`crd` or `kvstore`).
935 | identityAllocationMode: "crd"
936 |
937 | # TODO: Add documentation
938 | # identityChangeGracePeriod: "5s"
939 |
940 | # TODO: Add documentation
941 | # identityGCInterval:
942 |
943 | # TODO: Add documentation
944 | # identityHeartbeatTimeout: ""
945 |
946 |
947 | # -- Configure whether to install iptables rules to allow for TPROXY
948 | # (L7 proxy injection), iptables-based masquerading and compatibility
949 | # with kube-proxy.
950 | installIptablesRules: true
951 |
952 | # -- Install Iptables rules to skip netfilter connection tracking on all pod
953 | # traffic. This option is only effective when Cilium is running in direct
954 | # routing and full KPR mode. Moreover, this option cannot be enabled when Cilium
955 | # is running in a managed Kubernetes environment or in a chained CNI setup.
956 | installNoConntrackIptablesRules: false
957 |
958 | ipam:
959 | # -- Configure IP Address Management mode.
960 | # ref: https://docs.cilium.io/en/stable/concepts/networking/ipam/
961 | mode: "cluster-pool"
962 | operator:
963 | # -- Deprecated in favor of ipam.operator.clusterPoolIPv4PodCIDRList.
964 | # IPv4 CIDR range to delegate to individual nodes for IPAM.
965 | clusterPoolIPv4PodCIDR: "10.88.0.0/16"
966 | # -- IPv4 CIDR list range to delegate to individual nodes for IPAM.
967 | clusterPoolIPv4PodCIDRList:
968 | - "10.88.0.0/16"
969 | # -- IPv4 CIDR mask size to delegate to individual nodes for IPAM.
970 | clusterPoolIPv4MaskSize: 24
971 | # -- Deprecated in favor of ipam.operator.clusterPoolIPv6PodCIDRList.
972 | # IPv6 CIDR range to delegate to individual nodes for IPAM.
973 | clusterPoolIPv6PodCIDR: "fd00::/104"
974 | # -- IPv6 CIDR list range to delegate to individual nodes for IPAM.
975 | clusterPoolIPv6PodCIDRList: []
976 | # -- IPv6 CIDR mask size to delegate to individual nodes for IPAM.
977 | clusterPoolIPv6MaskSize: 120
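    | # [Worked example] With clusterPoolIPv4PodCIDR 10.88.0.0/16 and a mask size
    | # of 24, each node is delegated one /24 (254 usable pod IPs) and the pool
    | # can serve up to 2^(24-16) = 256 nodes before it is exhausted.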
978 |
979 | # -- Configure the eBPF-based ip-masq-agent
980 | ipMasqAgent:
981 | enabled: false
982 |
983 | # iptablesLockTimeout defines the iptables "--wait" option when invoked from Cilium.
984 | # iptablesLockTimeout: "5s"
985 |
986 | ipv4:
987 | # -- Enable IPv4 support.
988 | enabled: true
989 |
990 | ipv6:
991 | # -- Enable IPv6 support.
992 | enabled: false
993 |
994 | ipvlan:
995 | # -- Enable the IPVLAN datapath (deprecated)
996 | enabled: false
997 |
998 | # -- masterDevice is the name of the device to use to attach secondary IPVLAN
999 | # devices
1000 | # masterDevice: eth0
1001 |
1002 | # -- Configure Kubernetes specific configuration
1003 | k8s: {}
1004 | # -- requireIPv4PodCIDR enables waiting for Kubernetes to provide the PodCIDR
1005 | # range via the Kubernetes node resource
1006 | # requireIPv4PodCIDR: false
1007 |
1008 | # -- requireIPv6PodCIDR enables waiting for Kubernetes to provide the PodCIDR
1009 | # range via the Kubernetes node resource
1010 | # requireIPv6PodCIDR: false
1011 |
1012 | # -- Keep the deprecated selector labels when deploying Cilium DaemonSet.
1013 | keepDeprecatedLabels: false
1014 |
1015 | # -- Keep the deprecated probes when deploying Cilium DaemonSet
1016 | keepDeprecatedProbes: false
1017 |
1018 | startupProbe:
1019 | # -- failure threshold of startup probe.
1020 | # 105 x 2s translates to the old behaviour of the readiness probe (120s delay + 30 x 3s)
1021 | failureThreshold: 105
1022 | # -- interval between checks of the startup probe
1023 | periodSeconds: 2
1024 | livenessProbe:
1025 | # -- failure threshold of liveness probe
1026 | failureThreshold: 10
1027 | # -- interval between checks of the liveness probe
1028 | periodSeconds: 30
1029 | readinessProbe:
1030 | # -- failure threshold of readiness probe
1031 | failureThreshold: 3
1032 | # -- interval between checks of the readiness probe
1033 | periodSeconds: 30
1034 |
1035 | # -- Configure the kube-proxy replacement in Cilium BPF datapath
1036 | # Valid options are "disabled", "probe", "partial", "strict".
1037 | # ref: https://docs.cilium.io/en/stable/gettingstarted/kubeproxy-free/
1038 | kubeProxyReplacement: "disabled"
1039 |
1040 | # -- healthz server bind address for the kube-proxy replacement.
1041 | # To enable, set the value to '0.0.0.0:10256' for all IPv4
1042 | # addresses and to '[::]:10256' for all IPv6 addresses.
1043 | # By default it is disabled.
1044 | kubeProxyReplacementHealthzBindAddr: ""
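    | # [Hedged example] The effective replacement mode can be verified from an
    | # agent pod, e.g.:
    | #   kubectl -n kube-system exec ds/cilium -- cilium status | grep KubeProxyReplacement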
1045 |
1046 | l2NeighDiscovery:
1047 | # -- Enable L2 neighbor discovery in the agent
1048 | enabled: true
1049 | # -- Override the agent's default neighbor resolution refresh period.
1050 | refreshPeriod: "30s"
1051 |
1052 | # -- Enable Layer 7 network policy.
1053 | l7Proxy: true
1054 |
1055 | # -- Enable Local Redirect Policy.
1056 | localRedirectPolicy: false
1057 |
1058 | # To include or exclude matched resources from cilium identity evaluation
1059 | # labels: ""
1060 |
1061 | # logOptions allows you to define logging options. eg:
1062 | # logOptions:
1063 | # format: json
1064 |
1065 | # -- Enables periodic logging of system load
1066 | logSystemLoad: false
1067 |
1068 |
1069 | # -- Configure maglev consistent hashing
1070 | maglev: {}
1071 | # -- tableSize is the size (parameter M) for the backend table of one
1072 | # service entry
1073 | # tableSize:
1074 |
1075 | # -- hashSeed is the cluster-wide base64 encoded seed for the hashing
1076 | # hashSeed:
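    | # [Hedged example, per the Cilium Maglev docs] tableSize is expected to be
    | # a prime number (16381 is the default; 65521 suits larger backend counts),
    | # and hashSeed a base64-encoded 12-byte value, e.g. the output of:
    | #   head -c12 /dev/urandom | base64 -w0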
1077 |
1078 | # -- Enables masquerading of IPv4 traffic leaving the node from endpoints.
1079 | enableIPv4Masquerade: true
1080 |
1081 | # -- Enables masquerading of IPv6 traffic leaving the node from endpoints.
1082 | enableIPv6Masquerade: true
1083 |
1084 | # -- Enables egress gateway (beta) to redirect and SNAT the traffic that
1085 | # leaves the cluster.
1086 | egressGateway:
1087 | enabled: false
1088 |
1089 | # -- Specify the IPv4 CIDR for native routing (i.e. the range to avoid IP masquerade for).
1090 | # This value corresponds to the configured cluster-cidr.
1091 | # Deprecated in favor of ipv4NativeRoutingCIDR, will be removed in 1.12.
1092 | # nativeRoutingCIDR:
1093 |
1094 | # -- Specify the IPv4 CIDR for native routing (i.e. the range to avoid IP masquerade for).
1095 | # This value corresponds to the configured cluster-cidr.
1096 | # ipv4NativeRoutingCIDR:
1097 |
1098 | monitor:
1099 | # -- Enable the cilium-monitor sidecar.
1100 | enabled: true
1101 |
1102 | # -- Configure service load balancing
1103 | # loadBalancer:
1104 | # -- standalone enables the standalone L4LB which does not connect to
1105 | # kube-apiserver.
1106 | # standalone: false
1107 |
1108 | # -- algorithm is the name of the load balancing algorithm for backend
1109 | # selection e.g. random or maglev
1110 | # algorithm: random
1111 |
1112 | # -- mode is the operation mode of load balancing for remote backends
1113 | # e.g. snat, dsr, hybrid
1114 | # mode: snat
1115 |
1116 | # -- acceleration is the option to accelerate service handling via XDP
1117 | # e.g. native, disabled
1118 | # acceleration: disabled
1119 |
1120 | # -- dsrDispatch configures whether IP option or IPIP encapsulation is
1121 | # used to pass a service IP and port to remote backend
1122 | # dsrDispatch: opt
1123 |
1124 | # -- serviceTopology enables K8s Topology Aware Hints -based service
1125 | # endpoints filtering
1126 | # serviceTopology: false
1127 |
1128 | # -- Configure N-S k8s service loadbalancing
1129 | nodePort:
1130 | # -- Enable the Cilium NodePort service implementation.
1131 | enabled: {{ cilium_nodeport_enable }}
1132 |
1133 | # -- Port range to use for NodePort services.
1134 | # range: "{{ cilium_nodeport_range }}"
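    | # (Hedged note: if uncommented, the range is expected in "min,max" form,
    | # matching Cilium's node-port-range flag, e.g. the Kubernetes default "30000,32767".)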
1135 |
1136 | # -- Set to true to prevent applications binding to service ports.
1137 | bindProtection: true
1138 |
1139 | # -- Append NodePort range to ip_local_reserved_ports if clash with ephemeral
1140 | # ports is detected.
1141 | autoProtectPortRange: true
1142 |
1143 | # -- Enable healthcheck nodePort server for NodePort services
1144 | enableHealthCheck: true
1145 |
1146 | # policyAuditMode: false
1147 |
1148 | # -- The agent can be put into one of the three policy enforcement modes:
1149 | # default, always and never.
1150 | # ref: https://docs.cilium.io/en/stable/policy/intro/#policy-enforcement-modes
1151 | policyEnforcementMode: "{{ cilium_policyEnforcementMode }}"
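    | # [Hedged example, not part of the chart] In "default" mode an endpoint is
    | # unrestricted until a policy selects it, e.g. a minimal CiliumNetworkPolicy:
    | #   apiVersion: cilium.io/v2
    | #   kind: CiliumNetworkPolicy
    | #   metadata:
    | #     name: allow-frontend-to-backend
    | #   spec:
    | #     endpointSelector:
    | #       matchLabels:
    | #         app: backend
    | #     ingress:
    | #       - fromEndpoints:
    | #           - matchLabels:
    | #               app: frontend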
1152 |
1153 | pprof:
1154 | # -- Enable Go pprof debugging
1155 | enabled: false
1156 |
1157 | # -- Configure prometheus metrics on the configured port at /metrics
1158 | prometheus:
1159 | enabled: {{ cilium_prometheus_enable }}
1160 | port: 9090
1161 | serviceMonitor:
1162 | # -- Enable service monitors.
1163 | # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
1164 | enabled: {{ cilium_prometheus_enable }}
1165 | # -- Labels to add to ServiceMonitor cilium-agent
1166 | labels: {}
1167 | # -- Annotations to add to ServiceMonitor cilium-agent
1168 | annotations: {}
1169 | # -- Specify the Kubernetes namespace where Prometheus expects to find
1170 | # service monitors configured.
1171 | namespace: "kube-system"
1172 | # -- Metrics that should be enabled or disabled from the default metric
1173 | # list. (+metric_foo to enable metric_foo , -metric_bar to disable
1174 | # metric_bar).
1175 | # ref: https://docs.cilium.io/en/stable/operations/metrics/#exported-metrics
1176 | metrics: ~
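    | # [Hypothetical example of the filter syntax]
    | #   metrics:
    | #     - "+cilium_bpf_map_ops_total"
    | #     - "-cilium_node_connectivity_status"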
1177 |
1178 | # -- Configure Istio proxy options.
1179 | proxy:
1180 | prometheus:
1181 | enabled: true
1182 | port: "9095"
1183 | # -- Regular expression matching compatible Istio sidecar istio-proxy
1184 | # container image names
1185 | sidecarImageRegex: "cilium/istio_proxy"
1186 |
1187 | # -- Enable use of the remote node identity.
1188 | # ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity
1189 | remoteNodeIdentity: true
1190 |
1191 | # -- Enable resource quotas for priority classes used in the cluster.
1192 | resourceQuotas:
1193 | enabled: false
1194 | cilium:
1195 | hard:
1196 | # 5k nodes * 2 DaemonSets (Cilium and cilium node init)
1197 | pods: "10k"
1198 | operator:
1199 | hard:
1200 | # 15 "clusterwide" Cilium Operator pods for HA
1201 | pods: "15"
1202 |
1203 | # Need to document default
1204 | ##################
1205 | #sessionAffinity: false
1206 |
1207 | # -- Do not run the Cilium agent when running in clean mode. Useful to completely
1208 | # uninstall Cilium, as it stops Cilium from starting and creating artifacts
1209 | # in the node.
1210 | sleepAfterInit: false
1211 |
1212 | # -- Configure BPF socket operations configuration
1213 | sockops:
1214 | # enabled enables installation of socket options acceleration.
1215 | enabled: false
1216 |
1217 | # TODO: Add documentation, default value
1218 | # svcSourceRangeCheck:
1219 |
1220 | # synchronizeK8sNodes: true
1221 |
1222 | # -- Configure TLS configuration in the agent.
1223 | tls:
1224 | enabled: true
1225 | secretsBackend: local
1226 |
1227 | # -- Configure the encapsulation configuration for communication between nodes.
1228 | # Possible values:
1229 | # - disabled
1230 | # - vxlan (default)
1231 | # - geneve
1232 | tunnel: "vxlan"
1233 |
1234 | # -- Disable the usage of CiliumEndpoint CRD.
1235 | disableEndpointCRD: "false"
1236 |
1237 | wellKnownIdentities:
1238 | # -- Enable the use of well-known identities.
1239 | enabled: false
1240 |
1241 |
1242 | etcd:
1243 | # -- Enable etcd mode for the agent.
1244 | enabled: false
1245 |
1246 | # -- cilium-etcd-operator image.
1247 | image:
1248 | override: ~
1249 | repository: "quay.io/cilium/cilium-etcd-operator"
1250 | tag: "v2.0.7@sha256:04b8327f7f992693c2cb483b999041ed8f92efc8e14f2a5f3ab95574a65ea2dc"
1251 | pullPolicy: "IfNotPresent"
1252 |
1253 | # -- The priority class to use for cilium-etcd-operator
1254 | priorityClassName: ""
1255 |
1256 | # -- Additional cilium-etcd-operator container arguments.
1257 | extraArgs: []
1258 |
1259 | # -- Additional InitContainers to initialize the pod.
1260 | extraInitContainers: []
1261 |
1262 | # -- Additional cilium-etcd-operator hostPath mounts.
1263 | extraHostPathMounts: []
1264 | # - name: textfile-dir
1265 | # mountPath: /srv/txt_collector
1266 | # hostPath: /var/lib/cilium-etcd-operator
1267 | # readOnly: true
1268 | # mountPropagation: HostToContainer
1269 |
1270 | # -- Additional cilium-etcd-operator ConfigMap mounts.
1271 | extraConfigmapMounts: []
1272 | # - name: certs-configmap
1273 | # mountPath: /certs
1274 | # configMap: certs-configmap
1275 | # readOnly: true
1276 |
1277 | # -- Node tolerations for cilium-etcd-operator scheduling to nodes with taints
1278 | # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
1279 | tolerations:
1280 | - operator: Exists
1281 | # - key: "key"
1282 | # operator: "Equal|Exists"
1283 | # value: "value"
1284 | # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
1285 |
1286 | # -- Node labels for cilium-etcd-operator pod assignment
1287 | # ref: https://kubernetes.io/docs/user-guide/node-selection/
1288 | nodeSelector: {}
1289 |
1290 | # -- Annotations to be added to cilium-etcd-operator pods
1291 | podAnnotations: {}
1292 |
1293 | # -- Labels to be added to cilium-etcd-operator pods
1294 | podLabels: {}
1295 |
1296 | # -- PodDisruptionBudget settings
1297 | # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
1298 | podDisruptionBudget:
1299 | enabled: true
1300 | maxUnavailable: 2
1301 |
1302 | # -- cilium-etcd-operator resource limits & requests
1303 | # ref: https://kubernetes.io/docs/user-guide/compute-resources/
1304 | resources: {}
1305 | # limits:
1306 | # cpu: 4000m
1307 | # memory: 4Gi
1308 | # requests:
1309 | # cpu: 100m
1310 | # memory: 512Mi
1311 |
1312 | # -- Security context to be added to cilium-etcd-operator pods
1313 | securityContext: {}
1314 | # runAsUser: 0
1315 |
1316 | # -- cilium-etcd-operator update strategy
1317 | updateStrategy:
1318 | rollingUpdate:
1319 | maxSurge: 1
1320 | maxUnavailable: 1
1321 | type: RollingUpdate
1322 |
1323 | # -- If etcd is behind a k8s service, set this option to true so that Cilium
1324 | # does the service translation automatically without requiring DNS to be
1325 | # running.
1326 | k8sService: false
1327 |
1328 | # -- Cluster domain for cilium-etcd-operator.
1329 | clusterDomain: cluster.local
1330 |
1331 | # -- List of etcd endpoints (not needed when using managed=true).
1332 | endpoints:
1333 | - https://CHANGE-ME:2379
1334 |
1335 | # -- Enable use of TLS/SSL for connectivity to etcd. (auto-enabled if
1336 | # managed=true)
1337 | ssl: false
1338 |
1339 | operator:
1340 | # -- Enable the cilium-operator component (required).
1341 | enabled: true
1342 |
1343 | # -- Roll out cilium-operator pods automatically when configmap is updated.
1344 | rollOutPods: true
1345 |
1346 | # -- cilium-operator image.
1347 | image:
1348 | override: ~
1349 | repository: "quay.io/cilium/operator"
1350 | tag: "v1.11.6"
1351 | # operator-generic-digest
1352 | genericDigest: ""
1353 | # operator-azure-digest
1354 | azureDigest: ""
1355 | # operator-aws-digest
1356 | awsDigest: ""
1357 | # operator-alibabacloud-digest
1358 | alibabacloudDigest: ""
1359 | useDigest: false
1360 | pullPolicy: "IfNotPresent"
1361 | suffix: ""
1362 |
1363 | # -- Number of replicas to run for the cilium-operator deployment
1364 | replicas: 1
1365 |
1366 | # -- For using with an existing serviceAccount.
1367 | serviceAccountName: cilium-operator
1368 |
1369 | # -- The priority class to use for cilium-operator
1370 | priorityClassName: ""
1371 |
1372 | # -- cilium-operator update strategy
1373 | updateStrategy:
1374 | rollingUpdate:
1375 | maxSurge: 1
1376 | maxUnavailable: 1
1377 | type: RollingUpdate
1378 |
1379 | # -- cilium-operator affinity
1380 | affinity:
1381 | podAntiAffinity:
1382 | requiredDuringSchedulingIgnoredDuringExecution:
1383 | - labelSelector:
1384 | matchExpressions:
1385 | - key: io.cilium/app
1386 | operator: In
1387 | values:
1388 | - operator
1389 | topologyKey: kubernetes.io/hostname
1390 |
1391 |
1392 | # -- Additional cilium-operator container arguments.
1393 | extraArgs: []
1394 |
1395 | # -- Additional cilium-operator environment variables.
1396 | extraEnv: {}
1397 |
1398 | # -- Additional InitContainers to initialize the pod.
1399 | extraInitContainers: []
1400 |
1401 | # -- Additional cilium-operator hostPath mounts.
1402 | extraHostPathMounts: []
1403 | # - name: host-mnt-data
1404 | # mountPath: /host/mnt/data
1405 | # hostPath: /mnt/data
1406 | # hostPathType: Directory
1407 | # readOnly: true
1408 | # mountPropagation: HostToContainer
1409 |
1410 | # -- Additional cilium-operator ConfigMap mounts.
1411 | extraConfigmapMounts: []
1412 | # - name: certs-configmap
1413 | # mountPath: /certs
1414 | # configMap: certs-configmap
1415 | # readOnly: true
1416 |
1417 | # -- Node tolerations for cilium-operator scheduling to nodes with taints
1418 | # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
1419 | tolerations:
1420 | - operator: Exists
1421 | # - key: "key"
1422 | # operator: "Equal|Exists"
1423 | # value: "value"
1424 | # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
1425 |
1426 | # -- Node labels for cilium-operator pod assignment
1427 | # ref: https://kubernetes.io/docs/user-guide/node-selection/
1428 | nodeSelector: {}
1429 |
1430 | # -- Annotations to be added to cilium-operator pods
1431 | podAnnotations: {}
1432 |
1433 | # -- Labels to be added to cilium-operator pods
1434 | podLabels: {}
1435 |
1436 | # -- PodDisruptionBudget settings
1437 | # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
1438 | podDisruptionBudget:
1439 | enabled: false
1440 | maxUnavailable: 1
1441 |
1442 | # -- cilium-operator resource limits & requests
1443 | # ref: https://kubernetes.io/docs/user-guide/compute-resources/
1444 | resources: {}
1445 | # limits:
1446 | # cpu: 1000m
1447 | # memory: 1Gi
1448 | # requests:
1449 | # cpu: 100m
1450 | # memory: 128Mi
1451 |
1452 | # -- Security context to be added to cilium-operator pods
1453 | securityContext: {}
1454 | # runAsUser: 0
1455 |
1456 | # -- Interval for endpoint garbage collection.
1457 | endpointGCInterval: "5m0s"
1458 |
1459 | # -- Interval for cilium node garbage collection.
1460 | nodeGCInterval: "5m0s"
1461 |
1462 | # -- Interval for identity garbage collection.
1463 | identityGCInterval: "15m0s"
1464 |
1465 | # -- Timeout for identity heartbeats.
1466 | identityHeartbeatTimeout: "30m0s"
1467 |
1468 | # -- Enable prometheus metrics for cilium-operator on the configured port at
1469 | # /metrics
1470 | prometheus:
1471 | enabled: false
1472 | port: 6942
1473 | serviceMonitor:
1474 | # -- Enable service monitors.
1475 | # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/master/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
1476 | enabled: false
1477 | # -- Labels to add to ServiceMonitor cilium-operator
1478 | labels: {}
1479 | # -- Annotations to add to ServiceMonitor cilium-operator
1480 | annotations: {}
1481 |
1482 | # -- Skip CRDs creation for cilium-operator
1483 | skipCRDCreation: false
1484 |
1485 | # -- Remove Cilium node taint from Kubernetes nodes that have a healthy Cilium
1486 | # pod running.
1487 | removeNodeTaints: true
1488 |
1489 | # -- Set Node condition NetworkUnavailable to 'false' with the reason
1490 | # 'CiliumIsUp' for nodes that have a healthy Cilium pod.
1491 | setNodeNetworkStatus: true
1492 |
1493 | unmanagedPodWatcher:
1494 | # -- Restart any pods that are not managed by Cilium.
1495 | restart: true
1496 | # -- Interval, in seconds, to check if there are any pods that are not
1497 | # managed by Cilium.
1498 | intervalSeconds: 15
1499 |
1500 | nodeinit:
1501 | # -- Enable the node initialization DaemonSet
1502 | enabled: false
1503 |
1504 | # -- node-init image.
1505 | image:
1506 | override: ~
1507 | repository: "quay.io/cilium/startup-script"
1508 | tag: "62bfbe88c17778aad7bef9fa57ff9e2d4a9ba0d8"
1509 | pullPolicy: "IfNotPresent"
1510 |
1511 | # -- The priority class to use for the nodeinit pod.
1512 | priorityClassName: ""
1513 |
1514 | # -- node-init update strategy
1515 | updateStrategy:
1516 | type: RollingUpdate
1517 |
1518 | # -- Additional nodeinit environment variables.
1519 | extraEnv: {}
1520 |
1521 | # -- Additional nodeinit init containers.
1522 | extraInitContainers: []
1523 |
1524 | # -- Additional nodeinit host path mounts.
1525 | extraHostPathMounts: []
1526 | # - name: textfile-dir
1527 | # mountPath: /srv/txt_collector
1528 | # hostPath: /var/lib/nodeinit
1529 | # readOnly: true
1530 | # mountPropagation: HostToContainer
1531 |
1532 | # -- Additional nodeinit ConfigMap mounts.
1533 | extraConfigmapMounts: []
1534 | # - name: certs-configmap
1535 | # mountPath: /certs
1536 | # configMap: certs-configmap
1537 | # readOnly: true
1538 |
1539 | # -- Node tolerations for nodeinit scheduling to nodes with taints
1540 | # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
1541 | tolerations:
1542 | - operator: Exists
1543 | # - key: "key"
1544 | # operator: "Equal|Exists"
1545 | # value: "value"
1546 | # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
1547 |
1548 | # -- Node labels for nodeinit pod assignment
1549 | # ref: https://kubernetes.io/docs/user-guide/node-selection/
1550 | nodeSelector: {}
1551 |
1552 | # -- Annotations to be added to node-init pods.
1553 | podAnnotations: {}
1554 |
1555 | # -- Labels to be added to node-init pods.
1556 | podLabels: {}
1557 |
1558 | # -- PodDisruptionBudget settings
1559 | # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
1560 | podDisruptionBudget:
1561 | enabled: true
1562 | maxUnavailable: 2
1563 |
1564 | # -- nodeinit resource limits & requests
1565 | # ref: https://kubernetes.io/docs/user-guide/compute-resources/
1566 | resources:
1567 | requests:
1568 | cpu: 100m
1569 | memory: 100Mi
1570 |
1571 | # -- Security context to be added to nodeinit pods.
1572 | securityContext: {}
1573 | # runAsUser: 0
1574 |
1575 | # -- bootstrapFile is the location of the file where the bootstrap timestamp is
1576 | # written by the node-init DaemonSet
1577 | bootstrapFile: "/tmp/cilium-bootstrap.d/cilium-bootstrap-time"
1578 |
1579 | preflight:
1580 | # -- Enable Cilium pre-flight resources (required for upgrade)
1581 | enabled: false
1582 |
1583 | # -- Cilium pre-flight image.
1584 | image:
1585 | override: ~
1586 | repository: "quay.io/cilium/cilium"
1587 | tag: "v1.11.6"
1588 | # cilium-digest
1589 | digest: ""
1590 | useDigest: false
1591 | pullPolicy: "IfNotPresent"
1592 |
1593 | # -- The priority class to use for the preflight pod.
1594 | priorityClassName: ""
1595 |
1596 | # -- preflight update strategy
1597 | updateStrategy:
1598 | type: RollingUpdate
1599 |
1600 | # -- Additional preflight environment variables.
1601 | extraEnv: {}
1602 |
1603 | # -- Additional preflight init containers.
1604 | extraInitContainers: []
1605 |
1606 | # -- Additional preflight host path mounts.
1607 | extraHostPathMounts: []
1608 | # - name: textfile-dir
1609 | # mountPath: /srv/txt_collector
1610 | # hostPath: /var/lib/preflight
1611 | # readOnly: true
1612 | # mountPropagation: HostToContainer
1613 |
1614 | # -- Additional preflight ConfigMap mounts.
1615 | extraConfigmapMounts: []
1616 | # - name: certs-configmap
1617 | # mountPath: /certs
1618 | # configMap: certs-configmap
1619 | # readOnly: true
1620 |
1621 | # -- Node tolerations for preflight scheduling to nodes with taints
1622 | # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
1623 | tolerations:
1624 | - effect: NoSchedule
1625 | key: node.kubernetes.io/not-ready
1626 | - effect: NoSchedule
1627 | key: node-role.kubernetes.io/master
1628 | - effect: NoSchedule
1629 | key: node.cloudprovider.kubernetes.io/uninitialized
1630 | value: "true"
1631 | - key: CriticalAddonsOnly
1632 | operator: "Exists"
1633 | # - key: "key"
1634 | # operator: "Equal|Exists"
1635 | # value: "value"
1636 | # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
1637 |
1638 | # -- Node labels for preflight pod assignment
1639 | # ref: https://kubernetes.io/docs/user-guide/node-selection/
1640 | nodeSelector: {}
1641 |
1642 | # -- Annotations to be added to preflight pods
1643 | podAnnotations: {}
1644 |
1645 | # -- Labels to be added to the preflight pod.
1646 | podLabels: {}
1647 |
1648 | # -- PodDisruptionBudget settings
1649 | # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
1650 | podDisruptionBudget:
1651 | enabled: true
1652 | maxUnavailable: 2
1653 |
1654 | # -- preflight resource limits & requests
1655 | # ref: https://kubernetes.io/docs/user-guide/compute-resources/
1656 | resources: {}
1657 | # limits:
1658 | # cpu: 4000m
1659 | # memory: 4Gi
1660 | # requests:
1661 | # cpu: 100m
1662 | # memory: 512Mi
1663 |
1664 | # -- Security context to be added to preflight pods
1665 | securityContext: {}
1666 | # runAsUser: 0
1667 |
1668 | # -- Path to write the `--tofqdns-pre-cache` file to.
1669 | tofqdnsPreCache: ""
1670 | # -- By default we should always validate the installed CNPs before upgrading
1671 | # Cilium. This will make sure the user will have the policies deployed in the
1672 | # cluster with the right schema.
1673 | validateCNPs: true
1674 |
1675 | # -- Explicitly enable or disable priority class.
1676 | # .Capabilities.KubeVersion is unsettable in `helm template` calls;
1677 | # it depends on the k8s library version that Helm was compiled against.
1678 | # This option allows explicitly disabling the priority class, which
1679 | # is useful for rendering charts for GKE clusters in advance.
1680 | enableCriticalPriorityClass: true
1681 |
1682 | # disableEnvoyVersionCheck removes the check for Envoy, which can be useful
1683 | # on AArch64 as the images do not currently ship a version of Envoy.
1684 | #disableEnvoyVersionCheck: false
1685 |
1686 | clustermesh:
1687 | # -- Deploy clustermesh-apiserver for clustermesh
1688 | useAPIServer: false
1689 |
1690 | # -- Clustermesh explicit configuration.
1691 | config:
1692 | # -- Enable the Clustermesh explicit configuration.
1693 | enabled: false
1694 | # -- Default dns domain for the Clustermesh API servers
1695 | # This is used in the case cluster addresses are not provided
1696 | # and IPs are used.
1697 | domain: mesh.cilium.io
1698 | # -- List of clusters to be peered in the mesh.
1699 | clusters: []
1700 | # clusters:
1701 | # # -- Name of the cluster
1702 | # - name: cluster1
1703 | # # -- Address of the cluster, use this if you created DNS records for
1704 | # # the cluster Clustermesh API server.
1705 | # address: cluster1.mesh.cilium.io
1706 | # # -- Port of the cluster Clustermesh API server.
1707 | # port: 2379
1708 | # # -- IPs of the cluster Clustermesh API server, use multiple ones when
1709 | # # you have multiple IPs to access the Clustermesh API server.
1710 | # ips:
1711 | # - 172.18.255.201
1712 | # # -- base64 encoded PEM values for the cluster client certificate, private key and certificate authority.
1713 | # tls:
1714 | # cert: ""
1715 | # key: ""
1716 |
1717 | apiserver:
1718 | # -- Clustermesh API server image.
1719 | image:
1720 | override: ~
1721 | repository: "quay.io/cilium/clustermesh-apiserver"
1722 | tag: "v1.11.6"
1723 | # clustermesh-apiserver-digest
1724 | digest: ""
1725 | useDigest: false
1726 | pullPolicy: "IfNotPresent"
1727 |
1728 | etcd:
1729 | # -- Clustermesh API server etcd image.
1730 | image:
1731 | override: ~
1732 | repository: "quay.io/coreos/etcd"
1733 | tag: "v3.4.13@sha256:04833b601fa130512450afa45c4fe484fee1293634f34c7ddc231bd193c74017"
1734 | pullPolicy: "IfNotPresent"
1735 |
1736 | service:
1737 | # -- The type of service used for apiserver access.
1738 | type: NodePort
1739 | # -- Optional port to use as the node port for apiserver access.
1740 | nodePort: 32379
1741 | # -- Optional loadBalancer IP address to use with type LoadBalancer.
1742 | # loadBalancerIP:
1743 |
1744 | # -- Annotations for the clustermesh-apiserver
1745 | # For GKE LoadBalancer, use annotation cloud.google.com/load-balancer-type: "Internal"
1746 | # For EKS LoadBalancer, use annotation service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
1747 | annotations: {}
1748 |
1749 | # -- Number of replicas run for the clustermesh-apiserver deployment.
1750 | replicas: 1
1751 |
1752 | # -- Node labels for pod assignment
1753 | # ref: https://kubernetes.io/docs/user-guide/node-selection/
1754 | nodeSelector: {}
1755 |
1756 | # -- Annotations to be added to clustermesh-apiserver pods
1757 | podAnnotations: {}
1758 |
1759 | # -- Labels to be added to clustermesh-apiserver pods
1760 | podLabels: {}
1761 |
1762 | # -- Resource requests and limits for the clustermesh-apiserver container of the clustermesh-apiserver deployment, such as
1763 | # resources:
1764 | # limits:
1765 | # cpu: 1000m
1766 | # memory: 1024M
1767 | # requests:
1768 | # cpu: 100m
1769 | # memory: 64Mi
1770 | resources: {}
1771 |
1772 | # -- Node tolerations for pod assignment on nodes with taints
1773 | # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
1774 | tolerations: []
1775 |
1776 | # -- clustermesh-apiserver update strategy
1777 | updateStrategy:
1778 | rollingUpdate:
1779 | maxUnavailable: 1
1780 | type: RollingUpdate
1781 |
1782 | # -- The priority class to use for clustermesh-apiserver
1783 | priorityClassName: ""
1784 |
1785 | tls:
1786 | # -- Configure automatic TLS certificates generation.
1787 | # A Kubernetes CronJob is used to generate any
1788 | # certificates not provided by the user at installation
1789 | # time.
1790 | auto:
1791 | # -- When set to true, automatically generate a CA and certificates to
1792 | # enable mTLS between clustermesh-apiserver and external workload instances.
1793 | # If set to false, the certs must be provided by setting appropriate values below.
1794 | enabled: true
1795 | # Sets the method to auto-generate certificates. Supported values:
1796 | # - helm: This method uses Helm to generate all certificates.
1797 | # - cronJob: This method uses a Kubernetes CronJob to generate any
1798 | # certificates not provided by the user at installation
1799 | # time.
1800 | # - certmanager: This method uses cert-manager to generate & rotate certificates.
1801 | {% if kube_cert_manager %}
1802 | method: certmanager
1803 | {% else %}
1804 | method: helm
1805 | {% endif %}
1806 | # -- Generated certificates validity duration in days.
1807 | certValidityDuration: 1095
1808 | # -- Schedule for certificates regeneration (regardless of their expiration date).
1809 | # Only used if method is "cronJob". If nil, then no recurring job will be created.
1810 | # Instead, only the one-shot job is deployed to generate the certificates at
1811 | # installation time.
1812 | #
1813 | # Due to the out-of-band distribution of client certs to external workloads the
1814 | # CA is (re)generated only if it is not provided as a helm value and the k8s
1815 | # secret is manually deleted.
1816 | #
1817 | # Defaults to none. Commented syntax gives midnight of the first day of every
1818 | # fourth month. For syntax, see
1819 | # https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/#schedule
1820 | # schedule: "0 0 1 */4 *"
1821 |
1822 | # [Example]
1823 | # certManagerIssuerRef:
1824 | # group: cert-manager.io
1825 | # kind: ClusterIssuer
1826 | # name: ca-issuer
1827 | # -- certmanager issuer used when clustermesh.apiserver.tls.auto.method=certmanager.
1828 | # If not specified, a CA issuer will be created.
1829 | {% if kube_cert_manager %}
1830 | certManagerIssuerRef:
1831 | group: cert-manager.io
1832 | kind: ClusterIssuer
1833 | name: letsencrypt-prod
1834 | {% else %}
1835 | certManagerIssuerRef: {}
1836 | {% endif %}
1837 | # -- base64 encoded PEM values for the ExternalWorkload CA certificate and private key.
1838 | ca:
1839 | # -- Optional CA cert. If it is provided, it will be used by the 'cronJob' method to
1840 | # generate all other certificates. Otherwise, an ephemeral CA is generated.
1841 | cert: ""
1842 | # -- Optional CA private key. If it is provided, it will be used by the 'cronJob' method to
1843 | # generate all other certificates. Otherwise, an ephemeral CA is generated.
1844 | key: ""
1845 | # -- base64 encoded PEM values for the clustermesh-apiserver server certificate and private key.
1846 | # Used if 'auto' is not enabled.
1847 | server:
1848 | cert: ""
1849 | key: ""
1850 | # -- Extra DNS names added to certificate when it's auto generated
1851 | extraDnsNames: []
1852 | # -- Extra IP addresses added to certificate when it's auto generated
1853 | extraIpAddresses: []
1854 | # -- base64 encoded PEM values for the clustermesh-apiserver admin certificate and private key.
1855 | # Used if 'auto' is not enabled.
1856 | admin:
1857 | cert: ""
1858 | key: ""
1859 | # -- base64 encoded PEM values for the clustermesh-apiserver client certificate and private key.
1860 | # Used if 'auto' is not enabled.
1861 | client:
1862 | cert: ""
1863 | key: ""
1864 | # -- base64 encoded PEM values for the clustermesh-apiserver remote cluster certificate and private key.
1865 | # Used if 'auto' is not enabled.
1866 | remote:
1867 | cert: ""
1868 | key: ""
1869 |
1870 | # -- Configure external workloads support
1871 | externalWorkloads:
1872 | # -- Enable support for external workloads, such as VMs (false by default).
1873 | enabled: false
1874 |
1875 | # -- Configure cgroup related configuration
1876 | cgroup:
1877 | autoMount:
1878 | # -- Enable auto mount of cgroup2 filesystem.
1879 | # When `autoMount` is enabled, cgroup2 filesystem is mounted at
1880 | # `cgroup.hostRoot` path on the underlying host and inside the cilium agent pod.
1881 | # If users disable `autoMount`, it's expected that users have mounted
1882 | # cgroup2 filesystem at the specified `cgroup.hostRoot` volume, and then the
1883 | # volume will be mounted inside the cilium agent pod at the same path.
1884 | enabled: true
1885 | # -- Configure cgroup root where cgroup2 filesystem is mounted on the host (see also: `cgroup.autoMount`)
1886 | hostRoot: /run/cilium/cgroupv2
1887 |
1888 | # -- Configure whether to enable auto detect of terminating state for endpoints
1889 | # in order to support graceful termination.
1890 | enableK8sTerminatingEndpoint: true
1891 |
1892 | # -- Configure whether to unload DNS policy rules on graceful shutdown
1893 | # dnsPolicyUnloadOnShutdown: false
1894 |
1895 | # -- Configure the key of the taint indicating that Cilium is not ready on the node.
1896 | # When set to a value starting with `ignore-taint.cluster-autoscaler.kubernetes.io/`, the Cluster Autoscaler will ignore the taint on its decisions, allowing the cluster to scale up.
1897 | agentNotReadyTaintKey: "node.cilium.io/agent-not-ready"
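1898 |
1899 | # Usage sketch (chart version assumed from this template's name; values file
1900 | # name illustrative): the role renders this template into a plain values file
1901 | # that Helm consumes, roughly equivalent to:
1902 | #   helm repo add cilium https://helm.cilium.io/
1903 | #   helm install cilium cilium/cilium --version 1.12.3 \
1904 | #     --namespace kube-system -f cilium-values.yaml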
--------------------------------------------------------------------------------
/templates/cilium-values-1.15.6.yaml.j2:
--------------------------------------------------------------------------------
1 |
2 | debug:
3 | # -- Enable debug logging
4 | enabled: {{ cilium_debug_log }}
5 | # verbose:
6 |
7 | # kubeConfigPath: ~/.kube/config
8 | k8sServiceHost: {{ cilium_k8s_endpoint }}
9 | k8sServicePort: 6443
10 |
11 | # -- Roll out cilium agent pods automatically when configmap is updated.
12 | rollOutCiliumPods: true
13 |
14 | # -- Node selector for cilium-agent.
15 | nodeSelector:
16 | kubernetes.io/os: linux
17 |
18 | # -- The priority class to use for cilium-agent.
19 | priorityClassName: {{ cilium_priorityClassName }}
20 |
21 | # -- Annotate k8s node upon initialization with Cilium's metadata.
22 | annotateK8sNode: true
23 |
24 | bpf:
25 | # -- Enable native IP masquerade support in eBPF
26 | masquerade: true
27 |
28 | ## -- Configure container runtime specific integration.
29 | ## Deprecated in favor of bpf.autoMount.enabled. To be removed in 1.15.
30 | #containerRuntime:
31 | # # -- Enables specific integrations for container runtimes.
32 | # # Supported values:
33 | # # - containerd
34 | # # - crio
35 | # # - docker
36 | # # - none
37 | # # - auto (automatically detect the container runtime)
38 | # integration: auto
39 | # # -- Configure the path to the container runtime control socket.
40 | # # socketPath: /path/to/runtime.sock
41 |
42 | encryption:
43 | # -- Enable transparent network encryption.
44 | enabled: {{ cilium_nodeEncryption }}
45 |
46 | # -- Encryption method. Can be either ipsec or wireguard.
47 | type: ipsec
48 |
49 | ipsec:
50 | # -- Name of the key file inside the Kubernetes secret configured via secretName.
51 | keyFile: "keys"
52 |
53 | # -- Path to mount the secret inside the Cilium pod.
54 | mountPath: "/etc/ipsec"
55 |
56 | # -- Name of the Kubernetes secret containing the encryption keys.
57 | secretName: "cilium-ipsec-keys"
58 |
59 | # -- The interface to use for encrypted traffic.
60 | interface: ""
61 |
62 | endpointRoutes:
63 | # -- Enable use of per endpoint routes instead of routing via
64 | # the cilium_host interface.
65 | enabled: true
66 |
67 | k8sNetworkPolicy:
68 | # -- Enable support for K8s NetworkPolicy
69 | enabled: true
70 |
71 | # -- Configure socket LB
72 | socketLB:
73 | # -- Enable socket LB
74 | # It must be enabled if bpf.masquerade=true
75 | enabled: true
76 |
77 | hubble:
78 | # -- Enable Hubble (true by default).
79 | enabled: {{ cilium_hubble_enable }}
80 |
81 | # -- Hubble metrics configuration.
82 | # See https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics
83 | # for more comprehensive documentation about Hubble metrics.
84 | metrics:
85 | # -- Configures the list of metrics to collect. If empty or null, metrics
86 | # are disabled.
87 | # Example:
88 | #
89 | # enabled:
90 | # - dns:query;ignoreAAAA
91 | # - drop
92 | # - tcp
93 | # - flow
94 | # - icmp
95 | # - http
96 | #
97 | # You can specify the list of metrics from the helm CLI:
98 | #
99 | # --set hubble.metrics.enabled="{dns:query;ignoreAAAA,drop,tcp,flow,icmp,http}"
100 | #
101 | enabled:
102 | - dns
103 | - drop
104 | - flow
105 | - http
106 | - icmp
107 | - port-distribution
108 | - tcp
109 | # -- Configure the port the hubble metric server listens on.
110 | port: 9965
111 |
112 | # -- TLS configuration for Hubble
113 | tls:
114 | # -- Enable mutual TLS for listenAddress. Setting this value to false is
115 | # highly discouraged as the Hubble API provides access to potentially
116 | # sensitive network flow metadata and is exposed on the host network.
117 | enabled: true
118 | # -- Configure automatic TLS certificates generation.
119 | auto:
120 | # -- Auto-generate certificates.
121 | # When set to true, automatically generate a CA and certificates to
122 | # enable mTLS between Hubble server and Hubble Relay instances. If set to
123 | # false, the certs for Hubble server need to be provided by setting
124 | # appropriate values below.
125 | enabled: true
126 | # -- Set the method to auto-generate certificates. Supported values:
127 | # - helm: This method uses Helm to generate all certificates.
128 | # - cronJob: This method uses a Kubernetes CronJob to generate any
129 | # certificates not provided by the user at installation
130 | # time.
131 | # - certmanager: This method uses cert-manager to generate & rotate certificates.
132 | method: helm
133 | # -- Generated certificates validity duration in days.
134 | certValidityDuration: 1095
135 | # -- Schedule for certificates regeneration (regardless of their expiration date).
136 | # Only used if method is "cronJob". If nil, then no recurring job will be created.
137 | # Instead, only the one-shot job is deployed to generate the certificates at
138 | # installation time.
139 | #
140 | # Defaults to midnight of the first day of every fourth month. For syntax, see
141 | # https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax
142 | schedule: "0 0 1 */4 *"
143 |
144 | # [Example]
145 | # certManagerIssuerRef:
146 | # group: cert-manager.io
147 | # kind: ClusterIssuer
148 | # name: ca-issuer
149 | # -- certmanager issuer used when hubble.tls.auto.method=certmanager.
150 | certManagerIssuerRef: {}
151 |
152 | # -- base64 encoded PEM values for the Hubble server certificate and private key
153 | server:
154 | cert: ""
155 | key: ""
156 | # -- Extra DNS names added to certificate when it's auto generated
157 | extraDnsNames: []
158 | # -- Extra IP addresses added to certificate when it's auto generated
159 | extraIpAddresses: []
160 |
161 | relay:
162 | # -- Enable Hubble Relay (requires hubble.enabled=true)
163 | enabled: {{ cilium_hubble_enable }}
164 |
165 | # -- Roll out Hubble Relay pods automatically when configmap is updated.
166 | rollOutPods: true
167 |
168 | # -- Node labels for pod assignment
169 | # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
170 | nodeSelector:
171 | kubernetes.io/os: linux
172 |
173 | # -- The priority class to use for hubble-relay
174 | priorityClassName: {{ cilium_hubble_priorityClassName }}
175 |
176 | # -- TLS configuration for Hubble Relay
177 | tls:
178 | # -- base64 encoded PEM values for the hubble-relay client certificate and private key
179 | # This keypair is presented to Hubble server instances for mTLS
180 | # authentication and is required when hubble.tls.enabled is true.
181 | # These values need to be set manually if hubble.tls.auto.enabled is false.
182 | client:
183 | cert: ""
184 | key: ""
185 | # -- base64 encoded PEM values for the hubble-relay server certificate and private key
186 | server:
187 | # When set to true, enables TLS for the Hubble Relay server
188 | # (ie: for clients connecting to the Hubble Relay API).
189 | enabled: false
190 | # These values need to be set manually if hubble.tls.auto.enabled is false.
191 | cert: ""
192 | key: ""
193 | # -- Extra DNS names added to certificate when it's auto generated
194 | extraDnsNames: []
195 | # -- Extra IP addresses added to certificate when it's auto generated
196 | extraIpAddresses: []
197 |
198 | ui:
199 | # -- Whether to enable the Hubble UI.
200 | enabled: {{ cilium_hubble_ui_enable }}
201 |
202 | # -- Roll out Hubble-ui pods automatically when configmap is updated.
203 | rollOutPods: true
204 |
205 | tls:
206 | # -- base64 encoded PEM values used to connect to hubble-relay
207 | # This keypair is presented to Hubble Relay instances for mTLS
208 | # authentication and is required when hubble.relay.tls.server.enabled is true.
209 | # These values need to be set manually if hubble.tls.auto.enabled is false.
210 | client:
211 | cert: ""
212 | key: ""
213 |
214 | # -- Node labels for pod assignment
215 | # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
216 | nodeSelector:
217 | kubernetes.io/os: linux
218 |
219 | # -- The priority class to use for hubble-ui
220 | priorityClassName: {{ cilium_hubble_priorityClassName }}
221 |
222 | # -- hubble-ui service configuration.
223 | service:
224 | # -- The type of service used for Hubble UI access, either ClusterIP or NodePort.
225 | type: ClusterIP
226 | # -- The port to use when the service type is set to NodePort.
227 | nodePort: 31235
228 |
229 | # -- Defines base url prefix for all hubble-ui http requests.
230 | # It needs to be changed if the ingress for hubble-ui is configured under a sub-path.
231 | # A trailing `/` is required for a custom path, e.g. `/service-map/`
232 | baseUrl: "{{ cilium_hubble_ui_ingress_path }}"
233 |
234 | # -- hubble-ui ingress configuration.
235 | ingress:
236 | enabled: {{ cilium_hubble_ui_ingress_enable }}
237 | annotations: {{ cilium_hubble_ui_ingress_annotations }}
238 | # kubernetes.io/ingress.class: nginx
239 | # kubernetes.io/tls-acme: "true"
240 | #cert-manager.io/cluster-issuer: letsencrypt-prod
241 | className: ""
242 | hosts:
243 | - {{ cilium_hubble_ui_ingress_host }}
244 | tls:
245 | - secretName: {{ cilium_hubble_ui_ingress_tls_host }}
246 | hosts:
247 | - {{ cilium_hubble_ui_ingress_tls_host }}
248 |
249 | ipam:
250 | # -- Configure IP Address Management mode.
251 | # ref: https://docs.cilium.io/en/stable/network/concepts/ipam/
252 | mode: "cluster-pool"
253 | operator:
254 | # -- IPv4 CIDR list range to delegate to individual nodes for IPAM.
255 | clusterPoolIPv4PodCIDRList:
256 | - {{ kube_pod_network_cidr }}
257 | # -- IPv4 CIDR mask size to delegate to individual nodes for IPAM.
258 | clusterPoolIPv4MaskSize: 24
259 | # -- IPv6 CIDR list range to delegate to individual nodes for IPAM.
260 | clusterPoolIPv6PodCIDRList: ["fd00::/104"]
261 | # -- IPv6 CIDR mask size to delegate to individual nodes for IPAM.
262 | clusterPoolIPv6MaskSize: 120
263 |
264 | # -- cilium-monitor sidecar.
265 | monitor:
266 | # -- Enable the cilium-monitor sidecar.
267 | enabled: true
268 |
269 | # -- Configure N-S (north-south) k8s service load balancing
270 | nodePort:
271 | # -- Enable the Cilium NodePort service implementation.
272 | enabled: {{ cilium_nodeport_enable }}
273 |
274 | # -- Port range to use for NodePort services.
275 | # range: "{{ cilium_nodeport_range }}"
276 |
277 | # -- Set to true to prevent applications binding to service ports.
278 | bindProtection: true
279 |
280 | # -- Append NodePort range to ip_local_reserved_ports if clash with ephemeral
281 | # ports is detected.
282 | autoProtectPortRange: true
283 |
284 | # -- Enable healthcheck nodePort server for NodePort services
285 | enableHealthCheck: true
286 |
287 | # Configure Cilium Envoy options.
288 | envoy:
289 | # -- Enable Envoy Proxy in standalone DaemonSet.
290 | enabled: false
291 |
292 | # -- Roll out cilium envoy pods automatically when configmap is updated.
293 | rollOutPods: true
294 |
295 | # -- Node selector for cilium-envoy.
296 | nodeSelector:
297 | kubernetes.io/os: linux
298 |
299 | # -- The priority class to use for cilium-envoy.
300 | priorityClassName: {{ cilium_priorityClassName }}
301 |
302 | # -- Configure TLS configuration in the agent.
303 | tls:
304 | # -- This configures how the Cilium agent loads the secrets used by TLS-aware CiliumNetworkPolicies
305 | # (namely the secrets referenced by terminatingTLS and originatingTLS).
306 | # Possible values:
307 | # - local
308 | # - k8s
309 | secretsBackend: k8s
310 |
311 | # -- Base64 encoded PEM values for the CA certificate and private key.
312 | # This can be used as common CA to generate certificates used by hubble and clustermesh components.
313 | # It is neither required nor used when cert-manager is used to generate the certificates.
314 | ca:
315 | # -- Optional CA cert. If it is provided, it will be used by cilium to
316 | # generate all other certificates. Otherwise, an ephemeral CA is generated.
317 | cert: ""
318 |
319 | # -- Optional CA private key. If it is provided, it will be used by cilium to
320 | # generate all other certificates. Otherwise, an ephemeral CA is generated.
321 | key: ""
322 |
323 | # -- Generated certificates validity duration in days. This will be used for auto generated CA.
324 | certValidityDuration: 1095
325 |
326 | operator:
327 | # -- Enable the cilium-operator component (required).
328 | enabled: true
329 |
330 | # -- Roll out cilium-operator pods automatically when configmap is updated.
331 | rollOutPods: true
332 |
333 | # -- Number of replicas to run for the cilium-operator deployment
334 | replicas: 2
335 |
336 | # -- The priority class to use for cilium-operator
337 | priorityClassName: {{ cilium_priorityClassName }}
338 |
339 | # -- Node labels for cilium-operator pod assignment
340 | # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
341 | nodeSelector:
342 | kubernetes.io/os: linux
343 |
344 |
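345 | # Note: when cilium_nodeEncryption is true, the IPsec secret named above must
346 | # exist in the namespace where the agents run (assumed kube-system) before they
347 | # start. A sketch following the Cilium IPsec guide (key spec illustrative and
348 | # version-dependent):
349 | #   kubectl create -n kube-system secret generic cilium-ipsec-keys \
350 | #     --from-literal=keys="3 rfc4106(gcm(aes)) $(dd if=/dev/urandom count=20 bs=1 2>/dev/null | xxd -p -c 64) 128"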
--------------------------------------------------------------------------------
/templates/dns01_secret.j2:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | namespace: cert-manager
5 | name: prod-route53-credentials-secret
6 | type: Opaque
7 | data:
8 | sk: {{ kube_cert_manager_challenge_dns01_sk | b64encode }}
9 |
10 |
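11 | # Example rendering (illustrative value): with
12 | #   kube_cert_manager_challenge_dns01_sk: "secret"
13 | # the b64encode filter above yields:
14 | #   sk: c2VjcmV0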
--------------------------------------------------------------------------------
/templates/flannel-net.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | labels:
6 | k8s-app: flannel
7 | pod-security.kubernetes.io/enforce: privileged
8 | name: kube-flannel
9 | ---
10 | apiVersion: v1
11 | kind: ServiceAccount
12 | metadata:
13 | labels:
14 | k8s-app: flannel
15 | name: flannel
16 | namespace: kube-flannel
17 | ---
18 | apiVersion: rbac.authorization.k8s.io/v1
19 | kind: ClusterRole
20 | metadata:
21 | labels:
22 | k8s-app: flannel
23 | name: flannel
24 | rules:
25 | - apiGroups:
26 | - ""
27 | resources:
28 | - pods
29 | verbs:
30 | - get
31 | - apiGroups:
32 | - ""
33 | resources:
34 | - nodes
35 | verbs:
36 | - get
37 | - list
38 | - watch
39 | - apiGroups:
40 | - ""
41 | resources:
42 | - nodes/status
43 | verbs:
44 | - patch
45 | - apiGroups:
46 | - networking.k8s.io
47 | resources:
48 | - clustercidrs
49 | verbs:
50 | - list
51 | - watch
52 | ---
53 | apiVersion: rbac.authorization.k8s.io/v1
54 | kind: ClusterRoleBinding
55 | metadata:
56 | labels:
57 | k8s-app: flannel
58 | name: flannel
59 | roleRef:
60 | apiGroup: rbac.authorization.k8s.io
61 | kind: ClusterRole
62 | name: flannel
63 | subjects:
64 | - kind: ServiceAccount
65 | name: flannel
66 | namespace: kube-flannel
67 | ---
68 | apiVersion: v1
69 | data:
70 | cni-conf.json: |
71 | {
72 | "name": "cbr0",
73 | "cniVersion": "0.3.1",
74 | "plugins": [
75 | {
76 | "type": "flannel",
77 | "delegate": {
78 | "hairpinMode": true,
79 | "isDefaultGateway": true
80 | }
81 | },
82 | {
83 | "type": "portmap",
84 | "capabilities": {
85 | "portMappings": true
86 | }
87 | }
88 | ]
89 | }
90 | net-conf.json: |
91 | {
92 | "Network": "{{kube_pod_network_cidr}}",
93 | "Backend": {
94 | "Type": "vxlan"
95 | }
96 | }
97 | kind: ConfigMap
98 | metadata:
99 | labels:
100 | app: flannel
101 | k8s-app: flannel
102 | tier: node
103 | name: kube-flannel-cfg
104 | namespace: kube-flannel
105 | ---
106 | apiVersion: apps/v1
107 | kind: DaemonSet
108 | metadata:
109 | labels:
110 | app: flannel
111 | k8s-app: flannel
112 | tier: node
113 | name: kube-flannel-ds
114 | namespace: kube-flannel
115 | spec:
116 | selector:
117 | matchLabels:
118 | app: flannel
119 | k8s-app: flannel
120 | template:
121 | metadata:
122 | labels:
123 | app: flannel
124 | k8s-app: flannel
125 | tier: node
126 | spec:
127 | affinity:
128 | nodeAffinity:
129 | requiredDuringSchedulingIgnoredDuringExecution:
130 | nodeSelectorTerms:
131 | - matchExpressions:
132 | - key: kubernetes.io/os
133 | operator: In
134 | values:
135 | - linux
136 | containers:
137 | - args:
138 | - --ip-masq
139 | - --kube-subnet-mgr
140 | {% for arg in kube_flanneld_extra_args %}
141 | - {{ arg }}
142 | {% endfor %}
143 | command:
144 | - /opt/bin/flanneld
145 | env:
146 | - name: POD_NAME
147 | valueFrom:
148 | fieldRef:
149 | fieldPath: metadata.name
150 | - name: POD_NAMESPACE
151 | valueFrom:
152 | fieldRef:
153 | fieldPath: metadata.namespace
154 | - name: EVENT_QUEUE_DEPTH
155 | value: "5000"
156 | image: docker.io/flannel/flannel:v0.24.2
157 | name: kube-flannel
158 | resources:
159 | requests:
160 | cpu: 100m
161 | memory: 50Mi
162 | securityContext:
163 | capabilities:
164 | add:
165 | - NET_ADMIN
166 | - NET_RAW
167 | privileged: false
168 | volumeMounts:
169 | - mountPath: /run/flannel
170 | name: run
171 | - mountPath: /etc/kube-flannel/
172 | name: flannel-cfg
173 | - mountPath: /run/xtables.lock
174 | name: xtables-lock
175 | hostNetwork: true
176 | initContainers:
177 | - args:
178 | - -f
179 | - /flannel
180 | - /opt/cni/bin/flannel
181 | command:
182 | - cp
183 | image: docker.io/flannel/flannel-cni-plugin:v1.4.0-flannel1
184 | name: install-cni-plugin
185 | volumeMounts:
186 | - mountPath: /opt/cni/bin
187 | name: cni-plugin
188 | - args:
189 | - -f
190 | - /etc/kube-flannel/cni-conf.json
191 | - /etc/cni/net.d/10-flannel.conflist
192 | command:
193 | - cp
194 | image: docker.io/flannel/flannel:v0.24.2
195 | name: install-cni
196 | volumeMounts:
197 | - mountPath: /etc/cni/net.d
198 | name: cni
199 | - mountPath: /etc/kube-flannel/
200 | name: flannel-cfg
201 | priorityClassName: system-node-critical
202 | serviceAccountName: flannel
203 | tolerations:
204 | - effect: NoSchedule
205 | operator: Exists
206 | volumes:
207 | - hostPath:
208 | path: /run/flannel
209 | name: run
210 | - hostPath:
211 | path: /opt/cni/bin
212 | name: cni-plugin
213 | - hostPath:
214 | path: /etc/cni/net.d
215 | name: cni
216 | - configMap:
217 | name: kube-flannel-cfg
218 | name: flannel-cfg
219 | - hostPath:
220 | path: /run/xtables.lock
221 | type: FileOrCreate
222 | name: xtables-lock
223 |
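224 | # Example rendering of the flanneld args loop above (illustrative variable):
225 | # with kube_flanneld_extra_args: ["--iface=eth1"], the container args become:
226 | #   - --ip-masq
227 | #   - --kube-subnet-mgr
228 | #   - --iface=eth1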
--------------------------------------------------------------------------------
/templates/ingress-values.j2:
--------------------------------------------------------------------------------
1 | controller:
2 | service:
3 | type: NodePort
4 | nodePorts:
5 | http: 80
6 | https: 443
7 | tolerations:
8 | - key: node-role.kubernetes.io/master
9 | effect: NoSchedule
10 | - key: node-role.kubernetes.io/control-plane
11 | effect: NoSchedule
12 | ingressClassResource:
13 | default: true
14 | nodeSelector:
15 | kubernetes.io/os: linux
16 | {% if kube_ingress_in_master %}
17 | node-role.kubernetes.io/control-plane: ""
18 | {% endif %}
19 | admissionWebhooks:
20 | patch:
21 | tolerations:
22 | - key: node-role.kubernetes.io/master
23 | effect: NoSchedule
24 | - key: node-role.kubernetes.io/control-plane
25 | effect: NoSchedule
26 | defaultBackend:
27 | tolerations:
28 | - key: node-role.kubernetes.io/master
29 | effect: NoSchedule
30 | - key: node-role.kubernetes.io/control-plane
31 | effect: NoSchedule
32 | nodeSelector:
33 | kubernetes.io/os: linux
34 | {% if kube_ingress_in_master %}
35 | node-role.kubernetes.io/control-plane: ""
36 | {% endif %}
37 |
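38 | # Usage sketch (values file name illustrative): the rendered values feed the
39 | # ingress-nginx chart, roughly:
40 | #   helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
41 | #   helm install ingress-nginx ingress-nginx/ingress-nginx \
42 | #     --namespace ingress-nginx --create-namespace -f ingress-values.yaml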
--------------------------------------------------------------------------------
/templates/kubeadm-config-join.j2:
--------------------------------------------------------------------------------
1 | ---
2 | kind: JoinConfiguration
3 | {% if kube_version is version_compare('1.22.0', '<') %}
4 | apiVersion: kubeadm.k8s.io/v1beta2
5 | {% else %}
6 | apiVersion: kubeadm.k8s.io/v1beta3
7 | {% endif %}
8 | nodeRegistration:
9 | kubeletExtraArgs:
10 | cgroup-driver: systemd
11 | {% for key, value in kubelet_extra_args_dict.items() %}
12 | {{key}}: {{value}}
13 | {% endfor %}
14 | {% if kube_cri_runtime == "docker" %}
15 | criSocket: "unix:///run/cri-dockerd.sock"
16 | {% endif %}
17 | discovery:
18 | bootstrapToken:
19 | token: "{{kube_token}}"
20 | apiServerEndpoint: {{kube_server}}:6443
21 | unsafeSkipCAVerification: true
22 |
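23 | # Usage sketch (rendered path illustrative): a worker joins the cluster with
24 | #   kubeadm join --config /tmp/kubeadm-config-join.yaml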
--------------------------------------------------------------------------------
/templates/kubeadm-config.j2:
--------------------------------------------------------------------------------
1 | ---
2 | kind: ClusterConfiguration
3 | {% if kube_version is version_compare('1.22.0', '<') %}
4 | apiVersion: kubeadm.k8s.io/v1beta2
5 | {% else %}
6 | apiVersion: kubeadm.k8s.io/v1beta3
7 | {% endif %}
8 | networking:
9 | podSubnet: "{{kube_pod_network_cidr}}" # --pod-network-cidr
10 | apiServer:
11 | extraArgs:
12 | advertise-address: "{{kube_api_server}}" # --apiserver-advertise-address
13 | {% if IM_NODE_PUBLIC_IP is defined %}
14 | certSANs:
15 | - "{{ IM_NODE_PUBLIC_IP }}"
16 | {% if kube_api_server != "0.0.0.0" %}
17 | - "{{ kube_api_server }}"
18 | {% endif %}
19 | {% if kube_public_dns_name != "" %}
20 | - "{{ kube_public_dns_name }}"
21 | {% endif %}
22 | {% if IM_NODE_PRIVATE_IP is defined %}
23 | - "{{ IM_NODE_PRIVATE_IP }}"
24 | {% endif %}
25 | {% endif %}
26 | {% if kube_api_server != "0.0.0.0" %}
27 | controlPlaneEndpoint: "{{kube_api_server}}:6443"
28 | {% endif %}
29 | ---
30 | kind: KubeletConfiguration
31 | apiVersion: kubelet.config.k8s.io/v1beta1
32 | cgroupDriver: systemd
33 | ---
34 | kind: InitConfiguration
35 | {% if kube_version is version_compare('1.22.0', '<') %}
36 | apiVersion: kubeadm.k8s.io/v1beta2
37 | {% else %}
38 | apiVersion: kubeadm.k8s.io/v1beta3
39 | {% endif %}
40 | bootstrapTokens:
41 | - token: "{{kube_token}}" # --token
42 | description: "kubeadm bootstrap token"
43 | ttl: "{{kube_token_ttl}}" # --token-ttl
44 | {% if kube_cri_runtime == "docker" %}
45 | nodeRegistration:
46 | criSocket: "unix:///run/cri-dockerd.sock"
47 | {% endif %}
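48 |
49 | # Usage sketch (rendered path illustrative): the control plane is initialized with
50 | #   kubeadm init --config /tmp/kubeadm-config.yaml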
--------------------------------------------------------------------------------
/templates/kubeapps-ingress.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.k8s.io/v1
3 | kind: Ingress
4 | metadata:
5 | namespace: kubeapps
6 | name: kubeapps
7 | annotations:
8 | kubernetes.io/ingress.class: nginx
9 | nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
10 | {% if kube_cert_manager %}
11 | cert-manager.io/cluster-issuer: "letsencrypt-prod"
12 | {% if kube_cert_manager_challenge == 'dns01' %}
13 | certmanager.k8s.io/acme-challenge-type: dns01
14 | certmanager.k8s.io/acme-dns01-provider: route53
15 | {% endif %}
16 | {% endif %}
17 | spec:
18 | {% if kube_cert_manager %}
19 | tls:
20 | - hosts:
21 | - {{ public_hostname }}
22 | {% if kube_cert_manager_wildcard_cert_dns_name == '' %}
23 | secretName: {{ public_hostname }}
24 | {% endif %}
25 | {% endif %}
26 | rules:
27 | {% if kube_cert_manager %}
28 | - host: {{ public_hostname }}
29 | http:
30 | {% else %}
31 | - http:
32 | {% endif %}
33 | paths:
34 | - backend:
35 | service:
36 | name: kubeapps
37 | port:
38 | number: 80
39 | pathType: Prefix
40 | path: /kubeapps
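41 |
42 | # Quick check once applied (hostname illustrative): Kubeapps should answer
43 | # under the /kubeapps prefix, e.g.:
44 | #   curl -k https://<public_hostname>/kubeapps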
--------------------------------------------------------------------------------
/templates/kubernetes-dashboard-ingress.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.k8s.io/v1
3 | kind: Ingress
4 | metadata:
5 | name: kubernetes-dashboard
6 | namespace: kubernetes-dashboard
7 | annotations:
8 | nginx.ingress.kubernetes.io/use-regex: "true"
9 | nginx.ingress.kubernetes.io/rewrite-target: /$1
10 | nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
11 | {% if kube_cert_manager %}
12 | cert-manager.io/cluster-issuer: "letsencrypt-prod"
13 | {% if kube_cert_manager_challenge == 'dns01' %}
14 | certmanager.k8s.io/acme-challenge-type: dns01
15 | certmanager.k8s.io/acme-dns01-provider: route53
16 | {% endif %}
17 | {% endif %}
18 | spec:
19 | ingressClassName: nginx
20 | {% if kube_cert_manager and public_hostname is defined %}
21 | tls:
22 | - hosts:
23 | - {{ public_hostname }}
24 | {% if kube_cert_manager_wildcard_cert_dns_name == '' %}
25 | secretName: {{ public_hostname }}
26 | {% endif %}
27 | {% endif %}
28 | rules:
29 | {% if kube_cert_manager and public_hostname is defined %}
30 | - host: {{ public_hostname }}
31 | http:
32 | {% else %}
33 | - http:
34 | {% endif %}
35 | paths:
36 | - path: /dashboard/?(.*)
37 | backend:
38 | service:
39 | name: kubernetes-dashboard
40 | port:
41 | number: 8443
42 | pathType: ImplementationSpecific
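43 |
44 | # How the rewrite above works: use-regex plus rewrite-target /$1 forwards only
45 | # the part captured after /dashboard/, so a request for /dashboard/assets/logo.svg
46 | # reaches the kubernetes-dashboard service (port 8443) as /assets/logo.svg.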
--------------------------------------------------------------------------------
/templates/kyverno-values.j2:
--------------------------------------------------------------------------------
1 | namespace: kyverno
2 | createSelfSignedCert: false
3 | config:
4 | resourceFilters: "{{ kyverno_config_notapplypolicies }}"
5 |
6 | rbac:
7 | create: true
8 | serviceAccount:
9 | create: true
10 | name: kyverno
11 |
12 | image:
13 | repository: {{ kyverno_image }}
14 | tag: {{ kyverno_image_tag }}
15 |
16 | initImage:
17 | repository: {{ kyverno_initContainer_image }}
18 | tag: {{ kyverno_initContainer_image_tag }}
19 |
20 | serviceMonitor:
21 | enabled: false
22 |
23 | podSecurityStandard: {{ kyverno_podSecurityStandard }}
24 | podSecuritySeverity: {{ kyverno_podSecuritySeverity }}
25 | validationFailureAction: {{ kyverno_validationFailureAction }}
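26 |
27 | # Usage sketch (values file name illustrative): these values feed the kyverno chart:
28 | #   helm repo add kyverno https://kyverno.github.io/kyverno/
29 | #   helm install kyverno kyverno/kyverno --namespace kyverno \
30 | #     --create-namespace -f kyverno-values.yaml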
--------------------------------------------------------------------------------
/templates/nvidia-device-plugin.j2:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 | name: nvidia-device-plugin-daemonset
5 | namespace: kube-system
6 | spec:
7 | selector:
8 | matchLabels:
9 | name: nvidia-device-plugin-ds
10 | updateStrategy:
11 | type: RollingUpdate
12 | template:
13 | metadata:
14 | labels:
15 | name: nvidia-device-plugin-ds
16 | spec:
17 | tolerations:
18 | - key: nvidia.com/gpu
19 | operator: Exists
20 | effect: NoSchedule
21 | # Mark this pod as a critical add-on; when enabled, the critical add-on
22 | # scheduler reserves resources for critical add-on pods so that they can
23 | # be rescheduled after a failure.
24 | # See https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
25 | priorityClassName: "system-node-critical"
26 | containers:
27 | - image: nvcr.io/nvidia/k8s-device-plugin:{{kube_nvidia_device_plugin_version}}
28 | name: nvidia-device-plugin-ctr
29 | env:
30 | - name: FAIL_ON_INIT_ERROR
31 | value: "false"
32 | securityContext:
33 | allowPrivilegeEscalation: false
34 | capabilities:
35 | drop: ["ALL"]
36 | volumeMounts:
37 | - name: device-plugin
38 | mountPath: /var/lib/kubelet/device-plugins
39 | volumes:
40 | - name: device-plugin
41 | hostPath:
42 | path: /var/lib/kubelet/device-plugins
43 |
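44 | # Once the plugin is running, pods request GPUs through the extended resource
45 | # it advertises; a minimal container spec sketch:
46 | #   resources:
47 | #     limits:
48 | #       nvidia.com/gpu: 1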
--------------------------------------------------------------------------------
/templates/persistent-volumes.j2:
--------------------------------------------------------------------------------
1 | # Persistent Volumes backed by NFS
2 | {% for item in kube_persistent_volumes %}
3 | ---
4 | apiVersion: v1
5 | kind: PersistentVolume
6 | metadata:
7 | namespace: "{{item.namespace}}"
8 | name: "{{item.name}}"
9 | labels:
10 | app: "{{item.label}}"
11 | spec:
12 | capacity:
13 | storage: "{{item.capacity_storage}}"
14 | accessModes:
15 | - ReadWriteOnce
16 | mountOptions:
17 | - nolock
18 | - hard
19 | nfs:
20 | path: "{{item.nfs_path}}"
21 | server: kubeserver
22 | {% endfor %}
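23 |
24 | # Example variable consumed by the loop above (illustrative values):
25 | #   kube_persistent_volumes:
26 | #     - namespace: default
27 | #       name: data-pv
28 | #       label: data
29 | #       capacity_storage: 10Gi
30 | #       nfs_path: /pv/data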
--------------------------------------------------------------------------------
/templates/prod_issuer.j2:
--------------------------------------------------------------------------------
1 | apiVersion: cert-manager.io/v1
2 | kind: ClusterIssuer
3 | metadata:
4 | name: letsencrypt-prod
5 | namespace: cert-manager
6 | spec:
7 | acme:
8 | # The ACME server URL
9 | server: https://acme-v02.api.letsencrypt.org/directory
10 | # Email address used for ACME registration
11 | email: {{ kube_cert_user_email }}
12 | # Name of a secret used to store the ACME account private key
13 | privateKeySecretRef:
14 | name: letsencrypt-prod
15 | solvers:
16 | {% if kube_cert_manager_challenge == "dns01" %}
17 | # Enable the DNS-01 challenge provider
18 | - selector:
19 | dnsZones:
20 | - "{{ kube_cert_manager_challenge_dns01_domain }}"
21 | dns01:
22 | route53:
23 | region: us-east-1
24 | accessKeyID: {{ kube_cert_manager_challenge_dns01_ak }}
25 | secretAccessKeySecretRef:
26 | name: prod-route53-credentials-secret
27 | key: sk
28 | {% else %}
29 | # Enable the HTTP-01 challenge provider
30 | - http01:
31 | ingress:
32 | class: nginx
33 | {% endif %}
34 |
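35 | # Quick check once applied: the issuer should reach the Ready condition, e.g.:
36 | #   kubectl get clusterissuer letsencrypt-prod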
--------------------------------------------------------------------------------
/templates/wildcard_cert.j2:
--------------------------------------------------------------------------------
1 | apiVersion: cert-manager.io/v1
2 | kind: Certificate
3 | metadata:
4 | name: le-crt
5 | spec:
6 | secretName: tls-secret
7 | issuerRef:
8 | kind: ClusterIssuer
9 | name: letsencrypt-prod
10 | commonName: "{{ kube_cert_manager_wildcard_cert_dns_name }}"
11 | dnsNames:
12 | - "{{ kube_cert_manager_wildcard_cert_dns_name }}"
13 |
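14 | # Example rendering (illustrative value): with
15 | #   kube_cert_manager_wildcard_cert_dns_name: "*.example.com"
16 | # the wildcard certificate covers *.example.com and is stored in the
17 | # tls-secret Secret referenced by secretName above.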
--------------------------------------------------------------------------------
/tests/inventory:
--------------------------------------------------------------------------------
1 | localhost ansible_connection=local
2 |
--------------------------------------------------------------------------------
/tests/test-crio.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | roles:
4 | - role: ansible-role-kubernetes
5 | kube_install_metrics: true
6 | kube_cert_manager: false
7 | kube_install_kubeapps: false
8 | kube_install_kyverno: false
9 | kube_deploy_dashboard: true
10 | kube_install_ingress: true
11 | kube_public_dns_name: test.domain.com
12 | kube_version: 1.28.3
13 | kube_cri_runtime: crio
14 | kubelet_extra_args_dict:
15 | node-labels: somelabel
16 |
--------------------------------------------------------------------------------
/tests/test-docker.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | roles:
4 | - role: ansible-role-kubernetes
5 | kube_install_metrics: true
6 | kube_cert_manager: true
7 | kube_install_kubeapps: false
8 | kube_install_kyverno: false
9 | kube_deploy_dashboard: false
10 | kube_install_ingress: false
11 | kube_public_dns_name: test.domain.com
12 | kube_version: 1.30.2
13 | kube_cri_runtime: docker
14 | kube_cri_runtime_install: false
15 | kube_install_docker_pip: false
16 | kubelet_extra_args_dict:
17 | node-labels: somelabel
18 |
--------------------------------------------------------------------------------
/tests/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | roles:
4 | - role: ansible-role-kubernetes
5 | kube_install_metrics: false
6 | kube_cert_manager: false
7 | kube_install_kubeapps: false
8 | kube_install_kyverno: false
9 | kube_deploy_dashboard: true
10 | kube_install_ingress: true
11 | kube_public_dns_name: test.domain.com
12 | kube_version: 1.31.1
13 | kube_cri_runtime: containerd
14 | kube_install_docker_pip: true
15 | kubelet_extra_args_dict:
16 | node-labels: somelabel
17 |
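18 | # Run locally against the bundled inventory, e.g.:
19 | #   ansible-playbook tests/test.yml -i tests/inventory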
--------------------------------------------------------------------------------