├── .gitignore
├── README.md
├── ansible
│   ├── ansible.cfg
│   ├── hosts
│   │   ├── credentials
│   │   │   └── kubeadm_certificate_key.creds
│   │   ├── group_vars
│   │   │   ├── all
│   │   │   │   ├── all.yml
│   │   │   │   ├── aws.yml
│   │   │   │   ├── azure.yml
│   │   │   │   ├── containerd.yml
│   │   │   │   ├── coreos.yml
│   │   │   │   ├── cri-o.yml
│   │   │   │   ├── docker.yml
│   │   │   │   ├── gcp.yml
│   │   │   │   ├── oci.yml
│   │   │   │   ├── offline.yml
│   │   │   │   ├── openstack.yml
│   │   │   │   └── vsphere.yml
│   │   │   ├── etcd.yml
│   │   │   └── k8s_cluster
│   │   │       ├── addons.yml
│   │   │       ├── k8s-cluster.yml
│   │   │       ├── k8s-net-calico.yml
│   │   │       ├── k8s-net-canal.yml
│   │   │       ├── k8s-net-cilium.yml
│   │   │       ├── k8s-net-flannel.yml
│   │   │       ├── k8s-net-kube-router.yml
│   │   │       ├── k8s-net-macvlan.yml
│   │   │       └── k8s-net-weave.yml
│   │   ├── groups
│   │   └── inventory_aws_ec2.yml
│   ├── infra.yaml
│   ├── ping.yaml
│   └── requirements.txt
├── keys
│   └── .keep
├── pictures
│   ├── kube.png
│   └── kube2.png
└── terraform
    ├── 0-aws.tf
    ├── 1-vpc.tf
    ├── 2-etcd.tf
    ├── 3-workers.tf
    ├── 4-controllers.tf
    ├── 5-iam.tf
    └── variables.tf
/.gitignore:
--------------------------------------------------------------------------------
1 | .terraform
2 | *.pem
3 | *.pub
4 | tf-kube
5 | terraform.tfstate
6 | terraform.tfstate.backup
7 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Kubernetes Setup using AWS and Terraform
2 |
3 |
4 |
5 | This repository is based on opencredo/k8s-terraform-ansible-sample, which did not use Kubespray.
6 |
7 | > https://github.com/opencredo/k8s-terraform-ansible-sample
8 |
9 | Also, this repository creates a Kubernetes cluster with 3 worker, 3 master, and 3 etcd nodes by default. You can adjust the number of each node type by changing the **Variables** below.
10 |
11 |
12 |
13 | ## Step 1. Install Terraform
14 |
15 | 1. All steps below are run inside a Docker container, for beginners' convenience.
16 |
17 | ```
18 | # docker run -it --name terraform-aws-kube -h terraform-aws-kube ubuntu:16.04
19 | ```
20 |
21 | 2. Install required packages.
22 |
23 | ```
24 | $ apt update && apt install git python python-pip unzip wget vim -y && \
25 | git clone https://github.com/alicek106/aws-terraform-kubernetes.git && \
26 | cd aws-terraform-kubernetes/terraform
27 | ```
28 |
29 | 3. Download the Terraform binary.
30 |
31 | ```
32 | $ wget https://releases.hashicorp.com/terraform/1.0.11/terraform_1.0.11_linux_amd64.zip && \
33 | unzip terraform_1.0.11_linux_amd64.zip && \
34 | rm terraform_1.0.11_linux_amd64.zip && \
35 | mv terraform /usr/bin && chmod +x /usr/bin/terraform
36 | ```
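
Optionally, confirm the binary is on your PATH before moving on (a quick sanity check, not required by the original steps):

```
$ terraform version
```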
37 |
38 | 4. Export your own AWS Access / Secret keys
39 |
40 | ```
41 | $ export AWS_ACCESS_KEY_ID=
42 | $ export AWS_SECRET_ACCESS_KEY=
43 | ```
44 |
45 | 5. Initialize terraform and generate your SSH key pair for aws_key_pair
46 |
47 | ```
48 | $ terraform init && ssh-keygen -t rsa -N "" -f ../keys/tf-kube
49 | ```
50 |
51 | 6. Adjust the number of ```etcd```, ```worker```, and ```master``` nodes as described in **Step 2** below.
52 | 7. Create all objects in AWS. This will create the VPC, subnets, EC2 instances, etc.
53 |
54 | ```
55 | $ terraform apply
56 | ```
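
If you want to review what will be created before actually applying it, `terraform plan` prints the execution plan without changing anything (optional; not part of the original steps):

```
$ terraform plan
```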
57 |
58 |
59 |
60 | ## Step 2. Set Variables in variables.tf
61 |
62 | You can change the configuration in ```variables.tf```, such as the number of each node type.
63 |
64 | - **number_of_controller** : The number of nodes that act only in the master role.
65 | - **number_of_etcd** : The number of nodes that act only in the etcd role.
66 | - **number_of_controller_etcd** : The number of nodes that run etcd and master **at the same time**.
67 | - **number_of_worker** : The number of workers.
68 |
69 | It is recommended that both **[etcd + controller_etcd]** and **[controller + controller_etcd]** add up to an odd number. For example, the setting below is desirable and maps to the inventory shown after it. Note that both the setting and the inventory are just examples, not actual configuration from this repository.
70 |
71 | ```
72 | number_of_controller = 2
73 |
74 | number_of_etcd = 2
75 |
76 | number_of_controller_etcd = 1
77 | ```
78 |
79 | ... is equivalent to
80 |
81 | ```
82 | [kube_control_plane:children]
83 | Instance-A # (1st master)
84 | Instance-B # (2nd master)
85 | Instance-C # (3rd master) (1st etcd)
86 |
87 | [etcd:children]
88 | Instance-C # (3rd master) (1st etcd)
89 | Instance-D # (2nd etcd)
90 | Instance-E # (3rd etcd)
91 |
92 | [kube_node:children]
93 | ...
94 |
95 | ```
96 |
97 | The above is only an example. In [groups](./ansible/hosts/groups), instances are mapped to groups such as `_controller`. These groups are selected by [inventory_aws_ec2.yml](./ansible/hosts/inventory_aws_ec2.yml) using `filters`. If you need different filters to select the right instances (e.g. because you changed the Owner tag, or the region, which defaults to ap-northeast-2), edit the inventory_aws_ec2.yml file.
98 |
99 | [Optional] If you want to change the cluster ID, set ```cluster_id_tag``` to a value other than ```alice```.
100 |
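As an alternative to editing ```variables.tf``` directly, the same values can be overridden at apply time with standard Terraform variable flags. This is only a sketch; the variable names are the ones listed above, and the values are illustrative:

```
$ terraform apply \
    -var="number_of_controller=2" \
    -var="number_of_etcd=2" \
    -var="number_of_controller_etcd=1" \
    -var="number_of_worker=3"
```
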
101 | ## Step 3. Ansible and Kubespray
102 |
103 | First of all, edit the `inventory_aws_ec2.yml` file to match your own configuration, e.g. region and other tags.
104 |
105 | 1. In the ansible directory, install all dependency packages.
106 |
107 | ```
108 | $ cd ../ansible && pip3 install -r requirements.txt
109 | ```
110 |
111 | 2. Install Python-related packages on all EC2 instances using the Ansible **raw** module.
112 |
113 | ```
114 | $ ansible-playbook --private-key ../keys/tf-kube infra.yaml
115 | ```
116 |
117 | To check that it works, use the Ansible **ping** module:
118 |
119 | ```
120 | $ ansible --private-key ../keys/tf-kube -m ping all
121 | ```
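
You can also confirm which hosts and groups the dynamic inventory resolved (optional; this uses the inventory path configured in `ansible.cfg`):

```
$ ansible-inventory --graph
```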
122 |
123 | 3. Download Kubespray. You can choose another version, but I used Kubespray v2.18.1 :D
124 |
125 | ```
126 | $ wget https://github.com/kubernetes-sigs/kubespray/archive/v2.18.1.zip && \
127 | unzip v2.18.1.zip && rm v2.18.1.zip
128 | ```
129 | ----
130 | **Warning!** The Kubespray variables in ansible/hosts/group_vars/ are copied from v2.18.1. **If you want to use another version of Kubespray**, you have to remove the ansible/hosts/group_vars directory and copy the sample variables directory from that Kubespray version. It is usually located in kubespray-x.x.x/inventory/sample/group_vars.
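
A sketch of that swap, run from the `ansible` directory and keeping the version placeholder from the warning above:

```
$ rm -rf hosts/group_vars && \
  cp -r kubespray-x.x.x/inventory/sample/group_vars hosts/group_vars
```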
131 |
132 |
133 | 4. Install Kubernetes. That's all.
134 |
135 | ```
136 | $ ansible-playbook -b --private-key \
137 | ../keys/tf-kube kubespray-2.18.1/cluster.yml
138 | ```
139 |
140 | ## Test
141 |
142 | SSH to your master instance and list the nodes.
143 |
144 | ```
145 | root@aws-kube:/aws-terraform-kubernetes/ansible# ssh -i ../keys/tf-kube ubuntu@
146 |
147 | ...
148 | Last login: Mon Mar 25 10:03:32 2019 from 13.124.49.60
149 | ubuntu@ip-10-43-0-40:~$ sudo su
150 | root@ip-10-43-0-40:/home/ubuntu# kubectl get nodes
151 | NAME                                                       STATUS   ROLES                  AGE     VERSION
152 | ec2-13-125-117-199.ap-northeast-2.compute.amazonaws.com   Ready    <none>                 4m13s   v1.22.8
153 | ec2-13-125-54-209.ap-northeast-2.compute.amazonaws.com    Ready    control-plane,master   5m34s   v1.22.8
154 | ec2-13-209-20-227.ap-northeast-2.compute.amazonaws.com    Ready    <none>                 4m12s   v1.22.8
155 | ec2-3-34-94-130.ap-northeast-2.compute.amazonaws.com      Ready    control-plane,master   6m1s    v1.22.8
156 | ec2-3-38-165-142.ap-northeast-2.compute.amazonaws.com     Ready    control-plane,master   5m22s   v1.22.8
157 | ec2-52-79-249-245.ap-northeast-2.compute.amazonaws.com    Ready    <none>                 4m13s   v1.22.8
158 | ```
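
As an optional extra check, you can schedule a test workload and confirm that it lands on one of the worker nodes (plain kubectl commands, nothing specific to this repository):

```
$ kubectl create deployment nginx --image=nginx
$ kubectl get pods -o wide
```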
159 |
160 | 
161 |
162 | ## Cleanup
163 |
164 | In the terraform directory, run the command below. It will destroy all objects, including the EC2 instances.
165 |
166 | ```
167 | $ terraform destroy
168 | ```
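
Note that the SSH key pair generated in Step 1 is a local file and is not removed by `terraform destroy`, so delete it yourself if you no longer need it:

```
$ rm -f ../keys/tf-kube ../keys/tf-kube.pub
```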
169 |
170 | ## Limitations
171 |
172 | - It assumed that a **master** node also acts as an **etcd** node; the **etcd** and **master** roles should be separable. (solved)
173 | - Health checking the master nodes over https on port 6443 through the ELB is not possible. (It is recommended to run another proxy such as nginx on the master nodes for health checks; you have to deploy that proxy yourself :D)
174 | - The node IP range is limited because node IPs are allocated at VPC CIDR + 10, 20, 30, etc. This should be changed if you want to use this setup in a production environment.
175 |
--------------------------------------------------------------------------------
/ansible/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | remote_user = ubuntu
3 | host_key_checking = False
4 | inventory = ./hosts/
5 |
--------------------------------------------------------------------------------
/ansible/hosts/credentials/kubeadm_certificate_key.creds:
--------------------------------------------------------------------------------
1 | DceadE86480BfDbb109C4Af021Ac3aAc0c79BE9D06e0c193FbDaCFF9cAd8f911
2 |
--------------------------------------------------------------------------------
/ansible/hosts/group_vars/all/all.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ## Directory where etcd data stored
3 | etcd_data_dir: /var/lib/etcd
4 |
5 | ## Experimental kubeadm etcd deployment mode. Available only for new deployment
6 | etcd_kubeadm_enabled: false
7 |
8 | ## Directory where the binaries will be installed
9 | bin_dir: /usr/local/bin
10 |
11 | ## The access_ip variable is used to define how other nodes should access
12 | ## the node. This is used in flannel to allow other flannel nodes to see
13 | ## this node, for example. The access_ip is really useful in AWS and Google
14 | ## environments where the nodes are accessed remotely by the "public" ip,
15 | ## but don't know about that address themselves.
16 | # access_ip: 1.1.1.1
17 |
18 |
19 | ## External LB example config
20 | ## apiserver_loadbalancer_domain_name: "elb.some.domain"
21 | # loadbalancer_apiserver:
22 | # address: 1.2.3.4
23 | # port: 1234
24 |
25 | ## Internal loadbalancers for apiservers
26 | # loadbalancer_apiserver_localhost: true
27 | # valid options are "nginx" or "haproxy"
28 | # loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy"
29 |
30 | ## If the cilium is going to be used in strict mode, we can use the
31 | ## localhost connection and not use the external LB. If this parameter is
32 | ## not specified, the first node to connect to kubeapi will be used.
33 | # use_localhost_as_kubeapi_loadbalancer: true
34 |
35 | ## The local load balancer should use this port,
36 | ## and it must be set to port 6443
37 | loadbalancer_apiserver_port: 6443
38 |
39 | ## If loadbalancer_apiserver_healthcheck_port variable defined, enables proxy liveness check for nginx.
40 | loadbalancer_apiserver_healthcheck_port: 8081
41 |
42 | ### OTHER OPTIONAL VARIABLES
43 |
44 | ## Upstream dns servers
45 | # upstream_dns_servers:
46 | # - 8.8.8.8
47 | # - 8.8.4.4
48 |
49 | ## There are some changes specific to the cloud providers
50 | ## for instance we need to encapsulate packets with some network plugins
51 | ## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
52 | ## When openstack is used make sure to source in the openstack credentials
53 | ## like you would do when using openstack-client before starting the playbook.
54 | # cloud_provider:
55 |
56 | ## When cloud_provider is set to 'external', you can set the cloud controller to deploy
57 | ## Supported cloud controllers are: 'openstack' and 'vsphere'
58 | ## When openstack or vsphere are used make sure to source in the required fields
59 | # external_cloud_provider:
60 |
61 | ## Set these proxy values in order to update package manager and docker daemon to use proxies
62 | # http_proxy: ""
63 | # https_proxy: ""
64 |
65 | ## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
66 | # no_proxy: ""
67 |
68 | ## Some problems may occur when downloading files over https proxy due to ansible bug
69 | ## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable
70 | ## SSL validation of get_url module. Note that kubespray will still be performing checksum validation.
71 | # download_validate_certs: False
72 |
73 | ## Cluster nodes are already excluded from the proxy; if you need to exclude other resources as well, add them here.
74 | # additional_no_proxy: ""
75 |
76 | ## If you need to disable proxying of os package repositories but are still behind an http_proxy set
77 | ## skip_http_proxy_on_os_packages to true
78 | ## This will cause kubespray not to set proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu
79 | ## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt packages will install from the source you wish
80 | # skip_http_proxy_on_os_packages: false
81 |
82 | ## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all
83 | ## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the
84 | ## no_proxy variable, set below to true:
85 | no_proxy_exclude_workers: false
86 |
87 | ## Certificate Management
88 | ## This setting determines whether certs are generated via scripts.
89 | ## Choose 'none' if you provide your own certificates.
90 | ## Options are "script" and "none"
91 | # cert_management: script
92 |
93 | ## Set to true to allow pre-checks to fail and continue deployment
94 | # ignore_assert_errors: false
95 |
96 | ## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
97 | # kube_read_only_port: 10255
98 |
99 | ## Set true to download and cache container images
100 | # download_container: true
101 |
102 | ## Deploy container engine
103 | # Set false if you want to deploy container engine manually.
104 | # deploy_container_engine: true
105 |
106 | ## Red Hat Enterprise Linux subscription registration
107 | ## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination
108 | ## Update RHEL subscription purpose usage, role and SLA if necessary
109 | # rh_subscription_username: ""
110 | # rh_subscription_password: ""
111 | # rh_subscription_org_id: ""
112 | # rh_subscription_activation_key: ""
113 | # rh_subscription_usage: "Development"
114 | # rh_subscription_role: "Red Hat Enterprise Server"
115 | # rh_subscription_sla: "Self-Support"
116 |
117 | ## Check if access_ip responds to ping. Set false if your firewall blocks ICMP.
118 | # ping_access_ip: true
119 |
--------------------------------------------------------------------------------
/ansible/hosts/group_vars/all/aws.yml:
--------------------------------------------------------------------------------
1 | ## To use AWS EBS CSI Driver to provision volumes, uncomment the first value
2 | ## and configure the parameters below
3 | # aws_ebs_csi_enabled: true
4 | # aws_ebs_csi_enable_volume_scheduling: true
5 | # aws_ebs_csi_enable_volume_snapshot: false
6 | # aws_ebs_csi_enable_volume_resizing: false
7 | # aws_ebs_csi_controller_replicas: 1
8 | # aws_ebs_csi_plugin_image_tag: latest
9 | # aws_ebs_csi_extra_volume_tags: "Owner=owner,Team=team,Environment=environment"
10 |
--------------------------------------------------------------------------------
/ansible/hosts/group_vars/all/azure.yml:
--------------------------------------------------------------------------------
1 | ## When azure is used, you need to also set the following variables.
2 | ## see docs/azure.md for details on how to get these values
3 |
4 | # azure_cloud:
5 | # azure_tenant_id:
6 | # azure_subscription_id:
7 | # azure_aad_client_id:
8 | # azure_aad_client_secret:
9 | # azure_resource_group:
10 | # azure_location:
11 | # azure_subnet_name:
12 | # azure_security_group_name:
13 | # azure_security_group_resource_group:
14 | # azure_vnet_name:
15 | # azure_vnet_resource_group:
16 | # azure_route_table_name:
17 | # azure_route_table_resource_group:
18 | # supported values are 'standard' or 'vmss'
19 | # azure_vmtype: standard
20 |
21 | ## Azure Disk CSI credentials and parameters
22 | ## see docs/azure-csi.md for details on how to get these values
23 |
24 | # azure_csi_tenant_id:
25 | # azure_csi_subscription_id:
26 | # azure_csi_aad_client_id:
27 | # azure_csi_aad_client_secret:
28 | # azure_csi_location:
29 | # azure_csi_resource_group:
30 | # azure_csi_vnet_name:
31 | # azure_csi_vnet_resource_group:
32 | # azure_csi_subnet_name:
33 | # azure_csi_security_group_name:
34 | # azure_csi_use_instance_metadata:
35 | # azure_csi_tags: "Owner=owner,Team=team,Environment=environment"
36 |
37 | ## To enable Azure Disk CSI, uncomment below
38 | # azure_csi_enabled: true
39 | # azure_csi_controller_replicas: 1
40 | # azure_csi_plugin_image_tag: latest
41 |
--------------------------------------------------------------------------------
/ansible/hosts/group_vars/all/containerd.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Please see roles/container-engine/containerd/defaults/main.yml for more configuration options
3 |
4 | # containerd_storage_dir: "/var/lib/containerd"
5 | # containerd_state_dir: "/run/containerd"
6 | # containerd_oom_score: 0
7 |
8 | # containerd_default_runtime: "runc"
9 | # containerd_snapshotter: "native"
10 |
11 | # containerd_runc_runtime:
12 | # name: runc
13 | # type: "io.containerd.runc.v2"
14 | # engine: ""
15 | # root: ""
16 |
17 | # containerd_additional_runtimes:
18 | # Example for Kata Containers as additional runtime:
19 | # - name: kata
20 | # type: "io.containerd.kata.v2"
21 | # engine: ""
22 | # root: ""
23 |
24 | # containerd_grpc_max_recv_message_size: 16777216
25 | # containerd_grpc_max_send_message_size: 16777216
26 |
27 | # containerd_debug_level: "info"
28 |
29 | # containerd_metrics_address: ""
30 |
31 | # containerd_metrics_grpc_histogram: false
32 |
33 | ## An obvious use case is allowing insecure-registry access to self hosted registries.
34 | ## Can be an IP address or a domain name.
35 | ## For example, define mirror.registry.io or 172.19.16.11:5000.
36 | ## Set "name": "url". An insecure url must start with http://
37 | ## Port number is also needed if the default HTTPS port is not used.
38 | # containerd_insecure_registries:
39 | # "localhost": "http://127.0.0.1"
40 | # "172.19.16.11:5000": "http://172.19.16.11:5000"
41 |
42 | # containerd_registries:
43 | # "docker.io": "https://registry-1.docker.io"
44 |
45 | # containerd_max_container_log_line_size: -1
46 |
47 | # containerd_registry_auth:
48 | # - registry: 10.0.0.2:5000
49 | # username: user
50 | # password: pass
51 |
--------------------------------------------------------------------------------
/ansible/hosts/group_vars/all/coreos.yml:
--------------------------------------------------------------------------------
1 | ## Whether CoreOS should auto-upgrade; default is true
2 | # coreos_auto_upgrade: true
3 |
--------------------------------------------------------------------------------
/ansible/hosts/group_vars/all/cri-o.yml:
--------------------------------------------------------------------------------
1 | # crio_insecure_registries:
2 | # - 10.0.0.2:5000
3 | # crio_registry_auth:
4 | # - registry: 10.0.0.2:5000
5 | # username: user
6 | # password: pass
7 |
--------------------------------------------------------------------------------
/ansible/hosts/group_vars/all/docker.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ## Uncomment this if you want to force overlay/overlay2 as docker storage driver
3 | ## Please note that overlay2 is only supported on newer kernels
4 | # docker_storage_options: -s overlay2
5 |
6 | ## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7.
7 | docker_container_storage_setup: false
8 |
9 | ## A disk path must be defined via docker_container_storage_setup_devs,
10 | ## otherwise docker-storage-setup will be executed incorrectly.
11 | # docker_container_storage_setup_devs: /dev/vdb
12 |
13 | ## Uncomment this if you want to change the Docker Cgroup driver (native.cgroupdriver)
14 | ## Valid options are systemd or cgroupfs, default is systemd
15 | # docker_cgroup_driver: systemd
16 |
17 | ## Only set this if you have more than 3 nameservers:
18 | ## If true Kubespray will only use the first 3, otherwise it will fail
19 | docker_dns_servers_strict: false
20 |
21 | # Path used to store Docker data
22 | docker_daemon_graph: "/var/lib/docker"
23 |
24 | ## Used to set docker daemon iptables options to true
25 | docker_iptables_enabled: "false"
26 |
27 | # Docker log options
28 | # Rotate container stderr/stdout logs at 50m and keep last 5
29 | docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"
30 |
31 | # define docker bin_dir
32 | docker_bin_dir: "/usr/bin"
33 |
34 | # keep docker packages after installation; speeds up repeated ansible provisioning runs when '1'
35 | # kubespray deletes the docker package on each run, so caching the package makes sense
36 | docker_rpm_keepcache: 1
37 |
38 | ## An obvious use case is allowing insecure-registry access to self hosted registries.
39 | ## Can be an IP address or a domain name.
40 | ## For example, define 172.19.16.11 or mirror.registry.io
41 | # docker_insecure_registries:
42 | # - mirror.registry.io
43 | # - 172.19.16.11
44 |
45 | ## Add other registries, for example a China registry mirror.
46 | # docker_registry_mirrors:
47 | # - https://registry.docker-cn.com
48 | # - https://mirror.aliyuncs.com
49 |
50 | ## If non-empty will override default system MountFlags value.
51 | ## This option takes a mount propagation flag: shared, slave
52 | ## or private, which control whether mounts in the file system
53 | ## namespace set up for docker will receive or propagate mounts
54 | ## and unmounts. Leave empty for system default
55 | # docker_mount_flags:
56 |
57 | ## A string of extra options to pass to the docker daemon.
58 | ## This string should be exactly as you wish it to appear.
59 | # docker_options: ""
60 |
--------------------------------------------------------------------------------
/ansible/hosts/group_vars/all/gcp.yml:
--------------------------------------------------------------------------------
1 | ## GCP compute Persistent Disk CSI Driver credentials and parameters
2 | ## See docs/gcp-pd-csi.md for information about the implementation
3 |
4 | ## Specify the path to the file containing the service account credentials
5 | # gcp_pd_csi_sa_cred_file: "/my/safe/credentials/directory/cloud-sa.json"
6 |
7 | ## To enable GCP Persistent Disk CSI driver, uncomment below
8 | # gcp_pd_csi_enabled: true
9 | # gcp_pd_csi_controller_replicas: 1
10 | # gcp_pd_csi_driver_image_tag: "v0.7.0-gke.0"
11 |
--------------------------------------------------------------------------------
/ansible/hosts/group_vars/all/oci.yml:
--------------------------------------------------------------------------------
1 | ## When Oracle Cloud Infrastructure is used, set these variables
2 | # oci_private_key:
3 | # oci_region_id:
4 | # oci_tenancy_id:
5 | # oci_user_id:
6 | # oci_user_fingerprint:
7 | # oci_compartment_id:
8 | # oci_vnc_id:
9 | # oci_subnet1_id:
10 | # oci_subnet2_id:
11 | ## Override these default/optional behaviors if you wish
12 | # oci_security_list_management: All
13 | ## If you would like the controller to manage specific lists per subnet, provide a mapping of subnet OCIDs to security list OCIDs. Below are examples.
14 | # oci_security_lists:
15 | # ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
16 | # ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
17 | ## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint
18 | # oci_use_instance_principals: false
19 | # oci_cloud_controller_version: 0.6.0
20 | ## If you would like to control OCI query rate limits for the controller
21 | # oci_rate_limit:
22 | # rate_limit_qps_read:
23 | # rate_limit_qps_write:
24 | # rate_limit_bucket_read:
25 | # rate_limit_bucket_write:
26 | ## Other optional variables
27 | # oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci)
28 | # oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above)
29 |
--------------------------------------------------------------------------------
/ansible/hosts/group_vars/all/offline.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ## Global Offline settings
3 | ### Private Container Image Registry
4 | # registry_host: "myprivateregistry.com"
5 | # files_repo: "http://myprivatehttpd"
6 | ### If using CentOS, RedHat, AlmaLinux or Fedora
7 | # yum_repo: "http://myinternalyumrepo"
8 | ### If using Debian
9 | # debian_repo: "http://myinternaldebianrepo"
10 | ### If using Ubuntu
11 | # ubuntu_repo: "http://myinternalubunturepo"
12 |
13 | ## Container Registry overrides
14 | # kube_image_repo: "{{ registry_host }}"
15 | # gcr_image_repo: "{{ registry_host }}"
16 | # github_image_repo: "{{ registry_host }}"
17 | # docker_image_repo: "{{ registry_host }}"
18 | # quay_image_repo: "{{ registry_host }}"
19 |
20 | ## Kubernetes components
21 | # kubeadm_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubeadm"
22 | # kubectl_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubectl"
23 | # kubelet_download_url: "{{ files_repo }}/kubernetes/{{ kube_version }}/kubelet"
24 |
25 | ## CNI Plugins
26 | # cni_download_url: "{{ files_repo }}/kubernetes/cni/cni-plugins-linux-{{ image_arch }}-{{ cni_version }}.tgz"
27 |
28 | ## cri-tools
29 | # crictl_download_url: "{{ files_repo }}/kubernetes/cri-tools/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
30 |
31 | ## [Optional] etcd: only if you **DON'T** use etcd_deployment=host
32 | # etcd_download_url: "{{ files_repo }}/kubernetes/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz"
33 |
34 | # [Optional] Calico: If using Calico network plugin
35 | # calicoctl_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
36 | # [Optional] Calico with kdd: If using Calico network plugin with kdd datastore
37 | # calico_crds_download_url: "{{ files_repo }}/kubernetes/calico/{{ calico_version }}.tar.gz"
38 |
39 | # [Optional] Flannel: If using Flannel network plugin
40 | # flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}"
41 |
42 | # [Optional] helm: only if you set helm_enabled: true
43 | # helm_download_url: "{{ files_repo }}/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz"
44 |
45 | # [Optional] crun: only if you set crun_enabled: true
46 | # crun_download_url: "{{ files_repo }}/containers/crun/releases/download/{{ crun_version }}/crun-{{ crun_version }}-linux-{{ image_arch }}"
47 |
48 | # [Optional] kata: only if you set kata_containers_enabled: true
49 | # kata_containers_download_url: "{{ files_repo }}/kata-containers/runtime/releases/download/{{ kata_containers_version }}/kata-static-{{ kata_containers_version }}-{{ ansible_architecture }}.tar.xz"
50 |
51 | # [Optional] cri-o: only if you set container_manager: crio
52 | # crio_download_base: "download.opensuse.org/repositories/devel:kubic:libcontainers:stable"
53 | # crio_download_crio: "http://{{ crio_download_base }}:/cri-o:/"
54 |
55 | # [Optional] runc,containerd: only if you set container_runtime: containerd
56 | # runc_download_url: "{{ files_repo }}/{{ runc_version }}/runc.{{ image_arch }}"
57 | # containerd_download_url: "{{ files_repo }}/containerd/v{{ containerd_version }}/containerd-{{ containerd_version }}-linux-{{ image_arch }}.tar.gz"
58 | # nerdctl_download_url: "{{ files_repo }}/nerdctl/v{{ nerdctl_version }}/nerdctl-{{ nerdctl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz"
59 |
60 | ## CentOS/Redhat/AlmaLinux
61 | ### For EL7, base and extras repo must be available, for EL8, baseos and appstream
62 | ### By default we enable those repo automatically
63 | # rhel_enable_repos: false
64 | ### Docker / Containerd
65 | # docker_rh_repo_base_url: "{{ yum_repo }}/docker-ce/$releasever/$basearch"
66 | # docker_rh_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
67 |
68 | ## Fedora
69 | ### Docker
70 | # docker_fedora_repo_base_url: "{{ yum_repo }}/docker-ce/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}"
71 | # docker_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
72 | ### Containerd
73 | # containerd_fedora_repo_base_url: "{{ yum_repo }}/containerd"
74 | # containerd_fedora_repo_gpgkey: "{{ yum_repo }}/docker-ce/gpg"
75 |
76 | ## Debian
77 | ### Docker
78 | # docker_debian_repo_base_url: "{{ debian_repo }}/docker-ce"
79 | # docker_debian_repo_gpgkey: "{{ debian_repo }}/docker-ce/gpg"
80 | ### Containerd
81 | # containerd_debian_repo_base_url: "{{ ubuntu_repo }}/containerd"
82 | # containerd_debian_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg"
83 | # containerd_debian_repo_repokey: 'YOURREPOKEY'
84 |
85 | ## Ubuntu
86 | ### Docker
87 | # docker_ubuntu_repo_base_url: "{{ ubuntu_repo }}/docker-ce"
88 | # docker_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/docker-ce/gpg"
89 | ### Containerd
90 | # containerd_ubuntu_repo_base_url: "{{ ubuntu_repo }}/containerd"
91 | # containerd_ubuntu_repo_gpgkey: "{{ ubuntu_repo }}/containerd/gpg"
92 | # containerd_ubuntu_repo_repokey: 'YOURREPOKEY'
93 |
--------------------------------------------------------------------------------
/ansible/hosts/group_vars/all/openstack.yml:
--------------------------------------------------------------------------------
1 | ## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
2 | # openstack_blockstorage_version: "v1/v2/auto (default)"
3 | # openstack_blockstorage_ignore_volume_az: yes
4 | ## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
5 | # openstack_lbaas_enabled: True
6 | # openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
7 | ## To enable automatic floating ip provisioning, specify a subnet.
8 | # openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
9 | ## Override default LBaaS behavior
10 | # openstack_lbaas_use_octavia: False
11 | # openstack_lbaas_method: "ROUND_ROBIN"
12 | # openstack_lbaas_provider: "haproxy"
13 | # openstack_lbaas_create_monitor: "yes"
14 | # openstack_lbaas_monitor_delay: "1m"
15 | # openstack_lbaas_monitor_timeout: "30s"
16 | # openstack_lbaas_monitor_max_retries: "3"
17 |
18 | ## Values for the external OpenStack Cloud Controller
19 | # external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP"
20 | # external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP"
21 | # external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from"
22 | # external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from"
23 | # external_openstack_lbaas_method: "ROUND_ROBIN"
24 | # external_openstack_lbaas_provider: "octavia"
25 | # external_openstack_lbaas_create_monitor: false
26 | # external_openstack_lbaas_monitor_delay: "1m"
27 | # external_openstack_lbaas_monitor_timeout: "30s"
28 | # external_openstack_lbaas_monitor_max_retries: "3"
29 | # external_openstack_lbaas_manage_security_groups: false
30 | # external_openstack_lbaas_internal_lb: false
31 | # external_openstack_network_ipv6_disabled: false
32 | # external_openstack_network_internal_networks: []
33 | # external_openstack_network_public_networks: []
34 | # external_openstack_metadata_search_order: "configDrive,metadataService"
35 |
36 | ## Application credentials to authenticate against Keystone API
37 | ## Those settings will take precedence over username and password that might be set in your environment
38 | ## All of them are required
39 | # external_openstack_application_credential_name:
40 | # external_openstack_application_credential_id:
41 | # external_openstack_application_credential_secret:
42 |
43 | ## The tag of the external OpenStack Cloud Controller image
44 | # external_openstack_cloud_controller_image_tag: "latest"
45 |
46 | ## To use Cinder CSI plugin to provision volumes set this value to true
47 | ## Make sure to source in the openstack credentials
48 | # cinder_csi_enabled: true
49 | # cinder_csi_controller_replicas: 1
50 |
--------------------------------------------------------------------------------
/ansible/hosts/group_vars/all/vsphere.yml:
--------------------------------------------------------------------------------
1 | ## Values for the external vSphere Cloud Provider
2 | # external_vsphere_vcenter_ip: "myvcenter.domain.com"
3 | # external_vsphere_vcenter_port: "443"
4 | # external_vsphere_insecure: "true"
5 | # external_vsphere_user: "administrator@vsphere.local" # Can also be set via the `VSPHERE_USER` environment variable
6 | # external_vsphere_password: "K8s_admin" # Can also be set via the `VSPHERE_PASSWORD` environment variable
7 | # external_vsphere_datacenter: "DATACENTER_name"
8 | # external_vsphere_kubernetes_cluster_id: "kubernetes-cluster-id"
9 |
10 | ## vSphere version where the VMs are located
11 | # external_vsphere_version: "6.7u3"
12 |
13 | ## Tags for the external vSphere Cloud Provider images
14 | ## gcr.io/cloud-provider-vsphere/cpi/release/manager
15 | # external_vsphere_cloud_controller_image_tag: "latest"
16 | ## gcr.io/cloud-provider-vsphere/csi/release/syncer
17 | # vsphere_syncer_image_tag: "v2.4.0"
18 | ## k8s.gcr.io/sig-storage/csi-attacher
19 | # vsphere_csi_attacher_image_tag: "v3.3.0"
20 | ## gcr.io/cloud-provider-vsphere/csi/release/driver
21 | # vsphere_csi_controller: "v2.4.0"
22 | ## k8s.gcr.io/sig-storage/livenessprobe
23 | # vsphere_csi_liveness_probe_image_tag: "v2.4.0"
24 | ## k8s.gcr.io/sig-storage/csi-provisioner
25 | # vsphere_csi_provisioner_image_tag: "v3.0.0"
26 | ## k8s.gcr.io/sig-storage/csi-resizer
27 | ## makes sense only for vSphere version >=7.0
28 | # vsphere_csi_resizer_tag: "v1.3.0"
29 |
30 | ## To use vSphere CSI plugin to provision volumes set this value to true
31 | # vsphere_csi_enabled: true
32 | # vsphere_csi_controller_replicas: 1
33 |
--------------------------------------------------------------------------------
/ansible/hosts/group_vars/etcd.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ## Etcd auto compaction retention for mvcc key value store in hour
3 | # etcd_compaction_retention: 0
4 |
5 | ## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
6 | # etcd_metrics: basic
7 |
8 | ## Etcd is restricted by default to 512M on systems under 4GB RAM; 512MB is not enough for much more than testing.
9 | ## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
10 | # etcd_memory_limit: "512M"
11 |
12 | ## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than
13 | ## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check
14 | ## etcd documentation for more information.
15 | # etcd_quota_backend_bytes: "2147483648"
16 |
17 | ### ETCD: disable peer client cert authentication.
18 | # This affects ETCD_PEER_CLIENT_CERT_AUTH variable
19 | # etcd_peer_client_auth: true
20 |
21 | ## Settings for etcd deployment type
22 | # Set this to docker if you are using container_manager: docker
23 | etcd_deployment_type: host
24 |
--------------------------------------------------------------------------------
/ansible/hosts/group_vars/k8s_cluster/addons.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Kubernetes dashboard
3 | # RBAC required. see docs/getting-started.md for access details.
4 | # dashboard_enabled: false
5 |
6 | # Helm deployment
7 | helm_enabled: false
8 |
9 | # Registry deployment
10 | registry_enabled: false
11 | # registry_namespace: kube-system
12 | # registry_storage_class: ""
13 | # registry_disk_size: "10Gi"
14 |
15 | # Metrics Server deployment
16 | metrics_server_enabled: false
17 | # metrics_server_resizer: false
18 | # metrics_server_kubelet_insecure_tls: true
19 | # metrics_server_metric_resolution: 15s
20 | # metrics_server_kubelet_preferred_address_types: "InternalIP"
21 |
22 | # Rancher Local Path Provisioner
23 | local_path_provisioner_enabled: false
24 | # local_path_provisioner_namespace: "local-path-storage"
25 | # local_path_provisioner_storage_class: "local-path"
26 | # local_path_provisioner_reclaim_policy: Delete
27 | # local_path_provisioner_claim_root: /opt/local-path-provisioner/
28 | # local_path_provisioner_debug: false
29 | # local_path_provisioner_image_repo: "rancher/local-path-provisioner"
30 | # local_path_provisioner_image_tag: "v0.0.19"
31 | # local_path_provisioner_helper_image_repo: "busybox"
32 | # local_path_provisioner_helper_image_tag: "latest"
33 |
34 | # Local volume provisioner deployment
35 | local_volume_provisioner_enabled: false
36 | # local_volume_provisioner_namespace: kube-system
37 | # local_volume_provisioner_nodelabels:
38 | # - kubernetes.io/hostname
39 | # - topology.kubernetes.io/region
40 | # - topology.kubernetes.io/zone
41 | # local_volume_provisioner_storage_classes:
42 | # local-storage:
43 | # host_dir: /mnt/disks
44 | # mount_dir: /mnt/disks
45 | # volume_mode: Filesystem
46 | # fs_type: ext4
47 | # fast-disks:
48 | # host_dir: /mnt/fast-disks
49 | # mount_dir: /mnt/fast-disks
50 | # block_cleaner_command:
51 | # - "/scripts/shred.sh"
52 | # - "2"
53 | # volume_mode: Filesystem
54 | # fs_type: ext4
55 |
56 | # CSI Volume Snapshot Controller deployment, set this to true if your CSI is able to manage snapshots
57 | # currently, setting cinder_csi_enabled=true would automatically enable the snapshot controller
58 | # Longhorn is an external CSI that would also require setting this to true but it is not included in kubespray
59 | # csi_snapshot_controller_enabled: false
60 |
61 | # CephFS provisioner deployment
62 | cephfs_provisioner_enabled: false
63 | # cephfs_provisioner_namespace: "cephfs-provisioner"
64 | # cephfs_provisioner_cluster: ceph
65 | # cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
66 | # cephfs_provisioner_admin_id: admin
67 | # cephfs_provisioner_secret: secret
68 | # cephfs_provisioner_storage_class: cephfs
69 | # cephfs_provisioner_reclaim_policy: Delete
70 | # cephfs_provisioner_claim_root: /volumes
71 | # cephfs_provisioner_deterministic_names: true
72 |
73 | # RBD provisioner deployment
74 | rbd_provisioner_enabled: false
75 | # rbd_provisioner_namespace: rbd-provisioner
76 | # rbd_provisioner_replicas: 2
77 | # rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
78 | # rbd_provisioner_pool: kube
79 | # rbd_provisioner_admin_id: admin
80 | # rbd_provisioner_secret_name: ceph-secret-admin
81 | # rbd_provisioner_secret: ceph-key-admin
82 | # rbd_provisioner_user_id: kube
83 | # rbd_provisioner_user_secret_name: ceph-secret-user
84 | # rbd_provisioner_user_secret: ceph-key-user
85 | # rbd_provisioner_user_secret_namespace: rbd-provisioner
86 | # rbd_provisioner_fs_type: ext4
87 | # rbd_provisioner_image_format: "2"
88 | # rbd_provisioner_image_features: layering
89 | # rbd_provisioner_storage_class: rbd
90 | # rbd_provisioner_reclaim_policy: Delete
91 |
92 | # Nginx ingress controller deployment
93 | ingress_nginx_enabled: false
94 | # ingress_nginx_host_network: false
95 | ingress_publish_status_address: ""
96 | # ingress_nginx_nodeselector:
97 | # kubernetes.io/os: "linux"
98 | # ingress_nginx_tolerations:
99 | # - key: "node-role.kubernetes.io/master"
100 | # operator: "Equal"
101 | # value: ""
102 | # effect: "NoSchedule"
103 | # - key: "node-role.kubernetes.io/control-plane"
104 | # operator: "Equal"
105 | # value: ""
106 | # effect: "NoSchedule"
107 | # ingress_nginx_namespace: "ingress-nginx"
108 | # ingress_nginx_insecure_port: 80
109 | # ingress_nginx_secure_port: 443
110 | # ingress_nginx_configmap:
111 | # map-hash-bucket-size: "128"
112 | # ssl-protocols: "TLSv1.2 TLSv1.3"
113 | # ingress_nginx_configmap_tcp_services:
114 | # 9000: "default/example-go:8080"
115 | # ingress_nginx_configmap_udp_services:
116 | # 53: "kube-system/coredns:53"
117 | # ingress_nginx_extra_args:
118 | # - --default-ssl-certificate=default/foo-tls
119 | # ingress_nginx_termination_grace_period_seconds: 300
120 | # ingress_nginx_class: nginx
121 |
122 | # ALB ingress controller deployment
123 | ingress_alb_enabled: false
124 | # alb_ingress_aws_region: "us-east-1"
125 | # alb_ingress_restrict_scheme: "false"
126 | # Enables logging on all outbound requests sent to the AWS API.
127 | # If logging is desired, set to true.
128 | # alb_ingress_aws_debug: "false"
129 |
130 | # Cert manager deployment
131 | cert_manager_enabled: false
132 | # cert_manager_namespace: "cert-manager"
133 | # cert_manager_trusted_internal_ca: |
134 | # -----BEGIN CERTIFICATE-----
135 | # [REPLACE with your CA certificate]
136 | # -----END CERTIFICATE-----
137 |
138 | # MetalLB deployment
139 | metallb_enabled: false
140 | metallb_speaker_enabled: true
141 | # metallb_ip_range:
142 | # - "10.5.0.50-10.5.0.99"
143 | # metallb_pool_name: "loadbalanced"
144 | # metallb_auto_assign: true
145 | # metallb_speaker_nodeselector:
146 | # kubernetes.io/os: "linux"
147 | # metallb_controller_nodeselector:
148 | # kubernetes.io/os: "linux"
149 | # metallb_speaker_tolerations:
150 | # - key: "node-role.kubernetes.io/master"
151 | # operator: "Equal"
152 | # value: ""
153 | # effect: "NoSchedule"
154 | # - key: "node-role.kubernetes.io/control-plane"
155 | # operator: "Equal"
156 | # value: ""
157 | # effect: "NoSchedule"
158 | # metallb_controller_tolerations:
159 | # - key: "node-role.kubernetes.io/master"
160 | # operator: "Equal"
161 | # value: ""
162 | # effect: "NoSchedule"
163 | # - key: "node-role.kubernetes.io/control-plane"
164 | # operator: "Equal"
165 | # value: ""
166 | # effect: "NoSchedule"
167 | # metallb_version: v0.10.3
168 | # metallb_protocol: "layer2"
169 | # metallb_port: "7472"
170 | # metallb_memberlist_port: "7946"
171 | # metallb_additional_address_pools:
172 | # kube_service_pool:
173 | # ip_range:
174 | # - "10.5.1.50-10.5.1.99"
175 | # protocol: "layer2"
176 | # auto_assign: false
177 | # metallb_protocol: "bgp"
178 | # metallb_peers:
179 | # - peer_address: 192.0.2.1
180 | # peer_asn: 64512
181 | # my_asn: 4200000000
182 | # - peer_address: 192.0.2.2
183 | # peer_asn: 64513
184 | # my_asn: 4200000000
185 |
186 |
187 | argocd_enabled: false
188 | # argocd_version: v2.1.6
189 | # argocd_namespace: argocd
190 | # Default password:
191 | # - https://argoproj.github.io/argo-cd/getting_started/#4-login-using-the-cli
192 | # ---
193 | # The initial password is autogenerated to be the pod name of the Argo CD API server. This can be retrieved with the command:
194 | # kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2
195 | # ---
196 | # Use the following var to set admin password
197 | # argocd_admin_password: "password"
198 |
199 | # The plugin manager for kubectl
200 | krew_enabled: false
201 | krew_root_dir: "/usr/local/krew"
202 |
--------------------------------------------------------------------------------
/ansible/hosts/group_vars/k8s_cluster/k8s-cluster.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Kubernetes configuration dirs and system namespace.
3 | # These are where all the additional config files go
4 | # that Kubernetes normally puts in /srv/kubernetes.
5 | # This puts them in a sane location and namespace.
6 | # Editing those values will almost surely break something.
7 | kube_config_dir: /etc/kubernetes
8 | kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
9 | kube_manifest_dir: "{{ kube_config_dir }}/manifests"
10 |
11 | # This is where all the cert scripts and certs will be located
12 | kube_cert_dir: "{{ kube_config_dir }}/ssl"
13 |
14 | # This is where all of the bearer tokens will be stored
15 | kube_token_dir: "{{ kube_config_dir }}/tokens"
16 |
17 | kube_api_anonymous_auth: true
18 |
19 | ## Change this to use another Kubernetes version, e.g. a current beta release
20 | kube_version: v1.22.8
21 |
22 | # Where the binaries will be downloaded.
23 | # Note: ensure that you've enough disk space (about 1G)
24 | local_release_dir: "/tmp/releases"
25 | # Random shifts for retrying failed ops like pushing/downloading
26 | retry_stagger: 5
27 |
28 | # This is the group that the cert creation scripts chgrp the
29 | # cert files to. Not really changeable...
30 | kube_cert_group: kube-cert
31 |
32 | # Cluster Loglevel configuration
33 | kube_log_level: 2
34 |
35 | # Directory where credentials will be stored
36 | credentials_dir: "{{ inventory_dir }}/credentials"
37 |
38 | ## It is possible to activate / deactivate selected authentication methods (oidc, static token auth)
39 | # kube_oidc_auth: false
40 | # kube_token_auth: false
41 |
42 |
43 | ## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
44 | ## To use OpenID you have to additionally deploy an OpenID Provider (e.g. Dex, Keycloak, ...)
45 |
46 | # kube_oidc_url: https:// ...
47 | # kube_oidc_client_id: kubernetes
48 | ## Optional settings for OIDC
49 | # kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem"
50 | # kube_oidc_username_claim: sub
51 | # kube_oidc_username_prefix: 'oidc:'
52 | # kube_oidc_groups_claim: groups
53 | # kube_oidc_groups_prefix: 'oidc:'
54 |
55 | ## Variables to control webhook authn/authz
56 | # kube_webhook_token_auth: false
57 | # kube_webhook_token_auth_url: https://...
58 | # kube_webhook_token_auth_url_skip_tls_verify: false
59 |
60 | ## For webhook authorization, authorization_modes must include Webhook
61 | # kube_webhook_authorization: false
62 | # kube_webhook_authorization_url: https://...
63 | # kube_webhook_authorization_url_skip_tls_verify: false
64 |
65 | # Choose network plugin (cilium, calico, weave or flannel. Use cni for generic cni plugin)
66 | # Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
67 | kube_network_plugin: calico
68 |
69 | # Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
70 | kube_network_plugin_multus: false
71 |
72 | # Kubernetes internal network for services, unused block of space.
73 | kube_service_addresses: 10.233.0.0/18
74 |
75 | # internal network. When used, it will assign IP
76 | # addresses from this range to individual pods.
77 | # This network must be unused in your network infrastructure!
78 | kube_pods_subnet: 10.233.64.0/18
79 |
80 | # internal network node size allocation (optional). This is the size allocated
81 | # to each node for pod IP address allocation. Note that the number of pods per node is
82 | # also limited by the kubelet_max_pods variable which defaults to 110.
83 | #
84 | # Example:
85 | # Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node:
86 | # - kube_pods_subnet: 10.233.64.0/18
87 | # - kube_network_node_prefix: 24
88 | # - kubelet_max_pods: 110
89 | #
90 | # Example:
91 | # Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node:
92 | # - kube_pods_subnet: 10.233.64.0/18
93 | # - kube_network_node_prefix: 25
94 | # - kubelet_max_pods: 110
95 | kube_network_node_prefix: 24
96 |
97 | # Configure Dual Stack networking (i.e. both IPv4 and IPv6)
98 | enable_dual_stack_networks: false
99 |
100 | # Kubernetes internal network for IPv6 services, unused block of space.
101 | # This is only used if enable_dual_stack_networks is set to true
102 | # This provides 4096 IPv6 IPs
103 | kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116
104 |
105 | # Internal network. When used, it will assign IPv6 addresses from this range to individual pods.
106 | # This network must not already be in your network infrastructure!
107 | # This is only used if enable_dual_stack_networks is set to true.
108 | # This provides room for 256 nodes with 254 pods per node.
109 | kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112
110 |
111 | # IPv6 subnet size allocated to each node for pods.
112 | # This is only used if enable_dual_stack_networks is set to true
113 | # This provides room for 254 pods per node.
114 | kube_network_node_prefix_ipv6: 120
115 |
116 | # The port the API Server will be listening on.
117 | kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
118 | kube_apiserver_port: 6443 # (https)
119 | # kube_apiserver_insecure_port: 8080 # (http)
120 | # Set to 0 to disable insecure port - Requires RBAC in authorization_modes and kube_api_anonymous_auth: true
121 | kube_apiserver_insecure_port: 0 # (disabled)
122 |
123 | # Kube-proxy proxyMode configuration.
124 | # Can be ipvs, iptables
125 | kube_proxy_mode: ipvs
126 |
127 | # configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface
128 | # must be set to true for MetalLB to work
129 | kube_proxy_strict_arp: false
130 |
131 | # A string slice of values which specify the addresses to use for NodePorts.
132 | # Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32).
133 | # The default empty string slice ([]) means to use all local addresses.
134 | # kube_proxy_nodeport_addresses_cidr is retained for legacy config
135 | kube_proxy_nodeport_addresses: >-
136 | {%- if kube_proxy_nodeport_addresses_cidr is defined -%}
137 | [{{ kube_proxy_nodeport_addresses_cidr }}]
138 | {%- else -%}
139 | []
140 | {%- endif -%}
141 |
142 | # If non-empty, will use this string as identification instead of the actual hostname
143 | # kube_override_hostname: >-
144 | # {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
145 | # {%- else -%}
146 | # {{ inventory_hostname }}
147 | # {%- endif -%}
148 |
149 | ## Encrypting Secret Data at Rest (experimental)
150 | kube_encrypt_secret_data: false
151 |
152 | # Graceful Node Shutdown (Kubernetes >= 1.21.0), see https://kubernetes.io/blog/2021/04/21/graceful-node-shutdown-beta/
153 | # kubelet_shutdown_grace_period has to be greater than kubelet_shutdown_grace_period_critical_pods to allow
154 | # non-critical pods to also terminate gracefully
155 | # kubelet_shutdown_grace_period: 60s
156 | # kubelet_shutdown_grace_period_critical_pods: 20s
157 |
158 | # DNS configuration.
159 | # Kubernetes cluster name, also will be used as DNS domain
160 | cluster_name: cluster.local
161 | # Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
162 | ndots: 2
163 | # Can be coredns, coredns_dual, manual or none
164 | dns_mode: coredns
165 | # Set manual server if using a custom cluster DNS server
166 | # manual_dns_server: 10.x.x.x
167 | # Enable nodelocal dns cache
168 | enable_nodelocaldns: true
169 | enable_nodelocaldns_secondary: false
170 | nodelocaldns_ip: 169.254.25.10
171 | nodelocaldns_health_port: 9254
172 | nodelocaldns_second_health_port: 9256
173 | nodelocaldns_bind_metrics_host_ip: false
174 | nodelocaldns_secondary_skew_seconds: 5
175 | # nodelocaldns_external_zones:
176 | # - zones:
177 | # - example.com
178 | # - example.io:1053
179 | # nameservers:
180 | # - 1.1.1.1
181 | # - 2.2.2.2
182 | # cache: 5
183 | # - zones:
184 | # - https://mycompany.local:4453
185 | # nameservers:
186 | # - 192.168.0.53
187 | # cache: 0
188 | # Enable k8s_external plugin for CoreDNS
189 | enable_coredns_k8s_external: false
190 | coredns_k8s_external_zone: k8s_external.local
191 | # Enable endpoint_pod_names option for kubernetes plugin
192 | enable_coredns_k8s_endpoint_pod_names: false
193 |
194 | # Can be docker_dns, host_resolvconf or none
195 | resolvconf_mode: host_resolvconf
196 | # Deploy netchecker app to verify DNS resolve as an HTTP service
197 | deploy_netchecker: false
198 | # Ip address of the kubernetes skydns service
199 | skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
200 | skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
201 | dns_domain: "{{ cluster_name }}"
202 |
203 | ## Container runtime
204 | ## docker for docker, crio for cri-o and containerd for containerd.
205 | ## Default: containerd
206 | container_manager: containerd
207 |
208 | # Additional container runtimes
209 | kata_containers_enabled: false
210 |
211 | kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}"
212 |
213 | # K8s image pull policy (imagePullPolicy)
214 | k8s_image_pull_policy: IfNotPresent
215 |
216 | # audit log for kubernetes
217 | kubernetes_audit: false
218 |
219 | # dynamic kubelet configuration
220 | # Note: Feature DynamicKubeletConfig is deprecated in 1.22 and will not move to GA.
221 | # It is planned to be removed from Kubernetes in the version 1.23.
222 | # Please use alternative ways to update kubelet configuration.
223 | dynamic_kubelet_configuration: false
224 |
225 | # define kubelet config dir for dynamic kubelet
226 | # kubelet_config_dir:
227 | default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
228 | dynamic_kubelet_configuration_dir: "{{ kubelet_config_dir | default(default_kubelet_config_dir) }}"
229 |
230 | # pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
231 | podsecuritypolicy_enabled: false
232 |
233 | # Custom PodSecurityPolicySpec for restricted policy
234 | # podsecuritypolicy_restricted_spec: {}
235 |
236 | # Custom PodSecurityPolicySpec for privileged policy
237 | # podsecuritypolicy_privileged_spec: {}
238 |
239 | # Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
240 | # kubeconfig_localhost: false
241 | # Download kubectl onto the host that runs Ansible in {{ bin_dir }}
242 | # kubectl_localhost: false
243 |
244 | # A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
245 | # Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
246 | # kubelet_enforce_node_allocatable: pods
247 |
248 | ## Optionally reserve resources for OS system daemons.
249 | # system_reserved: true
250 | ## Uncomment to override default values
251 | # system_memory_reserved: 512Mi
252 | # system_cpu_reserved: 500m
253 | ## Reservation for master hosts
254 | # system_master_memory_reserved: 256Mi
255 | # system_master_cpu_reserved: 250m
256 |
257 | # An alternative flexvolume plugin directory
258 | # kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
259 |
260 | ## Supplementary addresses that can be added in kubernetes ssl keys.
261 | ## That can be useful for example to setup a keepalived virtual IP
262 | # supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
263 |
264 | ## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler.
265 | ## See https://github.com/kubernetes-sigs/kubespray/issues/2141
266 | ## Set this variable to true to get rid of this issue
267 | volume_cross_zone_attachment: false
268 | ## Add Persistent Volumes Storage Class for corresponding cloud provider (supported: in-tree OpenStack, Cinder CSI,
269 | ## AWS EBS CSI, Azure Disk CSI, GCP Persistent Disk CSI)
270 | persistent_volumes_enabled: false
271 |
272 | ## Container Engine Acceleration
273 | ## Enable container acceleration feature, for example use gpu acceleration in containers
274 | # nvidia_accelerator_enabled: true
275 | ## Nvidia GPU driver install. The install will be done by an (init) pod running as a daemonset.
276 | ## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2'
277 | ## Array with nvidia_gpu_nodes; leave empty or comment out if you don't want to install drivers.
278 | ## Labels and taints won't be set to nodes if they are not in the array.
279 | # nvidia_gpu_nodes:
280 | # - kube-gpu-001
281 | # nvidia_driver_version: "384.111"
282 | ## flavor can be tesla or gtx
283 | # nvidia_gpu_flavor: gtx
284 | ## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io.
285 | # nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2
286 | # nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63
287 | ## NVIDIA GPU device plugin image.
288 | # nvidia_gpu_device_plugin_container: "k8s.gcr.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e"
289 |
290 | ## Supported TLS minimum version. Possible values: VersionTLS10, VersionTLS11, VersionTLS12, VersionTLS13.
291 | # tls_min_version: ""
292 |
293 | ## Supported TLS cipher suites.
294 | # tls_cipher_suites: {}
295 | # - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
296 | # - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256
297 | # - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
298 | # - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA
299 | # - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
300 | # - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
301 | # - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
302 | # - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA
303 | # - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA
304 | # - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
305 | # - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
306 | # - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA
307 | # - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
308 | # - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
309 | # - TLS_ECDHE_RSA_WITH_RC4_128_SHA
310 | # - TLS_RSA_WITH_3DES_EDE_CBC_SHA
311 | # - TLS_RSA_WITH_AES_128_CBC_SHA
312 | # - TLS_RSA_WITH_AES_128_CBC_SHA256
313 | # - TLS_RSA_WITH_AES_128_GCM_SHA256
314 | # - TLS_RSA_WITH_AES_256_CBC_SHA
315 | # - TLS_RSA_WITH_AES_256_GCM_SHA384
316 | # - TLS_RSA_WITH_RC4_128_SHA
317 |
318 | ## Amount of time to retain events. (default 1h0m0s)
319 | event_ttl_duration: "1h0m0s"
320 |
321 | ## Automatically renew K8S control plane certificates on first Monday of each month
322 | auto_renew_certificates: false
323 | # First Monday of each month
324 | # auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00"
325 |
--------------------------------------------------------------------------------
/ansible/hosts/group_vars/k8s_cluster/k8s-net-calico.yml:
--------------------------------------------------------------------------------
1 | # see roles/network_plugin/calico/defaults/main.yml
2 |
3 | ## With calico it is possible to distribute routes to the border routers of the datacenter.
4 | ## Warning: enabling router peering will disable calico's default behavior ('node mesh').
5 | ## The subnets of each node will be distributed by the datacenter router
6 | # peer_with_router: false
7 |
8 | # Enables Internet connectivity from containers
9 | # nat_outgoing: true
10 |
11 | # Enables Calico CNI "host-local" IPAM plugin
12 | # calico_ipam_host_local: true
13 |
14 | # add default ippool name
15 | # calico_pool_name: "default-pool"
16 |
18 | # add default ippool blockSize (defaults to kube_network_node_prefix)
18 | # calico_pool_blocksize: 24
19 |
20 | # add default ippool CIDR (must be inside kube_pods_subnet, defaults to kube_pods_subnet otherwise)
21 | # calico_pool_cidr: 1.2.3.4/5
22 |
23 | # Add default IPV6 IPPool CIDR. Must be inside kube_pods_subnet_ipv6. Defaults to kube_pods_subnet_ipv6 if not set.
24 | # calico_pool_cidr_ipv6: fd85:ee78:d8a6:8607::1:0000/112
25 |
26 | # Global as_num (/calico/bgp/v1/global/as_num)
27 | # global_as_num: "64512"
28 |
29 | # If you are peering with node-assigned ASNs where the global as_num does not match your nodes, set this
30 | # to true. In all other cases, leave it false.
31 | # calico_no_global_as_num: false
32 |
33 | # You can set MTU value here. If left undefined or empty, it will
34 | # not be specified in calico CNI config, so Calico will use built-in
35 | # defaults. The value should be a number, not a string.
36 | # calico_mtu: 1500
37 |
38 | # Configure the MTU to use for workload interfaces and tunnels.
39 | # - If Wireguard is enabled, subtract 60 from your network MTU (i.e 1500-60=1440)
40 | # - Otherwise, if VXLAN or BPF mode is enabled, subtract 50 from your network MTU (i.e. 1500-50=1450)
41 | # - Otherwise, if IPIP is enabled, subtract 20 from your network MTU (i.e. 1500-20=1480)
42 | # - Otherwise, if not using any encapsulation, set to your network MTU (i.e. 1500)
43 | # calico_veth_mtu: 1440
44 |
45 | # Advertise Cluster IPs
46 | # calico_advertise_cluster_ips: true
47 |
48 | # Advertise Service External IPs
49 | # calico_advertise_service_external_ips:
50 | # - x.x.x.x/24
51 | # - y.y.y.y/32
52 |
53 | # Advertise Service LoadBalancer IPs
54 | # calico_advertise_service_loadbalancer_ips:
55 | # - x.x.x.x/24
56 | # - y.y.y.y/16
57 |
58 | # Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
59 | # calico_datastore: "kdd"
60 |
61 | # Choose Calico iptables backend: "Legacy", "Auto" or "NFT"
62 | # calico_iptables_backend: "Legacy"
63 |
64 | # Use typha (only with kdd)
65 | # typha_enabled: false
66 |
67 | # Generate TLS certs for secure typha<->calico-node communication
68 | # typha_secure: false
69 |
70 | # Scaling typha: 1 replica per 100 nodes is adequate
71 | # Number of typha replicas
72 | # typha_replicas: 1
73 |
74 | # Set max typha connections
75 | # typha_max_connections_lower_limit: 300
76 |
77 | # Set calico network backend: "bird", "vxlan" or "none"
78 | # bird enables BGP routing, which is required for ipip mode.
79 | # calico_network_backend: bird
80 |
81 | # IP in IP and VXLAN are mutually exclusive modes.
82 | # set IP in IP encapsulation mode: "Always", "CrossSubnet", "Never"
83 | # calico_ipip_mode: 'Always'
84 |
85 | # set VXLAN encapsulation mode: "Always", "CrossSubnet", "Never"
86 | # calico_vxlan_mode: 'Never'
87 |
88 | # set VXLAN port and VNI
89 | # calico_vxlan_vni: 4096
90 | # calico_vxlan_port: 4789
91 |
92 | # Enable eBPF mode
93 | # calico_bpf_enabled: false
94 |
95 | # If you want to use non default IP_AUTODETECTION_METHOD for calico node set this option to one of:
96 | # * can-reach=DESTINATION
97 | # * interface=INTERFACE-REGEX
98 | # see https://docs.projectcalico.org/reference/node/configuration
99 | # calico_ip_auto_method: "interface=eth.*"
100 | # Choose the iptables insert mode for Calico: "Insert" or "Append".
101 | # calico_felix_chaininsertmode: Insert
102 |
103 | # If you want to use the default route interface when you use multiple interfaces with dynamic routes (iproute2),
104 | # see https://docs.projectcalico.org/reference/node/configuration : FELIX_DEVICEROUTESOURCEADDRESS
105 | # calico_use_default_route_src_ipaddr: false
106 |
107 | # Enable calico traffic encryption with wireguard
108 | # calico_wireguard_enabled: false
109 |
110 | # In certain situations the liveness and readiness probes may need tuning
111 | # calico_node_livenessprobe_timeout: 10
112 | # calico_node_readinessprobe_timeout: 10
113 |
--------------------------------------------------------------------------------
/ansible/hosts/group_vars/k8s_cluster/k8s-net-canal.yml:
--------------------------------------------------------------------------------
1 | # see roles/network_plugin/canal/defaults/main.yml
2 |
3 | # The interface used by canal for host <-> host communication.
4 | # If left blank, the interface is chosen using the node's
5 | # default route.
6 | # canal_iface: ""
7 |
8 | # Whether or not to masquerade traffic to destinations not within
9 | # the pod network.
10 | # canal_masquerade: "true"
11 |
--------------------------------------------------------------------------------
/ansible/hosts/group_vars/k8s_cluster/k8s-net-cilium.yml:
--------------------------------------------------------------------------------
1 | # see roles/network_plugin/cilium/defaults/main.yml
2 |
--------------------------------------------------------------------------------
/ansible/hosts/group_vars/k8s_cluster/k8s-net-flannel.yml:
--------------------------------------------------------------------------------
1 | # see roles/network_plugin/flannel/defaults/main.yml
2 |
3 | ## interface that should be used for flannel operations
4 | ## This is actually an inventory cluster-level item
5 | # flannel_interface:
6 |
7 | ## Select interface that should be used for flannel operations by regexp on Name or IP
8 | ## This is actually an inventory cluster-level item
9 | ## example: select interface with ip from net 10.0.0.0/23
10 | ## single quote and escape backslashes
11 | # flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'
12 |
13 | # You can choose which type of flannel backend to use: 'vxlan' or 'host-gw'.
14 | # For experimental backends,
15 | # please refer to flannel's docs: https://github.com/coreos/flannel/blob/master/README.md
16 | # flannel_backend_type: "vxlan"
17 | # flannel_vxlan_vni: 1
18 | # flannel_vxlan_port: 8472
19 |
--------------------------------------------------------------------------------
/ansible/hosts/group_vars/k8s_cluster/k8s-net-kube-router.yml:
--------------------------------------------------------------------------------
1 | # See roles/network_plugin/kube-router/defaults/main.yml
2 |
3 | # Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP
4 | # kube_router_run_router: true
5 |
6 | # Enables Network Policy -- sets up iptables to provide ingress firewall for pods
7 | # kube_router_run_firewall: true
8 |
9 | # Enables Service Proxy -- sets up IPVS for Kubernetes Services
10 | # see docs/kube-router.md "Caveats" section
11 | # kube_router_run_service_proxy: false
12 |
13 | # Add the Cluster IP of the service to the RIB so that it gets advertised to the BGP peers.
14 | # kube_router_advertise_cluster_ip: false
15 |
16 | # Add External IP of service to the RIB so that it gets advertised to the BGP peers.
17 | # kube_router_advertise_external_ip: false
18 |
19 | # Add the LoadBalancer IP of the service status, as set by the LB provider, to the RIB so that it gets advertised to the BGP peers.
20 | # kube_router_advertise_loadbalancer_ip: false
21 |
22 | # Adjust the kube-router daemonset manifest template with the changes needed for DSR
23 | # kube_router_enable_dsr: false
24 |
25 | # Array of arbitrary extra arguments to kube-router, see
26 | # https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md
27 | # kube_router_extra_args: []
28 |
29 | # ASN numbers of the BGP peers to which cluster nodes will advertise the cluster IP and each node's pod CIDR.
30 | # kube_router_peer_router_asns: ~
31 |
32 | # The IP address(es) of the external router(s) to which all nodes will peer and advertise the cluster IP and pod CIDRs.
33 | # kube_router_peer_router_ips: ~
34 |
35 | # The remote port of the external BGP peer to which all nodes will peer. If not set, the default BGP port (179) will be used.
36 | # kube_router_peer_router_ports: ~
37 |
38 | # Sets up node CNI to allow hairpin mode; requires node reboots, see
39 | # https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode
40 | # kube_router_support_hairpin_mode: false
41 |
42 | # Select DNS Policy ClusterFirstWithHostNet, ClusterFirst, etc.
43 | # kube_router_dns_policy: ClusterFirstWithHostNet
44 |
45 | # Array of annotations for master
46 | # kube_router_annotations_master: []
47 |
48 | # Array of annotations for every node
49 | # kube_router_annotations_node: []
50 |
51 | # Array of common annotations for every node
52 | # kube_router_annotations_all: []
53 |
54 | # Enables scraping kube-router metrics with Prometheus
55 | # kube_router_enable_metrics: false
56 |
57 | # Path to serve Prometheus metrics on
58 | # kube_router_metrics_path: /metrics
59 |
60 | # Prometheus metrics port to use
61 | # kube_router_metrics_port: 9255
62 |
--------------------------------------------------------------------------------
/ansible/hosts/group_vars/k8s_cluster/k8s-net-macvlan.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # private interface, on a l2-network
3 | macvlan_interface: "eth1"
4 |
5 | # Enable nat in default gateway network interface
6 | enable_nat_default_gateway: true
7 |
--------------------------------------------------------------------------------
/ansible/hosts/group_vars/k8s_cluster/k8s-net-weave.yml:
--------------------------------------------------------------------------------
1 | # see roles/network_plugin/weave/defaults/main.yml
2 |
3 | # Weave's network password for encryption, if null then no network encryption.
4 | # weave_password: ~
5 |
6 | # If set to 1, disable checking for new Weave Net versions (default is blank,
7 | # i.e. check is enabled)
8 | # weave_checkpoint_disable: false
9 |
10 | # Soft limit on the number of connections between peers. Defaults to 100.
11 | # weave_conn_limit: 100
12 |
13 | # Weave Net defaults to enabling hairpin on the bridge side of the veth pair
14 | # for containers attached. If you need to disable hairpin, e.g. your kernel is
15 | # one of those that can panic if hairpin is enabled, then you can disable it by
16 | # setting `HAIRPIN_MODE=false`.
17 | # weave_hairpin_mode: true
18 |
19 | # The range of IP addresses used by Weave Net and the subnet they are placed in
20 | # (CIDR format; default 10.32.0.0/12)
21 | # weave_ipalloc_range: "{{ kube_pods_subnet }}"
22 |
23 | # Set to 0 to disable Network Policy Controller (default is on)
24 | # weave_expect_npc: "{{ enable_network_policy }}"
25 |
26 | # List of addresses of peers in the Kubernetes cluster (default is to fetch the
27 | # list from the api-server)
28 | # weave_kube_peers: ~
29 |
30 | # Set the initialization mode of the IP Address Manager (defaults to consensus
31 | # amongst the KUBE_PEERS)
32 | # weave_ipalloc_init: ~
33 |
34 | # Set the IP address used as a gateway from the Weave network to the host
35 | # network - this is useful if you are configuring the addon as a static pod.
36 | # weave_expose_ip: ~
37 |
38 | # Address and port that the Weave Net daemon will serve Prometheus-style
39 | # metrics on (defaults to 0.0.0.0:6782)
40 | # weave_metrics_addr: ~
41 |
42 | # Address and port that the Weave Net daemon will serve status requests on
43 | # (defaults to disabled)
44 | # weave_status_addr: ~
45 |
46 | # Weave Net defaults to 1376 bytes, but you can set a smaller size if your
47 | # underlying network has a tighter limit, or set a larger size for better
48 | # performance if your network supports jumbo frames (e.g. 8916)
49 | # weave_mtu: 1376
50 |
51 | # Set to 1 to preserve the client source IP address when accessing Services
52 | # annotated with `service.spec.externalTrafficPolicy=Local`. The feature works
53 | # only with Weave IPAM (default).
54 | # weave_no_masq_local: true
55 |
56 | # Set to 'nft' to use the nftables backend for iptables (default is iptables)
57 | # weave_iptables_backend: iptables
58 |
59 | # Extra variables that are passed to launch.sh, useful for enabling seed mode, see
60 | # https://www.weave.works/docs/net/latest/tasks/ipam/ipam/
61 | # weave_extra_args: ~
62 |
63 | # Extra variables for weave_npc that are passed to launch.sh, useful for changing the log level, e.g. --log-level=error
64 | # weave_npc_extra_args: ~
65 |
--------------------------------------------------------------------------------
/ansible/hosts/groups:
--------------------------------------------------------------------------------
1 | # ## Configure 'ip' variable to bind kubernetes services on a
2 | # ## different ip than the default iface
3 | # ## We should set etcd_member_name for the etcd cluster. Nodes that are not etcd members do not need to set the value, or can set it to an empty string.
4 |
5 | [_controller]
6 | [_controller_etcd]
7 | [_etcd]
8 | [_worker]
9 |
10 | # ## configure a bastion host if your nodes are not directly reachable
11 | # [bastion]
12 | # bastion ansible_host=x.x.x.x ansible_user=some_user
13 |
14 | [kube_control_plane:children]
15 | _controller
16 | _controller_etcd
17 | # node1
18 | # node2
19 | # node3
20 |
21 | [etcd:children]
22 | _controller_etcd
23 | _etcd
24 | # node1
25 | # node2
26 | # node3
27 |
28 | [kube_node:children]
29 | _worker
30 | # node2
31 | # node3
32 | # node4
33 | # node5
34 | # node6
35 |
36 | [calico_rr]
37 |
38 | [k8s_cluster:children]
39 | kube_control_plane
40 | kube_node
41 | calico_rr
42 |
--------------------------------------------------------------------------------
/ansible/hosts/inventory_aws_ec2.yml:
--------------------------------------------------------------------------------
1 | plugin: aws_ec2
2 | regions:
3 | - ap-northeast-2
4 | keyed_groups:
5 | - key: tags.ansibleNodeType
6 | filters:
7 | instance-state-name : running
8 | tag:Owner: alicek106
9 | compose:
10 | ansible_host: private_ip_address
11 |
--------------------------------------------------------------------------------
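The aws_ec2 plugin above builds groups from the `ansibleNodeType` tag. Because `keyed_groups` is used with the default (empty) prefix and `_` separator, the generated group names carry a leading underscore and have dots sanitized to underscores (e.g. `_controller`, `_etcd`, `_worker`, `_controller_etcd`), which is what the static `children` groups in `hosts/groups` map onto. A quick way to sanity-check the dynamic inventory, assuming your AWS credentials are exported and `boto3`/`botocore` (required by the `aws_ec2` plugin) are installed:

```
# Run from the ansible/ directory
$ ansible-inventory -i hosts/inventory_aws_ec2.yml --graph
```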
/ansible/infra.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | #####################
3 | # Install Python
4 | #####################
5 |
6 | - hosts: all
7 | gather_facts: false # As Python is not yet installed, we cannot gather host facts
8 |
9 | tasks:
10 | - name: Install Python
11 | raw: "apt update && apt-get -y -q install python3 python3-pip"
12 | become: true
13 | retries: 10
14 | delay: 20
15 | # If you run this playbook immediately after Terraform, ssh may not be ready to respond yet
16 |
--------------------------------------------------------------------------------
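The playbook above only bootstraps Python so that regular Ansible modules can run afterwards. A minimal sketch of an invocation, run from the `ansible/` directory; the `-u ubuntu` SSH user is an assumption that depends on the AMI chosen in `variables.tf`, and `ansible.cfg` may already supply some of these flags:

```
$ ansible-playbook -i hosts/inventory_aws_ec2.yml \
    --private-key ../keys/tf-kube -u ubuntu infra.yaml
```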
/ansible/ping.yaml:
--------------------------------------------------------------------------------
1 | # Test playbook to check groups
2 | # ansible-playbook --private-key ../keys/tf-kube ping.yaml
3 | ---
4 | - name: ping them all
5 | hosts: kube_control_plane
6 | tasks:
7 | - name: pinging
8 | ping:
9 |
--------------------------------------------------------------------------------
/ansible/requirements.txt:
--------------------------------------------------------------------------------
1 | ansible>=2.7.8
2 | jinja2>=2.9.6
3 | netaddr
4 | pbr>=1.6
5 | hvac
6 | boto
7 |
--------------------------------------------------------------------------------
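These Python dependencies are meant to be installed on the control host before running the playbooks, for example:

```
# Run from the ansible/ directory
$ pip3 install -r requirements.txt
```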
/keys/.keep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alicek106/aws-terraform-kubernetes/7bca384a56a4f8e2f5213067edef802cf74bfcf3/keys/.keep
--------------------------------------------------------------------------------
/pictures/kube.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alicek106/aws-terraform-kubernetes/7bca384a56a4f8e2f5213067edef802cf74bfcf3/pictures/kube.png
--------------------------------------------------------------------------------
/pictures/kube2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alicek106/aws-terraform-kubernetes/7bca384a56a4f8e2f5213067edef802cf74bfcf3/pictures/kube2.png
--------------------------------------------------------------------------------
/terraform/0-aws.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = var.region
3 | }
4 |
--------------------------------------------------------------------------------
/terraform/1-vpc.tf:
--------------------------------------------------------------------------------
1 | ############
2 | ## VPC
3 | ############
4 |
5 | resource "aws_vpc" "kubernetes" {
6 | cidr_block = var.vpc_cidr
7 | enable_dns_hostnames = true
8 |
9 | tags = merge(
10 | local.common_tags,
11 | {
12 | "Name" = "${var.vpc_name}",
13 | "Owner" = "${var.owner}"
14 | }
15 | )
16 | }
17 |
18 | # DHCP Options are not actually required, being identical to the Default Option Set
19 | resource "aws_vpc_dhcp_options" "dns_resolver" {
20 | domain_name = "${var.region}.compute.internal"
21 | domain_name_servers = ["AmazonProvidedDNS"]
22 |
23 | tags = merge(
24 | local.common_tags,
25 | {
26 | "Name" = "${var.vpc_name}",
27 | "Owner" = "${var.owner}"
28 | }
29 | )
30 | }
31 |
32 | resource "aws_vpc_dhcp_options_association" "dns_resolver" {
33 | vpc_id = aws_vpc.kubernetes.id
34 | dhcp_options_id = aws_vpc_dhcp_options.dns_resolver.id
35 | }
36 |
37 | ##########
38 | # Keypair
39 | ##########
40 |
41 | resource "aws_key_pair" "default_keypair" {
42 | key_name = var.default_keypair_name
43 | public_key = local.default_keypair_public_key
44 | }
45 |
46 |
47 | ############
48 | ## Subnets
49 | ############
50 |
51 | # Subnet (public)
52 | resource "aws_subnet" "kubernetes" {
53 | vpc_id = aws_vpc.kubernetes.id
54 | cidr_block = var.vpc_cidr
55 | availability_zone = var.zone
56 |
57 | tags = merge(
58 | local.common_tags,
59 | {
60 | "Name" = "${var.vpc_name}",
61 | "Owner" = "${var.owner}"
62 | }
63 | )
64 | }
65 |
66 | resource "aws_internet_gateway" "gw" {
67 | vpc_id = aws_vpc.kubernetes.id
68 |
69 | tags = merge(
70 | local.common_tags,
71 | {
72 | "Name" = "${var.vpc_name}",
73 | "Owner" = "${var.owner}"
74 | }
75 | )
76 | }
77 |
78 | ############
79 | ## Routing
80 | ############
81 |
82 | resource "aws_route_table" "kubernetes" {
83 | vpc_id = aws_vpc.kubernetes.id
84 |
85 | # Default route through Internet Gateway
86 | route {
87 | cidr_block = "0.0.0.0/0"
88 | gateway_id = aws_internet_gateway.gw.id
89 | }
90 |
91 | tags = merge(
92 | local.common_tags,
93 | {
94 | "Name" = "${var.vpc_name}",
95 | "Owner" = "${var.owner}"
96 | }
97 | )
98 | }
99 |
100 | resource "aws_route_table_association" "kubernetes" {
101 | subnet_id = aws_subnet.kubernetes.id
102 | route_table_id = aws_route_table.kubernetes.id
103 | }
104 |
105 |
106 | ############
107 | ## Security
108 | ############
109 |
110 | resource "aws_security_group" "kubernetes" {
111 | vpc_id = aws_vpc.kubernetes.id
112 | name = "kubernetes"
113 |
114 | # Allow all outbound
115 | egress {
116 | from_port = 0
117 | to_port = 0
118 | protocol = "-1"
119 | cidr_blocks = ["0.0.0.0/0"]
120 | }
121 |
122 | # Allow ICMP from control host IP
123 | ingress {
124 | from_port = 8
125 | to_port = 0
126 | protocol = "icmp"
127 | cidr_blocks = ["${var.control_cidr}"]
128 | }
129 |
130 | # Allow all internal
131 | ingress {
132 | from_port = 0
133 | to_port = 0
134 | protocol = "-1"
135 | cidr_blocks = ["${var.vpc_cidr}"]
136 | }
137 |
138 | # Allow all traffic from the API ELB
139 | ingress {
140 | from_port = 0
141 | to_port = 0
142 | protocol = "-1"
143 | security_groups = ["${aws_security_group.kubernetes_api.id}"]
144 | }
145 |
146 | # Allow all traffic from control host IP
147 | ingress {
148 | from_port = 0
149 | to_port = 0
150 | protocol = "-1"
151 | cidr_blocks = ["${var.control_cidr}"]
152 | }
153 |
154 | tags = merge(
155 | local.common_tags,
156 | {
157 | "Name" = "${var.vpc_name}",
158 | "Owner" = "${var.owner}"
159 | }
160 | )
161 | }
162 |
--------------------------------------------------------------------------------
/terraform/2-etcd.tf:
--------------------------------------------------------------------------------
1 | #########################
2 | # etcd cluster instances
3 | #########################
4 | # Delete the comments below to activate etcd.
5 | # I commented them out because I'm using kubespray with only 1 master and 1 etcd on 1 instance (default)
6 | resource "aws_instance" "etcd" {
7 | count = var.number_of_etcd
8 | ami = lookup(var.amis, var.region)
9 | instance_type = var.etcd_instance_type
10 |
11 | subnet_id = aws_subnet.kubernetes.id
12 | private_ip = cidrhost(var.vpc_cidr, 10 + count.index)
13 | associate_public_ip_address = true # Instances have public, dynamic IP
14 |
15 | availability_zone = var.zone
16 | vpc_security_group_ids = ["${aws_security_group.kubernetes.id}"]
17 | key_name = var.default_keypair_name
18 | tags = merge(
19 | local.common_tags,
20 | {
21 | "Owner" = "${var.owner}",
22 | "Name" = "etcd-${count.index}",
23 | "ansibleFilter" = "${var.ansibleFilter}",
24 | "ansibleNodeType" = "etcd",
25 | "ansibleNodeName" = "etcd.${count.index}"
26 | }
27 | )
28 | }
29 |
--------------------------------------------------------------------------------
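The `private_ip` above is derived from the VPC CIDR with `cidrhost()`: etcd nodes start at host offset 10, controllers at 20, workers at 30, and the combined controller/etcd nodes at 40 (see the instance files below). You can preview the resulting addresses in `terraform console`; the CIDR shown here is only an illustrative value, the real one comes from `var.vpc_cidr` in `variables.tf`:

```
$ terraform console
> cidrhost("10.43.0.0/16", 10)
"10.43.0.10"
> cidrhost("10.43.0.0/16", 30)
"10.43.0.30"
```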
/terraform/3-workers.tf:
--------------------------------------------------------------------------------
1 |
2 | ############################################
3 | # K8s Worker (aka Nodes, Minions) Instances
4 | ############################################
5 |
6 | resource "aws_instance" "worker" {
7 | count = var.number_of_worker
8 | ami = lookup(var.amis, var.region)
9 | instance_type = var.worker_instance_type
10 |
11 | iam_instance_profile = aws_iam_instance_profile.kubernetes.id
12 |
13 | subnet_id = aws_subnet.kubernetes.id
14 | private_ip = cidrhost(var.vpc_cidr, 30 + count.index)
15 | associate_public_ip_address = true # Instances have public, dynamic IP
16 | source_dest_check = false # TODO Required??
17 |
18 | availability_zone = var.zone
19 | vpc_security_group_ids = ["${aws_security_group.kubernetes.id}"]
20 | key_name = var.default_keypair_name
21 |
22 | tags = merge(
23 | local.common_tags,
24 | {
25 | "Owner" = "${var.owner}",
26 | "Name" = "worker-${count.index}",
27 | "ansibleFilter" = "${var.ansibleFilter}",
28 | "ansibleNodeType" = "worker",
29 | "ansibleNodeName" = "worker.${count.index}"
30 | }
31 | )
32 | }
33 |
34 | output "kubernetes_workers_public_ip" {
35 | value = join(",", aws_instance.worker.*.public_ip)
36 | }
37 |
--------------------------------------------------------------------------------
/terraform/4-controllers.tf:
--------------------------------------------------------------------------------
1 | ############################
2 | # K8s Control Plane instances
3 | ############################
4 |
5 | resource "aws_instance" "controller" {
6 | count = var.number_of_controller
7 | ami = lookup(var.amis, var.region)
8 | instance_type = var.controller_instance_type
9 |
10 | iam_instance_profile = aws_iam_instance_profile.kubernetes.id
11 |
12 | subnet_id = aws_subnet.kubernetes.id
13 | private_ip = cidrhost(var.vpc_cidr, 20 + count.index)
14 | associate_public_ip_address = true # Instances have public, dynamic IP
15 | source_dest_check = false # TODO Required??
16 |
17 | availability_zone = var.zone
18 | vpc_security_group_ids = ["${aws_security_group.kubernetes.id}"]
19 | key_name = var.default_keypair_name
20 | tags = merge(
21 | local.common_tags,
22 | {
23 | "Owner" = "${var.owner}"
24 | "Name" = "controller-${count.index}"
25 | "ansibleFilter" = "${var.ansibleFilter}"
26 | "ansibleNodeType" = "controller"
27 | "ansibleNodeName" = "controller.${count.index}"
28 | }
29 | )
30 | }
31 |
32 | resource "aws_instance" "controller_etcd" {
33 | count = var.number_of_controller_etcd
34 | ami = lookup(var.amis, var.region)
35 | instance_type = var.controller_instance_type
36 |
37 | iam_instance_profile = aws_iam_instance_profile.kubernetes.id
38 |
39 | subnet_id = aws_subnet.kubernetes.id
40 | private_ip = cidrhost(var.vpc_cidr, 40 + count.index)
41 | associate_public_ip_address = true # Instances have public, dynamic IP
42 | source_dest_check = false # TODO Required??
43 |
44 | availability_zone = var.zone
45 | vpc_security_group_ids = ["${aws_security_group.kubernetes.id}"]
46 | key_name = var.default_keypair_name
47 |
48 | tags = merge(
49 | local.common_tags,
50 | {
51 | "Owner" = "${var.owner}",
52 | "Name" = "controller-etcd-${count.index}",
53 | "ansibleFilter" = "${var.ansibleFilter}",
54 | "ansibleNodeType" = "controller.etcd",
55 | "ansibleNodeName" = "controller.etcd.${count.index}"
56 | }
57 | )
58 | }
59 |
60 | ###############################
61 | ## Kubernetes API Load Balancer
62 | ###############################
63 |
64 | resource "aws_elb" "kubernetes_api" {
65 | name = var.elb_name
66 | instances = aws_instance.controller[*].id
67 | subnets = ["${aws_subnet.kubernetes.id}"]
68 | cross_zone_load_balancing = false
69 |
70 | security_groups = ["${aws_security_group.kubernetes_api.id}"]
71 |
72 | listener {
73 | lb_port = 6443
74 | instance_port = 6443
75 | lb_protocol = "TCP"
76 | instance_protocol = "TCP"
77 | }
78 |
79 | health_check {
80 | healthy_threshold = 2
81 | unhealthy_threshold = 2
82 | timeout = 15
83 | target = "HTTPS:6443/"
84 | interval = 30
85 | }
86 |
87 | tags = merge(
88 | local.common_tags,
89 | {
90 | "Name" = "kubernetes",
91 | "Owner" = "${var.owner}"
92 | }
93 | )
94 | }
95 |
96 | ############
97 | ## Security
98 | ############
99 |
100 | resource "aws_security_group" "kubernetes_api" {
101 | vpc_id = aws_vpc.kubernetes.id
102 | name = "kubernetes-api"
103 |
104 | # Allow inbound traffic to the port used by Kubernetes API HTTPS
105 | ingress {
106 | from_port = 6443
107 | to_port = 6443
108 | protocol = "TCP"
109 | cidr_blocks = ["${var.control_cidr}"]
110 | }
111 |
112 | # Allow all outbound traffic
113 | egress {
114 | from_port = 0
115 | to_port = 0
116 | protocol = "-1"
117 | cidr_blocks = ["0.0.0.0/0"]
118 | }
119 |
120 | tags = merge(
121 | local.common_tags,
122 | {
123 | "Name" = "kubernetes-api",
124 | "Owner" = "${var.owner}"
125 | }
126 | )
127 | }
128 |
129 | ############
130 | ## Outputs
131 | ############
132 |
133 | output "kubernetes_api_dns_name" {
134 | value = aws_elb.kubernetes_api.dns_name
135 | }
136 |
--------------------------------------------------------------------------------
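After `terraform apply` completes, the outputs declared in `3-workers.tf` and `4-controllers.tf` can be read back at any time, for example to reach the worker nodes or the Kubernetes API load balancer:

```
$ terraform output kubernetes_workers_public_ip
$ terraform output kubernetes_api_dns_name
```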
/terraform/5-iam.tf:
--------------------------------------------------------------------------------
1 | ##########################
2 | # IAM: Policies and Roles
3 | ##########################
4 |
5 | # The following Roles and Policy are mostly for future use
6 |
7 | resource "aws_iam_role" "kubernetes" {
8 | name = "kubernetes"
9 | assume_role_policy = <