├── images ├── workernodes.png ├── CF-infrastructure.png ├── weavenetworkpods.png └── controller-deployment-test.png ├── deployments ├── ansible.cfg ├── env.yaml ├── deploy_weavenet.yml ├── deploy_controlermanager.yml ├── client_tools.yml ├── kubectl_remote.yml ├── cert_vars.yml ├── deploy_healthz.yml ├── rbac_authorization.yml ├── deploy_nginx.yml ├── distribute_k8s_files.yml ├── smoke_test.yml ├── deploy_scheduler.yml ├── inventory.sh ├── deploy_api-server.yml ├── deploy_etcd_cluster.yml ├── create_kubeconfigs.yml ├── create_ca_certs.yml └── workernodes.yml ├── easyWay.md ├── coreDNS.md ├── easy_script.sh ├── coredns-1.9.3.yaml ├── README.md ├── LICENSE └── infrastructure └── k8s_aws_instances.yml /images/workernodes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/berry2012/kubernetes-the-hard-way-on-aws/HEAD/images/workernodes.png -------------------------------------------------------------------------------- /images/CF-infrastructure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/berry2012/kubernetes-the-hard-way-on-aws/HEAD/images/CF-infrastructure.png -------------------------------------------------------------------------------- /images/weavenetworkpods.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/berry2012/kubernetes-the-hard-way-on-aws/HEAD/images/weavenetworkpods.png -------------------------------------------------------------------------------- /images/controller-deployment-test.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/berry2012/kubernetes-the-hard-way-on-aws/HEAD/images/controller-deployment-test.png -------------------------------------------------------------------------------- /deployments/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | host_key_checking=false 3 | 4 | #[ssh_connection] 5 | #ssh_args = -F ./ssh.cfg -o ControlMaster=auto -o ControlPersist=30m 6 | #control_path = ~/.ssh/ansible-%%r@%%h:%%p -------------------------------------------------------------------------------- /deployments/env.yaml: -------------------------------------------------------------------------------- 1 | # define your environment variables 2 | # ----static variables--------- 3 | 4 | # all cloud servers IP and hostnames 5 | CERT_HOSTNAME: "10.32.0.1,127.0.0.1,localhost,kubernetes.default,{{ worker1_hostnames }},{{ controller2_hostnames }},{{ worker2_hostnames }},{{ controller_api_server_lb_hostnames }},{{ controller1_hostnames }}" 6 | 7 | # vpc cidr 8 | POD_CIDR: "192.168.0.0/16" 9 | 10 | 11 | # ------ leave these variables untouched 12 | controller_keys: 13 | - ca-key.pem 14 | - ca.pem 15 | - kubernetes-key.pem 16 | - kubernetes.pem 17 | - service-account-key.pem 18 | - service-account.pem 19 | 20 | initial_cluster: "controller1=https://{{ controller1_private_ip }}:2380,controller2=https://{{ controller2_private_ip }}:2380" 21 | 22 | kubeconfigs: 23 | - admin.kubeconfig 24 | - kube-controller-manager.kubeconfig 25 | - kube-scheduler.kubeconfig 26 | 27 | worker1_keys: 28 | - worker1.pem 29 | - worker1-key.pem 30 | 31 | worker2_keys: 32 | - worker2.pem 33 | - worker2-key.pem 34 | 35 | controller_config_files: "encryption-config.yaml" 36 | 37 | artifacts_location: "/tmp" 38 | 39 | 40 | # -------Dynamic variables here ---------- # 41 
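# Illustrative example (not part of the repo): after easy_script.sh runs `cat variables.text >> env.yaml`,
# the block generated by cert_vars.yml lands here. The variable names below match the inventory host
# names; the IPs and internal DNS names are made-up placeholders:
#
# controller1_hostnames: 'controller1,10.192.20.11,ip-10-192-20-11,ip-10-192-20-11.eu-west-1.compute.internal'
# controller1_private_ip: '10.192.20.11'
# worker1_hostnames: 'worker1,10.192.21.11,ip-10-192-21-11,ip-10-192-21-11.eu-west-1.compute.internal'
# worker1_private_ip: '10.192.21.11'
# controller_api_server_lb_hostnames: 'controller_api_server_lb,10.192.10.50,ip-10-192-10-50,ip-10-192-10-50.eu-west-1.compute.internal'
# controller_api_server_lb_private_ip: '10.192.10.50'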
| -------------------------------------------------------------------------------- /deployments/deploy_weavenet.yml: -------------------------------------------------------------------------------- 1 | - hosts: workers 2 | become: true 3 | become_user: root 4 | gather_facts: true 5 | 6 | vars: 7 | config_dir: "/etc/kubernetes/config" 8 | cert_remote_location: "/home/ubuntu" 9 | vars_files: 10 | - ./env.yaml 11 | 12 | tasks: 13 | - name: enable IP forwarding 14 | shell: | 15 | sysctl net.ipv4.conf.all.forwarding=1 16 | echo "net.ipv4.conf.all.forwarding=1" | sudo tee -a /etc/sysctl.conf 17 | 18 | - name: Install Weave Network from Kubectl Remote Machine 19 | hosts: localhost 20 | become: true 21 | become_user: ubuntu 22 | gather_facts: false 23 | 24 | tasks: 25 | - name: Install Weave Net 26 | shell: kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')&env.IPALLOC_RANGE=10.200.0.0/16" 27 | 28 | - name: Pause for 1 minute to build containers 29 | pause: 30 | minutes: 1 31 | 32 | - name: Get Weave Net pods 33 | shell: kubectl get pods -n kube-system 34 | register: weave_pods 35 | 36 | - name: Show Weave Net pods 37 | debug: 38 | msg: 39 | - "{{ weave_pods.stdout_lines }}" -------------------------------------------------------------------------------- /easyWay.md: -------------------------------------------------------------------------------- 1 | 2 | # Kubernetes the hard way on AWS made easy 3 | 4 | **This is an easier option: After preparing the Ansible inventory, execute the command below:** 5 | 6 | `ubuntu@ip-10-192-10-137:~$ bash easy_script.sh` 7 | 8 | output: 9 | ``` 10 | TASK [Get logs from pod] **************************************************************** 11 | changed: [localhost] => {"changed": true, "cmd": "POD_NAME=$(kubectl get pods -l run=nginx -o jsonpath=\"{.items[0].metadata.name}\")\nkubectl exec -i $POD_NAME -- nginx -v\n", "delta": "0:00:00.265038", "end": "2022-05-06 20:19:15.959191", "rc": 0, "start": "2022-05-06 20:19:15.694153", "stderr": "nginx version: nginx/1.21.6", "stderr_lines": ["nginx version: nginx/1.21.6"], "stdout": "", "stdout_lines": []} 12 | 13 | PLAY RECAP ****************************************************************************** 14 | controller1 : ok=2 changed=1 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0 15 | localhost : ok=8 changed=5 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0 16 | 17 | 18 | easy deployment completed!!! 
19 | 20 | 21 | ubuntu@ip-10-192-10-137:~$ kubectl get nodes 22 | NAME STATUS ROLES AGE VERSION 23 | worker1 Ready 15m v1.21.0 24 | worker2 Ready 14m v1.21.0 25 | ubuntu@ip-10-192-10-137:~$ 26 | ``` 27 | 28 | -------------------------------------------------------------------------------- /coreDNS.md: -------------------------------------------------------------------------------- 1 | # Setup CoreDNS and Test 2 | 3 | **Follow the steps below to setup core DNS in the kubernetes cluster from the ansible server as kubectl remote host** 4 | 5 | 6 | ``` 7 | Deploy DNS Cluster Add-ons 8 | [ubuntu@ip-192-168-91-186 ~]$ kubectl apply -f https://raw.githubusercontent.com/berry2012/kubernetes-the-hard-way-on-aws/release-1.23.9/coredns-1.9.3.yaml 9 | 10 | [ubuntu@ip-192-168-91-186 ~]$ kubectl get pods -l k8s-app=kube-dns -n kube-system 11 | NAME READY STATUS RESTARTS AGE 12 | coredns-8494f9c688-hr5hq 1/1 Running 0 18s 13 | coredns-8494f9c688-wxp2r 1/1 Running 0 18s 14 | 15 | [ubuntu@ip-192-168-91-186 ~]$ kubectl run busybox --image=busybox:1.28 --command -- sleep 3600 16 | pod/busybox created 17 | 18 | [ubuntu@ip-192-168-91-186 ~]$ kubectl get pods -l run=busybox 19 | NAME READY STATUS RESTARTS AGE 20 | busybox 1/1 Running 0 8s 21 | 22 | [ubuntu@ip-192-168-91-186 ~]$ kubectl exec -ti busybox -- nslookup kubernetes 23 | Server: 10.32.0.10 24 | Address 1: 10.32.0.10 kube-dns.kube-system.svc.cluster.local 25 | 26 | Name: kubernetes 27 | Address 1: 10.32.0.1 kubernetes.default.svc.cluster.local 28 | [ubuntu@ip-192-168-91-186 ~]$ 29 | 30 | [ubuntu@ip-192-168-91-186 ~]$ kubectl exec -it busybox -- cat /etc/resolv.conf 31 | search default.svc.cluster.local svc.cluster.local cluster.local eu-west-1.compute.internal 32 | nameserver 10.32.0.10 33 | options ndots:5 34 | ``` -------------------------------------------------------------------------------- /deployments/deploy_controlermanager.yml: -------------------------------------------------------------------------------- 1 | # deploy controller manager 2 | - name: copy kubernetes keys to remote hosts only 3 | copy: 4 | src: "/home/ubuntu/{{ item }}" 5 | dest: /var/lib/kubernetes/ 6 | remote_src: yes 7 | loop: 8 | - kube-controller-manager.kubeconfig 9 | 10 | - name: Generate the kube-controller-manager unit file for systemd 11 | shell: | 12 | cat << EOF | sudo tee /etc/systemd/system/kube-controller-manager.service 13 | [Unit] 14 | Description=Kubernetes Controller Manager 15 | Documentation=https://github.com/kubernetes/kubernetes 16 | 17 | [Service] 18 | ExecStart=/usr/local/bin/kube-controller-manager \\ 19 | --address=0.0.0.0 \\ 20 | --cluster-cidr=10.200.0.0/16 \\ 21 | --cluster-name=kubernetes \\ 22 | --cluster-signing-cert-file=/var/lib/kubernetes/ca.pem \\ 23 | --cluster-signing-key-file=/var/lib/kubernetes/ca-key.pem \\ 24 | --kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig \\ 25 | --leader-elect=true \\ 26 | --root-ca-file=/var/lib/kubernetes/ca.pem \\ 27 | --service-account-private-key-file=/var/lib/kubernetes/service-account-key.pem \\ 28 | --service-cluster-ip-range=10.32.0.0/24 \\ 29 | --use-service-account-credentials=true \\ 30 | --v=2 31 | Restart=on-failure 32 | RestartSec=5 33 | 34 | [Install] 35 | WantedBy=multi-user.target 36 | EOF -------------------------------------------------------------------------------- /deployments/client_tools.yml: -------------------------------------------------------------------------------- 1 | - hosts: localhost 2 | become: true 3 | become_user: ubuntu 4 | gather_facts: true 5 | 6 | vars: 
7 | RELEASE: "v1.23.9" 8 | 9 | tasks: 10 | - name: Install CFSSL 11 | get_url: 12 | url: "{{ item }}" 13 | dest: /usr/local/bin/ 14 | mode: a+x 15 | loop: 16 | - https://storage.googleapis.com/kubernetes-the-hard-way/cfssl/1.4.1/linux/cfssl 17 | - https://storage.googleapis.com/kubernetes-the-hard-way/cfssl/1.4.1/linux/cfssljson 18 | become_user: root 19 | 20 | - name: get cfssl version 21 | shell: cfssl version 22 | register: cfssl_version 23 | 24 | - name: Show cfssl version 25 | debug: 26 | msg: 27 | - "{{ cfssl_version.stdout_lines }}" 28 | 29 | - name: get cfssljson version 30 | shell: cfssljson --version 31 | register: cfssljson_version 32 | 33 | - name: Show cfssljson version 34 | debug: 35 | msg: 36 | - "{{ cfssljson_version.stdout_lines }}" 37 | 38 | - name: Install kubectl 39 | get_url: 40 | url: https://storage.googleapis.com/kubernetes-release/release/{{ RELEASE }}/bin/linux/amd64/kubectl 41 | dest: /usr/local/bin/ 42 | mode: a+x 43 | become_user: root 44 | 45 | - name: get kubectl version 46 | shell: kubectl version --client 47 | register: kubectl_version 48 | 49 | - name: Show kubectl version 50 | debug: 51 | msg: 52 | - "{{ kubectl_version.stdout_lines }}" -------------------------------------------------------------------------------- /easy_script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o pipefail 4 | set -o errexit 5 | 6 | echo "Install Client Tools" 7 | ansible-playbook -i inventory -v client_tools.yml 8 | echo "Prepare variables needed for Certificates" 9 | ansible-playbook -i inventory -v cert_vars.yml 10 | echo "Update the existing environment variable files" 11 | cat variables.text >> env.yaml 12 | echo "Provision the CA and generate TLS certificates" 13 | ansible-playbook -i inventory -v create_ca_certs.yml 14 | echo "Generate Kubernetes Configuration files" 15 | ansible-playbook -i inventory -v create_kubeconfigs.yml 16 | echo "Distribute the Kubernetes configuration files to all nodes" 17 | ansible-playbook -i inventory -v distribute_k8s_files.yml 18 | echo "Bootsrap etcd cluster" 19 | ansible-playbook -i inventory -v deploy_etcd_cluster.yml 20 | echo "Bootsrap the Kubernetes Control plane" 21 | ansible-playbook -i inventory -v deploy_api-server.yml 22 | ansible-playbook -i inventory -v rbac_authorization.yml 23 | ansible-playbook -i inventory -v deploy_nginx.yml 24 | echo "Bootsrap the Kubernetes Workernodes" 25 | ansible-playbook -i inventory -v workernodes.yml 26 | echo "Configuring kubectl for Remote Access" 27 | ansible-playbook -i inventory -v kubectl_remote.yml 28 | echo "Configure Networking" 29 | ansible-playbook -i inventory -v deploy_weavenet.yml 30 | echo "Deploy DNS Cluster Add-ons" 31 | kubectl apply -f https://raw.githubusercontent.com/berry2012/kubernetes-the-hard-way-on-aws/release-1.23.9/coredns-1.9.3.yaml 32 | sleep 15 33 | kubectl get pods -l k8s-app=kube-dns -n kube-system 34 | echo "Perform Smoke Test in the cluster" 35 | ansible-playbook -i inventory -v smoke_test.yml 36 | echo "easy deployment completed!!!" 
37 | -------------------------------------------------------------------------------- /deployments/kubectl_remote.yml: -------------------------------------------------------------------------------- 1 | - hosts: localhost 2 | become: true 3 | become_user: ubuntu 4 | gather_facts: true 5 | 6 | vars: 7 | config_dir: "/etc/kubernetes/config" 8 | cert_remote_location: "/home/ubuntu/certs" 9 | vars_files: 10 | - ./env.yaml 11 | 12 | tasks: 13 | - name: Show Control Plane API Endpoint IP 14 | debug: 15 | msg: "API Endpoint: {{ controller_api_server_lb_private_ip }}" 16 | 17 | - name: configure your local kubectl 18 | shell: | 19 | kubectl config set-cluster kubernetes-the-hard-way \ 20 | --certificate-authority={{ cert_remote_location }}/ca.pem \ 21 | --embed-certs=true \ 22 | --server=https://{{ controller_api_server_lb_private_ip }}:6443 23 | 24 | - name: configure admin kubectl user 25 | shell: | 26 | kubectl config set-credentials admin \ 27 | --client-certificate={{ cert_remote_location }}/admin.pem \ 28 | --client-key={{ cert_remote_location }}/admin-key.pem 29 | 30 | - name: Set admin kubectl user 31 | shell: | 32 | kubectl config set-context kubernetes-the-hard-way \ 33 | --cluster=kubernetes-the-hard-way \ 34 | --user=admin 35 | 36 | - name: Use context 37 | shell: kubectl config use-context kubernetes-the-hard-way 38 | 39 | - name: Use kubectl to get pods 40 | shell: kubectl get pods 41 | register: pods 42 | no_log: True 43 | 44 | - name: Show pods results 45 | debug: 46 | msg: 47 | - "{{ pods.stdout_lines }}" 48 | 49 | - name: Use kubectl to get nodes 50 | shell: kubectl get nodes 51 | register: nodes 52 | no_log: True 53 | 54 | - name: Show nodes results 55 | debug: 56 | msg: 57 | - "{{ nodes.stdout_lines }}" -------------------------------------------------------------------------------- /deployments/cert_vars.yml: -------------------------------------------------------------------------------- 1 | - hosts: k8s 2 | gather_facts: true 3 | 4 | vars: 5 | file_store: "/home/ubuntu/variables.text" 6 | 7 | tasks: 8 | 9 | - name: Get internal IP DNS NAME 10 | shell: INTERNAL_IP_DNS=$(curl http://169.254.169.254/latest/meta-data/local-hostname) && echo $INTERNAL_IP_DNS 11 | register: internal_ip_dns 12 | 13 | - name: Setting INTERNAL_IP_DNS as fact 14 | set_fact: INTERNAL_IP_DNS="{{ internal_ip_dns.stdout }}" 15 | 16 | - name: Comment file 17 | lineinfile: 18 | path: "{{ file_store }}" 19 | line: "#--------------------------------Start Dynamic Variables------------------------------------------------------------------#" 20 | create: yes 21 | delegate_to: localhost 22 | 23 | - name: Add hostnames line to a variables files if the file does not exist 24 | lineinfile: 25 | path: "{{ file_store }}" 26 | line: "{{ inventory_hostname }}_hostnames: '{{ inventory_hostname }},{{ ansible_host }},{{ ansible_hostname }},{{ INTERNAL_IP_DNS }}'" 27 | create: yes 28 | delegate_to: localhost 29 | 30 | - name: Add IPs line to a variables files if the file does not exist 31 | lineinfile: 32 | path: "{{ file_store }}" 33 | line: "{{ inventory_hostname }}_private_ip: '{{ ansible_host }}'" 34 | create: yes 35 | delegate_to: localhost 36 | 37 | - name: Comment file 38 | lineinfile: 39 | path: "{{ file_store }}" 40 | line: "#-------------------------------End Dynamic Variables---------------------------------------------------------------#" 41 | create: yes 42 | delegate_to: localhost 43 | 44 | # view the file saved to localhost 45 | - debug: msg="{{lookup('file', '{{ file_store }}') }}" 46 | delegate_to: 
localhost 47 | -------------------------------------------------------------------------------- /deployments/deploy_healthz.yml: -------------------------------------------------------------------------------- 1 | # this is useful for GCP lb 2 | 3 | - hosts: controllers 4 | become: true 5 | become_user: root 6 | gather_facts: true 7 | 8 | vars: 9 | config_dir: "/etc/kubernetes/config" 10 | cert_remote_location: "/home/ubuntu" 11 | ssh_opts: "-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" 12 | vars_files: 13 | - ./env.yaml 14 | 15 | tasks: 16 | - name: Install nginx 17 | apt: 18 | name: nginx 19 | state: present 20 | 21 | - name: Create an nginx configuration for the health check proxy 22 | shell: | 23 | cat > kubernetes.default.svc.cluster.local << EOF 24 | server { 25 | listen 80; 26 | server_name kubernetes.default.svc.cluster.local; 27 | 28 | location /healthz { 29 | proxy_pass https://127.0.0.1:6443/healthz; 30 | proxy_ssl_trusted_certificate /var/lib/kubernetes/ca.pem; 31 | } 32 | } 33 | EOF 34 | 35 | - name: Set up the proxy configuration so that it is loaded by nginx 36 | block: 37 | - shell: mv kubernetes.default.svc.cluster.local /etc/nginx/sites-available/kubernetes.default.svc.cluster.local 38 | 39 | - shell: ln -s /etc/nginx/sites-available/kubernetes.default.svc.cluster.local /etc/nginx/sites-enabled/ 40 | 41 | - shell: systemctl restart nginx 42 | 43 | - shell: systemctl enable nginx 44 | 45 | - name: Verify health check 46 | uri: 47 | url: http://127.0.0.1/healthz 48 | headers: 49 | Host: kubernetes.default.svc.cluster.local 50 | return_content: yes 51 | register: health_check 52 | 53 | - name: Print health_check result 54 | debug: 55 | msg: "{{ health_check.content }}" 56 | 57 | 58 | 59 | -------------------------------------------------------------------------------- /deployments/rbac_authorization.yml: -------------------------------------------------------------------------------- 1 | - hosts: controllers[0] 2 | become: true 3 | become_user: root 4 | gather_facts: true 5 | 6 | vars: 7 | config_dir: "/etc/kubernetes/config" 8 | cert_remote_location: "/home/ubuntu" 9 | vars_files: 10 | - ./env.yaml 11 | 12 | tasks: 13 | 14 | - name: Create a role with the necessary permissions 15 | shell: | 16 | cat << EOF | kubectl apply --kubeconfig admin.kubeconfig -f - 17 | apiVersion: rbac.authorization.k8s.io/v1 18 | kind: ClusterRole 19 | metadata: 20 | annotations: 21 | rbac.authorization.kubernetes.io/autoupdate: "true" 22 | labels: 23 | kubernetes.io/bootstrapping: rbac-defaults 24 | name: system:kube-apiserver-to-kubelet 25 | rules: 26 | - apiGroups: 27 | - "" 28 | resources: 29 | - nodes/proxy 30 | - nodes/stats 31 | - nodes/log 32 | - nodes/spec 33 | - nodes/metrics 34 | verbs: 35 | - "*" 36 | EOF 37 | register: role 38 | 39 | - name: Bind the role to the kubernetes user 40 | shell: | 41 | cat << EOF | kubectl apply --kubeconfig admin.kubeconfig -f - 42 | apiVersion: rbac.authorization.k8s.io/v1 43 | kind: ClusterRoleBinding 44 | metadata: 45 | name: system:kube-apiserver 46 | namespace: "" 47 | roleRef: 48 | apiGroup: rbac.authorization.k8s.io 49 | kind: ClusterRole 50 | name: system:kube-apiserver-to-kubelet 51 | subjects: 52 | - apiGroup: rbac.authorization.k8s.io 53 | kind: User 54 | name: kubernetes 55 | EOF 56 | register: rolebinding 57 | 58 | - name: Show result 59 | debug: 60 | msg: 61 | - "{{ role.stdout_lines }}" 62 | - "{{ rolebinding.stdout_lines }}" -------------------------------------------------------------------------------- 
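A quick, optional check that the two RBAC objects above were actually created — this is a sketch, not a task that exists in rbac_authorization.yml, and it assumes the same working directory and admin.kubeconfig used by the play above:

```
    - name: Verify the ClusterRole and ClusterRoleBinding exist
      shell: |
        kubectl get clusterrole system:kube-apiserver-to-kubelet --kubeconfig admin.kubeconfig
        kubectl get clusterrolebinding system:kube-apiserver --kubeconfig admin.kubeconfig
      register: rbac_check

    - name: Show RBAC verification result
      debug:
        msg:
          - "{{ rbac_check.stdout_lines }}"
```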
/deployments/deploy_nginx.yml: -------------------------------------------------------------------------------- 1 | 2 | # deploy lb to api server controllers 3 | - hosts: lb 4 | become: true 5 | become_user: root 6 | gather_facts: true 7 | 8 | vars: 9 | config_dir: "/etc/kubernetes/config" 10 | cert_remote_location: "/home/ubuntu" 11 | ssh_opts: "-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" 12 | vars_files: 13 | - ./env.yaml 14 | 15 | tasks: 16 | - name: Only run "update_cache=yes" if the last one is more than 3600 seconds ago 17 | ansible.builtin.apt: 18 | update_cache: yes 19 | cache_valid_time: 3600 20 | 21 | - name: Install nginx 22 | apt: 23 | name: nginx 24 | state: present 25 | 26 | - name: Enable service nginx, and not touch the state 27 | service: 28 | name: nginx 29 | enabled: yes 30 | 31 | - name: Create nginx confd 32 | shell: mkdir -p /etc/nginx/tcpconf.d 33 | 34 | - name: Add the following to the end of nginx.conf 35 | lineinfile: 36 | path: /etc/nginx/nginx.conf 37 | line: 'include /etc/nginx/tcpconf.d/*;' 38 | 39 | - name: Setting variables as fact 40 | set_fact: 41 | CONTROLLER0_IP: "{{ controller1_private_ip }}" 42 | CONTROLLER1_IP: "{{ controller2_private_ip }}" 43 | 44 | - name: Create an nginx configuration for the health check proxy 45 | shell: | 46 | cat << EOF | sudo tee /etc/nginx/tcpconf.d/kubernetes.conf 47 | stream { 48 | upstream kubernetes { 49 | server {{ CONTROLLER0_IP }}:6443; 50 | server {{ CONTROLLER1_IP }}:6443; 51 | } 52 | 53 | server { 54 | listen 6443; 55 | listen 443; 56 | proxy_pass kubernetes; 57 | } 58 | } 59 | EOF 60 | 61 | - name: Reload the nginx configuration 62 | shell: nginx -s reload 63 | 64 | - name: Verify cluster 65 | uri: 66 | url: https://localhost:6443/version 67 | validate_certs: no 68 | return_content: yes 69 | register: cluster_result 70 | 71 | - name: Print cluster_result 72 | debug: 73 | msg: "{{ cluster_result.content }}" 74 | 75 | -------------------------------------------------------------------------------- /deployments/distribute_k8s_files.yml: -------------------------------------------------------------------------------- 1 | - hosts: workers 2 | serial: 2 3 | become: true 4 | become_user: root 5 | gather_facts: true 6 | 7 | vars: 8 | cert_location: "/home/ubuntu/certs" 9 | cert_remote_location: "/home/ubuntu" 10 | vars_files: 11 | - ./env.yaml 12 | 13 | tasks: 14 | 15 | - name: copy ca.pem to worker nodes 16 | copy: 17 | src: "{{ cert_location }}/ca.pem" 18 | dest: "{{ cert_remote_location }}" 19 | owner: ubuntu 20 | group: ubuntu 21 | 22 | - name: copy kube-proxy.kubeconfig to worker nodes 23 | copy: 24 | src: "{{ cert_location }}/kube-proxy.kubeconfig" 25 | dest: "{{ cert_remote_location }}" 26 | owner: ubuntu 27 | group: ubuntu 28 | 29 | - name: copy worker1 files to worker node 1 30 | copy: 31 | src: "{{ item }}" 32 | dest: "{{ cert_remote_location }}" 33 | owner: ubuntu 34 | group: ubuntu 35 | loop: 36 | - "{{ cert_location }}/worker1-key.pem" 37 | - "{{ cert_location }}/worker1.pem" 38 | - "{{ cert_location }}/worker1.kubeconfig" 39 | when: inventory_hostname == "worker1" 40 | 41 | - name: copy worker2 files to worker node 2 42 | copy: 43 | src: "{{ item }}" 44 | dest: "{{ cert_remote_location }}" 45 | owner: ubuntu 46 | group: ubuntu 47 | loop: 48 | - "{{ cert_location }}/worker2-key.pem" 49 | - "{{ cert_location }}/worker2.pem" 50 | - "{{ cert_location }}/worker2.kubeconfig" 51 | when: inventory_hostname == "worker2" 52 | 53 | 54 | - hosts: controllers 55 | serial: 2 56 | become: true 57 | 
become_user: root 58 | gather_facts: true 59 | 60 | vars: 61 | cert_location: "/home/ubuntu/certs" 62 | cert_remote_location: "/home/ubuntu" 63 | vars_files: 64 | - ./env.yaml 65 | 66 | tasks: 67 | 68 | - name: copy controller keys to control planes 69 | copy: 70 | src: "{{ cert_location }}/{{ item }}" 71 | dest: "{{ cert_remote_location }}" 72 | owner: ubuntu 73 | group: ubuntu 74 | loop: "{{ controller_keys }}" 75 | 76 | - name: copy controller kubeconfigs to control planes 77 | copy: 78 | src: "{{ cert_location }}/{{ item }}" 79 | dest: "{{ cert_remote_location }}" 80 | owner: ubuntu 81 | group: ubuntu 82 | loop: "{{ kubeconfigs }}" 83 | 84 | - name: copy {{ controller_config_files }} to control planes 85 | copy: 86 | src: "{{ cert_location }}/{{ controller_config_files }}" 87 | dest: "{{ cert_remote_location }}" 88 | owner: ubuntu 89 | group: ubuntu -------------------------------------------------------------------------------- /deployments/smoke_test.yml: -------------------------------------------------------------------------------- 1 | - name: Secret Smoke Test 2 | hosts: localhost 3 | become: true 4 | become_user: ubuntu 5 | gather_facts: false 6 | 7 | tasks: 8 | - name: Create a test secret 9 | shell: kubectl create secret generic kubernetes-the-hard-way --from-literal="mykey=mydata" 10 | 11 | - name: Pause for 5 sec 12 | pause: 13 | seconds: 5 14 | 15 | 16 | - hosts: controllers[0] 17 | become: true 18 | become_user: root 19 | gather_facts: false 20 | 21 | vars: 22 | cert_location: "/home/ubuntu/certs" 23 | cert_remote_location: "/home/ubuntu" 24 | vars_files: 25 | - ./env.yaml 26 | 27 | tasks: 28 | - name: Use kubectl to get nodes on one controller 29 | shell: | 30 | sudo ETCDCTL_API=3 etcdctl get \ 31 | --endpoints=https://127.0.0.1:2379 \ 32 | --cacert=/etc/etcd/ca.pem \ 33 | --cert=/etc/etcd/kubernetes.pem \ 34 | --key=/etc/etcd/kubernetes-key.pem\ 35 | /registry/secrets/default/kubernetes-the-hard-way | hexdump -C 36 | register: output 37 | 38 | - name: Show result 39 | debug: 40 | msg: 41 | - "{{ output.stdout_lines }}" 42 | 43 | - name: Validate if etcd key is prefixed with k8s:enc:aescbc:v1:key1 44 | fail: 45 | msg: 46 | - "aescbc provider was not used to encrypt the data" 47 | when: '"k8s:enc:aescbc" not in output.stdout' 48 | 49 | 50 | 51 | - name: Deployment Smoke Test 52 | hosts: localhost 53 | become: true 54 | become_user: ubuntu 55 | gather_facts: false 56 | 57 | tasks: 58 | - name: Create a a simple nginx deployment 59 | shell: kubectl run nginx --image=nginx 60 | 61 | - name: Pause for 10 sec to build containers 62 | pause: 63 | seconds: 10 64 | 65 | - name: Verify that the deployment created a pod and that the pod is running 66 | shell: kubectl get pods -l run=nginx 67 | register: pod_status 68 | 69 | - name: Validate if nginx pod ran 70 | fail: 71 | msg: 72 | - "nginx pod did not run" 73 | when: "'nginx' not in pod_status.stdout" 74 | 75 | - name: Get logs from pod 76 | shell: | 77 | POD_NAME=$(kubectl get pods -l run=nginx -o jsonpath="{.items[0].metadata.name}") 78 | kubectl logs $POD_NAME 79 | register: pod_logs 80 | 81 | - name: Get the logs from the nginx pod 82 | debug: 83 | msg: 84 | - "{{ pod_logs.stdout_lines }}" 85 | 86 | - name: Get logs from pod 87 | shell: | 88 | POD_NAME=$(kubectl get pods -l run=nginx -o jsonpath="{.items[0].metadata.name}") 89 | kubectl exec -i $POD_NAME -- nginx -v 90 | register: pod_exec 91 | -------------------------------------------------------------------------------- /deployments/deploy_scheduler.yml: 
-------------------------------------------------------------------------------- 1 | # deploy kube scheduler and start services 2 | - name: copy kube config to remote hosts only 3 | copy: 4 | src: "/home/ubuntu/{{ item }}" 5 | dest: /var/lib/kubernetes/ 6 | remote_src: yes 7 | loop: 8 | - kube-scheduler.kubeconfig 9 | 10 | - name: Create the kube-scheduler systemd unit file 11 | shell: | 12 | cat << EOF | sudo tee /etc/systemd/system/kube-scheduler.service 13 | [Unit] 14 | Description=Kubernetes Scheduler 15 | Documentation=https://github.com/kubernetes/kubernetes 16 | 17 | [Service] 18 | ExecStart=/usr/local/bin/kube-scheduler \\ 19 | --config=/etc/kubernetes/config/kube-scheduler.yaml \\ 20 | --v=2 21 | Restart=on-failure 22 | RestartSec=5 23 | 24 | [Install] 25 | WantedBy=multi-user.target 26 | EOF 27 | 28 | - name: Generate the kube-scheduler yaml config file. 29 | shell: | 30 | cat < encryption-config.yaml << EOF 141 | kind: EncryptionConfig 142 | apiVersion: v1 143 | resources: 144 | - resources: 145 | - secrets 146 | providers: 147 | - aescbc: 148 | keys: 149 | - name: key1 150 | secret: ${ENCRYPTION_KEY} 151 | - identity: {} 152 | EOF -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # kubernetes-the-hard-way-on-aws 2 | **Setup Kubernetes the hard way on AWS** 3 | 4 | *This is intended for audience that wants to understand how Kubernetes all fits together in AWS before going to production.* 5 | 6 | *In this tutorial, I deployed the infrastructure as code on AWS using AWS CloudFormation. I configured all the needed packages using Ansible for Configuration as Code.* 7 | 8 | 9 | # Pre-requisites: 10 | - [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) 11 | - [AWS Account](https://aws.amazon.com/premiumsupport/knowledge-center/create-and-activate-aws-account/) 12 | 13 | # Cluster Details 14 | - kubernetes v1.23.9 15 | - containerd v1.6.8 16 | - coredns v1.9.3 17 | - cni v1.1.1 18 | - etcd v3.4.20 19 | - weavenetwork 1.23 20 | 21 | ## Node Details 22 | - All the provisioned instances run the same OS 23 | 24 | ``` 25 | ubuntu@ip-10-192-10-110:~$ cat /etc/os-release 26 | NAME="Ubuntu" 27 | VERSION="20.04.4 LTS (Focal Fossa)" 28 | ID=ubuntu 29 | ID_LIKE=debian 30 | PRETTY_NAME="Ubuntu 20.04.4 LTS" 31 | VERSION_ID="20.04" 32 | HOME_URL="https://www.ubuntu.com/" 33 | SUPPORT_URL="https://help.ubuntu.com/" 34 | BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/" 35 | PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy" 36 | VERSION_CODENAME=focal 37 | UBUNTU_CODENAME=focal 38 | 39 | ``` 40 | # Usage Instructions 41 | 42 | 43 | ## Deploying the Infrastructure with CloudFormation 44 | 45 | - Goto AWS Console > Choose Region (e.g. eu-west-1) > CloudFormation > Create Stack 46 | - Use the CF Yaml template in *infrastructure/k8s_aws_instances.yml* 47 | - See image below: 48 | 49 | ![Create Infrastructure](./images/CF-infrastructure.png) 50 | 51 | 52 | 53 | ## 1. 
Accessing the EC2 instances 54 | - Define your global variables 55 | ``` 56 | export LOCAL_SSH_KEY_FILE="~/.ssh/key.pem" 57 | export REGION="eu-west-2" 58 | ``` 59 | 60 | ## Setting up for deployments 61 | - Confirm the instances created and note the Public IP of the Ansible controller server 62 | 63 | ``` 64 | aws ec2 describe-instances --filters "Name=tag:project,Values=k8s-hardway" --query 'Reservations[*].Instances[*].[Placement.AvailabilityZone, State.Name, InstanceId, PrivateIpAddress, PublicIpAddress, [Tags[?Key==`Name`].Value] [0][0]]' --output text --region ${REGION} 65 | 66 | ``` 67 | - Define your Ansible server environment variable 68 | ``` 69 | export ANSIBLE_SERVER_PUBLIC_IP="" 70 | ``` 71 | 72 | - You can use SSH or AWS SSM to access the Ansible Controller Server or any other nodes that were created with the CloudFormation Template 73 | - Connecting via [AWS SSM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/session-manager.html) e.g. 74 | 75 | 76 | ``` 77 | aws ssm start-session --target <instance-id> --region ${REGION} 78 | ``` 79 | 80 | - Transfer your SSH key to the Ansible Server. This will be needed in the Ansible Inventory file. 81 | 82 | ``` 83 | echo "scp -i ${LOCAL_SSH_KEY_FILE} ${LOCAL_SSH_KEY_FILE} ubuntu@${ANSIBLE_SERVER_PUBLIC_IP}:~/.ssh/" 84 | inspect and execute the output 85 | ``` 86 | 87 | 88 | 89 | - To create the inventory file, edit inventory.sh and update the variables SSH_KEY_FILE and REGION accordingly 90 | 91 | ``` 92 | vi deployments/inventory.sh 93 | chmod +x deployments/inventory.sh 94 | bash deployments/inventory.sh 95 | 96 | ``` 97 | 98 | - Transfer all playbooks in deployments/ to the Ansible server 99 | 100 | ``` 101 | cd kubernetes-the-hard-way-on-aws/deployments 102 | 103 | scp -i ${LOCAL_SSH_KEY_FILE} *.yml *.yaml ../inventory *.cfg ubuntu@${ANSIBLE_SERVER_PUBLIC_IP}:~ 104 | 105 | scp -i ${LOCAL_SSH_KEY_FILE} ../easy_script.sh ubuntu@${ANSIBLE_SERVER_PUBLIC_IP}:~ 106 | 107 | ``` 108 | 109 | - Connect to the Ansible Server 110 | ``` 111 | ssh -i ${LOCAL_SSH_KEY_FILE} ubuntu@${ANSIBLE_SERVER_PUBLIC_IP} 112 | 113 | chmod +x easy_script.sh 114 | 115 | LOCAL_SSH_KEY_FILE="~/.ssh/key.pem" # your ssh key 116 | 117 | chmod 400 ~/.ssh/key.pem # your ssh key 118 | ``` 119 | 120 | 121 | 122 | - After building the inventory file, test if all hosts are reachable 123 | 124 | 1. List all hosts to confirm that the inventory file is properly configured 125 | 126 | ``` 127 | ansible all --list-hosts -i inventory 128 | 129 | hosts (5): 130 | controller1 131 | controller2 132 | worker1 133 | worker2 134 | controller_api_server_lb 135 | 136 | ``` 137 | 138 | 2.
Test ping on all the hosts 139 | 140 | ``` 141 | ansible -i inventory k8s -m ping 142 | 143 | worker1 | SUCCESS => { 144 | "ansible_facts": { 145 | "discovered_interpreter_python": "/usr/bin/python3" 146 | }, 147 | "changed": false, 148 | "ping": "pong" 149 | } 150 | controller_api_server_lb | SUCCESS => { 151 | "ansible_facts": { 152 | "discovered_interpreter_python": "/usr/bin/python3" 153 | }, 154 | "changed": false, 155 | "ping": "pong" 156 | } 157 | controller2 | SUCCESS => { 158 | "ansible_facts": { 159 | "discovered_interpreter_python": "/usr/bin/python3" 160 | }, 161 | "changed": false, 162 | "ping": "pong" 163 | } 164 | controller1 | SUCCESS => { 165 | "ansible_facts": { 166 | "discovered_interpreter_python": "/usr/bin/python3" 167 | }, 168 | "changed": false, 169 | "ping": "pong" 170 | } 171 | worker2 | SUCCESS => { 172 | "ansible_facts": { 173 | "discovered_interpreter_python": "/usr/bin/python3" 174 | }, 175 | "changed": false, 176 | "ping": "pong" 177 | } 178 | ubuntu@ip-10-192-10-137:~$ 179 | 180 | ``` 181 | 182 | ## Configuring the Servers with Ansible 183 | **From the Ansible server, execute the Ansible playbook in the following order or For an easier 1 click deployment option, see [instruction](./easyWay.md)** 184 | 185 | 186 | 1. `ansible-playbook -i inventory -v client_tools.yml` 187 | 2. `ansible-playbook -i inventory -v cert_vars.yml` 188 | 3. `cat variables.text >> env.yaml` 189 | 4. `ansible-playbook -i inventory -v create_ca_certs.yml` 190 | 5. `ansible-playbook -i inventory -v create_kubeconfigs.yml` 191 | 6. `ansible-playbook -i inventory -v distribute_k8s_files.yml` 192 | 7. `ansible-playbook -i inventory -v deploy_etcd_cluster.yml` 193 | 8. `ansible-playbook -i inventory -v deploy_api-server.yml` See API Server Bootstrap results below. 194 | 9. `ansible-playbook -i inventory -v rbac_authorization.yml` 195 | 10. `ansible-playbook -i inventory -v deploy_nginx.yml` 196 | 11. `ansible-playbook -i inventory -v workernodes.yml` See Worker Nodes Bootstrap results below. 197 | 12. `ansible-playbook -i inventory -v kubectl_remote.yml` 198 | 13. `ansible-playbook -i inventory -v deploy_weavenet.yml` See Weavenetwork pods results below. 199 | 14. [Setup coreDNS](./coreDNS.md) 200 | 15. `ansible-playbook -i inventory -v smoke_test.yml` 201 | 202 | 203 | 204 | ### API Server Bootstrap Results: 205 | ![Successful Controller Deployment ](./images/controller-deployment-test.png) 206 | 207 | 208 | ### Worker Nodes Bootstrap Results. Nodes are in NotReady state because we haven't configured networking. 
209 | ![Successful Worker Nodes Bootstrapping ](./images/workernodes.png) 210 | 211 | ### Weavenetwork pods results 212 | ![Successful Nodes Networking ](./images/weavenetworkpods.png) 213 | 214 | # Clean Up 215 | 216 | *Delete the AWS CloudFormation Stack* 217 | 218 | >`aws cloudformation delete-stack --stack-name k8s-hardway` 219 | 220 | 221 | *Check if the AWS CloudFormation Stack still exist to confirm deletion* 222 | 223 | >`aws cloudformation list-stacks --stack-status-filter CREATE_COMPLETE --region eu-west-1 --query 'StackSummaries[*].{Name:StackName,Date:CreationTime,Status:StackStatus}' --output text | grep k8s-hardway` 224 | 225 | 226 | 227 | 228 | -------------------------------------------------------------------------------- /deployments/create_ca_certs.yml: -------------------------------------------------------------------------------- 1 | - hosts: localhost 2 | become: true 3 | become_user: ubuntu 4 | gather_facts: true 5 | 6 | vars: 7 | cert_location: "/home/ubuntu/certs" 8 | cert_remote_location: "/home/ubuntu" 9 | vars_files: 10 | - ./env.yaml 11 | 12 | tasks: 13 | - name: Create certificate directory if it does not exist 14 | file: 15 | path: "{{ cert_location }}" 16 | state: directory 17 | mode: '0755' 18 | 19 | - name: generate the certificate authority 20 | shell: | 21 | cd "{{ cert_location }}" && 22 | { 23 | 24 | cat > ca-config.json << EOF 25 | { 26 | "signing": { 27 | "default": { 28 | "expiry": "8760h" 29 | }, 30 | "profiles": { 31 | "kubernetes": { 32 | "usages": ["signing", "key encipherment", "server auth", "client auth"], 33 | "expiry": "8760h" 34 | } 35 | } 36 | } 37 | } 38 | EOF 39 | 40 | cat > ca-csr.json << EOF 41 | { 42 | "CN": "Kubernetes", 43 | "key": { 44 | "algo": "rsa", 45 | "size": 2048 46 | }, 47 | "names": [ 48 | { 49 | "C": "IR", 50 | "L": "Dublin", 51 | "O": "Kubernetes", 52 | "OU": "CA", 53 | "ST": "Dublin" 54 | } 55 | ] 56 | } 57 | EOF 58 | 59 | cfssl gencert -initca ca-csr.json | cfssljson -bare ca 60 | 61 | } 62 | 63 | 64 | - name: generate the Admin Client Certificate 65 | shell: | 66 | cd "{{ cert_location }}" && 67 | { 68 | 69 | cat > admin-csr.json << EOF 70 | { 71 | "CN": "admin", 72 | "key": { 73 | "algo": "rsa", 74 | "size": 2048 75 | }, 76 | "names": [ 77 | { 78 | "C": "IR", 79 | "L": "Dublin", 80 | "O": "system:masters", 81 | "OU": "Kubernetes The Hard Way", 82 | "ST": "Dublin" 83 | } 84 | ] 85 | } 86 | EOF 87 | 88 | cfssl gencert \ 89 | -ca=ca.pem \ 90 | -ca-key=ca-key.pem \ 91 | -config=ca-config.json \ 92 | -profile=kubernetes \ 93 | admin-csr.json | cfssljson -bare admin 94 | 95 | } 96 | 97 | - name: Generate a certificate and private key for each Kubernetes worker node 98 | shell: | 99 | cd "{{ cert_location }}" && 100 | { 101 | cat > worker1-csr.json << EOF 102 | { 103 | "CN": "system:node:worker1", 104 | "key": { 105 | "algo": "rsa", 106 | "size": 2048 107 | }, 108 | "names": [ 109 | { 110 | "C": "IR", 111 | "L": "Dublin", 112 | "O": "system:nodes", 113 | "OU": "Kubernetes The Hard Way", 114 | "ST": "Dublin" 115 | } 116 | ] 117 | } 118 | EOF 119 | 120 | cfssl gencert \ 121 | -ca=ca.pem \ 122 | -ca-key=ca-key.pem \ 123 | -config=ca-config.json \ 124 | -hostname={{ worker1_hostnames }} \ 125 | -profile=kubernetes \ 126 | worker1-csr.json | cfssljson -bare worker1 127 | 128 | cat > worker2-csr.json << EOF 129 | { 130 | "CN": "system:node:worker2", 131 | "key": { 132 | "algo": "rsa", 133 | "size": 2048 134 | }, 135 | "names": [ 136 | { 137 | "C": "IR", 138 | "L": "Dublin", 139 | "O": "system:nodes", 140 | "OU": "Kubernetes The 
Hard Way", 141 | "ST": "Dublin" 142 | } 143 | ] 144 | } 145 | EOF 146 | 147 | cfssl gencert \ 148 | -ca=ca.pem \ 149 | -ca-key=ca-key.pem \ 150 | -config=ca-config.json \ 151 | -hostname={{ worker2_hostnames }} \ 152 | -profile=kubernetes \ 153 | worker2-csr.json | cfssljson -bare worker2 154 | 155 | } 156 | 157 | 158 | - name: Controller Manager Client certificate 159 | shell: | 160 | cd "{{ cert_location }}" && 161 | { 162 | 163 | cat > kube-controller-manager-csr.json << EOF 164 | { 165 | "CN": "system:kube-controller-manager", 166 | "key": { 167 | "algo": "rsa", 168 | "size": 2048 169 | }, 170 | "names": [ 171 | { 172 | "C": "IR", 173 | "L": "Dublin", 174 | "O": "system:kube-controller-manager", 175 | "OU": "Kubernetes The Hard Way", 176 | "ST": "Dublin" 177 | } 178 | ] 179 | } 180 | EOF 181 | 182 | cfssl gencert \ 183 | -ca=ca.pem \ 184 | -ca-key=ca-key.pem \ 185 | -config=ca-config.json \ 186 | -profile=kubernetes \ 187 | kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager 188 | 189 | } 190 | 191 | - name: Kube Proxy Client certificate 192 | shell: | 193 | cd "{{ cert_location }}" && 194 | { 195 | 196 | cat > kube-proxy-csr.json << EOF 197 | { 198 | "CN": "system:kube-proxy", 199 | "key": { 200 | "algo": "rsa", 201 | "size": 2048 202 | }, 203 | "names": [ 204 | { 205 | "C": "IR", 206 | "L": "Dublin", 207 | "O": "system:node-proxier", 208 | "OU": "Kubernetes The Hard Way", 209 | "ST": "Dublin" 210 | } 211 | ] 212 | } 213 | EOF 214 | 215 | cfssl gencert \ 216 | -ca=ca.pem \ 217 | -ca-key=ca-key.pem \ 218 | -config=ca-config.json \ 219 | -profile=kubernetes \ 220 | kube-proxy-csr.json | cfssljson -bare kube-proxy 221 | 222 | } 223 | 224 | - name: Kube Scheduler Client Certificate 225 | shell: | 226 | cd "{{ cert_location }}" && 227 | { 228 | 229 | cat > kube-scheduler-csr.json << EOF 230 | { 231 | "CN": "system:kube-scheduler", 232 | "key": { 233 | "algo": "rsa", 234 | "size": 2048 235 | }, 236 | "names": [ 237 | { 238 | "C": "IR", 239 | "L": "Dublin", 240 | "O": "system:kube-scheduler", 241 | "OU": "Kubernetes The Hard Way", 242 | "ST": "Dublin" 243 | } 244 | ] 245 | } 246 | EOF 247 | 248 | cfssl gencert \ 249 | -ca=ca.pem \ 250 | -ca-key=ca-key.pem \ 251 | -config=ca-config.json \ 252 | -profile=kubernetes \ 253 | kube-scheduler-csr.json | cfssljson -bare kube-scheduler 254 | 255 | } 256 | 257 | - name: Generate service account keys 258 | shell: | 259 | cd "{{ cert_location }}" && 260 | { 261 | 262 | cat > service-account-csr.json << EOF 263 | { 264 | "CN": "service-accounts", 265 | "key": { 266 | "algo": "rsa", 267 | "size": 2048 268 | }, 269 | "names": [ 270 | { 271 | "C": "IR", 272 | "L": "Dublin", 273 | "O": "Kubernetes", 274 | "OU": "Kubernetes The Hard Way", 275 | "ST": "Dublin" 276 | } 277 | ] 278 | } 279 | EOF 280 | 281 | cfssl gencert \ 282 | -ca=ca.pem \ 283 | -ca-key=ca-key.pem \ 284 | -config=ca-config.json \ 285 | -profile=kubernetes \ 286 | service-account-csr.json | cfssljson -bare service-account 287 | 288 | } 289 | 290 | - debug: 291 | msg: "{{ CERT_HOSTNAME }}" 292 | 293 | - name: Generate Kubernetes cluster server certificate for the Kubernetes API 294 | shell: | 295 | cd "{{ cert_location }}" && 296 | { 297 | 298 | cat > kubernetes-csr.json << EOF 299 | { 300 | 301 | "CN": "kubernetes", 302 | "key": { 303 | "algo": "rsa", 304 | "size": 2048 305 | }, 306 | "names": [ 307 | { 308 | "C": "IR", 309 | "L": "Dublin", 310 | "O": "Kubernetes", 311 | "OU": "Kubernetes The Hard Way", 312 | "ST": "Dublin" 313 | } 314 | ] 315 | } 316 | EOF 317 | 318 | 
cfssl gencert \ 319 | -ca=ca.pem \ 320 | -ca-key=ca-key.pem \ 321 | -config=ca-config.json \ 322 | -hostname={{ CERT_HOSTNAME }} \ 323 | -profile=kubernetes \ 324 | kubernetes-csr.json | cfssljson -bare kubernetes 325 | 326 | } 327 | 328 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /deployments/workernodes.yml: -------------------------------------------------------------------------------- 1 | - hosts: workers 2 | become_user: ubuntu 3 | serial: 1 4 | become: true 5 | gather_facts: true 6 | 7 | vars: 8 | config_dir: "/etc/kubernetes/config" 9 | cert_remote_location: "/home/ubuntu" 10 | RELEASE: "v1.23.9" 11 | vars_files: 12 | - ./env.yaml 13 | 14 | tasks: 15 | - name: Setting HOSTNAME as fact 16 | set_fact: 17 | HOSTNAME: "{{ 'worker1' if inventory_hostname == 'worker1' else 'worker2' if inventory_hostname == 'worker2' else 'UNKNOWN' }}" 18 | 19 | - name: Prints HOSTNAME variable set 20 | debug: 21 | msg: 22 | - "This machine HOSTNAME is: '{{ HOSTNAME }}'" 23 | 24 | - name: Create Workaround for CoreDNS if Ubuntu 16 - ideally use Ubuntu 18 25 | shell: | 26 | sudo mkdir -p /run/systemd/resolve 27 | sudo ln -s /run/resolvconf/resolv.conf /run/systemd/resolve/resolv.conf 28 | when: ansible_distribution_major_version == "16" 29 | 30 | - name: Install packages 31 | apt: 32 | name: "{{ item }}" 33 | state: present 34 | loop: 35 | - socat 36 | - conntrack 37 | - ipset 38 | become: true 39 | become_user: root 40 | 41 | - name: Download and install the worker binaries to each worker node 42 | get_url: 43 | url: "{{ item }}" 44 | dest: /usr/local/bin/ 45 | mode: a+x 46 | loop: 47 | - https://storage.googleapis.com/gvisor/releases/release/latest/x86_64/runsc 48 | - https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64 49 | - https://storage.googleapis.com/kubernetes-release/release/{{ RELEASE }}/bin/linux/amd64/kubectl 50 | - https://storage.googleapis.com/kubernetes-release/release/{{ RELEASE }}/bin/linux/amd64/kube-proxy 51 | - https://storage.googleapis.com/kubernetes-release/release/{{ RELEASE }}/bin/linux/amd64/kubelet 52 | become: true 53 | become_user: root 54 | 55 | - name: create config dir if it doesn't exist 56 | shell: sudo mkdir -p "{{ item }}" 57 | loop: 58 | - /var/lib/kubernetes/ 59 | - /etc/cni/net.d 60 | - /opt/cni/bin 61 | - /var/lib/kubelet 62 | - /var/lib/kube-proxy 63 | - /var/run/kubernetes 64 | 65 | - name: Download and install the worker binaries archive to each worker node 66 | get_url: 67 | url: "{{ item }}" 68 | dest: "{{ artifacts_location }}" 69 | loop: 70 | - https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.22.1/crictl-v1.22.1-linux-amd64.tar.gz 71 | - https://github.com/containernetworking/plugins/releases/download/v1.1.1/cni-plugins-linux-amd64-v1.1.1.tgz 72 | - https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-amd64.tar.gz 73 | 74 | - name: Extract crictl-v1.22.1-linux-amd64.tar.gz into /usr/local/bin/ 75 | unarchive: 76 | src: "{{ artifacts_location }}/crictl-v1.22.1-linux-amd64.tar.gz" 77 | dest: /usr/local/bin/ 78 | remote_src: yes 79 | become: true 80 | become_user: root 81 | 82 | - name: Make crictl executable 83 | shell: sudo chmod +x /usr/local/bin/crictl 84 | 85 | - name: Extract cni-plugins-linux-amd64-v1.1.1.tgz into /opt/cni/bin/ 86 | shell: "sudo tar -xvzf {{ artifacts_location }}/cni-plugins-linux-amd64-v1.1.1.tgz -C /opt/cni/bin/" 87 | 88 | - name: Create temp containerd folder 89 | shell: mkdir -p /tmp/containerd/ 90 | 91 | - name: Extract containerd-1.6.8-linux-amd64.tar.gz into /tmp/containerd 92 | unarchive: 93 | src: "{{ artifacts_location }}/containerd-1.6.8-linux-amd64.tar.gz" 94 | dest: /tmp/containerd/ 95 | remote_src: yes 96 | 97 | - name: Move containerd to 
bin 98 | shell: sudo mv -v /tmp/containerd/bin/* /bin/ 99 | 100 | - name: Move runc.amd64 to runc 101 | shell: sudo mv -v /usr/local/bin/runc.amd64 /usr/local/bin/runc 102 | 103 | - name: Create containerd folder 104 | shell: sudo mkdir -p /etc/containerd/ 105 | 106 | - name: Create the containerd config.toml 107 | shell: | 108 | cat << EOF | sudo tee /etc/containerd/config.toml 109 | [plugins] 110 | [plugins.cri.containerd] 111 | snapshotter = "overlayfs" 112 | [plugins.cri.containerd.default_runtime] 113 | runtime_type = "io.containerd.runtime.v1.linux" 114 | runtime_engine = "/usr/local/bin/runc" 115 | runtime_root = "" 116 | [plugins.cri.containerd.untrusted_workload_runtime] 117 | runtime_type = "io.containerd.runtime.v1.linux" 118 | runtime_engine = "/usr/local/bin/runsc" 119 | runtime_root = "/run/containerd/runsc" 120 | EOF 121 | become: true 122 | become_user: root 123 | 124 | - name: Create the containerd unit file 125 | shell: | 126 | cat <