├── .gitignore ├── resources ├── tp4-whoami-svc.yml ├── tp4-gateway-pod.yml ├── tp5-whoami-ingress.yml └── tp3-deploy-whoami.yaml ├── README.md ├── .strigo └── config.yml ├── tp ├── tp1-minikube.md ├── tp5-ingress.md ├── tp2-kubectl.md ├── tp4-service.md └── tp3-deployment.md └── strigo └── init.sh /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode/ -------------------------------------------------------------------------------- /resources/tp4-whoami-svc.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | 4 | metadata: 5 | name: whoami 6 | 7 | spec: 8 | selector: 9 | app: whoami 10 | ports: 11 | - port: 8080 12 | targetPort: 80 13 | -------------------------------------------------------------------------------- /resources/tp4-gateway-pod.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: gateway 5 | spec: 6 | containers: 7 | - name: shell-in-pod 8 | image: zenika/k8s-training-tools:v5 9 | command: 10 | - bash 11 | - -c 12 | - sleep infinity 13 | -------------------------------------------------------------------------------- /resources/tp5-whoami-ingress.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1 3 | kind: Ingress 4 | metadata: 5 | name: whoami 6 | spec: 7 | rules: 8 | - host: whoami.FIXME.sslip.io 9 | http: 10 | paths: 11 | - path: / 12 | pathType: Prefix 13 | backend: 14 | service: 15 | name: whoami 16 | port: 17 | number: 8080 18 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # trainingClazz Kubernetes 2 | 3 | - [Slides](https://drive.google.com/file/d/1mn0taR2qykWRYtqR7j6rLwv6OhzIlAOV/view?usp=drive_link) 4 | - [Strigo](https://strigo.io) 5 | 6 | ## Lab 7 | 8 | - [Lab 1 - minikube usage](tp/tp1-minikube.md) 9 | - [Lab 2 - kubectl run](tp/tp2-kubectl.md) 10 | - [Lab 3 - Deployment](tp/tp3-deployment.md) 11 | - [Lab 4 - Service and test](tp/tp4-service.md) 12 | - [Lab 5 - Ingress](tp/tp5-ingress.md) 13 | -------------------------------------------------------------------------------- /.strigo/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | classes: 3 | - exercises: 4 | - title: Lab 1 - minikube usage 5 | file: ../tp/tp1-minikube.md 6 | - title: Lab 2 - kubectl run 7 | file: ../tp/tp2-kubectl.md 8 | - title: Lab 3 - Deployment 9 | file: ../tp/tp3-deployment.md 10 | - title: Lab 4 - Service and test 11 | file: ../tp/tp4-service.md 12 | - title: Lab 5 - Ingress 13 | file: ../tp/tp5-ingress.md 14 | -------------------------------------------------------------------------------- /resources/tp3-deploy-whoami.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: whoami 6 | spec: 7 | replicas: 3 8 | selector: 9 | matchLabels: 10 | app: whoami 11 | template: 12 | metadata: 13 | labels: 14 | app: whoami 15 | spec: 16 | containers: 17 | - name: whoami 18 | image: containous/whoami 19 | resources: 20 | limits: 21 | memory: "128Mi" 22 | cpu: "100m" 23 | ports: 24 | - containerPort: 80 25 | -------------------------------------------------------------------------------- /tp/tp1-minikube.md: 
--------------------------------------------------------------------------------
1 | ## Lab 1 - minikube setup
2 | 
3 | Goal: Start a `kubernetes` cluster on the VM.
4 | 
5 | ### minikube startup
6 | 
7 | - Launch minikube with the following command:
8 | 
9 | ```shell
10 | minikube start --kubernetes-version v1.30.0 --nodes 3
11 | ```
12 | 
13 | - Use the following command to check that minikube is running:
14 | 
15 | ```shell
16 | minikube status
17 | ```
18 | 
19 | ### Download resources
20 | 
21 | Download the lab resources from [Zenika's GitHub repository](https://github.com/Zenika/trainingclazz-k8s/tree/refonte).
22 | 
23 | ```shell
24 | mkdir -p ~/workspace
25 | cd ~/workspace
26 | git clone -b refonte https://github.com/Zenika/trainingclazz-k8s
27 | cd ~/workspace/trainingclazz-k8s/resources
28 | ```
29 | 
30 | ℹ️ You can use this repo anytime outside this lab ;-)
31 | 
--------------------------------------------------------------------------------
/tp/tp5-ingress.md:
--------------------------------------------------------------------------------
1 | # Lab 5 - Ingress
2 | 
3 | In the previous exercise, you exposed the *whoami* application internally using a Service of type ClusterIP.
4 | Now you want to go public and open your application to external clients. One way to do this is with a Kubernetes Ingress.
5 | 
6 | An Ingress exposes HTTP and HTTPS routes from outside the cluster to Services within the cluster.
7 | To be able to create Ingress objects in your cluster, you first need to install an *Ingress Controller*.
8 | You will use the NGINX Ingress Controller (https://kubernetes.github.io/ingress-nginx/), which minikube provides through its `ingress` addon.
9 | 
10 | ## Ingress Controller
11 | 
12 | - Activate the `ingress` addon:
13 | 
14 | ```shell
15 | minikube addons enable ingress
16 | ```
17 | 
18 | - Wait for the controller to be ready by running the following command:
19 | 
20 | ```shell
21 | kubectl wait --namespace ingress-nginx \
22 |   --for=condition=ready pod \
23 |   --selector=app.kubernetes.io/component=controller \
24 |   --timeout=90s
25 | ```
26 | 
27 | - Display all objects created in the `ingress-nginx` Namespace with the following command:
28 | 
29 | ```shell
30 | kubectl get pod,svc,deploy,rs,job -n ingress-nginx
31 | ```
32 | 
33 | - Expose the Ingress Controller:
34 | 
35 | ```shell
36 | docker container run --name expose-ingress-controller --detach --network minikube --publish 80:80 alpine/socat tcp-listen:80,fork,reuseaddr tcp-connect:minikube:80
37 | ```
38 | 
39 | ## Ingress whoami
40 | 
41 | - Edit the file `tp5-whoami-ingress.yml` and replace `FIXME` with the public IP of your machine: `${PUBLIC_IP}`
42 | - Expose the `whoami` Service with an _Ingress_ that responds on the URL `whoami.FIXME.sslip.io`:
43 | 
44 | ```shell
45 | kubectl apply -f tp5-whoami-ingress.yml
46 | ```
47 | 
48 | - Check that the _Ingress_ is correctly configured with `kubectl get ingress`
49 | - Test the URL `http://whoami.FIXME.sslip.io/` and check, by refreshing the page, that you land alternately on the different Pods of the Service (see `Hostname`). A scripted version of this check is sketched after the conclusion.
50 | 
51 | - Observe the logs of the Pod `ingress-nginx-controller-...` in the Namespace `ingress-nginx`
52 | 
53 | ## Conclusion
54 | 
55 | Congratulations! You have successfully deployed your application on Kubernetes and made it publicly accessible!
56 | 
57 | Enjoy!
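## Going further (optional)

If you would rather script the round-robin check than refresh the browser, here is a minimal sketch. It assumes the `${PUBLIC_IP}` environment variable provided by the lab VM and that `FIXME` has already been replaced in the Ingress manifest:

```shell
# Hit the Ingress a few times and keep only the Hostname line of whoami's response.
# Each Hostname is the name of the Pod that served the request.
for i in $(seq 1 6); do
  curl -s "http://whoami.${PUBLIC_IP}.sslip.io/" | grep '^Hostname'
done
```

If the Pod names alternate, the Ingress Controller is load-balancing across the Service's endpoints as expected.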
58 | 
--------------------------------------------------------------------------------
/tp/tp2-kubectl.md:
--------------------------------------------------------------------------------
1 | ## Lab 2 - kubectl run
2 | 
3 | Goal:
4 | - create a Pod without a descriptor
5 | - troubleshoot a Pod's startup
6 | 
7 | ### Getting started
8 | 
9 | - Display the help for the `kubectl run` command:
10 | ```shell
11 | kubectl run --help
12 | ```
13 | 
14 | ### Start Pod
15 | 
16 | - Launch the following command to start a Pod:
17 | ```shell
18 | kubectl run whoami --image=traefik/whoami:v1.10 --port=80
19 | ```
20 | 
21 | It will:
22 | - start a `whoami` Pod
23 | - from the `traefik/whoami:v1.10` image
24 | - and reference the `80` port
25 | - Check the Pod's startup with `watch kubectl get po` (__ctrl+c__ to exit the __watch__ loop)
26 | - What can you see?
27 | - Display more details on the Pod with `kubectl describe` and note its IP address, which you will need later
28 | - Check that the application is working with the following command:
29 | ```shell
30 | # We have to run curl from a node of the cluster; replace <POD_IP> with the IP noted above
31 | minikube ssh -- curl <POD_IP>:80/api
32 | ```
33 | 
34 | ### Troubleshoot a Pod startup
35 | 
36 | - Launch the following command to start a new Pod:
37 | ```shell
38 | kubectl run faulty-whoami --image=traefik/whoami:nil --port=80
39 | ```
40 | 
41 | It will:
42 | - start a __Pod__ (we will cover the Pod's details later)
43 | - with the name `faulty-whoami`
44 | - from the image `traefik/whoami:nil`
45 | - and reference the `80` port
46 | - Check the Pod's startup with `watch kubectl get po` (__ctrl+c__ to exit the __watch__ loop)
47 | - Check that the Pod fails to start
48 | - Display more info on the Pod and check the `Events` with `kubectl describe` to determine the cause of the error
49 | - Delete this __Pod__ with `kubectl delete pod faulty-whoami`
50 | 
51 | ### Start a Pod shell
52 | 
53 | - Launch the following command to start a new Pod:
54 | ```shell
55 | kubectl run training-shell --image=zenika/k8s-training-tools:v5 --command -- sleep infinity
56 | ```
57 | It will:
58 | - start a `training-shell` Pod
59 | - from the image `zenika/k8s-training-tools:v5`
60 | - whose main process will be `sleep infinity`
61 | - Check the Pod's startup
62 | - List all created Pods
63 | - Launch `kubectl exec training-shell -- curl -s <POD_IP>:80/api` (reuse the whoami Pod IP noted earlier)
64 | - Display the help for `kubectl exec`
65 | 
66 | ### k9s
67 | 
68 | [`K9s`](https://k9scli.io/) is a TUI ("Text User Interface") used to browse Kubernetes resources without typing `kubectl` commands.
69 | 
70 | - Launch `k9s` to find the created __Pod__ and browse the interface. The keyboard shortcuts are displayed at the top of the screen. Use `CTRL+C` to exit.
71 | 
--------------------------------------------------------------------------------
/tp/tp4-service.md:
--------------------------------------------------------------------------------
1 | # Lab 4 - Service and test
2 | 
3 | A Kubernetes *Service* exposes an application to its consumers, whether internal or external.
4 | 
5 | ## whoami service
6 | 
7 | The *whoami* Service described in the file `tp4-whoami-svc.yml` targets the Pods of the *whoami* Deployment through their label *app: whoami*
8 | (see the `selector` entry).
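For example, once the Service has been created (next step), you can check that the label selection and the resulting endpoints line up. This is only a sketch; the `app=whoami` label comes from the Deployment created in Lab 3:

```shell
# Pods carrying the label the Service selects on, with their IPs
kubectl get pods -l app=whoami -o wide
# The same Pod IPs should appear as the Service's endpoints
kubectl get endpoints whoami
```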
9 | 
10 | Create the *whoami* Service from the file `tp4-whoami-svc.yml`:
11 | ```shell
12 | kubectl apply -f tp4-whoami-svc.yml
13 | ```
14 | 
15 | List the available Services and make sure the *whoami* Service has been created:
16 | ```shell
17 | kubectl get svc
18 | ```
19 | 
20 | Inspect the *whoami* Service:
21 | ```shell
22 | kubectl describe svc whoami
23 | ```
24 | 
25 | Notice the *Endpoints* section. It contains the IPs and ports of the Service's destination Pods.
26 | 
27 | ## Scale the deployment and check the service's Endpoints
28 | Scale your deployment to 2 replicas:
29 | ```shell
30 | kubectl scale deploy whoami --replicas=2
31 | ```
32 | 
33 | Check the Endpoints section of the Service:
34 | ```shell
35 | kubectl describe svc whoami
36 | ```
37 | 
38 | Scale up to 3 replicas:
39 | ```shell
40 | kubectl scale deploy whoami --replicas=3
41 | ```
42 | 
43 | And check the Endpoints section again:
44 | ```shell
45 | kubectl describe svc whoami
46 | ```
47 | What do you observe?
48 | 
49 | ## Request the whoami service from within the gateway pod
50 | 
51 | Create the gateway Pod described in the file `tp4-gateway-pod.yml`:
52 | ```shell
53 | kubectl apply -f tp4-gateway-pod.yml
54 | ```
55 | 
56 | Check that the gateway Pod has been created and wait until it is in a *Running* state:
57 | ```shell
58 | kubectl get pods
59 | ```
60 | 
61 | Connect to the gateway Pod using:
62 | ```shell
63 | kubectl exec -it gateway -- bash
64 | ```
65 | 
66 | From within the gateway Pod, request the *whoami* Service using the curl client:
67 | ```shell
68 | curl whoami:8080
69 | ```
70 | 
71 | Notice that the Service has been reached using its DNS name *whoami* and the right port.
72 | You don't need to look up the Pod IPs behind this Service yourself: Kubernetes resolves and load-balances them for you.
73 | 
74 | Execute the above command several times. You'll see responses coming from different Pods as long as your Deployment has several replicas.
75 | Exit the container with `exit`.
76 | 
77 | You can check the DNS response directly:
78 | 
79 | ```shell
80 | kubectl exec gateway -- nslookup whoami
81 | kubectl exec gateway -- nslookup whoami.default
82 | kubectl exec gateway -- nslookup whoami.default.svc
83 | kubectl exec gateway -- nslookup whoami.default.svc.cluster.local
84 | ```
85 | 
86 | ## Conclusion
87 | 
88 | Kubernetes Services expose your application in a discoverable and stable way.
89 | They automatically load-balance requests across the Pods listed in the Service's Endpoints.
90 | The Endpoints list is updated dynamically and transparently by Kubernetes.
91 | 
92 | In this exercise you have seen the default Kubernetes Service type, ClusterIP.
93 | There are other types of Services in Kubernetes: [NodePort, LoadBalancer...](https://kubernetes.io/fr/docs/concepts/services-networking/service/).
94 | Apart from some subtleties, regarding external exposure for example, they all share the concepts seen here.
95 | 
96 | ## Bonus
97 | If you have some time left at the end of this session, try to figure out what is behind the *kubernetes* Service that appears when you list the Services.
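If you want a nudge for this bonus, the following sketch shows where to look without giving the whole answer away:

```shell
# Describe the built-in Service that Kubernetes creates for itself
kubectl describe svc kubernetes
# Compare its endpoints with the address kubectl talks to
kubectl get endpoints kubernetes
kubectl cluster-info
```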
98 | 
--------------------------------------------------------------------------------
/tp/tp3-deployment.md:
--------------------------------------------------------------------------------
1 | # Lab 3 - Deployment
2 | 
3 | [Deployments](https://kubernetes.io/fr/docs/concepts/workloads/controllers/deployment/) allow you to manage [Pods](https://kubernetes.io/fr/docs/concepts/workloads/pods/pod/). With this resource, you'll be able to quickly and easily update and scale your applications.
4 | 
5 | From now on, we'll be interacting with the cluster through the [`kubectl` command](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands).
6 | 
7 | ## Deploy the application
8 | 
9 | In this lab, we're going to deploy the [whoami](https://github.com/containous/whoami) application. It's a tiny web server that prints out information about the Pod it runs in.
10 | 
11 | To do that, we're going to *apply* the yaml descriptor [tp3-deploy-whoami.yaml](../resources/tp3-deploy-whoami.yaml):
12 | ```bash
13 | kubectl apply -f tp3-deploy-whoami.yaml
14 | ```
15 | 
16 | As you can see in the yaml, this descriptor will create a deployment called **whoami**. This deployment is going to control **3 pod replicas** based on the whoami docker image.
17 | 
18 | ## Check
19 | 
20 | * Check that the deployment was successfully applied
21 | ```bash
22 | kubectl rollout status deploy whoami
23 | # It should print the following
24 | deployment "whoami" successfully rolled out
25 | ```
26 | 
27 | * List all your deployments
28 | ```bash
29 | kubectl get deploy
30 | # It should print something similar to below
31 | NAME     READY   UP-TO-DATE   AVAILABLE   AGE
32 | whoami   3/3     3            3           5m4s
33 | ```
34 | Here you can see your `whoami` deployment. You can also see the number of replicas currently running.
35 | 
36 | * Get the details of that deployment
37 | ```bash
38 | kubectl describe deploy whoami
39 | ```
40 | This command gives more information about your deployment, in particular the number of replicas and the Pod template.
41 | 
42 | * List the pods
43 | ```bash
44 | kubectl get po
45 | # It should print something similar to below
46 | NAME                      READY   STATUS    RESTARTS   AGE
47 | whoami-66688d8f77-7cg4r   1/1     Running   0          14m
48 | whoami-66688d8f77-mh946   1/1     Running   0          14m
49 | whoami-66688d8f77-w9pxj   1/1     Running   0          14m
50 | ```
51 | Here you can see that 3 pods were created as requested.
52 | 
53 | * Now, what happens if we delete a pod? Let's try it.
54 | 
55 | 1. Pick a pod from the previous command and delete it:
56 | ```bash
57 | kubectl delete po whoami-66688d8f77-w9pxj
58 | ```
59 | 2. Now let's print the pods again:
60 | ```bash
61 | kubectl get po
62 | # It should print something similar to below
63 | NAME                      READY   STATUS    RESTARTS   AGE
64 | whoami-66688d8f77-7cg4r   1/1     Running   0          21m
65 | whoami-66688d8f77-dbs5j   1/1     Running   0          8s
66 | whoami-66688d8f77-mh946   1/1     Running   0          21m
67 | ```
68 | As you can see, you still have 3 replicas. Also note that the age is different on one pod: that's the one the deployment created automatically when you deleted a pod.
69 | Why? Because your deployment descriptor requests 3 replicas, so whatever happens, the Kubernetes controller manager does everything it can to maintain that number.
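If you would like to watch this reconciliation without copying Pod names by hand, here is an optional sketch; it relies on the `app: whoami` label defined in the descriptor:

```bash
# Delete one whoami Pod selected by its label...
kubectl delete $(kubectl get pods -l app=whoami -o name | head -n 1)
# ...and list the Pods right away: a freshly created replacement should already be there
kubectl get po
```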
70 | 
71 | * Change the number of replicas:
72 | ```bash
73 | # Let's increase the number of replicas
74 | kubectl scale deploy whoami --replicas=5
75 | # Get the deployment again
76 | kubectl get deploy whoami
77 | # It should print something like below
78 | NAME     READY   UP-TO-DATE   AVAILABLE   AGE
79 | whoami   5/5     5            5           28m
80 | ```
81 | As you can see, there are now 5 replicas attached to your deployment.
82 | 
83 | Now go ahead and print all the pods!
84 | 
--------------------------------------------------------------------------------
/strigo/init.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | echo "--------- Start ubuntu-20.04-apt-lock-fix.sh"
4 | 
5 | #!/bin/bash
6 | 
7 | echo 'DPkg::Lock::Timeout "900";' > /etc/apt/apt.conf.d/99-dpkg-timeout
8 | 
9 | apt-get remove -y unattended-upgrades
10 | 
11 | wget -q -O - https://dl.google.com/linux/linux_signing_key.pub | apt-key add -
12 | 
13 | echo "--------- End ubuntu-20.04-apt-lock-fix.sh"
14 | 
15 | echo "--------- Start strigo.sh"
16 | 
17 | #!/bin/bash
18 | 
19 | hostnamectl set-hostname '{{ .STRIGO_RESOURCE_NAME }}'
20 | sed --in-place "0,/^127.0.0.1/s/$/ $(hostnamectl status --static) $(hostnamectl status --static).zenika.labs.strigo.io/" /etc/hosts
21 | 
22 | apt-get update
23 | apt-get install -y cloud-guest-utils
24 | 
25 | cat <<\EOF > /etc/profile.d/00_strigo_context.sh
26 | export INSTANCE_NAME='{{ .STRIGO_RESOURCE_NAME }}'
27 | export PUBLIC_DNS={{ .STRIGO_RESOURCE_DNS }}
28 | export PUBLIC_IP=$(ec2metadata --public-ipv4)
29 | export PRIVATE_DNS={{ .STRIGO_RESOURCE_DNS }}
30 | export PRIVATE_IP=$(ec2metadata --local-ipv4)
31 | export HOSTNAME='{{ .STRIGO_RESOURCE_NAME }}'
32 | EOF
33 | . /etc/profile.d/00_strigo_context.sh
34 | 
35 | echo "--------- End strigo.sh"
36 | 
37 | echo "--------- Start docker.sh"
38 | 
39 | #!/bin/bash
40 | 
41 | dpkg --purge docker docker-engine docker.io containerd runc
42 | apt-get autoremove --purge -y
43 | apt-get update
44 | apt-get install -y \
45 |   jq \
46 |   apt-transport-https \
47 |   ca-certificates \
48 |   curl \
49 |   gnupg \
50 |   lsb-release
51 | 
52 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
53 | echo \
54 |   "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \
55 |   $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list
56 | 
57 | apt-get update
58 | apt-get install -y docker-ce docker-ce-cli containerd.io
59 | 
60 | docker_base_settings='{
61 |   "log-driver": "json-file",
62 |   "log-opts": {
63 |     "max-size": "5k",
64 |     "max-file": "3",
65 |     "compress": "true"
66 |   }
67 | }'
68 | echo "${docker_base_settings}" "${docker_extra_settings:-{\}}" | jq --slurp '.[0] * .[1]' > /etc/docker/daemon.json
69 | systemctl restart docker
70 | 
71 | usermod -aG docker ubuntu
72 | loginctl terminate-user ubuntu
73 | 
74 | echo "--------- End docker.sh"
75 | 
76 | echo "--------- Start tls-certificate.sh"
77 | ZEROSSL_EAB_KID="d3GmAeDzGx3v9UI3ZUgOlQ"
78 | ZEROSSL_EAB_HMAC_KEY="Nn90w7WR3RUjsrdlB0IJnes94SWhcuAyvndk0YPczKw7dJDS4esHz42Ng4tM4kEDQzaBzccNLyKqc4WD9xb_EQ"
79 | #!/bin/bash
80 | 
81 | apt-get update
82 | apt-get install -y --no-install-recommends certbot acl
83 | 
84 | if [[ -n "${ZEROSSL_EAB_KID}" && -n "${ZEROSSL_EAB_HMAC_KEY}" ]]; then
85 |   # https://github.com/zerossl/zerossl-bot/blob/master/zerossl-bot.sh
86 |   ZEROSSL_OPTS="--eab-kid ${ZEROSSL_EAB_KID} --eab-hmac-key ${ZEROSSL_EAB_HMAC_KEY} --server 
https://acme.zerossl.com/v2/DV90" 87 | fi 88 | certbot certonly --non-interactive --agree-tos --register-unsafely-without-email \ 89 | ${ZEROSSL_OPTS} \ 90 | --standalone --cert-name labs.strigo.io --domain '{{ .STRIGO_RESOURCE_DNS }}' || true 91 | 92 | if [ ! -f /etc/letsencrypt/live/labs.strigo.io/privkey.pem ]; then 93 | # Fallback to self-signed certificate 94 | mkdir -p /etc/letsencrypt/{live,archive}/labs.strigo.io/ 95 | openssl req -newkey rsa:2048 -x509 -days 7 -nodes \ 96 | -subj "/CN={{ .STRIGO_RESOURCE_DNS }}" \ 97 | -addext "subjectAltName=DNS:{{ .STRIGO_RESOURCE_DNS }},IP:${PUBLIC_IP:-127.0.0.1}" \ 98 | -addext "keyUsage=critical,digitalSignature,keyEncipherment" \ 99 | -addext "extendedKeyUsage=serverAuth,clientAuth" \ 100 | -addext "certificatePolicies=2.23.140.1.2.1" \ 101 | -keyout /etc/letsencrypt/archive/labs.strigo.io/privkey.pem \ 102 | -out /etc/letsencrypt/archive/labs.strigo.io/cert.pem 103 | cp /etc/letsencrypt/archive/labs.strigo.io/cert.pem /etc/letsencrypt/archive/labs.strigo.io/chain.pem 104 | cp /etc/letsencrypt/archive/labs.strigo.io/cert.pem /etc/letsencrypt/archive/labs.strigo.io/fullchain.pem 105 | ln -s /etc/letsencrypt/archive/labs.strigo.io/* /etc/letsencrypt/live/labs.strigo.io/ 106 | fi 107 | 108 | setfacl --modify user:ubuntu:rX /etc/letsencrypt/{live,archive} 109 | setfacl --modify user:ubuntu:rX /etc/letsencrypt/archive/labs.strigo.io/privkey*.pem 110 | 111 | cat <<\EOF > /etc/profile.d/tls_certificate.sh 112 | export TLS_PRIVKEY=/etc/letsencrypt/live/labs.strigo.io/privkey.pem 113 | export TLS_CERT=/etc/letsencrypt/live/labs.strigo.io/cert.pem 114 | export TLS_CHAIN=/etc/letsencrypt/live/labs.strigo.io/chain.pem 115 | export TLS_FULLCHAIN=/etc/letsencrypt/live/labs.strigo.io/fullchain.pem 116 | EOF 117 | . 
/etc/profile.d/tls_certificate.sh 118 | 119 | echo "--------- End tls-certificate.sh" 120 | 121 | echo "--------- Start code-server.sh" 122 | code_server_settings="{\"telemetry.telemetryLevel\": \"off\", \"files.exclude\": { \"**/.*\": true }, \"vs-kubernetes\": { \"disable-linters\": [ \"resource-limits\" ] }, \"yaml.schemas\": { \"kubernetes\": [\"*.yml\"] }}" 123 | code_server_extensions="ms-kubernetes-tools.vscode-kubernetes-tools oderwat.indent-rainbow tomoki1207.pdf" 124 | code_server_tls_key_path="${TLS_PRIVKEY}" 125 | code_server_tls_cert_path="${TLS_FULLCHAIN}" 126 | #!/bin/bash 127 | 128 | set -e 129 | 130 | apt-get update 131 | apt-get install -y curl jq 132 | last_code_server_release=$(curl -sL -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/coder/code-server/releases/latest | grep -Po '/code-server/releases/tag/v\K[^"]*') 133 | code_server_version=${code_server_version:-${last_code_server_release}} 134 | curl -fsSLo /tmp/code-server.deb "https://github.com/coder/code-server/releases/download/v${code_server_version}/code-server_${code_server_version}_amd64.deb" 135 | apt-get install -y /tmp/code-server.deb 136 | 137 | mkdir --parent /home/ubuntu/.config/code-server/ 138 | cat << EOF > /home/ubuntu/.config/code-server/config.yaml 139 | bind-addr: {{ .STRIGO_RESOURCE_DNS }}:${code_server_port:-9999} 140 | auth: password 141 | password: '{{ .STRIGO_WORKSPACE_ID }}' 142 | disable-telemetry: true 143 | EOF 144 | if [ -n "${code_server_tls_cert_path}" ] && [ -n "${code_server_tls_key_path}" ]; then 145 | cat << EOF >> /home/ubuntu/.config/code-server/config.yaml 146 | cert: ${code_server_tls_cert_path} 147 | cert-key: ${code_server_tls_key_path} 148 | EOF 149 | elif [ -n "${code_server_tls_cert_path}" ] || [ -n "${code_server_tls_chain_path}" ]; then 150 | echo "One of TLS key or cert is missing, skipping TLS configuration" >&2 151 | fi 152 | chown -R ubuntu: /home/ubuntu/.config/ 153 | 154 | systemctl enable --now code-server@ubuntu 155 | 156 | cat <<\EOF > /etc/profile.d/code-server-terminal.sh 157 | if [ "$USER" = "ubuntu" ]; then 158 | echo -ne '\nCode-server ' 159 | grep '^password:' ~/.config/code-server/config.yaml 160 | echo 161 | fi 162 | EOF 163 | 164 | if [ "{{ .STRIGO_USER_EMAIL }}" = "{{ .STRIGO_EVENT_HOST_EMAIL }}" ]; then 165 | # defines the message about the code-server version 166 | if [ "${code_server_version}" = "${last_code_server_release}" ]; then 167 | code_server_version_message="The last release version of code server is installed (${last_code_server_release})" 168 | else 169 | code_server_version_message="!!! Version ${code_server_version} of code server is installed, but a newer release exists (${last_code_server_release})" 170 | fi 171 | 172 | cat << EOF >> /etc/profile.d/code-server-terminal.sh 173 | if [ "\$USER" = "ubuntu" ]; then 174 | echo -e "${code_server_version_message}. Trainees do not see this message." 175 | fi 176 | EOF 177 | fi 178 | 179 | if [[ ${code_server_extensions} && ${code_server_extensions-_} ]]; then 180 | code_server_extensions_array=($code_server_extensions) 181 | for code_server_extension in ${code_server_extensions_array[@]}; do 182 | sudo -iu ubuntu code-server --install-extension ${code_server_extension} 183 | 184 | if [ "${code_server_extension}" = "coenraads.bracket-pair-colorizer-2" ]; then 185 | # fixes bracket colorization (https://github.com/coder/code-server/issues/544#issuecomment-776139127) until code-server 3.11? 
186 | sudo ln -s /usr/lib/code-server/lib/vscode/node_modules /usr/lib/code-server/lib/vscode/node_modules.asar 187 | fi 188 | done 189 | fi 190 | 191 | code_server_base_settings='{ 192 | "workbench.startupEditor": "none", 193 | "security.workspace.trust.enabled": false, 194 | "telemetry.enableTelemetry": false, 195 | "telemetry.telemetryLevel": "off", 196 | "files.exclude": { 197 | "**/.*": { "when": ".bashrc" } 198 | } 199 | }' 200 | mkdir --parent /home/ubuntu/.local/share/code-server/User/ 201 | echo "${code_server_base_settings}" "${code_server_settings:-{\}}" | jq --slurp '.[0] * .[1]' > /home/ubuntu/.local/share/code-server/User/settings.json 202 | chown -R ubuntu: /home/ubuntu/.local/ 203 | 204 | loginctl terminate-user ubuntu 205 | 206 | echo "--------- End code-server.sh" 207 | 208 | echo "--------- Start http-server.sh" 209 | 210 | #!/usr/bin/env sh 211 | 212 | apt-get update 213 | apt-get install -y --no-install-recommends python3 214 | 215 | sudo -u ubuntu mkdir --parent ~ubuntu/public/ 216 | 217 | cat << EOF > /lib/systemd/system/http-server@.service 218 | [Unit] 219 | Description=Simple static public web server 220 | After=network.target 221 | 222 | [Service] 223 | Type=exec 224 | ExecStart=/usr/bin/python3 -m http.server ${http_server_port:-9997} --directory /home/%i/public/ 225 | Restart=always 226 | User=%i 227 | 228 | [Install] 229 | WantedBy=default.target 230 | EOF 231 | systemctl enable --now http-server@ubuntu 232 | 233 | echo "--------- End http-server.sh" 234 | 235 | echo "--------- Start materials-helper.sh" 236 | 237 | #!/bin/bash 238 | 239 | apt-get update 240 | apt-get install -y --no-install-recommends unzip python3 python3-pip 241 | pip3 install gdown==5.1.0 242 | 243 | GDOWN_RETRY_MAX=5 244 | 245 | function gdown { 246 | local retries=${GDOWN_RETRY_MAX} 247 | until [ ${retries} -lt 1 ]; do 248 | if command gdown $@; then 249 | break 250 | fi 251 | retries=$(( retries - 1 )) 252 | echo "gdown failed, retrying in 1 seconds..." >&2 253 | sleep 1 254 | done 255 | if [ ${retries} -lt 1 ]; then 256 | echo "gdown failed ${GDOWN_RETRY_MAX} times, giving up." 
>&2
257 |     return 1
258 |   fi
259 | }
260 | 
261 | echo "--------- End materials-helper.sh"
262 | 
263 | echo "--------- Start Installation/strigo/init_kubernetes.sh"
264 | #!/bin/bash
265 | 
266 | apt-get update
267 | apt-get install -y \
268 |   curl \
269 |   socat \
270 |   software-properties-common \
271 |   zip \
272 |   jq \
273 |   conntrack \
274 |   tree
275 | 
276 | apt-get remove -y command-not-found
277 | 
278 | mkdir -p /usr/local/bin/
279 | 
280 | curl -fsSLo /usr/local/bin/yq https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 \
281 |   && chmod +x /usr/local/bin/yq
282 | 
283 | KUBERNETES_VERSION=1.30.0
284 | curl -fsSLo /usr/local/bin/kubectl https://dl.k8s.io/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl \
285 |   && chmod +x /usr/local/bin/kubectl
286 | kubectl completion bash > /etc/bash_completion.d/kubectl
287 | kubectl version --client
288 | 
289 | curl -fsSLo /usr/local/bin/minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 \
290 |   && chmod +x /usr/local/bin/minikube
291 | minikube completion bash > /etc/bash_completion.d/minikube
292 | minikube version
293 | 
294 | cat <<EOF > /etc/profile.d/minikube.sh
295 | export MINIKUBE_DRIVER=docker
296 | export MINIKUBE_WANTUPDATENOTIFICATION=false
297 | export MINIKUBE_WANTREPORTERRORPROMPT=false
298 | export MINIKUBE_IN_STYLE=true
299 | export MINIKUBE_KUBERNETES_VERSION=${KUBERNETES_VERSION}
300 | 
301 | alias k='kubectl'
302 | EOF
303 | echo 'complete -o default -F __start_kubectl k' >> /etc/bash_completion.d/minikube
304 | 
305 | curl -fsSL https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
306 | helm completion bash > /etc/bash_completion.d/helm
307 | helm version
308 | 
309 | (
310 |   set -x; cd "$(mktemp -d)" &&
311 |   OS="$(uname | tr '[:upper:]' '[:lower:]')" &&
312 |   ARCH="$(uname -m | sed -e 's/x86_64/amd64/' -e 's/\(arm\)\(64\)\?.*/\1\2/' -e 's/aarch64$/arm64/')" &&
313 |   KREW="krew-${OS}_${ARCH}" &&
314 |   curl -fsSLO "https://github.com/kubernetes-sigs/krew/releases/latest/download/${KREW}.tar.gz" &&
315 |   tar zxvf "${KREW}.tar.gz" &&
316 |   mv "$KREW" /usr/local/bin/kubectl-krew
317 |   rm -f "${KREW}.tar.gz"
318 | )
319 | echo 'export PATH="${KREW_ROOT:-$HOME/.krew}/bin:$PATH"' >> /home/ubuntu/.bashrc
320 | 
321 | sudo -u ubuntu kubectl-krew install ctx ns
322 | cat <<EOF >> /home/ubuntu/.bashrc
323 | alias kubens='kubectl ns'
324 | alias kubectx='kubectl ctx'
325 | alias k=kubectl
326 | EOF
327 | 
328 | curl -o /tmp/k9s.tar.gz -L https://github.com/derailed/k9s/releases/latest/download/k9s_Linux_amd64.tar.gz
329 | tar -xvzf /tmp/k9s.tar.gz -C /tmp/
330 | mv /tmp/k9s /usr/local/bin/
331 | 
332 | apt-get install -y source-highlight
333 | echo "export LESSOPEN='|/usr/share/source-highlight/src-hilite-lesspipe.sh %s'
334 | export LESS='RN'" >> /home/ubuntu/.bashrc
335 | echo 'yaml = yaml.lang
336 | yml = yaml.lang' >> /usr/share/source-highlight/lang.map
337 | curl -fsSLo /usr/share/source-highlight/yaml.lang https://git.savannah.gnu.org/cgit/src-highlite.git/plain/src/yaml.lang
338 | 
339 | pip install yamllint
340 | echo 'export PATH="/home/ubuntu/.local/bin:$PATH"' >> /home/ubuntu/.bashrc
341 | mkdir -p /home/ubuntu/.config/yamllint
342 | cat <<EOF >> /home/ubuntu/.config/yamllint/config
343 | extends: relaxed
344 | 
345 | rules:
346 |   line-length: disable
347 | EOF
348 | 
349 | sed --in-place 's/^#force_color_prompt=yes/force_color_prompt=yes/' /home/ubuntu/.bashrc
350 | curl -fsSLo /etc/bash_completion.d/kube-ps1 https://github.com/jonmosco/kube-ps1/raw/master/kube-ps1.sh
351 | echo 'PS1='\''${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\] $(kube_ps1)\$ '\''' >> /home/ubuntu/.bashrc
352 | 
353 | git clone https://github.com/Yggdroot/indentLine.git /home/ubuntu/.vim/pack/vendor/start/indentLine
354 | sudo -u ubuntu -- vim -u NONE -c "helptags ~/.vim/pack/vendor/start/indentLine/doc" -c "q"
355 | 
356 | cat <<EOF >> /home/ubuntu/.vimrc
357 | set paste
358 | set background=dark
359 | set number
360 | set listchars=tab:!·,trail:·
361 | set invlist
362 | highlight CursorLine cterm=NONE ctermbg=darkblue ctermfg=white
363 | set cursorline
364 | set expandtab
365 | set sw=2
366 | set ts=2
367 | set scrolloff=5
368 | set sts=2
369 | let g:indentLine_char = '⦙'
370 | EOF
371 | cat <<EOF >> /home/ubuntu/.bashrc
372 | alias pres='vim -c "setl filetype=yaml" -'
373 | EOF
374 | 
375 | chown -R ubuntu:ubuntu /home/ubuntu/.vimrc /home/ubuntu/.config /home/ubuntu/.vim
376 | 
377 | killall -9 /home/ubuntu/.strigo/tmux
378 | rm -f /home/ubuntu/tmux-*
379 | 
380 | echo "--------- End Installation/strigo/init_kubernetes.sh"
381 | 
--------------------------------------------------------------------------------