├── lab01 │   ├── kubek │   │   ├── public │   │   │   └── test.txt │   │   ├── server │   │   │   ├── handler.js │   │   │   ├── router.js │   │   │   └── index.js │   │   └── Dockerfile │   ├── kubek-pod.yaml │   └── lab01.txt ├── lab02 │   ├── kubek-pod-gpu.yaml │   ├── kubek-pod-labeled.yaml │   └── lab02.txt ├── lab03 │   ├── kubek-ns.yaml │   ├── kubek-pod-namespaced.yaml │   └── lab03.txt ├── lab04 │   ├── kubek-pod-liveness.yaml │   └── lab04.txt ├── lab05 │   └── lab05.txt ├── lab06 │   ├── kubek-rc.yaml │   └── lab06.txt ├── lab07 │   ├── kubek-rs.yaml │   ├── kubek-rs-matchexpressions.yaml │   └── lab07.txt ├── lab08 │   ├── kubek-svc.yaml │   ├── kubek-svc-session-affinity.yaml │   ├── kubek-svc-multiple-ports.yaml │   ├── kubek-svc-named-ports.yaml │   ├── kubek-pod-named-ports.yaml │   ├── kubetools │   │   └── Dockerfile │   └── lab08.txt ├── lab09 │   ├── kubek-svc.yaml │   └── lab09.txt ├── lab10 │   ├── kubek-rc.yaml │   └── lab10.txt ├── lab11 │   ├── fortune │   │   ├── Dockerfile │   │   └── fortuneloop.sh │   ├── fortune-kubek-pod.yaml │   └── lab11.txt ├── lab12 │   ├── mongodb-pod-hostpath.yaml │   ├── mongodb-pod-nfs.yaml │   ├── mongodb-pod-gcepd.yaml │   ├── mongodb-pod-aws.yaml │   └── lab12.txt ├── lab13 │   ├── mongodb-pvc.yaml │   ├── mongodb-pv-hostpath.yaml │   ├── mongodb-pv-gcepd.yaml │   ├── mongodb-pv-aws.yaml │   ├── mongodb-pod-pvc.yaml │   └── lab13.txt ├── lab14 │   ├── fortune-kubek-pod-args.yaml │   ├── fortune-kubek-pod-env.yaml │   └── lab14.txt ├── lab15 │   ├── fortune-config.yaml │   ├── kubek-pod-cm-volume.yaml │   ├── configmaps │   │   └── public │   │       ├── lorem.txt │   │       └── ipsum.txt │   ├── fortune-kubek-pod-cm.yaml │   └── lab15.txt ├── lab16 │   ├── kubek-secret.yaml │   ├── kubek-pod-secret.yaml │   └── lab16.txt ├── lab17 │   ├── kubek-deployment-v2.yaml │   ├── kubek-deployment-svc-v1.yaml │   ├── kubek-deployment-v3-readinesscheck.yaml │   └── lab17.txt ├── lab18 │   ├── ebs-storageclass.yaml │   └── lab18.txt ├── lab19 │   ├── auth0-secret.yaml │   ├── home-scs.yaml │   ├── items-scs.yaml │   ├── scs.yaml │   ├── mongodb.yaml │   ├── account-scs.yaml │   └── lab19.txt ├── LICENSE └── README.md /lab01/kubek/public/test.txt: -------------------------------------------------------------------------------- 1 | This is a test file -------------------------------------------------------------------------------- /lab03/kubek-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: kubek-ns 5 | -------------------------------------------------------------------------------- /lab18/ebs-storageclass.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: ebs 5 | provisioner: docker.io/hostpath 6 | -------------------------------------------------------------------------------- /lab15/fortune-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: fortune-config 5 | data: 6 | sleep-interval: "25" 7 | fortune-length: "50" 8 | -------------------------------------------------------------------------------- /lab08/kubek-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kubek 5 | spec: 6 | ports: 7 | - port: 8080 8 | targetPort: 8000 9 | selector: 10 | app: kubek 11 | -------------------------------------------------------------------------------- /lab01/kubek-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kubek 5 | spec: 6 | containers: 7 | - image: jagin/kubek 8 | name: kubek
9 | ports: 10 | - containerPort: 8000 11 | -------------------------------------------------------------------------------- /lab16/kubek-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: kubek-secret 5 | stringData: 6 | user: myuser 7 | password: mypasswd 8 | # data: 9 | # user: bXl1c2Vy 10 | # password: bXlwYXNzd2Q= -------------------------------------------------------------------------------- /lab08/kubek-svc-session-affinity.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kubek 5 | spec: 6 | sessionAffinity: ClientIP 7 | ports: 8 | - port: 8080 9 | targetPort: 8000 10 | selector: 11 | app: kubek 12 | -------------------------------------------------------------------------------- /lab09/kubek-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kubek 5 | spec: 6 | type: NodePort 7 | ports: 8 | - port: 8080 9 | targetPort: 8000 10 | nodePort: 30123 11 | selector: 12 | app: kubek 13 | -------------------------------------------------------------------------------- /lab02/kubek-pod-gpu.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kubek-gpu 5 | spec: 6 | nodeSelector: 7 | gpu: "true" 8 | containers: 9 | - image: jagin/kubek 10 | name: kubek 11 | ports: 12 | - containerPort: 8000 -------------------------------------------------------------------------------- /lab13/mongodb-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: mongodb-pvc 5 | spec: 6 | storageClassName: "" 7 | resources: 8 | requests: 9 | storage: 1Gi 10 | accessModes: 11 | - ReadWriteOnce 12 | -------------------------------------------------------------------------------- /lab03/kubek-pod-namespaced.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kubek-namespaced 5 | namespace: kubek-ns 6 | spec: 7 | containers: 8 | - image: jagin/kubek 9 | name: kubek 10 | ports: 11 | - containerPort: 8000 12 | -------------------------------------------------------------------------------- /lab01/kubek/server/handler.js: -------------------------------------------------------------------------------- 1 | exports.createHandler = function (method) { 2 | return new Handler(method) 3 | } 4 | 5 | Handler = function(method) { 6 | this.process = function(req, res) { 7 | return method.apply(this, [req, res]) 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /lab11/fortune/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:16.04 2 | 3 | RUN apt-get update ; apt-get -y install fortune fortunes-off 4 | ADD fortuneloop.sh /bin/fortuneloop.sh 5 | RUN chmod +x /bin/fortuneloop.sh 6 | 7 | ENTRYPOINT ["/bin/fortuneloop.sh"] 8 | CMD ["-s"] 9 | 10 | -------------------------------------------------------------------------------- /lab02/kubek-pod-labeled.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kubek-labeled 5 | labels: 6 | creation_method: manual 7 | 
env: dev 8 | spec: 9 | containers: 10 | - image: jagin/kubek 11 | name: kubek 12 | ports: 13 | - containerPort: 8000 14 | -------------------------------------------------------------------------------- /lab08/kubek-svc-multiple-ports.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kubek 5 | spec: 6 | ports: 7 | - name: http 8 | port: 80 9 | targetPort: 8000 10 | - name: https 11 | port: 443 12 | targetPort: 8443 13 | selector: 14 | app: kubek 15 | -------------------------------------------------------------------------------- /lab08/kubek-svc-named-ports.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kubek 5 | spec: 6 | ports: 7 | - name: http 8 | port: 80 9 | targetPort: http 10 | - name: https 11 | port: 443 12 | targetPort: https 13 | selector: 14 | app: kubek 15 | -------------------------------------------------------------------------------- /lab08/kubek-pod-named-ports.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kubek 5 | spec: 6 | containers: 7 | - image: jagin/kubek 8 | name: kubek 9 | ports: 10 | - name: http 11 | containerPort: 8000 12 | - name: https 13 | containerPort: 8443 14 | -------------------------------------------------------------------------------- /lab13/mongodb-pv-hostpath.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: mongodb-pv 5 | spec: 6 | capacity: 7 | storage: 1Gi 8 | accessModes: 9 | - ReadWriteOnce 10 | - ReadOnlyMany 11 | persistentVolumeReclaimPolicy: Retain 12 | hostPath: 13 | path: /tmp/mongodb 14 | -------------------------------------------------------------------------------- /lab17/kubek-deployment-v2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: kubek 5 | spec: 6 | template: 7 | metadata: 8 | name: kubek 9 | labels: 10 | app: kubek 11 | spec: 12 | containers: 13 | - image: jagin/kubek:v2 14 | name: kubek 15 | -------------------------------------------------------------------------------- /lab04/kubek-pod-liveness.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kubek-liveness 5 | spec: 6 | containers: 7 | - image: jagin/kubek 8 | name: kubek 9 | livenessProbe: 10 | httpGet: 11 | path: /health-check 12 | port: 8000 13 | initialDelaySeconds: 15 14 | 15 | -------------------------------------------------------------------------------- /lab13/mongodb-pv-gcepd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: mongodb-pv 5 | spec: 6 | capacity: 7 | storage: 1Gi 8 | accessModes: 9 | - ReadWriteOnce 10 | - ReadOnlyMany 11 | persistentVolumeReclaimPolicy: Retain 12 | gcePersistentDisk: 13 | pdName: mongodb 14 | fsType: ext4 15 | -------------------------------------------------------------------------------- /lab13/mongodb-pv-aws.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: mongodb-pv 5 | spec: 6 | capacity: 7 | storage: 1Gi
8 | accessModes: 9 | - ReadWriteOnce 10 | - ReadOnlyMany 11 | persistentVolumeReclaimPolicy: Retain 12 | awsElasticBlockStore: 13 | volumeID: my-volume 14 | fsType: ext4 15 | -------------------------------------------------------------------------------- /lab08/kubetools/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:trusty 2 | 3 | RUN apt-get update \ 4 | && apt-get install --yes --force-yes \ 5 | curl \ 6 | dnsutils \ 7 | httpie \ 8 | iputils-ping \ 9 | jq \ 10 | mongodb-clients \ 11 | mysql-client \ 12 | net-tools \ 13 | postgresql-client \ 14 | redis-tools \ 15 | telnet \ 16 | vim -------------------------------------------------------------------------------- /lab19/auth0-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: auth0-secret 5 | stringData: 6 | auth0-client-id: Rl1UDl5VgYwTOGZqAQRGZN4lIZEA0726 7 | auth0-client-secret: UvKVk0ibulu9_8-8R1D3GFhRVkYnFHznD4PMTpemOxhmxa8GZbSir9mbySiSjyev 8 | auth0-domain: jagin.eu.auth0.com 9 | auth0-cookie-password: secret_cookie_encryption_password 10 | -------------------------------------------------------------------------------- /lab12/mongodb-pod-hostpath.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: mongodb 5 | spec: 6 | volumes: 7 | - name: mongodb-data 8 | hostPath: 9 | path: /tmp/mongodb 10 | containers: 11 | - image: mongo 12 | name: mongodb 13 | volumeMounts: 14 | - name: mongodb-data 15 | mountPath: /data/db 16 | ports: 17 | - containerPort: 27017 18 | 19 | -------------------------------------------------------------------------------- /lab01/kubek/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:10 2 | 3 | # set working directory 4 | WORKDIR /app 5 | 6 | # set environment 7 | ENV HOST=0.0.0.0 8 | ENV PORT=8000 9 | 10 | # copy app sources for production 11 | COPY server server 12 | COPY public public 13 | 14 | # create healthy file indicator 15 | RUN touch /var/healthy 16 | 17 | # expose port and define ENTRYPOINT 18 | EXPOSE 8000 19 | ENTRYPOINT ["node", "server"] 20 | 21 | -------------------------------------------------------------------------------- /lab13/mongodb-pod-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: mongodb 5 | spec: 6 | containers: 7 | - image: mongo 8 | name: mongodb 9 | volumeMounts: 10 | - name: mongodb-data 11 | mountPath: /data/db 12 | ports: 13 | - containerPort: 27017 14 | volumes: 15 | - name: mongodb-data 16 | persistentVolumeClaim: 17 | claimName: mongodb-pvc 18 | 19 | -------------------------------------------------------------------------------- /lab15/kubek-pod-cm-volume.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kubek 5 | spec: 6 | containers: 7 | - image: jagin/kubek 8 | name: kubek 9 | volumeMounts: 10 | - name: config 11 | mountPath: /app/public 12 | readOnly: true 13 | ports: 14 | - containerPort: 8000 15 | volumes: 16 | - name: config 17 | configMap: 18 | name: kubek-config 19 | -------------------------------------------------------------------------------- /lab12/mongodb-pod-nfs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod
3 | metadata: 4 | name: mongodb 5 | spec: 6 | volumes: 7 | - name: mongodb-data 8 | nfs: 9 | server: 1.2.3.4 10 | path: /some/path 11 | containers: 12 | - image: mongo 13 | name: mongodb 14 | volumeMounts: 15 | - name: mongodb-data 16 | mountPath: /data/db 17 | ports: 18 | - containerPort: 27017 19 | 20 | -------------------------------------------------------------------------------- /lab11/fortune/fortuneloop.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | trap "exit" SIGINT 3 | 4 | INTERVAL=${INTERVAL:-10} 5 | 6 | echo Configured to generate new fortune every $INTERVAL seconds 7 | 8 | mkdir -p /var/fortune # -p: the directory already exists when an emptyDir volume is mounted here 9 | 10 | while : 11 | do 12 | echo $(date) Writing fortune to /var/fortune/fortune.txt 13 | /usr/games/fortune "$@" > /var/fortune/fortune.txt 14 | cat /var/fortune/fortune.txt 15 | sleep $INTERVAL 16 | done 17 | 18 | -------------------------------------------------------------------------------- /lab12/mongodb-pod-gcepd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: mongodb 5 | spec: 6 | volumes: 7 | - name: mongodb-data 8 | gcePersistentDisk: 9 | pdName: mongodb 10 | fsType: ext4 11 | containers: 12 | - image: mongo 13 | name: mongodb 14 | volumeMounts: 15 | - name: mongodb-data 16 | mountPath: /data/db 17 | ports: 18 | - containerPort: 27017 19 | 20 | -------------------------------------------------------------------------------- /lab12/mongodb-pod-aws.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: mongodb 5 | spec: 6 | volumes: 7 | - name: mongodb-data 8 | awsElasticBlockStore: 9 | volumeID: my-volume 10 | fsType: ext4 11 | containers: 12 | - image: mongo 13 | name: mongodb 14 | volumeMounts: 15 | - name: mongodb-data 16 | mountPath: /data/db 17 | ports: 18 | - containerPort: 27017 19 | 20 | -------------------------------------------------------------------------------- /lab15/configmaps/public/lorem.txt: -------------------------------------------------------------------------------- 1 | Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
-------------------------------------------------------------------------------- /lab16/kubek-pod-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kubek 5 | spec: 6 | containers: 7 | - image: jagin/kubek 8 | env: 9 | - name: AUTH_USER 10 | valueFrom: 11 | secretKeyRef: 12 | name: kubek-secret 13 | key: user 14 | - name: AUTH_PASSWORD 15 | valueFrom: 16 | secretKeyRef: 17 | name: kubek-secret 18 | key: password 19 | name: kubek 20 | ports: 21 | - containerPort: 8000 22 | 23 | -------------------------------------------------------------------------------- /lab11/fortune-kubek-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: fortune-kubek 5 | spec: 6 | containers: 7 | - image: jagin/fortune 8 | name: fortune 9 | volumeMounts: 10 | - name: public 11 | mountPath: /var/fortune 12 | - image: jagin/kubek 13 | name: kubek 14 | volumeMounts: 15 | - name: public 16 | mountPath: /app/public 17 | readOnly: true 18 | ports: 19 | - containerPort: 8000 20 | volumes: 21 | - name: public 22 | emptyDir: {} 23 | 24 | -------------------------------------------------------------------------------- /lab06/kubek-rc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: kubek 5 | spec: 6 | replicas: 3 7 | selector: 8 | app: kubek 9 | template: 10 | metadata: 11 | labels: 12 | app: kubek 13 | spec: 14 | containers: 15 | - name: kubek 16 | image: jagin/kubek 17 | ports: 18 | - containerPort: 8000 19 | livenessProbe: 20 | httpGet: 21 | path: /health-check 22 | port: 8000 23 | initialDelaySeconds: 5 24 | -------------------------------------------------------------------------------- /lab17/kubek-deployment-svc-v1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: kubek 5 | spec: 6 | replicas: 3 7 | template: 8 | metadata: 9 | name: kubek 10 | labels: 11 | app: kubek 12 | spec: 13 | containers: 14 | - image: jagin/kubek:v1 15 | name: kubek 16 | --- 17 | apiVersion: v1 18 | kind: Service 19 | metadata: 20 | name: kubek 21 | spec: 22 | type: NodePort 23 | ports: 24 | - port: 8080 25 | targetPort: 8000 26 | selector: 27 | app: kubek 28 | 29 | -------------------------------------------------------------------------------- /lab14/fortune-kubek-pod-args.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: fortune-kubek 5 | spec: 6 | containers: 7 | - image: jagin/fortune 8 | args: ["-n", "50", "-o"] 9 | name: fortune 10 | volumeMounts: 11 | - name: public 12 | mountPath: /var/fortune 13 | - image: jagin/kubek 14 | name: kubek 15 | volumeMounts: 16 | - name: public 17 | mountPath: /app/public 18 | readOnly: true 19 | ports: 20 | - containerPort: 8000 21 | volumes: 22 | - name: public 23 | emptyDir: {} 24 | 25 | -------------------------------------------------------------------------------- /lab07/kubek-rs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta2 2 | kind: ReplicaSet 3 | metadata: 4 | name: kubek 5 | spec: 6 | replicas: 3 7 | selector: 8 | matchLabels: 9 | app: kubek 10 | template: 11 | metadata: 12 | labels: 13 | app: kubek 14 | spec: 15 | 
containers: 16 | - name: kubek 17 | image: jagin/kubek 18 | ports: 19 | - containerPort: 8000 20 | livenessProbe: 21 | httpGet: 22 | path: /health-check 23 | port: 8000 24 | initialDelaySeconds: 5 25 | -------------------------------------------------------------------------------- /lab17/kubek-deployment-v3-readinesscheck.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: kubek 5 | spec: 6 | replicas: 3 7 | minReadySeconds: 15 8 | strategy: 9 | rollingUpdate: 10 | maxSurge: 1 11 | maxUnavailable: 0 12 | type: RollingUpdate 13 | template: 14 | metadata: 15 | name: kubek 16 | labels: 17 | app: kubek 18 | spec: 19 | containers: 20 | - image: jagin/kubek:v3 21 | name: kubek 22 | readinessProbe: 23 | periodSeconds: 1 24 | httpGet: 25 | path: / 26 | port: 8000 27 | -------------------------------------------------------------------------------- /lab07/kubek-rs-matchexpressions.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta2 2 | kind: ReplicaSet 3 | metadata: 4 | name: kubek 5 | spec: 6 | replicas: 3 7 | selector: 8 | matchExpressions: 9 | - key: app 10 | operator: In 11 | values: 12 | - kubek 13 | template: 14 | metadata: 15 | labels: 16 | app: kubek 17 | spec: 18 | containers: 19 | - name: kubek 20 | image: jagin/kubek 21 | ports: 22 | - containerPort: 8000 23 | livenessProbe: 24 | httpGet: 25 | path: /health-check 26 | port: 8000 27 | initialDelaySeconds: 5 28 | -------------------------------------------------------------------------------- /lab14/fortune-kubek-pod-env.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: fortune-kubek 5 | spec: 6 | containers: 7 | - image: jagin/fortune 8 | env: 9 | - name: INTERVAL 10 | value: "5" 11 | args: ["-n", "50", "-o"] 12 | name: fortune 13 | volumeMounts: 14 | - name: public 15 | mountPath: /var/fortune 16 | - image: jagin/kubek 17 | name: kubek 18 | volumeMounts: 19 | - name: public 20 | mountPath: /app/public 21 | readOnly: true 22 | ports: 23 | - containerPort: 8000 24 | volumes: 25 | - name: public 26 | emptyDir: {} 27 | 28 | -------------------------------------------------------------------------------- /lab10/kubek-rc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: kubek 5 | spec: 6 | replicas: 3 7 | selector: 8 | app: kubek 9 | template: 10 | metadata: 11 | labels: 12 | app: kubek 13 | spec: 14 | containers: 15 | - name: kubek 16 | image: jagin/kubek 17 | ports: 18 | - containerPort: 8000 19 | livenessProbe: 20 | httpGet: 21 | path: /health-check 22 | port: 8000 23 | initialDelaySeconds: 5 24 | readinessProbe: 25 | exec: 26 | command: 27 | - ls 28 | - /var/ready -------------------------------------------------------------------------------- /lab11/lab11.txt: -------------------------------------------------------------------------------- 1 | Volume 2 | Lab 11: emptyDir 3 | ================ 4 | 5 | // emptyDir: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir 6 | 7 | // First we will build some content generator 8 | // See: http://manpages.ubuntu.com/manpages/cosmic/man6/fortune.6.html 9 | $ cd fortune 10 | $ cat fortuneloop.sh 11 | $ cat Dockerfile 12 | $ docker build -t {docker-repo}/fortune . 
13 | $ docker push {docker-repo}/fortune 14 | 15 | $ cd .. 16 | 17 | $ cat fortune-kubek-pod.yaml 18 | $ kubectl create -f fortune-kubek-pod.yaml 19 | // - wait till pod is running 20 | $ kubectl port-forward fortune-kubek 8080:8000 21 | $ while true; do curl http://localhost:8080/fortune.txt; sleep 10; done -------------------------------------------------------------------------------- /lab04/lab04.txt: -------------------------------------------------------------------------------- 1 | Pods 2 | Lab 4: Liveness probe 3 | ===================== 4 | 5 | // Configure Liveness and Readiness Probes: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ 6 | 7 | // Creating liveness probe based on HTTP 8 | $ cat kubek-pod-liveness.yaml 9 | $ kubectl create -f kubek-pod-liveness.yaml 10 | 11 | // Check the health of the pod 12 | $ kubectl get po kubek-liveness 13 | $ kubectl logs kubek-liveness -f 14 | 15 | $ kubectl exec -it kubek-liveness bash 16 | # rm /var/healthy 17 | 18 | // When you want to figure out why the previous container terminated, you’ll want to 19 | // see those logs instead of the current container’s logs. This can be done by using 20 | // the --previous option 21 | $ kubectl logs kubek-liveness --previous 22 | $ kubectl describe po kubek-liveness -------------------------------------------------------------------------------- /lab15/configmaps/public/ipsum.txt: -------------------------------------------------------------------------------- 1 | Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur? 
-------------------------------------------------------------------------------- /lab15/fortune-kubek-pod-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: fortune-kubek 5 | spec: 6 | containers: 7 | - image: jagin/fortune 8 | env: 9 | - name: INTERVAL 10 | valueFrom: 11 | configMapKeyRef: 12 | name: fortune-config 13 | key: sleep-interval 14 | - name: FORTUNE_LENGTH 15 | valueFrom: 16 | configMapKeyRef: 17 | name: fortune-config 18 | key: fortune-length 19 | args: ["-n", "$(FORTUNE_LENGTH)", "-o"] 20 | name: fortune 21 | volumeMounts: 22 | - name: public 23 | mountPath: /var/fortune 24 | - image: jagin/kubek 25 | name: kubek 26 | volumeMounts: 27 | - name: public 28 | mountPath: /app/public 29 | readOnly: true 30 | ports: 31 | - containerPort: 8000 32 | volumes: 33 | - name: public 34 | emptyDir: {} 35 | 36 | -------------------------------------------------------------------------------- /lab19/home-scs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: home-scs 5 | labels: 6 | app: home-scs 7 | spec: 8 | replicas: 1 9 | template: 10 | metadata: 11 | name: home-scs 12 | labels: 13 | app: home-scs 14 | spec: 15 | containers: 16 | - image: jagin/home-scs 17 | env: 18 | - name: NODE_ENV 19 | value: development 20 | - name: JWT_KEY 21 | valueFrom: 22 | secretKeyRef: 23 | name: auth0-secret 24 | key: auth0-client-secret 25 | name: home-scs 26 | --- 27 | apiVersion: v1 28 | kind: Service 29 | metadata: 30 | name: home-scs 31 | labels: 32 | app: home-scs 33 | spec: 34 | type: ClusterIP 35 | ports: 36 | - port: 8080 37 | targetPort: 8000 38 | selector: 39 | app: home-scs 40 | 41 | -------------------------------------------------------------------------------- /lab19/items-scs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: items-scs 5 | labels: 6 | app: items-scs 7 | spec: 8 | replicas: 1 9 | template: 10 | metadata: 11 | name: items-scs 12 | labels: 13 | app: items-scs 14 | spec: 15 | containers: 16 | - image: jagin/items-scs 17 | env: 18 | - name: NODE_ENV 19 | value: development 20 | - name: JWT_KEY 21 | valueFrom: 22 | secretKeyRef: 23 | name: auth0-secret 24 | key: auth0-client-secret 25 | - name: DATABASE_URL 26 | value: mongodb://mongodb:27017 27 | - name: DATABASE_NAME 28 | value: items-db 29 | name: items-scs 30 | --- 31 | apiVersion: v1 32 | kind: Service 33 | metadata: 34 | name: items-scs 35 | labels: 36 | app: items-scs 37 | spec: 38 | type: ClusterIP 39 | ports: 40 | - port: 8080 41 | targetPort: 8000 42 | selector: 43 | app: items-scs 44 | 45 | -------------------------------------------------------------------------------- /lab07/lab07.txt: -------------------------------------------------------------------------------- 1 | ReplicaSet 2 | Lab 7: Creating a ReplicaSet 3 | ============================ 4 | 5 | // ReplicaSet: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/ 6 | 7 | // Comparing a ReplicaSet to a ReplicationController 8 | // - another API version 9 | // - spec.selector.matchLabels 10 | $ diff ../lab06/kubek-rc.yaml kubek-rs.yaml 11 | 12 | // Creating and examining a ReplicaSet 13 | $ cat kubek-rs.yaml 14 | $ kubectl create -f kubek-rs.yaml 15 | $ kubectl describe replicaset 16 | // - they’re still the same three pods we had before. 
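// (Suggested check, not in the original transcript: confirm the ReplicaSet adopted the existing pods by comparing their labels with its selector.) $ kubectl get po --show-labels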
17 | 18 | // Using the ReplicaSet’s more expressive label selectors 19 | // - spec.selector.matchExpressions 20 | $ cat kubek-rs-matchexpressions.yaml 21 | // - operators: In, NotIn, Exists, DoesNotExist 22 | // - if you specify multiple expressions, all those expressions must evaluate to true for the 23 | // selector to match a pod 24 | // See: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#resources-that-support-set-based-requirements 25 | 26 | // Wrapping up ReplicaSets 27 | $ kubectl delete rs kubek 28 | $ kubectl get pods -------------------------------------------------------------------------------- /lab09/lab09.txt: -------------------------------------------------------------------------------- 1 | Service 2 | Lab 9: Creating NodePort Service 3 | ================================ 4 | 5 | // Type NodePort: https://kubernetes.io/docs/concepts/services-networking/service/#nodeport 6 | // Type LoadBalancer: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer 7 | 8 | // Creating NodePort service 9 | $ cat kubek-svc.yaml 10 | $ kubectl create -f kubek-svc.yaml 11 | // $ kubectl expose rc kubek --port=8080 --target-port=8000 --type=NodePort --name=kubek 12 | $ kubectl get svc kubek 13 | 14 | // The service is accessible through any node IP (in our case it is localhost). 15 | $ curl http://localhost:${kubek-svc-node-port} 16 | 17 | // The service is accessible through the proxy 18 | $ curl http://localhost:8001/api/v1/namespaces/default/services/http:kubek:/proxy/ 19 | 20 | // The service is also accessible through the Cluster IP 21 | $ kubectl run kubetools --image=jagin/kubetools --rm -it --restart=Never --command -- curl -s http://{cluster-ip}:8080 22 | 23 | // If one node goes out of service, we have to use another node's IP address. 24 | // The LoadBalancer service type solves this. 25 | 26 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Jarosław Gilewski 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE.
22 | -------------------------------------------------------------------------------- /lab10/lab10.txt: -------------------------------------------------------------------------------- 1 | Service 2 | Lab 10: Readiness probe 3 | ======================= 4 | 5 | // Configure Liveness and Readiness Probes: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ 6 | 7 | // Add the readiness probe to the current RC descriptor 8 | $ cat kubek-rc.yaml 9 | $ kubectl replace -f kubek-rc.yaml 10 | // - this time we use the replace command to update the kubek RC resource 11 | 12 | // or 13 | // $ kubectl edit rc kubek 14 | 15 | // Check existing pods and endpoints 16 | $ kubectl get po 17 | // - existing pods are ready because they are running based on the old template 18 | // - we need to delete them so that new pods are created from the new template 19 | $ kubectl delete po --all 20 | $ kubectl get po 21 | $ kubectl get endpoints kubek 22 | // - there are no pod IPs in the ENDPOINTS column 23 | $ kubectl exec kubek-${pod-id} -- touch /var/ready 24 | // - it will take up to 10 seconds for the pod to become ready 25 | // (that's the default readiness probe period) 26 | $ kubectl get endpoints kubek 27 | // - notice ENDPOINTS 28 | $ curl http://localhost:${svc-node-port} 29 | // - curl hits the ready pod every time 30 | 31 | // Set other pods as ready 32 | // Try to remove the /var/ready file from one of the pods -------------------------------------------------------------------------------- /lab01/kubek/server/router.js: -------------------------------------------------------------------------------- 1 | const handlerFactory = require('./handler') 2 | const fs = require('fs') 3 | const parser = require('url') 4 | let handlers = {} // let (not const): clear() reassigns it 5 | 6 | exports.clear = function() { 7 | handlers = {} 8 | } 9 | 10 | exports.register = function(url, method) { 11 | handlers[url] = handlerFactory.createHandler(method) 12 | } 13 | 14 | exports.route = function(req) { 15 | const url = parser.parse(req.url, true) 16 | console.log(`Handling: ${url.pathname}`) 17 | let handler = handlers[url.pathname] 18 | if (!handler) handler = this.missing(req) 19 | return handler 20 | } 21 | 22 | exports.missing = function(req) { 23 | const url = parser.parse(req.url, true) 24 | const path = process.cwd() + '/public' + url.pathname 25 | 26 | try { 27 | const data = fs.readFileSync(path) 28 | return handlerFactory.createHandler(function(req, res) { 29 | res.writeHead(200, {'Content-Type': 'text/plain'}) 30 | res.end(data) 31 | }) 32 | } catch (e) { 33 | return handlerFactory.createHandler(function(req, res) { 34 | console.log(`Not found: ${url.pathname}`) 35 | res.writeHead(404, {'Content-Type': 'text/plain'}) 36 | res.end(url.pathname + ' not found!') 37 | }) 38 | } 39 | } -------------------------------------------------------------------------------- /lab03/lab03.txt: -------------------------------------------------------------------------------- 1 | Pods 2 | Lab 3: Namespace 3 | ================ 4 | 5 | // Namespaces: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ 6 | 7 | // Discovering other namespaces and their pods 8 | $ kubectl get ns 9 | $ kubectl get po --namespace kube-system 10 | 11 | // Creating a namespace with the imperative command 12 | // $ kubectl create namespace kubek-ns 13 | 14 | // Creating a namespace with the descriptor 15 | $ cat kubek-ns.yaml 16 | $ kubectl create -f kubek-ns.yaml 17 | 18 | // Managing objects in other namespaces 19 | $ kubectl create -f ../lab01/kubek-pod.yaml -n
kubek-ns 20 | $ kubectl get pods --namespace kubek-ns 21 | 22 | // Running pods in the selected namespace 23 | $ cat kubek-pod-namespaced.yaml 24 | $ kubectl create -f kubek-pod-namespaced.yaml 25 | $ kubectl get pods -n kubek-ns 26 | 27 | // Switching the namespace 28 | $ kubectl config set-context $(kubectl config current-context) --namespace ${my-namespace} 29 | 30 | // To quickly switch to a different namespace, you can set up the following alias: 31 | $ alias kcd='kubectl config set-context $(kubectl config current-context) --namespace ' 32 | // Then switch between namespaces using: 33 | $ kcd ${my-namespace} 34 | 35 | // See also: https://github.com/ahmetb/kubectx -------------------------------------------------------------------------------- /lab12/lab12.txt: -------------------------------------------------------------------------------- 1 | Volume 2 | Lab 12: hostPath 3 | ================ 4 | 5 | // hostPath: https://kubernetes.io/docs/concepts/storage/volumes/#hostpath 6 | 7 | // Creating MongoDB with hostPath 8 | $ cat mongodb-pod-hostpath.yaml 9 | $ kubectl create -f mongodb-pod-hostpath.yaml 10 | $ kubectl exec -it mongodb mongo 11 | > use mystore 12 | > db.foo.insert({name:'foo'}) 13 | > db.foo.find() 14 | > exit 15 | $ kubectl delete pod mongodb 16 | // - data should be preserved 17 | $ kubectl create -f mongodb-pod-hostpath.yaml 18 | $ kubectl exec -it mongodb mongo 19 | > use mystore 20 | > db.foo.find() 21 | > exit 22 | 23 | $ kubectl delete po mongodb --wait=false 24 | 25 | ------------------------------------------------------ 26 | 27 | // Persistent storage 28 | 29 | // awsElasticBlockStore: https://kubernetes.io/docs/concepts/storage/volumes/#awselasticblockstore 30 | // - creating MongoDB with AWS EBS 31 | $ cat mongodb-pod-aws.yaml 32 | 33 | // gcePersistentDisk: https://kubernetes.io/docs/concepts/storage/volumes/#gcepersistentdisk 34 | // - creating MongoDB on Google Cloud with GCE Persistence Disk 35 | $ cat mongodb-pod-gcepd.yaml 36 | 37 | // nfs: https://kubernetes.io/docs/concepts/storage/volumes/#nfs 38 | // - creating MongoDB with NFS 39 | $ cat mongodb-pod-nfs.yaml 40 | -------------------------------------------------------------------------------- /lab05/lab05.txt: -------------------------------------------------------------------------------- 1 | Pods 2 | Lab 5: Stopping and removing pods 3 | ================================= 4 | 5 | // Deleting a pod by name 6 | $ kubectl delete po kubek-gpu 7 | // - you can also delete more than one pod by specifying multiple, space-sep- 8 | // arated names (for example, kubectl delete po pod1 pod2) 9 | // - all the containers that are part of that pod are terminated 10 | // - Kubernetes sends a SIGTERM signal to the process and waits a certain 11 | // number of seconds (30 by default) for it to shut down gracefully 12 | // - if it doesn’t shut down in time, the process is then killed through SIGKILL. 13 | // - to make sure your processes are always shut down gracefully, 14 | // they need to handle the SIGTERM signal properly. 
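// (Editor's sketch, not part of the original transcript: a minimal SIGTERM handler for a Node.js server like the kubek app; `server` is assumed to be the object returned by http.createServer(). Uncommented, this is runnable Node code.) // process.on('SIGTERM', () => { // console.log('SIGTERM received, shutting down gracefully') // server.close(() => process.exit(0)) // stop accepting new connections, then exit // })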
15 | 16 | // Deleting pods using label selectors 17 | $ kubectl get po -L creation_method 18 | $ kubectl delete po -l creation_method=manual 19 | 20 | // Deleting pods by deleting the whole namespace 21 | $ kubectl get pods -n kubek-ns 22 | $ kubectl delete ns kubek-ns 23 | 24 | // Deleting all pods in a namespace, while keeping the namespace 25 | $ kubectl get pods 26 | $ kubectl delete po --all 27 | 28 | // Deleting (almost) all resources in a namespace 29 | $ kubectl delete all --all 30 | // - certain resources (like Secrets) are preserved and need to be deleted explicitly -------------------------------------------------------------------------------- /lab19/scs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: scs 5 | labels: 6 | app: scs 7 | spec: 8 | replicas: 1 9 | template: 10 | metadata: 11 | name: scs 12 | labels: 13 | app: scs 14 | spec: 15 | containers: 16 | - image: jagin/scs 17 | env: 18 | - name: NGINX_HOST 19 | value: "0.0.0.0" 20 | - name: NGINX_PORT 21 | value: "8000" 22 | - name: NGINX_RESOLVER 23 | value: "kube-dns.kube-system.svc.cluster.local ipv6=off valid=5s" 24 | - name: NGINX_HOME_SCS_HOST 25 | value: home-scs.scs.svc.cluster.local 26 | - name: NGINX_HOME_SCS_PORT 27 | value: "8080" 28 | - name: NGINX_ACCOUNT_SCS_HOST 29 | value: account-scs.scs.svc.cluster.local 30 | - name: NGINX_ACCOUNT_SCS_PORT 31 | value: "8080" 32 | - name: NGINX_ITEMS_SCS_HOST 33 | value: items-scs.scs.svc.cluster.local 34 | - name: NGINX_ITEMS_SCS_PORT 35 | value: "8080" 36 | name: scs 37 | --- 38 | apiVersion: v1 39 | kind: Service 40 | metadata: 41 | name: scs 42 | labels: 43 | app: scs 44 | spec: 45 | type: NodePort 46 | ports: 47 | - port: 8000 48 | targetPort: 8000 49 | selector: 50 | app: scs 51 | -------------------------------------------------------------------------------- /lab19/mongodb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: mongodb-pv 5 | spec: 6 | capacity: 7 | storage: 1Gi 8 | accessModes: 9 | - ReadWriteOnce 10 | - ReadOnlyMany 11 | persistentVolumeReclaimPolicy: Retain 12 | hostPath: 13 | path: /tmp/mongodb 14 | --- 15 | apiVersion: v1 16 | kind: PersistentVolumeClaim 17 | metadata: 18 | name: mongodb-pvc 19 | labels: 20 | app: mongodb 21 | spec: 22 | storageClassName: "" 23 | resources: 24 | requests: 25 | storage: 1Gi 26 | accessModes: 27 | - ReadWriteOnce 28 | --- 29 | apiVersion: apps/v1beta1 30 | kind: Deployment 31 | metadata: 32 | name: mongodb 33 | labels: 34 | app: mongodb 35 | spec: 36 | replicas: 1 37 | template: 38 | metadata: 39 | name: mongodb 40 | labels: 41 | app: mongodb 42 | spec: 43 | containers: 44 | - image: mongo 45 | name: mongodb 46 | volumeMounts: 47 | - name: mongodb-data 48 | mountPath: /data/db 49 | ports: 50 | - containerPort: 27017 51 | volumes: 52 | - name: mongodb-data 53 | persistentVolumeClaim: 54 | claimName: mongodb-pvc 55 | --- 56 | apiVersion: v1 57 | kind: Service 58 | metadata: 59 | name: mongodb 60 | labels: 61 | app: mongodb 62 | spec: 63 | type: ClusterIP 64 | ports: 65 | - port: 27017 66 | targetPort: 27017 67 | protocol: TCP 68 | selector: 69 | app: mongodb 70 | 71 | 72 | 73 | -------------------------------------------------------------------------------- /lab19/account-scs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: 
Deployment 3 | metadata: 4 | name: account-scs 5 | labels: 6 | app: account-scs 7 | spec: 8 | replicas: 1 9 | template: 10 | metadata: 11 | name: account-scs 12 | labels: 13 | app: account-scs 14 | spec: 15 | containers: 16 | - image: jagin/account-scs 17 | env: 18 | - name: NODE_ENV 19 | value: development 20 | - name: JWT_KEY 21 | valueFrom: 22 | secretKeyRef: 23 | name: auth0-secret 24 | key: auth0-client-secret 25 | - name: AUTH0_CLIENT_ID 26 | valueFrom: 27 | secretKeyRef: 28 | name: auth0-secret 29 | key: auth0-client-id 30 | - name: AUTH0_CLIENT_SECRET 31 | valueFrom: 32 | secretKeyRef: 33 | name: auth0-secret 34 | key: auth0-client-secret 35 | - name: AUTH0_DOMAIN 36 | valueFrom: 37 | secretKeyRef: 38 | name: auth0-secret 39 | key: auth0-domain 40 | - name: AUTH0_COOKIE_PASSWORD 41 | valueFrom: 42 | secretKeyRef: 43 | name: auth0-secret 44 | key: auth0-cookie-password 45 | name: account-scs 46 | --- 47 | apiVersion: v1 48 | kind: Service 49 | metadata: 50 | name: account-scs 51 | labels: 52 | app: account-scs 53 | spec: 54 | type: ClusterIP 55 | ports: 56 | - port: 8080 57 | targetPort: 8000 58 | selector: 59 | app: account-scs 60 | 61 | -------------------------------------------------------------------------------- /lab02/lab02.txt: -------------------------------------------------------------------------------- 1 | Pods 2 | Lab 2: Labels and annotations 3 | ============================= 4 | 5 | // Labels and Selectors: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ 6 | // Annotations: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ 7 | 8 | // Specifying labels when creating a pod 9 | $ cat kubek-pod-labeled.yaml 10 | $ kubectl create -f kubek-pod-labeled.yaml 11 | $ kubectl get po --show-labels 12 | $ kubectl get po -L creation_method,env 13 | 14 | // Modifying labels of existing pods 15 | $ kubectl label po kubek creation_method=manual 16 | $ kubectl label po kubek-labeled env=debug --overwrite 17 | $ kubectl get po -L creation_method,env 18 | 19 | // Listing subsets of pods through label selectors 20 | // - label selectors allow you to select a subset of pods tagged with certain 21 | // labels and perform an operation on those pods 22 | $ kubectl get po -l creation_method=manual 23 | $ kubectl get po -l env 24 | $ kubectl get po -l '!env' 25 | 26 | // Using multiple conditions in a label selector 27 | $ kubectl get po -l creation_method=manual,env=debug 28 | 29 | // Using labels and selectors to constrain pod scheduling 30 | $ kubectl get nodes --show-labels 31 | $ kubectl label node docker-desktop gpu=true 32 | $ kubectl get nodes -l gpu=true 33 | 34 | $ cat kubek-pod-gpu.yaml 35 | $ kubectl create -f kubek-pod-gpu.yaml 36 | 37 | // We can assign a pod to a specific node using the kubernetes.io/hostname label 38 | $ kubectl get nodes -L kubernetes.io/hostname 39 | 40 | // Annotating pods 41 | $ kubectl annotate pod kubek mycompany.com/someannotation="foo bar" 42 | $ kubectl describe pod kubek -------------------------------------------------------------------------------- /lab14/lab14.txt: -------------------------------------------------------------------------------- 1 | Application configuration 2 | Lab 14: Configuration through command-line arguments and environment variables 3 | ============================================================================= 4 | 5 | // Let's look again at the fortune image from lab11 6 | // See: http://manpages.ubuntu.com/manpages/cosmic/man6/fortune.6.html 7 | $ cat ../lab11/fortune/fortuneloop.sh
8 | $ cat ../lab11/fortune/Dockerfile 9 | 10 | // Overriding the command and arguments in Kubernetes 11 | $ cat fortune-kubek-pod-args.yaml 12 | $ kubectl create -f fortune-kubek-pod-args.yaml 13 | // - wait till the pod is running 14 | $ kubectl port-forward fortune-kubek 8080:8000 15 | $ while true; do curl http://localhost:8080/fortune.txt; sleep 10; done 16 | 17 | // Specifying the executable and its arguments in Docker vs Kubernetes 18 | // - ENTRYPOINT -> command 19 | // - CMD -> args 20 | 21 | $ kubectl delete pod fortune-kubek 22 | 23 | // Specifying environment variables in a container definition 24 | $ cat fortune-kubek-pod-env.yaml 25 | $ kubectl create -f fortune-kubek-pod-env.yaml 26 | $ kubectl port-forward fortune-kubek 8080:8000 27 | $ while true; do curl http://localhost:8080/fortune.txt; sleep 5; done 28 | // - the environment variable is set inside the container definition, not at the pod level. 29 | // - Kubernetes also automatically exposes environment variables for each service in the same namespace 30 | 31 | // Referring to other environment variables in a variable’s value 32 | // env: 33 | // - name: FIRST_VAR 34 | // value: "foo" 35 | // - name: SECOND_VAR 36 | // value: "$(FIRST_VAR)bar" 37 | 38 | // Having values effectively hardcoded in the pod definition means you need to have 39 | // separate pod definitions for your production and your development pods. 40 | // But there is a ConfigMap! -------------------------------------------------------------------------------- /lab16/lab16.txt: -------------------------------------------------------------------------------- 1 | Application configuration 2 | Lab 16: Secret 3 | ============== 4 | 5 | // Secrets: https://kubernetes.io/docs/concepts/configuration/secret/ 6 | 7 | // Inspecting default-token Secret 8 | $ kubectl get secrets 9 | $ kubectl describe secrets 10 | $ kubectl describe pod kubek 11 | $ kubectl exec kubek ls /var/run/secrets/kubernetes.io/serviceaccount/ 12 | 13 | // Creating a Secret 14 | $ kubectl create secret -h 15 | // $ kubectl create secret generic kubek-secret --from-literal=user=myuser --from-literal=password=mypasswd 16 | $ cat kubek-secret.yaml 17 | $ kubectl create -f kubek-secret.yaml 18 | $ kubectl get secret kubek-secret -o yaml 19 | 20 | $ kubectl create -f kubek-pod-secret.yaml 21 | $ kubectl port-forward kubek 8080:8000 22 | $ curl http://localhost:8080/secret 23 | $ curl http://myuser:mypasswd@localhost:8080/secret 24 | 25 | // Creating a secret for authentication with a Docker Registry 26 | $ kubectl create secret docker-registry mydockerhubsecret \ 27 | --docker-username=myusername --docker-password=mypassword \ 28 | --docker-email=my.email@provider.com 29 | 30 | // Example: 31 | // 32 | // apiVersion: v1 33 | // kind: Pod 34 | // metadata: 35 | // name: private-pod 36 | // spec: 37 | // imagePullSecrets: 38 | // - name: mydockerhubsecret 39 | // containers: 40 | // - image: username/private:tag 41 | // name: main 42 | 43 | // Creating a secret for authentication with an AWS ECR Registry: 44 | // $ export AWS_ACCOUNT_ID=your-account-id 45 | // $ export AWS_REGION=eu-west-1 46 | // $ export DOCKER_REGISTRY_SERVER=https://$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com 47 | // $ export DOCKER_USER=AWS 48 | // $ export DOCKER_PASSWORD=`aws ecr get-login --region $AWS_REGION --registry-ids $AWS_ACCOUNT_ID | cut -d' ' -f6` 49 | // $ kubectl create secret docker-registry docker-credentials \ 50 | // --docker-server=$DOCKER_REGISTRY_SERVER \
51 | // --docker-username=$DOCKER_USER \ 52 | // --docker-password=$DOCKER_PASSWORD \ 53 | // --docker-email=no@email.local -------------------------------------------------------------------------------- /lab18/lab18.txt: -------------------------------------------------------------------------------- 1 | StatefulSet 2 | Lab 18: Using a StatefulSet 3 | =========================== 4 | 5 | // StatefulSet: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ 6 | 7 | // This lab is based on: 8 | // - https://blog.openshift.com/kubernetes-statefulset-in-action/ 9 | // - https://github.com/mhausenblas/mehdb 10 | 11 | // Create a namespace 12 | $ kubectl create ns mehdb 13 | 14 | // The mehdb setup assumes that a storage class named ebs exists 15 | $ kubectl create -f ebs-storageclass.yaml 16 | 17 | // Creating the mehdb StatefulSet 18 | $ kubectl -n mehdb apply -f https://raw.githubusercontent.com/mhausenblas/mehdb/master/app.yaml 19 | 20 | // First, let’s verify that the StatefulSet has created the leader (mehdb-0) 21 | // and follower pod (mehdb-1) and that the persistent volumes are in place 22 | $ kubectl get all -n mehdb 23 | $ kubectl -n mehdb get sts,po,pvc -o wide 24 | 25 | // Check the headless service 26 | $ kubectl -n mehdb describe svc/mehdb 27 | 28 | $ kubectl -n mehdb run kubetools --image=jagin/kubetools --rm -it --restart=Never --command nslookup mehdb 29 | // - the headless service itself has no cluster IP and created two endpoints for the pods mehdb-0 and mehdb-1 respectively 30 | 31 | // Writing data to mehdb 32 | $ kubectl -n mehdb run kubetools --image=jagin/kubetools --rm -it --restart=Never --command bash 33 | # curl -L -X PUT -d "test data" mehdb:9876/set/test 34 | // - the -L option in the above curl command makes sure that if we happen to hit the 35 | // follower shard we get redirected to the leader shard and the write goes through 36 | 37 | // Reading data directly from the follower shard and the leader 38 | # curl mehdb-1.mehdb:9876/get/test 39 | # curl mehdb-0.mehdb:9876/get/test 40 | 41 | // Scaling up 42 | $ kubectl -n mehdb scale sts mehdb --replicas 4 43 | // - this can take several minutes until the readiness probes pass 44 | 45 | $ kubectl -n mehdb get sts 46 | 47 | // Let's simulate a failure, for example by deleting one of the pods 48 | $ kubectl -n mehdb get pods 49 | $ kubectl -n mehdb delete po/mehdb-1 50 | $ kubectl -n mehdb get pods 51 | // - the StatefulSet detected that mehdb-1 is gone and created a replacement for it with a new IP address 52 | // - we can still get the data from this shard via curl mehdb-1.mehdb:9876/get/test thanks to the persistent volume 53 | 54 | // Cleanup 55 | $ kubectl delete ns mehdb -------------------------------------------------------------------------------- /lab13/lab13.txt: -------------------------------------------------------------------------------- 1 | PersistentVolume, PersistentVolumeClaim 2 | Lab 13: PV, PVC in use 3 | ====================== 4 | 5 | // Persistent Volumes: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ 6 | 7 | // Creating a PersistentVolume 8 | $ cat mongodb-pv-hostpath.yaml 9 | $ cat mongodb-pv-aws.yaml 10 | $ cat mongodb-pv-gcepd.yaml 11 | $ kubectl create -f mongodb-pv-hostpath.yaml 12 | $ kubectl get pv 13 | // - available, because you haven’t claimed it yet.
14 | // - PVs don’t belong to any namespace 15 | 16 | // Claiming a PersistentVolume by creating a PersistentVolumeClaim 17 | $ cat mongodb-pvc.yaml 18 | $ kubectl create -f mongodb-pvc.yaml 19 | // - the PV capacity must be large enough to accommodate what the claim requests 20 | // - Kubernetes finds the matching PV and binds it to the PVC 21 | $ kubectl get pvc 22 | 23 | // Access modes 24 | // - RWO—ReadWriteOnce: only a single node can mount the volume for reading and writing. 25 | // - ROX—ReadOnlyMany: multiple nodes can mount the volume for reading. 26 | // - RWX—ReadWriteMany: multiple nodes can mount the volume for both reading and writing. 27 | 28 | $ kubectl get pv 29 | // - the PV is now Bound instead of Available 30 | // - nobody else can claim the same volume until you release it 31 | 32 | // Using a PersistentVolumeClaim in a pod 33 | $ cat mongodb-pod-pvc.yaml 34 | $ kubectl create -f mongodb-pod-pvc.yaml 35 | $ kubectl exec -it mongodb mongo 36 | > use mystore 37 | > db.foo.find() 38 | > exit 39 | // - we have used the same hostPath (from the previous lab) as the PV 40 | 41 | // Recycling PersistentVolumes 42 | $ kubectl delete pod mongodb 43 | $ kubectl delete pvc mongodb-pvc 44 | $ kubectl create -f mongodb-pvc.yaml 45 | $ kubectl get pvc 46 | // - the claim’s status is shown as Pending 47 | $ kubectl get pv 48 | // - The STATUS column shows the PV as Released, not Available like before. 49 | // - the volume may contain data and shouldn’t be bound to a completely new claim without 50 | // giving the cluster admin a chance to clean it up 51 | // - why? because spec.persistentVolumeReclaimPolicy is Retain 52 | $ kubectl explain pv.spec.persistentVolumeReclaimPolicy 53 | 54 | // Reclaiming PV 55 | // - the only way to manually recycle the PV to make it available 56 | // again is to delete and recreate the PV resource 57 | 58 | // Other reclaiming strategies 59 | // - Recycle: deletes the volume’s contents and makes the volume available to be claimed again 60 | // - Delete: deletes the underlying storage 61 | // - these options may or may not be supported 62 | // - you can change the reclaim policy on an existing PV (e.g. Delete -> Retain to prevent losing data) -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Basics 2 | 3 | ## Who this training is for 4 | 5 | The training is aimed at architects, analysts, developers and testers who want to learn how to build, run and manage containerized applications on the Kubernetes platform. 6 | 7 | ## Training goals 8 | 9 | - An introduction to the Kubernetes platform and its resources. 10 | - Learning good practices for deploying containerized applications, in particular ones based on the microservices and Self-Contained Systems (SCS) architectures. 11 | - Acquiring basic skills in using Kubernetes to monitor, tune and scale applications. 12 | 13 | ## Prerequisites 14 | 15 | - Basic knowledge of containers (Docker), computer networking and Linux fundamentals. 16 | - Installed tools: Git, Docker (with Kubernetes), Node (optional). 17 | - An account on [Docker Hub](https://cloud.docker.com). 18 | 19 | ## Training format 20 | 21 | 16 hours of lectures and workshops. 22 | 23 | ## Agenda 24 | 25 | 1. Introduction to Kubernetes 26 | 2. Installing Kubernetes 27 | 3. Running containers: Pods
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Kubernetes Fundamentals
2 | 
3 | ## Target audience
4 | 
5 | The training is aimed at architects, analysts, developers and testers who want to learn how to build, run and manage containerized applications on the Kubernetes platform.
6 | 
7 | ## Training goals
8 | 
9 | - An introduction to the Kubernetes platform and its resources.
10 | - Learning good practices for deploying containerized applications, in particular those based on microservices and Self-Contained Systems (SCS) architectures.
11 | - Acquiring basic skills in using Kubernetes to monitor, tune and scale applications.
12 | 
13 | ## Prerequisites
14 | 
15 | - Basic knowledge of containerization (Docker), computer networking and Linux fundamentals.
16 | - Installed tools: Git, Docker (with Kubernetes), Node (optional).
17 | - An account on [Docker Hub](https://cloud.docker.com).
18 | 
19 | ## Course format
20 | 
21 | 16 hours of lectures and workshops.
22 | 
23 | ## Agenda
24 | 
25 | 1. Introduction to Kubernetes
26 | 2. Installing Kubernetes
27 | 3. Running containers: Pods
28 | 4. Replication and pod management: ReplicationController, ReplicaSet, DaemonSet, Job, CronJob
29 | 5. Discovering and talking to pods: Service, Ingress
30 | 6. Storage for containers: Volume
31 | 7. Application configuration: ConfigMaps, Secrets
32 | 8. Updating applications: Deployments
33 | 9. Replicating stateful pods: StatefulSet
34 | 10. Deploying a sample application
35 | 
36 | ## Resources
37 | - [Training materials](https://docs.google.com/presentation/d/e/2PACX-1vSYwasw13m90o8-l1lPu6QDvQViAO_3ax9wr_ir0z5Vvwza8nIoXSAdtkrKRYoiwQ43X8a7plDqfuFw/pub?start=false&loop=false&delayms=3000)
38 | - [Docker Desktop with Kubernetes support](https://www.docker.com/products/docker-desktop)
39 | - [Minikube](https://github.com/kubernetes/minikube)
40 | - [AWS EKS](https://aws.amazon.com/eks/)
41 | - [Google GKE](https://cloud.google.com/kubernetes-engine/)
42 | - [Azure AKS](https://azure.microsoft.com/pl-pl/services/kubernetes-service/)
43 | - [kops](https://github.com/kubernetes/kops)
44 | - [Kubespray](https://github.com/kubernetes-incubator/kubespray)
45 | - [OKD (Kubernetes with OpenShift)](http://okd.io)
46 | - [Kubernetes client (kubectl)](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
47 | - [Awesome-Kubernetes](https://github.com/ramitsurana/awesome-kubernetes)
48 | - [Kubernetes Bootcamp](https://kubernetesbootcamp.github.io/kubernetes-bootcamp/index.html)
49 | - [Learn Kubernetes using Interactive Browser-Based Scenarios](https://www.katacoda.com/courses/kubernetes)
50 | - [Is NoOps the End of DevOps? Think Again](https://blog.appdynamics.com/engineering/is-noops-the-end-of-devops-think-again/)
51 | - [Kubernetes in Action](https://www.manning.com/books/kubernetes-in-action)
--------------------------------------------------------------------------------
/lab01/kubek/server/index.js:
--------------------------------------------------------------------------------
1 | const http = require('http')
2 | const os = require('os')
3 | const fs = require('fs')
4 | const router = require('./router')
5 | 
6 | const port = process.env.PORT || 8000
7 | const host = process.env.HOST || 'localhost'
8 | 
9 | const authUser = process.env.AUTH_USER || 'user'
10 | const authPassword = process.env.AUTH_PASSWORD || 'passwd'
11 | 
12 | let requestCount = 0
13 | const maxRequestCount = 0 // 0 disables the failure simulation; a positive value makes '/' return 500 after that many requests
14 | 
15 | // Handle your routes here; static pages put in ./public will be served
16 | router.register('/', (req, res) => {
17 |   const error = maxRequestCount && ++requestCount > maxRequestCount
18 | 
19 |   res.writeHead(error ? 500 : 200, {'Content-Type': 'text/plain'})
20 |   res.end(`${error ? 'ERROR!' : 'OK.'} You've hit ${os.hostname()}\n`)
21 | })
22 | 
23 | router.register('/health-check', (req, res) => {
24 |   if (fs.existsSync('/var/healthy')) {
25 |     console.log('I\'m healthy.')
26 |     res.writeHead(200, {'Content-Type': 'text/plain'})
27 |     res.end('I\'m healthy.\n')
28 |   } else {
29 |     console.log('Feel sick. Please restart me!')
30 |     res.writeHead(500, {'Content-Type': 'text/plain'})
31 |     res.end('Feel sick. Please restart me!\n')
32 |   }
33 | })
34 | 
35 | router.register('/secret', (req, res) => {
36 |   const auth = req.headers['authorization'] // auth is base64(username:password), so we need to decode it
37 | 
38 |   if (!auth) {
39 |     res.writeHead(401, {
40 |       'Content-Type': 'text/plain',
41 |       'WWW-Authenticate': 'Basic realm="Secure Area"'
42 |     })
43 |     res.end('Credentials needed!')
44 |   } else {
45 |     const tmp = auth.split(' ') // split on a space; the original auth looks like "Basic Y2hhcmxlczoxMjM0NQ==" and we need the 2nd part
46 |     const buf = Buffer.from(tmp[1], 'base64') // Buffer.from instead of the deprecated new Buffer()
47 |     const plain_auth = buf.toString() // read it back out as a string
48 | 
49 |     // At this point plain_auth = "username:password"
50 |     const creds = plain_auth.split(':') // split on a ':'
51 |     const username = creds[0]
52 |     const password = creds[1]
53 | 
54 |     if ((username == authUser) && (password == authPassword)) { // is the username/password correct?
55 |       res.writeHead(200, {'Content-Type': 'text/plain'})
56 |       res.end('You are authorized!')
57 |     }
58 |     else {
59 |       res.writeHead(403, {
60 |         'Content-Type': 'text/plain',
61 |         'WWW-Authenticate': 'Basic realm="Secure Area"'
62 |       })
63 |       res.end('You shall not pass!')
64 |     }
65 |   }
66 | })
67 | 
68 | // We need a server which relies on our router
69 | const server = http.createServer((req, res) => {
70 |   const handler = router.route(req) // declared with const (it leaked as an implicit global before)
71 |   handler.process(req, res)
72 | })
73 | 
74 | // Start the server
75 | server.listen(port, host)
76 | 
77 | console.log(`Server listening on http://${host}:${port}`)
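// A quick way to exercise the Basic-auth /secret endpoint above, assuming the
// default credentials (user/passwd) when AUTH_USER/AUTH_PASSWORD are not set:
//
//   curl -u user:passwd http://localhost:8000/secret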
--------------------------------------------------------------------------------
/lab06/lab06.txt:
--------------------------------------------------------------------------------
1 | ReplicationController
2 | Lab 6: Creating a ReplicationController
3 | =======================================
4 | 
5 | // ReplicationController: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/
6 | 
7 | // Creating a RC as a command
8 | $ kubectl run kubek --image={docker-repo}/kubek --port=8000 --replicas=2 --generator=run/v1
9 | $ kubectl get pods
10 | // Seeing the RC respond to a deleted pod
11 | $ kubectl delete po kubek-{pod-id}
12 | $ kubectl get pods
13 | 
14 | // Remove RC
15 | $ kubectl delete rc kubek
16 | 
17 | // Creating a ReplicationController from the descriptor
18 | $ cat kubek-rc.yaml
19 | // - the pod labels in the template must match the label selector of the ReplicationController
20 | // - you can omit the selector altogether; in that case, it will be configured automatically
21 | //   from the labels in the pod template
22 | $ kubectl create -f kubek-rc.yaml
23 | 
24 | // Getting information about created resources
25 | $ kubectl get pods --show-labels
26 | $ kubectl get rc --show-labels
27 | $ kubectl describe rc kubek
28 | $ kubectl get all
29 | 
30 | // Moving pods in and out of the scope of a ReplicationController
31 | // - the RC doesn't care if you add additional labels to its managed pods
32 | $ kubectl label pod kubek-{pod-id} type=special
33 | $ kubectl get pods --show-labels
34 | 
35 | // Changing the labels of a managed pod
36 | $ kubectl label pod kubek-{pod-id} app=foo --overwrite
37 | $ kubectl get pods -L app
38 | // - a new pod is created by the RC, and the relabeled one is now unmanaged
39 | 
40 | // Changing the pod template
41 | $ kubectl edit rc kubek
42 | // - add an additional label to the metadata and save the template
43 | // - list pods and their labels again and confirm that they haven't changed
44 | // - delete the pods and wait for their replacements to be created; you'll see the new label
45 | 
46 | // Configuring kubectl edit to use a different text editor
47 | $ export KUBE_EDITOR="/usr/bin/nano"
48 | 
49 | // Horizontally scaling pods
50 | // Scaling up
51 | $ kubectl scale rc kubek --replicas=10
52 | // or
53 | $ kubectl edit rc kubek
54 | // - change spec.replicas to 10
55 | // - when you save the file and close the editor, the RC is updated and it
56 | //   immediately scales the number of pods to 10
57 | 
58 | // Scaling down
59 | $ kubectl scale rc kubek --replicas=3
60 | // - horizontally scaling pods in Kubernetes is a matter of stating your desire
61 | // - it's a declarative approach
62 | 
63 | // Deleting a ReplicationController
64 | // - when you delete a RC through kubectl delete, the pods are also deleted
65 | // - it is possible to delete only the RC and leave the pods running
66 | // - this may be useful when you initially have a set of pods managed by a RC,
67 | //   and then decide to replace the ReplicationController with a ReplicaSet
68 | // - you can always create a new RC with the proper label selector and make
69 | //   them managed again
70 | $ kubectl delete rc kubek --cascade=false
71 | // - let's leave the pods unmanaged; we will switch them to a ReplicaSet
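// For reference: kubek-rc.yaml (used above) is not reproduced in this
// listing; an RC of this shape might look roughly like the sketch below
// (the replica count and labels are assumptions; the image and port follow lab01):
//
// apiVersion: v1
// kind: ReplicationController
// metadata:
//   name: kubek
// spec:
//   replicas: 3
//   selector:
//     app: kubek
//   template:
//     metadata:
//       labels:
//         app: kubek
//     spec:
//       containers:
//       - name: kubek
//         image: jagin/kubek
//         ports:
//         - containerPort: 8000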
--------------------------------------------------------------------------------
/lab15/lab15.txt:
--------------------------------------------------------------------------------
1 | Application configuration
2 | Lab 15: ConfigMap
3 | =================
4 | 
5 | // Configure a Pod to Use a ConfigMap: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/
6 | 
7 | // Creating a ConfigMap
8 | $ kubectl create configmap -h
9 | $ kubectl create configmap fortune-config --from-literal=sleep-interval=25 --from-literal=fortune-length=50
10 | // - keys may only contain alphanumeric characters, dashes, underscores and dots
11 | $ kubectl get configmap fortune-config -o yaml
12 | 
13 | $ cat fortune-config.yaml
14 | $ kubectl create -f fortune-config.yaml
15 | 
16 | // Passing a ConfigMap entry to a container as an environment variable and command-line argument
17 | $ cat fortune-kubek-pod-cm.yaml
18 | $ kubectl create -f fortune-kubek-pod-cm.yaml
19 | $ kubectl port-forward fortune-kubek 8080:8000
20 | $ while true; do curl http://localhost:8080/fortune.txt; sleep 5; done
21 | 
22 | // Referencing non-existing ConfigMaps in a pod
23 | // - Kubernetes schedules the pod normally and tries to run its containers.
24 | // - The container referencing the non-existing ConfigMap will fail to start, but the other
25 | //   container will start normally.
26 | // - If you then create the missing ConfigMap, the failed container is started without
27 | //   requiring you to recreate the pod.
28 | // - You can allow the container to start even if the ConfigMap doesn't exist by setting
29 | //   configMapKeyRef.optional: true
30 | 
31 | // Passing all entries of a ConfigMap as environment variables at once
32 | // spec:
33 | //   containers:
34 | //   - image: some-image
35 | //     envFrom:
36 | //     - prefix: CONFIG_
37 | //       configMapRef:
38 | //         name: my-config-map
39 | 
40 | // Using a configMap volume to expose ConfigMap entries as files
41 | $ kubectl create configmap kubek-config --from-file=configmaps/public
42 | $ kubectl get configmap kubek-config -o yaml
43 | $ kubectl create -f kubek-pod-cm-volume.yaml
44 | $ kubectl exec kubek ls /app/public
45 | $ kubectl port-forward kubek 8080:8000
46 | $ curl http://localhost:8080/lorem.txt
47 | $ curl http://localhost:8080/ipsum.txt
48 | 
49 | // Exposing certain ConfigMap entries in the volume
50 | // volumes:
51 | // - name: config
52 | //   configMap:
53 | //     name: kubek-config
54 | //     items:
55 | //     - key: lorem.txt
56 | //       path: my-lorem.txt
57 | 
58 | // Mounting a volume into a directory hides the files that already exist in that directory.
59 | // Instead of mounting the whole volume, you can mount part of it using subPath.
60 | // Example:
61 | // spec:
62 | //   containers:
63 | //   - image: some/image
64 | //     volumeMounts:
65 | //     - name: myvolume
66 | //       mountPath: /etc/someconfig.conf
67 | //       subPath: myconfig.conf
68 | 
69 | // Setting the file permissions for files in a configMap volume (defaultMode)
70 | // - the default is 644 (-rw-r--r--)
71 | // Example:
72 | // volumes:
73 | // - name: config
74 | //   configMap:
75 | //     name: fortune-config
76 | //     defaultMode: 0660   # octal; note this field is a number, not a string
77 | 
78 | // Updating an app's config without having to restart the app
79 | // - Using a ConfigMap and exposing it through a volume allows updating the configuration
80 | //   without having to recreate the pod or even restart the container
81 | // - the application can watch for the changes and reconfigure itself
82 | // - If you've mounted a single file in the container instead of the whole volume, the file will not be updated
83 | 
84 | // Editing a ConfigMap
85 | $ kubectl edit configmap kubek-config
86 | // - change the content of lorem.txt
87 | $ kubectl exec kubek cat /app/public/lorem.txt
88 | // - the update can take up to a minute
89 | 
90 | // Be careful about modifying a ConfigMap that running containers depend on; if the app doesn't reload it, pods may end up running with inconsistent configuration.
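// For reference: kubek-pod-cm-volume.yaml is not reproduced in this listing;
// a pod mounting the kubek-config ConfigMap as a volume might look roughly
// like the sketch below (the mount path follows the /app/public listing above):
//
// apiVersion: v1
// kind: Pod
// metadata:
//   name: kubek
// spec:
//   containers:
//   - image: jagin/kubek
//     name: kubek
//     volumeMounts:
//     - name: config
//       mountPath: /app/public
//       readOnly: true
//   volumes:
//   - name: config
//     configMap:
//       name: kubek-config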
--------------------------------------------------------------------------------
/lab08/lab08.txt:
--------------------------------------------------------------------------------
1 | Service
2 | Lab 8: Discover and talk to pods
3 | ================================
4 | 
5 | // Services: https://kubernetes.io/docs/concepts/services-networking/
6 | 
7 | // First we will need some extra tools
8 | $ cd kubetools
9 | $ docker build -t {docker-repo}/kubetools .
10 | $ docker push {docker-repo}/kubetools
11 | $ cd ..
12 | 
13 | // Creating a ClusterIP service
14 | // - let's first create a ReplicationController with 3 pod replicas
15 | $ kubectl create -f ../lab06/kubek-rc.yaml
16 | // - expose the RC through a ClusterIP service (the default type)
17 | $ cat kubek-svc.yaml
18 | $ kubectl create -f kubek-svc.yaml
19 | // $ kubectl expose rc kubek --port=8080 --target-port=8000 --name kubek
20 | $ kubectl get svc
21 | $ kubectl get svc kubek
22 | // - because this is a ClusterIP service, it's only accessible from inside the cluster
23 | // - you can ssh into one of the Kubernetes nodes and use the curl command
24 | // - you can execute the curl command inside one of your existing pods through the kubectl exec command
25 | // - you can create a pod that will send the request to the service's cluster IP and log the response
26 | 
27 | // Create a pod that will send the request to the service's cluster IP
28 | $ kubectl run kubetools --image={docker-repo}/kubetools --rm -it --restart=Never --command -- curl -s http://{cluster-ip}:8080
29 | // or
30 | $ kubectl run kubetools --image={docker-repo}/kubetools --rm -it --restart=Never --command -- bash
31 | # curl http://{cluster-ip}:8080
32 | // - notice the different host names
33 | # exit
34 | // or
35 | $ kubectl run kubetools --image={docker-repo}/kubetools --generator=run-pod/v1 --command -- sleep infinity
36 | $ kubectl exec kubetools curl http://{cluster-ip}:8080
37 | 
38 | // Configure session affinity on the service
39 | $ cat kubek-svc-session-affinity.yaml
40 | 
41 | // Exposing multiple ports in the same service
42 | $ cat kubek-svc-multiple-ports.yaml
43 | // - the label selector applies to the whole service
44 | // - it can't be configured for each port individually
45 | // - if you want different ports to map to different subsets of pods,
46 | //   you need to create two services
47 | 
48 | // Using named ports
49 | // - first define port names in the pod
50 | $ cat kubek-pod-named-ports.yaml
51 | // - refer to those ports by name in the service spec (see the sketch after this lab section)
52 | $ cat kubek-svc-named-ports.yaml
53 | // - the benefit of doing so is that it enables you to change port numbers later
54 | //   without having to change the service spec
55 | 
56 | // Discovering services through environment variables
57 | $ kubectl run kubetools --image={docker-repo}/kubetools --rm -it --restart=Never --command bash
58 | # env
59 | # env | grep KUBEK
60 | # curl http://$KUBEK_SERVICE_HOST:$KUBEK_SERVICE_PORT
61 | 
62 | // Discovering services through DNS
63 | # nslookup kubek
64 | // - Kubernetes automatically configures DNS for each pod by modifying the container's /etc/resolv.conf file
65 | # cat /etc/resolv.conf
66 | # curl http://kubek.default.svc.cluster.local:8080
67 | # curl http://kubek.default:8080
68 | # curl http://kubek:8080
69 | // - each service gets a DNS entry in the internal DNS server, and client pods that know
70 | //   the name of the service can access it through its fully qualified domain name (FQDN)
71 | //   instead of resorting to environment variables
72 | // - but the client must still know the service's port number, which can be
73 | //   obtained from the environment variable
74 | // - the service's cluster IP is a virtual IP and only has meaning when combined
75 | //   with the service port; you cannot ping the service
76 | # exit
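// For reference: the named-ports descriptors referenced above are not
// reproduced in this listing; the relevant fragments presumably look roughly
// like this (the port name is an assumption):
//
// # kubek-pod-named-ports.yaml (fragment)
//     ports:
//     - name: http
//       containerPort: 8000
//
// # kubek-svc-named-ports.yaml (fragment)
//   ports:
//   - name: http
//     port: 8080
//     targetPort: http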
77 | 
78 | // Service endpoints
79 | $ kubectl describe svc kubek
80 | $ kubectl get endpoints kubek
81 | // - when a client connects to a service, the service proxy selects one of those IP and port pairs and redirects the
82 | //   incoming connection to the server listening at that location
83 | 
84 | $ kubectl delete svc kubek
--------------------------------------------------------------------------------
/lab19/lab19.txt:
--------------------------------------------------------------------------------
1 | Self-Contained Systems
2 | Lab 19: Building and running
3 | ============================
4 | 
5 | // Clone the SCS repository
6 | // https://github.com/jagin/scs
7 | // https://github.com/jagin/home-scs
8 | // https://github.com/jagin/account-scs
9 | // https://github.com/jagin/items-scs
10 | 
11 | $ git clone --recurse-submodules https://github.com/jagin/scs.git
12 | 
13 | // Problems with some git versions?
14 | // See: https://stackoverflow.com/questions/3796927/how-to-git-clone-including-submodules
15 | 
16 | $ cd scs
17 | 
18 | // Home SCS setup
19 | $ cd home-scs
20 | $ npm install
21 | $ cp .env.template .env
22 | $ vi .env
23 | // Set:
24 | // JWT_KEY=UvKVk0ibulu9_8-8R1D3GFhRVkYnFHznD4PMTpemOxhmxa8GZbSir9mbySiSjyev
25 | // and save
26 | 
27 | // Run Home SCS with docker-compose (development mode)
28 | $ docker-compose up
29 | $ docker-compose up --build
30 | // http://localhost:8000
31 | 
32 | // Stopping and removing Home SCS
33 | $ docker-compose down
34 | 
35 | // Building the container image (optional - docker-compose has already done it)
36 | $ docker build -t home-scs .
37 | 
38 | // Running the container image
39 | $ docker run --name home-scs -p 8000:8000 \
40 |     -e "JWT_KEY=UvKVk0ibulu9_8-8R1D3GFhRVkYnFHznD4PMTpemOxhmxa8GZbSir9mbySiSjyev" -d home-scs
41 | // Look at the logs if there are any errors
42 | $ docker logs home-scs
43 | // http://localhost:8000/home/pl
44 | 
45 | // Listing all running containers
46 | $ docker ps
47 | 
48 | // Getting additional information about the container
49 | $ docker inspect home-scs
50 | 
51 | // Exploring the inside of a running container
52 | $ docker exec -it home-scs bash
53 | # ps aux
54 | # ls
55 | # exit
56 | 
57 | // Stopping and removing a container
58 | $ docker stop home-scs
59 | $ docker ps
60 | $ docker ps -a
61 | $ docker rm home-scs
62 | 
63 | // Pushing the image to an image registry
64 | $ docker tag home-scs {docker-repo}/home-scs
65 | $ docker images | head
66 | $ docker push {docker-repo}/home-scs
67 | 
68 | $ cd ..
69 | 
70 | // Items SCS setup
71 | $ cd items-scs
72 | $ npm install
73 | $ cp .env.template .env
74 | $ vi .env
75 | // Set:
76 | // JWT_KEY=UvKVk0ibulu9_8-8R1D3GFhRVkYnFHznD4PMTpemOxhmxa8GZbSir9mbySiSjyev
77 | // and save
78 | 
79 | // Run Items SCS with docker-compose (development mode)
80 | $ docker-compose up
81 | // http://localhost:8000
82 | 
83 | // Stopping and removing Items SCS
84 | $ docker-compose down
85 | 
86 | // Pushing the image to an image registry
87 | $ docker tag items-scs {docker-repo}/items-scs
88 | $ docker push {docker-repo}/items-scs
89 | 
90 | $ cd ..
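// Each module runs in development mode via its own docker-compose.yml, which
// is not reproduced in this listing; a minimal sketch for a module like
// items-scs might look roughly like this (the service name and ports are assumptions):
//
// version: "3"
// services:
//   items-scs:
//     build: .
//     env_file: .env
//     ports:
//     - "8000:8000"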
91 | 
92 | // Account SCS setup
93 | $ cd account-scs
94 | $ npm install
95 | $ cp .env.template .env
96 | $ vi .env
97 | // Set:
98 | // JWT_KEY=UvKVk0ibulu9_8-8R1D3GFhRVkYnFHznD4PMTpemOxhmxa8GZbSir9mbySiSjyev
99 | // AUTH0_CLIENT_ID=Rl1UDl5VgYwTOGZqAQRGZN4lIZEA0726
100 | // AUTH0_CLIENT_SECRET=UvKVk0ibulu9_8-8R1D3GFhRVkYnFHznD4PMTpemOxhmxa8GZbSir9mbySiSjyev
101 | // AUTH0_DOMAIN=jagin.eu.auth0.com
102 | // and save
103 | 
104 | // Run Account SCS with docker-compose (development mode)
105 | $ docker-compose up
106 | // http://localhost:8000
107 | 
108 | // Stopping and removing Account SCS
109 | $ docker-compose down
110 | 
111 | // Pushing the image to an image registry
112 | $ docker tag account-scs {docker-repo}/account-scs
113 | $ docker push {docker-repo}/account-scs
114 | 
115 | $ cd ..
116 | 
117 | // SCS .env setup
118 | $ cp .env.template .env
119 | 
120 | // Build the SCS module images locally (optional - we've already built them)
121 | $ ./build-scs
122 | 
123 | // Run the SCS application (all modules)
124 | $ docker-compose up -d
125 | 
126 | // Watch the logs
127 | $ docker-compose logs -f
128 | $ docker-compose logs -f --tail=10
129 | $ docker-compose logs -f home-scs
130 | 
131 | // http://localhost:8000
132 | 
133 | // Listing all running containers
134 | $ docker ps
135 | 
136 | // Stop Items SCS
137 | $ docker stop items-scs
138 | 
139 | // http://localhost:8000/home/pl
140 | // - the Items SCS is now unavailable
141 | 
142 | // Start Items SCS again
143 | $ docker start items-scs
144 | 
145 | // Stopping and removing all SCS module containers
146 | $ docker-compose down
147 | 
148 | $ docker images | head
149 | 
150 | // Pushing the image to an image registry
151 | $ docker tag scs {docker-repo}/scs
152 | $ docker push {docker-repo}/scs
153 | 
154 | // Kubernetes deployment
155 | $ kubectl create ns scs
156 | $ kubectl -n scs apply -f auth0-secret.yaml
157 | $ kubectl -n scs apply -f home-scs.yaml
158 | $ kubectl -n scs apply -f account-scs.yaml
159 | $ kubectl -n scs apply -f mongodb.yaml
160 | $ kubectl -n scs apply -f items-scs.yaml
161 | $ kubectl -n scs apply -f scs.yaml
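// For reference: auth0-secret.yaml is not reproduced in this listing; a
// Secret of this shape might look roughly like the sketch below (the key
// names follow the .env settings above, the values are placeholders, and
// stringData lets Kubernetes do the base64 encoding for you):
//
// apiVersion: v1
// kind: Secret
// metadata:
//   name: auth0-secret
// type: Opaque
// stringData:
//   AUTH0_CLIENT_ID: <your-client-id>
//   AUTH0_CLIENT_SECRET: <your-client-secret>
//   AUTH0_DOMAIN: jagin.eu.auth0.com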
--------------------------------------------------------------------------------
/lab01/lab01.txt:
--------------------------------------------------------------------------------
1 | Pods
2 | Lab 1: Running containers
3 | =========================
4 | 
5 | // Pods: https://kubernetes.io/docs/concepts/workloads/pods/pod/
6 | 
7 | // Overview of kubectl: https://kubernetes.io/docs/reference/kubectl/overview/
8 | // kubectl Usage Conventions: https://kubernetes.io/docs/reference/kubectl/conventions/
9 | // kubectl [command] [TYPE] [NAME] [flags]
10 | $ kubectl help
11 | 
12 | // If you have kubectl already installed and pointing to some other environment,
13 | // be sure to change context so that kubectl is pointing to docker-for-desktop:
14 | $ kubectl config get-contexts
15 | $ kubectl config use-context docker-for-desktop
16 | 
17 | // Verify that Kubernetes on Docker is properly configured
18 | // Settings -> Kubernetes
19 | // Enable Kubernetes: true
20 | $ kubectl cluster-info
21 | $ kubectl get nodes
22 | 
23 | // Dashboard (local)
24 | // https://github.com/kubernetes/dashboard
25 | $ kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml
26 | $ kubectl proxy
27 | // http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/
28 | // Clicking 'Skip' at the login prompt no longer works, but we can use the kubernetes-dashboard token
29 | $ kubectl -n kube-system get secret | grep dashboard-token
30 | $ kubectl -n kube-system describe secrets kubernetes-dashboard-token-{hash}
31 | // Use the token to log in
32 | 
33 | // Building the Kubek application
34 | $ cd kubek
35 | // Building the container image
36 | $ docker build -t kubek .
37 | 
38 | // Running the container image
39 | $ docker run --name kubek -p 8080:8000 -d kubek
40 | // Look at the logs if there are any errors
41 | $ docker logs kubek
42 | $ curl http://localhost:8080
43 | 
44 | // Listing all running containers
45 | $ docker ps
46 | 
47 | // Getting additional information about the container
48 | $ docker inspect kubek
49 | 
50 | // Exploring the inside of a running container
51 | $ docker exec -it kubek bash
52 | # ps aux
53 | # ls
54 | # exit
55 | 
56 | // Attaching to the current process
57 | $ docker attach kubek
58 | // - to exit the terminal press Ctrl+p followed by Ctrl+q
59 | // On macOS this does not work! Use:
60 | $ docker attach --sig-proxy=false kubek
61 | // - then Ctrl+c will exit the terminal
62 | 
63 | // Stopping and removing the container
64 | $ docker stop kubek
65 | $ docker ps
66 | $ docker ps -a
67 | $ docker rm kubek
68 | 
69 | // Pushing the image to an image registry
70 | $ docker tag kubek {docker-repo}/kubek
71 | $ docker images | head
72 | $ docker push {docker-repo}/kubek
73 | 
74 | $ cd ..
75 | 
76 | // Running the kubek pod
77 | $ kubectl run kubek --image={docker-repo}/kubek --port=8000 --generator=run-pod/v1
78 | // --generator=run-pod/v1 runs just the pod itself (no managing controller)
79 | 
80 | // List running pods
81 | // - from the default namespace
82 | $ kubectl get pods
83 | // - from all namespaces
84 | $ kubectl get pods --all-namespaces
85 | 
86 | // List running pods with a given name
87 | $ kubectl get po kubek
88 | $ kubectl get po kubek -o wide
89 | $ kubectl describe pod kubek
90 | $ kubectl get po kubek -o yaml
91 | // - metadata
92 | // - spec
93 | // - status
94 | 
95 | // Retrieving pod logs
96 | $ kubectl logs kubek
97 | // - container logs are automatically rotated daily and every time the log file reaches 10MB in size
98 | // - the kubectl logs command only shows the log entries from the last rotation
99 | // - when a pod is deleted, its logs are also deleted
100 | // - to make a pod's logs available even after the pod is deleted, you need to set up centralized,
101 | //   cluster-wide logging, which stores all the logs in a central store
102 | 
103 | // To get the logs of a multi-container pod, you must specify the container name
104 | $ kubectl logs kubek -c kubek
105 | 
106 | // Sending requests to the pod
107 | // Forwarding a local network port to a port in the pod
108 | $ kubectl port-forward kubek 8080:8000
109 | $ curl http://localhost:8080
110 | 
111 | // Access the pod through the proxy
112 | $ curl -L http://localhost:8001/api/v1/namespaces/default/pods/http:kubek:/proxy
113 | // - the -L flag instructs curl to follow any redirect so that you reach the eventual endpoint
114 | 
115 | // Remove the pod. We will recreate it from the descriptor
116 | $ kubectl delete po kubek
117 | 
118 | // Running the pod descriptor
119 | $ cat kubek-pod.yaml
120 | $ kubectl create -f kubek-pod.yaml
121 | // $ kubectl apply -f kubek-pod.yaml
122 | // Kubernetes Object Management: https://kubernetes.io/docs/concepts/overview/object-management-kubectl/overview/
123 | 
124 | $ kubectl logs kubek
125 | 
126 | // Using kubectl explain to discover possible API object fields
127 | $ kubectl explain pods
128 | $ kubectl explain pod.spec
129 | 
--------------------------------------------------------------------------------
/lab17/lab17.txt:
--------------------------------------------------------------------------------
1 | Updating applications
2 | Lab 17: Deployment
3 | ==================
4 | 
5 | // Deployments: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/
6 | 
7 | // Prepare the different kubek versions
8 | // docker build -t {docker-repo}/kubek:v1 . && docker push {docker-repo}/kubek:v1
9 | 
10 | // First remove all kubek pods, ReplicationControllers and Services
11 | $ kubectl delete all --all
12 | 
13 | // Creating a Deployment
14 | $ cat kubek-deployment-svc-v1.yaml
15 | $ kubectl create -f kubek-deployment-svc-v1.yaml --record
16 | // - the --record option will record the command in the revision history, which will be useful later
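// For reference: kubek-deployment-svc-v1.yaml is not reproduced in this
// listing; the Deployment part of it might look roughly like the sketch below
// (the replica count and labels are assumptions):
//
// apiVersion: apps/v1
// kind: Deployment
// metadata:
//   name: kubek
// spec:
//   replicas: 3
//   selector:
//     matchLabels:
//       app: kubek
//   template:
//     metadata:
//       labels:
//         app: kubek
//     spec:
//       containers:
//       - name: kubek
//         image: jagin/kubek:v1
//         ports:
//         - containerPort: 8000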
17 | $ kubectl get deployment
18 | $ kubectl describe deployment
19 | 
20 | // Check the status of the deployment rollout
21 | $ kubectl rollout status deployment kubek
22 | 
23 | // Check the resources
24 | $ kubectl get po
25 | $ kubectl get rs
26 | // - the ReplicaSet name also contains the hash value of its pod template
27 | // - a Deployment creates multiple ReplicaSets - one for each version of the pod template
28 | $ kubectl get svc kubek -o wide
29 | 
30 | // Updating a Deployment
31 | // - The only thing you need to do is modify the pod template defined in the Deployment resource
32 | // - Kubernetes will take all the steps necessary to get the actual system state to what's defined in the resource
33 | // - There are two strategies: RollingUpdate (default) and Recreate
34 | // - Recreate is used when your application doesn't support running multiple versions in parallel and
35 | //   requires the old version to be stopped completely before the new one is started
36 | // - an update is triggered when we change the pod template
37 | 
38 | // Slowing down the rolling update for the lab's purpose
39 | $ kubectl patch deployment kubek -p '{"spec": {"minReadySeconds": 10}}'
40 | // - we use patch to modify a single property or a limited number of them without editing the whole definition
41 | // - minReadySeconds is not really meant for slowing down deployments (see "Blocking rollouts
42 | //   of bad versions" below); we only abuse it here so the rollout is easier to observe
43 | 
44 | $ while true; do curl http://localhost:${kubek-svc-node-port}; done
45 | 
46 | $ kubectl set image deployment kubek kubek={docker-repo}/kubek:v2
47 | // - check out the curl loop
48 | 
49 | $ kubectl rollout status deployment kubek
50 | 
51 | // Other kubectl commands to update the deployment
52 | // - kubectl edit
53 | //   $ kubectl edit deployment kubek
54 | // - kubectl patch
55 | //   $ kubectl patch deployment kubek -p '{"spec":{"template": {"spec": {"containers": [{"name": "kubek", "image": "jagin/kubek:v2"}]}}}}'
56 | // - kubectl apply
57 | //   $ kubectl apply -f kubek-deployment-v2.yaml
58 | // - kubectl replace
59 | //   $ kubectl replace -f kubek-deployment-v2.yaml
60 | // - kubectl set image
61 | //   $ kubectl set image deployment kubek kubek={docker-repo}/kubek:v2
62 | 
63 | $ kubectl get rs
64 | // - we can still see the old ReplicaSet next to the new one
65 | // - the ReplicaSets are managed by the Deployment (we should not mess around with them)
66 | 
67 | // Undoing a rollout
68 | $ kubectl set image deployment kubek kubek={docker-repo}/kubek:v3
69 | $ kubectl rollout status deployment kubek
70 | 
71 | // Roll the Deployment back to the previous revision
72 | $ kubectl rollout undo deployment kubek
73 | // - the undo command can also be used while the rollout process is still in
74 | //   progress to essentially abort the rollout
75 | 
76 | $ kubectl get rs
77 | 
78 | // Displaying a deployment's rollout history
79 | $ kubectl rollout history deployment kubek
80 | // - without --record, the CHANGE-CAUSE column in the revision history would be empty
81 | 
82 | // Rolling back to a specific deployment revision
83 | $ kubectl rollout undo deployment kubek --to-revision=1
84 | // - the ReplicaSets represent the complete revision history of the Deployment
85 | // - the revision history is limited by the revisionHistoryLimit property on the Deployment resource
86 | 
87 | // Controlling the rate of the rollout
88 | // - by default a new pod is created and, when it becomes available, one of the old pods is deleted and another new pod is created
89 | // - this continues until there are no old pods left
90 | // - this can be configured through maxSurge and maxUnavailable
91 | //   - https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#max-unavailable
92 | //   - https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#max-surge
93 | // Example:
94 | //
95 | // spec:
96 | //   strategy:
97 | //     rollingUpdate:
98 | //       maxSurge: 1
99 | //       maxUnavailable: 0
100 | //     type: RollingUpdate
101 | // - maxSurge: determines how many pod instances you allow to exist above the desired replica
102 | //   count configured on the Deployment
103 | // - maxUnavailable: determines how many pod instances can be unavailable relative to the desired
104 | //   replica count during the update
105 | 
106 | // Pausing the rollout process
107 | // Run the following two commands one after the other, immediately (within a few seconds)
108 | $ kubectl set image deployment kubek kubek={docker-repo}/kubek:v4
109 | $ kubectl rollout pause deployment kubek
110 | // - a single new pod should have been created, but all original pods should also still be running
111 | // - we can verify whether the new version is working fine on a smaller number of users (similar to a canary release)
112 | // - the undo command won't undo the rollout until you resume the Deployment
113 | 
114 | // Resuming the rollout
115 | $ kubectl rollout resume deployment kubek
116 | 
117 | // Blocking rollouts of bad versions
118 | // - this is what minReadySeconds is really for: preventing malfunctioning versions from being deployed
119 | // - the minReadySeconds property specifies how long a newly created pod should be
120 | //   ready before the pod is treated as available
121 | // - if a new pod isn't functioning properly and its readiness probe starts failing
122 | //   before minReadySeconds have passed, the rollout of the new version will effectively be blocked
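// For reference: kubek-deployment-v3-readinesscheck.yaml (applied below) is
// not reproduced in this listing; the relevant fragment presumably adds a
// readiness probe to the container, roughly like this sketch (the probe
// details are assumptions):
//
// spec:
//   minReadySeconds: 10
//   template:
//     spec:
//       containers:
//       - name: kubek
//         image: jagin/kubek:v3
//         readinessProbe:
//           periodSeconds: 1
//           httpGet:
//             path: /
//             port: 8000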
123 | $ kubectl apply -f kubek-deployment-v3-readinesscheck.yaml
124 | 
125 | $ kubectl rollout status deployment kubek
126 | 
127 | $ kubectl get po
128 | 
129 | // Configuring a deadline for the rollouts
130 | $ kubectl describe deploy kubek
131 | // - see Conditions
132 | // - by default the Deployment is marked as failed after 10 minutes (we can change this through the progressDeadlineSeconds property)
133 | 
134 | // Aborting a bad rollout
135 | $ kubectl rollout undo deployment kubek
136 | // - in future versions, the rollout will be aborted automatically when the time specified in progressDeadlineSeconds is exceeded
--------------------------------------------------------------------------------