├── .DS_Store ├── .gitignore ├── 01-lesson ├── README.md ├── demo.sh ├── getting_started.md └── images │ ├── image-01-01.png │ ├── image-01-02.png │ ├── image-01-03.png │ ├── image-01-04.png │ ├── image-01-05.png │ ├── image-01-06.png │ ├── image-01-07.png │ └── image-01-08.png ├── 02-lesson ├── Dockerfile ├── README.md ├── app.py ├── db-pod.yaml ├── db-svc.yaml ├── demo.sh ├── docker-compose.yaml ├── images │ ├── image-02-01.png │ ├── image-02-02.png │ ├── image-02-03.png │ ├── image-02-04.png │ ├── image-02-05.png │ ├── image-02-06.png │ └── image-02-07.png ├── kubernetes_architecture.md ├── web-pod.yaml └── web-rc.yaml ├── 03-lesson ├── Dockerfile ├── README.md ├── app.py ├── db-pod.yaml ├── db-svc.yaml ├── demo.sh ├── docker-compose.yaml ├── helper.sh ├── images │ ├── image-03-01.png │ ├── image-03-02.png │ └── image-03-03.png ├── mypod.yaml ├── requirements.txt ├── web-pod-1.yaml ├── web-pod-2.yaml ├── web-rc.yaml └── web-svc.yaml ├── 04-lesson ├── Deploy │ ├── Dockerfile │ ├── app.js │ ├── color-pod.yaml │ ├── color-rc.yaml │ ├── color-srv.yaml │ └── color-svc.yaml ├── README.md ├── images │ ├── image-04-01.png │ ├── image-04-02.png │ ├── image-04-03.png │ ├── image-04-04.png │ ├── image-04-05.png │ ├── image-04-06.png │ └── image-04-07.png └── todo-app │ ├── app.js │ ├── db-pod.yaml │ ├── db-svc.yaml │ ├── db.js │ ├── package.json │ ├── public │ ├── favicon.ico │ ├── images │ │ ├── delete.png │ │ └── dreamerslab.png │ ├── javascripts │ │ └── ga.js │ ├── robots.txt │ └── stylesheets │ │ └── screen.css │ ├── routes │ └── index.js │ ├── utils.js │ ├── views │ ├── edit.ejs │ ├── index.ejs │ └── layout.ejs │ ├── web-pod.yaml │ ├── web-rc.yaml │ └── web-svc.yaml ├── 05-lesson ├── README.md ├── demo.sh ├── demo2.sh ├── j-hello-svc.yaml ├── j-hello.yaml └── nginx-deployment.yaml ├── 06-lesson ├── README.md ├── demo.sh ├── images │ ├── image-06-01.png │ ├── image-06-02.png │ └── image-06-03.png ├── my-pod.yaml ├── my-pv.yaml ├── my-pvc.yaml ├── nfs-demo.sh ├── pod-vol-cloud.yaml └── pod-vol-local.yaml ├── 07-lesson ├── README.md ├── cluster-role-binding.yaml ├── cluster-role.yaml ├── images │ ├── demo0.png │ ├── demo1.png │ └── rbac0.png ├── role-binding.yaml ├── role.yaml └── utils.sh ├── 08-lesson ├── README.md ├── helper.yaml ├── images │ └── stateful0.png ├── mongo-headless.yaml ├── mongo-role.yaml └── mongo-statefulset.yaml ├── 09-lesson ├── ConfigMaps │ ├── configmap.yaml │ ├── demo.sh │ ├── demo2.sh │ ├── pod-cmd.yaml │ ├── pod-env.yaml │ ├── pod-vol.yaml │ └── redis.yaml ├── README.md └── Secrets │ ├── Demo1 │ ├── demo.sh │ ├── password.txt │ ├── secret-pod.yaml │ └── username.txt │ └── Demo2 │ ├── demo.sh │ ├── my-secret.yaml │ └── secret-env-pod.yaml ├── 10-lesson ├── DaemonSet │ ├── demo.sh │ ├── nginx-ds.yaml │ └── nginx-rs.yaml ├── Jobs │ ├── Cron │ │ ├── cron.yaml │ │ └── demo.sh │ └── OneTime │ │ ├── db-init-job.yaml │ │ ├── demo.sh │ │ └── mysql.yaml ├── README.md └── images │ ├── image-10-01.png │ ├── image-10-02.png │ └── image-10-03.png ├── 11-lesson ├── README.md ├── backup │ ├── nginx-pod.json │ └── nginx-svc.json ├── db-pod.json ├── db-pod.yaml ├── db-svc.json ├── db-svc.yaml ├── demo.sh ├── nginx-pod.json ├── nginx-svc.json ├── web-pod.json ├── web-pod.yaml ├── web-rc.json ├── web-rc.yaml ├── web-svc.json └── web-svc.yaml └── README.md /.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/.DS_Store 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | links.txt 2 | build.sh 3 | *.zip 4 | *.tar 5 | *.rar 6 | index.html 7 | index2.html 8 | Reading-Kubernetes 9 | *.zip 10 | *.tar 11 | *.rar 12 | *.key 13 | *.csr 14 | *.crt 15 | 16 | # new releases 17 | 12-lesson 18 | 13-lesson 19 | 14-lesson 20 | 15-lesson 21 | -------------------------------------------------------------------------------- /01-lesson/demo.sh: -------------------------------------------------------------------------------- 1 | # Deploy Nginx container 2 | kubectl run my-web --image=nginx --port=80 3 | 4 | # Expose Nginx container 5 | kubectl expose deployment my-web --target-port=80 --type=NodePort 6 | 7 | # Get the node IP for minikube 8 | minikube ip 9 | 10 | # Check the NodePort 11 | kubectl describe svc my-web 12 | 13 | # Access Ngnix 14 | PORT=$(kubectl get svc my-web -o go-template='{{(index .spec.ports 0).nodePort}}') 15 | -------------------------------------------------------------------------------- /01-lesson/images/image-01-01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/01-lesson/images/image-01-01.png -------------------------------------------------------------------------------- /01-lesson/images/image-01-02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/01-lesson/images/image-01-02.png -------------------------------------------------------------------------------- /01-lesson/images/image-01-03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/01-lesson/images/image-01-03.png -------------------------------------------------------------------------------- /01-lesson/images/image-01-04.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/01-lesson/images/image-01-04.png -------------------------------------------------------------------------------- /01-lesson/images/image-01-05.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/01-lesson/images/image-01-05.png -------------------------------------------------------------------------------- /01-lesson/images/image-01-06.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/01-lesson/images/image-01-06.png -------------------------------------------------------------------------------- /01-lesson/images/image-01-07.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/01-lesson/images/image-01-07.png -------------------------------------------------------------------------------- /01-lesson/images/image-01-08.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/01-lesson/images/image-01-08.png -------------------------------------------------------------------------------- /02-lesson/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:2.7-onbuild 2 | EXPOSE 5000 3 | CMD [ "python, "app.py" ] -------------------------------------------------------------------------------- /02-lesson/README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Architecture 2 | 3 | ## Objectives 4 | * A closer look at Kubernetes cluster 5 | * Master components 6 | * Nodw components 7 | * Pods 8 | * Labels & Selectors 9 | * Replication Controllers 10 | * Services 11 | 12 | --- 13 | ## Kubernetes Architecture 14 | ![Kubernetes Architecture](./images/image-02-01.png) 15 | 16 | ## Kubernetes Master 17 | ![Kubernetes Master](./images/image-02-02.png) 18 | 19 | ### [Kubernetes Design and Architecture](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/architecture.md#the-kubernetes-node) 20 | * API Server 21 | * Scheduler 22 | * Controller 23 | * Coordinator, making sure that the pods are running 24 | * etcd 25 | * Its a key-value databse 26 | ## Kubernetes Node 27 | ![Kubernetes Node](./images/image-02-03.png) 28 | * Pod 29 | * Docker 30 | * kubelet 31 | * kube-proxy 32 | * Supervisord 33 | * fluentd 34 | * Addons (DNS, UI, PetSets) 35 | 36 | ## A Simple Containerized Application 37 | ![A Simple Containerized Application](./images/image-02-04.png) 38 | 39 | ## Kubernetes Pod 40 | * Group of one or more containers that are always co-located, co-scheduled, and run in a shared context 41 | * Containers in the same pod have the same hostname 42 | * Each pod is isolated by 43 | * Process ID (PID) namespace 44 | * Network namespace 45 | * Interprocess Communication (IPC) namespace 46 | * Unix Time Sharing (UTS) namespace 47 | * Alternative to a VM with multiple processes 48 | 49 | ## Labels & Selectors 50 | * Key/value pairs associated with Kubernetes objects 51 | * Used to organize and select subsets of objects 52 | * Attached to objects at creation time but modified at any time. 53 | * Labels are the essential glue to associate one API object with other 54 | * Relication Controller -> Pods 55 | * Service -> Pods 56 | * Pods -> Nodes 57 | ## Deploying a Pod 58 | ![Deploying a Pod](./images/image-02-05.png) 59 | 60 | ## Services 61 | * An abstraction to define a logical set of Pods bound by a policy to access them 62 | * Services are exposed through internal and external endpoints 63 | * Services can also point to non-Kubernetes endpoints through a Virtual-IP-Bridge 64 | * Supports TCP and UDP 65 | * Interfaces with kube-proxy to manipulate iptables 66 | * Service can be exposed internal or external to the cluster 67 | 68 | ## Exposing Services 69 | ![Exposing Services](./images/image-02-06.png) 70 | 71 | ```console 72 | $ kubectl config get-contexts 73 | CURRENT NAME CLUSTER AUTHINFO NAMESPACE 74 | * minikube minikube minikube 75 | mycluster.icp-context mycluster.icp mycluster.icp-user default 76 | $ docker ps 77 | CONTAINER ID IMAGE COMMAND 78 | CREATED STATUS PORTS NAMES 79 | 95b8ee8eaf69 nginx "nginx -g 'daemon ..." 
80 | 5 hours ago Up 4 hours k8s_my-web_my-web-84b5767c98-vlmb4_default_85d923f1-dd7e-11e7-ace5-08002720cfab_0 81 | 4bc51a5774b4 gcr.io/google_containers/pause-amd64:3.0 "/pause" 82 | 5 hours ago Up 4 hours k8s_POD_my-web-84b5767c98-vlmb4_default_85d923f1-dd7e-11e7-ace5-08002720cfab_0 83 | 34dca730a6e4 gcr.io/google_containers/k8s-dns-sidecar-amd64 "/sidecar --v=2 --..." 84 | 9 hours ago Up 9 hours k8s_sidecar_kube-dns-86f6f55dd5-twgwv_kube-system_044ce763-d76b-11e7-bd99-08002720cfab_7 85 | c1a8ebdb8599 gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64 "/dnsmasq-nanny -v..." 86 | 9 hours ago Up 9 hours k8s_dnsmasq_kube-dns-86f6f55dd5-twgwv_kube-system_044ce763-d76b-11e7-bd99-08002720cfab_7 87 | 7e396db1bab6 gcr.io/kubernetes-helm/tiller "/tiller" 88 | 9 hours ago Up 9 hours k8s_tiller_tiller-deploy-7cb6884b74-w2pqw_kube-system_1c30ffb5-d9ba-11e7-9ad0-08002720cfab_3 89 | f54298206cfd gcr.io/google_containers/kubernetes-dashboard-amd64 "/dashboard --inse..." 90 | 9 hours ago Up 9 hours k8s_kubernetes-dashboard_kubernetes-dashboard-klx49_kube-system_04316921-d76b-11e7-bd99-08002720cfab_8 91 | def26e1204cf gcr.io/google_containers/k8s-dns-kube-dns-amd64 "/kube-dns --domai..." 92 | 9 hours ago Up 9 hours k8s_kubedns_kube-dns-86f6f55dd5-twgwv_kube-system_044ce763-d76b-11e7-bd99-08002720cfab_7 93 | 1737981b8f9b grafana/grafana "/run.sh" 94 | 9 hours ago Up 9 hours k8s_grafana_grafana-6c9655ff8c-2gcbj_monitoring_a329f6ff-d9b1-11e7-9ad0-08002720cfab_3 95 | 3ca5cc9f90ca gcr.io/k8s-minikube/storage-provisioner "/storage-provisioner" 96 | 9 hours ago Up 9 hours k8s_storage-provisioner_storage-provisioner_kube-system_03dd8fc9-d76b-11e7-bd99-08002720cfab_7 97 | a729e69e1770 quay.io/prometheus/prometheus "/bin/prometheus -..." 98 | 9 hours ago Up 9 hours k8s_prometheus_prometheus-7bb476b94c-wslx2_monitoring_2afe2e0a-d9b0-11e7-9ad0-08002720cfab_3 99 | fc52189b160e gcr.io/google_containers/pause-amd64:3.0 "/pause" 100 | 9 hours ago Up 9 hours k8s_POD_tiller-deploy-7cb6884b74-w2pqw_kube-system_1c30ffb5-d9ba-11e7-9ad0-08002720cfab_3 101 | 7a1721744d42 gcr.io/google_containers/pause-amd64:3.0 "/pause" 102 | 9 hours ago Up 9 hours k8s_POD_kubernetes-dashboard-klx49_kube-system_04316921-d76b-11e7-bd99-08002720cfab_7 103 | 771b6d3ca8ac gcr.io/google_containers/pause-amd64:3.0 "/pause" 104 | 9 hours ago Up 9 hours k8s_POD_storage-provisioner_kube-system_03dd8fc9-d76b-11e7-bd99-08002720cfab_7 105 | 2246c44ff557 gcr.io/google_containers/pause-amd64:3.0 "/pause" 106 | 9 hours ago Up 9 hours k8s_POD_grafana-6c9655ff8c-2gcbj_monitoring_a329f6ff-d9b1-11e7-9ad0-08002720cfab_3 107 | e932a94d49a2 gcr.io/google_containers/pause-amd64:3.0 "/pause" 108 | 9 hours ago Up 9 hours k8s_POD_kube-dns-86f6f55dd5-twgwv_kube-system_044ce763-d76b-11e7-bd99-08002720cfab_7 109 | 57c3b5f3170d gcr.io/google_containers/pause-amd64:3.0 "/pause" 110 | 9 hours ago Up 9 hours k8s_POD_prometheus-7bb476b94c-wslx2_monitoring_2afe2e0a-d9b0-11e7-9ad0-08002720cfab_3 111 | 9935e8ea659e gcr.io/google-containers/kube-addon-manager "/opt/kube-addons.sh" 112 | 9 hours ago Up 9 hours k8s_kube-addon-manager_kube-addon-manager-minikube_kube-system_7b19c3ba446df5355649563d32723e4f_7 113 | 348d1a35a2d3 gcr.io/google_containers/pause-amd64:3.0 "/pause" 114 | 9 hours ago Up 9 hours k8s_POD_kube-addon-manager-minikube_kube-system_7b19c3ba446df5355649563d32723e4f_7 115 | $ kubectl cluster-info 116 | Kubernetes master is running at https://192.168.99.100:8443 117 | 118 | To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'. 
119 | $ kubectl get cs 120 | NAME STATUS MESSAGE ERROR 121 | scheduler Healthy ok 122 | controller-manager Healthy ok 123 | etcd-0 Healthy {"health": "true"} 124 | ``` 125 | 126 | Let's look at a new demo, before doing so, let's make sure that we have a clean environment with the necesary files for the demo. 127 | ```console 128 | $ kubectl get pod 129 | NAME READY STATUS RESTARTS AGE 130 | my-web-84b5767c98-vlmb4 1/1 Running 0 5h 131 | $ kubectl delete pod my-web 132 | pod "my-web" deleted 133 | $ kubectl delete svc my-web 134 | service "my-web" deleted 135 | $ kubectl get pod 136 | No resources found. 137 | $ kubectl get rc 138 | No resources found. 139 | $ kubectl get svc 140 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 141 | kubernetes ClusterIP 10.96.0.1 443/TCP 7d 142 | $ ls 143 | Dockerfile db-pod.yaml demo.sh images/ web-pod.yaml 144 | app.py db-svc.yaml docker-compose.yaml kubernetes_architecture.md 145 | ``` 146 | Now that we have a clean environment we can begin with the demo... 147 | 148 | ```console 149 | $ kubectl create -f db-pod.yaml 150 | pod "redis" created 151 | $ kubectl get pod 152 | NAME READY STATUS RESTARTS AGE 153 | redis 1/1 Running 0 51s 154 | $ kubectl create -f db-svc.yaml 155 | service "redis" created 156 | $ kubectl get svc 157 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 158 | kubernetes ClusterIP 10.96.0.1 443/TCP 7d 159 | redis ClusterIP 10.111.14.203 6379/TCP 1m 160 | $ kubectl create -f web-pod.yaml 161 | pod "web" created 162 | $ kubectl get pods 163 | NAME READY STATUS RESTARTS AGE 164 | redis 1/1 Running 0 6m 165 | web 0/1 ContainerCreating 0 41s 166 | $ kubectl get pods 167 | NAME READY STATUS RESTARTS AGE 168 | redis 1/1 Running 0 7m 169 | web 1/1 Running 0 2m 170 | $ kubectl exec -it web /bin/bash 171 | root@web:/usr/src/app# ls 172 | app.py build.sh flask requirements.txt 173 | root@web:/usr/src/app# cat app.py 174 | from flask import Flask 175 | from redis import Redis 176 | import os 177 | app = Flask(__name__) 178 | redis = Redis(host=os.environ.get('REDIS_HOST', 'redis'), port=6379) 179 | 180 | @app.route('/') 181 | def hello(): 182 | redis.incr('hits') 183 | return 'Hello Container World! 
I have been seen %s times.\n' % redis.get('hits') 184 | 185 | if __name__ == "__main__": 186 | app.run(host="0.0.0.0", port=5000, debug=True)root@web:/usr/src/app# env 187 | REDIS_PORT_6379_TCP_PROTO=tcp 188 | HOSTNAME=web 189 | GPG_KEY=C01E1CAD5EA2C4F0B8E3571504C367C218ADD4FF 190 | REDIS_SERVICE_PORT_REDIS=6379 191 | KUBERNETES_PORT_443_TCP_PORT=443 192 | KUBERNETES_PORT=tcp://10.96.0.1:443 193 | REDIS_SERVICE_PORT=6379 194 | KUBERNETES_SERVICE_PORT=443 195 | KUBERNETES_SERVICE_HOST=10.96.0.1 196 | REDIS_PORT_6379_TCP_ADDR=10.111.14.203 197 | REDIS_PORT_6379_TCP_PORT=6379 198 | PYTHON_VERSION=2.7.12 199 | PATH=/usr/local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 200 | PWD=/usr/src/app 201 | LANG=C.UTF-8 202 | REDIS_PORT_6379_TCP=tcp://10.111.14.203:6379 203 | PYTHON_PIP_VERSION=8.1.2 204 | SHLVL=1 205 | HOME=/root 206 | REDIS_PORT=tcp://10.111.14.203:6379 207 | KUBERNETES_PORT_443_TCP_PROTO=tcp 208 | KUBERNETES_SERVICE_PORT_HTTPS=443 209 | REDIS_SERVICE_HOST=10.111.14.203 210 | KUBERNETES_PORT_443_TCP_ADDR=10.96.0.1 211 | KUBERNETES_PORT_443_TCP=tcp://10.96.0.1:443 212 | _=/usr/bin/env 213 | root@web:/usr/src/app# ping redis 214 | PING redis.default.svc.cluster.local (10.111.14.203): 56 data bytes 215 | ^C--- redis.default.svc.cluster.local ping statistics --- 216 | 40 packets transmitted, 0 packets received, 100% packet loss 217 | root@web:/usr/src/app# curl localhost:5000 218 | Hello Container World! I have been seen 1 times. 219 | root@web:/usr/src/app# exit 220 | exit 221 | $ 222 | ``` 223 | 224 | We have exited the container and back to our Kubernetes program... 225 | 226 | ```console 227 | λ kubectl describe pod web 228 | Name: web 229 | Namespace: default 230 | Node: minikube/192.168.99.100 231 | Start Time: Sun, 10 Dec 2017 21:23:53 +0800 232 | Labels: app=demo 233 | name=web 234 | Annotations: 235 | Status: Running 236 | IP: 172.17.0.8 237 | Containers: 238 | web: 239 | Container ID: docker://63074cbd2d04bc7f021681a4ac661c027b44372cdae54d0491813bad7829074f 240 | Image: janakiramm/web 241 | Image ID: docker-pullable://janakiramm/web@sha256:6e913c17fb1d6f230655abd7db47e55c0bf274c4074d410fdd2cad183c2ab49e 242 | Port: 5000/TCP 243 | State: Running 244 | Started: Sun, 10 Dec 2017 21:25:33 +0800 245 | Ready: True 246 | Restart Count: 0 247 | Environment: 248 | Mounts: 249 | /var/run/secrets/kubernetes.io/serviceaccount from default-token-zk79b (ro) 250 | Conditions: 251 | Type Status 252 | Initialized True 253 | Ready True 254 | PodScheduled True 255 | Volumes: 256 | default-token-zk79b: 257 | Type: Secret (a volume populated by a Secret) 258 | SecretName: default-token-zk79b 259 | Optional: false 260 | QoS Class: BestEffort 261 | Node-Selectors: 262 | Tolerations: 263 | Events: 264 | Type Reason Age From Message 265 | ---- ------ ---- ---- ------- 266 | Normal Scheduled 41m default-scheduler Successfully assigned web to minikube 267 | Normal SuccessfulMountVolume 41m kubelet, minikube MountVolume.SetUp succeeded for volume "default-token-zk79b" 268 | Normal Pulling 41m kubelet, minikube pulling image "janakiramm/web" 269 | Normal Pulled 40m kubelet, minikube Successfully pulled image "janakiramm/web" 270 | Normal Created 40m kubelet, minikube Created container 271 | Normal Started 39m kubelet, minikube Started container 272 | ``` 273 | 274 | ## Replication Controller 275 | * Ensures that a Pod or homogeneous set of Pods are always up and available 276 | * Always maintains desired number of Pods 277 | * If there are excess Pods, they get killed 278 | * New pods 
are launched when they fail, get deleted, or terminated 279 | * Creating a replication controller with a count of 1 ensures that a Pod is always available 280 | * Replication controller and Pods are associated through Labels 281 | 282 | ## Scaling Pods with Replication Controller 283 | ![Scaling Pods with Replication Controller](./images/image-02-07.png) 284 | 285 | ## Demo 286 | ### Scaling Pods with Replication Controller 287 | 288 | ```console 289 | $ kubectl create -f web-rc.yaml 290 | replicationcontroller "web" created 291 | $ kubectl get rc 292 | NAME DESIRED CURRENT READY AGE 293 | web 2 2 2 35s 294 | $ kubectl get po 295 | NAME READY STATUS RESTARTS AGE 296 | redis 1/1 Running 0 1h 297 | web 1/1 Running 0 1h 298 | web-vtxdz 1/1 Running 0 1m 299 | $ kubectl delete pod web-vtxdz 300 | pod "web-vtxdz" deleted 301 | $ kubectl get po 302 | NAME READY STATUS RESTARTS AGE 303 | redis 1/1 Running 0 1h 304 | web 1/1 Running 0 1h 305 | web-zdmcg 1/1 Running 0 29s 306 | $ kubectl scale rc web --replicas=10 307 | replicationcontroller "web" scaled 308 | $ kubectl get po 309 | NAME READY STATUS RESTARTS AGE 310 | redis 1/1 Running 0 1h 311 | web 1/1 Running 0 1h 312 | web-2q2nh 0/1 ContainerCreating 0 23s 313 | web-8tfzt 1/1 Running 0 23s 314 | web-cckb9 0/1 ContainerCreating 0 23s 315 | web-l2sz6 1/1 Running 0 23s 316 | web-l8vq4 0/1 ContainerCreating 0 23s 317 | web-rbj92 1/1 Running 0 23s 318 | web-v42qq 0/1 ContainerCreating 0 23s 319 | web-xgpqq 1/1 Running 0 23s 320 | web-zdmcg 1/1 Running 0 3m 321 | $ kubectl get po 322 | NAME READY STATUS RESTARTS AGE 323 | redis 1/1 Running 0 1h 324 | web 1/1 Running 0 1h 325 | web-2q2nh 1/1 Running 0 1m 326 | web-8tfzt 1/1 Running 0 1m 327 | web-cckb9 1/1 Running 0 1m 328 | web-l2sz6 1/1 Running 0 1m 329 | web-l8vq4 1/1 Running 0 1m 330 | web-rbj92 1/1 Running 0 1m 331 | web-v42qq 1/1 Running 0 1m 332 | web-xgpqq 1/1 Running 0 1m 333 | web-zdmcg 1/1 Running 0 4m 334 | $ kubectl get nodes 335 | NAME STATUS ROLES AGE VERSION 336 | minikube Ready 8d v1.8.0 337 | ``` 338 | 339 | ## Summary 340 | * Kubernetes Master runs the API, Scheduler and Controller services 341 | * Each Node is responsible for running one or more Pods 342 | * Pods are the unit of deployment in Kubernetes 343 | * Labels associate one Kubernetes object with the other 344 | * Replication Controller ensures high availability of Pods 345 | * Services expose Pods to internal and external consumers 346 | 347 | Reference: 348 | * [Kubernetes Webinar Series - Kubernetes Architecture 101](https://www.youtube.com/watch?v=zeS6OyDoy78&index=2&list=PLF3s2WICJlqOiymMaTLjwwHz-MSVbtJPQ) 349 | 350 | -------------------------------------------------------------------------------- /02-lesson/app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | from redis import Redis 3 | import os 4 | app = Flask(__name__) 5 | redis = Redis(host=os.environ.get('REDIS_HOST', 'redis'), port=6379) 6 | 7 | @app.route('/') 8 | def hello(): 9 | redis.incr('hits') 10 | return 'Hello Container World! I have been seen %s times. 
\n' % redis.get('hits') 11 | 12 | if __name__ == "__main__": 13 | app.run(host="0.0.0.0", port=5000, debug=True) -------------------------------------------------------------------------------- /02-lesson/db-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: redis 5 | labels: 6 | name: redis 7 | app: demo 8 | spec: 9 | containers: 10 | - name: redis 11 | image: redis:latest 12 | ports: 13 | - containerPort: 6379 14 | protocol: TCP -------------------------------------------------------------------------------- /02-lesson/db-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: redis 5 | labels: 6 | name: redis 7 | app: demo 8 | spec: 9 | ports: 10 | - port: 6379 11 | name: redis 12 | targetPort: 6379 13 | selector: 14 | name: redis 15 | app: demo 16 | -------------------------------------------------------------------------------- /02-lesson/demo.sh: -------------------------------------------------------------------------------- 1 | # Deploy Nginx container 2 | kubectl run my-web --image=nginx --port=80 3 | 4 | # Expose Nginx container 5 | kubectl expose deployment my-web --target-port=80 --type=NodePort 6 | 7 | # Get the node IP for minikube 8 | minikube ip 9 | 10 | # Check the NodePort 11 | kubectl describe svc my-web 12 | 13 | # Access Ngnix 14 | PORT=$(kubectl get svc my-web -o go-template='{{(index .spec.ports 0).nodePort}}') 15 | -------------------------------------------------------------------------------- /02-lesson/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | web: 3 | image: janakiramm/web 4 | ports: 5 | - "3000:5000" 6 | links: 7 | - redis 8 | redis: 9 | image: redis:latest 10 | container_name: redis -------------------------------------------------------------------------------- /02-lesson/images/image-02-01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/02-lesson/images/image-02-01.png -------------------------------------------------------------------------------- /02-lesson/images/image-02-02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/02-lesson/images/image-02-02.png -------------------------------------------------------------------------------- /02-lesson/images/image-02-03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/02-lesson/images/image-02-03.png -------------------------------------------------------------------------------- /02-lesson/images/image-02-04.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/02-lesson/images/image-02-04.png -------------------------------------------------------------------------------- /02-lesson/images/image-02-05.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/02-lesson/images/image-02-05.png 
-------------------------------------------------------------------------------- /02-lesson/images/image-02-06.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/02-lesson/images/image-02-06.png -------------------------------------------------------------------------------- /02-lesson/images/image-02-07.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/02-lesson/images/image-02-07.png -------------------------------------------------------------------------------- /02-lesson/kubernetes_architecture.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Architecture 2 | 3 | ## Objectives 4 | * A closer look at Kubernetes cluster 5 | * Master components 6 | * Nodw components 7 | * Pods 8 | * Labels & Selectors 9 | * Replication Controllers 10 | * Services 11 | 12 | --- 13 | ## Kubernetes Architecture 14 | ![Kubernetes Architecture](./images/image-02-01.png) 15 | 16 | ## Kubernetes Master 17 | ![Kubernetes Master](./images/image-02-02.png) 18 | 19 | ### [Kubernetes Design and Architecture](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/architecture/architecture.md#the-kubernetes-node) 20 | * API Server 21 | * Scheduler 22 | * Controller 23 | * Coordinator, making sure that the pods are running 24 | * etcd 25 | * Its a key-value databse 26 | ## Kubernetes Node 27 | ![Kubernetes Node](./images/image-02-03.png) 28 | * Pod 29 | * Docker 30 | * kubelet 31 | * kube-proxy 32 | * Supervisord 33 | * fluentd 34 | * Addons (DNS, UI, PetSets) 35 | 36 | ## A Simple Containerized Application 37 | ![A Simple Containerized Application](./images/image-02-04.png) 38 | 39 | ## Kubernetes Pod 40 | * Group of one or more containers that are always co-located, co-scheduled, and run in a shared context 41 | * Containers in the same pod have the same hostname 42 | * Each pod is isolated by 43 | * Process ID (PID) namespace 44 | * Network namespace 45 | * Interprocess Communication (IPC) namespace 46 | * Unix Time Sharing (UTS) namespace 47 | * Alternative to a VM with multiple processes 48 | 49 | ## Labels & Selectors 50 | * Key/value pairs associated with Kubernetes objects 51 | * Used to organize and select subsets of objects 52 | * Attached to objects at creation time but modified at any time. 
53 | * Labels are the essential glue to associate one API object with other 54 | * Relication Controller -> Pods 55 | * Service -> Pods 56 | * Pods -> Nodes 57 | ## Deploying a Pod 58 | ![Deploying a Pod](./images/image-02-05.png) 59 | 60 | ## Services 61 | * An abstraction to define a logical set of Pods bound by a policy to access them 62 | * Services are exposed through internal and external endpoints 63 | * Services can also point to non-Kubernetes endpoints through a Virtual-IP-Bridge 64 | * Supports TCP and UDP 65 | * Interfaces with kube-proxy to manipulate iptables 66 | * Service can be exposed internal or external to the cluster 67 | 68 | ## Exposing Services 69 | ![Exposing Services](./images/image-02-06.png) 70 | 71 | ```console 72 | $ kubectl config get-contexts 73 | CURRENT NAME CLUSTER AUTHINFO NAMESPACE 74 | * minikube minikube minikube 75 | mycluster.icp-context mycluster.icp mycluster.icp-user default 76 | $ docker ps 77 | CONTAINER ID IMAGE COMMAND 78 | CREATED STATUS PORTS NAMES 79 | 95b8ee8eaf69 nginx "nginx -g 'daemon ..." 80 | 5 hours ago Up 4 hours k8s_my-web_my-web-84b5767c98-vlmb4_default_85d923f1-dd7e-11e7-ace5-08002720cfab_0 81 | 4bc51a5774b4 gcr.io/google_containers/pause-amd64:3.0 "/pause" 82 | 5 hours ago Up 4 hours k8s_POD_my-web-84b5767c98-vlmb4_default_85d923f1-dd7e-11e7-ace5-08002720cfab_0 83 | 34dca730a6e4 gcr.io/google_containers/k8s-dns-sidecar-amd64 "/sidecar --v=2 --..." 84 | 9 hours ago Up 9 hours k8s_sidecar_kube-dns-86f6f55dd5-twgwv_kube-system_044ce763-d76b-11e7-bd99-08002720cfab_7 85 | c1a8ebdb8599 gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64 "/dnsmasq-nanny -v..." 86 | 9 hours ago Up 9 hours k8s_dnsmasq_kube-dns-86f6f55dd5-twgwv_kube-system_044ce763-d76b-11e7-bd99-08002720cfab_7 87 | 7e396db1bab6 gcr.io/kubernetes-helm/tiller "/tiller" 88 | 9 hours ago Up 9 hours k8s_tiller_tiller-deploy-7cb6884b74-w2pqw_kube-system_1c30ffb5-d9ba-11e7-9ad0-08002720cfab_3 89 | f54298206cfd gcr.io/google_containers/kubernetes-dashboard-amd64 "/dashboard --inse..." 90 | 9 hours ago Up 9 hours k8s_kubernetes-dashboard_kubernetes-dashboard-klx49_kube-system_04316921-d76b-11e7-bd99-08002720cfab_8 91 | def26e1204cf gcr.io/google_containers/k8s-dns-kube-dns-amd64 "/kube-dns --domai..." 92 | 9 hours ago Up 9 hours k8s_kubedns_kube-dns-86f6f55dd5-twgwv_kube-system_044ce763-d76b-11e7-bd99-08002720cfab_7 93 | 1737981b8f9b grafana/grafana "/run.sh" 94 | 9 hours ago Up 9 hours k8s_grafana_grafana-6c9655ff8c-2gcbj_monitoring_a329f6ff-d9b1-11e7-9ad0-08002720cfab_3 95 | 3ca5cc9f90ca gcr.io/k8s-minikube/storage-provisioner "/storage-provisioner" 96 | 9 hours ago Up 9 hours k8s_storage-provisioner_storage-provisioner_kube-system_03dd8fc9-d76b-11e7-bd99-08002720cfab_7 97 | a729e69e1770 quay.io/prometheus/prometheus "/bin/prometheus -..." 
98 | 9 hours ago Up 9 hours k8s_prometheus_prometheus-7bb476b94c-wslx2_monitoring_2afe2e0a-d9b0-11e7-9ad0-08002720cfab_3 99 | fc52189b160e gcr.io/google_containers/pause-amd64:3.0 "/pause" 100 | 9 hours ago Up 9 hours k8s_POD_tiller-deploy-7cb6884b74-w2pqw_kube-system_1c30ffb5-d9ba-11e7-9ad0-08002720cfab_3 101 | 7a1721744d42 gcr.io/google_containers/pause-amd64:3.0 "/pause" 102 | 9 hours ago Up 9 hours k8s_POD_kubernetes-dashboard-klx49_kube-system_04316921-d76b-11e7-bd99-08002720cfab_7 103 | 771b6d3ca8ac gcr.io/google_containers/pause-amd64:3.0 "/pause" 104 | 9 hours ago Up 9 hours k8s_POD_storage-provisioner_kube-system_03dd8fc9-d76b-11e7-bd99-08002720cfab_7 105 | 2246c44ff557 gcr.io/google_containers/pause-amd64:3.0 "/pause" 106 | 9 hours ago Up 9 hours k8s_POD_grafana-6c9655ff8c-2gcbj_monitoring_a329f6ff-d9b1-11e7-9ad0-08002720cfab_3 107 | e932a94d49a2 gcr.io/google_containers/pause-amd64:3.0 "/pause" 108 | 9 hours ago Up 9 hours k8s_POD_kube-dns-86f6f55dd5-twgwv_kube-system_044ce763-d76b-11e7-bd99-08002720cfab_7 109 | 57c3b5f3170d gcr.io/google_containers/pause-amd64:3.0 "/pause" 110 | 9 hours ago Up 9 hours k8s_POD_prometheus-7bb476b94c-wslx2_monitoring_2afe2e0a-d9b0-11e7-9ad0-08002720cfab_3 111 | 9935e8ea659e gcr.io/google-containers/kube-addon-manager "/opt/kube-addons.sh" 112 | 9 hours ago Up 9 hours k8s_kube-addon-manager_kube-addon-manager-minikube_kube-system_7b19c3ba446df5355649563d32723e4f_7 113 | 348d1a35a2d3 gcr.io/google_containers/pause-amd64:3.0 "/pause" 114 | 9 hours ago Up 9 hours k8s_POD_kube-addon-manager-minikube_kube-system_7b19c3ba446df5355649563d32723e4f_7 115 | $ kubectl cluster-info 116 | Kubernetes master is running at https://192.168.99.100:8443 117 | 118 | To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'. 119 | $ kubectl get cs 120 | NAME STATUS MESSAGE ERROR 121 | scheduler Healthy ok 122 | controller-manager Healthy ok 123 | etcd-0 Healthy {"health": "true"} 124 | ``` 125 | 126 | Let's look at a new demo, before doing so, let's make sure that we have a clean environment with the necesary files for the demo. 127 | ```console 128 | $ kubectl get pod 129 | NAME READY STATUS RESTARTS AGE 130 | my-web-84b5767c98-vlmb4 1/1 Running 0 5h 131 | $ kubectl delete pod my-web 132 | pod "my-web" deleted 133 | $ kubectl delete svc my-web 134 | service "my-web" deleted 135 | $ kubectl get pod 136 | No resources found. 137 | $ kubectl get rc 138 | No resources found. 139 | $ kubectl get svc 140 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 141 | kubernetes ClusterIP 10.96.0.1 443/TCP 7d 142 | $ ls 143 | Dockerfile db-pod.yaml demo.sh images/ web-pod.yaml 144 | app.py db-svc.yaml docker-compose.yaml kubernetes_architecture.md 145 | ``` 146 | Now that we have a clean environment we can begin with the demo... 
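As an optional aside (not part of the original walkthrough), the manifests used in this demo can also be created in a single call by passing several `-f` flags; lesson 03 uses the same pattern for its setup and cleanup:

```console
$ kubectl create -f db-pod.yaml -f db-svc.yaml -f web-pod.yaml
```

The step-by-step version below does exactly the same thing, one object at a time.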
147 | 148 | ```console 149 | $ kubectl create -f db-pod.yaml 150 | pod "redis" created 151 | $ kubectl get pod 152 | NAME READY STATUS RESTARTS AGE 153 | redis 1/1 Running 0 51s 154 | $ kubectl create -f db-svc.yaml 155 | service "redis" created 156 | $ kubectl get svc 157 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 158 | kubernetes ClusterIP 10.96.0.1 443/TCP 7d 159 | redis ClusterIP 10.111.14.203 6379/TCP 1m 160 | $ kubectl create -f web-pod.yaml 161 | pod "web" created 162 | $ kubectl get pods 163 | NAME READY STATUS RESTARTS AGE 164 | redis 1/1 Running 0 6m 165 | web 0/1 ContainerCreating 0 41s 166 | $ kubectl get pods 167 | NAME READY STATUS RESTARTS AGE 168 | redis 1/1 Running 0 7m 169 | web 1/1 Running 0 2m 170 | $ kubectl exec -it web /bin/bash 171 | root@web:/usr/src/app# ls 172 | app.py build.sh flask requirements.txt 173 | root@web:/usr/src/app# cat app.py 174 | from flask import Flask 175 | from redis import Redis 176 | import os 177 | app = Flask(__name__) 178 | redis = Redis(host=os.environ.get('REDIS_HOST', 'redis'), port=6379) 179 | 180 | @app.route('/') 181 | def hello(): 182 | redis.incr('hits') 183 | return 'Hello Container World! I have been seen %s times.\n' % redis.get('hits') 184 | 185 | if __name__ == "__main__": 186 | app.run(host="0.0.0.0", port=5000, debug=True)root@web:/usr/src/app# env 187 | REDIS_PORT_6379_TCP_PROTO=tcp 188 | HOSTNAME=web 189 | GPG_KEY=C01E1CAD5EA2C4F0B8E3571504C367C218ADD4FF 190 | REDIS_SERVICE_PORT_REDIS=6379 191 | KUBERNETES_PORT_443_TCP_PORT=443 192 | KUBERNETES_PORT=tcp://10.96.0.1:443 193 | REDIS_SERVICE_PORT=6379 194 | KUBERNETES_SERVICE_PORT=443 195 | KUBERNETES_SERVICE_HOST=10.96.0.1 196 | REDIS_PORT_6379_TCP_ADDR=10.111.14.203 197 | REDIS_PORT_6379_TCP_PORT=6379 198 | PYTHON_VERSION=2.7.12 199 | PATH=/usr/local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 200 | PWD=/usr/src/app 201 | LANG=C.UTF-8 202 | REDIS_PORT_6379_TCP=tcp://10.111.14.203:6379 203 | PYTHON_PIP_VERSION=8.1.2 204 | SHLVL=1 205 | HOME=/root 206 | REDIS_PORT=tcp://10.111.14.203:6379 207 | KUBERNETES_PORT_443_TCP_PROTO=tcp 208 | KUBERNETES_SERVICE_PORT_HTTPS=443 209 | REDIS_SERVICE_HOST=10.111.14.203 210 | KUBERNETES_PORT_443_TCP_ADDR=10.96.0.1 211 | KUBERNETES_PORT_443_TCP=tcp://10.96.0.1:443 212 | _=/usr/bin/env 213 | root@web:/usr/src/app# ping redis 214 | PING redis.default.svc.cluster.local (10.111.14.203): 56 data bytes 215 | ^C--- redis.default.svc.cluster.local ping statistics --- 216 | 40 packets transmitted, 0 packets received, 100% packet loss 217 | root@web:/usr/src/app# curl localhost:5000 218 | Hello Container World! I have been seen 1 times. 219 | root@web:/usr/src/app# exit 220 | exit 221 | $ 222 | ``` 223 | 224 | We have exited the container and back to our Kubernetes program... 
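One note on the failed `ping redis` seen above: that result is expected rather than a sign of trouble. A service's ClusterIP is a virtual IP that kube-proxy implements with iptables rules, so only the TCP/UDP ports declared in the service are forwarded, and ICMP echo requests get no reply. A TCP-level probe of the declared port is the meaningful check; a minimal sketch, assuming the `web` pod and `redis` service names used above:

```console
$ kubectl exec web -- python -c "import socket; socket.create_connection(('redis', 6379), 3); print('redis:6379 reachable')"
```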
225 | 226 | ```console 227 | λ kubectl describe pod web 228 | Name: web 229 | Namespace: default 230 | Node: minikube/192.168.99.100 231 | Start Time: Sun, 10 Dec 2017 21:23:53 +0800 232 | Labels: app=demo 233 | name=web 234 | Annotations: 235 | Status: Running 236 | IP: 172.17.0.8 237 | Containers: 238 | web: 239 | Container ID: docker://63074cbd2d04bc7f021681a4ac661c027b44372cdae54d0491813bad7829074f 240 | Image: janakiramm/web 241 | Image ID: docker-pullable://janakiramm/web@sha256:6e913c17fb1d6f230655abd7db47e55c0bf274c4074d410fdd2cad183c2ab49e 242 | Port: 5000/TCP 243 | State: Running 244 | Started: Sun, 10 Dec 2017 21:25:33 +0800 245 | Ready: True 246 | Restart Count: 0 247 | Environment: 248 | Mounts: 249 | /var/run/secrets/kubernetes.io/serviceaccount from default-token-zk79b (ro) 250 | Conditions: 251 | Type Status 252 | Initialized True 253 | Ready True 254 | PodScheduled True 255 | Volumes: 256 | default-token-zk79b: 257 | Type: Secret (a volume populated by a Secret) 258 | SecretName: default-token-zk79b 259 | Optional: false 260 | QoS Class: BestEffort 261 | Node-Selectors: 262 | Tolerations: 263 | Events: 264 | Type Reason Age From Message 265 | ---- ------ ---- ---- ------- 266 | Normal Scheduled 41m default-scheduler Successfully assigned web to minikube 267 | Normal SuccessfulMountVolume 41m kubelet, minikube MountVolume.SetUp succeeded for volume "default-token-zk79b" 268 | Normal Pulling 41m kubelet, minikube pulling image "janakiramm/web" 269 | Normal Pulled 40m kubelet, minikube Successfully pulled image "janakiramm/web" 270 | Normal Created 40m kubelet, minikube Created container 271 | Normal Started 39m kubelet, minikube Started container 272 | ``` 273 | 274 | ## Replication Controller 275 | * Ensures that a Pod or homogeneous set of Pods are always up and available 276 | * Always maintains desired number of Pods 277 | * If there are excess Pods, they get killed 278 | * New pods are launched when they fail, get deleted, or terminated 279 | * Creating a replication controller with a count of 1 ensures that a Pod is always available 280 | * Replication controller and Pods are associated through Labels 281 | 282 | ## Scaling Pods with Replication Controller 283 | ![Scaling Pods with Replication Controller](./images/image-02-07.png) 284 | 285 | ## Demo 286 | ### Scaling Pods with Replication Controller 287 | 288 | ```console 289 | $ kubectl create -f web-rc.yaml 290 | replicationcontroller "web" created 291 | $ kubectl get rc 292 | NAME DESIRED CURRENT READY AGE 293 | web 2 2 2 35s 294 | $ kubectl get po 295 | NAME READY STATUS RESTARTS AGE 296 | redis 1/1 Running 0 1h 297 | web 1/1 Running 0 1h 298 | web-vtxdz 1/1 Running 0 1m 299 | $ kubectl delete pod web-vtxdz 300 | pod "web-vtxdz" deleted 301 | $ kubectl get po 302 | NAME READY STATUS RESTARTS AGE 303 | redis 1/1 Running 0 1h 304 | web 1/1 Running 0 1h 305 | web-zdmcg 1/1 Running 0 29s 306 | $ kubectl scale rc web --replicas=10 307 | replicationcontroller "web" scaled 308 | $ kubectl get po 309 | NAME READY STATUS RESTARTS AGE 310 | redis 1/1 Running 0 1h 311 | web 1/1 Running 0 1h 312 | web-2q2nh 0/1 ContainerCreating 0 23s 313 | web-8tfzt 1/1 Running 0 23s 314 | web-cckb9 0/1 ContainerCreating 0 23s 315 | web-l2sz6 1/1 Running 0 23s 316 | web-l8vq4 0/1 ContainerCreating 0 23s 317 | web-rbj92 1/1 Running 0 23s 318 | web-v42qq 0/1 ContainerCreating 0 23s 319 | web-xgpqq 1/1 Running 0 23s 320 | web-zdmcg 1/1 Running 0 3m 321 | $ kubectl get po 322 | NAME READY STATUS RESTARTS AGE 323 | redis 1/1 Running 0 1h 324 | 
web 1/1 Running 0 1h 325 | web-2q2nh 1/1 Running 0 1m 326 | web-8tfzt 1/1 Running 0 1m 327 | web-cckb9 1/1 Running 0 1m 328 | web-l2sz6 1/1 Running 0 1m 329 | web-l8vq4 1/1 Running 0 1m 330 | web-rbj92 1/1 Running 0 1m 331 | web-v42qq 1/1 Running 0 1m 332 | web-xgpqq 1/1 Running 0 1m 333 | web-zdmcg 1/1 Running 0 4m 334 | $ kubectl get nodes 335 | NAME STATUS ROLES AGE VERSION 336 | minikube Ready 8d v1.8.0 337 | ``` 338 | 339 | ## Summary 340 | * Kubernetes Master runs the API, Scheduler and Controller services 341 | * Each Node is responsible for running one or more Pods 342 | * Pods are the unit of deployment in Kubernetes 343 | * Labels associate one Kubernetes object with the other 344 | * Replication Controller ensures high availability of Pods 345 | * Services expose Pods to internal and external consumers 346 | 347 | Reference: 348 | * [Kubernetes Webinar Series - Kubernetes Architecture 101](https://www.youtube.com/watch?v=zeS6OyDoy78&index=2&list=PLF3s2WICJlqOiymMaTLjwwHz-MSVbtJPQ) 349 | 350 | -------------------------------------------------------------------------------- /02-lesson/web-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: web 5 | labels: 6 | name: web 7 | app: demo 8 | spec: 9 | containers: 10 | - name: web 11 | image: janakiramm/web 12 | ports: 13 | - containerPort: 5000 14 | name: http 15 | protocol: TCP -------------------------------------------------------------------------------- /02-lesson/web-rc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: web 5 | labels: 6 | name: web 7 | app: demo 8 | spec: 9 | replicas: 2 10 | template: 11 | metadata: 12 | labels: 13 | name: web 14 | spec: 15 | containers: 16 | - name: web 17 | image: janakiramm/web 18 | ports: 19 | - containerPort: 5000 20 | name: http 21 | protocol: TCP 22 | -------------------------------------------------------------------------------- /03-lesson/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:2.7-onbuild 2 | EXPOSE 5000 3 | CMD [ "python", "app.py" ] -------------------------------------------------------------------------------- /03-lesson/README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Webinar Series - A Closer Look at Pods and Replicas 2 | 3 | ## Objectives 4 | 5 | * Understand the concept of Pods 6 | * Explore multi-container Pods 7 | * Closer look at the use cases and scenarios for multi-container pods 8 | * Scalling Pods through Replica Sets 9 | 10 | --- 11 | ## What is Pod? 
12 | 13 | * A group one or more containers that are always co-located and co-scheduled that share the context 14 | * Containers in a pod share the same IP address, ports, hostname and storage 15 | * Modeled like a virtual machine: 16 | * Each container represents one process 17 | * Tightly coupled with other containers in the same pod 18 | * Pods are scheduled in Nodes 19 | * Fundamental unit of deployment in Kubernetes 20 | 21 | ## Demo 22 | ### Creating First Pod 23 | ```console 24 | $ kubectl config get-contexts 25 | CURRENT NAME CLUSTER AUTHINFO NAMESPACE 26 | mycluster.icp-context mycluster.icp mycluster.icp-user default 27 | * minikube minikube minikube 28 | $ kubectl get cs 29 | NAME STATUS MESSAGE ERROR 30 | controller-manager Healthy ok 31 | scheduler Healthy ok 32 | etcd-0 Healthy {"health": "true"} 33 | $ kubectl get nodes 34 | NAME STATUS ROLES AGE VERSION 35 | minikube Ready 8d v1.8.0 36 | $ kubectl get po 37 | No resources found. 38 | $ kubectl create -f mypod.yaml 39 | pod "mypod" created 40 | $ kubectl get po 41 | NAME READY STATUS RESTARTS AGE 42 | mypod 0/1 ContainerCreating 0 0s 43 | $ kubectl get po 44 | NAME READY STATUS RESTARTS AGE 45 | mypod 1/1 Running 0 37s 46 | $ kubectl describe pod mypod 47 | Name: mypod 48 | Namespace: default 49 | Node: minikube/192.168.99.100 50 | Start Time: Mon, 11 Dec 2017 08:30:34 +0800 51 | Labels: app=demo 52 | env=test 53 | Annotations: 54 | Status: Running 55 | IP: 172.17.0.3 56 | Containers: 57 | nginx: 58 | Container ID: docker://1f16bf664b53913c74f6444378e08737ff5763a5987d5a61894dd8eeafd7f707 59 | Image: nginx 60 | Image ID: docker-pullable://nginx@sha256:b81f317384d7388708a498555c28a7cce778a8f291d90021208b3eba3fe74887 61 | Port: 80/TCP 62 | State: Running 63 | Started: Mon, 11 Dec 2017 08:30:46 +0800 64 | Ready: True 65 | Restart Count: 0 66 | Environment: 67 | Mounts: 68 | /var/run/secrets/kubernetes.io/serviceaccount from default-token-zk79b (ro) 69 | Conditions: 70 | Type Status 71 | Initialized True 72 | Ready True 73 | PodScheduled True 74 | Volumes: 75 | default-token-zk79b: 76 | Type: Secret (a volume populated by a Secret) 77 | SecretName: default-token-zk79b 78 | Optional: false 79 | QoS Class: BestEffort 80 | Node-Selectors: 81 | Tolerations: 82 | Events: 83 | Type Reason Age From Message 84 | ---- ------ ---- ---- ------- 85 | Normal Scheduled 1m default-scheduler Successfully assigned mypod to minikube 86 | 87 | Normal SuccessfulMountVolume 1m kubelet, minikube MountVolume.SetUp succeeded for volume "default-token-zk79b" 88 | Normal Pulling 1m kubelet, minikube pulling image "nginx" 89 | Normal Pulled 1m kubelet, minikube Successfully pulled image "nginx" 90 | Normal Created 1m kubelet, minikube Created container 91 | Normal Started 1m kubelet, minikube Started container 92 | ``` 93 | At a later stage we will go through the details of creating a service similar to the way we built a pod via a declarative file, but for the time being we will simply create a service from the command line using `kubectl expose pod`, the right syntax will be shown below: 94 | 95 | ```console 96 | $ kubectl expose pod mypod --type=NodePort 97 | service "mypod" exposed 98 | ``` 99 | 100 | We can see that the pod was created with a tag `--type=NodePort` this indicates that we can access the pod from external to the pod, we will elaborate on this later. 
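The lesson reads the NodePort off the `kubectl get svc` and `kubectl describe svc` output next; for scripting, the same value can be extracted with the go-template trick used in this repository's `demo.sh`. A small sketch, assuming the `mypod` service created above:

```console
$ NODE_IP=$(minikube ip)
$ NODE_PORT=$(kubectl get svc mypod -o go-template='{{(index .spec.ports 0).nodePort}}')
$ curl http://$NODE_IP:$NODE_PORT
```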
101 | 102 | ```console 103 | $ kubectl get svc 104 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 105 | kubernetes ClusterIP 10.96.0.1 443/TCP 8d 106 | mypod NodePort 10.111.38.217 80:32364/TCP 29s 107 | ``` 108 | Look closely at the second line, you see this under PORT(S) **80:32364/TCP** this indicates that we can access the service at port **32364**. 109 | There a quick way to access this from minikube by simply typing: 110 | ```console 111 | $ minikube service mypod 112 | Opening kubernetes service default/mypod in default browser... 113 | ``` 114 | A browser would have opened with the service... in my case. 115 | 116 | ![Welcome to nginx](./images/image-03-01.png) 117 | 118 | Say in the case that I'm not using minikube, I can access the service via the `describe` command, as shown below: 119 | ```console 120 | $ kubectl describe svc mypod 121 | Name: mypod 122 | Namespace: default 123 | Labels: app=demo 124 | env=test 125 | Annotations: 126 | Selector: app=demo,env=test 127 | Type: NodePort 128 | IP: 10.111.38.217 129 | Port: 80/TCP 130 | TargetPort: 80/TCP 131 | NodePort: 32364/TCP 132 | Endpoints: 172.17.0.3:80 133 | Session Affinity: None 134 | External Traffic Policy: Cluster 135 | Events: 136 | ``` 137 | It's the same port as shown before, _NodePort: 32364/TCP_ 138 | Now, I can access the site by issuing these commands: 139 | ```console 140 | $ minikube ip 141 | 192.168.99.100 142 | $ curl 192.168.99.100:32364 143 | 144 | 145 | 146 | Welcome to nginx! 147 | 154 | 155 | 156 |

Welcome to nginx!
157 | If you see this page, the nginx web server is successfully installed and 158 | working. Further configuration is required.
159 | 160 | For online documentation and support please refer to 161 | nginx.org. 162 | Commercial support is available at 163 | nginx.com.
164 | 165 | Thank you for using nginx.
166 | 167 | 168 | ``` 169 | ## What is a Pod? 170 | ![What is a Pod?](./images/image-03-02.png){:height="245px" width="252px"} 171 | A multi-container pod that contains a file puller and a web server that uses a persistent volume for a shared storage between the containers. 172 | * Containers within the same pod communicate with each other using IPC 'Inter Process Communication' 173 | * Containers can find each other via _localhost_ 174 | * Each container inherits the name of the pod 175 | * Each pod has an IP address in a flat shared networking space 176 | * Volumes are shared by containers in a pod 177 | 178 | ## Use Cases for Pod 179 | * Content management systems, file and data loaders, local cache managers, etc. 180 | * Log and checkpoint backup, compression, rotation, snapshotting, etc. 181 | * Data change watchers, log tailers, logging and monitoring adapters, event publishers, etc. 182 | * Proxies, bridges and adapters 183 | * Controllers, managers, configurators, and updaters 184 | ## Multi-Container Pod with Python and Redis 185 | ![Multi-Container Pod with Python and Redis](./images/image-03-03.png) 186 | ## Demo 187 | ### Exploring a Multi-Container Pod 188 | ```console 189 | $ kubectl create -f db-pod.yaml 190 | pod "mysql" created 191 | $ kubectl create -f db-svc.yaml 192 | service "mysql" created 193 | $ kubectl create -f web-pod-1.yaml 194 | pod "web1" created 195 | $ kubectl create -f web-svc.yaml 196 | service "web" created 197 | $ kubectl describe svc web 198 | Name: web 199 | Namespace: default 200 | Labels: app=demo 201 | name=web 202 | Annotations: 203 | Selector: name=web 204 | Type: NodePort 205 | IP: 10.109.0.27 206 | Port: http 80/TCP 207 | TargetPort: 5000/TCP 208 | NodePort: http 31282/TCP 209 | Endpoints: 172.17.0.8:5000 210 | Session Affinity: None 211 | External Traffic Policy: Cluster 212 | Events: 213 | $ kubectl get po 214 | NAME READY STATUS RESTARTS AGE 215 | mypod 1/1 Running 0 2h 216 | mysql 1/1 Running 0 14m 217 | web1 2/2 Running 0 3m 218 | ``` 219 | Let's delete the mypod pod and svc, there are no longer needed: 220 | ```console 221 | $ kubectl delete pod mypod 222 | pod "mypod" deleted 223 | $ kubectl get po 224 | NAME READY STATUS RESTARTS AGE 225 | mysql 1/1 Running 0 18m 226 | web1 2/2 Running 0 7m 227 | $ kubectl delete svc mypod 228 | service "mypod" deleted 229 | $ kubectl get svc 230 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 231 | kubernetes ClusterIP 10.96.0.1 443/TCP 8d 232 | mysql ClusterIP 10.98.129.95 3306/TCP 13m 233 | web NodePort 10.109.0.27 80:31282/TCP 7m 234 | ``` 235 | Now, we can look at our newly created pods by issuing the `describe` command: 236 | ```console 237 | λ kubectl describe po 238 | Name: mysql 239 | Namespace: default 240 | Node: minikube/192.168.99.100 241 | Start Time: Mon, 11 Dec 2017 10:18:11 +0800 242 | Labels: app=demo 243 | name=mysql 244 | Annotations: 245 | Status: Running 246 | IP: 172.17.0.7 247 | Containers: 248 | mysql: 249 | Container ID: docker://cbdc2a59d9def1727fbbfbe068bc44e4d78178b4531847f21eca6d2de0dc4fb7 250 | Image: mysql:latest 251 | Image ID: docker-pullable://mysql@sha256:ed6e70fcd9126d7b246d2beb512aa7cb0aafac4bfda11ee9d8a4f5c488c437f9 252 | Port: 3306/TCP 253 | State: Running 254 | Started: Mon, 11 Dec 2017 10:18:47 +0800 255 | Ready: True 256 | Restart Count: 0 257 | Environment: 258 | MYSQL_ROOT_PASSWORD: password 259 | Mounts: 260 | /var/run/secrets/kubernetes.io/serviceaccount from default-token-zk79b (ro) 261 | Conditions: 262 | Type Status 263 | Initialized True 264 | Ready True 
265 | PodScheduled True 266 | Volumes: 267 | default-token-zk79b: 268 | Type: Secret (a volume populated by a Secret) 269 | SecretName: default-token-zk79b 270 | Optional: false 271 | QoS Class: BestEffort 272 | Node-Selectors: 273 | Tolerations: 274 | Events: 275 | Type Reason Age From Message 276 | ---- ------ ---- ---- ------- 277 | Normal Scheduled 21m default-scheduler Successfully assigned mysql to minikube 278 | 279 | Normal SuccessfulMountVolume 21m kubelet, minikube MountVolume.SetUp succeeded for volume "default-token-zk79b" 280 | Normal Pulling 21m kubelet, minikube pulling image "mysql:latest" 281 | Normal Pulled 21m kubelet, minikube Successfully pulled image "mysql:latest" 282 | Normal Created 21m kubelet, minikube Created container 283 | Normal Started 21m kubelet, minikube Started container 284 | 285 | 286 | Name: web1 287 | Namespace: default 288 | Node: minikube/192.168.99.100 289 | Start Time: Mon, 11 Dec 2017 10:29:13 +0800 290 | Labels: app=demo 291 | name=web 292 | Annotations: 293 | Status: Running 294 | IP: 172.17.0.8 295 | Containers: 296 | redis: 297 | Container ID: docker://c4f245654d7d5362a7a91da840cb4e13183cf740d01bde5f9740e08e4a788e3a 298 | Image: redis 299 | Image ID: docker-pullable://redis@sha256:de4e675f62e4f3f71f43e98ae46a67dba92459ff950de4428d13289b69328f96 300 | Port: 6379/TCP 301 | State: Running 302 | Started: Mon, 11 Dec 2017 10:29:20 +0800 303 | Ready: True 304 | Restart Count: 0 305 | Environment: 306 | Mounts: 307 | /var/run/secrets/kubernetes.io/serviceaccount from default-token-zk79b (ro) 308 | python: 309 | Container ID: docker://fbd8ee67a7e6440b960585e91415bf0ce4ed9cced9928864b502baaa4a77407c 310 | Image: janakiramm/py-red 311 | Image ID: docker-pullable://janakiramm/py-red@sha256:54961a3b9d64322fd0b0e6312d10075f97b2b4911509aa1f61a54b7d7b9d26d5 312 | Port: 5000/TCP 313 | State: Running 314 | Started: Mon, 11 Dec 2017 10:29:33 +0800 315 | Ready: True 316 | Restart Count: 0 317 | Environment: 318 | REDIS_HOST: localhost 319 | Mounts: 320 | /var/run/secrets/kubernetes.io/serviceaccount from default-token-zk79b (ro) 321 | Conditions: 322 | Type Status 323 | Initialized True 324 | Ready True 325 | PodScheduled True 326 | Volumes: 327 | default-token-zk79b: 328 | Type: Secret (a volume populated by a Secret) 329 | SecretName: default-token-zk79b 330 | Optional: false 331 | QoS Class: BestEffort 332 | Node-Selectors: 333 | Tolerations: 334 | Events: 335 | Type Reason Age From Message 336 | ---- ------ ---- ---- ------- 337 | Normal Scheduled 10m default-scheduler Successfully assigned web1 to minikube 338 | Normal SuccessfulMountVolume 10m kubelet, minikube MountVolume.SetUp succeeded for volume "default-token-zk79b" 339 | Normal Pulling 10m kubelet, minikube pulling image "redis" 340 | Normal Pulled 10m kubelet, minikube Successfully pulled image "redis" 341 | Normal Created 10m kubelet, minikube Created container 342 | Normal Started 10m kubelet, minikube Started container 343 | Normal Pulling 10m kubelet, minikube pulling image "janakiramm/py-red" 344 | Normal Pulled 10m kubelet, minikube Successfully pulled image "janakiramm/py-red" 345 | Normal Created 10m kubelet, minikube Created container 346 | Normal Started 10m kubelet, minikube Started container 347 | 348 | ``` 349 | 350 | checkpoint 40' into the video 351 | ## Replication Controller 352 | * Ensures that a Pod or homogeneous set of Pods are always up and available 353 | * Always maintains desired number of Pods 354 | * If there are excess Pods, they get killed 355 | * New pods are launched 
when they fail, get deleted, or terminated 356 | * Creating a replication controller with a count of 1 ensures that a Pod is always available 357 | * Replication Controller and Pods are associated through Labels 358 | ## Replica Set 359 | * Replica Sets are the next generation Replication Controllers 360 | * Ensures specified number of pods are always running 361 | * Pods are replaced by Replica Sets when a failure occurs 362 | * New pods automatically scheduled 363 | * Labels and Selectors are used for associating Pods with Replica Sets 364 | * Usually combined with Pods when defining the deployment 365 | ## Demo 366 | ### Scalling Replica Sets 367 | ```console 368 | $ kubectl get po 369 | NAME READY STATUS RESTARTS AGE 370 | mysql 1/1 Running 0 1h 371 | web1 2/2 Running 0 1h 372 | web2 2/2 Running 0 1m 373 | $ kubectl delete po web2 374 | pod "web2" deleted 375 | $ kubectl get po 376 | NAME READY STATUS RESTARTS AGE 377 | mysql 1/1 Running 0 1h 378 | web1 2/2 Running 0 1h 379 | $ kubectl create -f web-rc.yaml 380 | replicationcontroller "web" created 381 | $ kubectl get po 382 | NAME READY STATUS RESTARTS AGE 383 | mysql 1/1 Running 0 1h 384 | web-5cds7 0/2 ContainerCreating 0 39s 385 | web-7scl5 0/2 ContainerCreating 0 39s 386 | web1 2/2 Running 0 1h 387 | $ kubectl get po 388 | NAME READY STATUS RESTARTS AGE 389 | mysql 1/1 Running 0 1h 390 | web-5cds7 2/2 Running 0 1m 391 | web-7scl5 2/2 Running 0 1m 392 | web1 2/2 Running 0 1h 393 | ``` 394 | 395 | You would have noticed that 2 new pods were created with arbitrary name attached to web, it's simply because the replicationcontroller file had stated 3 replicas. Mind you the replicationcontroller has spawn 2 pods, given that web1 already existed, now, how did it know that web1 was part of the replicationcontroller, this is due to the labels and selectors; it matches the same metadata under the labels in the web-rc.yaml file. 396 | 397 | Let's try to delete one of the pods and see what happens, previously when we simply killed the pod it just got deleted, now this is different, as the pods are associated with the replicationcontroller and replicationcontroller has a desired number and that number must be maintained at all time. 398 | 399 | ```console 400 | $ kubectl delete pod web-5cds7 401 | pod "web-5cds7" deleted 402 | $ kubectl get po 403 | NAME READY STATUS RESTARTS AGE 404 | mysql 1/1 Running 0 2h 405 | web-5cds7 0/2 Terminating 0 17m 406 | web-djv98 0/2 ContainerCreating 0 4s 407 | web1 2/2 Running 0 1h 408 | ``` 409 | 410 | While pod web-5cds7 is getting deleted at the same time a new pod web-djv98 is being generated in making sure that the total pod available remains at 10, as 10 is the desired number of pods that was declared in the web-rc.yaml file. 411 | 412 | We can also use the command line to scale the pod via replicationcontroller, as shown below. 413 | 414 | ```console 415 | $ kubectl scale rc web --replicas=4 416 | replicationcontroller "web" scaled 417 | $ kubectl get po 418 | NAME READY STATUS RESTARTS AGE 419 | mysql 1/1 Running 0 2h 420 | web-5cds7 0/2 Running 0 17m 421 | web-djv98 0/2 Running 0 4s 422 | web-dp86d 0/2 ContainerCreating 0 3s 423 | web1 2/2 Running 0 1h 424 | ``` 425 | 426 | Time to put to test out python and redis application. 
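Before running the test, it helps to know what to look for: `app.py` in this lesson caches each successful MySQL lookup in Redis for roughly 36 seconds and appends "(c)" to any response served from that cache. So, once the init and insert steps below have been run, repeating the same query quickly should return the cached value. A sketch of that check, assuming the node IP and the NodePort (32286) reported by `kubectl get svc` below:

```console
$ curl http://$(minikube ip):32286/users/1   # first call is read from MySQL
$ curl http://$(minikube ip):32286/users/1   # an immediate repeat is served from Redis and ends with "(c)"
```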
427 | 428 | ```console 429 | $ kubectl get svc 430 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 431 | kubernetes ClusterIP 10.96.0.1 443/TCP 8d 432 | mysql ClusterIP 10.98.129.95 3306/TCP 2h 433 | web NodePort 10.109.0.27 80:31282/TCP 2h 434 | ``` 435 | 436 | Before we run the test, let's delete everything and start from scratch. To do that, I'll delete the services, pods, and replicationcontrollers. 437 | 438 | ```console 439 | $ kubectl delete -f db-pod.yaml -f db-svc.yaml -f web-rc.yaml -f web-svc.yaml 440 | pod "mysql" deleted 441 | service "mysql" deleted 442 | replicationcontroller "web" deleted 443 | service "web" deleted 444 | ``` 445 | 446 | Now that everything is clean, let's start. 447 | 448 | ```console 449 | $ kubectl create -f db-pod.yaml -f db-svc.yaml -f web-rc.yaml -f web-svc.yaml 450 | pod "mysql" created 451 | service "mysql" created 452 | replicationcontroller "web" created 453 | service "web" created 454 | $ kubectl get po 455 | NAME READY STATUS RESTARTS AGE 456 | mysql 1/1 Running 0 20s 457 | web-mvg7d 0/2 ContainerCreating 0 20s 458 | web-sq9np 0/2 ContainerCreating 0 20s 459 | web-w6h86 0/2 ContainerCreating 0 20s 460 | $ kubectl get svc 461 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 462 | kubernetes ClusterIP 10.96.0.1 443/TCP 8d 463 | mysql ClusterIP 10.111.177.68 3306/TCP 33s 464 | web NodePort 10.106.191.29 80:32286/TCP 33s 465 | ``` 466 | 467 | We have a series of requests to issue to populate the database. First, initialize the database schema. 468 | 469 | ```console 470 | curl http://192.168.99.100:32286/init 471 | ``` 472 | 473 | Insert user 474 | 475 | ```console 476 | curl -H "Content-Type: application/json" -X POST -d '{"uid": "1", "user": "jon snow"}' http://192.168.99.100:32286/users/add 477 | ``` 478 | 479 | Query user 480 | 481 | ```console 482 | curl http://192.168.99.100:32286/users/1 483 | jon snow 484 | ``` 485 | 486 | ## Clear Lab 487 | 488 | ```console 489 | $ kubectl delete -f db-pod.yaml -f db-svc.yaml -f web-rc.yaml -f web-svc.yaml 490 | pod "mysql" deleted 491 | service "mysql" deleted 492 | replicationcontroller "web" deleted 493 | service "web" deleted 494 | ``` 495 | 496 | ## Summary 497 | 498 | * Pods are the smallest unit of deployment in Kubernetes 499 | * Multiple containers share the context of a Pod 500 | * Replica Sets are the next generation Replication Controllers 501 | * Replication Controllers ensure high availability of Pods 502 | 503 | Reference: 504 | * [Kubernetes Webinar Series - A Closer Look at Pods and Replicas](https://www.youtube.com/watch?v=CU-nNEY6Hfg&index=3&list=PLF3s2WICJlqOiymMaTLjwwHz-MSVbtJPQ) -------------------------------------------------------------------------------- /03-lesson/app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | from flask import Response 3 | from flask import request 4 | from redis import Redis 5 | from datetime import datetime 6 | import MySQLdb 7 | import sys 8 | import redis 9 | import time 10 | import hashlib 11 | import os 12 | import json 13 | 14 | app = Flask(__name__) 15 | startTime = datetime.now() 16 | R_SERVER = redis.Redis(host=os.environ.get('REDIS_HOST', 'redis'), port=6379) 17 | db = MySQLdb.connect("mysql","root","password") 18 | cursor = db.cursor() 19 | 20 | @app.route('/init') 21 | def init(): 22 | cursor.execute("DROP DATABASE IF EXISTS USERDB") 23 | cursor.execute("CREATE DATABASE USERDB") 24 | cursor.execute("USE USERDB") 25 | sql = """CREATE TABLE users ( 26 | ID int, 27 | USER char(30) 28 | )""" 29 |
cursor.execute(sql) 30 | db.commit() 31 | return "DB Init done" 32 | 33 | @app.route("/users/add", methods=['POST']) 34 | def add_users(): 35 | req_json = request.get_json() 36 | cursor.execute("INSERT INTO USERDB.users (ID, USER) VALUES (%s,%s)", (req_json['uid'], req_json['user'])) 37 | db.commit() 38 | return Response("Added", status=200, mimetype='application/json') 39 | 40 | @app.route('/users/') 41 | def get_users(uid): 42 | hash = hashlib.sha224(str(uid)).hexdigest() 43 | key = "sql_cache:" + hash 44 | 45 | if (R_SERVER.get(key)): 46 | return R_SERVER.get(key) + "(c)" 47 | else: 48 | cursor.execute("select USER from USERDB.users where ID=" + str(uid)) 49 | data = cursor.fetchone() 50 | if data: 51 | R_SERVER.set(key,data[0]) 52 | R_SERVER.expire(key, 36); 53 | return R_SERVER.get(key) 54 | else: 55 | return "Record not found" 56 | 57 | if __name__ == "__main__": 58 | app.run(host="0.0.0.0", port=5000, debug=True) -------------------------------------------------------------------------------- /03-lesson/db-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: mysql 5 | labels: 6 | name: mysql 7 | app: demo 8 | spec: 9 | containers: 10 | - name: mysql 11 | image: mysql:5.7.26 12 | ports: 13 | - containerPort: 3306 14 | protocol: TCP 15 | env: 16 | - name: "MYSQL_ROOT_PASSWORD" 17 | value: "password" -------------------------------------------------------------------------------- /03-lesson/db-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: mysql 5 | labels: 6 | name: mysql 7 | app: demo 8 | spec: 9 | ports: 10 | - port: 3306 11 | name: mysql 12 | targetPort: 3306 13 | selector: 14 | name: mysql 15 | app: demo -------------------------------------------------------------------------------- /03-lesson/demo.sh: -------------------------------------------------------------------------------- 1 | # Deploy Nginx container 2 | kubectl run my-web --image=nginx --port=80 3 | 4 | # Expose Nginx container 5 | kubectl expose deployment my-web --target-port=80 --type=NodePort 6 | 7 | # Get the node IP for minikube 8 | minikube ip 9 | 10 | # Check the NodePort 11 | kubectl describe svc my-web 12 | 13 | # Access Ngnix 14 | PORT=$(kubectl get svc my-web -o go-template='{{(index .spec.ports 0).nodePort}}') 15 | -------------------------------------------------------------------------------- /03-lesson/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | mysql: 2 | image: mysql:latest 3 | container_name: mysql 4 | environment: 5 | MYSQL_ROOT_PASSWORD: password 6 | redis: 7 | image: redis:latest 8 | container_name: redis 9 | web: 10 | build: . 
11 | ports: 12 | - "5000:5000" 13 | links: 14 | - redis:redis 15 | - mysql:mysql 16 | -------------------------------------------------------------------------------- /03-lesson/helper.sh: -------------------------------------------------------------------------------- 1 | export NODE_IP=192.168.99.100 2 | export NODE_PORT=31265 3 | 4 | curl http://192.168.99.100:31265/users/add/init 5 | 6 | curl -i -H "Content-Type: application/json" -X POST -d '{"uid": "1", "user":"John Doe"}' http://192.168.99.100:31265/users/add 7 | curl -i -H "Content-Type: application/json" -X POST -d '{"uid": "2", "user":"Jane Doe"}' http://192.168.99.100:31265/users/add 8 | curl -i -H "Content-Type: application/json" -X POST -d '{"uid": "3", "user":"Bill Collins"}' http://192.168.99.100:31265/users/add 9 | curl -i -H "Content-Type: application/json" -X POST -d '{"uid": "4", "user":"Mike Taylor"}' http://192.168.99.100:31265/users/add 10 | 11 | curl http://192.168.99.100:31265/users/add/users/1 12 | 13 | 14 | curl -i -H "Content-Type: application/json" -X POST -d '{"uid": "1", "user":"John Doe"}' http://192.168.99.100:31265/users/add -------------------------------------------------------------------------------- /03-lesson/images/image-03-01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/03-lesson/images/image-03-01.png -------------------------------------------------------------------------------- /03-lesson/images/image-03-02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/03-lesson/images/image-03-02.png -------------------------------------------------------------------------------- /03-lesson/images/image-03-03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/03-lesson/images/image-03-03.png -------------------------------------------------------------------------------- /03-lesson/mypod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: mypod 5 | labels: 6 | app: demo 7 | env: test 8 | spec: 9 | containers: 10 | - name: nginx 11 | image: nginx 12 | ports: 13 | - name: http 14 | containerPort: 80 15 | protocol: TCP 16 | -------------------------------------------------------------------------------- /03-lesson/requirements.txt: -------------------------------------------------------------------------------- 1 | flask 2 | redis 3 | mysql-python -------------------------------------------------------------------------------- /03-lesson/web-pod-1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: web1 5 | labels: 6 | name: web 7 | app: demo 8 | spec: 9 | containers: 10 | - name: redis 11 | image: redis 12 | ports: 13 | - containerPort: 6379 14 | name: redis 15 | protocol: TCP 16 | - name: python 17 | image: janakiramm/py-red 18 | env: 19 | - name: "REDIS_HOST" 20 | value: "localhost" 21 | ports: 22 | - containerPort: 5000 23 | name: http 24 | protocol: TCP -------------------------------------------------------------------------------- /03-lesson/web-pod-2.yaml: -------------------------------------------------------------------------------- 
1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: web2 5 | labels: 6 | name: web 7 | app: demo 8 | spec: 9 | containers: 10 | - name: redis 11 | image: redis 12 | ports: 13 | - containerPort: 6379 14 | name: redis 15 | protocol: TCP 16 | - name: python 17 | image: janakiramm/py-red 18 | env: 19 | - name: "REDIS_HOST" 20 | value: "localhost" 21 | ports: 22 | - containerPort: 5000 23 | name: http 24 | protocol: TCP -------------------------------------------------------------------------------- /03-lesson/web-rc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: web 5 | labels: 6 | name: web 7 | app: demo 8 | spec: 9 | replicas: 3 10 | template: 11 | metadata: 12 | labels: 13 | name: web 14 | spec: 15 | containers: 16 | - name: redis 17 | image: redis 18 | ports: 19 | - containerPort: 6379 20 | name: redis 21 | protocol: TCP 22 | - name: python 23 | image: janakiramm/py-red 24 | env: 25 | - name: "REDIS_HOST" 26 | value: "localhost" 27 | ports: 28 | - containerPort: 5000 29 | name: http 30 | protocol: TCP 31 | -------------------------------------------------------------------------------- /03-lesson/web-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: web 5 | labels: 6 | name: web 7 | app: demo 8 | spec: 9 | selector: 10 | name: web 11 | type: NodePort 12 | ports: 13 | - port: 80 14 | name: http 15 | targetPort: 5000 16 | protocol: TCP -------------------------------------------------------------------------------- /04-lesson/Deploy/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:8.16.0-alpine 2 | RUN mkdir -p /usr/src/app 3 | WORKDIR /usr/src/app 4 | COPY app.js ./ 5 | # COPY package.json ./ 6 | RUN chmod +x . 
7 | RUN npm install express@4.17.0 8 | EXPOSE 8080 9 | CMD ["node", "app.js"] -------------------------------------------------------------------------------- /04-lesson/Deploy/app.js: -------------------------------------------------------------------------------- 1 | var express = require('express'); 2 | var app = express(); 3 | 4 | var port = process.env.PORT || 8080; 5 | var color = process.env.COLOR || 'no color assigned yet'; 6 | var router = express.Router(); 7 | 8 | router.get('/', function(req, res){ 9 | res.json({ 'color': color}); 10 | }); 11 | 12 | app.use('/', router); 13 | 14 | app.listen(port); 15 | console.log('Server Started at ' + port + ' ' + 'color :' + color); 16 | 17 | 18 | -------------------------------------------------------------------------------- /04-lesson/Deploy/color-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: red 5 | labels: 6 | color: red 7 | spec: 8 | containers: 9 | - image: aldredb/node_color:1 10 | name: red 11 | env: 12 | - name: "COLOR" 13 | value: "red" 14 | ports: 15 | - containerPort: 8080 16 | --- 17 | apiVersion: v1 18 | kind: Pod 19 | metadata: 20 | name: green 21 | labels: 22 | color: green 23 | spec: 24 | containers: 25 | - image: aldredb/node_color:1 26 | name: green 27 | env: 28 | - name: "COLOR" 29 | value: "green" 30 | ports: 31 | - containerPort: 8080 32 | --- 33 | apiVersion: v1 34 | kind: Pod 35 | metadata: 36 | name: blue 37 | labels: 38 | color: blue 39 | spec: 40 | containers: 41 | - image: aldredb/node_color:1 42 | name: blue 43 | env: 44 | - name: "COLOR" 45 | value: "blue" 46 | ports: 47 | - containerPort: 8080 48 | --- 49 | apiVersion: v1 50 | kind: Pod 51 | metadata: 52 | name: yellow 53 | labels: 54 | color: yellow 55 | spec: 56 | containers: 57 | - image: aldredb/node_color:1 58 | name: yellow 59 | env: 60 | - name: "COLOR" 61 | value: "yellow" 62 | ports: 63 | - containerPort: 8080 -------------------------------------------------------------------------------- /04-lesson/Deploy/color-rc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: red 5 | spec: 6 | replicas: 3 7 | template: 8 | metadata: 9 | labels: 10 | color: red 11 | spec: 12 | containers: 13 | - image: aldredb/node_color:1 14 | name: red 15 | env: 16 | - name: "COLOR" 17 | value: "red" 18 | ports: 19 | - containerPort: 8080 20 | --- 21 | apiVersion: v1 22 | kind: ReplicationController 23 | metadata: 24 | name: green 25 | spec: 26 | replicas: 3 27 | template: 28 | metadata: 29 | labels: 30 | color: green 31 | spec: 32 | containers: 33 | - image: aldredb/node_color:1 34 | name: green 35 | env: 36 | - name: "COLOR" 37 | value: "green" 38 | ports: 39 | - containerPort: 8080 40 | --- 41 | apiVersion: v1 42 | kind: ReplicationController 43 | metadata: 44 | name: blue 45 | spec: 46 | replicas: 3 47 | template: 48 | metadata: 49 | labels: 50 | color: blue 51 | spec: 52 | containers: 53 | - image: aldredb/node_color:1 54 | name: blue 55 | env: 56 | - name: "COLOR" 57 | value: "blue" 58 | ports: 59 | - containerPort: 8080 60 | --- 61 | apiVersion: v1 62 | kind: ReplicationController 63 | metadata: 64 | name: yellow 65 | spec: 66 | replicas: 3 67 | template: 68 | metadata: 69 | labels: 70 | color: yellow 71 | spec: 72 | containers: 73 | - image: aldredb/node_color:1 74 | name: yellow 75 | env: 76 | - name: "COLOR" 77 | value: "yellow" 78 | ports: 79 | - 
containerPort: 8080 -------------------------------------------------------------------------------- /04-lesson/Deploy/color-srv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: red 5 | spec: 6 | selector: 7 | color: red 8 | type: NodePort 9 | ports: 10 | - name: http 11 | nodePort: 31001 12 | port: 80 13 | targetPort: 8080 14 | protocol: TCP 15 | --- 16 | apiVersion: v1 17 | kind: Service 18 | metadata: 19 | name: green 20 | spec: 21 | selector: 22 | color: green 23 | type: NodePort 24 | ports: 25 | - name: http 26 | nodePort: 31002 27 | port: 80 28 | targetPort: 8080 29 | protocol: TCP 30 | --- 31 | apiVersion: v1 32 | kind: Service 33 | metadata: 34 | name: blue 35 | spec: 36 | selector: 37 | color: blue 38 | type: NodePort 39 | ports: 40 | - name: http 41 | nodePort: 31003 42 | port: 80 43 | targetPort: 8080 44 | protocol: TCP 45 | --- 46 | apiVersion: v1 47 | kind: Service 48 | metadata: 49 | name: yellow 50 | spec: 51 | selector: 52 | color: yellow 53 | type: NodePort 54 | ports: 55 | - name: http 56 | nodePort: 31004 57 | port: 80 58 | targetPort: 8080 59 | protocol: TCP -------------------------------------------------------------------------------- /04-lesson/Deploy/color-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: red 5 | spec: 6 | selector: 7 | color: red 8 | type: NodePort 9 | ports: 10 | - name: http 11 | nodePort: 31001 12 | port: 80 13 | targetPort: 8080 14 | protocol: TCP 15 | --- 16 | apiVersion: v1 17 | kind: Service 18 | metadata: 19 | name: green 20 | spec: 21 | selector: 22 | color: green 23 | type: NodePort 24 | ports: 25 | - name: http 26 | nodePort: 31002 27 | port: 80 28 | targetPort: 8080 29 | protocol: TCP 30 | --- 31 | apiVersion: v1 32 | kind: Service 33 | metadata: 34 | name: blue 35 | spec: 36 | selector: 37 | color: blue 38 | type: NodePort 39 | ports: 40 | - name: http 41 | nodePort: 31003 42 | port: 80 43 | targetPort: 8080 44 | protocol: TCP 45 | --- 46 | apiVersion: v1 47 | kind: Service 48 | metadata: 49 | name: yellow 50 | spec: 51 | selector: 52 | color: yellow 53 | type: NodePort 54 | ports: 55 | - name: http 56 | nodePort: 31004 57 | port: 80 58 | targetPort: 8080 59 | protocol: TCP -------------------------------------------------------------------------------- /04-lesson/README.md: -------------------------------------------------------------------------------- 1 | # Understanding Service Discovery in Kubernetes 2 | 3 | ## Objectives 4 | * What is a Kubernetes Service 5 | * Service Discovery through Environment Variables 6 | * Service Discovery through DNS 7 | * Service Types 8 | * ClusterIP 9 | * NodePort 10 | * LoadBalancer 11 | 12 | --- 13 | ## Quick Recap - Pods and Replication Controllers? 14 | * Pods are fundamental units of deployment 15 | * Each Pod has one or more containers that may expose ports 16 | * Each Pod has a routable IP address assigned to it 17 | * Labels are used to logically identify Pods that match a specific criterion 18 | * Replication Controller's Selector matches the Pods based on Labels 19 | * Replication Controller maintains the desired count of Pods all the time 20 | * Pod IP address may change during its lifetime 21 | 22 | ## What is a Kubernetes Service? 
23 | * A Service is an abstraction of logical set of Pods defined by a policy 24 | * It acts as intermediary for Pods to talks to each other 25 | * Selectors are used for accessing all the Pods that match a specific Label 26 | * Service is an object in Kubernetes - similar to Pods and RCs (replicationcontroller) 27 | * Each Service exposes one of more _ports_ and _targetPorts_ 28 | * The _targetPort_ is mapped to the port exposed by matching Pods 29 | * Kuberetes Services support TCP or UDP protocol 30 | ## Understanding Services 31 | ![Understanding Services](./images/image-04-01.png) 32 | 33 | ## Demo 34 | ### Creating a Service 35 | No Pods, Services and ReplicationControllers are running, as all were deleted before we started the exercise. 36 | 37 | ```console 38 | $ kubectl get po 39 | No resources found. 40 | $ cd Deploy && ls 41 | Dockerfile app.js color-pod.yaml color-rc.yaml color-svc.yaml 42 | $ kubectl create -f color-pod.yaml -f color-rc.yaml 43 | pod "red" created 44 | pod "green" created 45 | pod "blue" created 46 | pod "yellow" created 47 | replicationcontroller "red" created 48 | replicationcontroller "green" created 49 | replicationcontroller "blue" created 50 | replicationcontroller "yellow" created 51 | $ kubectl get po 52 | NAME READY STATUS RESTARTS AGE 53 | blue 0/1 ContainerCreating 0 51s 54 | blue-594mg 0/1 ContainerCreating 0 51s 55 | blue-tzwl6 0/1 ContainerCreating 0 51s 56 | green 0/1 ContainerCreating 0 51s 57 | green-695g9 0/1 ContainerCreating 0 51s 58 | green-9blsq 0/1 ContainerCreating 0 51s 59 | red 0/1 ContainerCreating 0 51s 60 | red-2zr2k 0/1 ContainerCreating 0 51s 61 | red-hsg88 0/1 ContainerCreating 0 51s 62 | yellow 0/1 ContainerCreating 0 51s 63 | yellow-79mr6 0/1 ContainerCreating 0 51s 64 | yellow-vhxjk 0/1 ContainerCreating 0 51s 65 | $ kubectl describe pod red 66 | Name: red 67 | Namespace: default 68 | Node: minikube/192.168.99.100 69 | Start Time: Mon, 11 Dec 2017 17:09:00 +0800 70 | Labels: color=red 71 | Annotations: 72 | Status: Pending 73 | IP: 74 | Controlled By: ReplicationController/red 75 | Containers: 76 | red: 77 | Container ID: 78 | Image: janakiramm/color 79 | Image ID: 80 | Port: 8080/TCP 81 | State: Waiting 82 | Reason: ContainerCreating 83 | Ready: False 84 | Restart Count: 0 85 | Environment: 86 | COLOR: red 87 | Mounts: 88 | /var/run/secrets/kubernetes.io/serviceaccount from default-token-zk79b (ro) 89 | Conditions: 90 | Type Status 91 | Initialized True 92 | Ready False 93 | PodScheduled True 94 | Volumes: 95 | default-token-zk79b: 96 | Type: Secret (a volume populated by a Secret) 97 | SecretName: default-token-zk79b 98 | Optional: false 99 | QoS Class: BestEffort 100 | Node-Selectors: 101 | Tolerations: 102 | Events: 103 | Type Reason Age From Message 104 | ---- ------ ---- ---- ------- 105 | Normal Scheduled 2m default-scheduler Successfully assigned red to minikube 106 | Normal SuccessfulMountVolume 2m kubelet, minikube MountVolume.SetUp succeeded for volume "default-token-zk79b" 107 | Normal Pulling 2m kubelet, minikube pulling image "janakiramm/color" 108 | $ kubectl create -f color-svc.yaml 109 | service "red" created 110 | service "green" created 111 | service "blue" created 112 | service "yellow" created 113 | $ kubectl get svc 114 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 115 | blue NodePort 10.102.44.99 80:31003/TCP 2m 116 | green NodePort 10.100.116.176 80:31002/TCP 2m 117 | kubernetes ClusterIP 10.96.0.1 443/TCP 8d 118 | red NodePort 10.104.43.49 80:31001/TCP 2m 119 | yellow NodePort 10.105.240.38 
80:31004/TCP 2m 120 | $ kubectl describe svc red 121 | Name: red 122 | Namespace: default 123 | Labels: 124 | Annotations: 125 | Selector: color=red 126 | Type: NodePort 127 | IP: 10.104.43.49 128 | Port: http 80/TCP 129 | TargetPort: 8080/TCP 130 | NodePort: http 31001/TCP 131 | Endpoints: 172.17.0.13:8080,172.17.0.15:8080,172.17.0.3:8080 132 | Session Affinity: None 133 | External Traffic Policy: Cluster 134 | Events: 135 | $ curl 192.168.99.100:31001 136 | {"color":"red"} 137 | ``` 138 | 139 | This is a tangible proof that the color coded services work... 140 | 141 | |![Red](./images/image-04-02.png) |![Green](./images/image-04-03.png) |![Blue](./images/image-04-04.png) |![Yellow](./images/image-04-05.png)| 142 | 143 | Clear the environment 144 | 145 | ```console 146 | kubectl delete -f color-pod.yaml -f color-rc.yaml -f color-svc.yaml 147 | $ pod "red" deleted 148 | $ pod "green" deleted 149 | $ pod "blue" deleted 150 | $ pod "yellow" deleted 151 | $ replicationcontroller "red" deleted 152 | $ replicationcontroller "green" deleted 153 | $ replicationcontroller "blue" deleted 154 | $ replicationcontroller "yellow" deleted 155 | $ service "red" deleted 156 | $ service "green" deleted 157 | $ service "blue" deleted 158 | $ service "yellow" deleted 159 | ``` 160 | 161 | ## Discovering Service - DNS 162 | 163 | * The DNS Server watches Kubernetes API for new Services 164 | * The DNS Server creates a set of DNS records for each Service 165 | * Services can be resolved by the name within the same namespace 166 | * Pods in other namespaces can access the Service by adding the namespace to the DNS path 167 | * _my-service.my-namespace_ 168 | 169 | ## Discovering Services - Env Vars 170 | * Kubernetes creates Docker Link compatible environment variables in all Pods 171 | * Containers can use the environment variable to talk to the service endpoint. 172 | ## Service Types 173 | * **ClusterIP** 174 | * Service is reachable only from inside of the cluster. 175 | * **NodePort** 176 | * Service is reachable through :NodePort address. 177 | * **LoadBalancer** 178 | * Service is reachable through an external load balancer mapped to :NodePort address 179 | 180 | ## Demo 181 | ### Exposing a Service Internally and Externally 182 | ```console 183 | cd ../todo-app 184 | $ kubectl create -f db-pod.yaml -f db-svc.yaml -f web-pod.yaml -f web-rc.yaml -f web-svc.yaml 185 | pod "db" created 186 | service "db" created 187 | pod "web" created 188 | replicationcontroller "web" created 189 | service "web" created 190 | $ kubectl get po 191 | NAME READY STATUS RESTARTS AGE 192 | db 1/1 Running 0 32s 193 | web 1/1 Running 0 32s 194 | web-vk4tx 1/1 Running 0 32s 195 | $ kubectl get svc 196 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 197 | db ClusterIP 10.110.111.84 27017/TCP 1m 198 | kubernetes ClusterIP 10.96.0.1 443/TCP 8d 199 | web NodePort 10.108.13.113 80:30653/TCP 1m 200 | $ kubectl describe svc web 201 | Name: web 202 | Namespace: default 203 | Labels: app=todoapp 204 | name=web 205 | Annotations: 206 | Selector: name=web 207 | Type: NodePort 208 | IP: 10.108.13.113 209 | Port: http 80/TCP 210 | TargetPort: 3000/TCP 211 | NodePort: http 30653/TCP 212 | Endpoints: 172.17.0.3:3000,172.17.0.8:3000 213 | Session Affinity: None 214 | External Traffic Policy: Cluster 215 | Events: 216 | ``` 217 | 218 | I'll call the service via minikube. 219 | 220 | ```console 221 | λ minikube service web 222 | Opening kubernetes service default/web in default browser... 
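# The same endpoint can be reached without opening a browser; a sketch assuming
# this minikube version supports the --url flag, which prints the service URL instead:
λ minikube service web --url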
223 | ``` 224 | 225 | ![Express Todo Example](./images/image-04-07.png) 226 | Now, it's time to access the image via `docker exec` command: 227 | ```console 228 | $ kubectl exec -it web /bin/sh 229 | # pwd 230 | /usr/src/app 231 | # env 232 | KUBERNETES_SERVICE_PORT=443 233 | KUBERNETES_PORT=tcp://10.96.0.1:443 234 | NODE_VERSION=0.10.40 235 | HOSTNAME=web 236 | DB_SERVICE_PORT=27017 237 | DB_PORT=tcp://10.110.111.84:27017 238 | WEB_SERVICE_PORT=80 239 | WEB_PORT=tcp://10.108.13.113:80 240 | HOME=/root 241 | DB_PORT_27017_TCP_ADDR=10.110.111.84 242 | DB_PORT_27017_TCP_PORT=27017 243 | DB_PORT_27017_TCP_PROTO=tcp 244 | WEB_PORT_80_TCP_ADDR=10.108.13.113 245 | WEB_PORT_80_TCP_PORT=80 246 | WEB_PORT_80_TCP_PROTO=tcp 247 | DB_PORT_27017_TCP=tcp://10.110.111.84:27017 248 | KUBERNETES_PORT_443_TCP_ADDR=10.96.0.1 249 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 250 | KUBERNETES_PORT_443_TCP_PORT=443 251 | NPM_VERSION=2.14.1 252 | KUBERNETES_PORT_443_TCP_PROTO=tcp 253 | WEB_PORT_80_TCP=tcp://10.108.13.113:80 254 | KUBERNETES_SERVICE_PORT_HTTPS=443 255 | KUBERNETES_PORT_443_TCP=tcp://10.96.0.1:443 256 | WEB_SERVICE_PORT_HTTP=80 257 | KUBERNETES_SERVICE_HOST=10.96.0.1 258 | PWD=/usr/src/app 259 | DB_SERVICE_HOST=10.110.111.84 260 | DB_SERVICE_PORT_DB=27017 261 | WEB_SERVICE_HOST=10.108.13.113 262 | # ping db 263 | PING db.default.svc.cluster.local (10.110.111.84): 56 data bytes 264 | ``` 265 | 266 | ## Clear Lab 267 | 268 | ```console 269 | kubectl delete -f db-pod.yaml -f db-svc.yaml -f web-pod.yaml -f web-rc.yaml -f web-svc.yaml 270 | $ pod "db" deleted 271 | $ service "db" deleted 272 | $ pod "web" deleted 273 | $ replicationcontroller "web" deleted 274 | $ service "web" deleted 275 | ``` 276 | 277 | ## Summary 278 | * What is a Kubernetes Service 279 | * Service Discovery through Environment Variables 280 | * Service Discovery through DNS 281 | * Service Types 282 | * ClusterIP 283 | * NodePort 284 | * LoadBalancer 285 | 286 | Reference: 287 | * [Understanding Service Discovery in Kubernetes](https://www.youtube.com/watch?v=NrzrpyMLWes&list=PLF3s2WICJlqOiymMaTLjwwHz-MSVbtJPQ&index=4) 288 | -------------------------------------------------------------------------------- /04-lesson/images/image-04-01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/04-lesson/images/image-04-01.png -------------------------------------------------------------------------------- /04-lesson/images/image-04-02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/04-lesson/images/image-04-02.png -------------------------------------------------------------------------------- /04-lesson/images/image-04-03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/04-lesson/images/image-04-03.png -------------------------------------------------------------------------------- /04-lesson/images/image-04-04.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/04-lesson/images/image-04-04.png -------------------------------------------------------------------------------- 
/04-lesson/images/image-04-05.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/04-lesson/images/image-04-05.png -------------------------------------------------------------------------------- /04-lesson/images/image-04-06.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/04-lesson/images/image-04-06.png -------------------------------------------------------------------------------- /04-lesson/images/image-04-07.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/04-lesson/images/image-04-07.png -------------------------------------------------------------------------------- /04-lesson/todo-app/app.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Module dependencies. 3 | */ 4 | 5 | // mongoose setup 6 | require( './db' ); 7 | 8 | var express = require( 'express' ); 9 | var http = require( 'http' ); 10 | var path = require( 'path' ); 11 | var engine = require( 'ejs-locals' ); 12 | var favicon = require( 'serve-favicon' ); 13 | var cookieParser = require( 'cookie-parser' ); 14 | var bodyParser = require( 'body-parser' ); 15 | var methodOverride = require( 'method-override' ); 16 | var logger = require( 'morgan' ); 17 | var errorHandler = require( 'errorhandler' ); 18 | var static = require( 'serve-static' ); 19 | 20 | var app = express(); 21 | var routes = require( './routes' ); 22 | 23 | 24 | // all environments 25 | app.set( 'port', process.env.PORT || 3000 ); 26 | app.engine( 'ejs', engine ); 27 | app.set( 'views', path.join( __dirname, 'views' )); 28 | app.set( 'view engine', 'ejs' ); 29 | app.use( favicon( __dirname + '/public/favicon.ico' )); 30 | app.use( logger( 'dev' )); 31 | app.use( methodOverride()); 32 | app.use( cookieParser()); 33 | app.use( bodyParser.json()); 34 | app.use( bodyParser.urlencoded({ extended : true })); 35 | 36 | // Routes 37 | //app.use( routes.current_user ); 38 | app.get( '/', routes.index ); 39 | app.post( '/create', routes.create ); 40 | app.get( '/destroy/:id', routes.destroy ); 41 | app.get( '/edit/:id', routes.edit ); 42 | app.post( '/update/:id', routes.update ); 43 | 44 | app.use( static( path.join( __dirname, 'public' ))); 45 | 46 | // development only 47 | if( 'development' == app.get( 'env' )){ 48 | app.use( errorHandler()); 49 | } 50 | 51 | http.createServer( app ).listen( app.get( 'port' ), function (){ 52 | console.log( 'Express server listening on port ' + app.get( 'port' )); 53 | }); 54 | -------------------------------------------------------------------------------- /04-lesson/todo-app/db-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: db 5 | labels: 6 | name: mongo 7 | app: todoapp 8 | spec: 9 | containers: 10 | - image: mongo:4.0.9-xenial 11 | name: mongo 12 | ports: 13 | - name: mongo 14 | containerPort: 27017 -------------------------------------------------------------------------------- /04-lesson/todo-app/db-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: db 5 | labels: 6 | name: mongo 7 | app: todoapp 
8 | spec: 9 | selector: 10 | name: mongo 11 | type: ClusterIP 12 | ports: 13 | - name: db 14 | port: 27017 15 | targetPort: 27017 16 | -------------------------------------------------------------------------------- /04-lesson/todo-app/db.js: -------------------------------------------------------------------------------- 1 | var mongoose = require( 'mongoose' ); 2 | var Schema = mongoose.Schema; 3 | var DBHost=process.env["DBHOST"]; 4 | var Todo = new Schema({ 5 | user_id : String, 6 | content : String, 7 | updated_at : Date 8 | }); 9 | 10 | mongoose.model( 'Todo', Todo ); 11 | 12 | mongoose.connect( 'mongodb://db/express-todo' ); 13 | -------------------------------------------------------------------------------- /04-lesson/todo-app/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name" : "todo", 3 | "version" : "0.0.1", 4 | "private" : true, 5 | "dependencies" : { 6 | "body-parser" : "1.9.0", 7 | "cookie-parser" : "1.3.3", 8 | "express" : "4.9.5", 9 | "ejs" : "1.0.0", 10 | "errorhandler" : "1.2.0", 11 | "method-override" : "2.2.0", 12 | "ejs-locals" : "1.0.2", 13 | "mongoose" : "4.4.12", 14 | "morgan" : "1.3.2", 15 | "serve-favicon" : "2.1.5", 16 | "serve-static" : "1.6.3" 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /04-lesson/todo-app/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/04-lesson/todo-app/public/favicon.ico -------------------------------------------------------------------------------- /04-lesson/todo-app/public/images/delete.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/04-lesson/todo-app/public/images/delete.png -------------------------------------------------------------------------------- /04-lesson/todo-app/public/images/dreamerslab.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/04-lesson/todo-app/public/images/dreamerslab.png -------------------------------------------------------------------------------- /04-lesson/todo-app/public/javascripts/ga.js: -------------------------------------------------------------------------------- 1 | // Change UA-XXXXX-X to be your site's ID 2 | var _gaq=[['_setAccount','UA-20960410-1'],['_trackPageview']]; 3 | (function(d,t){var g=d.createElement(t),s=d.getElementsByTagName(t)[0];g.async=1; 4 | g.src=('https:'==location.protocol?'//ssl':'//www')+'.google-analytics.com/ga.js'; 5 | s.parentNode.insertBefore(g,s);}(document,'script')); -------------------------------------------------------------------------------- /04-lesson/todo-app/public/robots.txt: -------------------------------------------------------------------------------- 1 | User-agent: * 2 | -------------------------------------------------------------------------------- /04-lesson/todo-app/public/stylesheets/screen.css: -------------------------------------------------------------------------------- 1 | /* line 17, ../../../../../Users/fred/.rvm/gems/ruby-1.9.3-p0/gems/compass-0.12.1/frameworks/compass/stylesheets/compass/reset/_utilities.scss */ 2 | html, body, div, span, applet, object, iframe, 3 | h1, h2, h3, h4, h5, h6, p, blockquote, pre, 4 | a, 
abbr, acronym, address, big, cite, code, 5 | del, dfn, em, img, ins, kbd, q, s, samp, 6 | small, strike, strong, sub, sup, tt, var, 7 | b, u, i, center, 8 | dl, dt, dd, ol, ul, li, 9 | fieldset, form, label, legend, 10 | table, caption, tbody, tfoot, thead, tr, th, td, 11 | article, aside, canvas, details, embed, 12 | figure, figcaption, footer, header, hgroup, 13 | menu, nav, output, ruby, section, summary, 14 | time, mark, audio, video { 15 | margin: 0; 16 | padding: 0; 17 | border: 0; 18 | font-size: 100%; 19 | font: inherit; 20 | vertical-align: baseline; 21 | } 22 | 23 | /* line 20, ../../../../../Users/fred/.rvm/gems/ruby-1.9.3-p0/gems/compass-0.12.1/frameworks/compass/stylesheets/compass/reset/_utilities.scss */ 24 | body { 25 | line-height: 1; 26 | } 27 | 28 | /* line 22, ../../../../../Users/fred/.rvm/gems/ruby-1.9.3-p0/gems/compass-0.12.1/frameworks/compass/stylesheets/compass/reset/_utilities.scss */ 29 | ol, ul { 30 | list-style: none; 31 | } 32 | 33 | /* line 24, ../../../../../Users/fred/.rvm/gems/ruby-1.9.3-p0/gems/compass-0.12.1/frameworks/compass/stylesheets/compass/reset/_utilities.scss */ 34 | table { 35 | border-collapse: collapse; 36 | border-spacing: 0; 37 | } 38 | 39 | /* line 26, ../../../../../Users/fred/.rvm/gems/ruby-1.9.3-p0/gems/compass-0.12.1/frameworks/compass/stylesheets/compass/reset/_utilities.scss */ 40 | caption, th, td { 41 | text-align: left; 42 | font-weight: normal; 43 | vertical-align: middle; 44 | } 45 | 46 | /* line 28, ../../../../../Users/fred/.rvm/gems/ruby-1.9.3-p0/gems/compass-0.12.1/frameworks/compass/stylesheets/compass/reset/_utilities.scss */ 47 | q, blockquote { 48 | quotes: none; 49 | } 50 | /* line 101, ../../../../../Users/fred/.rvm/gems/ruby-1.9.3-p0/gems/compass-0.12.1/frameworks/compass/stylesheets/compass/reset/_utilities.scss */ 51 | q:before, q:after, blockquote:before, blockquote:after { 52 | content: ""; 53 | content: none; 54 | } 55 | 56 | /* line 30, ../../../../../Users/fred/.rvm/gems/ruby-1.9.3-p0/gems/compass-0.12.1/frameworks/compass/stylesheets/compass/reset/_utilities.scss */ 57 | a img { 58 | border: none; 59 | } 60 | 61 | /* line 114, ../../../../../Users/fred/.rvm/gems/ruby-1.9.3-p0/gems/compass-0.12.1/frameworks/compass/stylesheets/compass/reset/_utilities.scss */ 62 | article, aside, details, figcaption, figure, footer, header, hgroup, menu, nav, section, summary { 63 | display: block; 64 | } 65 | 66 | /* line 7, ../sass/screen.sass */ 67 | body { 68 | font: 14px "Lucida Grande", "Lucida Sans Unicode", sans-serif; 69 | } 70 | 71 | /* line 10, ../sass/screen.sass */ 72 | #page-title { 73 | color: #666666; 74 | background-color: #f8f8f8; 75 | font-size: 32px; 76 | line-height: 1.35; 77 | padding: 20px 0; 78 | text-align: center; 79 | text-shadow: 0 1px 1px white; 80 | } 81 | 82 | /* line 19, ../sass/screen.sass */ 83 | .del-btn, .del-btn-edit { 84 | display: inline; 85 | float: right; 86 | background: url("/images/delete.png") no-repeat bottom left; 87 | font: 0/0 serif; 88 | text-shadow: none; 89 | color: transparent; 90 | width: 16px; 91 | height: 18px; 92 | } 93 | 94 | /* line 28, ../sass/screen.sass */ 95 | .del-btn-edit { 96 | width: 20px; 97 | height: 21px; 98 | } 99 | 100 | /* line 32, ../sass/screen.sass */ 101 | #list { 102 | width: 283px; 103 | margin: 0 auto; 104 | padding: 20px 0 15px; 105 | position: relative; 106 | } 107 | 108 | /* line 38, ../sass/screen.sass */ 109 | .item, .item-new { 110 | overflow: hidden; 111 | *zoom: 1; 112 | background-color: #f9f9f9; 113 | border: 1px solid #eeeeee; 114 | 
border-radius: 6px 6px 6px 6px; 115 | list-style: none outside none; 116 | margin: 6px 0 0; 117 | padding: 8px 9px 9px; 118 | position: relative; 119 | text-shadow: 1px 1px 0 white; 120 | width: 250px; 121 | } 122 | /* line 49, ../sass/screen.sass */ 123 | .item:hover, .item-new:hover { 124 | border-color: #9be0f9; 125 | box-shadow: 0 0 5px #a6d5fd; 126 | } 127 | 128 | /* line 53, ../sass/screen.sass */ 129 | .item-new { 130 | padding: 4px 5px; 131 | width: 258px; 132 | } 133 | 134 | /* line 57, ../sass/screen.sass */ 135 | .input, .update-input { 136 | border: 1px solid #cccccc; 137 | color: #666666; 138 | font-family: "Lucida Grande", "Lucida Sans Unicode", sans-serif; 139 | font-size: 15px; 140 | padding: 3px 4px; 141 | width: 248px; 142 | height: 19px; 143 | } 144 | 145 | /* line 66, ../sass/screen.sass */ 146 | .update-input { 147 | width: 220px; 148 | } 149 | 150 | /* line 69, ../sass/screen.sass */ 151 | .content { 152 | color: #777777; 153 | font-size: 1.2em; 154 | text-shadow: 1px 1px 0 white; 155 | } 156 | 157 | /* line 74, ../sass/screen.sass */ 158 | .update-link { 159 | display: inline; 160 | float: left; 161 | font-family: "Lucida Grande", "Lucida Sans Unicode", sans-serif; 162 | color: #666666; 163 | text-decoration: none; 164 | overflow: hidden; 165 | white-space: nowrap; 166 | max-width: 15em; 167 | text-overflow: ellipsis; 168 | } 169 | /* line 83, ../sass/screen.sass */ 170 | .update-link:hover { 171 | color: #333333; 172 | } 173 | 174 | /* line 86, ../sass/screen.sass */ 175 | .update-form { 176 | display: inline; 177 | float: left; 178 | } 179 | 180 | /* line 89, ../sass/screen.sass */ 181 | #footer-wrap { 182 | background-color: #f8f8f8; 183 | } 184 | 185 | /* line 92, ../sass/screen.sass */ 186 | #footer { 187 | overflow: hidden; 188 | *zoom: 1; 189 | width: 210px; 190 | margin: 0 auto; 191 | position: relative; 192 | top: 17px; 193 | } 194 | 195 | /* line 99, ../sass/screen.sass */ 196 | #dreamerslab { 197 | display: inline; 198 | float: left; 199 | font-family: Georgia; 200 | background: url("/images/dreamerslab.png") no-repeat top left; 201 | padding: 0 0 3px 16px; 202 | color: #333333; 203 | text-decoration: none; 204 | font-weight: bold; 205 | } 206 | /* line 107, ../sass/screen.sass */ 207 | #dreamerslab:hover { 208 | text-decoration: underline; 209 | } 210 | 211 | /* line 110, ../sass/screen.sass */ 212 | .footer-content { 213 | display: inline; 214 | float: left; 215 | color: #333333; 216 | font-family: Georgia; 217 | padding: 0 4px; 218 | } 219 | 220 | /* line 116, ../sass/screen.sass */ 221 | #github-link { 222 | color: #666666; 223 | text-decoration: none; 224 | } 225 | /* line 119, ../sass/screen.sass */ 226 | #github-link:hover { 227 | text-decoration: underline; 228 | } 229 | 230 | /* line 10, ../../../../../Users/fred/.rvm/gems/ruby-1.9.3-p0/gems/compass-0.12.1/frameworks/compass/stylesheets/compass/layout/_sticky-footer.scss */ 231 | html, body { 232 | height: 100%; 233 | } 234 | 235 | /* line 12, ../../../../../Users/fred/.rvm/gems/ruby-1.9.3-p0/gems/compass-0.12.1/frameworks/compass/stylesheets/compass/layout/_sticky-footer.scss */ 236 | #layout { 237 | clear: both; 238 | min-height: 100%; 239 | height: auto !important; 240 | height: 100%; 241 | margin-bottom: -48px; 242 | } 243 | /* line 18, ../../../../../Users/fred/.rvm/gems/ruby-1.9.3-p0/gems/compass-0.12.1/frameworks/compass/stylesheets/compass/layout/_sticky-footer.scss */ 244 | #layout #layout-footer { 245 | height: 48px; 246 | } 247 | 248 | /* line 20, 
../../../../../Users/fred/.rvm/gems/ruby-1.9.3-p0/gems/compass-0.12.1/frameworks/compass/stylesheets/compass/layout/_sticky-footer.scss */ 249 | #footer-wrap { 250 | clear: both; 251 | position: relative; 252 | height: 48px; 253 | } 254 | -------------------------------------------------------------------------------- /04-lesson/todo-app/routes/index.js: -------------------------------------------------------------------------------- 1 | var utils = require( '../utils' ); 2 | var mongoose = require( 'mongoose' ); 3 | var Todo = mongoose.model( 'Todo' ); 4 | 5 | exports.index = function ( req, res, next ){ 6 | var user_id = req.cookies ? 7 | req.cookies.user_id : undefined; 8 | 9 | Todo. 10 | find({ user_id : user_id }). 11 | sort( '-updated_at' ). 12 | exec( function ( err, todos ){ 13 | if( err ) return next( err ); 14 | 15 | res.render( 'index', { 16 | title : 'Express Todo Example', 17 | todos : todos 18 | }); 19 | }); 20 | }; 21 | 22 | exports.create = function ( req, res, next ){ 23 | new Todo({ 24 | user_id : req.cookies.user_id, 25 | content : req.body.content, 26 | updated_at : Date.now() 27 | }).save( function ( err, todo, count ){ 28 | if( err ) return next( err ); 29 | 30 | res.redirect( '/' ); 31 | }); 32 | }; 33 | 34 | exports.destroy = function ( req, res, next ){ 35 | Todo.findById( req.params.id, function ( err, todo ){ 36 | var user_id = req.cookies ? 37 | req.cookies.user_id : undefined; 38 | 39 | if( todo.user_id !== user_id ){ 40 | return utils.forbidden( res ); 41 | } 42 | 43 | todo.remove( function ( err, todo ){ 44 | if( err ) return next( err ); 45 | 46 | res.redirect( '/' ); 47 | }); 48 | }); 49 | }; 50 | 51 | exports.edit = function( req, res, next ){ 52 | var user_id = req.cookies ? 53 | req.cookies.user_id : undefined; 54 | 55 | Todo. 56 | find({ user_id : user_id }). 57 | sort( '-updated_at' ). 58 | exec( function ( err, todos ){ 59 | if( err ) return next( err ); 60 | 61 | res.render( 'edit', { 62 | title : 'Express Todo Example', 63 | todos : todos, 64 | current : req.params.id 65 | }); 66 | }); 67 | }; 68 | 69 | exports.update = function( req, res, next ){ 70 | Todo.findById( req.params.id, function ( err, todo ){ 71 | var user_id = req.cookies ? 72 | req.cookies.user_id : undefined; 73 | 74 | if( todo.user_id !== user_id ){ 75 | return utils.forbidden( res ); 76 | } 77 | 78 | todo.content = req.body.content; 79 | todo.updated_at = Date.now(); 80 | todo.save( function ( err, todo, count ){ 81 | if( err ) return next( err ); 82 | 83 | res.redirect( '/' ); 84 | }); 85 | }); 86 | }; 87 | 88 | // ** express turns the cookie key to lowercase ** 89 | exports.current_user = function ( req, res, next ){ 90 | var user_id = req.cookies ? 
91 | req.cookies.user_id : undefined; 92 | 93 | if( !user_id ){ 94 | res.cookie( 'user_id', utils.uid( 32 )); 95 | } 96 | 97 | next(); 98 | }; 99 | -------------------------------------------------------------------------------- /04-lesson/todo-app/utils.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | 3 | ran_no : function ( min, max ){ 4 | return Math.floor( Math.random() * ( max - min + 1 )) + min; 5 | }, 6 | 7 | uid : function ( len ){ 8 | var str = ''; 9 | var src = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'; 10 | var src_len = src.length; 11 | var i = len; 12 | 13 | for( ; i-- ; ){ 14 | str += src.charAt( this.ran_no( 0, src_len - 1 )); 15 | } 16 | 17 | return str; 18 | }, 19 | 20 | forbidden : function ( res ){ 21 | var body = 'Forbidden'; 22 | res.statusCode = 403; 23 | 24 | res.setHeader( 'Content-Type', 'text/plain' ); 25 | res.setHeader( 'Content-Length', body.length ); 26 | res.end( body ); 27 | } 28 | }; 29 | -------------------------------------------------------------------------------- /04-lesson/todo-app/views/edit.ejs: -------------------------------------------------------------------------------- 1 | <% layout( 'layout' ) -%> 2 | 3 |

<%= title %> 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | <% todos.forEach( function ( todo ){ %> 12 | <% if( todo._id == current ){ %> 13 | 14 | <% }else{ %> 15 | 16 | <% } %> 17 | 18 | <% if( todo._id == current ){ %> 19 | 20 | 21 | 22 | <% }else{ %> 23 | <%= todo.content %> 24 | <% } %> 25 | 26 | <% if( todo._id == current ){ %> 27 | Delete 28 | <% }else{ %> 29 | Delete 30 | <% } %> 31 | 32 | <% }); %> 33 | 34 |
-------------------------------------------------------------------------------- /04-lesson/todo-app/views/index.ejs: -------------------------------------------------------------------------------- 1 | <% layout( 'layout' ) -%> 2 | 3 | <%= title %> 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | <% todos.forEach( function ( todo ){ %> 13 | 14 | <%= todo.content %> 15 | Delete 16 | 17 | <% }); %> 18 | 19 |
-------------------------------------------------------------------------------- /04-lesson/todo-app/views/layout.ejs: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | <%= title %> 5 | 6 | 9 | 10 | 11 | 12 | <%- body %> 13 | 14 |
15 | 16 | 17 | -------------------------------------------------------------------------------- /04-lesson/todo-app/web-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: web 5 | labels: 6 | name: web 7 | app: todoapp 8 | spec: 9 | containers: 10 | - image: janakiramm/todo-app 11 | name: myweb 12 | ports: 13 | - containerPort: 3000 -------------------------------------------------------------------------------- /04-lesson/todo-app/web-rc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: web 5 | labels: 6 | name: web 7 | app: todoapp 8 | spec: 9 | replicas: 2 10 | template: 11 | metadata: 12 | labels: 13 | name: web 14 | spec: 15 | containers: 16 | - image: janakiramm/todo-app 17 | name: web 18 | ports: 19 | - containerPort: 3000 -------------------------------------------------------------------------------- /04-lesson/todo-app/web-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: web 5 | labels: 6 | name: web 7 | app: todoapp 8 | spec: 9 | selector: 10 | name: web 11 | type: NodePort 12 | ports: 13 | - name: http 14 | port: 80 15 | targetPort: 3000 16 | protocol: TCP -------------------------------------------------------------------------------- /05-lesson/demo.sh: -------------------------------------------------------------------------------- 1 | # location of demo 2 | # D:\MyLearningProjects\kubernetes\Demos\janakirammsv\Demos\05-lesson 3 | 4 | # Create the deployment 5 | kubectl create -f j-hello.yaml -f j-hello-svc.yaml --validate=false 6 | 7 | # List the deployment 8 | kubectl get deployments 9 | 10 | # Describe the deployment 11 | kubectl describe deployments 12 | 13 | # List the pods 14 | kubectl get pods 15 | 16 | # List the pods with labels 17 | kubectl get pods --show-labels 18 | 19 | # List the Replica Sets 20 | kubectl get rs --show-labels 21 | 22 | # Access the pod 23 | export NODE_PORT=30001 # set NODE_PORT=30001 curl 192.168.99.100:$NODE_PORT # 24 | curl 192.168.99.100:$NODE_PORT #http://192.168.99.100:30001/ 25 | 26 | # Scale the deployment 27 | kubectl scale deployment j-hello --replicas 10 28 | 29 | # Check the status of deployment 30 | kubectl rollout status deploy/j-hello 31 | 32 | # Pause the deployment 33 | kubectl rollout pause deploy/j-hello 34 | 35 | # Check the current version of deployment 36 | while true; do curl 192.168.99.100:$NODE_PORT; printf '%s\r\n'; sleep 1; done 37 | 38 | # Check the current version of deployment and resume from the pause 39 | kubectl rollout resume deployment/j-hello 40 | 41 | # Upgrade to version 2 42 | kubectl set image deployment j-hello j-hello=janakiramm/j-hello:2 43 | 44 | watch kubectl get pods 45 | 46 | # Check the history 47 | kubectl rollout history deployment j-hello 48 | 49 | # Undo the previous upgrade 50 | kubectl rollout undo deploy/j-hello 51 | 52 | # Clean up 53 | kubectl delete deployment j-hello 54 | kubectl delete service j-hello 55 | 56 | :' 57 | kubectl set image deployment j-hello j-hello=janakiramm/j-hello:2 58 | 59 | kubectl rollout pause deploy/j-hello 60 | 61 | kubectl rollout resume deploy/j-hello 62 | ' 63 | 64 | 65 | -------------------------------------------------------------------------------- /05-lesson/demo2.sh: -------------------------------------------------------------------------------- 1 
| # Create the deployment 2 | kubectl create -f j-hello.yaml -f j-hello-svc.yaml --validate=false 3 | 4 | # List the deployment 5 | kubectl get deployments --watch 6 | 7 | # Access the pod 8 | export NODE_PORT=30001 9 | curl 192.168.99.100:$NODE_PORT 10 | 11 | # Scale the deployment 12 | kubectl scale deployment j-hello --replicas 10 13 | 14 | # Check the status of deployment 15 | kubectl rollout status deploy/j-hello 16 | 17 | # Upgrade to version 2 18 | kubectl set image deployment j-hello j-hello=janakiramm/j-hello:2 19 | 20 | # Pause the deployment 21 | kubectl rollout pause deploy/j-hello 22 | 23 | # Check the current version of deployment 24 | while true; do curl 192.168.99.100:$NODE_PORT; printf '%s\r\n'; sleep 1; done 25 | 26 | # Check the current version of deployment and resume from the pause 27 | kubectl rollout resume deployment/j-hello 28 | 29 | # Clean up 30 | kubectl delete deployment j-hello 31 | kubectl delete service j-hello 32 | 33 | 34 | 35 | -------------------------------------------------------------------------------- /05-lesson/j-hello-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: j-hello 5 | labels: 6 | app: helloworld 7 | spec: 8 | selector: 9 | app: helloworld 10 | type: NodePort 11 | ports: 12 | - port: 80 13 | name: http 14 | targetPort: 3000 15 | nodePort: 30001 16 | protocol: TCP 17 | -------------------------------------------------------------------------------- /05-lesson/j-hello.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: j-hello 5 | labels: 6 | app: helloworld 7 | spec: 8 | replicas: 3 9 | template: 10 | metadata: 11 | labels: 12 | app: helloworld 13 | spec: 14 | containers: 15 | - name: j-hello 16 | image: janakiramm/j-hello 17 | ports: 18 | - name: nodejs-port 19 | containerPort: 3000 -------------------------------------------------------------------------------- /05-lesson/nginx-deployment.yaml: -------------------------------------------------------------------------------- 1 | #apiVersion: apps/v1beta1 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: nginx-deployment 6 | spec: 7 | replicas: 3 8 | template: 9 | metadata: 10 | labels: 11 | app: nginx 12 | spec: 13 | containers: 14 | - name: nginx 15 | image: nginx:1.7.9 16 | ports: 17 | - containerPort: 80 18 | -------------------------------------------------------------------------------- /06-lesson/README.md: -------------------------------------------------------------------------------- 1 | # Dealing with Storage and Persistence 2 | 3 | ## Objectives 4 | - Adding persistence to Pods 5 | - Dealing with block storage in the cloud 6 | - Understanding Persistence Volumes and Claims 7 | - Demos 8 | --- 9 | ## Persistence in the Pods 10 | - Pods are ephemeral and stateless 11 | - Volumes bring persistence to Pods 12 | - Kubernetes volumes are similar to Docker volumes, but managed differently 13 | - All containers in a Pod can access the volume 14 | - Volumes are associated with the lifecycle of Pod 15 | - Directories in the host are exposed as volumes 16 | - Volumes may be based on a variety of storage backends 17 | ## Pods and Volumes 18 | ![Pods and Volumes](./images/image-06-01.png) 19 | 20 | ![Pods and Volumes](./images/image-06-02.png) 21 | 22 | With storage back-end 23 | ## Kubernetes Volumes Types 24 | - **Host-based** 25 | - EmptyDir 26 
| - HostPath 27 | - **Block Storage** 28 | - Amazon EBS 29 | - GCE Persistent Disk 30 | - Azure Disk 31 | - vSphere Volume 32 | - ... 33 | - **Distributed File System** 34 | - NFS 35 | - Ceph 36 | - Gluster 37 | - Amazon EFS 38 | - Azure File System 39 | - ... 40 | - **Other** 41 | - Flocker 42 | - iScsi 43 | - Git Repo 44 | - Quobyte 45 | - ... 46 | ## Demo 47 | ### Host-based Volumes Block Storage-based Volumes 48 | ```console 49 | $ kubectl create -f pod-vol-local.yaml 50 | pod "nginx" created 51 | $ kubectl get po 52 | NAME READY STATUS RESTARTS AGE 53 | nginx 1/1 Running 0 2m 54 | ``` 55 | Let's have a look at the newly created nginx pod created 56 | ```console 57 | λ kubectl describe po nginx 58 | Name: nginx 59 | Namespace: default 60 | Node: minikube/192.168.99.100 61 | Start Time: Tue, 12 Dec 2017 14:57:23 +0800 62 | Labels: env=dev 63 | Annotations: 64 | Status: Running 65 | IP: 172.17.0.4 66 | Containers: 67 | nginx: 68 | Container ID: docker://8b2bf6e1a049d92da7ea1dd8769ca230c8b386b9bd0d5517f72a9809ac720b93 69 | Image: nginx 70 | Image ID: docker-pullable://nginx@sha256:d2b543f6f358a592c42f2085ae69fba138fd1a9da2c15806611145b22bcfd7ab 71 | Port: 80/TCP 72 | State: Running 73 | Started: Tue, 12 Dec 2017 14:57:42 +0800 74 | Ready: True 75 | Restart Count: 0 76 | Environment: 77 | Mounts: 78 | /usr/share/nginx/html from my-vol (rw) 79 | /var/run/secrets/kubernetes.io/serviceaccount from default-token-zk79b (ro) 80 | Conditions: 81 | Type Status 82 | Initialized True 83 | Ready True 84 | PodScheduled True 85 | Volumes: 86 | my-vol: 87 | Type: HostPath (bare host directory volume) 88 | Path: /var/lib/my-data 89 | default-token-zk79b: 90 | Type: Secret (a volume populated by a Secret) 91 | SecretName: default-token-zk79b 92 | Optional: false 93 | QoS Class: BestEffort 94 | Node-Selectors: 95 | Tolerations: 96 | Events: 97 | Type Reason Age From Message 98 | ---- ------ ---- ---- ------- 99 | Normal Scheduled 3m default-scheduler Successfully assigned nginx to minikube 100 | 101 | Normal SuccessfulMountVolume 3m kubelet, minikube MountVolume.SetUp succeeded for volume "my-vol" 102 | Normal SuccessfulMountVolume 3m kubelet, minikube MountVolume.SetUp succeeded for volume "default-token-zk79b" 103 | Normal Pulling 3m kubelet, minikube pulling image "nginx" 104 | Normal Pulled 2m kubelet, minikube Successfully pulled image "nginx" 105 | Normal Created 2m kubelet, minikube Created container 106 | Normal Started 2m kubelet, minikube Started container 107 | ``` 108 | Now, that we know that the nginx pod was created, let's see if the directory in question is accessible. 109 | ```console 110 | $ minikube ssh 111 | _ _ 112 | _ _ ( ) ( ) 113 | ___ ___ (_) ___ (_)| |/') _ _ | |_ __ 114 | /' _ ` _ `\| |/' _ `\| || , < ( ) ( )| '_`\ /'__`\ 115 | | ( ) ( ) || || ( ) || || |\`\ | (_) || |_) )( ___/ 116 | (_) (_) (_)(_)(_) (_)(_)(_) (_)`\___/'(_,__/'`\____) 117 | 118 | $ sudo -i #act as root user 119 | $ ls -al /var/lib/my-data 120 | total 0 121 | drwxr-xr-x 2 root root 0 Dec 12 06:57 . 122 | drwxr-xr-x 16 root root 0 Dec 12 06:57 .. 123 | ``` 124 | 125 | We are going to create a html file from /var/lib/my-data 126 | 127 | ```console 128 | $ echo "
Hello from host
" > /var/lib/my-data/index.html 129 | $ cat /var/lib/my-data/index.html 130 | $
Hello from host
131 | ``` 132 | 133 | We will test to see if the file is persistent and while we delete the pod! 134 | 135 | ```console 136 | $ kubectl delete -f pod-vol-local.yaml 137 | pod "nginx" deleted 138 | ``` 139 | 140 | I'm back on the minikube console, where I'll test to see if the index.html file still exists on /var/lib/my-data/. 141 | 142 | ```console 143 | $ ls -al /var/lib/my-data 144 | total 4 145 | drwxr-xr-x 2 root root 0 Dec 12 07:18 . 146 | drwxr-xr-x 16 root root 0 Dec 12 06:57 .. 147 | -rw-r--r-- 1 root root 12 Dec 12 07:18 index.html 148 | $ cat /var/lib/my-data/index.html 149 |
Hello from host
150 | ``` 151 | 152 | Recreate the pod and service 153 | 154 | ```console 155 | kubectl create -f pod-vol-local.yaml 156 | ``` 157 | 158 | See the result 159 | 160 | ```console 161 | minikube service nginx 162 | $ Opening kubernetes service default/nginx in default browser... 163 | ``` 164 | 165 | Remove pod and service 166 | 167 | ```console 168 | kubectl delete -f pod-vol-local.yaml 169 | ``` 170 | 171 | ## Understanding Persistent Volumes & Claims 172 | 173 | - PersistentVolume(PV) 174 | - Networked storage in the cluster pre-provisioned by an administrator 175 | - PersistentVolumeClaim (PVC) 176 | - Storageresource requested by a user 177 | - StorageClass 178 | - Types of supported storage profiles offered by administrators 179 | 180 | ## Storage Provisioning Workflow 181 | 182 | ![Storage Provisioning Workflow](./images/image-06-03.png) 183 | 184 | Lifecycle of a Presistent Volume 185 | 186 | - Provisioning 187 | - Binding 188 | - Using 189 | - Releasing 190 | - Reclaiming 191 | 192 | ## Demo 193 | 194 | ### Provisioning and Claiming NFS-based Volumes 195 | 196 | ```console 197 | $ kubectl create -f my-pv.yaml 198 | persistentvolume "my-pv" created 199 | $ kubectl get pv 200 | NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON 201 | AGE 202 | my-pv 1Gi RWO Recycle Available 203 | 5m 204 | ``` 205 | 206 | Let's have a different view of the Persistent Volume 207 | 208 | ```console 209 | $ kubectl describe pv 210 | Name: my-pv 211 | Labels: type=local 212 | Annotations: 213 | Finalizers: [kubernetes.io/pv-protection] 214 | StorageClass: manual 215 | Status: Available 216 | Claim: 217 | Reclaim Policy: Retain 218 | Access Modes: RWX 219 | VolumeMode: Filesystem 220 | Capacity: 1Gi 221 | Node Affinity: 222 | Message: 223 | Source: 224 | Type: HostPath (bare host directory volume) 225 | Path: /mypv 226 | HostPathType: 227 | Events: 228 | ``` 229 | 230 | Let's create the `PersistentVolumeClaim` 231 | 232 | ```console 233 | $ kubectl create -f my-pvc.yaml 234 | persistentvolumeclaim "my-pvc" created 235 | ``` 236 | 237 | View the PersistentVolumeClaim 238 | 239 | ```console 240 | $ kubectl get pvc 241 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE 242 | my-pvc Bound my-pv 1Gi RWX manual 8m56s 243 | ``` 244 | 245 | Note that the status of pvc is `Bound` and the volume is `my-pv` 246 | 247 | Describe the `PersistentVolumeClaim` 248 | 249 | ```console 250 | $ kubectl describe pvc 251 | Name: my-pvc 252 | Namespace: default 253 | StorageClass: manual 254 | Status: Bound 255 | Volume: my-pv 256 | Labels: 257 | Annotations: pv.kubernetes.io/bind-completed: yes 258 | pv.kubernetes.io/bound-by-controller: yes 259 | Finalizers: [kubernetes.io/pvc-protection] 260 | Capacity: 1Gi 261 | Access Modes: RWX 262 | VolumeMode: Filesystem 263 | Events: 264 | Mounted By: 265 | ``` 266 | 267 | The `PersistentVolume` and `PersistentVolumeClaim` are created, it's time to create the Pod with that. 268 | 269 | ```console 270 | $ kubectl create -f my-pod.yaml 271 | pod "my-pod" created 272 | $ kubectl get pod 273 | NAME READY STATUS RESTARTS AGE 274 | my-pod 1/1 Running 0 1m 275 | ``` 276 | 277 | Let's have a closer look at the Pod by looking at the describe value... 
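Before walking through the full `describe` output below, a compact cross-check of the binding from both directions can be handy (a convenience sketch, not part of the original demo; it assumes the `my-pv`/`my-pvc` names used in this lesson):

```console
$ kubectl get pvc my-pvc -o jsonpath='{.spec.volumeName}{"\n"}'   # prints my-pv
$ kubectl get pv my-pv -o jsonpath='{.spec.claimRef.name}{"\n"}'  # prints my-pvc
```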
278 | 279 | ```console 280 | $ kubectl describe po 281 | Name: my-pod 282 | Namespace: default 283 | Priority: 0 284 | PriorityClassName: 285 | Node: minikube/10.0.2.15 286 | Start Time: Tue, 21 May 2019 10:42:59 +0800 287 | Labels: env=web 288 | Annotations: 289 | Status: Running 290 | IP: 172.17.0.6 291 | Containers: 292 | web: 293 | Container ID: docker://e04f73277cadb6181b2d4665b7c95f2abe9dfce90c5c324f182f1b8050d87449 294 | Image: nginx:1.7.9 295 | Image ID: docker-pullable://nginx@sha256:e3456c851a152494c3e4ff5fcc26f240206abac0c9d794affb40e0714846c451 296 | Port: 80/TCP 297 | Host Port: 0/TCP 298 | State: Running 299 | Started: Tue, 21 May 2019 10:43:00 +0800 300 | Ready: True 301 | Restart Count: 0 302 | Environment: 303 | Mounts: 304 | /usr/share/nginx/html from mypd (rw) 305 | /var/run/secrets/kubernetes.io/serviceaccount from default-token-98ltk (ro) 306 | Conditions: 307 | Type Status 308 | Initialized True 309 | Ready True 310 | ContainersReady True 311 | PodScheduled True 312 | Volumes: 313 | mypd: 314 | Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace) 315 | ClaimName: my-pvc 316 | ReadOnly: false 317 | default-token-98ltk: 318 | Type: Secret (a volume populated by a Secret) 319 | SecretName: default-token-98ltk 320 | Optional: false 321 | QoS Class: BestEffort 322 | Node-Selectors: 323 | Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s 324 | node.kubernetes.io/unreachable:NoExecute for 300s 325 | Events: 326 | Type Reason Age From Message 327 | ---- ------ ---- ---- ------- 328 | Normal Scheduled 2s default-scheduler Successfully assigned default/my-pod to minikube 329 | Normal Pulled 1s kubelet, minikube Container image "nginx:1.7.9" already present on machine 330 | Normal Created 1s kubelet, minikube Created container web 331 | Normal Started 1s kubelet, minikube Started container web 332 | ``` 333 | 334 | Visit `192.168.99.100:30100` and you should see `Forbidden` errors. Let's create a simple `html` page: 335 | 336 | ```console 337 | minikube ssh 338 | $ _ _ 339 | $ _ _ ( ) ( ) 340 | $ ___ ___ (_) ___ (_)| |/') _ _ | |_ __ 341 | $ /' _ ` _ `\| |/' _ `\| || , < ( ) ( )| '_`\ /'__`\ 342 | $ | ( ) ( ) || || ( ) || || |\`\ | (_) || |_) )( ___/ 343 | $ (_) (_) (_)(_)(_) (_)(_)(_) (_)`\___/'(_,__/'`\____) 344 | 345 | sudo -i 346 | echo "
Hello from host
" > /mypv/index.html 347 | exit 348 | $ logout 349 | exit 350 | ``` 351 | 352 | Visit `192.168.99.100:30100` again and you should see `Hello from host` 353 | 354 | Now, clear the environment 355 | 356 | ```console 357 | kubectl delete -f my-pod.yaml -f my-pv.yaml -f my-pvc.yaml 358 | ``` 359 | 360 | ## Summary 361 | 362 | - Adding persistence to Pods 363 | - Dealing with block storage in the cloud 364 | - Understanding Persistence Volumes and Claims 365 | - Demos 366 | 367 | Reference: 368 | - [Kubernetes Webinar Series - Dealing with Storage and Persistence](https://www.youtube.com/watch?v=n06kKYS6LZE&index=6&list=PLF3s2WICJlqOiymMaTLjwwHz-MSVbtJPQ) 369 | 370 | -------------------------------------------------------------------------------- /06-lesson/demo.sh: -------------------------------------------------------------------------------- 1 | 2 | kubectl create -f pod-vol-local.yaml 3 | 4 | kubectl describe pod nginx 5 | 6 | vagrant status 7 | 8 | kubectl config get-contexts 9 | 10 | minikube ssh 11 | 12 | kubectl exec -it nginx /bin/sh 13 | 14 | kubectl config use-context minikube 15 | 16 | # On the Master Node of the Kubernetes Cluster 17 | cat /etc/exports 18 | /opt/data 10.245.1.2/24(rw,sync,no_root_squash,no_all_squash) 19 | 20 | kubectl get pv 21 | kubectl get pvc 22 | 23 | kubectl create -f my-pv.yaml 24 | 25 | kubectl describe pod my-pod 26 | 27 | kubectl exec -it my-pod /bin/sh 28 | 29 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /06-lesson/images/image-06-01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/06-lesson/images/image-06-01.png -------------------------------------------------------------------------------- /06-lesson/images/image-06-02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/06-lesson/images/image-06-02.png -------------------------------------------------------------------------------- /06-lesson/images/image-06-03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/06-lesson/images/image-06-03.png -------------------------------------------------------------------------------- /06-lesson/my-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: my-pod 5 | labels: 6 | env: web 7 | spec: 8 | containers: 9 | - name: web 10 | image: nginx:1.7.9 11 | ports: 12 | - containerPort: 80 13 | name: http-server 14 | protocol: TCP 15 | volumeMounts: 16 | - mountPath: /usr/share/nginx/html 17 | name: mypd 18 | volumes: 19 | - name: mypd 20 | persistentVolumeClaim: 21 | claimName: my-pvc 22 | --- 23 | apiVersion: v1 24 | kind: Service 25 | metadata: 26 | name: nginx 27 | labels: 28 | env: web 29 | spec: 30 | selector: 31 | env: web 32 | type: NodePort 33 | ports: 34 | - name: http 35 | port: 80 36 | targetPort: 80 37 | nodePort: 30100 38 | protocol: TCP -------------------------------------------------------------------------------- /06-lesson/my-pv.yaml: -------------------------------------------------------------------------------- 1 | # apiVersion: v1 2 | # kind: PersistentVolume 3 | # metadata: 4 | # name: my-pv 5 | # labels: 6 | # type: 
local 7 | # spec: 8 | # capacity: 9 | # storage: 1Gi 10 | # accessModes: 11 | # - ReadWriteOnce 12 | # persistentVolumeReclaimPolicy: Recycle 13 | # nfs: 14 | # path: /opt/data/web 15 | # server: 192.168.99.101 16 | 17 | apiVersion: v1 18 | kind: PersistentVolume 19 | metadata: 20 | name: my-pv 21 | labels: 22 | type: local 23 | spec: 24 | capacity: 25 | storage: 1Gi 26 | accessModes: 27 | - ReadWriteMany 28 | storageClassName: manual 29 | hostPath: 30 | path: "/mypv" -------------------------------------------------------------------------------- /06-lesson/my-pvc.yaml: -------------------------------------------------------------------------------- 1 | # kind: PersistentVolumeClaim 2 | # apiVersion: v1 3 | # metadata: 4 | # name: my-pvc 5 | # spec: 6 | # accessModes: 7 | # - ReadWriteOnce 8 | # resources: 9 | # requests: 10 | # storage: 1Gi 11 | 12 | --- 13 | kind: PersistentVolumeClaim 14 | apiVersion: v1 15 | metadata: 16 | name: my-pvc 17 | spec: 18 | accessModes: 19 | - ReadWriteMany 20 | storageClassName: manual 21 | resources: 22 | requests: 23 | storage: 1Gi 24 | selector: 25 | matchLabels: 26 | type: local -------------------------------------------------------------------------------- /06-lesson/nfs-demo.sh: -------------------------------------------------------------------------------- 1 | # How to Share Files Using NFS: Linux Server Training 101 2 | # https://www.youtube.com/watch?v=c3dL0ULEH-s 3 | 4 | # 1) install these commands 5 | sudo apt-get install nfs-kernel-server nfs-common rpcbind -y 6 | 7 | # 2) start the daemon 8 | sudo /etc/init.d/rpcbind restart 9 | 10 | # 3) 11 | sudo echo "iface enp0s3 inte dhcp" | tee -a /etc/network/interfaces 12 | sudo echo "address 191.168.1.1" | tee -a /etc/network/interfaces 13 | sudo echo "netmask 255.255.255.0" | tee -a /etc/network/interfaces 14 | 15 | 16 | sudo /etc/init.d/nfs-kernel-server restart 17 | 18 | showmount -e 19 | sudo apt-get update 20 | # if error occures and that file is locked with this message (/var/lib/dpkg), is another process using it? 
21 | sudo rm /var/lib/apt/lists/lock 22 | 23 | sudo apt-get install rpcbind nfs-common -y 24 | 25 | 26 | sudo iptables -A INPUT -s 192.168.0.0/16 -p tcp -m multiport --ports 111,2000,2001,2049,37611,37328 -j ACCEPT 27 | sudo iptables -A INPUT -s 192.168.0.0/16 -p udp -m multiport --ports 111,2000,2002,2049,37611,37328 -j ACCEPT 28 | 29 | 30 | # How to get NFS working with Ubuntu-CE-Firewall 31 | # https://wiki.ubuntu.com/How%20to%20get%20NFS%20working%20with%20Ubuntu-CE-Firewall 32 | 33 | 34 | sudo mount -t nfs -o proto=tcp,port=2049 -v 192.168.99.101:/opt/data /mnt/opt/data 35 | 36 | 37 | -------------------------------------------------------------------------------- /06-lesson/pod-vol-cloud.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx 5 | labels: 6 | env: dev 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx 11 | ports: 12 | - containerPort: 80 13 | name: http 14 | protocol: TCP 15 | volumeMounts: 16 | - mountPath: /usr/share/nginx/html 17 | name: my-vol 18 | volumes: 19 | - name: my-vol 20 | gcePersistentDisk: 21 | pdName: my-data-disk 22 | fsType: ext4 -------------------------------------------------------------------------------- /06-lesson/pod-vol-local.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx 5 | labels: 6 | env: dev 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx:1.7.9 11 | ports: 12 | - containerPort: 80 13 | name: http 14 | protocol: TCP 15 | volumeMounts: 16 | - mountPath: /usr/share/nginx/html 17 | name: my-vol 18 | volumes: 19 | - name: my-vol 20 | hostPath: 21 | path: /var/lib/my-data 22 | --- 23 | apiVersion: v1 24 | kind: Service 25 | metadata: 26 | name: nginx 27 | labels: 28 | env: dev 29 | spec: 30 | selector: 31 | env: dev 32 | type: NodePort 33 | ports: 34 | - name: http 35 | port: 80 36 | targetPort: 80 37 | nodePort: 30100 38 | protocol: TCP -------------------------------------------------------------------------------- /07-lesson/README.md: -------------------------------------------------------------------------------- 1 | # Controlling user access using Role Based Access Control (RBAC) 2 | 3 | Role management provides the necessary framework for enterprises for effective access governance of sensitive data and it is also recognized as the best practice for strict control of employee’s lifecycle. 4 | 5 | In Kubernetes' RBAC, we can: 6 | 7 | * Have multiple users with different properties, establishing a proper authentication mechanism. 8 | * Have full control over which operations each user or group of users can execute. 9 | * Have full control over which operations each process inside a pod can execute. 10 | * Limit the visibility of certain resources of namespaces. 
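Each of the capabilities above can be checked from the command line with `kubectl auth can-i`, which is useful throughout the demos below. A quick sketch (the user and group names match the ones created later in this lesson; impersonation with `--as`/`--as-group` needs admin rights, e.g. the default minikube context):

```console
# Can the current context list secrets in the default namespace?
kubectl auth can-i list secrets --namespace default

# Preview what a specific user or group would be allowed to do
kubectl auth can-i list secrets --namespace default --as user1
kubectl auth can-i list secrets --namespace default2 --as manager1 --as-group managers
```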
11 | 12 | ![Secrets](./images/rbac0.png) 13 | 14 | Kubernetes objects associated with RBAC: 15 | 16 | * **Roles** 17 | 18 | A `Role` can only be used to grant access to resources within a single namespace 19 | 20 | * **ClusterRoles** 21 | 22 | A `ClusterRole` can be used to grant the same permissions as a Role, but because they are cluster-scoped, they can also be used to grant access to: 23 | * cluster-scoped resources (like nodes) 24 | * non-resource endpoints (like “/healthz”) 25 | * namespaced resources (like pods) across all namespaces (needed to run kubectl get pods --all-namespaces, for example) 26 | 27 | * **RoleBindings** 28 | 29 | A `RoleBinding` grants the permissions defined in a role to a user or set of users 30 | 31 | * **ClusterRoleBindings** 32 | 33 | Similar to `RoleBinding` but cluster-scoped 34 | 35 | ## Demo - Prequisities 36 | 37 | As a prerequisite to demonstrate access control, we are going to: 38 | 39 | 1. Create 2 secrets, `jonsnow` and `dany`, placed in namespace `default` and `default2` respectively. 40 | ![Secrets](./images/demo0.png) 41 | 2. Create 3 users (certificate and key pairs): `user1` and `user2` which belongs to group `operators` and `manager1` which belongs to group `managers`. 42 | ![Users and Groups](./images/demo1.png) 43 | 44 | ### Create Namespace and Secrets 45 | 46 | Create a new namespace `default2` 47 | 48 | ```console 49 | $ kubectl create namespace default2 50 | namespace/default2 created 51 | ``` 52 | 53 | Create secret `jonsnow` in namespace `default` 54 | 55 | ```console 56 | $ kubectl create secret generic jonsnow --from-literal=username=jon --from-literal=password=backtothenorth -n default 57 | secret/jonsnow created 58 | $ kubectl get secret 59 | NAME TYPE DATA AGE 60 | default-token-x7hpm kubernetes.io/service-account-token 3 2d18h 61 | jonsnow Opaque 2 4s 62 | ``` 63 | 64 | Create secret `dany` in namespace `default2` 65 | 66 | ```console 67 | $ kubectl create secret generic dany --from-literal=username=dany --from-literal=password=themadqueen -n default2 68 | secret/dany created 69 | $ kubectl get secret -n default2 70 | NAME TYPE DATA AGE 71 | dany Opaque 2 5s 72 | default-token-ht7gn kubernetes.io/service-account-token 3 54s 73 | ``` 74 | 75 | ### Create Users 76 | 77 | Check if `ca.crt` and `ca.key` exists. These are the minikube CA certificate and key which will be used to sign users' certificates. 78 | 79 | ```console 80 | $ ls ~/.minikube | grep ^ca.* 81 | ca.crt # <-- make sure this exists 82 | ca.key # <-- make sure this exists 83 | ca.pem 84 | cache 85 | ``` 86 | 87 | Load utility scripts 88 | 89 | ```console 90 | source utils.sh 91 | ``` 92 | 93 | Generate key and certificate for `user1` using the utility script. Note that in Kubernetes context, `CN` represents the name and `O` represents the group 94 | 95 | ```console 96 | $ create_user user1 operators 97 | Generating RSA private key, 2048 bit long modulus 98 | ................................................................+++ 99 | ....................................................................................+++ 100 | e is 65537 (0x10001) 101 | Signature ok 102 | subject=/CN=user1/O=operators 103 | Getting CA Private Key 104 | ``` 105 | 106 | Set user and create context for `user1` 107 | 108 | ```console 109 | $ kubectl config set-credentials user1 --client-certificate=user1.crt --client-key=user1.key 110 | User "user1" set. 111 | $ kubectl config set-context user1-context --cluster=minikube --namespace=default --user=user1 112 | Context "user1-context" created. 
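# Note: at this point user1 can authenticate with its certificate, but it has not been
# granted any permissions yet; access only appears once a Role/RoleBinding is created below.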
113 | ``` 114 | 115 | Generate key and certificate for `user2` using the utility script 116 | 117 | ```console 118 | $ create_user user2 operators 119 | Generating RSA private key, 2048 bit long modulus 120 | ..................................................................+++ 121 | .....................+++ 122 | e is 65537 (0x10001) 123 | Signature ok 124 | subject=/CN=user2/O=operators 125 | Getting CA Private Key 126 | ``` 127 | 128 | Set user and create context for `user2` 129 | 130 | ```console 131 | $ kubectl config set-credentials user2 --client-certificate=user2.crt --client-key=user2.key 132 | User "user2" set. 133 | $ kubectl config set-context user2-context --cluster=minikube --namespace=default --user=user2 134 | Context "user2-context" created. 135 | ``` 136 | 137 | Generate key and certificate for `manager1` using the utility script 138 | 139 | ```console 140 | $ create_user manager1 managers 141 | Generating RSA private key, 2048 bit long modulus 142 | .........+++ 143 | ......................................................+++ 144 | e is 65537 (0x10001) 145 | Signature ok 146 | subject=/CN=manager1/O=managers 147 | Getting CA Private Key 148 | ``` 149 | 150 | Set user and create context for `manager1` 151 | 152 | ```console 153 | $ kubectl config set-credentials manager1 --client-certificate=manager1.crt --client-key=manager1.key 154 | User "manager1" set. 155 | $ kubectl config set-context manager1-context --cluster=minikube --namespace=default --user=manager1 156 | Context "manager1-context" created. 157 | ``` 158 | 159 | View the configuration. There should be 3 *contextes* aka profiles which points to its respective users. We can switch profiles when interacting with the cluster, as shown in later steps. 160 | 161 | ```console 162 | $ kubectl config view 163 | ... 164 | - context: 165 | cluster: minikube 166 | namespace: default 167 | user: user1 # <-- reference to user1 168 | name: user1-context # <-- context name 169 | - context: 170 | cluster: minikube 171 | namespace: default 172 | user: user2 173 | name: user2-context 174 | - context: 175 | cluster: minikube 176 | namespace: default 177 | user: manager1 178 | name: manager1-context 179 | ... 180 | - name: user1 <-- referred from user1-context 181 | user: 182 | client-certificate: /path/to/k8s-basics/08-rbac/user1.crt 183 | client-key: /path/to/k8s-basics/08-rbac/user1.key 184 | - name: user2 185 | user: 186 | client-certificate: /path/to/k8s-basics/08-rbac/user2.crt 187 | client-key: /path/to/k8s-basics/08-rbac/user2.key 188 | - name: manager1 189 | user: 190 | client-certificate: /path/to/k8s-basics/08-rbac/manager1.crt 191 | client-key: /path/to/k8s-basics/08-rbac/manager1.key 192 | ``` 193 | 194 | ## Demo - Role and RoleBinding 195 | 196 | Create Role `secret-reader`, which specifies *read-only* access to `secrets` in namespace `default` 197 | 198 | ```console 199 | $ kubectl config use-context minikube 200 | Switched to context "minikube". 
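# Creating Role and RoleBinding objects requires admin-level access, which the restricted
# user contexts do not have, hence the explicit switch back to the default minikube context.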
201 | $ kubectl create -f role.yaml 202 | role.rbac.authorization.k8s.io/secret-reader created 203 | ``` 204 | 205 | Create RoleBinding `read-secrets`, which binds the Role `secret-reader` to User `user1` in namespace `default` (Observe the `subjects` section in the yaml file) 206 | 207 | ```console 208 | $ kubectl create -f role-binding.yaml 209 | rolebinding.rbac.authorization.k8s.io/read-secrets created 210 | ``` 211 | 212 | Now switch context to `user1-context`, which means that we interact with the cluster as `user1` 213 | 214 | ```console 215 | $ kubectl config use-context user1-context 216 | Switched to context "user1-context". 217 | $ kubectl config current-context 218 | user1-context 219 | ``` 220 | 221 | Try to view secrets in the namespace `default` and it is successful, as `user1` is given the access to read Secret in namespace `default` 222 | 223 | ```console 224 | $ kubectl get secret -n default 225 | NAME TYPE DATA AGE 226 | default-token-x7hpm kubernetes.io/service-account-token 3 2d18h 227 | jonsnow Opaque 2 2m30s 228 | ``` 229 | 230 | However when `user1` tries to see secrets in the namespace `default2`, the cluster rejects as `user` is not given access to read Secret in namespace `default2` 231 | 232 | ```console 233 | $ kubectl get secret -n default2 234 | Error from server (Forbidden): secrets is forbidden: User "user1" cannot list resource "secrets" in API group "" in the namespace "default2" 235 | ``` 236 | 237 | ## Demo - ClusterRoleBinding 238 | 239 | Create ClusterRole `secret-reader`, which specifies *read-only* access to `secrets` in **all** namespaces. Note we need to switch back to `minikube` context to do this 240 | 241 | ```console 242 | $ kubectl config use-context minikube 243 | Switched to context "minikube". 244 | $ kubectl create -f cluster-role.yaml 245 | clusterrole.rbac.authorization.k8s.io/secret-reader created 246 | ``` 247 | 248 | Create ClusterRoleBinding `read-secrets-global`, which binds the ClusterRole `secret-reader` to: 249 | 250 | * User `user2` 251 | * Group `managers` (`manager1` is part of this group) 252 | 253 | ```console 254 | $ kubectl create -f cluster-role-binding.yaml 255 | clusterrolebinding.rbac.authorization.k8s.io/read-secrets-global created 256 | ``` 257 | 258 | Switch to `user2` 259 | 260 | ```console 261 | $ kubectl config use-context user2-context 262 | Switched to context "user2-context". 263 | $ kubectl config current-context 264 | user2-context 265 | ``` 266 | 267 | `user2` can view secrets in **all** namespaces as ClusterRole is used 268 | 269 | ```console 270 | $ kubectl get secret -n default 271 | NAME TYPE DATA AGE 272 | default-token-x7hpm kubernetes.io/service-account-token 3 2d18h 273 | jonsnow Opaque 2 3m43s 274 | $ kubectl get secret -n default2 275 | NAME TYPE DATA AGE 276 | dany Opaque 2 3m30s 277 | default-token-ht7gn kubernetes.io/service-account-token 3 4m19s 278 | ``` 279 | 280 | `user2` cannot create secrets, as the permission is only to read 281 | 282 | ```console 283 | $ kubectl create secret generic bran --from-literal=username=bran --from-literal=password=didnothing -n default 284 | Error from server (Forbidden): secrets is forbidden: User "user2" cannot create resource "secrets" in API group "" in the namespace "default" 285 | ``` 286 | 287 | Switch to `manager1`. Note that manager1 belongs to group `managers`. 288 | 289 | ```console 290 | $ kubectl config use-context manager1-context 291 | Switched to context "manager1-context". 
292 | $ kubectl config current-context 293 | manager1-context 294 | ``` 295 | 296 | `manager1` is able to view secrets in all namespaces, as it belongs to group `managers`, which is given read access to secrets in all namespaces 297 | 298 | ```console 299 | $ kubectl get secret -n default 300 | NAME TYPE DATA AGE 301 | default-token-x7hpm kubernetes.io/service-account-token 3 2d18h 302 | jonsnow Opaque 2 4m42s 303 | $ kubectl get secret -n default2 304 | NAME TYPE DATA AGE 305 | dany Opaque 2 4m37s 306 | default-token-ht7gn kubernetes.io/service-account-token 3 5m26s 307 | ``` 308 | 309 | ## Clean up 310 | 311 | ```console 312 | $ kubectl config use-context minikube 313 | $ kubectl delete secret jonsnow -n default && kubectl delete secret dany -n default2 314 | $ kubectl delete -f role.yaml -f role-binding.yaml -f cluster-role.yaml -f cluster-role-binding.yaml 315 | role.rbac.authorization.k8s.io "secret-reader" deleted 316 | rolebinding.rbac.authorization.k8s.io "read-secrets" deleted 317 | clusterrole.rbac.authorization.k8s.io "secret-reader" deleted 318 | clusterrolebinding.rbac.authorization.k8s.io "read-secrets-global" deleted 319 | $ kubectl delete ns default2 320 | namespace "default2" deleted 321 | $ kubectl config delete-context user1-context && kubectl config delete-context user2-context && kubectl config delete-context manager1-context 322 | deleted context user1-context from /path/to/.kube/config 323 | deleted context user2-context from /path/to/.kube/config 324 | deleted context manager1-context from /path/to/.kube/config 325 | $ kubectl config unset users.user1 && kubectl config unset users.user2 && kubectl config unset users.manager1 326 | Property "users.user1" unset. 327 | Property "users.user2" unset. 328 | Property "users.manager1" unset. 329 | $ rm user1.* user2.* manager1.* 330 | ``` 331 | 332 | ## References 333 | 334 | * [RBAC with Kubernetes in Minikube](https://medium.com/@HoussemDellai/rbac-with-kubernetes-in-minikube-4deed658ea7b) 335 | * -------------------------------------------------------------------------------- /07-lesson/cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | # This cluster role binding allows anyone in the "manager" group to read secrets in any namespace. 
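# Note: the group bound below is named "managers" (plural); subject names must match the
# O= (group) and CN= (user) fields of the certificates generated by utils.sh.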
3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: read-secrets-global 6 | subjects: 7 | - kind: Group 8 | name: managers # Name is case sensitive 9 | apiGroup: rbac.authorization.k8s.io 10 | - kind: User 11 | name: user2 # Name is case sensitive 12 | apiGroup: rbac.authorization.k8s.io 13 | roleRef: 14 | kind: ClusterRole 15 | name: secret-reader 16 | apiGroup: rbac.authorization.k8s.io -------------------------------------------------------------------------------- /07-lesson/cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | # "namespace" omitted since ClusterRoles are not namespaced 5 | name: secret-reader 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["secrets"] 9 | verbs: ["get", "watch", "list"] -------------------------------------------------------------------------------- /07-lesson/images/demo0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/07-lesson/images/demo0.png -------------------------------------------------------------------------------- /07-lesson/images/demo1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/07-lesson/images/demo1.png -------------------------------------------------------------------------------- /07-lesson/images/rbac0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/07-lesson/images/rbac0.png -------------------------------------------------------------------------------- /07-lesson/role-binding.yaml: -------------------------------------------------------------------------------- 1 | kind: RoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: read-secrets 5 | namespace: default 6 | subjects: 7 | - kind: User 8 | name: user1 9 | apiGroup: rbac.authorization.k8s.io 10 | roleRef: 11 | kind: Role 12 | name: secret-reader # <-- refers to role name 13 | apiGroup: rbac.authorization.k8s.io -------------------------------------------------------------------------------- /07-lesson/role.yaml: -------------------------------------------------------------------------------- 1 | kind: Role 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | namespace: default 5 | name: secret-reader 6 | rules: 7 | - apiGroups: [""] # the core API group 8 | resources: ["secrets"] 9 | verbs: ["get", "watch", "list"] -------------------------------------------------------------------------------- /07-lesson/utils.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | 3 | function create_user() 4 | { 5 | NAME=$1 6 | GROUP=$2 7 | openssl genrsa -out $NAME.key 2048 8 | openssl req -new -key $NAME.key -out $NAME.csr -subj "/CN=$NAME/O=$GROUP" 9 | openssl x509 -req -in $NAME.csr -CA ~/.minikube/ca.crt -CAkey ~/.minikube/ca.key -CAcreateserial -out $NAME.crt -days 500 10 | } 11 | -------------------------------------------------------------------------------- /08-lesson/README.md: -------------------------------------------------------------------------------- 1 | # Deploying Stateful Application with StatefulSet 2 | 3 | ## Overview 4 | 5 | ### Persistence and Containers 6 | 7 
| * Containers are designed to be stateless 8 | * Containers use ephemeral storage 9 | * Pods can be made stateful through volumes 10 | * Running databases could be challenging 11 | * Lack of stable naming convention 12 | * Lack of stable persistent storage per Pod 13 | 14 | ### Introducing StatefulSets 15 | 16 | * Bringing the concept of ReplicaSets to stateful Pods 17 | * Enables running Pods in a "clusteres mode" 18 | * Ideal for deploying highly available database workloads 19 | 20 | StatefulSets are valuable for applications that require one or more of the following: 21 | 22 | * Stable, unique network identifiers. 23 | * Stable, persistent storage. 24 | * Ordered, graceful deployment and scaling. 25 | * Ordered, automated rolling updates. 26 | 27 | In the above, stable is synonymous with persistence across Pod (re)scheduling. If an application doesn’t require any stable identifiers or ordered deployment, deletion, or scaling, you should deploy your application with a controller that provides a set of stateless replicas. Controllers such as Deployment or ReplicaSet may be better suited to your stateless needs. 28 | 29 | Consider a MongoDB cluster. Each MongoDB pod needs to be named deterministically as each pod needs to be aware of other pod's hostname to send keep-alive messages and perform replication. 30 | 31 | `Deployment` with 3 replicas will create 3 pods with random prefix and is exposed as 1 `Service`. The DNS server will only have 1 entry and thus the pods will not be able to communicate with each other. The pods will share storage. Thus `Deployment` is suitable for stateless application. 32 | 33 | `StatefulSet` with 3 replicas will create 3 pods with ordered (and hence deterministic) prefix. The DSN server will also have 3 corresponding entries and thus the pods will be able to communicate with each other. The pods will have its own separate storage. 34 | 35 | ![Deploying MongoDB with StatefulSet vs Deployment](./images/stateful0.png) 36 | 37 | ## Demo 38 | 39 | Create a `Role Binding` to allow `mongodb-sidecar` container to view the MongoDB pods in order to configure replication. RBAC will be covered in a separate topic, so we don't have to discuss in detail now. 40 | 41 | ```console 42 | $ kubectl create -f mongo-role.yaml 43 | clusterrolebinding.rbac.authorization.k8s.io/default-view created 44 | ``` 45 | 46 | Create a "helper" pod to perform `nslookup` for demonstrations in later steps. Note that this pod has no impact on the overall deployment 47 | 48 | ```console 49 | $ kubectl create -f helper.yaml 50 | pod/helper created 51 | ``` 52 | 53 | Deploy a headless service. The StatefulSet feature is used with a dedicated “service” that points to each of its member pods. This service should be “headless,” meaning that it doesn’t create ClusterIP for load balancing, but is used for static DNS naming of pods that will be launched. This service name will be referenced in “spec: serviceName: ” section of the StatefulSet configuration file. It will cause the creation of enumerated DNS records in this format: mongo-0,” mongo-1,” mongo-2” etc. We will see this in effect in the later steps. 54 | 55 | ```console 56 | kubectl create -f mongo-headless.yaml 57 | service/mongo created 58 | ``` 59 | 60 | Observe storageclass `standard`. When provisioned, a PV with type `HostPath` will be created and a PVC will be automatically bound to said PV. 
This is specified in the `statefulset` yaml [file](./mongo-statefulset.yaml) 61 | 62 | ```console 63 | $ kubectl get storageclass standard 64 | NAME PROVISIONER AGE 65 | standard (default) k8s.io/minikube-hostpath 45h 66 | ``` 67 | 68 | Create MongoDB `statefulset` 69 | 70 | ```console 71 | $ kubectl create -f mongo-statefulset.yaml 72 | statefulset.apps/mongo created 73 | ``` 74 | 75 | As we can see below, the pod name is ordered i.e. `mongo-1`, `mongo-2`, etc. Compare this to `Deployments`, whereby the pod name is non-deterministic i.e. there is always a random string prefix e.g. `mongo-a4s2` 76 | 77 | ```console 78 | $ kubectl get pods 79 | NAME READY STATUS RESTARTS AGE 80 | helper 1/1 Running 0 43s 81 | mongo-0 2/2 Running 0 16s 82 | mongo-1 2/2 Running 0 11s 83 | mongo-2 2/2 Running 0 6s 84 | ``` 85 | 86 | PVC is automatically created for each pod 87 | 88 | ```console 89 | $ kubectl get pvc 90 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE 91 | mongo-persistent-storage-mongo-0 Bound pvc-aa331c71-7d1a-11e9-a88e-080027cf8df0 2Gi RWO standard 66s 92 | mongo-persistent-storage-mongo-1 Bound pvc-adc435fd-7d1a-11e9-a88e-080027cf8df0 2Gi RWO standard 60s 93 | mongo-persistent-storage-mongo-2 Bound pvc-b177d0fb-7d1a-11e9-a88e-080027cf8df0 2Gi RWO standard 53s 94 | ``` 95 | 96 | Each PVC is bound to PV 97 | 98 | ```console 99 | $ kubectl get pv 100 | NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE 101 | pvc-aa331c71-7d1a-11e9-a88e-080027cf8df0 2Gi RWO Delete Bound default/mongo-persistent-storage-mongo-0 standard 22s 102 | pvc-adc435fd-7d1a-11e9-a88e-080027cf8df0 2Gi RWO Delete Bound default/mongo-persistent-storage-mongo-1 standard 17s 103 | pvc-b177d0fb-7d1a-11e9-a88e-080027cf8df0 2Gi RWO Delete Bound default/mongo-persistent-storage-mongo-2 standard 10s 104 | ``` 105 | 106 | Try to perform `nslookup` on `mongo`, which is the headless service name. The DNS entries for the pods are automatically added: 107 | 108 | ```console 109 | $ kubectl exec helper nslookup mongo 110 | Server: 10.96.0.10 111 | Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local 112 | 113 | Name: mongo 114 | Address 1: 172.17.0.7 mongo-1.mongo.default.svc.cluster.local 115 | Address 2: 172.17.0.6 mongo-0.mongo.default.svc.cluster.local 116 | Address 3: 172.17.0.8 mongo-2.mongo.default.svc.cluster.local 117 | ``` 118 | 119 | View the replication configuration. The configuration is performed automatically by `mongo-sidecar` container, which detects all the MongoDB pods in the namespace. 120 | 121 | ```console 122 | $ kubectl exec -it mongo-0 mongo 123 | Welcome to the MongoDB shell. 
124 | rs0:PRIMARY> 125 | $ rs.conf() 126 | { 127 | "_id" : "rs0", 128 | "version" : 5, 129 | "protocolVersion" : NumberLong(1), 130 | "members" : [ 131 | { 132 | "_id" : 0, 133 | "host" : "mongo-0.mongo.default.svc.cluster.local:27017", 134 | "arbiterOnly" : false, 135 | "buildIndexes" : true, 136 | "hidden" : false, 137 | "priority" : 1, 138 | "tags" : { 139 | 140 | }, 141 | "slaveDelay" : NumberLong(0), 142 | "votes" : 1 143 | }, 144 | { 145 | "_id" : 1, 146 | "host" : "mongo-1.mongo.default.svc.cluster.local:27017", 147 | "arbiterOnly" : false, 148 | "buildIndexes" : true, 149 | "hidden" : false, 150 | "priority" : 1, 151 | "tags" : { 152 | 153 | }, 154 | "slaveDelay" : NumberLong(0), 155 | "votes" : 1 156 | }, 157 | { 158 | "_id" : 2, 159 | "host" : "mongo-2.mongo.default.svc.cluster.local:27017", 160 | "arbiterOnly" : false, 161 | "buildIndexes" : true, 162 | "hidden" : false, 163 | "priority" : 1, 164 | "tags" : { 165 | 166 | }, 167 | "slaveDelay" : NumberLong(0), 168 | "votes" : 1 169 | } 170 | ], 171 | "settings" : { 172 | "chainingAllowed" : true, 173 | "heartbeatIntervalMillis" : 2000, 174 | "heartbeatTimeoutSecs" : 10, 175 | "electionTimeoutMillis" : 10000, 176 | "catchUpTimeoutMillis" : 60000, 177 | "getLastErrorModes" : { 178 | 179 | }, 180 | "getLastErrorDefaults" : { 181 | "w" : 1, 182 | "wtimeout" : 0 183 | }, 184 | "replicaSetId" : ObjectId("5ce61664ff641f44004e9eb1") 185 | } 186 | } 187 | $ exit 188 | bye 189 | ``` 190 | 191 | Scale up to 4 192 | 193 | ```console 194 | $ kubectl scale --replicas=4 statefulset mongo 195 | statefulset.apps/mongo scaled 196 | ``` 197 | 198 | As expected, the number of pods increase to 4 199 | 200 | ```console 201 | $ kubectl get pods 202 | helper 1/1 Running 0 6m16s 203 | mongo-0 2/2 Running 0 5m49s 204 | mongo-1 2/2 Running 0 5m44s 205 | mongo-2 2/2 Running 0 5m39s 206 | mongo-3 2/2 Running 0 38s 207 | ``` 208 | 209 | DNS entry for the new MongoDB pod is also added automatically 210 | 211 | ```console 212 | $ kubectl exec helper nslookup mongo 213 | Server: 10.96.0.10 214 | Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local 215 | 216 | Name: mongo 217 | Address 1: 172.17.0.6 mongo-0.mongo.default.svc.cluster.local 218 | Address 2: 172.17.0.8 mongo-2.mongo.default.svc.cluster.local 219 | Address 3: 172.17.0.7 mongo-1.mongo.default.svc.cluster.local 220 | Address 4: 172.17.0.9 mongo-3.mongo.default.svc.cluster.local 221 | ``` 222 | 223 | Recheck the replication configuration and observe a new host, `mongo-3.mongo.default.svc.cluster.local:27017` 224 | 225 | ```console 226 | $ kubectl exec -it mongo-0 mongo 227 | Welcome to the MongoDB shell. 228 | rs0:PRIMARY> 229 | $ rs.conf() # observe that there is a new host 230 | $ exit 231 | bye 232 | ``` 233 | 234 | Now let's see what happens when `mongo-2` is deleted. 235 | 236 | ```console 237 | kubectl delete pod mongo-2 238 | pod "mongo-2" deleted 239 | ``` 240 | 241 | A new pod with the same name will be created. 
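A quick way to watch the controller do this, and to confirm that the replacement pod re-attaches to the same PersistentVolumeClaim, is sketched below (it relies on the `role=mongo` label and the claim names used in this lesson):

```console
kubectl get pods -l role=mongo --watch
kubectl get pvc mongo-persistent-storage-mongo-2
```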
242 | 243 | ```console 244 | kubectl get pod 245 | NAME READY STATUS RESTARTS AGE 246 | helper 1/1 Running 0 8m16s 247 | mongo-0 2/2 Running 0 7m49s 248 | mongo-1 2/2 Running 0 7m44s 249 | mongo-2 0/2 ContainerCreating 0 1s 250 | mongo-3 2/2 Running 0 2m38s 251 | ``` 252 | 253 | Clear up `statefulset` and its dependencies 254 | 255 | ```console 256 | $ kubectl delete -f helper.yaml -f mongo-headless.yaml -f mongo-role.yaml -f mongo-statefulset.yaml 257 | pod "helper" deleted 258 | service "mongo" deleted 259 | clusterrolebinding.rbac.authorization.k8s.io "default-view" deleted 260 | statefulset.apps "mongo" deleted 261 | ``` 262 | 263 | Delete `pvc` and `pv` 264 | 265 | ```console 266 | $ kubectl delete pvc --all 267 | persistentvolumeclaim "mongo-persistent-storage-mongo-0" deleted 268 | persistentvolumeclaim "mongo-persistent-storage-mongo-1" deleted 269 | persistentvolumeclaim "mongo-persistent-storage-mongo-2" deleted 270 | persistentvolumeclaim "mongo-persistent-storage-mongo-3" deleted 271 | $ kubectl get pv 272 | No resources found. 273 | ``` -------------------------------------------------------------------------------- /08-lesson/helper.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: helper 5 | spec: 6 | containers: 7 | - name: helper 8 | image: gcr.io/google_containers/busybox:1.27.2 9 | command: ["/bin/sh", "-c", "--" ] 10 | args: [ "while true; do sleep 30; done;" ] 11 | restartPolicy: Never -------------------------------------------------------------------------------- /08-lesson/images/stateful0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/08-lesson/images/stateful0.png -------------------------------------------------------------------------------- /08-lesson/mongo-headless.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: mongo 5 | labels: 6 | name: mongo 7 | spec: 8 | ports: 9 | - port: 27017 10 | targetPort: 27017 11 | clusterIP: None # <-- setting this to none signifies Headless Service 12 | selector: 13 | role: mongo -------------------------------------------------------------------------------- /08-lesson/mongo-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1beta1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: default-view 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: view 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | namespace: default -------------------------------------------------------------------------------- /08-lesson/mongo-statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: StatefulSet 3 | metadata: 4 | name: mongo 5 | spec: 6 | serviceName: "mongo" # <--matches the name of mongo headless service 7 | replicas: 3 8 | template: 9 | metadata: 10 | labels: 11 | role: mongo 12 | environment: test 13 | spec: 14 | terminationGracePeriodSeconds: 10 15 | containers: 16 | - name: mongo 17 | image: mongo:3.4.20-xenial 18 | command: 19 | - mongod 20 | - "--replSet" 21 | - rs0 22 | - "--bind_ip" 23 | - 0.0.0.0 24 | - "--smallfiles" 25 | - "--noprealloc" 26 | ports: 27 | - containerPort: 27017 28 | volumeMounts: 
29 | - name: mongo-persistent-storage 30 | mountPath: /data/db 31 | - name: mongo-sidecar 32 | # This sidecar will configure the MongoDB replica set automatically. 33 | # A "sidecar" is a helper container which helps the main container do its work. 34 | image: cvallance/mongo-k8s-sidecar 35 | env: 36 | - name: MONGO_SIDECAR_POD_LABELS 37 | value: "role=mongo,environment=test" 38 | - name: KUBERNETES_MONGO_SERVICE_NAME 39 | value: mongo 40 | volumeClaimTemplates: 41 | - metadata: 42 | name: mongo-persistent-storage 43 | annotations: 44 | volume.beta.kubernetes.io/storage-class: "standard" #<-- use minikube storage provisioner to automatically create pvc and pv 45 | spec: 46 | accessModes: [ "ReadWriteOnce" ] 47 | resources: 48 | requests: 49 | storage: 2Gi -------------------------------------------------------------------------------- /09-lesson/ConfigMaps/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: log-config 5 | namespace: default 6 | data: 7 | log.level: INFO 8 | log.location: LOCAL 9 | -------------------------------------------------------------------------------- /09-lesson/ConfigMaps/demo.sh: -------------------------------------------------------------------------------- 1 | 2 | # Deploy the ConfigMap 3 | kubectl create -f configmap.yaml 4 | 5 | # Create the Pod with Env Var 6 | kubectl create -f pod-cmd.yaml --validate=false 7 | 8 | # check the logs 9 | kubectl logs test-pod-cmd 10 | 11 | # Create the Pod with Env Var 12 | kubectl create -f pod-env.yaml 13 | 14 | # Check the env vars 15 | kubectl exec -it test-pod-env /bin/sh 16 | 17 | # Create thePod with Env Var 18 | kubectl create -f pod-vol.yaml 19 | 20 | # Check logs 21 | kubectl logs test-pod-vol 22 | 23 | # Access the shell 24 | kubectl exec -it test-pod-vol /bin/sh 25 | 26 | # Check the files 27 | cd /etc/config 28 | cat log.level 29 | cat log.location 30 | 31 | # Exit & clean up 32 | exit 33 | 34 | # additional content 35 | 36 | kubectl get configmap 37 | 38 | kubectl get configmap -o yaml 39 | 40 | kubectl get pod --show-all 41 | 42 | kubectl get pod -a 43 | 44 | env 45 | -------------------------------------------------------------------------------- /09-lesson/ConfigMaps/demo2.sh: -------------------------------------------------------------------------------- 1 | # Create the ConfigMap from the configuration file 2 | kubectl create configmap example-redis-config --from-file=redis-config 3 | 4 | # Show the configMap in YAML format 5 | kubectl get configmap example-redis-config -o yaml 6 | 7 | # Create the Redis Pod 8 | kubectl create -f redis.yaml 9 | 10 | # Check the Pod; Wait for the Pod to be created 11 | kubectl get pods 12 | 13 | # get log file from redis container 14 | kubectl logs redis 15 | 16 | # Check the configuration 17 | kubectl exec -it redis redis-cli 18 | 19 | # Exectute the following commands in the Redis shell at 127.0.0.1;6379> 20 | CONFIG GET maxmemory 21 | CONFIG GET maxmemory-policy 22 | 23 | # Clean up 24 | kubectl delete configmap example-redis-config 25 | kubectl delete pod redis 26 | -------------------------------------------------------------------------------- /09-lesson/ConfigMaps/pod-cmd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: test-pod-cmd 5 | spec: 6 | containers: 7 | - name: test-container 8 | image: gcr.io/google_containers/busybox 9 | command: ["/bin/sh", "-c", "echo 
Error Level: $(LOG_LEVEL) - Error Location $(LOG_LOCATION)" ] 10 | env: 11 | - name: LOG_LEVEL 12 | valueFrom: 13 | configMapKeyRef: 14 | name: log-config 15 | key: log.level 16 | - name: LOG_LOCATION 17 | valueFrom: 18 | configMapKeyRef: 19 | name: log-config 20 | key: log.location 21 | restartPolicy: Never 22 | -------------------------------------------------------------------------------- /09-lesson/ConfigMaps/pod-env.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: test-pod-env 5 | spec: 6 | containers: 7 | - name: test-container 8 | image: gcr.io/google_containers/busybox 9 | command: ["/bin/sh", "-c", "--" ] 10 | args: [ "while true; do sleep 30; done;" ] 11 | env: 12 | - name: LOG_LEVEL 13 | valueFrom: 14 | configMapKeyRef: 15 | name: log-config 16 | key: log.level 17 | - name: LOG_LOCATION 18 | valueFrom: 19 | configMapKeyRef: 20 | name: log-config 21 | key: log.location 22 | restartPolicy: Never -------------------------------------------------------------------------------- /09-lesson/ConfigMaps/pod-vol.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: test-pod-vol 5 | spec: 6 | containers: 7 | - name: test-container 8 | image: gcr.io/google_containers/busybox 9 | command: ["/bin/sh", "-c", "--" ] 10 | args: [ "while true; do sleep 30; done;" ] 11 | volumeMounts: 12 | - name: config-volume 13 | mountPath: /etc/config 14 | volumes: 15 | - name: config-volume 16 | configMap: 17 | name: log-config 18 | restartPolicy: Never -------------------------------------------------------------------------------- /09-lesson/ConfigMaps/redis.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: redis 5 | spec: 6 | containers: 7 | - name: redis 8 | image: redis:5.0.5-alpine 9 | command: 10 | - redis-server 11 | - /myredis/redis.conf 12 | env: 13 | - name: MASTER 14 | value: "true" 15 | ports: 16 | - containerPort: 6379 17 | resources: 18 | limits: 19 | cpu: "0.1" 20 | volumeMounts: 21 | - mountPath: /redis-master-data 22 | name: data 23 | - mountPath: /myredis 24 | name: config 25 | volumes: 26 | - name: data 27 | emptyDir: {} 28 | - name: config 29 | configMap: 30 | name: example-redis-config 31 | items: 32 | - key: redis-config 33 | path: redis.conf -------------------------------------------------------------------------------- /09-lesson/README.md: -------------------------------------------------------------------------------- 1 | # Using ConfigMaps & Secrets in Kubernetes 2 | 3 | ## Ojectives 4 | - What are ConfigMaps 5 | - When to use ConfigMaps 6 | - Use cases for ConfigMaps 7 | - Overview of Secrets 8 | - Using Secrets 9 | - Key Takeaways 10 | --- 11 | 12 | ## Configuring Containerized Applications 13 | - Applications expect configuration from 14 | - Configuration files 15 | - Command line arguments 16 | - Environment variables 17 | - Configuration is always decoupled from applications 18 | - INI 19 | - XML 20 | - JSON 21 | - Custom Format 22 | - Container Images shouldn't hold application configuration 23 | - Essential for keeping containerized applications portable 24 | ## What are ConfigMaps? 
25 | - Kubernetes objects for injecting containers with configuration data 26 | - ConfigMaps keep containers agnostic of Kubernetes 27 | - They can be used to store fine-grained or coarse-grained configuration 28 | - Individual properties 29 | - Entire configuration file 30 | - JSON files 31 | - ConfigMaps hold configuration in Key-Value pairs accessible to Pods 32 | - Similar to /etc directory and files in Linux OS 33 | ## Accessing ConfigMaps from Pods 34 | - Configuration data can be consumed in pods in a variety of ways 35 | - ConfigMap can be used to: 36 | **1. Populate the value of environment variables** 37 | **2. Set command-line arguments in a container** 38 | **3. Populate configuration files in a volume** 39 | - Users and system components may store configuration data in a ConfigMap 40 | ## Demo 41 | ### Using ConfigMaps 42 | Working from these set of comands: 43 | ```bash 44 | cd ConfigMaps 45 | # Deploy the ConfigMap 46 | kubectl create -f configmap.yaml 47 | 48 | # Create the Pod with Env Var 49 | kubectl create -f pod-cmd.yaml 50 | 51 | # check the logs 52 | kubectl logs test-pod-cmd 53 | 54 | # Create the Pod with Env Var 55 | kubectl create -f pod-env.yaml 56 | 57 | # Check the env vars 58 | kubectl exec -it test-pod-env /bin/sh 59 | 60 | # Create the Pod with Env Var 61 | kubectl create -f pod-vol.yaml 62 | 63 | # Access the shell 64 | kubectl exec -it test-pod-vol /bin/sh 65 | 66 | # Check the files 67 | cd /etc/config 68 | cat log.level 69 | cat log.location 70 | 71 | # Exit & clean up 72 | exit 73 | 74 | # additional content 75 | 76 | kubectl get configmap 77 | 78 | kubectl get configmap -o yaml 79 | 80 | kubectl get pod --show-all 81 | 82 | kubectl get pod -a 83 | 84 | env 85 | ``` 86 | 87 | Creating a ConfigMap from console 88 | 89 | ```console 90 | $ kubectl create -f configmap.yaml 91 | configmap "log-config" created 92 | $ kubectl get configmap 93 | NAME DATA AGE 94 | log-config 2 3m 95 | $ kubectl describe configmap log-config 96 | Name: log-config 97 | Namespace: default 98 | Labels: 99 | Annotations: 100 | 101 | Data 102 | ==== 103 | log.level: 104 | ---- 105 | INFO 106 | log.location: 107 | ---- 108 | LOCAL 109 | Events: 110 | ``` 111 | 112 | Creating the Pod for the command line. 113 | 114 | ```console 115 | $ kubectl create -f pod-cmd.yaml --validate=false 116 | pod "test-pod-cmd" created 117 | $ kubectl get pod -a 118 | NAME READY STATUS RESTARTS AGE 119 | test-pod-cmd 0/1 Completed 0 1m 120 | ``` 121 | 122 | Creating the Pod with environment variables. 
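The pod is created the same way as the previous one; the command from the lesson's demo script is repeated here so the walkthrough stays runnable on its own:

```console
kubectl create -f pod-env.yaml
```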
123 | 124 | ```console 125 | $ kubectl exec -it test-pod-env /bin/sh 126 | / # env 127 | KUBERNETES_SERVICE_PORT=443 128 | KUBERNETES_PORT=tcp://10.96.0.1:443 129 | LOG_LEVEL=INFO 130 | HOSTNAME=test-pod-env 131 | SHLVL=1 132 | HOME=/root 133 | KUBERNETES_PORT_443_TCP_ADDR=10.96.0.1 134 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 135 | KUBERNETES_PORT_443_TCP_PORT=443 136 | KUBERNETES_PORT_443_TCP_PROTO=tcp 137 | KUBERNETES_SERVICE_PORT_HTTPS=443 138 | KUBERNETES_PORT_443_TCP=tcp://10.96.0.1:443 139 | LOG_LOCATION=LOCAL 140 | PWD=/ 141 | KUBERNETES_SERVICE_HOST=10.96.0.1 142 | / # exit 143 | ``` 144 | 145 | Creating Pods with volume files 146 | 147 | ```console 148 | $ kubectl create -f pod-vol.yaml 149 | pod "test-pod-vol" created 150 | $ kubectl get pod test-pod-vol 151 | NAME READY STATUS RESTARTS AGE 152 | test-pod-vol 1/1 Running 0 51s 153 | ``` 154 | 155 | Access the log files log.level and log.location 156 | 157 | ```console 158 | $ kubectl exec -it test-pod-vol /bin/sh 159 | /etc/config # ls 160 | log.level log.location 161 | /etc/config # cat log.level 162 | INFO/etc/config # cat log.location 163 | LOCAL/etc/config # exit 164 | $ kubectl get po 165 | NAME READY STATUS RESTARTS AGE 166 | test-pod-env 1/1 Running 0 22m 167 | test-pod-vol 1/1 Running 0 8m 168 | ``` 169 | 170 | Second demo with Redis file... 171 | 172 | ```console 173 | $ kubectl create configmap example-redis-config --from-file=redis-config 174 | configmap "example-redis-config" created 175 | λ kubectl get configmap example-redis-config -o yaml 176 | apiVersion: v1 177 | data: 178 | redis-config: "maxmemory 5mb\r\nmaxmemory-policy allkeys-lru\r\n" 179 | kind: ConfigMap 180 | metadata: 181 | creationTimestamp: 2017-12-13T09:16:08Z 182 | name: example-redis-config 183 | namespace: default 184 | resourceVersion: "363254" 185 | selfLink: /api/v1/namespaces/default/configmaps/example-redis-config 186 | uid: 414ede6c-dfe6-11e7-bb6c-08002720cfab 187 | $ kubectl create -f redis.yaml 188 | pod "redis" created 189 | $ kubectl logs redis 190 | 1:C 21 May 2019 05:50:53.730 # oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo 191 | 1:C 21 May 2019 05:50:53.730 # Redis version=5.0.5, bits=64, commit=00000000, modified=0, pid=1, just started 192 | 1:C 21 May 2019 05:50:53.730 # Configuration loaded 193 | 1:M 21 May 2019 05:50:53.765 * Running mode=standalone, port=6379. 194 | 1:M 21 May 2019 05:50:53.765 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128. 195 | 1:M 21 May 2019 05:50:53.765 # Server initialized 196 | 1:M 21 May 2019 05:50:53.765 * Ready to accept connections 197 | $ kubectl exec -it redis redis-cli 198 | 127.0.0.1:6379> CONFIG GET maxmemory 199 | 1) "maxmemory" 200 | 2) "5242880" 201 | 127.0.0.1:6379> CONFIG GET maxmemory-policy 202 | 1) "maxmemory-policy" 203 | 2) "allkeys-lru" 204 | 127.0.0.1:6379> exit 205 | ``` 206 | 207 | Time to do some clean up so that we can move to the section of the demo... 
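Before tearing things down, one behavioral difference between the consumption styles just demonstrated is worth noting: environment variables are resolved only once, when the container starts, whereas files projected from a ConfigMap volume are refreshed in place (after a short sync delay) if the ConfigMap is updated. The current data can always be inspected directly, for example:

```console
kubectl get configmap log-config -o jsonpath='{.data}{"\n"}'
```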
208 | 209 | ```console 210 | $ kubectl delete configmap example-redis-config log-config 211 | configmap "example-redis-config" deleted 212 | configmap "log-config" deleted 213 | $ kubectl delete pod redis test-pod-cmd test-pod-env test-pod-vol 214 | pod "redis" deleted 215 | pod "test-pod-cmd" deleted 216 | pod "test-pod-env" deleted 217 | pod "test-pod-vol" deleted 218 | ``` 219 | 220 | ## Using Secrets 221 | - Secret is an object that contains a small amount of sensitive data such as a password, a token, or a key 222 | - Secrets reduce the risk of exposing sensitive data to unwanted entities 223 | - Like ConfigMaps, Secrets are Kubernetes API objects created outside of Pods 224 | - Secrets belong to a specific Kubernetes Namespace 225 | - The size of each Secret cannot exceed 1MB 226 | - Secrets are registered with Kubernetes Master 227 | - Secrets can be mounted as Volumes or exposed as environment variables 228 | - Secret is only sent to the Node hosting the Pod that requires access 229 | - Each Secret is stored in a tempfs volumes that restrict access to the rest of the applications in the Node 230 | - Communication between the Kubernetes API Server and Node is secured through SSL/TLS 231 | 232 | ## Demo 233 | ### Using Secrets 234 | ```bash 235 | # Create a generic secret from files 236 | $ kubectl create secret generic dbsecret --from-file=./username.txt --from-file=./password.txt 237 | 238 | # Check the creation of Secret 239 | $ kubectl get secret 240 | 241 | # Check the creation of Secret in YAML 242 | $ kubectl get secret dbsecret -o yaml 243 | 244 | # Decode the secret 245 | $ echo UzBtZVBAc3N3MHJE | base64 -D 246 | 247 | # Create the Pod 248 | $ kubectl create -f secret-pod.yaml 249 | 250 | # Access the Secret in the Pod 251 | $ kubectl exec -it secret-pod /bin/sh 252 | 253 | # Clean up 254 | $ kubectl delete secret dbsecret 255 | $ kubectl delete -f secret-pod.yaml 256 | ``` 257 | 258 | Create a generic secret from files 259 | 260 | ```console 261 | $ cd Secrets/Demo1 262 | $ kubectl create secret generic dbsecret --from-file=./username.txt --from-file=./password.txt 263 | secret "dbsecret" created 264 | $ kubectl get secret 265 | NAME TYPE DATA AGE 266 | dbsecret Opaque 2 54s 267 | default-token-zk79b kubernetes.io/service-account-token 3 10d 268 | $ kubectl get secret dbsecret -o yaml 269 | apiVersion: v1 270 | data: 271 | password.txt: UzBtZVBAc3N3MHJE 272 | username.txt: YWRtaW4= 273 | kind: Secret 274 | metadata: 275 | creationTimestamp: 2019-05-21T06:10:36Z 276 | name: dbsecret 277 | namespace: default 278 | resourceVersion: "109510" 279 | selfLink: /api/v1/namespaces/default/secrets/dbsecret 280 | uid: 267a97c3-7b8f-11e9-b797-080027466657 281 | type: Opaque 282 | ``` 283 | 284 | Let's look on how Kubernetes encode the login and password, taking the data from `kubectl get secret -o yaml` 285 | data: 286 | password.txt: UzBtZVBAc3N3MHJE 287 | username.txt: YWRtaW4= 288 | 289 | Like you may see these are the exact values shown from file password.txt and username.txt 290 | 291 | ```console 292 | $ echo UzBtZVBAc3N3MHJE | base64 -D 293 | S0meP@ssw0rD 294 | $ echo YWRtaW4= | base64 -D 295 | admin 296 | ``` 297 | 298 | Next step is to test the creation of the pod with its secret file... 
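One portability note on the decode step above: `-D` is the BSD/macOS spelling of the flag; on Linux (GNU coreutils) use `-d` or `--decode`:

```console
echo UzBtZVBAc3N3MHJE | base64 -d    # S0meP@ssw0rD
echo YWRtaW4= | base64 -d            # admin
```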
298 | The next step is to test the creation of the Pod that mounts this Secret as a volume...
299 | 
300 | ```console
301 | $ kubectl create -f secret-pod.yaml
302 | pod "secret-pod" created
303 | $ kubectl get po secret-pod
304 | NAME READY STATUS RESTARTS AGE
305 | secret-pod 1/1 Running 0 54s
306 | $ kubectl exec -it secret-pod /bin/sh
307 | # cd /etc/foo
308 | # ls
309 | password.txt username.txt
310 | # cat password.txt
311 | S0meP@ssw0rD#
312 | # cat username.txt
313 | admin# exit
314 | ```
315 | 
316 | Let's clean up the Secret and the Pod.
317 | 
318 | ```console
319 | $ kubectl delete secret dbsecret
320 | secret "dbsecret" deleted
321 | $ kubectl delete -f secret-pod.yaml
322 | pod "secret-pod" deleted
323 | ```
324 | 
325 | Demo II: exposing the Secret as environment variables
326 | 
327 | ```console
328 | $ cd Secrets/Demo2
329 | $ kubectl create -f my-secret.yaml
330 | secret "mysecret" created
331 | $ kubectl create -f secret-env-pod.yaml
332 | pod "secret-env-pod" created
333 | $ kubectl exec -it secret-env-pod /bin/sh
334 | # env
335 | KUBERNETES_SERVICE_PORT=443
336 | KUBERNETES_PORT=tcp://10.96.0.1:443
337 | HOSTNAME=secret-env-pod
338 | REDIS_DOWNLOAD_SHA=769b5d69ec237c3e0481a262ff5306ce30db9b5c8ceb14d1023491ca7be5f6fa
339 | HOME=/root
340 | SECRET_PASSWORD=S0meP@ssw0rD
341 | KUBERNETES_PORT_443_TCP_ADDR=10.96.0.1
342 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
343 | KUBERNETES_PORT_443_TCP_PORT=443
344 | KUBERNETES_PORT_443_TCP_PROTO=tcp
345 | SECRET_USERNAME=admin
346 | REDIS_DOWNLOAD_URL=http://download.redis.io/releases/redis-4.0.6.tar.gz
347 | KUBERNETES_SERVICE_PORT_HTTPS=443
348 | KUBERNETES_PORT_443_TCP=tcp://10.96.0.1:443
349 | REDIS_VERSION=4.0.6
350 | GOSU_VERSION=1.10
351 | KUBERNETES_SERVICE_HOST=10.96.0.1
352 | PWD=/data
353 | # exit
354 | ```
355 | 
356 | Clean up
357 | 
358 | ```console
359 | kubectl delete -f secret-env-pod.yaml -f my-secret.yaml
360 | ```
361 | 
362 | ## Key Things to Remember
363 | 
364 | - The Secrets feature is not entirely foolproof
365 | - The API Server stores Secrets unencrypted (as plain text) in etcd by default
366 | - During replication across etcd clusters, Secrets are sent in plain text
367 | - Secret definitions may still get exposed to the outside world
368 | 
369 | Reference:
370 | - [Kubernetes Webinar Series - Using ConfigMaps and Secrets](https://www.youtube.com/watch?v=GoITFljdJdo&index=9&list=PLF3s2WICJlqOiymMaTLjwwHz-MSVbtJPQ)
371 | 
-------------------------------------------------------------------------------- /09-lesson/Secrets/Demo1/demo.sh: --------------------------------------------------------------------------------
1 | # Create a generic secret from files
2 | kubectl create secret generic dbsecret --from-file=./username.txt --from-file=./password.txt
3 | 
4 | # Check the creation of Secret
5 | kubectl get secret
6 | 
7 | # Check the creation of Secret in YAML
8 | kubectl get secret -o yaml
9 | 
10 | # Decode the secret
11 | echo UzBtZVBAc3N3MHJE | base64 -D
12 | 
13 | # Create the Pod
14 | kubectl create -f secret-pod.yaml
15 | 
16 | # Access the Secret in the Pod
17 | kubectl exec -it secret-pod /bin/sh
18 | cd /etc/foo
19 | 
20 | # Clean up
21 | kubectl delete secret dbsecret
22 | kubectl delete -f secret-pod.yaml
23 | 
-------------------------------------------------------------------------------- /09-lesson/Secrets/Demo1/password.txt: --------------------------------------------------------------------------------
1 | S0meP@ssw0rD
-------------------------------------------------------------------------------- /09-lesson/Secrets/Demo1/secret-pod.yaml: --------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 |   name: secret-pod
5 | spec:
6 |   containers:
7 |   - name: redis
8 |     image: redis:5.0.5-alpine
9 |     volumeMounts:
10 |     - name: foo
11 |       mountPath: "/etc/foo"
12 |       readOnly: true
13 |   volumes:
14 |   - name: foo
15 |     secret:
16 |       secretName: dbsecret
17 | 
-------------------------------------------------------------------------------- /09-lesson/Secrets/Demo1/username.txt: --------------------------------------------------------------------------------
1 | admin
-------------------------------------------------------------------------------- /09-lesson/Secrets/Demo2/demo.sh: --------------------------------------------------------------------------------
1 | # Create base64 encoded username (-n so the trailing newline is not encoded)
2 | echo -n admin | base64 # YWRtaW4=
3 | 
4 | # Create base64 encoded password
5 | echo -n S0meP@ssw0rD | base64 # UzBtZVBAc3N3MHJE
6 | 
7 | # Create a generic secret from YAML file
8 | kubectl create -f my-secret.yaml
9 | 
10 | # Create the Pod
11 | kubectl create -f secret-env-pod.yaml
12 | 
13 | # Access the Secret in the Pod
14 | kubectl exec -it secret-env-pod /bin/sh
15 | env
16 | 
17 | # Clean up
18 | kubectl delete -f my-secret.yaml -f secret-env-pod.yaml
19 | 
20 | # https://www.json2yaml.com/
21 | 
-------------------------------------------------------------------------------- /09-lesson/Secrets/Demo2/my-secret.yaml: --------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 |   name: mysecret
5 | type: Opaque
6 | data:
7 |   username: YWRtaW4=
8 |   password: UzBtZVBAc3N3MHJE
9 | 
-------------------------------------------------------------------------------- /09-lesson/Secrets/Demo2/secret-env-pod.yaml: --------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 |   name: secret-env-pod
5 | spec:
6 |   containers:
7 |   - name: mycontainer
8 |     image: gcr.io/google_containers/busybox
9 |     command: ["/bin/sh", "-c", "--" ]
10 |     args: [ "while true; do sleep 30; done;" ]
11 |     env:
12 |     - name: SECRET_USERNAME
13 |       valueFrom:
14 |         secretKeyRef:
15 |           name: mysecret
16 |           key: username
17 |     - name: SECRET_PASSWORD
18 |       valueFrom:
19 |         secretKeyRef:
20 |           name: mysecret
21 |           key: password
22 |   restartPolicy: Never
23 | 
-------------------------------------------------------------------------------- /10-lesson/DaemonSet/demo.sh: --------------------------------------------------------------------------------
1 | # Deploy the ReplicaSet
2 | kubectl create -f nginx-rs.yaml
3 | kubectl get pods -o wide
4 | kubectl scale --replicas=10 rs/nginx
5 | kubectl delete rs nginx
6 | 
7 | # Deploy the DaemonSet
8 | kubectl create -f nginx-ds.yaml
9 | kubectl get pods -o wide
10 | kubectl scale --replicas=10 ds/nginx
11 | kubectl delete ds nginx
12 | 
13 | # Install Sematext agent as a DaemonSet
14 | kubectl create -f sema.yaml
15 | kubectl get pods -o wide
16 | kubectl get ds sematext-agent
17 | 
18 | # Test the agent
19 | kubectl create -f nginx-rs.yaml
20 | kubectl scale --replicas=10 rs/nginx
21 | 
22 | # Clean up
23 | 
24 | 
-------------------------------------------------------------------------------- /10-lesson/DaemonSet/nginx-ds.yaml: --------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: DaemonSet
3 | metadata:
4 |   labels:
5 |     app: nginx
6 |   name: nginx
7 | spec:
8 |   template:
9 |     metadata:
10 |       labels:
11 |         app: nginx
12 |     spec:
13 |       containers:
14 |       - name: nginx
15 |         image: nginx:1.7.9
16 | 
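# Note: a DaemonSet has no replicas field -- it is sized by the number of eligible
# nodes, which is why `kubectl scale` cannot be used with it (see the error shown
# in 10-lesson/README.md). On Kubernetes 1.16 and later this manifest would need
# apiVersion: apps/v1 and a matching spec.selector, since extensions/v1beta1 has
# been removed.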
-------------------------------------------------------------------------------- /10-lesson/DaemonSet/nginx-rs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: ReplicaSet 3 | metadata: 4 | name: nginx 5 | spec: 6 | replicas: 3 7 | template: 8 | metadata: 9 | labels: 10 | app: nginx 11 | spec: 12 | containers: 13 | - name: nginx 14 | image: nginx:1.7.9 15 | -------------------------------------------------------------------------------- /10-lesson/Jobs/Cron/cron.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | name: hello 5 | spec: 6 | schedule: "*/1 * * * *" # every 1 minute 7 | jobTemplate: 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: hello 13 | image: busybox 14 | args: 15 | - /bin/sh 16 | - -c 17 | - date; echo Hello from Kubernetes cluster 18 | restartPolicy: OnFailure -------------------------------------------------------------------------------- /10-lesson/Jobs/Cron/demo.sh: -------------------------------------------------------------------------------- 1 | # Create the Cron Job 2 | kubectl create -f cron.yaml 3 | 4 | # Alternative form 5 | kubectl run hello \ 6 | --schedule="*/1 * * * *" \ 7 | --restart=OnFailure \ 8 | --image=busybox \ 9 | -- /bin/sh -c "date; echo Hello from Kubernetes cluster" 10 | 11 | # Get the Cron Job 12 | kubectl get cronjob hello 13 | 14 | # Get the Job details 15 | kubectl logs hello- 16 | kubectl get jobs --watch 17 | 18 | # Clean up 19 | kubectl delete cronjob hello 20 | -------------------------------------------------------------------------------- /10-lesson/Jobs/OneTime/db-init-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: db-init 5 | labels: 6 | app: myapp 7 | tier: data 8 | spec: 9 | template: 10 | metadata: 11 | name: db-init 12 | spec: 13 | containers: 14 | - name: db-init 15 | image: janakiramm/db-init 16 | env: 17 | - name: USERNAME 18 | value: root 19 | - name: PASSWORD 20 | value: password 21 | - name: HOST 22 | value: mysql 23 | - name: PORT 24 | value: "3306" 25 | restartPolicy: Never 26 | backoffLimit: 2 -------------------------------------------------------------------------------- /10-lesson/Jobs/OneTime/demo.sh: -------------------------------------------------------------------------------- 1 | # Deploy MySQL 2 | kubectl create -f mysql.yaml 3 | 4 | # Check the deployment 5 | kubectl get po 6 | kubectl get svc 7 | 8 | # Check the databases in MySQL 9 | export NODEPORT=31949 10 | 11 | mysql -u root -ppassword -h 192.168.99.100 -P $NODEPORT -e "show databases" 12 | 13 | # Run the DB Init Job 14 | kubectl create -f db-init-job.yaml 15 | 16 | # Check the Pods 17 | kubectl get po -a 18 | 19 | # Access the logs 20 | kubectl logs db-init* 21 | 22 | # Check the databases in MySQL 23 | mysql -u root -p password -h localhost -P $NODEPORT -e "show databases" 24 | 25 | kubectl describe job db-init -------------------------------------------------------------------------------- /10-lesson/Jobs/OneTime/mysql.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: mysql 5 | labels: 6 | app: myapp 7 | tier: data 8 | spec: 9 | containers: 10 | - name: mysql 11 | image: mysql:5.7.26 12 | env: 13 | - name: MYSQL_ROOT_PASSWORD 14 | value: password 15 | 
    ports:
16 |     - containerPort: 3306
17 | ---
18 | apiVersion: v1
19 | kind: Service
20 | metadata:
21 |   name: mysql
22 |   labels:
23 |     app: myapp
24 |     tier: data
25 | spec:
26 |   type: NodePort
27 |   ports:
28 |   - port: 3306
29 |     targetPort: 3306
30 |     nodePort: 30306
31 |     protocol: TCP
32 |   selector:
33 |     app: myapp
34 | 
35 | 
36 | 
-------------------------------------------------------------------------------- /10-lesson/README.md: --------------------------------------------------------------------------------
1 | # Exploring Daemon Sets and Jobs
2 | ## Objectives
3 | - Recap of Kubernetes Controllers
4 | - What are Daemon Sets?
5 | - Daemon Sets vs Replica Sets
6 | - Use Cases for Daemon Sets
7 | - Configuring Run to Completion Jobs
8 | - Scheduling Jobs with Cron
9 | 
10 | ---
11 | ## Kubernetes Controllers
12 | - Replica Sets
13 | - Deployments
14 | - Stateful Sets
15 | - Daemon Sets
16 | - Jobs (One time)
17 | - Cron Jobs
18 | ## Overview of Daemon Set
19 | - A Daemon Set runs a copy of a pod on every node in a Kubernetes cluster
20 | - Daemon Sets and Replica Sets are similar but have different use cases
21 | - New nodes automatically get a Daemon Set pod
22 | - When a node is removed, the Daemon Set pod will not be rescheduled
23 | 
24 | **The number of pods in the Daemon Set == the number of nodes in the cluster**
25 | ## Daemon Set vs Replica Set
26 | ![Daemon Set vs Replica Set](./images/image-10-01.png)
27 | **Replica Set**
28 | ![Daemon Set vs Replica Set](./images/image-10-02.png)
29 | **Daemon Set**
30 | ## Daemon Set Use Cases
31 | - Logging Aggregators
32 | - Monitoring
33 | - Load Balancers / Reverse Proxies / API Gateways
34 | - Generic background jobs that need to run on each node
35 | ## Demo
36 | ### Exploring Daemon Set
37 | Let's build the ReplicaSet first
38 | ```console
39 | $ cd DaemonSet
40 | $ kubectl create -f nginx-rs.yaml
41 | replicaset "nginx" created
42 | ```
43 | Let's see how many pods are running; we specified 3 replicas in the ReplicaSet, so there should be 3 pods.
44 | ```console
45 | $ kubectl get pods -o wide
46 | NAME READY STATUS RESTARTS AGE IP NODE
47 | nginx-hcnck 1/1 Running 0 14s 172.17.0.9 minikube
48 | nginx-lbbh5 1/1 Running 0 14s 172.17.0.8 minikube
49 | nginx-sml9m 1/1 Running 0 14s 172.17.0.7 minikube
50 | ```
51 | Because I'm running everything on minikube there is only one node, so a Daemon Set cannot really be put to the test here; for now we can simply see that all the pods land on the minikube node, which is perfectly fine for a ReplicaSet.
52 | 
53 | I'm going to scale this to 10 replicas.
54 | ```console
55 | $ kubectl scale --replicas=10 rs/nginx
56 | replicaset "nginx" scaled
57 | $ kubectl get pods -o wide
58 | NAME READY STATUS RESTARTS AGE IP NODE
59 | nginx-6gcqt 1/1 Running 0 24s 172.17.0.14 minikube
60 | nginx-8wv8j 1/1 Running 0 24s 172.17.0.11 minikube
61 | nginx-g65pk 1/1 Running 0 24s 172.17.0.16 minikube
62 | nginx-hcnck 1/1 Running 0 2m 172.17.0.9 minikube
63 | nginx-l4ggn 1/1 Running 0 24s 172.17.0.12 minikube
64 | nginx-lbbh5 1/1 Running 0 2m 172.17.0.8 minikube
65 | nginx-qkjjq 1/1 Running 0 24s 172.17.0.13 minikube
66 | nginx-sml9m 1/1 Running 0 2m 172.17.0.7 minikube
67 | nginx-wjdvv 1/1 Running 0 24s 172.17.0.15 minikube
68 | nginx-x764k 1/1 Running 0 24s 172.17.0.10 minikube
69 | ```
70 | Now that we understand ReplicaSets, let's delete everything and start playing with DaemonSets.
71 | ```console
72 | $ kubectl delete rs/nginx
73 | replicaset "nginx" deleted
74 | $ kubectl get po -o wide
75 | No resources found.
76 | ```
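One point worth keeping in mind before switching over (this aside is not part of the original webinar demo): a DaemonSet has no `replicas` field, it is sized by the number of eligible nodes. On a multi-node cluster, once the DaemonSet below has been created you can check its per-node placement with standard `kubectl` commands:

```bash
# Desired/current counts for the DaemonSet, plus the node each pod landed on
kubectl get ds nginx -o wide
kubectl get pods -l app=nginx -o wide
```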
77 | OK, now we start working on the DaemonSet. As stated earlier, because I only have one node this exercise will not show much of how a DaemonSet really behaves: it will only spawn one pod.
78 | 
79 | ```console
80 | $ kubectl create -f nginx-ds.yaml
81 | daemonset "nginx" created
82 | $ kubectl get pods -o wide
83 | NAME READY STATUS RESTARTS AGE IP NODE
84 | nginx-6ctfr 1/1 Running 0 39m 172.17.0.7 minikube
85 | $ kubectl scale --replicas=10 ds/nginx
86 | Error from server (NotFound): the server could not find the requested resource
87 | $ kubectl delete ds nginx
88 | daemonset "nginx" deleted
89 | ```
90 | 
91 | ## Jobs in Kubernetes
92 | - There are two types of job controllers in Kubernetes
93 |   - One time / Run to completion
94 |   - Scheduled Jobs
95 | - Both are extremely useful for performing batch operations
96 | - Jobs complement other controllers like Replica Set and Daemon Set
97 | ## Run to Completion Jobs
98 | - Each Job creates one or more pods and ensures that they are successfully terminated
99 | - If a pod or node fails during the execution, the Job controller restarts or reschedules the pod
100 | - A Job can also be used to run multiple pods in parallel
101 | - A Job can be scaled up using the _kubectl scale_ command
102 | - A Job's spec may define the parallelism value for running multiple pods in parallel (a minimal sketch follows the use cases below)
103 | ## Use case for Jobs
104 | - One time, initialization of resources
105 |   - Databases
106 |   - File Systems
107 |   - Cache
108 |   - Configuration
109 | - Multiple workers to process messages in a queue
110 | ![Use case for Jobs](./images/image-10-03.png)
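To make the `parallelism` and `completions` settings mentioned above concrete, here is a minimal, hypothetical sketch of a Job that processes work with several pods at once; the name, image, and command are illustrative only and do not come from this repository:

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: queue-worker          # illustrative name, not part of this repo
spec:
  completions: 6              # total number of successful pod runs required
  parallelism: 3              # how many pods may run at the same time
  backoffLimit: 4
  template:
    spec:
      containers:
      - name: worker
        image: busybox
        command: ["/bin/sh", "-c", "echo processing one message; sleep 5"]
      restartPolicy: Never
```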
111 | ## Demo
112 | ### Using Run to Completion Jobs
113 | ```console
114 | $ cd Jobs/OneTime
115 | $ kubectl create -f mysql.yaml
116 | pod/mysql created
117 | service/mysql created
118 | $ kubectl get po
119 | NAME READY STATUS RESTARTS AGE
120 | mysql 1/1 Running 0 3m
121 | $ kubectl get svc
122 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
123 | kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 17h
124 | mysql NodePort 10.96.56.40 <none> 3306:30306/TCP 24s
125 | ```
126 | 
127 | Now it's time to test the MySQL connection
128 | 
129 | ```console
130 | $ kubectl exec mysql -- mysql -u root -ppassword -e 'show databases'
131 | mysql: [Warning] Using a password on the command line interface can be insecure.
132 | Database
133 | information_schema
134 | mysql
135 | performance_schema
136 | sys
137 | ```
138 | 
139 | ```console
140 | $ kubectl create -f db-init-job.yaml
141 | job "db-init" created
142 | $ kubectl get po -a
143 | NAME READY STATUS RESTARTS AGE
144 | db-init-b8mpr 0/1 Completed 0 3m
145 | mysql 1/1 Running 0 5h
146 | $ kubectl logs db-init-b8mpr
147 | mysql: [Warning] Using a password on the command line interface can be insecure.
148 | $ kubectl exec mysql -- mysql -u root -ppassword -e 'show databases'
149 | mysql: [Warning] Using a password on the command line interface can be insecure.
150 | Database
151 | information_schema
152 | cloudshop
153 | mysql
154 | performance_schema
155 | sys
156 | $ kubectl describe job db-init
157 | Name: db-init
158 | Namespace: default
159 | Selector: controller-uid=a238d063-e0b0-11e7-a8a8-08002720cfab
160 | Labels: app=myapp
161 | tier=data
162 | Annotations: <none>
163 | Parallelism: 1
164 | Completions: 1
165 | Start Time: Thu, 14 Dec 2017 17:24:49 +0800
166 | Pods Statuses: 0 Running / 1 Succeeded / 0 Failed
167 | Pod Template:
168 | Labels: controller-uid=a238d063-e0b0-11e7-a8a8-08002720cfab
169 | job-name=db-init
170 | Containers:
171 | db-init:
172 | Image: janakiramm/db-init
173 | Port: <none>
174 | Environment:
175 | USERNAME: root
176 | PASSWORD: password
177 | HOST: mysql
178 | PORT: 3306
179 | Mounts: <none>
180 | Volumes: <none>
181 | Events:
182 | Type Reason Age From Message
183 | ---- ------ ---- ---- -------
184 | Normal SuccessfulCreate 7m job-controller Created pod: db-init-b8mpr
185 | ```
186 | 
187 | Clear the environment
188 | 
189 | ```console
190 | kubectl delete -f mysql.yaml -f db-init-job.yaml
191 | ```
192 | 
193 | ## Configuring Cron Jobs
194 | 
195 | - A Cron Job manages time-based Jobs
196 |   - Once at a specified point in time
197 |   - Repeatedly at a specified point in time
198 | - Each Cron Job is similar to one line of a crontab (cron table) file
199 | - Typical use cases include
200 |   - Schedule a job execution at a given point in time
201 |   - Create a periodic job
202 |     - Database backup
203 |     - Sending emails
204 | ##### On older clusters, Cron Jobs may require _--runtime-config=batch/v2alpha1=true_
205 | 
206 | ## Demo
207 | ### Using Cron Jobs
208 | ```console
209 | $ cd Jobs/Cron
210 | $ kubectl create -f cron.yaml
211 | cronjob.batch/hello created
212 | $ kubectl get cronjob hello
213 | NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE
214 | hello */1 * * * * False 0 10s 93s
215 | $ kubectl get job --watch
216 | NAME COMPLETIONS DURATION AGE
217 | hello-1558490820 1/1 7s 104s
218 | hello-1558490880 1/1 6s 44s
219 | $ pod=$(kubectl get pods --selector=job-name=hello-1558490820 --output=jsonpath={.items[].metadata.name})
220 | $ kubectl logs $pod
221 | Wed May 22 02:07:06 UTC 2019
222 | Hello from Kubernetes cluster
223 | ```
224 | 
225 | Clean up
226 | 
227 | ```console
228 | $ kubectl delete cronjob hello
229 | cronjob "hello" deleted
230 | ```
231 | 
232 | Reference:
233 | - [Kubernetes Webinar Series - Exploring Daemon Sets and Jobs](https://www.youtube.com/watch?v=qYvXwWT-13w&index=10&list=PLF3s2WICJlqOiymMaTLjwwHz-MSVbtJPQ)
-------------------------------------------------------------------------------- /10-lesson/images/image-10-01.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/10-lesson/images/image-10-01.png
-------------------------------------------------------------------------------- /10-lesson/images/image-10-02.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/10-lesson/images/image-10-02.png
-------------------------------------------------------------------------------- /10-lesson/images/image-10-03.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/10-lesson/images/image-10-03.png
-------------------------------------------------------------------------------- /11-lesson/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ernesen/kubernetes/60a02f4906eb247ecafab3d6e8df0424df890d42/11-lesson/README.md -------------------------------------------------------------------------------- /11-lesson/backup/nginx-pod.json: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "Pod", 3 | "apiVersion": "v1beta3", 4 | "metadata": { 5 | "name": "pod-nginx-01", 6 | "labels": { 7 | "name": "pod-nginx" 8 | } 9 | }, 10 | "spec": { 11 | "containers": [{ 12 | "name": "pod-nginx-01", 13 | "image": "dockerfile/nginx", 14 | "ports": [{ 15 | "containerPort": 80 16 | }], 17 | "livenessProbe": { 18 | "enabled": true, 19 | "type": "http", 20 | "initialDelaySeconds": 30, 21 | "httpGet": { 22 | "path": "/", 23 | "port": "80" 24 | } 25 | } 26 | }] 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /11-lesson/backup/nginx-svc.json: -------------------------------------------------------------------------------- 1 | 2 | { 3 | "kind": "Service", 4 | "apiVersion": "v1beta3", 5 | "metadata": { 6 | "name": "service-nginx", 7 | "labels": { 8 | "name": "service-nginx" 9 | } 10 | }, 11 | "spec": { 12 | "selector": { 13 | "name": "pod-nginx" 14 | }, 15 | "ports": [{"port":80, "targetPort": "http-port"}], 16 | "publicIPs":["%NODE_PUBLIC_IP%"] 17 | } 18 | } -------------------------------------------------------------------------------- /11-lesson/db-pod.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Pod", 4 | "metadata": { 5 | "name": "db", 6 | "labels": { 7 | "name": "mongo" 8 | } 9 | }, 10 | "spec": { 11 | "containers": [ 12 | { 13 | "image": "mongo", 14 | "name": "mongo", 15 | "ports": [ 16 | { 17 | "name": "mongo", 18 | "containerPort": 27017, 19 | "hostPort": 27017 20 | } 21 | ], 22 | "volumeMounts": [ 23 | { 24 | "name": "mongo-storage", 25 | "mountPath": "/data/db" 26 | } 27 | ] 28 | } 29 | ], 30 | "Volumes": [ 31 | { 32 | "name": "mongo-storage", 33 | "hostPath": { 34 | "path": "/data/db" 35 | } 36 | } 37 | ] 38 | } 39 | } -------------------------------------------------------------------------------- /11-lesson/db-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: db 5 | labels: 6 | name: mongo 7 | #app: todoapp 8 | spec: 9 | containers: 10 | - image: mongo 11 | name: mongo 12 | ports: 13 | - name: mongo 14 | containerPort: 27017 15 | hostPort: 27017 16 | volumeMounts: 17 | - name: mongo-storage 18 | mountPath: /data/db 19 | Volumes: 20 | - name: mongo-storage 21 | hostPath: 22 | path: /data/db -------------------------------------------------------------------------------- /11-lesson/db-svc.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Service", 4 | "metadata": { 5 | "name": "db", 6 | "labels": { 7 | "name": "mongo" 8 | } 9 | }, 10 | "spec": { 11 | "selector": { 12 | "name": "mongo" 13 | }, 14 | "type": "ClusterIP", 15 | "ports": [ 16 | { 17 | "name": "db", 18 | "port": 27017, 19 | "targetPort": 27017 20 | } 21 | ] 22 | } 23 | } -------------------------------------------------------------------------------- /11-lesson/db-svc.yaml: 
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   name: db
5 |   labels:
6 |     name: mongo
7 |     #app: todoapp
8 | spec:
9 |   selector:
10 |     name: mongo
11 |   type: ClusterIP
12 |   ports:
13 |   - name: db
14 |     port: 27017
15 |     targetPort: 27017
16 | 
-------------------------------------------------------------------------------- /11-lesson/demo.sh: --------------------------------------------------------------------------------
1 | # Cordoning a Node
2 | kubectl get nodes
3 | kubectl get pods -o wide
4 | 
5 | kubectl scale --replicas=5 rs/myemp
6 | kubectl get pods -o wide
7 | 
8 | kubectl cordon 192.168.27.102
9 | kubectl scale --replicas=10 rs/myemp
10 | kubectl get pods -o wide
11 | 
12 | kubectl uncordon 192.168.27.101
13 | kubectl scale --replicas=20 rs/myemp
14 | kubectl get pods -o wide
15 | kubectl scale --replicas=5 rs/myemp
16 | 
17 | # Draining a Node and moving Pods to another Node
18 | kubectl get pods -o wide
19 | kubectl drain 192.168.27.102 --force
20 | kubectl get pods -o wide
21 | 
22 | kubectl uncordon 192.168.27.101
23 | kubectl scale --replicas=10 rs/myemp
24 | kubectl get pods -o wide
25 | 
26 | # Watching Pod Status
27 | kubectl get pods --watch-only
28 | kubectl scale --replicas=20 rs/myemp
29 | kubectl scale --replicas=5 rs/myemp
30 | 
31 | # Port Forwarding
32 | kubectl get svc myemp
33 | kubectl port-forward myemp 3000:3000
34 | 
35 | # Copying Files from the Host
36 | kubectl exec -it myemp /bin/sh
37 | cd public
38 | ls
39 | kubectl cp ./test.html myemp:/usr/src/app/public/test.html
40 | ls
41 | kubectl cp myemp:/usr/src/app/public/test.html ./test.html
42 | 
43 | # Explain Objects
44 | kubectl explain
45 | kubectl explain po
46 | kubectl explain svc
47 | 
48 | # Format Output
49 | kubectl get pod myemp -o=yaml
50 | kubectl get pod myemp -o=json
51 | 
52 | # List Containers in a Pod
53 | kubectl get pods myemp -o jsonpath={.spec.containers[*].name}
54 | 
55 | # Sort by Name
56 | kubectl get services --sort-by=.metadata.name
57 | 
58 | # List Pods along with the Node
59 | kubectl get pod -o wide | awk -F" " '{ print $1 " " $7 }' | column -t
60 | 
61 | # Edit Objects
62 | kubectl edit pod/myemp
63 | KUBE_EDITOR="sublime" kubectl edit pod/myemp
64 | 
65 | # Proxy
66 | kubectl proxy
67 | kubectl proxy --port=8000
68 | open http://localhost:8000/ui
69 | curl http://localhost:8000/api
70 | curl -s http://localhost:8000/api/v1/nodes | jq '.items[] .metadata.labels'
71 | 
72 | # List exposed APIs
73 | kubectl api-versions
74 | 
75 | # Create Pod and Service through API
76 | kubectl get pods
77 | curl -s http://localhost:8000/api/v1/namespaces/default/pods -XPOST -H 'Content-Type: application/json' -d@nginx-pod.json | jq '.status'
78 | curl -s http://localhost:8000/api/v1/namespaces/default/pods -XPOST -H "Content-Type: application/json" -d@db-pod.json | jq ".status"
79 | kubectl get pods
80 | curl -s http://localhost:8000/api/v1/namespaces/default/services -XPOST -H 'Content-Type: application/json' -d@nginx-svc.json | jq '.spec.clusterIP'
81 | curl -s http://localhost:8000/api/v1/namespaces/default/services -XPOST -H "Content-Type: application/json" -d@db-svc.json | jq ".spec.clusterIP"
82 | kubectl get svc
83 | curl http://localhost:8000/api/v1/namespaces/default/services/nginx-service -XDELETE
84 | kubectl get svc
85 | kubectl get pods
86 | curl http://localhost:8000/api/v1/namespaces/default/pods/nginx -XDELETE
87 | kubectl get pods
88 | 
89 | curl nginx.default.svc.cluster.local:8080
90 | 
91 | 
92 | 
93 | docker run -it
ibmcom/secure-gateway-client --net="host" ixLpPDsMNEB_prod_ng -t eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJjb25maWd1cmF0aW9uX2lkIjoiaXhMcFBEc01ORUJfcHJvZF9uZyIsInJlZ2lvbiI6InVzLXNvdXRoIiwiaWF0IjoxNTEyMzk1NTE3LCJleHAiOjE1MjAxNzE1MTd9.hX5eXCdBM18VjWPIdtHwDaNQvUmJ48Q70Q-lXa5bs5o 94 | 95 | docker run -it ibmcom/secure-gateway-client --net="host" p9MTlTSy1vN_prod_ng -t eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJjb25maWd1cmF0aW9uX2lkIjoicDlNVGxUU3kxdk5fcHJvZF9uZyIsInJlZ2lvbiI6InVzLXNvdXRoIiwiaWF0IjoxNTEyMzk2OTE1LCJleHAiOjE1MjAxNzI5MTV9.niKk7iJYnUZfSavVZerY0qzhVQm89HJPK2zl9RVmc-s -------------------------------------------------------------------------------- /11-lesson/nginx-pod.json: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "Pod", 3 | "apiVersion": "v1beta3", 4 | "metadata": { 5 | "name": "pod-nginx-01", 6 | "labels": { 7 | "name": "pod-nginx" 8 | } 9 | }, 10 | "spec": { 11 | "containers": [{ 12 | "name": "pod-nginx-01", 13 | "image": "dockerfile/nginx", 14 | "ports": [{ 15 | "containerPort": 80 16 | }], 17 | "livenessProbe": { 18 | "enabled": true, 19 | "type": "http", 20 | "initialDelaySeconds": 30, 21 | "httpGet": { 22 | "path": "/", 23 | "port": "80" 24 | } 25 | } 26 | }] 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /11-lesson/nginx-svc.json: -------------------------------------------------------------------------------- 1 | 2 | { 3 | "kind": "Service", 4 | "apiVersion": "v1beta3", 5 | "metadata": { 6 | "name": "service-nginx", 7 | "labels": { 8 | "name": "service-nginx" 9 | } 10 | }, 11 | "spec": { 12 | "selector": { 13 | "name": "pod-nginx" 14 | }, 15 | "ports": [{"port":80, "targetPort": "http-port"}], 16 | "publicIPs":["%NODE_PUBLIC_IP%"] 17 | } 18 | } -------------------------------------------------------------------------------- /11-lesson/web-pod.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Pod", 4 | "metadata": { 5 | "name": "web", 6 | "labels": { 7 | "name": "web" 8 | } 9 | }, 10 | "spec": { 11 | "containers": [ 12 | { 13 | "image": "janakiramm/todo-app", 14 | "name": "myweb", 15 | "ports": [ 16 | { 17 | "containerPort": 3000 18 | } 19 | ] 20 | } 21 | ] 22 | } 23 | } -------------------------------------------------------------------------------- /11-lesson/web-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: web 5 | labels: 6 | name: web 7 | #app: todoapp 8 | spec: 9 | containers: 10 | - image: janakiramm/todo-app 11 | name: myweb 12 | ports: 13 | - containerPort: 3000 -------------------------------------------------------------------------------- /11-lesson/web-rc.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "ReplicationController", 4 | "metadata": { 5 | "name": "web", 6 | "labels": { 7 | "name": "web" 8 | } 9 | }, 10 | "spec": { 11 | "replicas": 2, 12 | "template": { 13 | "metadata": { 14 | "labels": { 15 | "name": "web" 16 | } 17 | }, 18 | "spec": { 19 | "containers": [ 20 | { 21 | "image": "janakiramm/todo-app", 22 | "name": "web", 23 | "ports": [ 24 | { 25 | "containerPort": 3000 26 | } 27 | ] 28 | } 29 | ] 30 | } 31 | } 32 | } 33 | } -------------------------------------------------------------------------------- /11-lesson/web-rc.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: web 5 | labels: 6 | name: web 7 | #app: todoapp 8 | spec: 9 | replicas: 2 10 | template: 11 | metadata: 12 | labels: 13 | name: web 14 | spec: 15 | containers: 16 | - image: janakiramm/todo-app 17 | name: web 18 | ports: 19 | - containerPort: 3000 -------------------------------------------------------------------------------- /11-lesson/web-svc.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Service", 4 | "metadata": { 5 | "name": "web", 6 | "labels": { 7 | "name": "web" 8 | } 9 | }, 10 | "spec": { 11 | "selector": { 12 | "name": "web" 13 | }, 14 | "type": "NodePort", 15 | "ports": [ 16 | { 17 | "name": "http", 18 | "port": 80, 19 | "targetPort": 3000, 20 | "protocol": "TCP" 21 | } 22 | ] 23 | } 24 | } -------------------------------------------------------------------------------- /11-lesson/web-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: web 5 | labels: 6 | name: web 7 | #app: todoapp 8 | spec: 9 | selector: 10 | name: web 11 | type: NodePort 12 | ports: 13 | - name: http 14 | port: 80 15 | targetPort: 3000 16 | protocol: TCP -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # kubernetes 2 | 3 | Learning Kubernetes from Kubernetes Webinar Series 4 | 5 | ## List of lessons 6 | 7 | [01 - Kubernetes Webinar Series - Getting Started with Kubernetes](./01-lesson/README.md) 8 | 9 | [02 - Kubernetes Webinar Series - Kubernetes Architecture 101](./02-lesson/README.md) 10 | 11 | [03 - Kubernetes Webinar Series - A Closer Look at Pods and Replicas](./03-lesson/README.md) 12 | 13 | [04 - Kubernetes Webinar Series - Understanding Service Discovery in Kubernetes](./04-lesson/README.md) 14 | 15 | [05 - Kubernetes Webinar Series - Scaling and Managing Deployments](./05-lesson/README.md) 16 | 17 | [06 - Kubernetes Webinar Series - Dealing with Storage and Persistence](./06-lesson/README.md) 18 | 19 | [07 - Kubernetes Webinar Series - Controlling user access using Role Based Access Control (RBAC)](./07-lesson/README.md) 20 | 21 | [08 - Kubernetes Webinar Series - Deploying Stateful Application with StatefulSet](./08-lesson/README.md) 22 | 23 | [09 - Kubernetes Webinar Series - Using ConfigMaps & Secrets in Kubernetes](./09-lesson/README.md) 24 | 25 | [10 - Kubernetes Webinar Series - Exploring Daemon Sets and Jobs](./10-lesson/README.md) 26 | 27 | [11 - Kubernetes Webinar Series - Tips and Tricks of Using Kubectl, the Kubernetes CLI](./11-lesson/README.md) 28 | 29 | [12 - Kubernetes Webinar Series - Everything About Ingress](./12-lesson/README.md) 30 | 31 | [13 - Kubernetes Webinar Series - Building CI CD Pipelines with Jenkins and Kubernetes](./13-lesson/README.md) 32 | 33 | [14 - Kubernetes Webinar Series - Helm & Monocular - Discover & deploy your favorite applications on Kubernetes](./14-lesson/README.md) 34 | 35 | [15 - Kubernetes Webinar Series - Kubernetes Webinar Series - Continuous Deployment on Kubernetes with Spinnaker](./15-lesson/README.md) 36 | 37 | [16 - Kubernetes Webinar Series - Kubernetes for CloudFoundry users](./README.md) 38 | 39 | [17 - Kubernetes Webinar Series - DevOps Concepts](./README.md) 40 | 41 | [18 - 
Kubernetes Webinar Series - Continuous Integration](./README.md) 42 | 43 | [19 - Kubernetes Webinar Series - Continuous Delivery and Deployment](./README.md) 44 | 45 | [20 - Kubernetes Webinar Series - OpenShift Tips and Tricks of Using oc, the OpenShift CLI](./README.md) 46 | 57 | --------------------------------------------------------------------------------