├── audits ├── audit-policy.yaml └── kube-apiserver.yaml ├── calico-config-kubernetes └── README.md ├── certificates └── README.md ├── containerization ├── docker-compose-demo │ └── README.md ├── docker-demo │ ├── Dockerfile │ ├── README.md │ ├── alpine-minirootfs-3.9.2-x86_64.tar │ ├── default.conf │ └── index.html └── scratch-containers │ └── README.md ├── controllers ├── deployments │ ├── mysql_deployment.yaml │ ├── mysql_deployment_recreate.yaml │ └── rollout_rollbacks.cmd ├── replicaset │ ├── mysql_rs1.yaml │ └── mysql_rs2.yaml ├── replicationcontroller │ ├── replicationcontroller.yaml │ └── replicationcontrollerupdate.cmd └── secret.conf ├── dashboard ├── README.md ├── clusterrolebinding.yaml ├── podsecuritypolicy.yaml └── serviceaccount.yaml ├── etcd └── README.md ├── hpa ├── README.md ├── metrics-server │ ├── aggregated-metrics-reader.yaml │ ├── auth-delegator.yaml │ ├── auth-reader.yaml │ ├── metrics-apiservice.yaml │ ├── metrics-server-deployment.yaml │ ├── metrics-server-service.yaml │ └── resource-reader.yaml └── nginx.yaml ├── ingress-nginx ├── README.md ├── _config.yml ├── app1.yaml ├── app1 │ ├── Dockerfile │ ├── default.conf │ └── index.html ├── app2.yaml ├── app2 │ ├── Dockerfile │ ├── default.conf │ └── index.html ├── app3.yaml ├── app3 │ ├── Dockerfile │ ├── default.conf │ └── index.html ├── ingress.yaml ├── service.yaml └── t1.yaml ├── jobs ├── README.md ├── add-number-docker.sh ├── cronjob.yaml ├── job.yaml ├── jobs-completion-count-prime.yaml ├── jobs-parallelism.yaml ├── jobs-simple-prime.yaml ├── parallelism │ ├── Dockerfile │ ├── pod-parallelism.yaml │ ├── redis-pod.yaml │ ├── redis-service.yaml │ ├── rediswq.py │ └── worker.py └── pod.yaml ├── kubernetes-pods ├── README.md ├── initcontainers │ └── initcontainer.yaml ├── inter-pod-communication │ ├── Dockerfile │ ├── app.yaml │ ├── default.conf │ ├── index.php │ └── select.php ├── multicontainerpod │ ├── adapterpod.yaml │ └── sidecar.yaml ├── mysql-secrets │ ├── mysql.yaml │ ├── secret.conf │ └── secretfile_mysql.yaml └── mysqlpod │ ├── db.txt │ └── mysqlpod.yaml ├── logging-efk ├── README.md ├── clusterrole-fluentd.yaml ├── clusterrolebinding-fluentd.yaml ├── counter.yaml ├── elasticsearch_statefulset.yaml ├── elasticsearch_svc.yaml ├── fluentd_daemonset.yaml ├── kibana.yaml ├── pv.yaml ├── pvc.yaml └── sa-fluentd.yaml ├── mariadb-statefulset ├── etcd-cluster.yml ├── mariadb-pv.yml ├── mariadb-pvc.yml ├── mariadb-rs.yml ├── mariadb-ss.yml └── mariadb-ss_updated.yml ├── multi-master-hard-way ├── README.md ├── certs │ ├── admin-csr.json │ ├── ca-config.json │ ├── ca-csr.json │ ├── createnodecert.sh │ ├── kube-controller-manager-csr.json │ ├── kube-proxy-csr.json │ ├── kube-scheduler-csr.json │ ├── kubernetes-csr.json │ ├── node-csr.json │ ├── node.cfg │ ├── node1-csr.json │ └── service-account-csr.json ├── createnodecert.sh ├── encryption-config.yaml ├── genkubeletcert.sh ├── kube-proxy-config.yaml ├── kube-scheduler.yaml ├── kubelet-rbac-clusterrole.yaml ├── kubelet-rbac-clusterrolebinding.yaml └── node.cfg ├── networkpolicies ├── Dockerfile ├── README.md ├── allow-ingress-db.yaml ├── allow-ingress-webserver.yaml ├── app.yaml ├── app2.yaml ├── db.txt ├── default.conf ├── deny-all-egress.yaml ├── deny-ingress-db.yaml ├── deny-ingress.yaml ├── egress-allow-dns.yaml ├── index.php ├── mysqlpod.yaml └── select.php ├── pod-assignment ├── README.md ├── interpodaffinity │ ├── nginx.yaml │ ├── redis-cache-2.yaml │ ├── redis-cache-web.yaml │ └── redis-cache.yaml ├── nodeaffinity │ └── nginx-nodeaffinity.yaml └── nodeselector 
│ └── nginx-nodeselector.yaml ├── pod-priority-preemption ├── README.md ├── high-priority-nginx.yaml └── priorityclass.yaml ├── probes ├── configmap-defaultconf.yaml ├── default.conf ├── mysqlpod-tcpsocket.yaml ├── mysqlpod.yaml ├── mysqlpod_2.yaml ├── mysqlpod_noprobe.yaml ├── mysqlpod_noprobe2.yaml ├── nginx-bad.yaml ├── nginx.yaml └── nginx2.yaml ├── prometheus ├── basic-prometheus-deployment.yaml └── prometheus.yml ├── resource-metrics └── README.md ├── security-context └── README.md ├── services └── README.md ├── storageclass └── README.md ├── test ├── testfilecollaborator └── usecase-answers.md /audits/audit-policy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: audit.k8s.io/v1 # This is required. 2 | kind: Policy 3 | # Don't generate audit events for all requests in RequestReceived stage. 4 | omitStages: 5 | - "RequestReceived" 6 | rules: 7 | # Log pod changes at RequestResponse level 8 | - level: RequestResponse 9 | resources: 10 | - group: "" 11 | # Resource "pods" doesn't match requests to any subresource of pods, 12 | # which is consistent with the RBAC policy. 13 | resources: ["pods"] 14 | # Log "pods/log", "pods/status" at Metadata level 15 | - level: Metadata 16 | resources: 17 | - group: "" 18 | resources: ["pods/log", "pods/status"] 19 | 20 | # Don't log requests to a configmap called "controller-leader" 21 | - level: None 22 | resources: 23 | - group: "" 24 | resources: ["configmaps"] 25 | resourceNames: ["controller-leader"] 26 | 27 | # Don't log watch requests by the "system:kube-proxy" on endpoints or services 28 | - level: None 29 | users: ["system:kube-proxy"] 30 | verbs: ["watch"] 31 | resources: 32 | - group: "" # core API group 33 | resources: ["endpoints", "services"] 34 | 35 | # Don't log authenticated requests to certain non-resource URL paths. 36 | - level: None 37 | userGroups: ["system:authenticated"] 38 | nonResourceURLs: 39 | - "/api*" # Wildcard matching. 40 | - "/version" 41 | 42 | # Log the request body of configmap changes in kube-system. 43 | - level: Request 44 | resources: 45 | - group: "" # core API group 46 | resources: ["configmaps"] 47 | # This rule only applies to resources in the "kube-system" namespace. 48 | # The empty string "" can be used to select non-namespaced resources. 49 | namespaces: ["kube-system"] 50 | 51 | # Log configmap and secret changes in all other namespaces at the Metadata level. 52 | - level: Metadata 53 | resources: 54 | - group: "" # core API group 55 | resources: ["secrets", "configmaps"] 56 | 57 | # Log all other resources in core and extensions at the Request level. 58 | - level: Request 59 | resources: 60 | - group: "" # core API group 61 | - group: "extensions" # Version of group should NOT be included. 62 | 63 | # A catch-all rule to log all other requests at the Metadata level. 64 | - level: Metadata 65 | # Long-running requests like watches that fall under this rule will not 66 | # generate an audit event in RequestReceived. 
67 | omitStages: 68 | - "RequestReceived" 69 | -------------------------------------------------------------------------------- /audits/kube-apiserver.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | annotations: 5 | scheduler.alpha.kubernetes.io/critical-pod: "" 6 | creationTimestamp: null 7 | labels: 8 | component: kube-apiserver 9 | tier: control-plane 10 | name: kube-apiserver 11 | namespace: kube-system 12 | spec: 13 | containers: 14 | - command: 15 | - kube-apiserver 16 | - --authorization-mode=Node,RBAC 17 | - --advertise-address=10.168.0.2 18 | - --allow-privileged=true 19 | - --client-ca-file=/etc/kubernetes/pki/ca.crt 20 | - --enable-admission-plugins=NodeRestriction 21 | - --enable-bootstrap-token-auth=true 22 | - --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt 23 | - --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt 24 | - --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key 25 | - --etcd-servers=https://127.0.0.1:2379 26 | - --insecure-port=0 27 | - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt 28 | - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key 29 | - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname 30 | - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt 31 | - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key 32 | - --requestheader-allowed-names=front-proxy-client 33 | - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt 34 | - --requestheader-extra-headers-prefix=X-Remote-Extra- 35 | - --requestheader-group-headers=X-Remote-Group 36 | - --requestheader-username-headers=X-Remote-User 37 | - --secure-port=6443 38 | - --service-account-key-file=/etc/kubernetes/pki/sa.pub 39 | - --service-cluster-ip-range=10.96.0.0/12 40 | - --audit-policy-file=/etc/kubernetes/pki/audit-policy.yaml 41 | - --audit-log-path=/etc/auditlog/audit.log 42 | - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt 43 | - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key 44 | image: k8s.gcr.io/kube-apiserver:v1.12.2 45 | imagePullPolicy: IfNotPresent 46 | livenessProbe: 47 | failureThreshold: 8 48 | httpGet: 49 | host: 10.168.0.2 50 | path: /healthz 51 | port: 6443 52 | scheme: HTTPS 53 | initialDelaySeconds: 15 54 | timeoutSeconds: 15 55 | name: kube-apiserver 56 | resources: 57 | requests: 58 | cpu: 250m 59 | volumeMounts: 60 | - mountPath: /etc/ca-certificates 61 | name: etc-ca-certificates 62 | readOnly: true 63 | - mountPath: /etc/kubernetes/pki 64 | name: k8s-certs 65 | readOnly: true 66 | - mountPath: /etc/auditlog 67 | name: auditlog 68 | - mountPath: /etc/ssl/certs 69 | name: ca-certs 70 | readOnly: true 71 | - mountPath: /usr/share/ca-certificates 72 | name: usr-share-ca-certificates 73 | readOnly: true 74 | - mountPath: /usr/local/share/ca-certificates 75 | name: usr-local-share-ca-certificates 76 | readOnly: true 77 | hostNetwork: true 78 | priorityClassName: system-cluster-critical 79 | volumes: 80 | - hostPath: 81 | path: /etc/kubernetes/pki 82 | type: DirectoryOrCreate 83 | name: k8s-certs 84 | - hostPath: 85 | path: /etc/ssl/certs 86 | type: DirectoryOrCreate 87 | name: ca-certs 88 | - hostPath: 89 | path: /usr/share/ca-certificates 90 | type: DirectoryOrCreate 91 | name: usr-share-ca-certificates 92 | - hostPath: 93 | path: /usr/local/share/ca-certificates 94 | type: DirectoryOrCreate 95 | name: usr-local-share-ca-certificates 96 | - hostPath: 97 | path: 
/etc/ca-certificates 98 | type: DirectoryOrCreate 99 | name: etc-ca-certificates 100 | - hostPath: 101 | path: /audit_kubeapiserver 102 | type: DirectoryOrCreate 103 | name: auditlog 104 | status: {} 105 | -------------------------------------------------------------------------------- /calico-config-kubernetes/README.md: -------------------------------------------------------------------------------- 1 | When using Calico IPAM, each workload is assigned an address from the selection of configured IP pools. You may want to modify the CIDR of the IP pool of a running cluster for one of the following reasons: 2 | 3 | * To move to a larger CIDR that can accommodate more workloads. 4 | * To move off of a CIDR that was used accidentally. 5 | 6 | 7 | While Calico supports changing IP pools, not all orchestrators do. Be sure to consult the documentation of the orchestrator you are using to ensure it supports changing the workload CIDR. 8 | 9 | For example, in Kubernetes, all three of the following arguments must be equal to, or contain, the Calico IP pool CIDRs: 10 | 11 | kube-apiserver: `--pod-network-cidr` 12 | kube-proxy: `--cluster-cidr` 13 | kube-controller-manager: `--cluster-cidr` 14 | 15 | Removing an IP pool without following this migration procedure can cause network connectivity disruptions in any running workloads with addresses from that IP pool. Namely: 16 | 17 | * If IP-in-IP or VXLAN was enabled on the IP pool, those workloads will no longer have their traffic encapsulated. 18 | * If nat-outgoing was enabled on the IP pool, those workloads will no longer have their traffic NAT’d. 19 | * If using Calico BGP routing, routes to pods will no longer be aggregated. 20 | 21 | In this example, we created a cluster with kubeadm. We wanted the pods to use IPs in the range 10.0.0.0/16 so we set --pod-network-cidr=10.0.0.0/16 when running kubeadm init. However, we installed Calico without setting the default IP pool to match. Running calicoctl get ippool -o wide shows Calico created its default IP pool of 192.168.0.0/16: 22 | 23 | ``` 24 | NAME CIDR NAT IPIPMODE VXLANMODE DISABLED 25 | default-ipv4-ippool 192.168.0.0/16 true Always Never false 26 | 27 | NAMESPACE WORKLOAD NODE NETWORKS INTERFACE 28 | kube-system kube-dns-6f4fd4bdf-8q7zp vagrant 192.168.52.130/32 cali800a63073ed 29 | ``` 30 | 31 | * Add a new IP pool: 32 | 33 | ``` 34 | calicoctl create -f -<<EOF 35 | apiVersion: projectcalico.org/v3 36 | kind: IPPool 37 | metadata: 38 | name: new-pool 39 | spec: 40 | cidr: 10.0.0.0/16 41 | ipipMode: Always 42 | vxlanMode: Never 43 | natOutgoing: true 44 | EOF 45 | ``` 46 | 47 | * Disable the old IP pool. 48 | 49 | First save the existing IP pool definitions to disk: 50 | 51 | ``` 52 | calicoctl get ippool -o yaml > pool.yaml 53 | 54 | ``` 55 | 56 | Edit the file pool.yaml - adding `disabled: true` to the `default-ipv4-ippool` IP pool.
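The edited `default-ipv4-ippool` entry in pool.yaml should end up looking roughly like the sketch below (a sketch only - every field except `disabled` is taken from the `calicoctl get ippool -o wide` output shown earlier, and any additional fields that calicoctl exported can be left as they are):

```
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: default-ipv4-ippool
spec:
  cidr: 192.168.0.0/16
  ipipMode: Always
  vxlanMode: Never
  natOutgoing: true
  disabled: true
```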
Apply the config using `calicoctl apply -f pool.yaml` 57 | 58 | * Recreate all existing workload 59 | 60 | ``` 61 | kubectl delete pod -n kube-system kube-dns-6f4fd4bdf-8q7zp 62 | 63 | ``` 64 | 65 | * Delete the old IP pool 66 | 67 | ``` 68 | calicoctl delete pool default-ipv4-ippool 69 | 70 | ``` 71 | -------------------------------------------------------------------------------- /containerization/docker-compose-demo/README.md: -------------------------------------------------------------------------------- 1 | # docker-compose-demo 2 | 3 | ~~~~ 4 | version: '2.0' 5 | services: 6 | db: 7 | image: mysql:5.7 8 | volumes: 9 | - db_data:/var/lib/mysql 10 | restart: always 11 | environment: 12 | MYSQL_ROOT_PASSWORD: somewordpress 13 | MYSQL_DATABASE: wordpress 14 | MYSQL_USER: wordpress 15 | MYSQL_PASSWORD: wordpress 16 | wordpress: 17 | depends_on: 18 | - db 19 | image: wordpress:latest 20 | ports: 21 | - "8000:80" 22 | restart: always 23 | environment: 24 | WORDPRESS_DB_HOST: db:3306 25 | WORDPRESS_DB_USER: wordpress 26 | WORDPRESS_DB_PASSWORD: wordpress 27 | volumes: 28 | db_data: 29 | ~~~~ 30 | -------------------------------------------------------------------------------- /containerization/docker-demo/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine-baseimage 2 | Maintainer "Harshal Sharma" 3 | RUN apk update && apk upgrade 4 | RUN apk add openrc --no-cache 5 | RUN apk add nginx && mkdir -p /usr/share/nginx/html && mkdir -p /run/nginx 6 | COPY default.conf /etc/nginx/conf.d/default.conf 7 | COPY index.html /usr/share/nginx/html/index.html 8 | EXPOSE 80 9 | CMD ["nginx", "-g", "daemon off;"] 10 | -------------------------------------------------------------------------------- /containerization/docker-demo/README.md: -------------------------------------------------------------------------------- 1 | # Docker Demo 2 | 3 | ## Build Base image 4 | 5 | 1. Download the repository 6 | 7 | 2. alpine-minirootfs-3.9.2-x86_64.tar - This is the rootfs for alpine 8 | 9 | 3. Create the base image 10 | 11 | docker import alpine-minirootfs-3.9.2-x86_64.tar alpine-baseimage 12 | 13 | 4. docker images 14 | 15 | REPOSITORY TAG IMAGE ID CREATED SIZE 16 | 17 | alpine-baseimage latest 0c0b4476ca3e 17 seconds ago 5.53MB 18 | 19 | 5. Test the image 20 | 21 | docker run -d -it alpine-baseimage sh 22 | 23 | docker ps 24 | 25 | docker exec -it {{ container-id }} sh 26 | 27 | ## Utilize Base Image to create a new nginx image 28 | 29 | 1. Create a dockerfile using alpine-baseimage (attached in repository) 30 | 31 | 2. Install nginx and upload the configurations from default.conf and index.html file 32 | 33 | 3. Build the image 34 | 35 | docker build . -t nginx-alpine 36 | 37 | 4. 
Test the image 38 | 39 | docker run --name nginx -d -p 8080:80 nginx-alpine 40 | 41 | curl localhost:8080 -- This should give the output of the index.html file 42 | 43 | 44 | 45 | 46 | 47 | -------------------------------------------------------------------------------- /containerization/docker-demo/alpine-minirootfs-3.9.2-x86_64.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hub-kubernetes/kubernetes-CKA/b2e4d2fb317678eb6557465b54a266abce31afb8/containerization/docker-demo/alpine-minirootfs-3.9.2-x86_64.tar -------------------------------------------------------------------------------- /containerization/docker-demo/default.conf: -------------------------------------------------------------------------------- 1 | 2 | server { 3 | listen 80; 4 | location / { 5 | root /usr/share/nginx/html; 6 | index index.html index.htm; 7 | try_files $uri $uri/ /index.html =404; 8 | } 9 | } 10 | 11 | -------------------------------------------------------------------------------- /containerization/docker-demo/index.html: -------------------------------------------------------------------------------- 1 | This is a test 2 | -------------------------------------------------------------------------------- /containerization/scratch-containers/README.md: -------------------------------------------------------------------------------- 1 | # Experimenting with containers from scratch - debian / ubuntu 2 | 3 | ## What is a minimal container? 4 | 5 | > A minimal container contains the least number of packages. A rootfs can be treated as the most basic container. 6 | 7 | ## What is a rootfs 8 | 9 | > A root file system contains everything needed to support a full Linux system. rootfs is a type of root file system. 10 | rootfs is considered to be a minimal root filesystem which is an instance of ramfs. 11 | Only certain libraries / binaries are loaded with a minimal rootfs. 12 | It has directories for proc, sys, dev - however a rootfs is not mounted by default. 13 | To use rootfs - you will have to mount /dev /sys to your local /dev and /sys 14 | A simple way of extracting rootfs from initramfs is as below - 15 | ~~~ 16 | cd `mktemp -d` && gzip -dc /boot/initrd.img-`uname -r` | cpio -ivd 17 | ~~~ 18 | ## Install debootstrap to create rootfs 19 | 20 | apt-get install debootstrap 21 | 22 | ## Use debootstrap to create rootfs 23 | 24 | 1. Debian rootfs 25 | 26 | mkdir rootfs_debian 27 | 28 | debootstrap stable rootfs_debian http://deb.debian.org/debian/ 29 | 30 | 31 | 2. Ubuntu rootfs 32 | 33 | mkdir rootfs_ubuntu 34 | 35 | debootstrap --arch=amd64 xenial rootfs_ubuntu http://archive.ubuntu.com/ubuntu/ 36 | 37 | ## Change root using chroot 38 | 39 | 1. Debian 40 | 41 | chroot rootfs_debian /bin/bash 42 | 43 | mount -t proc proc /proc 44 | 45 | 2. Ubuntu 46 | 47 | chroot rootfs_ubuntu /bin/bash 48 | 49 | mount -t proc proc /proc 50 | 51 | 3. Unmount proc after playing around 52 | 53 | umount /root/rootfs_ubuntu/proc 54 | 55 | umount /root/rootfs_debian/proc 56 | 57 | 58 | ## What is a Linux namespace 59 | 60 | Linux namespaces allow isolation of global system resources between multiple processes. 61 | The isolation can be on the levels of PID, mounts, IPC, network, user, UTS etc. 62 | This isolation is provided by the unshare command. 63 | 64 | Unshare provides the below flags 65 | 66 | -i Unshare the IPC namespace. 67 | 68 | -m Unshare the mount namespace. 69 | 70 | -n Unshare the network namespace. 71 | 72 | -p Unshare the pid namespace.
73 | 74 | -u Unshare the UTS namespace. 75 | 76 | -U Unshare the user namespace. 77 | 78 | ## Using unshare to create namespaces with chroot 79 | 80 | 1. PID namespace 81 | 82 | unshare -p -f chroot ./rootfs_ubuntu /bin/bash 83 | 84 | mount -t proc proc /proc 85 | 86 | ps -ef 87 | UID PID PPID C STIME TTY TIME CMD 88 | root 1 0 0 18:44 ? 00:00:00 /bin/bash 89 | root 13 1 0 18:44 ? 00:00:00 ps -ef 90 | 91 | Run some additional commands inside the container and on host machine - 92 | 93 | ipcs -a -- Same values on both container and local machine 94 | 95 | ip addr -- Same values on both container and local machine 96 | 97 | 2. Prove why only PID is isolated by comparing namespace details 98 | 99 | a. Open a new terminal 100 | 101 | b. ps -ef | grep "/bin/bash" and get PID of the bash spawned by unshare command 102 | 103 | c. ls -l /proc/{{PID}}/ns 104 | 105 | d. Take any existing process and get its PID 106 | 107 | e. ls -l /proc/{{PID}}/ns 108 | 109 | f. Compare values for all namespaces 110 | 111 | 3. A more complex unshare 112 | 113 | unshare -p -i -n -u -f chroot ./rootfs_ubuntu /bin/bash 114 | 115 | Verify - 116 | 117 | ipcs -a 118 | 119 | ip addr 120 | 121 | 122 | ## Using cgroups to assign resources 123 | 124 | 1. /sys/fs/cgroup/ - provides multiple resources that can be attached to your containers 125 | 126 | 2. mkdir /sys/fs/cgroup/memory/memory_ubuntu 127 | 128 | 3. Automatic assignment of some files - 129 | 130 | ls -ltra /sys/fs/cgroup/memory/memory_ubuntu 131 | 132 | 4. Add 1 MB memory to memory_ubuntu 133 | 134 | vi memory.limit_in_bytes 135 | 136 | Add the value - 1000000 137 | 138 | This is nearly 1 MB 139 | 140 | 5. On a separate terminal - unshare -p -i -n -u -f chroot ./rootfs_ubuntu /bin/bash 141 | 142 | 6. ps -ef | grep "/bin/bash" -- get the PID of the above container 143 | 144 | 7. echo {{PID}} > tasks 145 | 146 | 8. tasks file assigns this cgroup to the container 147 | 148 | 9. Perform some actions till all actions are getting killed due to memory issue 149 | 150 | 10. 
exit the container - notice that tasks and memory files are refreshed to an older state 151 | 152 | 153 | 154 | 155 | 156 | 157 | 158 | 159 | 160 | 161 | 162 | 163 | 164 | -------------------------------------------------------------------------------- /controllers/deployments/mysql_deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: mysqldeployment 5 | labels: 6 | app: mysql 7 | spec: 8 | replicas: 3 9 | revisionHistoryLimit: 5 10 | selector: 11 | matchLabels: 12 | app: mysql 13 | strategy: 14 | type: RollingUpdate 15 | rollingUpdate: 16 | maxUnavailable: 1 17 | maxSurge: 2 18 | template: 19 | metadata: 20 | name: mysql_pod 21 | labels: 22 | app: mysql 23 | spec: 24 | containers: 25 | - image: mysql:5.7 26 | name: mysql 27 | env: 28 | - name: MYSQL_ROOT_PASSWORD 29 | valueFrom: 30 | secretKeyRef: 31 | name: mysql-pass 32 | key: password 33 | ports: 34 | - containerPort: 3306 35 | name: mysql 36 | resources: 37 | limits: 38 | cpu: "1" 39 | requests: 40 | cpu: "0.5" 41 | volumeMounts: 42 | - name: mysql-persistent-storage 43 | mountPath: /var/lib/mysql 44 | volumes: 45 | - name: mysql-persistent-storage 46 | emptyDir: {} 47 | -------------------------------------------------------------------------------- /controllers/deployments/mysql_deployment_recreate.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: mysqldeployment 5 | labels: 6 | app: mysql 7 | spec: 8 | replicas: 3 9 | revisionHistoryLimit: 5 10 | selector: 11 | matchLabels: 12 | app: mysql 13 | strategy: 14 | type: Recreate 15 | template: 16 | metadata: 17 | name: mysql_pod 18 | labels: 19 | app: mysql 20 | spec: 21 | containers: 22 | - image: mysql:5.7 23 | name: mysql 24 | env: 25 | - name: MYSQL_ROOT_PASSWORD 26 | valueFrom: 27 | secretKeyRef: 28 | name: mysql-pass 29 | key: password 30 | ports: 31 | - containerPort: 3306 32 | name: mysql 33 | resources: 34 | limits: 35 | cpu: "1" 36 | requests: 37 | cpu: "0.5" 38 | volumeMounts: 39 | - name: mysql-persistent-storage 40 | mountPath: /var/lib/mysql 41 | volumes: 42 | - name: mysql-persistent-storage 43 | emptyDir: {} 44 | -------------------------------------------------------------------------------- /controllers/deployments/rollout_rollbacks.cmd: -------------------------------------------------------------------------------- 1 | kubectl rollout status deployment/mysqldeployment 2 | kubectl rollout history deployment/mysqldeployment 3 | kubectl rollout history deployment/mysqldeployment --revision=2 4 | kubectl rollout undo deployment/mysqldeployment --to-revision=1 5 | -------------------------------------------------------------------------------- /controllers/replicaset/mysql_rs1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: mysql 5 | labels: 6 | app: mysql_1 7 | spec: 8 | containers: 9 | - image: mysql:5.6 10 | name: mysql 11 | env: 12 | - name: MYSQL_ROOT_PASSWORD 13 | valueFrom: 14 | secretKeyRef: 15 | name: mysql-pass 16 | key: password 17 | ports: 18 | - containerPort: 3306 19 | name: mysql 20 | volumeMounts: 21 | - name: mysql-persistent-storage 22 | mountPath: /var/lib/mysql 23 | volumes: 24 | - name: mysql-persistent-storage 25 | hostPath: 26 | path: /data 27 | type: DirectoryOrCreate 28 | 
-------------------------------------------------------------------------------- /controllers/replicaset/mysql_rs2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: mysqlrs 5 | labels: 6 | app: mysql 7 | spec: 8 | replicas: 3 9 | selector: 10 | matchExpressions: 11 | - {key: app, operator: In, values: [mysql_1, mysql_2]} 12 | template: 13 | metadata: 14 | name: mysql 15 | labels: 16 | app: mysql_2 17 | spec: 18 | containers: 19 | - image: mysql:5.6 20 | name: mysql 21 | env: 22 | - name: MYSQL_ROOT_PASSWORD 23 | valueFrom: 24 | secretKeyRef: 25 | name: mysql-pass 26 | key: password 27 | ports: 28 | - containerPort: 3306 29 | name: mysql 30 | volumeMounts: 31 | - name: mysql-persistent-storage 32 | mountPath: /var/lib/mysql 33 | volumes: 34 | - name: mysql-persistent-storage 35 | emptyDir: {} 36 | -------------------------------------------------------------------------------- /controllers/replicationcontroller/replicationcontroller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: my-nginx 5 | spec: 6 | replicas: 5 7 | template: 8 | metadata: 9 | labels: 10 | app: nginx 11 | spec: 12 | containers: 13 | - name: nginx 14 | image: nginx:1.7.9 15 | ports: 16 | - containerPort: 80 17 | 18 | -------------------------------------------------------------------------------- /controllers/replicationcontroller/replicationcontrollerupdate.cmd: -------------------------------------------------------------------------------- 1 | kubectl rolling-update my-nginx --image=nginx:1.9.1 2 | -------------------------------------------------------------------------------- /controllers/secret.conf: -------------------------------------------------------------------------------- 1 | kubectl create secret generic mysql-pass --from-literal=password=root 2 | -------------------------------------------------------------------------------- /dashboard/README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Dashboard installation steps 2 | 3 | * Create self signed certs 4 | ~~~ 5 | mkdir certs 6 | cd certs 7 | openssl genrsa -out dashboard.key 2048 8 | openssl rsa -in dashboard.key -out dashboard.key 9 | openssl req -sha256 -new -key dashboard.key -out dashboard.csr -subj '/CN=localhost' 10 | openssl x509 -req -sha256 -days 365 -in dashboard.csr -signkey dashboard.key -out dashboard.crt 11 | 12 | ~~~ 13 | 14 | * Add certs as secrets to be consumed by dashboard 15 | 16 | ` kubectl -n kube-system create secret generic kubernetes-dashboard-certs --from-file=/PATH_TO_CERTS` 17 | 18 | * Create the dashboard deployment 19 | 20 | ` kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml ` 21 | 22 | * Create a pod security policy and assign the psp to dashboard service account 23 | 24 | ` kubectl create -f podsecuritypolicy.yaml` 25 | 26 | * Create a role for dashboard service account to use pod security policy 27 | 28 | ~~~ 29 | kubectl -n kube-system create role psp:dashboard --verb=use --resource=podsecuritypolicy --resource-name=dashboard 30 | kubectl -n kube-system create rolebinding kubernetes-dashboard-policy --role=psp:dashboard --serviceaccount=kube-system:kubernetes-dashboard 31 | kubectl --as=system:serviceaccount:kube-system:kubernetes-dashboard -n kube-system auth can-i use 
podsecuritypolicy/dashboard 32 | 33 | ~~~ 34 | 35 | * Create Admin user for dashboard and a corresponding clusterrolebinding 36 | 37 | ` kubectl create -f serviceaccount.yaml -f clusterrolebinding.yaml ` 38 | 39 | * Edit the dashboard service and expose it as either a loadbalancer or a nodeport. 40 | 41 | * Get the token for admin user from the secrets 42 | 43 | ` kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}') ` 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | -------------------------------------------------------------------------------- /dashboard/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1beta1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: admin-user 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: cluster-admin 9 | subjects: 10 | - kind: ServiceAccount 11 | name: admin-user 12 | namespace: kube-system 13 | -------------------------------------------------------------------------------- /dashboard/podsecuritypolicy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: PodSecurityPolicy 3 | metadata: 4 | name: dashboard 5 | spec: 6 | privileged: false 7 | seLinux: 8 | rule: RunAsAny 9 | supplementalGroups: 10 | rule: RunAsAny 11 | runAsUser: 12 | rule: RunAsAny 13 | fsGroup: 14 | rule: RunAsAny 15 | volumes: 16 | - '*' 17 | -------------------------------------------------------------------------------- /dashboard/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: admin-user 5 | namespace: kube-system 6 | -------------------------------------------------------------------------------- /etcd/README.md: -------------------------------------------------------------------------------- 1 | # Securing ETCD 2 | 3 | ## Generating certificates 4 | 5 | The etcd datastore has the concept of users that are linked to roles, where each role has a defined set of access permissions to the data stored in etcd. This tutorial walks you through the process of generating the Certificate Authority (CA), Certificates and Keys that can be used to authenticate a specific user with etcd. There are many different tools that can be used to generate these files. This tutorial tries to layout the unique or specific details that are needed for each of the different certificates but uses the hack/tls-setup tool from the etcd repo, to make certificate generation easy. 
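Under the hood, hack/tls-setup is a thin wrapper around cfssl and cfssljson (the Makefile snippet further down refers to them as $(CFSSL) and $(JSON)). For orientation, bootstrapping a CA and signing one certificate by hand looks roughly like the sketch below (a sketch only - the ca-csr.json, ca-config.json and node-csr.json file names follow the common cfssl layout, which the certs directory under multi-master-hard-way in this repository also uses, and the `etcd` profile name is an assumption that must match a profile defined in ca-config.json):

```
# Initialise the CA - produces ca.pem, ca-key.pem and ca.csr
cfssl gencert -initca ca-csr.json | cfssljson -bare ca

# Sign a server/client certificate against that CA
cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=etcd \
  node-csr.json | cfssljson -bare node
```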
6 | 7 | 8 | Generating the certificates creates: 9 | 10 | * CA 11 | * a certificate and key pair for 3 etcd servers 12 | * a certificate and key pair for etcd proxies 13 | * the certificate and key pairs for each user/component 14 | 15 | Edit the below CSR - 16 | 17 | ``` 18 | { 19 | "CN": "<name of the user or component>", 20 | "hosts": [ 21 | "localhost" 22 | ], 23 | "key": { 24 | "algo": "ecdsa", 25 | "size": 384 26 | }, 27 | "names": [ 28 | { 29 | "O": "autogenerated", 30 | "OU": "etcd cluster", 31 | "L": "the internet" 32 | } 33 | ] 34 | } 35 | 36 | ``` 37 | 38 | Execute the below to generate certificates - 39 | 40 | 41 | ``` 42 | 43 | $(CFSSL) gencert \ 44 | -ca certs/ca.pem \ 45 | -ca-key certs/ca-key.pem \ 46 | -config config/ca-config.json \ 47 | config/req-<name>.json | $(JSON) -bare certs/<name> 48 | 49 | ``` 50 | 51 | ## Create ETCD user & roles 52 | 53 | There is one special user, root, and one special role, root. 54 | 55 | * User root 56 | The root user, which has full access to etcd, must be created before activating authentication. The idea behind the root user is for administrative purposes: managing roles and ordinary users. The root user must have the root role and is allowed to change anything inside etcd. 57 | 58 | * Role root 59 | The role root may be granted to any user, in addition to the root user. A user with the root role has both global read-write access and permission to update the cluster's authentication configuration. Furthermore, the root role grants privileges for general cluster maintenance, including modifying cluster membership, defragmenting the store, and taking snapshots. 60 | 61 | Create user - 62 | 63 | ``` 64 | etcdctl user add myusername 65 | 66 | ``` 67 | 68 | Grant roles - 69 | 70 | ``` 71 | $ etcdctl user grant-role myusername foo 72 | $ etcdctl user revoke-role myusername bar 73 | 74 | ``` 75 | 76 | ## ETCD segmentation 77 | 78 | 79 | When using etcd with RBAC, all components that access etcd must be configured with the proper certificates. This document describes the users and roles needed to segment etcd so that Kubernetes and Calico can only read and write within their respective subtrees/prefixes. This can be extended to give each individual Calico component a more compartmentalized configuration. 80 | 81 | 82 | The following components need certificates with a Common Name that matches an etcd user that has been given appropriate roles allowing access to the key prefixes or paths listed below. 83 | 84 | **kube-apiserver** 85 | 86 | Read and write access to /registry/. The etcd user needs to be given the root role to perform compaction when using the etcd v3 API (this also means that Kubernetes will have full read and write access to v3 data). 87 | 88 | ``` 89 | --etcd-cafile=<CA certificate> 90 | --etcd-certfile=<client certificate> 91 | --etcd-keyfile=<client key> 92 | 93 | ``` 94 | 95 | **Calico** 96 | 97 | Read and write access to /calico/. All certificate/key pairs that are referenced below are assumed to have been created for the specific component with the information above. 98 | 99 | ``` 100 | etcd_ca: "/calico-secrets/etcd-ca" 101 | etcd_cert: "/calico-secrets/etcd-cert" 102 | etcd_key: "/calico-secrets/etcd-key" 103 | 104 | ``` 105 | -------------------------------------------------------------------------------- /hpa/README.md: -------------------------------------------------------------------------------- 1 | # Horizontal Pod autoscaler 2 | 3 | ## Install Metrics server - 4 | 5 | > Clone this repository and install metrics server. Please do note that this setup is good for dev/qa environment.
A lot of considerations must be put while installing metrics server in production environment. The official metrics-server repository is kept at https://github.com/kubernetes-incubator/metrics-server and we are using the same repo with few changes. 6 | 7 | > The file `metrics-server-deployment.yaml` is edited with the below statements - 8 | 9 | ~~~ 10 | command: 11 | - /metrics-server 12 | - --kubelet-insecure-tls 13 | - --kubelet-preferred-address-types=InternalIP 14 | 15 | ~~~ 16 | 17 | > The above command has the flag `--kubelet-insecure-tls` set which ignores strict check of kubelet certificates. In production you will definitely have a single CA private key that will be used to sign all kubelets. Please check the official metrics server documentation on other flags. 18 | 19 | > Install the metrics server 20 | 21 | ` cd metrics-server` 22 | 23 | ` kubectl create -f . ` 24 | 25 | 26 | ## Create nginx deployment 27 | 28 | > It is mandatory to set requests on cpu utilization as HPA requires CPU metrics. 29 | 30 | ` kubectl create -f nginx.yaml` 31 | 32 | ## Create HPA resource 33 | 34 | ` kubectl autoscale deploy nginx --min=3 --max=5 --cpu-percent=40` 35 | 36 | > This might take a minute or two to show up - 37 | 38 | ~~~ 39 | kubectl get hpa 40 | NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE 41 | nginx Deployment/nginx 0%/40% 3 5 3 55s 42 | ~~~ 43 | 44 | ## Test the HPA using apache bench 45 | 46 | ` apt-get install apache2-utils` 47 | 48 | ` kubectl expose deploy nginx --port=80 --type=ClusterIP` 49 | 50 | > Get the service IP address using ` kubectl get svc` 51 | 52 | ` ab -n 500000 -c 1000 http://10.97.161.152/` 53 | -------------------------------------------------------------------------------- /hpa/metrics-server/aggregated-metrics-reader.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: system:aggregated-metrics-reader 5 | labels: 6 | rbac.authorization.k8s.io/aggregate-to-view: "true" 7 | rbac.authorization.k8s.io/aggregate-to-edit: "true" 8 | rbac.authorization.k8s.io/aggregate-to-admin: "true" 9 | rules: 10 | - apiGroups: ["metrics.k8s.io"] 11 | resources: ["pods"] 12 | verbs: ["get", "list", "watch"] 13 | -------------------------------------------------------------------------------- /hpa/metrics-server/auth-delegator.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: metrics-server:system:auth-delegator 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: system:auth-delegator 10 | subjects: 11 | - kind: ServiceAccount 12 | name: metrics-server 13 | namespace: kube-system 14 | -------------------------------------------------------------------------------- /hpa/metrics-server/auth-reader.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | kind: RoleBinding 4 | metadata: 5 | name: metrics-server-auth-reader 6 | namespace: kube-system 7 | roleRef: 8 | apiGroup: rbac.authorization.k8s.io 9 | kind: Role 10 | name: extension-apiserver-authentication-reader 11 | subjects: 12 | - kind: ServiceAccount 13 | name: metrics-server 14 | namespace: kube-system 15 | -------------------------------------------------------------------------------- 
/hpa/metrics-server/metrics-apiservice.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiregistration.k8s.io/v1beta1 3 | kind: APIService 4 | metadata: 5 | name: v1beta1.metrics.k8s.io 6 | spec: 7 | service: 8 | name: metrics-server 9 | namespace: kube-system 10 | group: metrics.k8s.io 11 | version: v1beta1 12 | insecureSkipTLSVerify: true 13 | groupPriorityMinimum: 100 14 | versionPriority: 100 15 | -------------------------------------------------------------------------------- /hpa/metrics-server/metrics-server-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: metrics-server 6 | namespace: kube-system 7 | --- 8 | apiVersion: extensions/v1beta1 9 | kind: Deployment 10 | metadata: 11 | name: metrics-server 12 | namespace: kube-system 13 | labels: 14 | k8s-app: metrics-server 15 | spec: 16 | selector: 17 | matchLabels: 18 | k8s-app: metrics-server 19 | template: 20 | metadata: 21 | name: metrics-server 22 | labels: 23 | k8s-app: metrics-server 24 | spec: 25 | serviceAccountName: metrics-server 26 | volumes: 27 | # mount in tmp so we can safely use from-scratch images and/or read-only containers 28 | - name: tmp-dir 29 | emptyDir: {} 30 | containers: 31 | - name: metrics-server 32 | image: k8s.gcr.io/metrics-server-amd64:v0.3.1 33 | imagePullPolicy: Always 34 | command: 35 | - /metrics-server 36 | - --kubelet-insecure-tls 37 | - --kubelet-preferred-address-types=InternalIP 38 | 39 | volumeMounts: 40 | - name: tmp-dir 41 | mountPath: /tmp 42 | 43 | -------------------------------------------------------------------------------- /hpa/metrics-server/metrics-server-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: metrics-server 6 | namespace: kube-system 7 | labels: 8 | kubernetes.io/name: "Metrics-server" 9 | kubernetes.io/cluster-service: "true" 10 | spec: 11 | selector: 12 | k8s-app: metrics-server 13 | ports: 14 | - port: 443 15 | protocol: TCP 16 | targetPort: 443 17 | -------------------------------------------------------------------------------- /hpa/metrics-server/resource-reader.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: system:metrics-server 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - pods 11 | - nodes 12 | - nodes/stats 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | --- 18 | apiVersion: rbac.authorization.k8s.io/v1 19 | kind: ClusterRoleBinding 20 | metadata: 21 | name: system:metrics-server 22 | roleRef: 23 | apiGroup: rbac.authorization.k8s.io 24 | kind: ClusterRole 25 | name: system:metrics-server 26 | subjects: 27 | - kind: ServiceAccount 28 | name: metrics-server 29 | namespace: kube-system 30 | -------------------------------------------------------------------------------- /hpa/nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx 5 | labels: 6 | app: nginx 7 | spec: 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: nginx 12 | template: 13 | metadata: 14 | name: nginxpod 15 | labels: 16 | app: nginx 17 | spec: 18 | containers: 19 | - name: nginx 20 | image: nginx:latest 21 | resources: 22 | 
requests: 23 | cpu: 100m 24 | -------------------------------------------------------------------------------- /ingress-nginx/README.md: -------------------------------------------------------------------------------- 1 | # kubernetes-ingress 2 | ## Demo regarding kubernetes Ingress with Nginx ingress controller 3 | 4 | This demo shows how to install Nginx ingress controller and managing services using ingress through external DNS 5 | There are multiple ways to install the controller, we will focus on deploying the controller as a Deployment with 6 | Nodeport Service type 7 | 8 | ## Pre-requisite - 9 | 10 | 1. Working kubernetes cluster 11 | 2. git installed on the machine 12 | 3. Access to atleast one DNS that can be configured (optional) 13 | 14 | ## Steps - 15 | 16 | ## 1. Install ingress controller on your Kubernetes cluster 17 | 18 | a. git clone https://github.com/nginxinc/kubernetes-ingress.git (official kubernetes nginx github repository) 19 | 20 | b. cd kubernetes-ingress/deployments 21 | 22 | c. Create the namespace and service account - kubectl apply -f common/ns-and-sa.yaml 23 | 24 | d. Create the secret for TLS certificate - kubectl apply -f common/default-server-secret.yaml 25 | 26 | e. Create the configmap for Nginx controller configuration - kubectl apply -f common/nginx-config.yaml 27 | 28 | f. Configure RBAC by creating a cluster role - kubectl apply -f rbac/rbac.yaml 29 | 30 | g. Deploy the ingress controller as a deployment - kubectl apply -f deployment/nginx-ingress.yaml 31 | 32 | h. You can also use daemonset to deploy the controller, we are using deployment as the controller to deploy ingress 33 | 34 | i. Verify the ingress controller is running - kubectl get pods --namespace=nginx-ingress 35 | 36 | j. Expose the ingress controller by creating a service of type NodePort - kubectl create -f service/nodeport.yaml 37 | 38 | k. In case you are using managed Kubernetes Instances using GKE / AWS / AZURE you can create the service type as 39 | Loadbalancer 40 | 41 | l. Verify the service - kubectl get svc -n=nginx-ingress 42 | 43 | nginx-ingress NodePort 10.104.170.46 80:30982/TCP,443:31542/TCP 2m2s 44 | 45 | m. Note the HTTP and HTTPS port (30982 and 31542). 46 | 47 | n. Get the internal IP address of the ingress controller - kubectl get pods --namespace=nginx-ingress -o wide 48 | 49 | nginx-ingress-755df5c4cc-2pgbv 1/1 Running 0 5m21s 192.168.1.77 knode1 50 | 51 | o. Note the internal IP address - 192.168.1.77 52 | 53 | Now we have successfully installed NGINX Ingress controller. Lets now see the demo where we will route 3 services 54 | (nginx based service) using ingress. 55 | 56 | ## 2. Deploy the dummy application 57 | 58 | a. Clone this repository 59 | 60 | b. You will find 3 directories - app1, app2, and app3. These 3 directories will have a Dockerfile, index.html and a 61 | default.conf file. 62 | 63 | c. The default.conf file is edited to provide the locations /app1, /app2, and /app3 respectively. Similarly index.html 64 | file is modified to have different texts simulating 3 different applications deployed on nginx 65 | 66 | d. The Dockerfile is using nginx as the base image and the files default.conf and index.html are overridden. 67 | 68 | e. Lets build these images 69 | 70 | 1. docker login -- to login to your dockerhub repository. 71 | 72 | 2. cd app1 ; docker build . -t {YOUR_DOCKERLOGIN_ID}/nginx-app1 ; docker push {YOUR_DOCKERLOGIN_ID}/nginx-app1 73 | 74 | 3. cd app2 ; docker build . 
-t {YOUR_DOCKERLOGIN_ID}/nginx-app2 ; docker push {YOUR_DOCKERLOGIN_ID}/nginx-app2 75 | 76 | 4. cd app3 ; docker build . -t {YOUR_DOCKERLOGIN_ID}/nginx-app3 ; docker push {YOUR_DOCKERLOGIN_ID}/nginx-app3 77 | 78 | 5. There are 3 files present - app1.yaml, app2.yaml and app3.yaml which contain the deployment definitions of all the 79 | 3 images 80 | 81 | 6. kubectl create -f app1.yaml -f app2.yaml -f app3.yaml 82 | 83 | 7. There is 1 file - service.yaml which contains the service definition of all the 3 deployments. 84 | 85 | 8. kubectl create -f service.yaml 86 | 87 | 9. Test the deployment to see that everything is working - 88 | 89 | kubectl get svc 90 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 91 | app1 ClusterIP 10.102.238.68 80/TCP 5s 92 | app2 ClusterIP 10.97.89.152 80/TCP 5s 93 | app3 ClusterIP 10.110.243.149 80/TCP 5s 94 | 95 | 10. Curl the service IP address to get the below output 96 | 97 | curl 10.102.238.68 98 | You have reached app1 99 | 100 | curl 10.97.89.152 101 | You have reached app2 102 | 103 | curl 10.110.243.149 104 | You have reached app3 105 | 106 | Now that we have successfully deployed our application, it's time to create an ingress resource that will route traffic to all 107 | the 3 services 108 | 109 | ## 3. Deploy ingress resource 110 | 111 | a. There is a file ingress.yaml that contains the ingress definition to route the traffic 112 | 113 | b. The ingress file contains the host as kubernetesfederatedcluster.com. If you have your own domain, make the relevant 114 | changes here. If you don't have your own domain, you can change this to any value like abc.com or example.com. 115 | 116 | c. kubectl create -f ingress.yaml 117 | 118 | d. Verify the ingress 119 | 120 | kubectl get ingress 121 | NAME HOSTS ADDRESS PORTS AGE 122 | app-ingress kubernetesfederatedcluster.com 80 4m11s 123 | 124 | We have now successfully deployed the ingress resource. We can now test the ingress in 2 ways 125 | 126 | ## 4. Testing ingress controller - 127 | 128 | ### a. You don't have your own domain name - 129 | 130 | 1. Go back to steps 1.m and 1.o where we got the IP address of the ingress controller and the Port 131 | 132 | 2. export IC_IP=192.168.1.77 (Step 1.o) 133 | 134 | 3. export IC_HTTP_PORT=80 (Step 1.m - Please note not to use the NodePort as we are using the IP address of 135 | the ingress controller) 136 | 137 | 4. You can specify the port as 443 if your application uses SSL. We are not using SSL at the moment 138 | 139 | 5. Run the below command that hits /app1 - 140 | 141 | curl --resolve kubernetesfederatedcluster.com:$IC_HTTP_PORT:$IC_IP http://kubernetesfederatedcluster.com:$IC_HTTP_PORT/app1 --insecure 142 | You have reached app1 143 | 144 | 6. Similarly, the commands to hit app2 and app3 are as below 145 | 146 | curl --resolve kubernetesfederatedcluster.com:$IC_HTTP_PORT:$IC_IP http://kubernetesfederatedcluster.com:$IC_HTTP_PORT/app2 --insecure 147 | You have reached app2 148 | 149 | curl --resolve kubernetesfederatedcluster.com:$IC_HTTP_PORT:$IC_IP http://kubernetesfederatedcluster.com:$IC_HTTP_PORT/app3 --insecure 150 | You have reached app3 151 | 152 | 7. Make sure you change the host kubernetesfederatedcluster.com to the appropriate host that is defined in your 153 | ingress file. 154 | 155 | ### b. You own your own domain name - 156 | 157 | 1. If you own your own domain name, there are certain configurations you need to do beforehand 158 | 159 | 2. Reserve a static external IP address with your cloud provider 160 | 161 | 3.
Assign this static external IP address to any Virtual machine that is a part of your kubernetes cluster 162 | 163 | 4. if you are working with a cloud provider like GCP/AWS/AZURE/ 164 | 165 | a. Create a DNS on your cloud provider 166 | 167 | b. Associate the DNS with your owned DNS name - in my case - kubernetesfederatedcluster.com 168 | 169 | c. Update your Domain setting from your provider to use the nameservers provided by your cloud provider. 170 | Remove any custom DNS setting that you might have by your domain provider. 171 | 172 | d. Create a new A record for kubernetesfederatedcluster.com and associate the external static Ip that you reserved 173 | with the A record 174 | 175 | e. Create a new CNAME with DNS as www.kubernetesfederatedcluster.com. and associate it with the canonical name as 176 | kubernetesfederatedcluster.com 177 | 178 | f. Save these changes 179 | 180 | g. Primary goal is to make sure that your DNS resolves to the external IP that you have reserved and assigned to 181 | the VM within your cluster. 182 | 183 | h. Get the nginx controller port detail from above step 1.m - this is 30982 in our case. 184 | 185 | i. The DNS changes might take 10-15 mins to reflect. Make sure you ping your domain using - ping 186 | kubernetesfederatedcluster.com and you should get the correct IP address 187 | 188 | j. Access your ingress as below - 189 | 190 | www.kubernetesfederatedcluster.com:30982/app1 191 | 192 | www.kubernetesfederatedcluster.com:30982/app2 193 | 194 | www.kubernetesfederatedcluster.com:30982/app3 195 | 196 | 197 | 198 | -------------------------------------------------------------------------------- /ingress-nginx/_config.yml: -------------------------------------------------------------------------------- 1 | plugins: 2 | - jekyll-mentions 3 | - jemoji 4 | - jekyll-redirect-from 5 | - jekyll-sitemap 6 | - jekyll-feed 7 | -------------------------------------------------------------------------------- /ingress-nginx/app1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-app1 5 | labels: 6 | app: app1 7 | spec: 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: app1 12 | template: 13 | metadata: 14 | name: nginx-app1 15 | labels: 16 | app: app1 17 | spec: 18 | containers: 19 | - name: app1 20 | image: harshal0812/nginx-app1 21 | -------------------------------------------------------------------------------- /ingress-nginx/app1/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:latest 2 | COPY index.html /usr/share/nginx/html/index.html 3 | COPY default.conf /etc/nginx/conf.d/default.conf 4 | -------------------------------------------------------------------------------- /ingress-nginx/app1/default.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name localhost; 4 | 5 | #charset koi8-r; 6 | #access_log /var/log/nginx/host.access.log main; 7 | 8 | location / { 9 | root /usr/share/nginx/html/; 10 | index index.html index.htm; 11 | } 12 | location /app1 { 13 | proxy_set_header X-Real-IP $remote_addr; 14 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 15 | proxy_set_header X-NginX-Proxy true; 16 | proxy_pass http://localhost/; 17 | proxy_ssl_session_reuse off; 18 | proxy_set_header Host $http_host; 19 | proxy_cache_bypass $http_upgrade; 20 | proxy_redirect off; 21 | } 22 | 23 | #error_page 404 /404.html; 
24 | 25 | # redirect server error pages to the static page /50x.html 26 | # 27 | location /301.html { 28 | root /usr/share/nginx/html/; 29 | } 30 | error_page 500 502 503 504 /50x.html; 31 | location = /50x.html { 32 | root /usr/share/nginx/html; 33 | } 34 | 35 | # proxy the PHP scripts to Apache listening on 127.0.0.1:80 36 | # 37 | #location ~ \.php$ { 38 | # proxy_pass http://127.0.0.1; 39 | #} 40 | 41 | # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000 42 | # 43 | #location ~ \.php$ { 44 | # root html; 45 | # fastcgi_pass 127.0.0.1:9000; 46 | # fastcgi_index index.php; 47 | # fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name; 48 | # include fastcgi_params; 49 | #} 50 | 51 | # deny access to .htaccess files, if Apache's document root 52 | # concurs with nginx's one 53 | # 54 | #location ~ /\.ht { 55 | # deny all; 56 | #} 57 | } 58 | 59 | -------------------------------------------------------------------------------- /ingress-nginx/app1/index.html: -------------------------------------------------------------------------------- 1 | You have reached app1 2 | -------------------------------------------------------------------------------- /ingress-nginx/app2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-app2 5 | labels: 6 | app: app2 7 | spec: 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: app2 12 | template: 13 | metadata: 14 | name: nginx-app2 15 | labels: 16 | app: app2 17 | spec: 18 | containers: 19 | - name: app2 20 | image: harshal0812/nginx-app2 21 | -------------------------------------------------------------------------------- /ingress-nginx/app2/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:latest 2 | COPY index.html /usr/share/nginx/html/index.html 3 | COPY default.conf /etc/nginx/conf.d/default.conf 4 | -------------------------------------------------------------------------------- /ingress-nginx/app2/default.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name localhost; 4 | 5 | #charset koi8-r; 6 | #access_log /var/log/nginx/host.access.log main; 7 | 8 | location / { 9 | root /usr/share/nginx/html/; 10 | index index.html index.htm; 11 | } 12 | location /app2 { 13 | proxy_set_header X-Real-IP $remote_addr; 14 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 15 | proxy_set_header X-NginX-Proxy true; 16 | proxy_pass http://localhost/; 17 | proxy_ssl_session_reuse off; 18 | proxy_set_header Host $http_host; 19 | proxy_cache_bypass $http_upgrade; 20 | proxy_redirect off; 21 | } 22 | 23 | #error_page 404 /404.html; 24 | 25 | # redirect server error pages to the static page /50x.html 26 | # 27 | location /301.html { 28 | root /usr/share/nginx/html/; 29 | } 30 | error_page 500 502 503 504 /50x.html; 31 | location = /50x.html { 32 | root /usr/share/nginx/html; 33 | } 34 | 35 | # proxy the PHP scripts to Apache listening on 127.0.0.1:80 36 | # 37 | #location ~ \.php$ { 38 | # proxy_pass http://127.0.0.1; 39 | #} 40 | 41 | # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000 42 | # 43 | #location ~ \.php$ { 44 | # root html; 45 | # fastcgi_pass 127.0.0.1:9000; 46 | # fastcgi_index index.php; 47 | # fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name; 48 | # include fastcgi_params; 49 | #} 50 | 51 | # deny access to .htaccess files, if Apache's document root 
52 | # concurs with nginx's one 53 | # 54 | #location ~ /\.ht { 55 | # deny all; 56 | #} 57 | } 58 | 59 | -------------------------------------------------------------------------------- /ingress-nginx/app2/index.html: -------------------------------------------------------------------------------- 1 | You have reached app2 2 | -------------------------------------------------------------------------------- /ingress-nginx/app3.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-app3 5 | labels: 6 | app: app3 7 | spec: 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: app3 12 | template: 13 | metadata: 14 | name: nginx-app3 15 | labels: 16 | app: app3 17 | spec: 18 | containers: 19 | - name: app3 20 | image: harshal0812/nginx-app3 21 | -------------------------------------------------------------------------------- /ingress-nginx/app3/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:latest 2 | COPY index.html /usr/share/nginx/html/index.html 3 | COPY default.conf /etc/nginx/conf.d/default.conf 4 | -------------------------------------------------------------------------------- /ingress-nginx/app3/default.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name localhost; 4 | 5 | #charset koi8-r; 6 | #access_log /var/log/nginx/host.access.log main; 7 | 8 | location / { 9 | root /usr/share/nginx/html/; 10 | index index.html index.htm; 11 | } 12 | location /app3 { 13 | proxy_set_header X-Real-IP $remote_addr; 14 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 15 | proxy_set_header X-NginX-Proxy true; 16 | proxy_pass http://localhost/; 17 | proxy_ssl_session_reuse off; 18 | proxy_set_header Host $http_host; 19 | proxy_cache_bypass $http_upgrade; 20 | proxy_redirect off; 21 | } 22 | 23 | #error_page 404 /404.html; 24 | 25 | # redirect server error pages to the static page /50x.html 26 | # 27 | location /301.html { 28 | root /usr/share/nginx/html/; 29 | } 30 | error_page 500 502 503 504 /50x.html; 31 | location = /50x.html { 32 | root /usr/share/nginx/html; 33 | } 34 | 35 | # proxy the PHP scripts to Apache listening on 127.0.0.1:80 36 | # 37 | #location ~ \.php$ { 38 | # proxy_pass http://127.0.0.1; 39 | #} 40 | 41 | # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000 42 | # 43 | #location ~ \.php$ { 44 | # root html; 45 | # fastcgi_pass 127.0.0.1:9000; 46 | # fastcgi_index index.php; 47 | # fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name; 48 | # include fastcgi_params; 49 | #} 50 | 51 | # deny access to .htaccess files, if Apache's document root 52 | # concurs with nginx's one 53 | # 54 | #location ~ /\.ht { 55 | # deny all; 56 | #} 57 | } 58 | 59 | -------------------------------------------------------------------------------- /ingress-nginx/app3/index.html: -------------------------------------------------------------------------------- 1 | You have reached app3 2 | -------------------------------------------------------------------------------- /ingress-nginx/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: app-ingress 5 | spec: 6 | rules: 7 | - host: kubernetesfederatedcluster.com 8 | http: 9 | paths: 10 | - path: /app1 11 | backend: 12 | serviceName: app1 13 | servicePort: 80 14 
| - path: /app2 15 | backend: 16 | serviceName: app2 17 | servicePort: 80 18 | - path: /app3 19 | backend: 20 | serviceName: app3 21 | servicePort: 80 22 | 23 | 24 | 25 | -------------------------------------------------------------------------------- /ingress-nginx/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: app1 5 | spec: 6 | ports: 7 | - port: 80 8 | targetPort: 80 9 | selector: 10 | app: app1 11 | --- 12 | 13 | apiVersion: v1 14 | kind: Service 15 | metadata: 16 | name: app2 17 | spec: 18 | ports: 19 | - port: 80 20 | targetPort: 80 21 | selector: 22 | app: app2 23 | --- 24 | 25 | apiVersion: v1 26 | kind: Service 27 | metadata: 28 | name: app3 29 | spec: 30 | ports: 31 | - port: 80 32 | targetPort: 80 33 | selector: 34 | app: app3 35 | --- 36 | -------------------------------------------------------------------------------- /ingress-nginx/t1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | nginx.ingress.kubernetes.io/rewrite-target: / 6 | name: demo-ingress 7 | spec: 8 | rules: 9 | - host: mysite.com 10 | http: 11 | paths: 12 | - path: /app1 13 | backend: 14 | serviceName: nginx 15 | servicePort: 80 16 | --- 17 | apiVersion: v1 18 | kind: Service 19 | metadata: 20 | name: nginx 21 | spec: 22 | ports: 23 | - port: 80 24 | targetPort: 80 25 | selector: 26 | app: nginx 27 | --- 28 | apiVersion: extensions/v1beta1 29 | kind: Deployment 30 | metadata: 31 | name: nginx 32 | spec: 33 | replicas: 1 34 | template: 35 | metadata: 36 | labels: 37 | app: nginx 38 | spec: 39 | containers: 40 | - name: echoserver 41 | image: nginx 42 | ports: 43 | - containerPort: 80 44 | 45 | -------------------------------------------------------------------------------- /jobs/README.md: -------------------------------------------------------------------------------- 1 | # JOBS and CRONJOBS 2 | 3 | ## JOBS 4 | 5 | > Jobs are specialized controllers. Just like any other controller (Replication Controller, ReplicaSet, or Deployments), they control the state of the underlying pods. Jobs will run a pod to completion - it means that job will create a pod, run it for a finite amount of time and place the pod in **Completed** state after the execution is done. 6 | 7 | Jobs are the perfect usecase for - 8 | 9 | * Batch Processing 10 | * Big Data transformation jobs (Mapreduce / GCP Dataflow ) 11 | * Creating a setup job like an Operator like Mysql/Spark Operator 12 | * ML processing (Image processing / NLP) 13 | 14 | ## Why Jobs ? 15 | 16 | > In order to understand why do you need jobs - lets do a quick demo to run a pod that adds two numbers - 17 | 18 | ` kubectl create -f pod.yaml` 19 | 20 | ``` 21 | kubectl create -f pod.yaml 22 | pod/pod-math created 23 | ``` 24 | 25 | Observations : 26 | 27 | ``` 28 | kubectl get pods 29 | NAME READY STATUS RESTARTS AGE 30 | pod-math 0/1 Completed 2 22s 31 | ``` 32 | 33 | > Check the restarts - everytime a pod completes, it restarts everytime and performs the same action again and again. 34 | 35 | > Jobs take advantage of the restartPolicy feature which is set to **Never** or **onFailure** which states that a pod will never restart or restart only when there is an underlying failure. 
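To make the contrast concrete, here is a minimal sketch (not a file in this repo) of the same pod with restartPolicy set explicitly; the name pod-math-norestart is hypothetical, while the image and command mirror the repo's pod.yaml:

```
apiVersion: v1
kind: Pod
metadata:
  name: pod-math-norestart   # hypothetical name, for illustration only
spec:
  restartPolicy: Never       # bare pods default to Always, which causes the repeated restarts seen above
  containers:
  - name: container-math
    image: ubuntu
    command: ["expr", "10", "+", "24"]
```

Such a pod stays in Completed state once the command exits, but unlike a Job it gets no retry handling and no completion tracking - which is exactly what the Job controller adds.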
36 |
37 | > Let's create a job -
38 |
39 | ` kubectl create -f job.yaml`
40 |
41 | ```
42 | kubectl get jobs
43 | NAME       COMPLETIONS   DURATION   AGE
44 | math-job   1/1           3s         47s
45 |
46 | kubectl get pods
47 | NAME             READY   STATUS      RESTARTS   AGE
48 | math-job-pr8ks   0/1     Completed   0          52s
49 | ```
50 |
51 | Observations:
52 |
53 | > The job has run the pod to Completed status and the restart count remains 0.
54 |
55 | ## Types of Jobs
56 |
57 | There are three major types of jobs -
58 |
59 | * Jobs with a single completion count
60 |
61 | > By default the completion count of a Job is set to 1. A job which runs a single pod to completion is a job with a single completion count.
62 |
63 | * Jobs with a fixed completion count
64 |
65 | > A job can have a completion count set. If a completion count is set, the job will create that many pods **sequentially**, each executing the same set of tasks. Each pod waits for its predecessor to complete before it can start.
66 |
67 | > Let's do a demo by creating a job with a completion count of 3 that searches for prime numbers within a range -
68 |
69 | ` kubectl create -f jobs-completion-count-prime.yaml`
70 |
71 | ```
72 | kubectl get pods
73 | NAME                    READY   STATUS    RESTARTS   AGE
74 | primes-parallel-h6cv5   1/1     Running   0          9s
75 |
76 | kubectl get pods
77 | NAME                    READY   STATUS              RESTARTS   AGE
78 | primes-parallel-4ztql   0/1     ContainerCreating   0          1s
79 | primes-parallel-6b8cp   0/1     Completed           0          16s
80 | primes-parallel-h6cv5   0/1     Completed           0          33s
81 | ```
82 |
83 | Observations -
84 |
85 | > Specifying the count as 3 creates 3 pods sequentially. The output of all the pods is exactly the same, as each performs the same set of tasks. This kind of job is useful when working with a streaming service such as Kafka or Pub/Sub that streams data continuously, where the job keeps spawning new pods to take in new workload.
86 |
87 |
88 | * Jobs with parallelism
89 |
90 | > In contrast to a fixed completion count, a job with parallelism will deploy the specified number of pods in parallel to perform parallel batch processing. You specify spec.parallelism as the number of parallel pods you want to run.
91 |
92 | > Let's do a demo by creating a job with parallelism that calculates the value of pi -
93 |
94 | ` kubectl create -f jobs-parallelism.yaml`
95 |
96 | ```
97 | kubectl get pods
98 | NAME                READY   STATUS              RESTARTS   AGE
99 | example-job-cxng2   0/1     ContainerCreating   0          2s
100 | example-job-fh82k   0/1     ContainerCreating   0          2s
101 | example-job-rlts8   0/1     ContainerCreating   0          2s
102 | example-job-sm9gw   0/1     ContainerCreating   0          2s
103 | example-job-tnrpb   0/1     ContainerCreating   0          2s
104 | ```
105 |
106 | Observations -
107 |
108 | > 5 parallel pods are created, each processing the same task of calculating the value of pi. A good use case for parallelism is when you are working with a queue such as Redis or RabbitMQ, or when running automated builds on a Kubernetes deployment of Jenkins.
109 |
110 |
111 | ## Cronjobs
112 |
113 | > Cronjobs are used to create a time-based schedule for jobs. They use the cron format to define the schedule and create pods accordingly. Cronjobs are a good fit for metrics/monitoring systems where you want scheduled checks on your system.
114 |
115 | > The cron format is as below -
116 |
117 | ~~~
118 | # ┌───────────── minute (0 - 59)
119 | # │ ┌───────────── hour (0 - 23)
120 | # │ │ ┌───────────── day of the month (1 - 31)
121 | # │ │ │ ┌───────────── month (1 - 12)
122 | # │ │ │ │ ┌───────────── day of the week (0 - 6) (Sunday to Saturday;
123 | # │ │ │ │ │ 7 is also Sunday on some systems)
124 | # │ │ │ │ │
125 | # │ │ │ │ │
126 | # * * * * * command to execute
127 | ~~~
128 |
129 | > Let's do a demo by creating a cronjob that echoes "Hello World" every minute -
130 |
131 | ` kubectl create -f cronjob.yaml`
132 |
133 | ~~~
134 | kubectl get cronjob
135 | NAME         SCHEDULE      SUSPEND   ACTIVE   LAST SCHEDULE   AGE
136 | printhello   */1 * * * *   False     0                        44s
137 |
138 | kubectl get pods
139 | NAME                          READY   STATUS      RESTARTS   AGE
140 | printhello-1558115040-5ndq6   0/1     Completed   0          9s
141 |
142 | kubectl get pods
143 | NAME                          READY   STATUS              RESTARTS   AGE
144 | printhello-1558115040-5ndq6   0/1     Completed           0          60s
145 | printhello-1558115100-m5g4v   0/1     ContainerCreating   0          0s
146 | ~~~
147 |
148 |
149 | Observations:
150 |
151 | > As you can see, after 60 seconds a new pod is scheduled that performs the same action.
152 |
153 | ## Advanced demo on Job parallelism
154 |
155 | > As a part of our advanced demo, we will deploy a redis pod and fill it with a dummy work queue. Once the queue is created in redis, we will deploy a job with parallelism to consume the same queue. The parallel pods will work with redis to fetch items from the queue in parallel.
156 |
157 | ` cd parallelism`
158 |
159 | > Deploy the redis pod and service
160 |
161 | ` kubectl create -f redis-pod.yaml -f redis-service.yaml `
162 |
163 | ~~~
164 | kubectl get po,svc
165 | NAME               READY   STATUS    RESTARTS   AGE
166 | pod/redis-master   1/1     Running   0          14s
167 |
168 | NAME                 TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
169 | service/kubernetes   ClusterIP   10.96.0.1                      443/TCP    5d4h
170 | service/redis        ClusterIP   10.103.152.222                 6379/TCP   22h
171 | ~~~
172 |
173 |
174 | > Once the redis pod is created, we will deploy a temporary redis pod and connect it to our deployed redis pod using redis-cli. An alternative would be to install redis-cli on your system and use it to connect to your redis service.
175 |
176 | ` kubectl run -i --tty temp --image redis --command "/bin/sh" `
177 |
178 | > Once inside the prompt, connect to our deployed redis service -
179 |
180 | ` redis-cli -h redis`
181 |
182 | ~~~
183 | redis-cli -h redis
184 | redis:6379>
185 | ~~~
186 |
187 | > Add some items to the dummy queue and verify them
188 |
189 | ~~~
190 | rpush job2 "apple"
191 | rpush job2 "banana"
192 | rpush job2 "cherry"
193 | rpush job2 "date"
194 | rpush job2 "fig"
195 | rpush job2 "grape"
196 | rpush job2 "lemon"
197 | rpush job2 "melon"
198 | rpush job2 "orange"
199 | rpush job2 "strawberry"
200 | rpush job2 "mango"
201 | lrange job2 0 -1
202 | exit
203 | ~~~
204 |
205 | > The Dockerfile and the corresponding Python script that reads the queue are already provided in this repo. Build the Dockerfile and push the image to your Docker registry
206 |
207 | ` docker build . -t YOUR_REGISTRY_NAME/job-redis`
208 |
209 | ` docker push YOUR_REGISTRY_NAME/job-redis`
210 |
211 | > The file `pod-parallelism.yaml` contains the definition of a job that runs with a parallelism of 2. The pods will query the same queue and distribute the load between them.
212 | 213 | ` kubectl create -f pod-parallelism.yaml` 214 | 215 | 216 | ~~~ 217 | kubectl get pods 218 | NAME READY STATUS RESTARTS AGE 219 | job-redis-cr9zt 1/1 Running 0 8s 220 | job-redis-ftjt4 1/1 Running 0 8s 221 | ~~~ 222 | 223 | Observations: 224 | 225 | ` Check the logs of both the job. Even though they are running the same workload, the queries from redis is now distributed 226 | 227 | ~~~ 228 | kubectl logs job-redis-cr9zt 229 | Worker with sessionID: 1162d338-5e55-4332-b245-c473bac606bc 230 | Initial queue state: empty=False 231 | Working on mango 232 | Working on orange 233 | Working on lemon 234 | Working on fig 235 | Working on cherry 236 | Working on apple 237 | Queue empty, exiting 238 | 239 | 240 | kubectl logs job-redis-ftjt4 241 | Worker with sessionID: 905140d4-ae95-4887-bd9e-11170bd52837 242 | Initial queue state: empty=False 243 | Working on strawberry 244 | Working on melon 245 | Working on grape 246 | Working on date 247 | Working on banana 248 | Waiting for work 249 | Waiting for work 250 | Waiting for work 251 | Queue empty, exiting 252 | ~~~ 253 | 254 | 255 | 256 | 257 | 258 | 259 | 260 | 261 | 262 | 263 | 264 | 265 | 266 | 267 | 268 | 269 | 270 | 271 | 272 | 273 | 274 | 275 | 276 | -------------------------------------------------------------------------------- /jobs/add-number-docker.sh: -------------------------------------------------------------------------------- 1 | docker run ubuntu expr 10 + 24 2 | -------------------------------------------------------------------------------- /jobs/cronjob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | name: printhello 5 | spec: 6 | schedule: "*/1 * * * *" 7 | startingDeadlineSeconds: 100 8 | 9 | jobTemplate: 10 | spec: 11 | template: 12 | spec: 13 | containers: 14 | - name: hello 15 | image: busybox 16 | args: 17 | - /bin/sh 18 | - -c 19 | - date; echo "Hello, World!" 
20 | restartPolicy: OnFailure 21 | 22 | -------------------------------------------------------------------------------- /jobs/job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: math-job 5 | spec: 6 | template: 7 | spec: 8 | containers: 9 | - name: math-container 10 | image: ubuntu 11 | command: ["expr","10","+","24"] 12 | restartPolicy: Never 13 | -------------------------------------------------------------------------------- /jobs/jobs-completion-count-prime.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: primes-parallel 5 | labels: 6 | app: primes 7 | spec: 8 | completions: 3 9 | template: 10 | metadata: 11 | name: primes 12 | labels: 13 | app: primes 14 | spec: 15 | containers: 16 | - name: primes 17 | image: ubuntu 18 | command: ["bash"] 19 | args: ["-c", "current=0; max=110; echo 1; echo 2; for((i=3;i<=max;)); do for((j=i-1;j>=2;)); do if [ `expr $i % $j` -ne 0 ] ; then current=1; else current=0; break; fi; j=`expr $j - 1`; done; if [ $current -eq 1 ] ; then echo $i; fi; i=`expr $i + 1`; done"] 20 | restartPolicy: Never 21 | -------------------------------------------------------------------------------- /jobs/jobs-parallelism.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | # Unique key of the Job instance 5 | name: example-job 6 | spec: 7 | parallelism: 5 8 | template: 9 | metadata: 10 | name: example-job 11 | spec: 12 | containers: 13 | - name: pi 14 | image: perl 15 | command: ["perl"] 16 | args: ["-Mbignum=bpi", "-wle", "print bpi(2000)"] 17 | # Do not restart containers after they exit 18 | restartPolicy: Never 19 | 20 | -------------------------------------------------------------------------------- /jobs/jobs-simple-prime.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: primes 5 | spec: 6 | template: 7 | metadata: 8 | name: primes 9 | spec: 10 | containers: 11 | - name: primes 12 | image: ubuntu 13 | command: ["bash"] 14 | args: ["-c", "current=0; max=110; echo 1; echo 2; for((i=3;i<=max;)); do for((j=i-1;j>=2;)); do if [ `expr $i % $j` -ne 0 ] ; then current=1; else current=0; break; fi; j=`expr $j - 1`; done; if [ $current -eq 1 ] ; then echo $i; fi; i=`expr $i + 1`; done"] 15 | restartPolicy: Never 16 | backoffLimit: 4 17 | -------------------------------------------------------------------------------- /jobs/parallelism/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python 2 | RUN pip install redis 3 | COPY ./worker.py /worker.py 4 | COPY ./rediswq.py /rediswq.py 5 | 6 | CMD python worker.py 7 | -------------------------------------------------------------------------------- /jobs/parallelism/pod-parallelism.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: job-redis 5 | spec: 6 | parallelism: 2 7 | template: 8 | metadata: 9 | name: job-wq-2 10 | spec: 11 | containers: 12 | - name: c 13 | image: harshal0812/job-redis 14 | restartPolicy: OnFailure 15 | 16 | -------------------------------------------------------------------------------- /jobs/parallelism/redis-pod.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: redis-master 5 | labels: 6 | app: redis 7 | spec: 8 | containers: 9 | - name: master 10 | image: redis 11 | env: 12 | - name: MASTER 13 | value: "true" 14 | ports: 15 | - containerPort: 6379 16 | -------------------------------------------------------------------------------- /jobs/parallelism/redis-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: redis 5 | spec: 6 | ports: 7 | - port: 6379 8 | targetPort: 6379 9 | selector: 10 | app: redis 11 | -------------------------------------------------------------------------------- /jobs/parallelism/rediswq.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Based on http://peter-hoffmann.com/2012/python-simple-queue-redis-queue.html 4 | # and the suggestion in the redis documentation for RPOPLPUSH, at 5 | # http://redis.io/commands/rpoplpush, which suggests how to implement a work-queue. 6 | 7 | 8 | import redis 9 | import uuid 10 | import hashlib 11 | 12 | class RedisWQ(object): 13 | """Simple Finite Work Queue with Redis Backend 14 | 15 | This work queue is finite: as long as no more work is added 16 | after workers start, the workers can detect when the queue 17 | is completely empty. 18 | 19 | The items in the work queue are assumed to have unique values. 20 | 21 | This object is not intended to be used by multiple threads 22 | concurrently. 23 | """ 24 | def __init__(self, name, **redis_kwargs): 25 | """The default connection parameters are: host='localhost', port=6379, db=0 26 | 27 | The work queue is identified by "name". The library may create other 28 | keys with "name" as a prefix. 29 | """ 30 | self._db = redis.StrictRedis(**redis_kwargs) 31 | # The session ID will uniquely identify this "worker". 32 | self._session = str(uuid.uuid4()) 33 | # Work queue is implemented as two queues: main, and processing. 34 | # Work is initially in main, and moved to processing when a client picks it up. 35 | self._main_q_key = name 36 | self._processing_q_key = name + ":processing" 37 | self._lease_key_prefix = name + ":leased_by_session:" 38 | 39 | def sessionID(self): 40 | """Return the ID for this session.""" 41 | return self._session 42 | 43 | def _main_qsize(self): 44 | """Return the size of the main queue.""" 45 | return self._db.llen(self._main_q_key) 46 | 47 | def _processing_qsize(self): 48 | """Return the size of the main queue.""" 49 | return self._db.llen(self._processing_q_key) 50 | 51 | def empty(self): 52 | """Return True if the queue is empty, including work being done, False otherwise. 53 | 54 | False does not necessarily mean that there is work available to work on right now, 55 | """ 56 | return self._main_qsize() == 0 and self._processing_qsize() == 0 57 | 58 | # TODO: implement this 59 | # def check_expired_leases(self): 60 | # """Return to the work queueReturn True if the queue is empty, False otherwise.""" 61 | # # Processing list should not be _too_ long since it is approximately as long 62 | # # as the number of active and recently active workers. 
63 | # processing = self._db.lrange(self._processing_q_key, 0, -1) 64 | # for item in processing: 65 | # # If the lease key is not present for an item (it expired or was 66 | # # never created because the client crashed before creating it) 67 | # # then move the item back to the main queue so others can work on it. 68 | # if not self._lease_exists(item): 69 | # TODO: transactionally move the key from processing queue to 70 | # to main queue, while detecting if a new lease is created 71 | # or if either queue is modified. 72 | 73 | def _itemkey(self, item): 74 | """Returns a string that uniquely identifies an item (bytes).""" 75 | return hashlib.sha224(item).hexdigest() 76 | 77 | def _lease_exists(self, item): 78 | """True if a lease on 'item' exists.""" 79 | return self._db.exists(self._lease_key_prefix + self._itemkey(item)) 80 | 81 | def lease(self, lease_secs=60, block=True, timeout=None): 82 | """Begin working on an item the work queue. 83 | 84 | Lease the item for lease_secs. After that time, other 85 | workers may consider this client to have crashed or stalled 86 | and pick up the item instead. 87 | 88 | If optional args block is true and timeout is None (the default), block 89 | if necessary until an item is available.""" 90 | if block: 91 | item = self._db.brpoplpush(self._main_q_key, self._processing_q_key, timeout=timeout) 92 | else: 93 | item = self._db.rpoplpush(self._main_q_key, self._processing_q_key) 94 | if item: 95 | # Record that we (this session id) are working on a key. Expire that 96 | # note after the lease timeout. 97 | # Note: if we crash at this line of the program, then GC will see no lease 98 | # for this item a later return it to the main queue. 99 | itemkey = self._itemkey(item) 100 | self._db.setex(self._lease_key_prefix + itemkey, lease_secs, self._session) 101 | return item 102 | 103 | def complete(self, value): 104 | """Complete working on the item with 'value'. 105 | 106 | If the lease expired, the item may not have completed, and some 107 | other worker may have picked it up. There is no indication 108 | of what happened. 109 | """ 110 | self._db.lrem(self._processing_q_key, 0, value) 111 | # If we crash here, then the GC code will try to move the value, but it will 112 | # not be here, which is fine. So this does not need to be a transaction. 113 | itemkey = self._itemkey(value) 114 | self._db.delete(self._lease_key_prefix + itemkey, self._session) 115 | 116 | # TODO: add functions to clean up all keys associated with "name" when 117 | # processing is complete. 118 | 119 | # TODO: add a function to add an item to the queue. Atomically 120 | # check if the queue is empty and if so fail to add the item 121 | # since other workers might think work is done and be in the process 122 | # of exiting. 123 | 124 | # TODO(etune): move to my own github for hosting, e.g. github.com/erictune/rediswq-py and 125 | # make it so it can be pip installed by anyone (see 126 | # http://stackoverflow.com/questions/8247605/configuring-so-that-pip-install-can-work-from-github) 127 | 128 | # TODO(etune): finish code to GC expired leases, and call periodically 129 | # e.g. each time lease times out. 130 | 131 | -------------------------------------------------------------------------------- /jobs/parallelism/worker.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import time 4 | import rediswq 5 | 6 | host="redis" 7 | # Uncomment next two lines if you do not have Kube-DNS working. 
8 | # import os
9 | # host = os.getenv("REDIS_SERVICE_HOST")
10 |
11 | q = rediswq.RedisWQ(name="job2", host=host)
12 | print("Worker with sessionID: " + q.sessionID())
13 | print("Initial queue state: empty=" + str(q.empty()))
14 | while not q.empty():
15 |   item = q.lease(lease_secs=10, block=True, timeout=2)
16 |   if item is not None:
17 |     itemstr = item.decode("utf-8")
18 |     print("Working on " + itemstr)
19 |     time.sleep(10) # Put your actual work here instead of sleep.
20 |     q.complete(item)
21 |   else:
22 |     print("Waiting for work")
23 | print("Queue empty, exiting")
24 |
-------------------------------------------------------------------------------- /jobs/pod.yaml: --------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 |   name: pod-math
5 | spec:
6 |   containers:
7 |   - name: container-math
8 |     image: ubuntu
9 |     command: ["expr","10","+","24"]
10 |
-------------------------------------------------------------------------------- /kubernetes-pods/README.md: --------------------------------------------------------------------------------
1 | # Pod Design Patterns
2 |
3 | > A pod is the most fundamental unit of deployment in kubernetes. A pod contains either a single container or multiple containers. In most cases a single-container pod is the right answer for your workload. However, if you want to embed multiple containers within the same pod, there are some design considerations that match different use-cases. Use these considerations wisely. For example, it is not recommended or advisable to have a webserver and a database container within a single pod, even though it might lead to faster responses. One good use-case for a multicontainer pod is if you have a proxy service that runs right in front of your API.
4 |
5 | > It is important to note that the functionality of your application does not change in either case, i.e. whether you deploy a multicontainer pod or individual pods. Multicontainer pods are used for simpler communication between closely coupled applications. Since containers within the same pod share the same **user-space**, it becomes easier for two containers to interact with each other using volumes, queues, semaphores etc.
6 |
7 | Below are some examples of multicontainer pod design patterns.
8 |
9 | ## SideCar pattern
10 |
11 | > In this pattern, you deploy your primary app container and one or more non-app containers. These non-app containers do not provide any direct enhancement to the primary application itself. These additional containers can be logging agents (Logstash), monitoring agents (AppDynamics) or any custom watcher or network-sniffer containers. Since these containers share the same volumes, a non-app container can fetch data written by your app container and send it to backend storage (persistent volumes, Elasticsearch, Stackdriver, Datadog, etc.).
12 |
13 | ## Adapter pattern
14 |
15 | > This pattern is used when you want to transform the output or data of your primary app containers before the data is actually consumed by your backend services. Adapter containers are commonly used alongside web servers like nginx or apache, which write their logs in their own format. The adapter container can take the raw data from the web-server logs and perform selective data transformation on top of these logs to create a standardized output file, such as CSV or JSON, which can then be used by your backend systems.
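As a rough illustration of how the sidecar and adapter patterns share data, the sketch below uses an emptyDir volume and placeholder names (the pod name shared-volume-sketch and both container commands are made up for illustration; complete examples live in this repo under multicontainerpod/ as sidecar.yaml and adapterpod.yaml). One container writes a log file and the second container reads it from the same volume:

```
apiVersion: v1
kind: Pod
metadata:
  name: shared-volume-sketch        # hypothetical name, for illustration only
spec:
  volumes:
  - name: logs
    emptyDir: {}                    # scratch volume shared by both containers
  containers:
  - name: app                       # stands in for the primary app container
    image: busybox
    command: ["/bin/sh", "-c", "while true; do date >> /logs/app.log; sleep 5; done"]
    volumeMounts:
    - name: logs
      mountPath: /logs
  - name: log-reader                # stands in for a logging/adapter sidecar
    image: busybox
    command: ["/bin/sh", "-c", "touch /logs/app.log; tail -f /logs/app.log"]
    volumeMounts:
    - name: logs
      mountPath: /logs
```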
16 |
17 | ## Ambassador pattern
18 |
19 | > When designing a distributed application, it is essential to understand that any communication from an application or microservice to any other microservice, or to the external world, is governed by endpoints. In short, a microservice application only needs the endpoint details to talk to the rest of the world. In the ambassador pattern, a proxy application is deployed along with the primary app. The primary app takes care of performing the workload tasks, while the proxy container takes care of providing endpoints for the primary app. The concept of service sharding is one of the most basic and primary use-cases of the ambassador pattern. Envoy is an example of a proxy container that runs as a sidecar to your main app to provide service endpoints to other applications. Another good use-case is when you deploy in-memory caching applications like redis or memcache alongside your primary app. These memory-cache sidecars can interact locally with your application for faster caching, and can then connect to the corresponding master/sentinel applications externally.
20 |
-------------------------------------------------------------------------------- /kubernetes-pods/initcontainers/initcontainer.yaml: --------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 |   name: initcontainerdemo
5 | spec:
6 |   initContainers:
7 |   - name: init1
8 |     image: busybox
9 |     command: ["/bin/sh","-c"]
10 |     args: ["mkdir /nginxmount; echo This is coming from initcontainer > /nginxmount/index.html"]
11 |
12 |     volumeMounts:
13 |     - name: nginxmount
14 |       mountPath: /nginxmount
15 |
16 |
17 |   containers:
18 |   - name: nginx
19 |     image: nginx
20 |     volumeMounts:
21 |     - name: nginxmount
22 |       mountPath: /usr/share/nginx/html
23 |
24 |   volumes:
25 |   - name: nginxmount
26 |     hostPath:
27 |       path: /nginx
28 |       type: DirectoryOrCreate
29 |
30 |
-------------------------------------------------------------------------------- /kubernetes-pods/inter-pod-communication/Dockerfile: --------------------------------------------------------------------------------
1 | FROM nginx:mainline
2 | ENV DEBIAN_FRONTEND noninteractive
3 | RUN mkdir -p /run/php
4 | RUN chmod 777 /run/php
5 | RUN apt-get update && apt-get -y install php-fpm php-mysqlnd
6 | RUN sed -i 's/listen.owner = www-data/listen.owner = nginx/g' /etc/php/7.0/fpm/pool.d/www.conf
7 | RUN sed -i 's/listen.group = www-data/listen.group = nginx/g' /etc/php/7.0/fpm/pool.d/www.conf
8 | RUN sed -i 's/;cgi.fix_pathinfo=1/cgi.fix_pathinfo=0/g' /etc/php/7.0/fpm/php.ini
9 | #RUN service php7.0-fpm restart
10 |
11 | COPY select.php /usr/share/nginx/html/select.php
12 | COPY index.php /usr/share/nginx/html/index.php
13 | COPY default.conf /etc/nginx/conf.d/default.conf
14 | CMD /etc/init.d/php7.0-fpm restart && nginx -g "daemon off;"
15 | #CMD ["/usr/sbin/php-fpm7.0"]
16 |
-------------------------------------------------------------------------------- /kubernetes-pods/inter-pod-communication/app.yaml: --------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 |   name: interpoddemo
5 |   labels:
6 |     app: phpapp
7 | spec:
8 |   containers:
9 |   - name: podcommunicationdemo
10 |     image: harshal0812/phpdemo
11 |
-------------------------------------------------------------------------------- /kubernetes-pods/inter-pod-communication/default.conf: --------------------------------------------------------------------------------
1 | server {
2 |
listen 80; 3 | server_name localhost; 4 | 5 | #charset koi8-r; 6 | #access_log /var/log/nginx/host.access.log main; 7 | 8 | root /usr/share/nginx/html; 9 | index index.html index.htm; 10 | 11 | #error_page 404 /404.html; 12 | 13 | # redirect server error pages to the static page /50x.html 14 | # 15 | error_page 500 502 503 504 /50x.html; 16 | location = /50x.html { 17 | root /usr/share/nginx/html; 18 | } 19 | 20 | location ~* \.php$ { 21 | fastcgi_pass unix:/run/php/php7.0-fpm.sock; 22 | include fastcgi_params; 23 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; 24 | fastcgi_param SCRIPT_NAME $fastcgi_script_name; 25 | } 26 | 27 | } 28 | -------------------------------------------------------------------------------- /kubernetes-pods/inter-pod-communication/index.php: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Pod Demo - PHP & Mysql pod connectivity 6 | 7 | 8 | 9 | 10 |

[The HTML markup of index.php was stripped during extraction. From the surviving fragments, the page is a small AngularJS view that calls select.php and renders the returned rows in a two-column table (First Name / Last Name) bound to {{x.first_name}} and {{x.last_name}}.]
25 | 26 | 27 | 49 | -------------------------------------------------------------------------------- /kubernetes-pods/inter-pod-communication/select.php: -------------------------------------------------------------------------------- 1 | 0) 8 | { 9 | while($row = mysqli_fetch_array($result)) 10 | { 11 | $output[] = $row; 12 | } 13 | echo json_encode($output); 14 | } 15 | ?> 16 | 17 | -------------------------------------------------------------------------------- /kubernetes-pods/multicontainerpod/adapterpod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: pod-with-adapter 5 | spec: 6 | volumes: 7 | - name: shared-logs 8 | emptyDir: {} 9 | 10 | containers: 11 | 12 | - name: app-container 13 | image: alpine 14 | command: ["/bin/sh"] 15 | args: ["-c", "while true; do date > /var/log/top.txt && top -n 1 -b >> /var/log/top.txt; sleep 5;done"] 16 | 17 | volumeMounts: 18 | - name: shared-logs 19 | mountPath: /var/log 20 | 21 | - name: adapter-container 22 | image: alpine 23 | command: ["/bin/sh"] 24 | args: ["-c", "while true; do (cat /var/log/top.txt | head -1 > /var/log/status.txt) && (cat /var/log/top.txt | head -2 | tail -1 | grep 25 | -o -E '\\d+\\w' | head -1 >> /var/log/status.txt) && (cat /var/log/top.txt | head -3 | tail -1 | grep 26 | -o -E '\\d+%' | head -1 >> /var/log/status.txt); sleep 5; done"] 27 | volumeMounts: 28 | - name: shared-logs 29 | mountPath: /var/log 30 | -------------------------------------------------------------------------------- /kubernetes-pods/multicontainerpod/sidecar.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: mc1 5 | spec: 6 | volumes: 7 | - name: html 8 | emptyDir: {} 9 | containers: 10 | - name: 1st 11 | image: nginx 12 | volumeMounts: 13 | - name: html 14 | mountPath: /usr/share/nginx/html 15 | - name: 2nd 16 | image: debian 17 | volumeMounts: 18 | - name: html 19 | mountPath: /html 20 | command: ["/bin/sh", "-c"] 21 | args: 22 | - while true; do 23 | date >> /html/index.html; 24 | sleep 1; 25 | done 26 | -------------------------------------------------------------------------------- /kubernetes-pods/mysql-secrets/mysql.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: mysql 5 | labels: 6 | app: mysql 7 | spec: 8 | containers: 9 | - image: mysql:5.6 10 | name: mysql 11 | env: 12 | - name: MYSQL_ROOT_PASSWORD 13 | valueFrom: 14 | secretKeyRef: 15 | name: mysql-pass 16 | key: password 17 | ports: 18 | - containerPort: 3306 19 | name: mysql 20 | volumeMounts: 21 | - name: mysql-persistent-storage 22 | mountPath: /var/lib/mysql 23 | volumes: 24 | - name: mysql-persistent-storage 25 | hostPath: 26 | path: /data 27 | type: DirectoryOrCreate 28 | -------------------------------------------------------------------------------- /kubernetes-pods/mysql-secrets/secret.conf: -------------------------------------------------------------------------------- 1 | kubectl create secret generic mysql-pass --from-literal=password=root 2 | -------------------------------------------------------------------------------- /kubernetes-pods/mysql-secrets/secretfile_mysql.yaml: -------------------------------------------------------------------------------- 1 | #echo -n "root" | base64 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: mysecret 6 | type: Opaque 7 | data: 8 | password: 
cm9vdA== 9 | -------------------------------------------------------------------------------- /kubernetes-pods/mysqlpod/db.txt: -------------------------------------------------------------------------------- 1 | -- 2 | -- Table structure for table `tbl_user` 3 | -- 4 | CREATE TABLE IF NOT EXISTS `tbl_user` ( 5 | `id` int(11) NOT NULL AUTO_INCREMENT, 6 | `first_name` varchar(200) NOT NULL, 7 | `last_name` varchar(200) NOT NULL, 8 | PRIMARY KEY (`id`) 9 | ) ENGINE=MyISAM DEFAULT CHARSET=latin1 AUTO_INCREMENT=32 ; 10 | -- 11 | -- Dumping data for table `tbl_user` 12 | -- 13 | INSERT INTO `tbl_user` (`id`, `first_name`, `last_name`) VALUES 14 | (31, 'Tom', 'Cruze'), 15 | (30, 'Bill', 'Gates'), 16 | (29, 'John', 'Smith'), 17 | (28, 'Big', 'Show'), 18 | (27, 'Smith', 'Johnson'), 19 | (26, 'The', 'Rock'), 20 | (25, 'Peter', 'Parker'), 21 | (18, 'Mark', 'John'); 22 | -------------------------------------------------------------------------------- /kubernetes-pods/mysqlpod/mysqlpod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: mysql 5 | labels: 6 | app: mysql 7 | spec: 8 | containers: 9 | - image: mysql:5.7 10 | name: mysql 11 | env: 12 | - name: MYSQL_ROOT_PASSWORD 13 | value: "root" 14 | ports: 15 | - containerPort: 3306 16 | name: mysql 17 | volumeMounts: 18 | - name: hostvolume 19 | mountPath: /var/lib/mysql 20 | volumes: 21 | - name: hostvolume 22 | hostPath: 23 | path: /data 24 | type: DirectoryOrCreate 25 | -------------------------------------------------------------------------------- /logging-efk/README.md: -------------------------------------------------------------------------------- 1 | # EFK (ElasticSearch - FluentD - Kibana ) 2 | 3 | ## Steps to install EFK stack on kubernetes cluster 4 | 5 | ## Pre-requisite 6 | 7 | > Since EFK is a heavy application - the cluster needs to be atleast 6 cpu x 10 GB memory with 30 GB storage. EFK stack is a good example to understand the concepts of Deployment, Statefulset and DaemonSet. Lets start installing EFK stack on kubernetes - 8 | 9 | * Create the namespace to install the stack 10 | 11 | ` kubectl create ns kube-logging ` 12 | 13 | ``` 14 | kubectl get ns kube-logging 15 | NAME STATUS AGE 16 | kube-logging Active 11s 17 | ``` 18 | 19 | * Create persistent volumes and persistent volume claims 20 | 21 | > Elasticsearch will need a persistent volume and a corresponding claim that will be attached to the 3 replicas that we will create. The files pv.yaml and pvc.yaml contains the definition of persistent volume and persistent volume claim respectively. 22 | 23 | ` kubectl create -f pv.yaml -f pvc.yaml -n kube-logging ` 24 | 25 | > The output will show that 3 PVCs are **BOUND** to 3 PVs. 
26 | 27 | ~~~ 28 | kubectl get pv,pvc -n kube-logging 29 | NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE 30 | persistentvolume/es-pv-0 10Gi RWO Retain Bound kube-logging/es-pvc-es-cluster-0 9s 31 | persistentvolume/es-pv-1 10Gi RWO Retain Bound kube-logging/es-pvc-es-cluster-1 9s 32 | persistentvolume/es-pv-2 10Gi RWO Retain Bound kube-logging/es-pvc-es-cluster-2 9s 33 | 34 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE 35 | persistentvolumeclaim/es-pvc-es-cluster-0 Bound es-pv-0 10Gi RWO 9s 36 | persistentvolumeclaim/es-pvc-es-cluster-1 Bound es-pv-1 10Gi RWO 9s 37 | persistentvolumeclaim/es-pvc-es-cluster-2 Bound es-pv-2 10Gi RWO 9s 38 | 39 | ~~~ 40 | 41 | * Create elasticsearch Statefulset 42 | 43 | > As elasticsearch acts as the default backend of fluentd aggregated logs, its important that we deploy elasticsearch as an application that maintains state. Fluentd will continuously push data to elasticsearch. To reduce any latency and to associate the elasticsearch replicas directly to fluentd, we use the concept of Headless service. By using headless service - the DNS of the elasticsearch pods will be - *STATEFULSET-NAME-STICKYIDENTIFIER.HEADLESS-SERVICE-NAME*, i.e. **es-cluster-0.elasticsearch** 44 | 45 | > Lets install elasticsearch headless service first - 46 | 47 | ` kubectl create -f elasticsearch_svc.yaml` 48 | 49 | ``` 50 | kubectl get svc -n kube-logging 51 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 52 | elasticsearch ClusterIP None 9200/TCP,9300/TCP 7s 53 | ``` 54 | 55 | > Install elasticsearch statefulset 56 | 57 | ` kubectl create -f elasticsearch_statefulset.yaml` 58 | 59 | ``` 60 | kubectl get pods -n kube-logging 61 | NAME READY STATUS RESTARTS AGE 62 | es-cluster-0 1/1 Running 0 21s 63 | es-cluster-1 1/1 Running 0 14s 64 | es-cluster-2 1/1 Running 0 8s 65 | ``` 66 | 67 | > Using port-forward, verify the status of statefulset deployment 68 | 69 | ` kubectl port-forward es-cluster-0 9200:9200 --namespace=kube-logging` 70 | 71 | ` curl http://localhost:9200/_cluster/state?pretty ` 72 | 73 | > The output should be as below 74 | 75 | ``` 76 | curl http://localhost:9200/_cluster/state?pretty 77 | { 78 | "cluster_name" : "k8s-logs", 79 | "compressed_size_in_bytes" : 351, 80 | "cluster_uuid" : "fDRfwLflQjuKeOLAXuPwLg", 81 | "version" : 3, 82 | "state_uuid" : "NkdqNF34SKq0bmIMHrG96Q", 83 | "master_node" : "28Vbx-gdR7CKje0oT1PFhA", 84 | "blocks" : { }, 85 | "nodes" : { 86 | "4FNwm6qBS6qBZDDpMg4x9g" : { 87 | "name" : "es-cluster-2", 88 | "ephemeral_id" : "s182JiZdSHCYG8Ja-swyuA", 89 | "transport_address" : "192.168.1.192:9300", 90 | "attributes" : { } 91 | }, 92 | "VwgBprBNTA6kDP1BUJs_Zg" : { 93 | "name" : "es-cluster-0", 94 | "ephemeral_id" : "IQmaLDsJRzWU9tY7JDiUQg", 95 | "transport_address" : "192.168.1.191:9300", 96 | "attributes" : { } 97 | }, 98 | "28Vbx-gdR7CKje0oT1PFhA" : { 99 | "name" : "es-cluster-1", 100 | "ephemeral_id" : "lJFv0XwaShm_y8eIjuMf-g", 101 | "transport_address" : "192.168.2.178:9300", 102 | "attributes" : { } 103 | } 104 | }, 105 | ``` 106 | 107 | * Install Kibana 108 | 109 | ` kubectl create -f kibana.yaml ` 110 | 111 | > The output now should be as below - 112 | 113 | ~~~ 114 | kubectl get pods,svc -n kube-logging 115 | NAME READY STATUS RESTARTS AGE 116 | pod/es-cluster-0 1/1 Running 0 5m13s 117 | pod/es-cluster-1 1/1 Running 0 5m6s 118 | pod/es-cluster-2 1/1 Running 0 5m 119 | pod/kibana-bd6f49775-zmt4g 1/1 Running 0 22s 120 | 121 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 122 | service/elasticsearch 
ClusterIP None 9200/TCP,9300/TCP 6m14s 123 | service/kibana NodePort 10.99.16.215 5601:32182/TCP 22s 124 | ~~~ 125 | 126 | > Get the nodeport from the kibana service, and visit the kibana dashboard on your browser using - http://EXTERNAL_IP:nodeport. Currently kibana is empty as there are no logs being pushed to elasticsearch. 127 | 128 | * Install FluentD daemonset 129 | 130 | > FluentD will be installed as daemonset as we need one instance of fluentD running on all nodes. In order to run it on master, the corresponding tolerations has to be added to the fluentd yaml definition. The fluentd daemonset will look for the elasticsearch service to push the logs to. As a part of the environment variables, we define the headless service DNS (elasticsearch.kube-logging.svc.cluster.local) and the port 9200 so that fluentd can push all logs to the elasticsearch backend. 131 | 132 | > FluentD will aggregate logs from all pods running in all namespaces. In order to provide fluentd the corresponding privileges, we have to create a RBAC policy for fluentd to fetch data from the "POD" resource and fetch pods from all "NAMESPACES". The file clusterrole-fluentd.yaml provides the necessary clusterrole definition. The file clusterrolebinding-fluentd.yaml will bind the clusterrole to a serviceaccount which will be used to run the fluentd daemonset. 133 | 134 | ` kubectl create -f sa-fluentd.yaml -f clusterrole-fluentd.yaml -f clusterrolebinding-fluentd.yaml ` 135 | 136 | Output should be as below - 137 | 138 | ~~~ 139 | kubectl create -f sa-fluentd.yaml -f clusterrole-fluentd.yaml -f clusterrolebinding-fluentd.yaml 140 | serviceaccount/fluentd created 141 | clusterrole.rbac.authorization.k8s.io/fluentd created 142 | clusterrolebinding.rbac.authorization.k8s.io/fluentd created 143 | ~~~ 144 | 145 | > Deploy the fluentd daemonset 146 | 147 | ` kubectl create -f fluentd_daemonset.yaml ` 148 | 149 | > Below should be the output of the kube-logging namespace now 150 | ~~~ 151 | kubectl get pods -n kube-logging 152 | NAME READY STATUS RESTARTS AGE 153 | es-cluster-0 1/1 Running 0 16m 154 | es-cluster-1 1/1 Running 0 16m 155 | es-cluster-2 1/1 Running 0 15m 156 | fluentd-dcstb 1/1 Running 0 20s 157 | fluentd-kqmcd 1/1 Running 0 20s 158 | fluentd-xr987 1/1 Running 0 20s 159 | kibana-bd6f49775-zmt4g 1/1 Running 0 11m 160 | ~~~ 161 | 162 | 163 | * Refresh kibana dashboard to see if the logstash-* index patterns are getting created. 164 | 165 | > In Discovery section - use the index pattern as logstash-* with timestamp as the filter to view all the logs. 
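* (Optional) Generate some recognizable log traffic

> If you want a predictable stream of log lines to search for, this directory also contains counter.yaml - a busybox pod that prints an incrementing counter with a timestamp every second. Deploy it with ` kubectl create -f counter.yaml ` and search for the pod name `counter` in the Discover tab; assuming fluentd and elasticsearch are healthy, the lines should show up under the logstash-* index within a minute or so. Delete it afterwards with ` kubectl delete -f counter.yaml `.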
166 | 167 | * Cleanup 168 | 169 | ` kubectl delete ns kube-logging` 170 | 171 | 172 | 173 | 174 | 175 | 176 | 177 | 178 | 179 | 180 | 181 | 182 | 183 | 184 | 185 | -------------------------------------------------------------------------------- /logging-efk/clusterrole-fluentd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: fluentd 5 | labels: 6 | app: fluentd 7 | rules: 8 | - apiGroups: 9 | - "" 10 | resources: 11 | - pods 12 | - namespaces 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | 18 | -------------------------------------------------------------------------------- /logging-efk/clusterrolebinding-fluentd.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: fluentd 5 | roleRef: 6 | kind: ClusterRole 7 | name: fluentd 8 | apiGroup: rbac.authorization.k8s.io 9 | subjects: 10 | - kind: ServiceAccount 11 | name: fluentd 12 | namespace: kube-logging 13 | 14 | -------------------------------------------------------------------------------- /logging-efk/counter.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: counter 5 | spec: 6 | containers: 7 | - name: count 8 | image: busybox 9 | args: [/bin/sh, -c, 10 | 'i=0; while true; do echo "$i: $(date)"; i=$((i+1)); sleep 1; done'] 11 | 12 | -------------------------------------------------------------------------------- /logging-efk/elasticsearch_statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: StatefulSet 3 | metadata: 4 | name: es-cluster 5 | namespace: kube-logging 6 | spec: 7 | serviceName: elasticsearch 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: elasticsearch 12 | template: 13 | metadata: 14 | labels: 15 | app: elasticsearch 16 | spec: 17 | containers: 18 | - name: elasticsearch 19 | image: docker.elastic.co/elasticsearch/elasticsearch-oss:6.4.3 20 | resources: 21 | limits: 22 | cpu: 1000m 23 | requests: 24 | cpu: 100m 25 | ports: 26 | - containerPort: 9200 27 | name: rest 28 | protocol: TCP 29 | - containerPort: 9300 30 | name: inter-node 31 | protocol: TCP 32 | volumeMounts: 33 | - name: es-pvc 34 | mountPath: /usr/share/elasticsearch/data 35 | env: 36 | - name: cluster.name 37 | value: k8s-logs 38 | - name: node.name 39 | valueFrom: 40 | fieldRef: 41 | fieldPath: metadata.name 42 | - name: discovery.zen.ping.unicast.hosts 43 | value: "es-cluster-0.elasticsearch,es-cluster-1.elasticsearch,es-cluster-2.elasticsearch" 44 | - name: discovery.zen.minimum_master_nodes 45 | value: "2" 46 | - name: ES_JAVA_OPTS 47 | value: "-Xms512m -Xmx512m" 48 | initContainers: 49 | - name: fix-permissions 50 | image: busybox 51 | command: ["sh", "-c", "chown -R 1000:1000 /usr/share/elasticsearch/data"] 52 | securityContext: 53 | privileged: true 54 | volumeMounts: 55 | - name: es-pvc 56 | mountPath: /usr/share/elasticsearch/data 57 | - name: increase-vm-max-map 58 | image: busybox 59 | command: ["sysctl", "-w", "vm.max_map_count=262144"] 60 | securityContext: 61 | privileged: true 62 | - name: increase-fd-ulimit 63 | image: busybox 64 | command: ["sh", "-c", "ulimit -n 65536"] 65 | securityContext: 66 | privileged: true 67 | volumeClaimTemplates: 68 | - metadata: 69 | name: es-pvc 70 | spec: 71 | 
accessModes: [ "ReadWriteOnce" ] 72 | -------------------------------------------------------------------------------- /logging-efk/elasticsearch_svc.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: elasticsearch 5 | namespace: kube-logging 6 | labels: 7 | app: elasticsearch 8 | spec: 9 | selector: 10 | app: elasticsearch 11 | clusterIP: None 12 | ports: 13 | - port: 9200 14 | name: rest 15 | - port: 9300 16 | name: inter-node 17 | 18 | -------------------------------------------------------------------------------- /logging-efk/fluentd_daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: fluentd 5 | namespace: kube-logging 6 | labels: 7 | app: fluentd 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: fluentd 12 | template: 13 | metadata: 14 | labels: 15 | app: fluentd 16 | spec: 17 | serviceAccount: fluentd 18 | serviceAccountName: fluentd 19 | tolerations: 20 | - key: node-role.kubernetes.io/master 21 | effect: NoSchedule 22 | containers: 23 | - name: fluentd 24 | image: fluent/fluentd-kubernetes-daemonset:v0.12-debian-elasticsearch 25 | env: 26 | - name: FLUENT_ELASTICSEARCH_HOST 27 | value: "elasticsearch.kube-logging.svc.cluster.local" 28 | - name: FLUENT_ELASTICSEARCH_PORT 29 | value: "9200" 30 | - name: FLUENT_ELASTICSEARCH_SCHEME 31 | value: "http" 32 | - name: FLUENT_UID 33 | value: "0" 34 | resources: 35 | limits: 36 | memory: 512Mi 37 | requests: 38 | cpu: 100m 39 | memory: 200Mi 40 | volumeMounts: 41 | - name: varlog 42 | mountPath: /var/log 43 | - name: varlibdockercontainers 44 | mountPath: /var/lib/docker/containers 45 | readOnly: true 46 | terminationGracePeriodSeconds: 30 47 | volumes: 48 | - name: varlog 49 | hostPath: 50 | path: /var/log 51 | - name: varlibdockercontainers 52 | hostPath: 53 | path: /var/lib/docker/containers 54 | 55 | -------------------------------------------------------------------------------- /logging-efk/kibana.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kibana 5 | namespace: kube-logging 6 | labels: 7 | app: kibana 8 | spec: 9 | ports: 10 | - port: 5601 11 | selector: 12 | app: kibana 13 | type: NodePort 14 | --- 15 | apiVersion: apps/v1 16 | kind: Deployment 17 | metadata: 18 | name: kibana 19 | namespace: kube-logging 20 | labels: 21 | app: kibana 22 | spec: 23 | replicas: 1 24 | selector: 25 | matchLabels: 26 | app: kibana 27 | template: 28 | metadata: 29 | labels: 30 | app: kibana 31 | spec: 32 | containers: 33 | - name: kibana 34 | image: docker.elastic.co/kibana/kibana-oss:6.4.3 35 | resources: 36 | limits: 37 | cpu: 1000m 38 | requests: 39 | cpu: 100m 40 | env: 41 | - name: ELASTICSEARCH_URL 42 | value: http://elasticsearch:9200 43 | ports: 44 | - containerPort: 5601 45 | -------------------------------------------------------------------------------- /logging-efk/pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: es-pv-0 5 | labels: 6 | app: es-pv 7 | podindex: "0" 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | capacity: 12 | storage: 10Gi 13 | hostPath: 14 | path: /data/pods/es-0/datadir 15 | 16 | --- 17 | 18 | apiVersion: v1 19 | kind: PersistentVolume 20 | metadata: 21 | name: es-pv-1 22 | labels: 23 | app: es-pv 
24 | podindex: "1" 25 | spec: 26 | accessModes: 27 | - ReadWriteOnce 28 | capacity: 29 | storage: 10Gi 30 | hostPath: 31 | path: /data/pods/es-1/datadir 32 | 33 | --- 34 | 35 | apiVersion: v1 36 | kind: PersistentVolume 37 | metadata: 38 | name: es-pv-2 39 | labels: 40 | app: es-pv 41 | podindex: "2" 42 | spec: 43 | accessModes: 44 | - ReadWriteOnce 45 | capacity: 46 | storage: 10Gi 47 | hostPath: 48 | path: /data/pods/es-2/datadir 49 | -------------------------------------------------------------------------------- /logging-efk/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: es-pvc-es-cluster-0 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | resources: 9 | requests: 10 | storage: 10Gi 11 | selector: 12 | matchLabels: 13 | app: es-pv 14 | podindex: "0" 15 | 16 | --- 17 | 18 | apiVersion: v1 19 | kind: PersistentVolumeClaim 20 | metadata: 21 | name: es-pvc-es-cluster-1 22 | spec: 23 | accessModes: 24 | - ReadWriteOnce 25 | resources: 26 | requests: 27 | storage: 10Gi 28 | selector: 29 | matchLabels: 30 | app: es-pv 31 | podindex: "1" 32 | 33 | --- 34 | 35 | apiVersion: v1 36 | kind: PersistentVolumeClaim 37 | metadata: 38 | name: es-pvc-es-cluster-2 39 | spec: 40 | accessModes: 41 | - ReadWriteOnce 42 | resources: 43 | requests: 44 | storage: 10Gi 45 | selector: 46 | matchLabels: 47 | app: es-pv 48 | podindex: "2" 49 | -------------------------------------------------------------------------------- /logging-efk/sa-fluentd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: fluentd 5 | namespace: kube-logging 6 | labels: 7 | app: fluentd 8 | 9 | -------------------------------------------------------------------------------- /mariadb-statefulset/etcd-cluster.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: etcd-client 5 | spec: 6 | ports: 7 | - name: etcd-client-port 8 | port: 2379 9 | protocol: TCP 10 | targetPort: 2379 11 | selector: 12 | app: etcd 13 | 14 | --- 15 | 16 | apiVersion: v1 17 | kind: Pod 18 | metadata: 19 | labels: 20 | app: etcd 21 | etcd_node: etcd0 22 | name: etcd0 23 | spec: 24 | containers: 25 | - command: 26 | - /usr/local/bin/etcd 27 | - --name 28 | - etcd0 29 | - --initial-advertise-peer-urls 30 | - http://etcd0:2380 31 | - --listen-peer-urls 32 | - http://0.0.0.0:2380 33 | - --listen-client-urls 34 | - http://0.0.0.0:2379 35 | - --advertise-client-urls 36 | - http://etcd0:2379 37 | - --initial-cluster 38 | - etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380 39 | - --initial-cluster-state 40 | - new 41 | image: quay.io/coreos/etcd:latest 42 | name: etcd0 43 | ports: 44 | - containerPort: 2379 45 | name: client 46 | protocol: TCP 47 | - containerPort: 2380 48 | name: server 49 | protocol: TCP 50 | restartPolicy: Never 51 | 52 | --- 53 | 54 | apiVersion: v1 55 | kind: Service 56 | metadata: 57 | labels: 58 | etcd_node: etcd0 59 | name: etcd0 60 | spec: 61 | ports: 62 | - name: client 63 | port: 2379 64 | protocol: TCP 65 | targetPort: 2379 66 | - name: server 67 | port: 2380 68 | protocol: TCP 69 | targetPort: 2380 70 | selector: 71 | etcd_node: etcd0 72 | 73 | --- 74 | 75 | apiVersion: v1 76 | kind: Pod 77 | metadata: 78 | labels: 79 | app: etcd 80 | etcd_node: etcd1 81 | name: etcd1 82 | spec: 83 | containers: 84 | - command: 85 | 
- /usr/local/bin/etcd 86 | - --name 87 | - etcd1 88 | - --initial-advertise-peer-urls 89 | - http://etcd1:2380 90 | - --listen-peer-urls 91 | - http://0.0.0.0:2380 92 | - --listen-client-urls 93 | - http://0.0.0.0:2379 94 | - --advertise-client-urls 95 | - http://etcd1:2379 96 | - --initial-cluster 97 | - etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380 98 | - --initial-cluster-state 99 | - new 100 | image: quay.io/coreos/etcd:latest 101 | name: etcd1 102 | ports: 103 | - containerPort: 2379 104 | name: client 105 | protocol: TCP 106 | - containerPort: 2380 107 | name: server 108 | protocol: TCP 109 | restartPolicy: Never 110 | 111 | --- 112 | 113 | apiVersion: v1 114 | kind: Service 115 | metadata: 116 | labels: 117 | etcd_node: etcd1 118 | name: etcd1 119 | spec: 120 | ports: 121 | - name: client 122 | port: 2379 123 | protocol: TCP 124 | targetPort: 2379 125 | - name: server 126 | port: 2380 127 | protocol: TCP 128 | targetPort: 2380 129 | selector: 130 | etcd_node: etcd1 131 | 132 | --- 133 | 134 | apiVersion: v1 135 | kind: Pod 136 | metadata: 137 | labels: 138 | app: etcd 139 | etcd_node: etcd2 140 | name: etcd2 141 | spec: 142 | containers: 143 | - command: 144 | - /usr/local/bin/etcd 145 | - --name 146 | - etcd2 147 | - --initial-advertise-peer-urls 148 | - http://etcd2:2380 149 | - --listen-peer-urls 150 | - http://0.0.0.0:2380 151 | - --listen-client-urls 152 | - http://0.0.0.0:2379 153 | - --advertise-client-urls 154 | - http://etcd2:2379 155 | - --initial-cluster 156 | - etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380 157 | - --initial-cluster-state 158 | - new 159 | image: quay.io/coreos/etcd:latest 160 | name: etcd2 161 | ports: 162 | - containerPort: 2379 163 | name: client 164 | protocol: TCP 165 | - containerPort: 2380 166 | name: server 167 | protocol: TCP 168 | restartPolicy: Never 169 | 170 | --- 171 | 172 | apiVersion: v1 173 | kind: Service 174 | metadata: 175 | labels: 176 | etcd_node: etcd2 177 | name: etcd2 178 | spec: 179 | ports: 180 | - name: client 181 | port: 2379 182 | protocol: TCP 183 | targetPort: 2379 184 | - name: server 185 | port: 2380 186 | protocol: TCP 187 | targetPort: 2380 188 | selector: 189 | etcd_node: etcd2 190 | -------------------------------------------------------------------------------- /mariadb-statefulset/mariadb-pv.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: datadir-galera-0 5 | labels: 6 | app: galera-ss 7 | podindex: "0" 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | capacity: 12 | storage: 10Gi 13 | hostPath: 14 | path: /data/pods/galera-0/datadir 15 | 16 | --- 17 | 18 | apiVersion: v1 19 | kind: PersistentVolume 20 | metadata: 21 | name: datadir-galera-1 22 | labels: 23 | app: galera-ss 24 | podindex: "1" 25 | spec: 26 | accessModes: 27 | - ReadWriteOnce 28 | capacity: 29 | storage: 10Gi 30 | hostPath: 31 | path: /data/pods/galera-1/datadir 32 | 33 | --- 34 | 35 | apiVersion: v1 36 | kind: PersistentVolume 37 | metadata: 38 | name: datadir-galera-2 39 | labels: 40 | app: galera-ss 41 | podindex: "2" 42 | spec: 43 | accessModes: 44 | - ReadWriteOnce 45 | capacity: 46 | storage: 10Gi 47 | hostPath: 48 | path: /data/pods/galera-2/datadir 49 | -------------------------------------------------------------------------------- /mariadb-statefulset/mariadb-pvc.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: 
PersistentVolumeClaim 3 | metadata: 4 | name: mysql-datadir-galera-ss-0 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | resources: 9 | requests: 10 | storage: 10Gi 11 | selector: 12 | matchLabels: 13 | app: galera-ss 14 | podindex: "0" 15 | 16 | --- 17 | 18 | apiVersion: v1 19 | kind: PersistentVolumeClaim 20 | metadata: 21 | name: mysql-datadir-galera-ss-1 22 | spec: 23 | accessModes: 24 | - ReadWriteOnce 25 | resources: 26 | requests: 27 | storage: 10Gi 28 | selector: 29 | matchLabels: 30 | app: galera-ss 31 | podindex: "1" 32 | 33 | --- 34 | 35 | apiVersion: v1 36 | kind: PersistentVolumeClaim 37 | metadata: 38 | name: mysql-datadir-galera-ss-2 39 | spec: 40 | accessModes: 41 | - ReadWriteOnce 42 | resources: 43 | requests: 44 | storage: 10Gi 45 | selector: 46 | matchLabels: 47 | app: galera-ss 48 | podindex: "2" 49 | -------------------------------------------------------------------------------- /mariadb-statefulset/mariadb-rs.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: galera-rs 5 | labels: 6 | app: galera-rs 7 | spec: 8 | type: NodePort 9 | ports: 10 | - nodePort: 30000 11 | port: 3306 12 | selector: 13 | app: galera 14 | 15 | --- 16 | 17 | apiVersion: extensions/v1beta1 18 | kind: Deployment 19 | metadata: 20 | name: galera 21 | labels: 22 | app: galera 23 | spec: 24 | replicas: 3 25 | strategy: 26 | type: Recreate 27 | template: 28 | metadata: 29 | labels: 30 | app: galera 31 | spec: 32 | containers: 33 | - name: galera 34 | image: severalnines/mariadb:10.1 35 | env: 36 | # kubectl create secret generic mysql-pass --from-file=password.txt 37 | - name: MYSQL_ROOT_PASSWORD 38 | value: myrootpassword 39 | - name: DISCOVERY_SERVICE 40 | value: etcd-client:2379 41 | - name: XTRABACKUP_PASSWORD 42 | value: password 43 | - name: CLUSTER_NAME 44 | value: mariadb_galera 45 | - name: MYSQL_DATABASE 46 | value: mydatabase 47 | - name: MYSQL_USER 48 | value: myuser 49 | - name: MYSQL_PASSWORD 50 | value: myuserpassword 51 | ports: 52 | - name: mysql 53 | containerPort: 3306 54 | readinessProbe: 55 | exec: 56 | command: 57 | - /healthcheck.sh 58 | - --readiness 59 | initialDelaySeconds: 120 60 | periodSeconds: 1 61 | livenessProbe: 62 | exec: 63 | command: 64 | - /healthcheck.sh 65 | - --liveness 66 | initialDelaySeconds: 120 67 | periodSeconds: 1 68 | -------------------------------------------------------------------------------- /mariadb-statefulset/mariadb-ss.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: galera-ss 5 | labels: 6 | app: galera-ss 7 | spec: 8 | ports: 9 | - port: 3306 10 | name: mysql 11 | clusterIP: None 12 | selector: 13 | app: galera-ss 14 | 15 | --- 16 | 17 | apiVersion: apps/v1beta1 18 | kind: StatefulSet 19 | metadata: 20 | name: galera-ss 21 | spec: 22 | serviceName: "galera-ss" 23 | replicas: 3 24 | updateStrategy: 25 | type: RollingUpdate 26 | 27 | template: 28 | metadata: 29 | labels: 30 | app: galera-ss 31 | spec: 32 | containers: 33 | - name: galera 34 | image: severalnines/mariadb:10.1 35 | ports: 36 | - name: mysql 37 | containerPort: 3306 38 | env: 39 | # kubectl create secret generic mysql-pass --from-file=password.txt 40 | - name: MYSQL_ROOT_PASSWORD 41 | value: myrootpassword 42 | - name: DISCOVERY_SERVICE 43 | value: etcd-client:2379 44 | - name: XTRABACKUP_PASSWORD 45 | value: password 46 | - name: CLUSTER_NAME 47 | value: mariadb_galera_ss 48 | - name: 
MYSQL_DATABASE 49 | value: mydatabase 50 | - name: MYSQL_USER 51 | value: myuser 52 | - name: MYSQL_PASSWORD 53 | value: myuserpassword 54 | readinessProbe: 55 | exec: 56 | command: 57 | - /healthcheck.sh 58 | - --readiness 59 | initialDelaySeconds: 120 60 | periodSeconds: 1 61 | livenessProbe: 62 | exec: 63 | command: 64 | - /healthcheck.sh 65 | - --liveness 66 | initialDelaySeconds: 120 67 | periodSeconds: 1 68 | volumeMounts: 69 | - name: mysql-datadir 70 | mountPath: /var/lib/mysql 71 | volumeClaimTemplates: 72 | - metadata: 73 | name: mysql-datadir 74 | spec: 75 | accessModes: [ "ReadWriteOnce" ] 76 | -------------------------------------------------------------------------------- /mariadb-statefulset/mariadb-ss_updated.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: galera-ss 5 | labels: 6 | app: galera-ss 7 | spec: 8 | ports: 9 | - port: 3306 10 | name: mysql 11 | clusterIP: None 12 | selector: 13 | app: galera-ss 14 | 15 | --- 16 | 17 | apiVersion: apps/v1beta1 18 | kind: StatefulSet 19 | metadata: 20 | name: galera-ss 21 | spec: 22 | serviceName: "galera-ss" 23 | replicas: 3 24 | updateStrategy: 25 | type: RollingUpdate 26 | 27 | template: 28 | metadata: 29 | labels: 30 | app: galera-ss 31 | spec: 32 | containers: 33 | - name: galera 34 | image: mariadb 35 | ports: 36 | - name: mysql 37 | containerPort: 3306 38 | env: 39 | # kubectl create secret generic mysql-pass --from-file=password.txt 40 | - name: MYSQL_ROOT_PASSWORD 41 | value: myrootpassword 42 | - name: DISCOVERY_SERVICE 43 | value: etcd-client:2379 44 | - name: XTRABACKUP_PASSWORD 45 | value: password 46 | - name: CLUSTER_NAME 47 | value: mariadb_galera_ss 48 | - name: MYSQL_DATABASE 49 | value: mydatabase 50 | - name: MYSQL_USER 51 | value: myuser 52 | - name: MYSQL_PASSWORD 53 | value: myuserpassword 54 | volumeMounts: 55 | - name: mysql-datadir 56 | mountPath: /var/lib/mysql 57 | volumeClaimTemplates: 58 | - metadata: 59 | name: mysql-datadir 60 | spec: 61 | accessModes: [ "ReadWriteOnce" ] 62 | -------------------------------------------------------------------------------- /multi-master-hard-way/certs/admin-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "admin", 3 | "key": { 4 | "algo": "rsa", 5 | "size": 2048 6 | }, 7 | "names": [ 8 | { 9 | "C": "US", 10 | "L": "Portland", 11 | "O": "system:masters", 12 | "OU": "Kubernetes The Hard Way", 13 | "ST": "Oregon" 14 | } 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /multi-master-hard-way/certs/ca-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "signing": { 3 | "default": { 4 | "expiry": "8760h" 5 | }, 6 | "profiles": { 7 | "kubernetes": { 8 | "usages": ["signing", "key encipherment", "server auth", "client auth"], 9 | "expiry": "8760h" 10 | } 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /multi-master-hard-way/certs/ca-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "Kubernetes", 3 | "key": { 4 | "algo": "rsa", 5 | "size": 2048 6 | }, 7 | "names": [ 8 | { 9 | "C": "US", 10 | "L": "Portland", 11 | "O": "Kubernetes", 12 | "OU": "CA", 13 | "ST": "Oregon" 14 | } 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- 
/multi-master-hard-way/certs/createnodecert.sh: -------------------------------------------------------------------------------- 1 | for i in `cat node.cfg` 2 | do 3 | WORKER_HOST=`echo ${i} | awk -F : '{print $1}'` 4 | WORKER_IP=`echo ${i} | awk -F : '{print $2}'` 5 | cp node-csr.json ${WORKER_HOST}-csr.json 6 | sed -i "s/WORKER_HOST/${WORKER_HOST}/g" ${WORKER_HOST}-csr.json 7 | echo $WORKER_HOST 8 | echo $WORKER_IP 9 | done 10 | -------------------------------------------------------------------------------- /multi-master-hard-way/certs/kube-controller-manager-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "system:kube-controller-manager", 3 | "key": { 4 | "algo": "rsa", 5 | "size": 2048 6 | }, 7 | "names": [ 8 | { 9 | "C": "US", 10 | "L": "Portland", 11 | "O": "system:kube-controller-manager", 12 | "OU": "Kubernetes The Hard Way", 13 | "ST": "Oregon" 14 | } 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /multi-master-hard-way/certs/kube-proxy-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "system:kube-proxy", 3 | "key": { 4 | "algo": "rsa", 5 | "size": 2048 6 | }, 7 | "names": [ 8 | { 9 | "C": "US", 10 | "L": "Portland", 11 | "O": "system:node-proxier", 12 | "OU": "Kubernetes The Hard Way", 13 | "ST": "Oregon" 14 | } 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /multi-master-hard-way/certs/kube-scheduler-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "system:kube-scheduler", 3 | "key": { 4 | "algo": "rsa", 5 | "size": 2048 6 | }, 7 | "names": [ 8 | { 9 | "C": "US", 10 | "L": "Portland", 11 | "O": "system:kube-scheduler", 12 | "OU": "Kubernetes The Hard Way", 13 | "ST": "Oregon" 14 | } 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /multi-master-hard-way/certs/kubernetes-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "kubernetes", 3 | "key": { 4 | "algo": "rsa", 5 | "size": 2048 6 | }, 7 | "names": [ 8 | { 9 | "C": "US", 10 | "L": "Portland", 11 | "O": "Kubernetes", 12 | "OU": "Kubernetes The Hard Way", 13 | "ST": "Oregon" 14 | } 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /multi-master-hard-way/certs/node-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "system:node:WORKER_HOST", 3 | "key": { 4 | "algo": "rsa", 5 | "size": 2048 6 | }, 7 | "names": [ 8 | { 9 | "C": "US", 10 | "L": "Portland", 11 | "O": "system:nodes", 12 | "OU": "Kubernetes The Hard Way", 13 | "ST": "Oregon" 14 | } 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /multi-master-hard-way/certs/node.cfg: -------------------------------------------------------------------------------- 1 | node1:10.128.15.223 2 | node2:10.128.15.224 3 | -------------------------------------------------------------------------------- /multi-master-hard-way/certs/node1-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "system:node:${WORKER_HOST}", 3 | "key": { 4 | "algo": "rsa", 5 | "size": 2048 6 | }, 7 | "names": [ 8 | { 9 | "C": "US", 10 | "L": "Portland", 11 | "O": "system:nodes", 12 | "OU": "Kubernetes The Hard Way", 13 | "ST": 
"Oregon" 14 | } 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /multi-master-hard-way/certs/service-account-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "service-accounts", 3 | "key": { 4 | "algo": "rsa", 5 | "size": 2048 6 | }, 7 | "names": [ 8 | { 9 | "C": "US", 10 | "L": "Portland", 11 | "O": "Kubernetes", 12 | "OU": "Kubernetes The Hard Way", 13 | "ST": "Oregon" 14 | } 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /multi-master-hard-way/createnodecert.sh: -------------------------------------------------------------------------------- 1 | for i in `cat node.cfg` 2 | do 3 | WORKER_HOST=`echo ${i} | awk -F : '{print $1}'` 4 | WORKER_IP=`echo ${i} | awk -F : '{print $2}'` 5 | cp node-csr.json ${WORKER_HOST}-csr.json 6 | sed -i "s/WORKER_HOST/${WORKER_HOST}/g" ${WORKER_HOST}-csr.json 7 | cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -hostname=${WORKER_HOST},${WORKER_IP} -profile=kubernetes ${WORKER_HOST}-csr.json | cfssljson -bare ${WORKER_HOST} 8 | done 9 | -------------------------------------------------------------------------------- /multi-master-hard-way/encryption-config.yaml: -------------------------------------------------------------------------------- 1 | kind: EncryptionConfiguration 2 | apiVersion: apiserver.config.k8s.io/v1 3 | resources: 4 | - resources: 5 | - secrets 6 | providers: 7 | - aescbc: 8 | keys: 9 | - name: key1 10 | secret: L0KeMGR9dmgRvfzqkELizUkcTiUpqb8lqisyzYcGtIw= 11 | - identity: {} 12 | -------------------------------------------------------------------------------- /multi-master-hard-way/genkubeletcert.sh: -------------------------------------------------------------------------------- 1 | for instance in node1 node2; do 2 | kubectl config set-cluster kubernetes-the-hard-way \ 3 | --certificate-authority=ca.pem \ 4 | --embed-certs=true \ 5 | --server=https://${KUBERNETES_ADDRESS}:6443 \ 6 | --kubeconfig=${instance}.kubeconfig 7 | 8 | kubectl config set-credentials system:node:${instance} \ 9 | --client-certificate=${instance}.pem \ 10 | --client-key=${instance}-key.pem \ 11 | --embed-certs=true \ 12 | --kubeconfig=${instance}.kubeconfig 13 | 14 | kubectl config set-context default \ 15 | --cluster=kubernetes-the-hard-way \ 16 | --user=system:node:${instance} \ 17 | --kubeconfig=${instance}.kubeconfig 18 | 19 | kubectl config use-context default --kubeconfig=${instance}.kubeconfig 20 | done 21 | -------------------------------------------------------------------------------- /multi-master-hard-way/kube-proxy-config.yaml: -------------------------------------------------------------------------------- 1 | kind: KubeProxyConfiguration 2 | apiVersion: kubeproxy.config.k8s.io/v1alpha1 3 | clientConnection: 4 | kubeconfig: "/var/lib/kube-proxy/kubeconfig" 5 | mode: "iptables" 6 | clusterCIDR: "10.200.0.0/16" 7 | -------------------------------------------------------------------------------- /multi-master-hard-way/kube-scheduler.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kubescheduler.config.k8s.io/v1alpha1 2 | kind: KubeSchedulerConfiguration 3 | clientConnection: 4 | kubeconfig: "/var/lib/kubernetes/kube-scheduler.kubeconfig" 5 | leaderElection: 6 | leaderElect: true 7 | -------------------------------------------------------------------------------- /multi-master-hard-way/kubelet-rbac-clusterrole.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1beta1 2 | kind: ClusterRole 3 | metadata: 4 | annotations: 5 | rbac.authorization.kubernetes.io/autoupdate: "true" 6 | labels: 7 | kubernetes.io/bootstrapping: rbac-defaults 8 | name: system:kube-apiserver-to-kubelet 9 | rules: 10 | - apiGroups: 11 | - "" 12 | resources: 13 | - nodes/proxy 14 | - nodes/stats 15 | - nodes/log 16 | - nodes/spec 17 | - nodes/metrics 18 | verbs: 19 | - "*" 20 | 21 | -------------------------------------------------------------------------------- /multi-master-hard-way/kubelet-rbac-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1beta1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: system:kube-apiserver 5 | namespace: "" 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: system:kube-apiserver-to-kubelet 10 | subjects: 11 | - apiGroup: rbac.authorization.k8s.io 12 | kind: User 13 | name: kubernetes 14 | 15 | -------------------------------------------------------------------------------- /multi-master-hard-way/node.cfg: -------------------------------------------------------------------------------- 1 | node1:10.128.15.223 2 | node2:10.128.15.224 3 | -------------------------------------------------------------------------------- /networkpolicies/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:mainline 2 | ENV DEBIAN_FRONTEND noninteractive 3 | RUN mkdir -p /run/php 4 | RUN chmod 777 /run/php 5 | RUN apt-get update && apt-get -y install php-fpm php-mysqlnd 6 | RUN sed -i 's/listen.owner = www-data/listen.owner = nginx/g' /etc/php/7.0/fpm/pool.d/www.conf 7 | RUN sed -i 's/listen.group = www-data/listen.group = nginx/g' /etc/php/7.0/fpm/pool.d/www.conf 8 | RUN sed -i 's/;cgi.fix_pathinfo=1/cgi.fix_pathinfo=0/g' /etc/php/7.0/fpm/php.ini 9 | #RUN service php7.0-fpm restart 10 | 11 | COPY select.php /usr/share/nginx/html/select.php 12 | COPY index.php /usr/share/nginx/html/index.php 13 | COPY default.conf /etc/nginx/conf.d/default.conf 14 | CMD /etc/init.d/php7.0-fpm restart && nginx -g "daemon off;" 15 | #CMD ["/usr/sbin/php-fpm7.0"] 16 | -------------------------------------------------------------------------------- /networkpolicies/README.md: -------------------------------------------------------------------------------- 1 | # NetworkPolicies 2 | 3 | > A pod communicates with other pods within its own cluster and outside the cluster using services and external network endpoints. This is true in case of a microservice application in which the backend microservices interact with each other and the frontend microservice publishes itself to the external DNS. The concept of namespaces defines that all pods running within a single namespace can interact with each other using the service DNS. Pods outside the namespaces can communicate using the service FQDN. 4 | 5 | > By default all pods are non-isolated. It means that they can accept requests from anywhere. In case they are exposed externally using a loadbalancer, a pod might accept request from any source. All organizations do need some form of network isolation when it comes to deploying an application, for example - you might have a series of subnet that should not be able to access the pods OR the database that holds consumer sensitive data should not be accessed by any other applications. 
These isolations are provided with help of network policies. 6 | 7 | > Kubernetes supports the **NetworkPolicy** resource. However, this resource is useless unless the underlying network plugin (CNI) supports the implementation. Major network plugins like Calico, Flannel, Canal etc supports NetworkPolicy implementation in their own way. As long as one of the supported CNIs are installed, you can deploy networkpolicies on your cluster to implement pod isolations. 8 | 9 | > As per version 1.14 of kubernetes - a complete reference of network policies are available at - https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#networkpolicy-v1-networking-k8s-io 10 | 11 | 12 | # Calico for Kubernetes 13 | 14 | > Calico is a network plugin supported by kubernetes and it helps implement networking and networkpolicies in kubernetes cluster. Calico is supported by multiple cloud providers like AWS, GCP, Azure and comes with a pure IP networking fabric to provide high performance networking. Calico can also be used on private cloud by configuring BGP peering. More information on Calico and installation instructions are provided at calico's official website - https://docs.projectcalico.org/v2.0/getting-started/kubernetes/ 15 | 16 | # Implement a network policy using Calico 17 | 18 | > We will implement a network policy in a separate namespace - 19 | 20 | ` kubectl create ns networkdemo ` 21 | 22 | > Lets create a mysql deployment and expose the mysql deployment using a service type Cluster IP 23 | 24 | ` kubectl create -f mysqlpod.yaml ` 25 | 26 | ``` 27 | kubectl get pods -n networkdemo 28 | NAME READY STATUS RESTARTS AGE 29 | mysql-56546566c7-cpmsf 1/1 Running 0 3m27s 30 | ``` 31 | 32 | > Create a service for pod mysql - This service will be used by our webserver application 33 | 34 | ` kubectl expose deploy mysql --port=3306 --type=ClusterIP -n networkdemo` 35 | 36 | ``` 37 | kubectl get svc -n networkdemo 38 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 39 | mysql ClusterIP 10.105.107.237 3306/TCP 60s 40 | ``` 41 | 42 | ` kubectl create -f app.yaml ` 43 | 44 | ``` 45 | kubectl get pods -n networkdemo 46 | NAME READY STATUS RESTARTS AGE 47 | interpoddemo-6fddddbb74-jtsds 1/1 Running 0 16s 48 | interpoddemo-6fddddbb74-p8dk6 1/1 Running 0 16s 49 | interpoddemo-6fddddbb74-wpznb 1/1 Running 0 16s 50 | mysql-56546566c7-cpmsf 1/1 Running 0 7m50s 51 | ``` 52 | 53 | > Create a service for the PHP webserver POD. 54 | 55 | ` kubectl expose deploy interpoddemo --port=80 --type=ClusterIP -n networkdemo` 56 | 57 | ``` 58 | kubectl get svc -n networkdemo 59 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 60 | interpoddemo ClusterIP 10.102.212.191 80/TCP 32s 61 | mysql ClusterIP 10.105.107.237 3306/TCP 5m45s 62 | ``` 63 | 64 | > Insert dummy data in the mysql DB. The file db.txt is provided with dummy data. Create a database with name **db1** and use the db.txt to create a table and insert sample data. 65 | 66 | > Currently all our pods have ingress and egress routes enabled. They are in non-isolated state. We can verify that by creating a simple busybox pod - 67 | 68 | ` kubectl run -n networkdemo demopod --rm -ti --image busybox /bin/sh` 69 | 70 | ` wget -q --timeout=5 http://interpoddemo/index.php -O - ` 71 | 72 | > The above command should give you the php file as output 73 | 74 | > Lets create a Network Policy to deny all ingress to all pods in the networkdemo namespace. By default all access to all pods should be denied as a good security practice. 
Once all pods are isolated, we can create selective network policy to only open required access between pods. 75 | 76 | ` kubectl create -f deny-ingress.yaml` 77 | 78 | > Verify if access is denied - 79 | 80 | ` kubectl run -n networkdemo demopod --rm -ti --image busybox /bin/sh` 81 | 82 | ` wget -q --timeout=5 http://interpoddemo/index.php -O - ` 83 | 84 | ``` 85 | # wget -q --timeout=5 http://interpoddemo/index.php -O - 86 | wget: download timed out 87 | ``` 88 | 89 | > We will now create a networkpolicy to allow ingress to only the webserver pod and not the mysqlpod - 90 | 91 | ` kubectl create -f allow-ingress-webserver.yaml` 92 | 93 | > Verify if access is granted - 94 | 95 | ` kubectl run -n networkdemo demopod --rm -ti --image busybox /bin/sh` 96 | 97 | ` wget -q --timeout=5 http://interpoddemo/index.php -O - ` 98 | 99 | 100 | > Lets access the webserver from the browser - 101 | 102 | ` kubectl edit svc interpoddemo -n networkdemo` 103 | 104 | > Change the `type: ClusterIP` to `type: NodePort`. Get the nodeport value and access the webserver through your browser using - http://IP:NODEPORT/index.php 105 | 106 | > Verify that currently you are not able to access the webserver from the internet. Lets delete the networkpolicies to verify access from the internet. 107 | 108 | ` kubectl delete -f allow-ingress-webserver.yaml ` 109 | 110 | ` kubectl delete -f deny-ingress.yaml` 111 | 112 | > Lets restrict only the db-pod to deny all ingress 113 | 114 | ` kubectl create -f deny-ingress-db.yaml ` 115 | 116 | > Refresh your browser to verify that no db output is being fetched. 117 | 118 | > We will now create an ingress rule for our database pod to only restrict ingress from webserver pod - 119 | 120 | ` kubectl create -f allow-ingress-db.yaml ` 121 | 122 | > Refresh your browser to see if you are able to fetch the data again. 123 | 124 | > Lets verify the network policy by deploying the same application - but with different labels : 125 | 126 | ` kubectl create -f app2.yaml ` 127 | 128 | ``` 129 | kubectl get deploy -n networkdemo 130 | NAME READY UP-TO-DATE AVAILABLE AGE 131 | interpoddemo 3/3 3 3 61m 132 | interpoddemo2 3/3 3 3 9s 133 | mysql 1/1 1 1 69m 134 | ``` 135 | 136 | > Create a NodePort service for interpoddemo2 137 | 138 | ` kubectl expose deploy interpoddemo2 --port=80 --type=NodePort -n networkdemo` 139 | 140 | ``` 141 | kubectl get svc -n networkdemo 142 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 143 | interpoddemo NodePort 10.102.212.191 80:30469/TCP 61m 144 | interpoddemo2 NodePort 10.110.27.192 80:30408/TCP 21s 145 | mysql ClusterIP 10.105.107.237 3306/TCP 66m 146 | ``` 147 | 148 | > From your browser - open : http://IP:NODEPORT/index.php for interpoddemo2 149 | 150 | > Verify that interpoddemo2 service is not able to access mysql 151 | 152 | > We have now seen how to deny ingress to a pod. Lets now see how to handle egress on the cluster. 
We will start off by creating a denyALL egress rule - 153 | 154 | ` kubectl create -f deny-all-egress.yaml` 155 | 156 | > Verify all egress is denied - We will now run our access pod to perform a nslookup or ping to nginx pod - 157 | 158 | ` kubectl run --namespace=networkdemo access --rm -ti --image busybox /bin/sh` 159 | 160 | ` nslookup nginx` 161 | 162 | Observations - 163 | 164 | ``` 165 | / # nslookup nginx 166 | ;; connection timed out; no servers could be reached 167 | 168 | ``` 169 | 170 | 171 | > We will now allow all DNS egress traffic to our pod - 172 | 173 | ` kubectl create -f egress-allow-dns.yaml ` 174 | 175 | > The above file allows all traffic from any pod in networkdemo namespace to any pod in kube-system namespace. In order to achieve this egress, we are using a selector - name=kube-system. So its important to label the kube-system namespace with name=kube-system. 176 | 177 | ` kubectl label namespace kube-system name=kube-system` 178 | 179 | 180 | > Verify if egress is achieved - 181 | 182 | ` kubectl run --namespace=networkdemo access --rm -ti --image busybox /bin/sh` 183 | 184 | ` nslookup nginx` 185 | 186 | Observations - 187 | 188 | > Since coredns lies in kube-system namespace - nslookup command will now execute from networkdemo namespace and try to access coredns on kube-system. Since we have applied egress policies to kube-system, we should be able to get the nslookup output 189 | 190 | ``` 191 | nslookup nginx 192 | Server: 10.96.0.10 193 | Address: 10.96.0.10:53 194 | 195 | nslookup google.com 196 | Server: 10.96.0.10 197 | Address: 10.96.0.10:53 198 | ``` 199 | 200 | 201 | 202 | 203 | 204 | 205 | 206 | 207 | 208 | 209 | 210 | 211 | 212 | -------------------------------------------------------------------------------- /networkpolicies/allow-ingress-db.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: allow-access-database 5 | namespace: networkdemo 6 | spec: 7 | podSelector: 8 | matchLabels: 9 | app: mysql 10 | ingress: 11 | - from: 12 | - podSelector: 13 | matchLabels: 14 | app: phpapp 15 | -------------------------------------------------------------------------------- /networkpolicies/allow-ingress-webserver.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: allow-access-webserver 5 | namespace: networkdemo 6 | spec: 7 | podSelector: 8 | matchLabels: 9 | app: phpapp 10 | ingress: 11 | - from: 12 | - podSelector: 13 | matchLabels: {} 14 | 15 | -------------------------------------------------------------------------------- /networkpolicies/app.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: interpoddemo 5 | namespace: networkdemo 6 | labels: 7 | app: phpapp 8 | spec: 9 | replicas: 3 10 | selector: 11 | matchLabels: 12 | app: phpapp 13 | template: 14 | metadata: 15 | name: phpapp 16 | labels: 17 | app: phpapp 18 | 19 | spec: 20 | containers: 21 | - name: podcommunicationdemo 22 | image: harshal0812/phpdemo 23 | -------------------------------------------------------------------------------- /networkpolicies/app2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: interpoddemo2 5 | namespace: networkdemo 6 | 
labels: 7 | app: phpapp2 8 | spec: 9 | replicas: 3 10 | selector: 11 | matchLabels: 12 | app: phpapp2 13 | template: 14 | metadata: 15 | name: phpapp2 16 | labels: 17 | app: phpapp2 18 | 19 | spec: 20 | containers: 21 | - name: podcommunicationdemo2 22 | image: harshal0812/phpdemo 23 | -------------------------------------------------------------------------------- /networkpolicies/db.txt: -------------------------------------------------------------------------------- 1 | -- 2 | -- Table structure for table `tbl_user` 3 | -- 4 | CREATE TABLE IF NOT EXISTS `tbl_user` ( 5 | `id` int(11) NOT NULL AUTO_INCREMENT, 6 | `first_name` varchar(200) NOT NULL, 7 | `last_name` varchar(200) NOT NULL, 8 | PRIMARY KEY (`id`) 9 | ) ENGINE=MyISAM DEFAULT CHARSET=latin1 AUTO_INCREMENT=32 ; 10 | -- 11 | -- Dumping data for table `tbl_user` 12 | -- 13 | INSERT INTO `tbl_user` (`id`, `first_name`, `last_name`) VALUES 14 | (31, 'Tom', 'Cruze'), 15 | (30, 'Bill', 'Gates'), 16 | (29, 'John', 'Smith'), 17 | (28, 'Big', 'Show'), 18 | (27, 'Smith', 'Johnson'), 19 | (26, 'The', 'Rock'), 20 | (25, 'Peter', 'Parker'), 21 | (18, 'Mark', 'John'); 22 | -------------------------------------------------------------------------------- /networkpolicies/default.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name localhost; 4 | 5 | #charset koi8-r; 6 | #access_log /var/log/nginx/host.access.log main; 7 | 8 | root /usr/share/nginx/html; 9 | index index.html index.htm; 10 | 11 | #error_page 404 /404.html; 12 | 13 | # redirect server error pages to the static page /50x.html 14 | # 15 | error_page 500 502 503 504 /50x.html; 16 | location = /50x.html { 17 | root /usr/share/nginx/html; 18 | } 19 | 20 | location ~* \.php$ { 21 | fastcgi_pass unix:/run/php/php7.0-fpm.sock; 22 | include fastcgi_params; 23 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; 24 | fastcgi_param SCRIPT_NAME $fastcgi_script_name; 25 | } 26 | 27 | } 28 | -------------------------------------------------------------------------------- /networkpolicies/deny-all-egress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: default-deny-egress 5 | namespace: networkdemo 6 | spec: 7 | podSelector: 8 | matchLabels: {} 9 | policyTypes: 10 | - Egress 11 | 12 | -------------------------------------------------------------------------------- /networkpolicies/deny-ingress-db.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: default-deny-ingress-db 5 | namespace: networkdemo 6 | spec: 7 | podSelector: 8 | matchLabels: 9 | app: mysql 10 | policyTypes: 11 | - Ingress 12 | 13 | -------------------------------------------------------------------------------- /networkpolicies/deny-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: default-deny-ingress 5 | namespace: networkdemo 6 | spec: 7 | podSelector: 8 | matchLabels: {} 9 | policyTypes: 10 | - Ingress 11 | 12 | -------------------------------------------------------------------------------- /networkpolicies/egress-allow-dns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: 
NetworkPolicy 3 | metadata: 4 | name: allow-dns-access 5 | namespace: networkdemo 6 | spec: 7 | podSelector: 8 | matchLabels: {} 9 | policyTypes: 10 | - Egress 11 | egress: 12 | - to: 13 | - namespaceSelector: 14 | matchLabels: 15 | name: kube-system 16 | ports: 17 | - protocol: UDP 18 | port: 53 19 | -------------------------------------------------------------------------------- /networkpolicies/index.php: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Pod Demo - PHP & Mysql pod connectivity 6 | 7 | 8 | 9 | 10 |

(the HTML/AngularJS markup of index.php was stripped during extraction; the recoverable content is a table with First Name / Last Name columns bound to {{x.first_name}} and {{x.last_name}}, populated from select.php)
-------------------------------------------------------------------------------- /networkpolicies/mysqlpod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: mysql 5 | namespace: networkdemo 6 | labels: 7 | app: db 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: mysql 13 | template: 14 | 15 | metadata: 16 | name: mysql 17 | labels: 18 | app: mysql 19 | spec: 20 | containers: 21 | - image: mysql:5.7 22 | name: mysql 23 | env: 24 | - name: MYSQL_ROOT_PASSWORD 25 | value: "root" 26 | ports: 27 | - containerPort: 3306 28 | name: mysql 29 | volumeMounts: 30 | - name: hostvolume 31 | mountPath: /var/lib/mysql 32 | volumes: 33 | - name: hostvolume 34 | hostPath: 35 | path: /dataa 36 | type: DirectoryOrCreate 37 | -------------------------------------------------------------------------------- /networkpolicies/select.php: -------------------------------------------------------------------------------- (the opening of select.php - the `<?php` tag, the mysqli connection and the query - was stripped during extraction; only the tail survives) 8 | { 9 | while($row = mysqli_fetch_array($result)) 10 | { 11 | $output[] = $row; 12 | } 13 | echo json_encode($output); 14 | } 15 | ?> 16 | 17 | -------------------------------------------------------------------------------- /pod-assignment/README.md: -------------------------------------------------------------------------------- 1 | # Pod Assignment 2 | 3 | > kube-scheduler is the component responsible for assigning pods to nodes that can supply enough resources for a pod to execute. In common scenarios this works well, as it is the scheduler's responsibility to keep track of the status of all the kubernetes nodes and only assign healthy nodes to upcoming pods. A kubernetes admin doesn't need to worry about which pod is assigned to which node in most cases. However, there are multiple cases where you might want to assign pods to specific nodes. A few use cases are listed below - 4 | 5 | * Critical database pods might need to get scheduled on nodes that have persistent disks 6 | * Pod scheduling as per availability zones 7 | * Service switch for the same pod 8 | * Co-locate a pod as per affinity to another pod, for example - a backend pod might need tight interaction with a messaging queue 9 | 10 | > In the above scenarios, we cannot rely on kube-scheduler directly. A quick look into kube-scheduler shows - 11 | 12 | ~~~ 13 | func (f *ConfigFactory) getNextPod() *v1.Pod { 14 | for { 15 | pod := cache.Pop(f.podQueue).(*v1.Pod) 16 | if f.ResponsibleForPod(pod) { 17 | glog.V(4).Infof("About to try and schedule pod %v", pod.Name) 18 | return pod 19 | } 20 | } 21 | } 22 | 23 | ~~~ 24 | 25 | > kube-scheduler maintains a queue for all Pods. Any new pod that comes up goes directly into the podQueue data structure. Once inside the pod queue, it is up to the scheduler to find the appropriate node. Each node is assigned a rank based on the considerations below - 26 | 27 | * NoDiskConflict 28 | * NoVolumeZoneConflict 29 | * PodFitsHostPorts 30 | * HostName 31 | * MatchNodeSelector etc. 32 | 33 | 34 | > The above are considered filters; as the scheduler drills down through all the filtering policies, it determines the best node that can accommodate the pod. If you carefully observe the above filtering criteria, there is a **HostName** and a **MatchNodeSelector** policy that provide a way for an end user to assign nodes to a Pod directly.
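> The **HostName** filter corresponds to the pod's `spec.nodeName` field. As a minimal sketch (the node name knode1 is taken from this demo cluster; the manifest is illustrative and not one of the repo files), a pod can be pinned to a node directly, effectively bypassing label-based selection:

```
apiVersion: v1
kind: Pod
metadata:
  name: nginx-nodename     # illustrative name, not a file in this repo
spec:
  nodeName: knode1         # binds the pod to this node; no nodeSelector or affinity needed
  containers:
  - name: nginx
    image: nginx
```

> This is rarely used in practice because it ties the pod to a single node; the label-based strategies below are more flexible.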
35 | 36 | 37 | ## Pod Assignment strategies 38 | 39 | * nodeSelector 40 | 41 | > `nodeSelector` is a field of PodSpec. It specifies a map of key-value pairs. For the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels. This is the simplest form of selector. Lets do a quick demo to understand how node selectors work - 42 | 43 | > Label any one node - 44 | 45 | ` kubectl label node knode1 app=frontend ` 46 | 47 | ``` 48 | kubectl get nodes knode1 --show-labels 49 | NAME STATUS ROLES AGE VERSION LABELS 50 | knode1 Ready 5d5h v1.14.2 **app=frontend**,beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=knode1,kubernetes.io/os=linux 51 | ``` 52 | 53 | > We will now deploy a nginx Pod that has the attribute - nodeSelector which matches the label **app=frontend** 54 | 55 | ` kubectl create -f nginx-nodeselector.yaml` 56 | 57 | > Verify if node selection has worked 58 | 59 | ``` 60 | kubectl get pods -o wide 61 | nginx 1/1 Running 0 34s 192.168.1.22 knode1 62 | 63 | ``` 64 | 65 | > To remove a label from a node - 66 | 67 | ` kubectl edit node knode1` 68 | 69 | > Remove the entry app=frontend from .metadata.labels section and save the node config. 70 | 71 | 72 | * NodeAffinity 73 | 74 | > NodeSelector is a simple way of assigning pod to a node. However it defines a strict regulation that the pod will always be assigned to a node that matches the selector. Now that we all have deleted the label from the node, lets run the above example once again - 75 | 76 | ` kubectl create -f nginx-nodeselector.yaml` 77 | 78 | Observations - 79 | 80 | ~~~ 81 | kubectl get pods 82 | NAME READY STATUS RESTARTS AGE 83 | nginx 0/1 Pending 0 7s 84 | ~~~ 85 | 86 | > The Pod is in Pending state - NodeSelector enforces that the pod should always search for the node with the label - app=frontend. This is called as a hard rule. Hard rule is basically like a binary operator, its either 0 or 1. There is no state to determine if any nodes cannot satisfy the requirement of the pod, then what should be done in this case. 87 | 88 | > Node affinity solves this problem for us by defining a set of **Hard Rules** and **Soft Rules**. Hard Rules determines the strict scheduling rules for a pod and Soft Rules can be used to prefer selections from the outcomes of the Hard Rules. 89 | 90 | > Lets add some labels to our nodes - 91 | 92 | ` kubectl label node knode1 zone=us-central-1` 93 | 94 | ` kubectl label node knode2 zone=eu-west-1` 95 | 96 | ` kubectl label node knode2 drbackup=europe` 97 | 98 | ~~~ 99 | kubectl get nodes knode1 knode2 --show-labels 100 | NAME STATUS ROLES AGE VERSION LABELS 101 | knode1 Ready 5d6h v1.14.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=knode1,kubernetes.io/os=linux,zone=us-central-1 102 | NAME STATUS ROLES AGE VERSION LABELS 103 | knode2 Ready 5d6h v1.14.1 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,drbackup=europe,kubernetes.io/arch=amd64,kubernetes.io/hostname=knode2,kubernetes.io/os=linux,zone=eu-west-1 104 | 105 | ~~~ 106 | 107 | > As you can see our labels are now added. Lets now look at the hard and soft rules provided by nodeAffinity. 108 | 109 | 1. NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution - rule is “required during scheduling” but has no effect on an already-running Pod. 110 | 111 | 2. 
NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution - rule is “preferred during scheduling” but likewise has no effect on an already-running Pod. 112 | 113 | > RequiredDuringSchedulingIgnoredDuringExecution and PreferredDuringSchedulingIgnoredDuringExecution together form the basis of NodeAffinity. NodeAffinity uses matchExpressions (set-based selectors) to perform selection from a group of labels. Let's do a demo that uses the NodeAffinity concept to understand how it works. Below is the definition of Node Affinity in our demo - 114 | 115 | ~~~ 116 | affinity: 117 | nodeAffinity: 118 | requiredDuringSchedulingIgnoredDuringExecution: 119 | nodeSelectorTerms: 120 | - matchExpressions: 121 | - key: zone 122 | operator: In 123 | values: 124 | - us-central-1 125 | - eu-west-1 126 | preferredDuringSchedulingIgnoredDuringExecution: 127 | - weight: 1 128 | preference: 129 | matchExpressions: 130 | - key: drbackup 131 | operator: In 132 | values: 133 | - europe 134 | 135 | ~~~ 136 | 137 | > **requiredDuringSchedulingIgnoredDuringExecution** states that the nodes that will be selected should have the zone set as either us-central-1 or eu-west-1 138 | 139 | > **preferredDuringSchedulingIgnoredDuringExecution** states that out of the nodes that were selected, the preferred scheduling node is the node that has drbackup set as europe. 140 | 141 | > The **weight** can be any number between 1 and 100. As discussed above, the scheduler will compute a rank (integer) for all available nodes that satisfy the affinity criteria. Within a single nodeSelectorTerm the matchExpressions are ANDed; if multiple nodeSelectorTerms are given, a node only needs to satisfy one of them. For each preferred term a node satisfies, its weight is added to that node's score, and the nodes with the highest score are preferred. 142 | 143 | > **nodeSelectorTerms** is a list of terms, each containing one or more matchExpressions, used to select nodes. 144 | 145 | ` kubectl create -f nginx-nodeaffinity.yaml ` 146 | 147 | Observations 148 | 149 | ~~~ 150 | kubectl get pods -o wide 151 | NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES 152 | nginx-affinity-node 1/1 Running 0 6s 192.168.2.204 knode2 153 | ~~~ 154 | 155 | > The pod is now created on knode2. Knode2 satisfies the criteria of the hard rule - zone=eu-west-1 - and the soft rule - drbackup=europe. 156 | 157 | > Let's now see what happens when we delete the label drbackup=europe from knode2 and create the same pod - 158 | 159 | > Let's delete the pod first - 160 | 161 | ` kubectl delete -f nginx-nodeaffinity.yaml ` 162 | 163 | > We will now delete the label - drbackup=europe 164 | 165 | ` kubectl label node knode2 drbackup-` 166 | 167 | > Recreate the pod - 168 | 169 | ` kubectl create -f nginx-nodeaffinity.yaml` 170 | 171 | 172 | Observations - 173 | 174 | > Since the soft rule cannot be matched, multiple retries of the pod creation will create the pod on either of the nodes. 175 | 176 | ``` 177 | kubectl get pods -o wide 178 | NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES 179 | nginx-affinity-node 1/1 Running 0 3s 192.168.1.11 knode1 180 | 181 | 182 | kubectl get pods -o wide 183 | NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES 184 | nginx-affinity-node 1/1 Running 0 8s 192.168.2.8 knode2 185 | 186 | ``` 187 | 188 | * PodAffinity/Anti-Affinity 189 | 190 | > PodAffinity is the selection of a node on the basis of labels of other pods running on the node. In similar fashion, pod anti-affinity is the way to repel a pod from a node on the basis of labels of pods running on that node.
Pod Affinity has multiple usecases - for ex : running an application on the same node where memcache/redis are running. Similarly, pod-antiaffinity can be used when you want to spread your pods across multiple nodes - for ex : distributing mysql pods so that they dont interfere with volumes, spreading elasticsearch pod on different nodes to create a highly available deployment. 191 | 192 | > Pod affinity and antiaffinity is very similar to node affinity, the only difference is in the selection criteria. Pod affinity/antiaffinity introduces a new field called as **topology**. The topology can be any used defined key-value label. There are a few restrictions as below - 193 | 194 | 1. For affinity and for requiredDuringSchedulingIgnoredDuringExecution pod anti-affinity, empty topologyKey is not allowed. 195 | 2. For requiredDuringSchedulingIgnoredDuringExecution pod anti-affinity, the admission controller LimitPodHardAntiAffinityTopology was introduced to limit topologyKey to kubernetes.io/hostname. If you want to make it available for custom topologies, you may modify the admission controller, or simply disable it. 196 | 3. For preferredDuringSchedulingIgnoredDuringExecution pod anti-affinity, empty topologyKey is interpreted as “all topologies” (“all topologies” here is now limited to the combination of kubernetes.io/hostname, failure-domain.beta.kubernetes.io/zone and failure-domain.beta.kubernetes.io/region). 197 | 198 | 199 | > Lets now do a demo on pod affinity/antiaffinity. We will start by deploying a simple redis application with the label : app=cache 200 | 201 | ` kubectl create -f redis-cache.yaml` 202 | 203 | ~~~ 204 | kubectl get pods --show-labels -o wide 205 | NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES LABELS 206 | redis-cache-7d6d684f97-s7zrb 1/1 Running 0 48s 192.168.1.27 knode1 app=cache 207 | ~~~ 208 | 209 | > We will now deploy another redis pod with the label - app=web-cache which denotes that this redis deployment will server only web traffic. The nodeselector on redis-cache-web is set as knode2, basically any node on which our previous deployment doesnt run. 210 | 211 | ` kubectl create -f redis-cache-web.yaml` 212 | 213 | ~~~ 214 | kubectl get pods -owide 215 | NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES 216 | redis-cache-7d6d684f97-s7zrb 1/1 Running 0 5m58s 192.168.1.27 knode1 217 | web-redis-cache-856b7bc58b-ksf7t 1/1 Running 0 9s 192.168.2.210 knode2 218 | ~~~ 219 | 220 | > We will now deploy a dummy application that will have both - affinity and antiaffinity as below - 221 | 222 | ``` 223 | affinity: 224 | podAntiAffinity: 225 | requiredDuringSchedulingIgnoredDuringExecution: 226 | - labelSelector: 227 | matchExpressions: 228 | - key: app 229 | operator: In 230 | values: 231 | - web-cache 232 | topologyKey: "kubernetes.io/hostname" 233 | podAffinity: 234 | requiredDuringSchedulingIgnoredDuringExecution: 235 | - labelSelector: 236 | matchExpressions: 237 | - key: app 238 | operator: In 239 | values: 240 | - cache 241 | topologyKey: "kubernetes.io/hostname" 242 | 243 | ``` 244 | 245 | > The above example says that - The application will not run on any node where any pod has a label - app=web-cache. All instances of the pod will always be colocated with the redis pod which has the label - app=cache. 
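> As an aside, the anti-affinity above is a hard (required) rule, so the web-server pods would stay Pending if every node hosted an app=web-cache pod. The same repulsion can be expressed as a soft preference instead - a sketch only, not one of the demo manifests:

```
affinity:
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
    - weight: 100                       # added to the score of nodes that avoid app=web-cache pods
      podAffinityTerm:
        labelSelector:
          matchExpressions:
          - key: app
            operator: In
            values:
            - web-cache
        topologyKey: "kubernetes.io/hostname"
```

> With a preferred rule the scheduler still places the pod somewhere if no node satisfies the preference, instead of leaving it Pending.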
246 | 247 | ` kubectl create -f nginx.yaml` 248 | 249 | Observations : 250 | 251 | ~~~ 252 | kubectl get pods -o wide 253 | NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES 254 | redis-cache-7d6d684f97-s7zrb 1/1 Running 0 11m 192.168.1.27 knode1 255 | web-redis-cache-856b7bc58b-ksf7t 1/1 Running 0 5m13s 192.168.2.210 knode2 256 | web-server-f98668944-grtkv 1/1 Running 0 8s 192.168.1.30 knode1 257 | web-server-f98668944-rzmws 1/1 Running 0 8s 192.168.1.29 knode1 258 | web-server-f98668944-wzlsr 1/1 Running 0 8s 192.168.1.28 knode1 259 | ~~~ 260 | 261 | 262 | > As we understood - all the webserver replicas are now running on knode1 - which also serves the pod redis-cache that has the label set as - app=cache. No pods are running on the node knode2 where the redis pod is running with the label - app=web-cache 263 | 264 | > Lets delete the nginx deployment - 265 | 266 | ` kubectl delete -f nginx.yaml` 267 | 268 | > We will now create another redis pod with the label - app=cache and assign it to the same node where web-cache redis pod is running (nodeselector) 269 | 270 | ` kubectl create -f redis-cache-2.yaml` 271 | 272 | ``` 273 | kubectl get pods --show-labels -o wide 274 | NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES LABELS 275 | redis-cache-2-7495666dfc-pd6kd 1/1 Running 0 11s 192.168.2.211 knode2 app=cache,pod-template-hash=7495666dfc 276 | redis-cache-7d6d684f97-s7zrb 1/1 Running 0 16m 192.168.1.27 knode1 app=cache,pod-template-hash=7d6d684f97 277 | web-redis-cache-856b7bc58b-ksf7t 1/1 Running 0 10m 192.168.2.210 knode2 app=web-cache,pod-template-hash=856b7bc58b 278 | ``` 279 | 280 | > Create the nginx deployment once again to verify antiaffinity. Webserver pod will still repel knode2 281 | 282 | ~~~ 283 | kubectl create -f nginx.yaml 284 | 285 | kubectl get pods -o wide 286 | NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES 287 | redis-cache-2-7495666dfc-pd6kd 1/1 Running 0 2m29s 192.168.2.211 knode2 288 | redis-cache-7d6d684f97-s7zrb 1/1 Running 0 19m 192.168.1.27 knode1 289 | web-redis-cache-856b7bc58b-ksf7t 1/1 Running 0 13m 192.168.2.210 knode2 290 | web-server-f98668944-5dljp 1/1 Running 0 13s 192.168.1.32 knode1 291 | web-server-f98668944-77q5j 1/1 Running 0 13s 192.168.1.31 knode1 292 | web-server-f98668944-k78mb 1/1 Running 0 13s 192.168.1.33 knode1 293 | ~~~ 294 | 295 | 296 | 297 | 298 | 299 | 300 | 301 | 302 | 303 | 304 | 305 | 306 | 307 | 308 | 309 | 310 | 311 | 312 | 313 | -------------------------------------------------------------------------------- /pod-assignment/interpodaffinity/nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: web-server 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: web 9 | replicas: 3 10 | template: 11 | metadata: 12 | labels: 13 | app: web 14 | spec: 15 | affinity: 16 | podAntiAffinity: 17 | requiredDuringSchedulingIgnoredDuringExecution: 18 | - labelSelector: 19 | matchExpressions: 20 | - key: app 21 | operator: In 22 | values: 23 | - web-cache 24 | topologyKey: "kubernetes.io/hostname" 25 | podAffinity: 26 | requiredDuringSchedulingIgnoredDuringExecution: 27 | - labelSelector: 28 | matchExpressions: 29 | - key: app 30 | operator: In 31 | values: 32 | - cache 33 | topologyKey: "kubernetes.io/hostname" 34 | containers: 35 | - name: web-app 36 | image: nginx:latest 37 | 38 | -------------------------------------------------------------------------------- 
/pod-assignment/interpodaffinity/redis-cache-2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: redis-cache-2 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: cache 9 | replicas: 1 10 | template: 11 | metadata: 12 | labels: 13 | app: cache 14 | spec: 15 | containers: 16 | - name: redis-server 17 | image: redis:3.2-alpine 18 | nodeSelector: 19 | kubernetes.io/hostname: knode2 20 | 21 | -------------------------------------------------------------------------------- /pod-assignment/interpodaffinity/redis-cache-web.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: web-redis-cache 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: web-cache 9 | replicas: 1 10 | template: 11 | metadata: 12 | labels: 13 | app: web-cache 14 | spec: 15 | containers: 16 | - name: redis-server 17 | image: redis:3.2-alpine 18 | nodeSelector: 19 | kubernetes.io/hostname: knode2 20 | 21 | -------------------------------------------------------------------------------- /pod-assignment/interpodaffinity/redis-cache.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: redis-cache 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: cache 9 | replicas: 1 10 | template: 11 | metadata: 12 | labels: 13 | app: cache 14 | spec: 15 | containers: 16 | - name: redis-server 17 | image: redis:3.2-alpine 18 | nodeSelector: 19 | kubernetes.io/hostname: knode1 20 | 21 | 22 | -------------------------------------------------------------------------------- /pod-assignment/nodeaffinity/nginx-nodeaffinity.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx-affinity-node 5 | spec: 6 | affinity: 7 | nodeAffinity: 8 | requiredDuringSchedulingIgnoredDuringExecution: 9 | nodeSelectorTerms: 10 | - matchExpressions: 11 | - key: zone 12 | operator: In 13 | values: 14 | - us-central-1 15 | - eu-west-1 16 | preferredDuringSchedulingIgnoredDuringExecution: 17 | - weight: 1 18 | preference: 19 | matchExpressions: 20 | - key: drbackup 21 | operator: In 22 | values: 23 | - europe 24 | containers: 25 | - name: nginx 26 | image: nginx:latest 27 | 28 | -------------------------------------------------------------------------------- /pod-assignment/nodeselector/nginx-nodeselector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx 5 | labels: 6 | env: test 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx 11 | imagePullPolicy: IfNotPresent 12 | nodeSelector: 13 | app: frontend 14 | 15 | -------------------------------------------------------------------------------- /pod-priority-preemption/README.md: -------------------------------------------------------------------------------- 1 | # Pod Priority and Pre-emption 2 | 3 | ## What is cluster Autoscaler ? 4 | 5 | > The kubernetes cluster autoscaler feature provides an excellent way to add more node to your cluster when your cluster is overloaded by heavy workload. Cluster autoscaler looks for the pods that cannot be scheduled and checks if adding a new node, similar to the other in the cluster, would help. If yes, then it resizes the cluster to accommodate the waiting pods. 
Cluster autoscaler also scales down the cluster if it notices that one or more nodes are not needed anymore for an extended period of time. There are implementation of Cluster autoscaler on GCP/GKE (best performance), AKS, EKS and Alibaba Cloud. 6 | 7 | > There are certain limitations in using Cluster Autoscaler - 8 | 9 | * Cluster Autoscaler doesnt work on physical clusters 10 | * Costs involved in adding more nodes 11 | * Time taken in adding nodes to the cluster during which your pods are in PENDING state. 12 | 13 | ## Advantages of Pod priority and preemption 14 | 15 | > Pod priority and preemption is used as an alternative to Cluster Autoscaler (specially on physical clusters). It helps you to achieve high level of scheduling confidence for your **CRITICAL WORKLOADS** without overprovisioning your cluster. 16 | 17 | > You can set priority to pods to determine its relative priority in comparison to other pods running in your system. If a pod with higher priority cannot be scheduled due to insufficient resources, kubernetes will preempt pods with lower priority to make space for your higher priority pod. 18 | 19 | ## Using Pod Priority 20 | 21 | > In order to use Pod Priority (stable as per version 1.14) - Your kubernetes admin will create multiple **PodPriorityClasses** which determines levels of priorities. As a part of your pod deployment, each pod will have a corresponding **priorityClassName** which associates your pod with the corresponding **PodPriorityClass** 22 | 23 | 24 | ## Working with Pod Priority 25 | 26 | * Create a Priority Class 27 | 28 | ` kubectl create -f priorityclass.yaml` 29 | 30 | ``` 31 | kubectl get priorityclass 32 | NAME VALUE GLOBAL-DEFAULT AGE 33 | high-priority 1000000 false 48s 34 | ``` 35 | 36 | * Create a dummy deployment for nginx and scale it to overutilize your cluster 37 | 38 | ` kubectl run nginx --image=nginx` 39 | 40 | ` kubectl scale deploy nginx --replicas=300` 41 | 42 | > The above command overutilizes our cluster and you will see a lot of pods in pending state. 
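> Note that pods which do not set a `priorityClassName` (such as the nginx replicas above) run with priority 0 unless a PriorityClass is marked as the cluster-wide default. A minimal sketch of such a default class (the name and value are illustrative, not part of this repo):

```
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: default-priority        # illustrative name
value: 1000                     # well below high-priority (1000000), so these pods can be preempted
globalDefault: true             # applied to pods that do not specify a priorityClassName
description: "Cluster-wide default priority for ordinary workloads"
```

> Only one PriorityClass in the cluster can have globalDefault set to true.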
43 | 44 | * Create a pod with high priority 45 | 46 | > Verify the current number of pods running in the overloaded deployment 47 | 48 | ``` 49 | kubectl get deploy 50 | NAME READY UP-TO-DATE AVAILABLE AGE 51 | nginx **214/300** 300 214 9m33s 52 | ``` 53 | 54 | > Create the pod with high priority 55 | 56 | ` kubectl create -f high-priority-nginx.yaml ` 57 | 58 | ``` 59 | kubectl get pods | grep -i high 60 | high-priority-nginx 1/1 Running 0 117s 61 | ``` 62 | 63 | > Check if an existing pod is evicted 64 | 65 | ``` 66 | kubectl get deploy 67 | NAME READY UP-TO-DATE AVAILABLE AGE 68 | nginx **213/300** 300 213 11m 69 | ``` 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | -------------------------------------------------------------------------------- /pod-priority-preemption/high-priority-nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: high-priority-nginx2 5 | labels: 6 | env: test 7 | spec: 8 | containers: 9 | - name: nginx-high-priority 10 | image: nginx 11 | imagePullPolicy: IfNotPresent 12 | priorityClassName: high-priority 13 | 14 | -------------------------------------------------------------------------------- /pod-priority-preemption/priorityclass.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: scheduling.k8s.io/v1 2 | kind: PriorityClass 3 | metadata: 4 | name: high-priority 5 | value: 1000000 6 | globalDefault: false 7 | description: "This priority class is the highest available priority class" 8 | 9 | -------------------------------------------------------------------------------- /probes/configmap-defaultconf.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: nginx-conf 5 | data: 6 | default.conf: | 7 | server { 8 | listen 80; 9 | server_name localhost; 10 | 11 | #charset koi8-r; 12 | #access_log /var/log/nginx/host.access.log main; 13 | 14 | location / { 15 | root /usr/share/nginx/html; 16 | index index.html index.htm; 17 | } 18 | location /healthz { 19 | return 200 'I am alive !'; 20 | } 21 | 22 | error_page 500 502 503 504 /50x.html; 23 | location = /50x.html { 24 | root /usr/share/nginx/html; 25 | } 26 | 27 | } 28 | 29 | -------------------------------------------------------------------------------- /probes/default.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name localhost; 4 | 5 | #charset koi8-r; 6 | #access_log /var/log/nginx/host.access.log main; 7 | 8 | location / { 9 | root /usr/share/nginx/html; 10 | index index.html index.htm; 11 | } 12 | location /healthz { 13 | return 200 'I am alive !'; 14 | } 15 | 16 | error_page 500 502 503 504 /50x.html; 17 | location = /50x.html { 18 | root /usr/share/nginx/html; 19 | } 20 | 21 | } 22 | -------------------------------------------------------------------------------- /probes/mysqlpod-tcpsocket.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: mysql 5 | labels: 6 | app: mysql 7 | spec: 8 | containers: 9 | - image: mysql:5.7 10 | name: mysql 11 | env: 12 | - name: MYSQL_ROOT_PASSWORD 13 | value: "root" 14 | ports: 15 | - containerPort: 3306 16 | name: mysql 17 | volumeMounts: 18 | - name: hostvolume 19 | mountPath: /var/lib/mysql 20 | readinessProbe: 21 | tcpSocket: 22 | port: 3306 23 | 
livenessProbe: 24 | exec: 25 | command: ["mysqladmin","-proot","ping"] 26 | initialDelaySeconds: 5 27 | periodSeconds: 2 28 | timeoutSeconds: 1 29 | volumes: 30 | - name: hostvolume 31 | hostPath: 32 | path: /data 33 | type: DirectoryOrCreate 34 | -------------------------------------------------------------------------------- /probes/mysqlpod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: mysql 5 | labels: 6 | app: mysql 7 | spec: 8 | containers: 9 | - image: mysql:5.7 10 | name: mysql 11 | env: 12 | - name: MYSQL_ROOT_PASSWORD 13 | value: "root" 14 | ports: 15 | - containerPort: 3306 16 | name: mysql 17 | volumeMounts: 18 | - name: hostvolume 19 | mountPath: /var/lib/mysql 20 | readinessProbe: 21 | exec: 22 | command: ["mysql", "-h", "127.0.0.1","-proot", "-e", "SELECT 1"] 23 | initialDelaySeconds: 5 24 | periodSeconds: 2 25 | timeoutSeconds: 1 26 | livenessProbe: 27 | exec: 28 | command: ["mysqladmin","-proot","ping"] 29 | initialDelaySeconds: 5 30 | periodSeconds: 2 31 | timeoutSeconds: 1 32 | volumes: 33 | - name: hostvolume 34 | hostPath: 35 | path: /data 36 | type: DirectoryOrCreate 37 | -------------------------------------------------------------------------------- /probes/mysqlpod_2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: mysql2 5 | labels: 6 | app: mysql 7 | spec: 8 | containers: 9 | - image: mysql:5.7 10 | name: mysql 11 | env: 12 | - name: MYSQL_ROOT_PASSWORD 13 | value: "root" 14 | ports: 15 | - containerPort: 3306 16 | name: mysql 17 | volumeMounts: 18 | - name: hostvolume 19 | mountPath: /var/lib/mysql 20 | readinessProbe: 21 | exec: 22 | command: ["mysql", "-h", "127.0.0.1","-proot", "-e", "SELECT 1"] 23 | initialDelaySeconds: 5 24 | periodSeconds: 2 25 | timeoutSeconds: 1 26 | livenessProbe: 27 | exec: 28 | command: ["mysqladmin","-proot","ping"] 29 | initialDelaySeconds: 5 30 | periodSeconds: 2 31 | timeoutSeconds: 1 32 | volumes: 33 | - name: hostvolume 34 | hostPath: 35 | path: /data2 36 | type: DirectoryOrCreate 37 | -------------------------------------------------------------------------------- /probes/mysqlpod_noprobe.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: mysql 5 | labels: 6 | app: mysql 7 | spec: 8 | containers: 9 | - image: mysql:5.7 10 | name: mysql 11 | env: 12 | - name: MYSQL_ROOT_PASSWORD 13 | value: "root" 14 | ports: 15 | - containerPort: 3306 16 | name: mysql 17 | volumeMounts: 18 | - name: hostvolume 19 | mountPath: /var/lib/mysql 20 | volumes: 21 | - name: hostvolume 22 | hostPath: 23 | path: /data 24 | type: DirectoryOrCreate 25 | -------------------------------------------------------------------------------- /probes/mysqlpod_noprobe2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: mysql2 5 | labels: 6 | app: mysql 7 | spec: 8 | containers: 9 | - image: mysql:5.7 10 | name: mysql 11 | env: 12 | - name: MYSQL_ROOT_PASSWORD 13 | value: "root" 14 | ports: 15 | - containerPort: 3306 16 | name: mysql 17 | volumeMounts: 18 | - name: hostvolume 19 | mountPath: /var/lib/mysql 20 | volumes: 21 | - name: hostvolume 22 | hostPath: 23 | path: /data2 24 | type: DirectoryOrCreate 25 | -------------------------------------------------------------------------------- 
/probes/nginx-bad.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx-bad 5 | labels: 6 | app: nginx 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx 11 | ports: 12 | - containerPort: 80 13 | livenessProbe: #this block performs liveness probes 14 | httpGet: 15 | path: /healthz-notexist 16 | port: 80 17 | readinessProbe: #this block performs readiness probes 18 | httpGet: 19 | path: /myreadiness 20 | port: 80 21 | -------------------------------------------------------------------------------- /probes/nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx 5 | labels: 6 | app: nginx 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx 11 | ports: 12 | - containerPort: 80 13 | livenessProbe: #this block performs liveness probes 14 | httpGet: 15 | path: /healthz 16 | port: 80 17 | readinessProbe: #this block performs readiness probes 18 | httpGet: 19 | path: / 20 | port: 80 21 | volumeMounts: 22 | - mountPath: /etc/nginx/conf.d 23 | name: default-conf 24 | readOnly: true 25 | 26 | volumes: 27 | - name: default-conf 28 | configMap: 29 | name: nginx-conf 30 | items: 31 | - key: default.conf 32 | path: default.conf 33 | -------------------------------------------------------------------------------- /probes/nginx2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx2 5 | labels: 6 | app: nginx 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx 11 | ports: 12 | - containerPort: 80 13 | livenessProbe: #this block performs liveness probes 14 | httpGet: 15 | path: /healthz 16 | port: 80 17 | readinessProbe: #this block performs readiness probes 18 | httpGet: 19 | path: / 20 | port: 80 21 | volumeMounts: 22 | - mountPath: /etc/nginx/conf.d 23 | name: default-conf 24 | 25 | volumes: 26 | - name: default-conf 27 | configMap: 28 | name: nginx-conf 29 | items: 30 | - key: default.conf 31 | path: default.conf 32 | -------------------------------------------------------------------------------- /prometheus/basic-prometheus-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: prometheus-deployment 5 | labels: 6 | app: prometheus 7 | purpose: example 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: prometheus 13 | purpose: example 14 | template: 15 | metadata: 16 | labels: 17 | app: prometheus 18 | purpose: example 19 | spec: 20 | containers: 21 | - name: prometheus-example 22 | image: prom/prometheus 23 | volumeMounts: 24 | - name: config-volume 25 | mountPath: /etc/prometheus/prometheus.yml 26 | subPath: prometheus.yml 27 | ports: 28 | - containerPort: 9090 29 | volumes: 30 | - name: config-volume 31 | configMap: 32 | name: prometheus-example-cm 33 | --- 34 | kind: Service 35 | apiVersion: v1 36 | metadata: 37 | name: prometheus-example-service 38 | spec: 39 | selector: 40 | app: prometheus 41 | purpose: example 42 | ports: 43 | - name: promui 44 | protocol: TCP 45 | port: 9090 46 | targetPort: 9090 47 | type: NodePort 48 | -------------------------------------------------------------------------------- /prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 15s # By 
default, scrape targets every 15 seconds. 3 | 4 | # Attach these labels to any time series or alerts when communicating with 5 | # external systems (federation, remote storage, Alertmanager). 6 | external_labels: 7 | monitor: 'codelab-monitor' 8 | 9 | # A scrape configuration containing exactly one endpoint to scrape: 10 | # Here it's Prometheus itself. 11 | scrape_configs: 12 | # The job name is added as a label `job=` to any timeseries scraped from this config. 13 | - job_name: 'prometheus' 14 | 15 | # Override the global default and scrape targets from this job every 5 seconds. 16 | scrape_interval: 5s 17 | 18 | static_configs: 19 | - targets: ['localhost:9090'] 20 | 21 | -------------------------------------------------------------------------------- /resource-metrics/README.md: -------------------------------------------------------------------------------- 1 | # Resource Metrics in Kubernetes 2 | 3 | Since heapster is now deprecated and has reached end of life, we will use metrics-server in order to check resource metrics in kubernetes. 4 | 5 | ## Install metrics server 6 | 7 | ``` 8 | git clone https://github.com/kubernetes-incubator/metrics-server 9 | cd metrics-server/deploy 10 | kubectl create -f 1.8+/ 11 | 12 | ``` 13 | 14 | Ensure that metrics server is running 15 | 16 | ``` 17 | kubectl get pods -n kube-system | grep metrics 18 | metrics-server-v0.3.1-8d4c5db46-fgb6v 2/2 Running 0 5m39s 19 | ``` 20 | 21 | ## Gather Pod Resource metrics 22 | 23 | ``` 24 | kubectl top pods --all-namespaces 25 | 26 | NAMESPACE NAME CPU(cores) MEMORY(bytes) 27 | kube-system event-exporter-v0.2.5-7df89f4b8f-gtrzv 1m 18Mi 28 | kube-system fluentd-gcp-scaler-54ccb89d5-8mjr6 0m 31Mi 29 | kube-system fluentd-gcp-v3.1.1-ksthc 10m 141Mi 30 | kube-system fluentd-gcp-v3.1.1-r7gch 6m 139Mi 31 | kube-system heapster-696599ddd4-kh2dt 1m 34Mi 32 | kube-system kube-dns-5877696fb4-sgz57 2m 30Mi 33 | kube-system kube-dns-5877696fb4-xvn8d 2m 30Mi 34 | kube-system kube-dns-autoscaler-85f8bdb54-mt25z 1m 4Mi 35 | kube-system kube-proxy-gke-standard-cluster-1-default-pool-410b975c-7jpk 1m 12Mi 36 | kube-system kube-proxy-gke-standard-cluster-1-default-pool-410b975c-xt8b 1m 12Mi 37 | kube-system l7-default-backend-8f479dd9-blgzg 1m 1Mi 38 | kube-system metrics-server-v0.3.1-8d4c5db46-fgb6v 1m 17Mi 39 | kube-system prometheus-to-sd-5v2sp 1m 14Mi 40 | kube-system prometheus-to-sd-qp4s2 1m 15Mi 41 | kube-system stackdriver-metadata-agent-cluster-level-8688665b4f-gg796 3m 19Mi 42 | ``` 43 | 44 | To see the CPU and memory usage for the individual containers of a pod - 45 | 46 | ``` 47 | kubectl top pod metrics-server-v0.3.1-8d4c5db46-fgb6v -n kube-system --containers 48 | 49 | POD NAME CPU(cores) MEMORY(bytes) 50 | metrics-server-v0.3.1-8d4c5db46-fgb6v metrics-server-nanny 1m 4Mi 51 | metrics-server-v0.3.1-8d4c5db46-fgb6v metrics-server 1m 12Mi 52 | ``` 53 | 54 | ## Gather Node resource metrics 55 | 56 | ``` 57 | kubectl top node 58 | 59 | NAME CPU(cores) CPU% MEMORY(bytes) MEMORY% 60 | gke-standard-cluster-1-default-pool-410b975c-7jpk 40m 4% 570Mi 21% 61 | gke-standard-cluster-1-default-pool-410b975c-xt8b 69m 7% 764Mi 28% 62 | ``` 63 | 64 | We can also see the CPU and memory usage for individual nodes by specifying a node name 65 | 66 | ``` 67 | kubectl top node gke-standard-cluster-1-default-pool-410b975c-7jpk 68 | 69 | NAME CPU(cores) CPU% MEMORY(bytes) MEMORY% 70 | gke-standard-cluster-1-default-pool-410b975c-7jpk 46m 4% 571Mi 21% 71 | 72 | ``` 73 | 74 | 75 | 
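If you want to see the raw data that `kubectl top` consumes, you can also query the Metrics API that metrics-server registers. A quick sketch (not part of the original walkthrough; the exact output format may differ between metrics-server versions):

```
kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes"
kubectl get --raw "/apis/metrics.k8s.io/v1beta1/namespaces/kube-system/pods"
```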
-------------------------------------------------------------------------------- /security-context/README.md: -------------------------------------------------------------------------------- 1 | # Understanding Kubernetes security contexts 2 | 3 | > Kubernetes security contexts allow the Kubernetes admin to add a layer of security, authentication and authorization to Kubernetes. We have already seen one way of securing Kubernetes when we generated the self-signed certificates for the different Kubernetes components to enable SSL connectivity. There are multiple other ways of securing your cluster. Let's understand some ways to secure our cluster. 4 | 5 | 6 | # RBAC - Role Based Access Control 7 | 8 | > RBAC essentially provides authorization for subjects (end users, applications, Kubernetes components) to perform actions on Kubernetes resources (pods, services, namespaces, deployments, etc.). kube-apiserver exposes the `rbac.authorization.k8s.io` API group, which is used by the RBAC resources to provide authorization. Let's understand the building blocks of RBAC - 9 | 10 | - **Subjects** 11 | - **Users** - This can be any end user (OS user) like a Kubernetes administrator, developer, OPS, SA, etc. The user needs appropriate permissions in the Kubernetes cluster to access resources. For example, an admin user will need full access to the cluster, whereas a developer might need access to only a single namespace, and another user might just need GET, LIST and WATCH access on the cluster and not DELETE. 12 | 13 | - **ServiceAccount** - Service accounts are used by applications running in Kubernetes. A service account is a cluster-level Kubernetes account (not a physical user in your OS) which is assigned to pods. Service accounts are granted authorization policies which allow their pods to interact with other resources in your cluster. 14 | 15 | - **Groups** - A group denotes a set of users which needs the same level of access. 16 | 17 | - **Resources** - Resources are Kubernetes entities like pods, deployments, HPAs, persistent volumes, etc., as well as certain subresources like nodes/stats and pods/log. These are the entities to which RBAC grants authorization. 18 | 19 | - **Verbs** - Verbs are the actions which specify the type of authorization granted on the resources: GET, LIST, WATCH, DELETE, CREATE, UPDATE, PATCH. You can fine-grain the policies by using the correct verbs. For example, an admin needs all of these actions on all resources, while a developer might need only GET, LIST, CREATE and WATCH on a single namespace and doesn't need DELETE, UPDATE or PATCH. 20 | 21 | - **Roles and ClusterRoles** - 22 | 23 | - **Roles** - A Role is essentially a security profile which combines a set of resources with the corresponding verbs. Roles are limited to a single namespace. 24 | 25 | - **ClusterRole** - Very similar to a Role, but it is a security profile for the entire cluster. 26 | 27 | - **RoleBinding and ClusterRoleBinding** 28 | 29 | - **RoleBinding** - A RoleBinding binds a **subject** to a **Role**, which means the subject is now assigned certain authorization policies. Since Roles are confined to a single namespace, the subjects get these authorizations only in that namespace. It's important to understand that the service accounts or users accessing the Role must be present in the same namespace. 30 | 31 | - **ClusterRoleBinding** - Similar to a RoleBinding, a ClusterRoleBinding binds a **Subject** to a **ClusterRole**.
Since a ClusterRole spans the entire cluster, the subject will have these policies in all namespaces across your cluster. 32 | 33 | > Now that we have understood the primary concepts of RBAC, let's do a demo on creating a Kubernetes user with minimal authorization - 34 | 35 | > Let's start off by creating a user `developer` on the Linux machine from where kubectl is run - 36 | 37 | ` useradd -m developer` 38 | 39 | ` sudo -iu developer` 40 | 41 | ``` 42 | pwd 43 | /home/developer 44 | ``` 45 | 46 | > Just like we created certificates for the admin user, we will now create a certificate for the developer user. We need the CA certificate (ca.crt / ca.pem) and the CA private key (ca.key / ca-key.pem) 47 | 48 | ``` 49 | mkdir developercerts 50 | cd developercerts 51 | cp /etc/kubernetes/pki/ca.key . 52 | cp /etc/kubernetes/pki/ca.crt . 53 | ``` 54 | 55 | > We will now create a CSR for the developer user and use the CA certificate and key to sign it. While creating the CSR we need to set the CN to developer. This tells Kubernetes that any end user using this certificate will have the same access as the developer user. 56 | 57 | ~~~ 58 | openssl genrsa -out user.key 2048 59 | openssl req -new -key user.key -out user.csr -subj "/CN=developer/O=dev" 60 | openssl x509 -req -in user.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out user.crt -days 500 61 | ~~~ 62 | 63 | > Below are the files generated - 64 | 65 | ~~~ 66 | -rw------- 1 root root 1675 May 18 11:05 user.key 67 | -rw-r--r-- 1 root root 911 May 18 11:08 user.csr 68 | -rw-r--r-- 1 root root 1111 May 18 11:09 user.crt 69 | -rw-r--r-- 1 root root 17 May 18 11:09 ca.srl 70 | ~~~ 71 | 72 | 73 | > Create RBAC policies for the developer user - We will create a namespace called `development` and grant the GET, LIST, WATCH, CREATE, UPDATE actions on ONLY Deployments, ReplicaSets and Pods in this namespace. Below are the definitions of the Role and the corresponding RoleBinding 74 | 75 | ~~~ 76 | role.yaml 77 | 78 | kind: Role 79 | apiVersion: rbac.authorization.k8s.io/v1beta1 80 | metadata: 81 | namespace: development 82 | name: developer-role 83 | rules: 84 | - apiGroups: ["", "extensions", "apps"] 85 | resources: ["deployments", "replicasets", "pods"] 86 | verbs: ["get", "list", "watch", "create", "update"] 87 | 88 | 89 | rolebinding.yaml 90 | 91 | kind: RoleBinding 92 | apiVersion: rbac.authorization.k8s.io/v1beta1 93 | metadata: 94 | name: development-binding 95 | namespace: development 96 | subjects: 97 | - kind: User 98 | name: developer 99 | apiGroup: "" 100 | roleRef: 101 | kind: Role 102 | name: developer-role 103 | apiGroup: "" 104 | 105 | ~~~ 106 | 107 | 108 | Observations - 109 | 110 | > In the Role, the namespace is specified as development and the rules specify the policies that will be granted by this role. 111 | 112 | > In the RoleBinding, the namespace is specified as development, the subject is specified as developer (the same value as the CN) and the binding is between the developer user and the Role created above - developer-role. We will now create these policies - 113 | 114 | ` kubectl create ns development` 115 | 116 | ` kubectl create -f role.yaml -f rolebinding.yaml` 117 | 118 | ~~~ 119 | kubectl create -f role.yaml -f rolebinding.yaml 120 | role.rbac.authorization.k8s.io/developer-role created 121 | rolebinding.rbac.authorization.k8s.io/development-binding created 122 | ~~~ 123 | 124 | > We will now distribute the certificates to the end user - i.e.
the developer user created on our system. We **SHOULD NOT** provide the CA private key to any user. 125 | 126 | ` cd developercerts` 127 | 128 | ` rm ca.key ` 129 | 130 | ` cd .. ` 131 | 132 | ` cp -R developercerts ~developer/ ` 133 | 134 | ` chown -R developer:developer ~developer/developercerts/ ` 135 | 136 | 137 | > Get the API server URL as below and save this URL - 138 | 139 | ``` 140 | grep -i server ~/.kube/config 141 | server: https://10.142.15.209:6443 142 | ``` 143 | 144 | ` sudo -iu developer` 145 | 146 | > Now that we have copied over the certificates to the developer user - we will now use the developer user to run kubectl commands to create the developer kubeconfig files - 147 | 148 | ` cd developercerts` 149 | 150 | ` kubectl config set-cluster usercluster --server=https://10.142.15.209:6443` 151 | 152 | ` kubectl config set-cluster usercluster --certificate-authority=ca.crt` 153 | 154 | ` kubectl config set-credentials developer --client-key=user.key --client-certificate=user.crt` 155 | 156 | ` kubectl config set-context userspace --cluster=usercluster --namespace=development --user=developer` 157 | 158 | ` kubectl config use-context userspace` 159 | 160 | 161 | > Your configuration is now set. You have successfully authorized the user - developer to perform only few actions on the namespace development. Lets run some commands to test - 162 | 163 | ` kubectl run nginx --image=nginx` 164 | 165 | ``` 166 | kubectl get pods 167 | NAME READY STATUS RESTARTS AGE 168 | nginx-7db9fccd9b-pl9np 1/1 Running 0 6s 169 | ``` 170 | 171 | > Verify the deployment - 172 | 173 | ``` 174 | kubectl get deploy 175 | NAME READY UP-TO-DATE AVAILABLE AGE 176 | nginx 1/1 1 1 38s 177 | ``` 178 | 179 | > Delete the deployment 180 | 181 | ` kubectl delete deploy nginx ` 182 | 183 | Observations - 184 | 185 | > As per our understanding the DELETE action was not assigned to the user - developer. Running the above command should give us an error related to authorization 186 | 187 | ``` 188 | kubectl delete deploy nginx 189 | Error from server (Forbidden): deployments.extensions "nginx" is forbidden: User "developer" cannot delete resource "deployments" in API group "extensions" in the namespace "development" 190 | ``` 191 | 192 | > Run a few additional commands to prove that we have limited access - 193 | 194 | ``` 195 | $ kubectl get namespaces 196 | Error from server (Forbidden): namespaces is forbidden: User "developer" cannot list resource "namespaces" in API group "" at the cluster scope 197 | 198 | $ kubectl get nodes 199 | Error from server (Forbidden): nodes is forbidden: User "developer" cannot list resource "nodes" in API group "" at the cluster scope 200 | 201 | $ kubectl delete pods --all 202 | Error from server (Forbidden): pods "nginx-7db9fccd9b-pl9np" is forbidden: User "developer" cannot delete resource "pods" in API group "" in the namespace "development" 203 | 204 | ``` 205 | 206 | 207 | 208 | 209 | 210 | 211 | 212 | 213 | 214 | 215 | 216 | 217 | 218 | 219 | 220 | 221 | 222 | 223 | 224 | 225 | 226 | 227 | -------------------------------------------------------------------------------- /services/README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Service 2 | 3 | A service is an abstract way to expose an application running on a set of Pods as a network service. With Kubernetes you don’t need to modify your application to use an unfamiliar service discovery mechanism. 
Kubernetes gives Pods their own IP addresses and a single DNS name for a set of Pods, and can load-balance across them. 4 | 5 | Let's first create a simple deployment - 6 | 7 | ``` 8 | kubectl run nginx --image=nginx 9 | ``` 10 | 11 | We will now use the deployment `nginx` to create a service resource on top of it. 12 | 13 | A service can be created either using a YAML file or by using the kubectl utility. 14 | 15 | ## Cluster IP 16 | 17 | ``` 18 | vi service.yaml 19 | 20 | apiVersion: v1 21 | kind: Service 22 | metadata: 23 | name: nginx-clusterip 24 | spec: 25 | selector: 26 | run: nginx 27 | ports: 28 | - protocol: TCP 29 | port: 80 30 | targetPort: 80 31 | type: ClusterIP 32 | 33 | ``` 34 | 35 | OR 36 | 37 | ``` 38 | kubectl expose deploy nginx --port=80 --type=ClusterIP --name=nginx-clusterip 39 | ``` 40 | 41 | ## NodePort 42 | 43 | ``` 44 | vi service-nodeport.yaml 45 | 46 | apiVersion: v1 47 | kind: Service 48 | metadata: 49 | name: nginx-nodeport 50 | spec: 51 | selector: 52 | run: nginx 53 | ports: 54 | - protocol: TCP 55 | port: 80 56 | targetPort: 80 57 | type: NodePort 58 | 59 | ``` 60 | 61 | OR 62 | 63 | ``` 64 | kubectl expose deploy nginx --port=80 --type=NodePort --name=nginx-nodeport 65 | 66 | ``` 67 | 68 | ## LoadBalancer (Only on top of cloud providers) 69 | 70 | ``` 71 | vi service-lb.yaml 72 | 73 | apiVersion: v1 74 | kind: Service 75 | metadata: 76 | name: nginx-lb 77 | spec: 78 | selector: 79 | run: nginx 80 | ports: 81 | - protocol: TCP 82 | port: 80 83 | targetPort: 80 84 | type: LoadBalancer 85 | ``` 86 | 87 | OR 88 | 89 | ``` 90 | kubectl expose deploy nginx --port=80 --type=LoadBalancer --name=nginx-lb 91 | ``` 92 | 93 | To view the services, use the `kubectl get service` command 94 | -------------------------------------------------------------------------------- /storageclass/README.md: -------------------------------------------------------------------------------- 1 | # StorageClass 2 | 3 | A StorageClass is a mechanism for defining different classes of storage in Kubernetes. Your Kubernetes administrator, along with your storage administrator, might classify the different types of storage available in your organization and reference them in Kubernetes. These storage classes can then be directly referenced in a PersistentVolumeClaim, which can later be assigned to a pod. 4 | 5 | A StorageClass definition requires the below information - 6 | 7 | * Provisioners - AWSElasticBlockStore, AzureFile, AzureDisk, GCEPersistentDisk, Glusterfs, iSCSI, NFS, VsphereVolume etc. 8 | * Parameters - type of storage (pd, ssd, magnetic), diskformat, datastore etc. 9 | * Reclaim Policy - Retain or Delete 10 | 11 | Kubernetes ships some provisioners, which are also called internal provisioners. Some examples are EBS, Azure Disk, GCE PD etc. These internal provisioners are usually referenced with a prefix of `kubernetes.io`. The Kubernetes incubator repository also has a variety of external provisioners which can be used with storage types that don't have an internal provisioner. A few examples of external provisioners are the AWS EFS provisioner, CephFS, iSCSI, FlexVolumes, etc. 12 | 13 | Storage classes help in dynamic provisioning of PVs, which means that your developers/devops need not worry about provisioning PVs beforehand. Your Kubernetes administrator can set a default storage class for your cluster. If a PVC doesn't specify a PV or a storage class name, the default storage class is used. Such a PVC automatically creates a new PV and the corresponding storage is then assigned.
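To make this concrete, below is an illustrative StorageClass for an SSD-backed GCE persistent disk. This is only a sketch: the name `fast-ssd` is made up for this example and is not used elsewhere in this repo; a PVC would opt into it explicitly by setting `storageClassName: fast-ssd` instead of relying on the default class.

```
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: fast-ssd                  # hypothetical name, chosen for this example
provisioner: kubernetes.io/gce-pd # internal GCE PD provisioner
parameters:
  type: pd-ssd                    # SSD-backed disk instead of pd-standard
reclaimPolicy: Delete             # delete the PV (and the disk) when the PVC is deleted
volumeBindingMode: Immediate      # bind and provision as soon as the PVC is created
```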
14 | 15 | Managed kubernetes services like AKS, EKS and GKE provides a default storage class which points to their respective disk storage. 16 | 17 | ## Creating a dynamic PVC on top of GKE 18 | 19 | **PRE-REQUISITE** - A provisioned GKE cluster 20 | 21 | Create a basic PVC yaml - 22 | 23 | ``` 24 | vi pvc.yaml 25 | 26 | apiVersion: v1 27 | kind: PersistentVolumeClaim 28 | metadata: 29 | name: storageclassdemo 30 | spec: 31 | accessModes: 32 | - ReadWriteOnce 33 | resources: 34 | requests: 35 | storage: 30Gi 36 | 37 | ``` 38 | 39 | We are not defining any storageclass in this PVC, which means it will point to the default storage class. 40 | 41 | Now lets check the default storage class in gke - 42 | 43 | ``` 44 | kubectl get storageclass 45 | 46 | NAME PROVISIONER AGE 47 | standard (default) kubernetes.io/gce-pd 120m 48 | 49 | ``` 50 | 51 | We will now view the yaml of this storage class 52 | 53 | ``` 54 | kubectl get storageclass standard -o yaml 55 | 56 | allowVolumeExpansion: true 57 | apiVersion: storage.k8s.io/v1 58 | kind: StorageClass 59 | metadata: 60 | annotations: 61 | storageclass.kubernetes.io/is-default-class: "true" 62 | creationTimestamp: "2019-11-27T17:22:58Z" 63 | labels: 64 | addonmanager.kubernetes.io/mode: EnsureExists 65 | kubernetes.io/cluster-service: "true" 66 | name: standard 67 | resourceVersion: "298" 68 | selfLink: /apis/storage.k8s.io/v1/storageclasses/standard 69 | uid: 8e8b089a-113a-11ea-8807-42010a8e0194 70 | parameters: 71 | type: pd-standard 72 | provisioner: kubernetes.io/gce-pd 73 | reclaimPolicy: Delete 74 | volumeBindingMode: Immediate 75 | 76 | ``` 77 | 78 | Below are some important details - 79 | 80 | provisioner: kubernetes.io/gce-pd 81 | reclaimPolicy: Delete 82 | parameters: type: pd-standard 83 | 84 | 85 | Lets now create the pvc and see what happens - 86 | 87 | ``` 88 | kubectl create -f pvc.yaml 89 | 90 | NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE 91 | persistentvolume/pvc-a0afb7ee-114b-11ea-8807-42010a8e0194 30Gi RWO Delete Bound default/storageclassdemo standard 2s 92 | 93 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE 94 | persistentvolumeclaim/storageclassdemo Bound pvc-a0afb7ee-114b-11ea-8807-42010a8e0194 30Gi RWO standard 6s 95 | 96 | ``` 97 | 98 | A dynamic PV is now provisioned. 99 | 100 | Verify on your GCP account -> Compute Engine -> Disks to see if a new disk is dynamically created of 30GB. 101 | 102 | 103 | -------------------------------------------------------------------------------- /test: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hub-kubernetes/kubernetes-CKA/b2e4d2fb317678eb6557465b54a266abce31afb8/test -------------------------------------------------------------------------------- /testfilecollaborator: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hub-kubernetes/kubernetes-CKA/b2e4d2fb317678eb6557465b54a266abce31afb8/testfilecollaborator --------------------------------------------------------------------------------