├── architecture ├── kubernetes-kubectl.png ├── kubernetes-kubelet.png ├── kubernetes-pod-new.png ├── kubernetes-deployment.png ├── kubernetes-master-node.png ├── kubernetes-pod-new (1).png ├── kubernetes-worker-node.png ├── kubernetes-deployment (1).png └── README.md ├── install ├── bootstrap.sh └── baremetal │ └── README.md ├── concept ├── day-1 │ ├── daemonset.yaml │ ├── deployment.yaml │ ├── cronjob.yaml │ ├── introduction.md │ └── getting-started.adoc ├── templates │ └── deployment.yaml └── day-0 │ └── k8s-for-docker.adoc ├── labs ├── helm │ ├── prometheus │ │ ├── rbac-config.yaml │ │ └── README.md │ ├── dashboard │ │ └── README.md │ └── README.md ├── kubernetes-openfaas.md ├── nginx │ └── README.md └── kubernetes-gce-lab │ └── README.md └── README.md /architecture/kubernetes-kubectl.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ajeetraina/kubernetes101/HEAD/architecture/kubernetes-kubectl.png -------------------------------------------------------------------------------- /architecture/kubernetes-kubelet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ajeetraina/kubernetes101/HEAD/architecture/kubernetes-kubelet.png -------------------------------------------------------------------------------- /architecture/kubernetes-pod-new.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ajeetraina/kubernetes101/HEAD/architecture/kubernetes-pod-new.png -------------------------------------------------------------------------------- /architecture/kubernetes-deployment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ajeetraina/kubernetes101/HEAD/architecture/kubernetes-deployment.png -------------------------------------------------------------------------------- /architecture/kubernetes-master-node.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ajeetraina/kubernetes101/HEAD/architecture/kubernetes-master-node.png -------------------------------------------------------------------------------- /architecture/kubernetes-pod-new (1).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ajeetraina/kubernetes101/HEAD/architecture/kubernetes-pod-new (1).png -------------------------------------------------------------------------------- /architecture/kubernetes-worker-node.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ajeetraina/kubernetes101/HEAD/architecture/kubernetes-worker-node.png -------------------------------------------------------------------------------- /architecture/kubernetes-deployment (1).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ajeetraina/kubernetes101/HEAD/architecture/kubernetes-deployment (1).png -------------------------------------------------------------------------------- /install/bootstrap.sh: -------------------------------------------------------------------------------- 1 | ## Script to set up a K8s cluster 2 | 3 | kubeadm init --apiserver-advertise-address $(hostname -i) 4 | mkdir -p $HOME/.kube 5 | cp -i /etc/kubernetes/admin.conf $HOME/.kube/config && chown $(id -u):$(id -g) $HOME/.kube/config 6 | kubectl apply -n kube-system -f \ 7 |
"https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 |tr -d '\n')" 8 | -------------------------------------------------------------------------------- /concept/day-1/daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: DaemonSet 3 | metadata: 4 | name: prometheus-daemonset 5 | spec: 6 | template: 7 | metadata: 8 | labels: 9 | tier: monitoring 10 | name: prometheus-exporter 11 | spec: 12 | containers: 13 | - name: prometheus 14 | image: prom/node-exporter 15 | ports: 16 | - containerPort: 80 17 | -------------------------------------------------------------------------------- /concept/day-1/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | spec: 6 | replicas: 3 7 | template: 8 | metadata: 9 | labels: 10 | app: nginx 11 | spec: 12 | containers: 13 | - name: nginx 14 | image: nginx:1.12.1 15 | imagePullPolicy: IfNotPresent 16 | ports: 17 | - containerPort: 80 18 | - containerPort: 443 19 | -------------------------------------------------------------------------------- /concept/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | spec: 6 | replicas: 3 7 | template: 8 | metadata: 9 | labels: 10 | app: nginx 11 | spec: 12 | containers: 13 | - name: nginx 14 | image: nginx:1.12.1 15 | imagePullPolicy: IfNotPresent 16 | ports: 17 | - containerPort: 80 18 | - containerPort: 443 19 | -------------------------------------------------------------------------------- /labs/helm/prometheus/rbac-config.yaml: -------------------------------------------------------------------------------- 1 | [root@k8s-master3 ~]# cat rbac-config.yaml 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: tiller 6 | namespace: default 7 | --- 8 | apiVersion: rbac.authorization.k8s.io/v1beta1 9 | kind: ClusterRoleBinding 10 | metadata: 11 | name: tiller 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: ClusterRole 15 | name: cluster-admin 16 | subjects: 17 | - kind: ServiceAccount 18 | name: tiller 19 | namespace: default 20 | -------------------------------------------------------------------------------- /concept/day-1/cronjob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v2alpha1 2 | kind: CronJob 3 | metadata: 4 | name: hello 5 | spec: 6 | schedule: "*/1 * * * *" 7 | jobTemplate: 8 | spec: 9 | template: 10 | metadata: 11 | labels: 12 | app: hello-cronpod 13 | spec: 14 | containers: 15 | - name: hello 16 | image: busybox 17 | args: 18 | - /bin/sh 19 | - -c 20 | - date; echo Hello World! 21 | restartPolicy: OnFailure 22 | -------------------------------------------------------------------------------- /labs/kubernetes-openfaas.md: -------------------------------------------------------------------------------- 1 | # Deploying OpenFaas on Docker for Mac 18.05.0 RC1 2 | 3 | ## Pre-requisite: 4 | 5 | - Docker for Mac 18.05.0 RC1 6 | -Enable Kubernetes 7 | 8 | ## Cloning the Fass-netes Repository 9 | 10 | ``` 11 | git clone https://github.com/openfaas/faas-netes 12 | ``` 13 | 14 | ``` 15 | git clone https://github.com/openfaas/faas-netes 16 | Cloning into 'faas-netes'... 17 | remote: Counting objects: 3517, done. 
18 | remote: Compressing objects: 100% (19/19), done. 19 | remote: Total 3517 (delta 7), reused 12 (delta 3), pack-reused 3494 20 | Receiving objects: 100% (3517/3517), 4.21 21 | ``` 22 | 23 | ## Bring up OpenFaas-netes Stack 24 | 25 | ``` 26 | kubectl apply -f ./namespaces.yml,./yaml 27 | ``` 28 | 29 | ``` 30 | namespace "openfaas" created 31 | namespace "openfaas-fn" created 32 | deployment "alertmanager" created 33 | service "alertmanager" created 34 | configmap "alertmanager-config" created 35 | deployment "faas-netesd" created 36 | service "faas-netesd" created 37 | deployment "gateway" created 38 | service "gateway" created 39 | deployment "nats" created 40 | service "nats" created 41 | deployment "prometheus" created 42 | service "prometheus" created 43 | configmap "prometheus-config" created 44 | deployment "queue-worker" created 45 | serviceaccount "faas-controller" created 46 | role "faas-controller" created 47 | rolebinding "faas-controller-fn" created 48 | ``` 49 | 50 | 51 | ``` 52 | kubectl get po --namespace openfaas 53 | NAME READY STATUS RESTARTS AGE 54 | alertmanager-864794d49d-zvqs7 1/1 Running 0 3m 55 | faas-netesd-5d58bd4c7c-575l2 1/1 Running 0 3m 56 | gateway-649bbbf944-9z2zf 1/1 Running 1 3m 57 | nats-6c4f7df-9q9sf 1/1 Running 0 3m 58 | prometheus-d67b9ff9c-v44jz 1/1 Running 0 3m 59 | queue-worker-5dfb769469-2lh8f 1/1 Running 0 3m 60 | ``` 61 | 62 | 63 | -------------------------------------------------------------------------------- /labs/helm/dashboard/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ``` 4 | [node1 ~]$ helm install stable/kubernetes-dashboard 5 | NAME: loopy-anteater 6 | LAST DEPLOYED: Sun Oct 28 10:06:29 2018 7 | NAMESPACE: default 8 | STATUS: DEPLOYED 9 | 10 | RESOURCES: 11 | ==> v1/Service 12 | NAME AGE 13 | loopy-anteater-kubernetes-dashboard 0s 14 | 15 | ==> v1beta1/Deployment 16 | loopy-anteater-kubernetes-dashboard 0s 17 | 18 | ==> v1/Pod(related) 19 | 20 | NAME READY STATUS RESTARTS AGE 21 | loopy-anteater-kubernetes-dashboard-d49cf484b-s87mm 0/1 ContainerCreating 0 0s 22 | 23 | ==> v1/Secret 24 | 25 | NAME AGE 26 | loopy-anteater-kubernetes-dashboard 0s 27 | 28 | ==> v1/ServiceAccount 29 | loopy-anteater-kubernetes-dashboard 0s 30 | 31 | ==> v1beta1/Role 32 | loopy-anteater-kubernetes-dashboard 0s 33 | 34 | ==> v1beta1/RoleBinding 35 | loopy-anteater-kubernetes-dashboard 0s 36 | 37 | 38 | NOTES: 39 | ********************************************************************************* 40 | *** PLEASE BE PATIENT: kubernetes-dashboard may take a few minutes to install *** 41 | ********************************************************************************* 42 | 43 | Get the Kubernetes Dashboard URL by running: 44 | export POD_NAME=$(kubectl get pods -n default -l "app=kubernetes-dashboard,release=loopy-anteater" -o jsonpath="{.items[0].metadata.name}") 45 | echo https://127.0.0.1:8443/ 46 | kubectl -n default port-forward $POD_NAME 8443:8443 47 | 48 | [node1 ~]$ 49 | ``` 50 | 51 | ``` 52 | [node1 ~]$ helm list 53 | NAME REVISION UPDATED STATUS CHART APP VERSION NAMESPACE 54 | excited-elk 1 Sun Oct 28 10:00:02 2018 DEPLOYED prometheus-7.3.4 2.4.3 default 55 | loopy-anteater 1 Sun Oct 28 10:06:29 2018 DEPLOYED kubernetes-dashboard-0.7.5 1.10.0 default 56 | [node1 ~]$ 57 | ``` 58 | -------------------------------------------------------------------------------- /labs/nginx/README.md: -------------------------------------------------------------------------------- 1 | # Building our First nginx 
Application on a k8s cluster 2 | 3 | ``` 4 | [node1 ~]$ kubectl get nodes 5 | NAME STATUS ROLES AGE VERSION 6 | node1 Ready master 1h v1.10.2 7 | node2 Ready <none> 1h v1.10.2 8 | node3 Ready <none> 1h v1.10.2 9 | node4 Ready <none> 1h v1.10.2 10 | node5 Ready <none> 14m v1.10.2 11 | [node1 ~]$ 12 | 13 | ``` 14 | 15 | 16 | 17 | ## Running Nginx with 4 Replicas 18 | 19 | ``` 20 | kubectl run nginx --image=nginx:latest --replicas=4 21 | ``` 22 | 23 | ## Verifying the K8s Pods Are Up and Running 24 | 25 | 26 | ``` 27 | [node1 ~]$ kubectl get po 28 | NAME READY STATUS RESTARTS AGE 29 | nginx-5db977d67c-6sdfd 1/1 Running 0 2m 30 | nginx-5db977d67c-jfq9h 1/1 Running 0 2m 31 | nginx-5db977d67c-vs925 1/1 Running 0 2m 32 | nginx-5db977d67c-z5r45 1/1 Running 0 2m 33 | [node1 ~]$ 34 | 35 | ``` 36 | 37 | 38 | ## Watch the Pods 39 | 40 | ``` 41 | kubectl get pods -w 42 | ``` 43 | 44 | ## Expose the Nginx HTTP Port 45 | 46 | ``` 47 | 48 | kubectl expose deploy/nginx --port 80 49 | 50 | ``` 51 | 52 | ## Testing the Nginx Service 53 | 54 | ``` 55 | 56 | IP=$(kubectl get svc nginx -o go-template --template '{{ .spec.clusterIP }}') 57 | ``` 58 | 59 | Send a few requests: 60 | 61 | ``` 62 | [node1 ~]$ curl $IP:80 63 | <!DOCTYPE html> 64 | <html> 65 | <head> 66 | <title>Welcome to nginx!</title> 67 | <style> 68 | body { 69 | width: 35em; 70 | margin: 0 auto; 71 | font-family: Tahoma, Verdana, Arial, sans-serif; 72 | } 73 | </style> 74 | </head> 75 | <body>

76 | <h1>Welcome to nginx!</h1> 77 | <p>If you see this page, the nginx web server is successfully installed and 78 | working. Further configuration is required.</p> 79 | 80 | <p>For online documentation and support please refer to 81 | <a href="http://nginx.org/">nginx.org</a>.<br/> 82 | Commercial support is available at 83 | <a href="http://nginx.com/">nginx.com</a>.</p> 84 | 85 | <p><em>Thank you for using nginx.</em></p>
86 | </body> 87 | </html> 88 | [node1 ~]$ 89 | ``` 90 | -------------------------------------------------------------------------------- /concept/day-0/k8s-for-docker.adoc: -------------------------------------------------------------------------------- 1 | ## Kubernetes for Docker Users 2 | 3 | If you are a Docker user planning to try your hand at Kubernetes concepts, this is the right place to be. Let me get you started quickly. 4 | 5 | # A Simple Docker Image 6 | 7 | ## Tested Environment 8 | 9 | - Docker for Mac 10 | - Bootstrapped Kubernetes Environment 11 | 12 | ## Clone the Repository 13 | 14 | ``` 15 | git clone https://github.com/ajeetraina/hellowhale 16 | ``` 17 | 18 | ## Building the Image 19 | 20 | ``` 21 | docker build -t hellowhale . 22 | ``` 23 | 24 | ## Running the Docker Container 25 | 26 | ``` 27 | docker run -d -p 80:80 --name hellowhale hellowhale 28 | ``` 29 | 30 | ## Tagging the Image 31 | 32 | ``` 33 | docker tag hellowhale ajeetraina/hellowhale 34 | ``` 35 | 36 | ## Pushing it to Docker Hub 37 | 38 | ``` 39 | docker login 40 | ``` 41 | 42 | ``` 43 | docker push ajeetraina/hellowhale 44 | ``` 45 | 46 | ## Verifying the URL 47 | 48 | ``` 49 | open localhost:80 50 | ``` 51 | 52 | ## How to Deploy the Same Docker Container on Kubernetes 53 | 54 | ``` 55 | kubectl get nodes 56 | ``` 57 | 58 | ``` 59 | NAME STATUS ROLES AGE VERSION 60 | gke-k8s-lab1-default-pool-b2aaa29b-2gzh Ready <none> 16h v1.8.8-gke.0 61 | gke-k8s-lab1-default-pool-b2aaa29b-qpc7 Ready <none> 16h v1.8.8-gke.0 62 | gke-k8s-lab1-default-pool-b2aaa29b-w904 Ready <none> 16h v1.8.8-gke.0 63 | ``` 64 | 65 | ``` 66 | kubectl create deployment hellowhale --image ajeetraina/hellowhale 67 | deployment "hellowhale" created 68 | ``` 69 | 70 | ``` 71 | kubectl get deploy 72 | ``` 73 | 74 | ``` 75 | NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE 76 | hellowhale 1 1 1 1 10s 77 | ``` 78 | 79 | ``` 80 | kubectl get po 81 | ``` 82 | 83 | ``` 84 | NAME READY STATUS RESTARTS AGE 85 | hellowhale-6d46976f9b-bswrc 1/1 Running 0 16s 86 | kubectl expose deployment/hellowhale --port=80 --name=hellowhalesvc --type=LoadBalancer 87 | service "hellowhalesvc" exposed 88 | ``` 89 | 90 | ``` 91 | kubectl get svc 92 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 93 | hellowhalesvc LoadBalancer 10.15.244.72 <pending> 80:30355/TCP 16s 94 | kubernetes ClusterIP 10.15.240.1 <none> 443/TCP 15h 95 | ``` 96 | 97 | Wait for some time till the EXTERNAL-IP appears for hellowhalesvc. 98 | 99 | ``` 100 | kubectl get svc 101 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 102 | hellowhalesvc LoadBalancer 10.15.244.72 35.201.152.160 80:30355/TCP 1m 103 | kubernetes ClusterIP 10.15.240.1 <none> 443/TCP 15h 104 | ``` 105 | There you go! Go to the browser and type 35.201.152.160 to open up the fancy hellowhale page.
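For reference, the same app can be deployed declaratively. A minimal sketch of an equivalent manifest (the file name hellowhale.yaml is illustrative; it assumes the ajeetraina/hellowhale image built above):

```
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hellowhale
spec:
  replicas: 1
  selector:
    matchLabels:
      app: hellowhale
  template:
    metadata:
      labels:
        app: hellowhale
    spec:
      containers:
      - name: hellowhale
        image: ajeetraina/hellowhale
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: hellowhalesvc
spec:
  type: LoadBalancer
  selector:
    app: hellowhale
  ports:
  - port: 80
```

A single `kubectl apply -f hellowhale.yaml` then reproduces the imperative steps above.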
106 | -------------------------------------------------------------------------------- /labs/helm/README.md: -------------------------------------------------------------------------------- 1 | # Installing Helm on PWD 2 | 3 | ## Pre-requisite 4 | 5 | 6 | ``` 7 | [node1 install]$ kubectl get nodes 8 | NAME STATUS ROLES AGE VERSION 9 | node1 Ready master 2m v1.11.3 10 | node2 Ready <none> 48s v1.11.3 11 | node3 NotReady <none> 32s v1.11.3 12 | node4 NotReady <none> 13s v1.11.3 13 | node5 NotReady <none> 8s v1.11.3 14 | ``` 15 | 16 | ``` 17 | [node1 ~]$ kubectl get nodes -o json | 18 | > jq ".items[] | {name:.metadata.name} + .status.capacity" 19 | 20 | ``` 21 | ``` 22 | { 23 | "name": "node1", 24 | "cpu": "8", 25 | "ephemeral-storage": "10Gi", 26 | "hugepages-1Gi": "0", 27 | "hugepages-2Mi": "0", 28 | "memory": "32929612Ki", 29 | "pods": "110" 30 | } 31 | { 32 | "name": "node2", 33 | "cpu": "8", 34 | "ephemeral-storage": "10Gi", 35 | "hugepages-1Gi": "0", 36 | "hugepages-2Mi": "0", 37 | "memory": "32929612Ki", 38 | "pods": "110" 39 | } 40 | { 41 | "name": "node3", 42 | "cpu": "8", 43 | "ephemeral-storage": "10Gi", 44 | "hugepages-1Gi": "0", 45 | "hugepages-2Mi": "0", 46 | "memory": "32929612Ki", 47 | "pods": "110" 48 | } 49 | { 50 | "name": "node4", 51 | "cpu": "8", 52 | "ephemeral-storage": "10Gi", 53 | "hugepages-1Gi": "0", 54 | "hugepages-2Mi": "0", 55 | "memory": "32929612Ki", 56 | "pods": "110" 57 | } 58 | { 59 | "name": "node5", 60 | "cpu": "8", 61 | "ephemeral-storage": "10Gi", 62 | "hugepages-1Gi": "0", 63 | "hugepages-2Mi": "0", 64 | "memory": "32929612Ki", 65 | "pods": "110" 66 | } 67 | ``` 68 | 69 | ## Installing OpenSSL 70 | 71 | ``` 72 | [node1 ~]$ yum install openssl 73 | 74 | ``` 75 | 76 | 77 | ``` 78 | $ curl https://raw.githubusercontent.com/helm/helm/master/scripts/get > get_helm.sh 79 | $ chmod 700 get_helm.sh 80 | $ ./get_helm.sh 81 | ``` 82 | 83 | ``` 84 | [node1 ~]$ sh get_helm.sh 85 | Downloading https://kubernetes-helm.storage.googleapis.com/helm-v2.11.0-linux-amd64.tar.gz 86 | Preparing to install helm and tiller into /usr/local/bin 87 | helm installed into /usr/local/bin/helm 88 | tiller installed into /usr/local/bin/tiller 89 | get_helm.sh: line 177: which: command not found 90 | Run 'helm init' to configure helm. 91 | ``` 92 | 93 | ``` 94 | [node1 ~]$ helm init 95 | Creating /root/.helm 96 | Creating /root/.helm/repository 97 | Creating /root/.helm/repository/cache 98 | Creating /root/.helm/repository/local 99 | Creating /root/.helm/plugins 100 | Creating /root/.helm/starters 101 | Creating /root/.helm/cache/archive 102 | Creating /root/.helm/repository/repositories.yaml 103 | Adding stable repo with URL: https://kubernetes-charts.storage.googleapis.com 104 | Adding local repo with URL: http://127.0.0.1:8879/charts 105 | $HELM_HOME has been configured at /root/.helm. 106 | 107 | Tiller (the Helm server-side component) has been installed into your Kubernetes Cluster. 108 | 109 | Please note: by default, Tiller is deployed with an insecure 'allow unauthenticated users' policy. 110 | To prevent this, run `helm init` with the --tiller-tls-verify flag. 111 | For more information on securing your installation see: https://docs.helm.sh/using_helm/#securing-your-helm-installation 112 | Happy Helming 113 | ``` 114 | 115 |
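Once `helm init` completes, it's worth sanity-checking the install before moving on. A quick sketch (the `app=helm,name=tiller` labels are the ones Tiller's deployment is conventionally created with; Tiller may take a minute to become Ready):

```
kubectl get pods -n kube-system -l app=helm,name=tiller
helm version
```

`helm version` should report both a client and a server version once Tiller is up.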
-------------------------------------------------------------------------------- /architecture/README.md: -------------------------------------------------------------------------------- 1 | # What is Kubernetes? 2 | 3 | Kubernetes (commonly referred to as K8s) is an orchestration engine for container technologies such as Docker and rkt that has taken over the DevOps scene in the last couple of years. It is already available on Azure and Google Cloud as a managed service. 4 | 5 | Kubernetes can speed up the development process by making deployments and updates (rolling updates) easy and automated, and by managing our apps and services with almost zero downtime. It also provides self-healing: Kubernetes can detect and restart services when a process crashes inside the container. Kubernetes was originally developed by Google; it has been open source since its launch and is managed by a large community of contributors. 6 | 7 | Any developer can package up applications and deploy them on Kubernetes with basic Docker knowledge. 8 | 9 | # What is K8s made up of? 10 | 11 | ## Kubectl: 12 | 13 | - A CLI tool for Kubernetes 14 | 15 | ![alt text](https://github.com/ajeetraina/kubernetes101/blob/master/architecture/kubernetes-kubectl.png) 16 | 17 | 18 | 19 | ## Master Node: 20 | 21 | ![alt text](https://github.com/ajeetraina/kubernetes101/blob/master/architecture/kubernetes-master-node.png) 22 | 23 | - The main machine that controls the nodes 24 | - Main entrypoint for all administrative tasks 25 | - It handles the orchestration of the worker nodes 26 | 27 | ## Worker Node 28 | 29 | ![alt text](https://github.com/ajeetraina/kubernetes101/blob/master/architecture/kubernetes-worker-node.png) 30 | 31 | - It is a worker machine in Kubernetes (used to be known as a minion) 32 | - This machine performs the requested tasks. Each Node is controlled by the Master Node 33 | - Runs containers inside pods 34 | - This is where the Docker engine runs and takes care of downloading images and starting containers 35 | 36 | ## Kubelet 37 | 38 | ![alt text](https://github.com/ajeetraina/kubernetes101/blob/master/architecture/kubernetes-kubelet.png) 39 | 40 | - Primary node agent 41 | - Ensures that containers are running and healthy 42 | 43 | ## Kubernetes Pod: 44 | 45 | - A Pod can host multiple containers and storage volumes 46 | - Pods are instances of Deployments (see Deployment) 47 | - One Deployment can have multiple pods 48 | - With Horizontal Pod Autoscaling, Pods of a Deployment can be automatically started and halted based on CPU usage 49 | - Containers within the same pod have access to shared volumes 50 | - Each Pod has its own unique IP address within the cluster 51 | - Pods are up and running until someone (or a controller) destroys them 52 | - Any data saved inside the Pod will disappear without persistent storage 53 | 54 | ![alt text](https://github.com/ajeetraina/kubernetes101/blob/master/architecture/kubernetes-pod-new.png) 55 | 56 | 57 | ## Deployment: 58 | 59 | - A deployment is a blueprint for the Pods to be created (see Pod) 60 | - Handles updates of its respective Pods. 61 | - A deployment will create a Pod by its spec from the template. 62 | - Its goal is to keep the Pods running and to update them (with rolling updates) in a more controlled way. 63 | - Pod(s) resource usage can be specified in the deployment. 64 | - A deployment can scale up replicas of Pods. 65 | 66 | ![alt text](https://github.com/ajeetraina/kubernetes101/blob/master/architecture/kubernetes-deployment%20(1).png) 67 | 68 | 69 | ## Secret: 70 | 71 | - A Secret is an object where we can store sensitive information like usernames and passwords. 72 | - In the secret files, values are base64 encoded. 73 | - To use a secret, we need to refer to the secret in our Pod. 74 | - Or we can put it inside a volume and mount that to the container. 75 | - Secrets are not encrypted by default. For encryption we need to create an EncryptionConfig. 76 | - You can read more about encryption in the Kubernetes documentation.
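To make the Secret flow concrete, here is a minimal sketch of a Secret and a Pod consuming it as an environment variable (the names and the base64 value, `YWRtaW4=` for "admin", are illustrative):

```
apiVersion: v1
kind: Secret
metadata:
  name: db-credentials
type: Opaque
data:
  username: YWRtaW4=
---
apiVersion: v1
kind: Pod
metadata:
  name: secret-demo
spec:
  containers:
  - name: app
    image: nginx
    env:
    - name: DB_USERNAME
      valueFrom:
        secretKeyRef:
          name: db-credentials
          key: username
```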
77 | 78 | ## Service: 79 | 80 | - A service is responsible for making our Pods discoverable inside the network or exposing them to the internet 81 | - A Service identifies Pods by its LabelSelector 82 | - There are 3 types of services: 83 | 84 | ## ClusterIP: 85 | - The deployment is only visible inside the cluster 86 | - The deployment gets an internal ClusterIP assigned to it 87 | - Traffic is load balanced between the Pods of the deployment 88 | 89 | ## NodePort: 90 | - The deployment is visible inside the cluster 91 | - The deployment is bound to a port on every Node 92 | - Each Node will proxy that port to your Service 93 | - The service is available at http(s)://<NodeIP>:<NodePort>/ 94 | - Traffic is load balanced between the Pods of the deployment 95 | 96 | ## LoadBalancer: 97 | - The deployment gets a public IP address assigned 98 | - The service is available at http(s)://<Public-IP>:<80||443>/ 99 | - Traffic is load balanced between the Pods of the deployment 100 | 101 | ## Credits: 102 | - https://blog.risingstack.com/what-is-kubernetes-how-to-get-started/ 103 | 104 | -------------------------------------------------------------------------------- /labs/kubernetes-gce-lab/README.md: -------------------------------------------------------------------------------- 1 | # Setting up Kubernetes on Google Cloud Engine 2 | 3 | # Build the Kubernetes Cluster 4 | 5 | In this section, you will find code blocks with commands for you to copy & paste into the Google Cloud Shell (remote terminal window). We have excluded the command prompt itself (the dollar sign `$`) from the snippets. 6 | 7 | **Tips for those who have not used a command line in ages** 8 | 9 | It may take a few minutes for a command prompt to respond. However, if it ever gets stuck you can use `Ctrl+C` to interrupt the current command and get back to the command prompt. 10 | 11 | You can use the `Up Arrow` to navigate to previous commands in the command history. 12 | 13 | To copy from the terminal window, simply highlight the desired text and it will be auto-copied. You can then paste it as usual. 14 | 15 | ## 1. Sign In To Google Cloud 16 | 17 | Navigate to the Google Cloud portal: https://console.cloud.google.com/ 18 | 19 | For this workshop we will use the web-based terminal window, Google Cloud Shell, to avoid differences between operating systems and personal configurations. 20 | 21 | Open the Google Cloud Shell 22 | 23 | ![Cloud Shell](https://image.ibb.co/ccoxLF/cloudshell.png) 24 | 25 | List all the projects on Google Cloud to make sure everything is working and you're in the right place. 26 | 27 | ``` 28 | gcloud projects list 29 | ``` 30 | 31 | If you used Google Cloud before you may have more than one project. Make sure you change to your preferred project. If you never used Google Cloud before you can skip this command. 32 | 33 | ``` 34 | gcloud config set project [PROJECT_ID] 35 | ``` 36 | 37 | Set the default zone in Google Cloud for the workshop 38 | 39 | ``` 40 | gcloud config set compute/zone us-east1-c 41 | ``` 42 |
## 2. Clone the Workshop Git Repository 44 | 45 | Clone the following GitHub repo into the Cloud Shell to get local access to the workshop demo files 46 | 47 | ``` 48 | git clone https://github.com/ajeetraina/hands-on-with-kubernetes-gke 49 | ``` 50 | 51 | If you type `ls` into the command line you should see a new `hands-on-with-kubernetes-gke` directory. 52 | 53 | Change into the workshop directory you just cloned 54 | 55 | ``` 56 | cd hands-on-with-kubernetes-gke 57 | ``` 58 | 59 | ## 3. Provision a Cluster 60 | 61 | Run the following command to create a 3-node Kubernetes cluster in Google Container Engine (GKE). This may take a minute to respond and will run for several minutes while the cluster is provisioned. 62 | 63 | ``` 64 | gcloud container clusters create "k8sworkshop" \ 65 | --zone "us-east1-c" \ 66 | --machine-type "n1-standard-1" \ 67 | --image-type "GCI" --disk-size "100" \ 68 | --scopes cloud-platform \ 69 | --num-nodes "3" 70 | ``` 71 | 72 | When the command finishes executing, you will see output like this 73 | ![Imgur](http://i.imgur.com/zAMyyez.png) 74 | 75 | You can navigate to the Container Engine section of the Google Cloud Portal https://console.cloud.google.com/kubernetes/list. You should see your newly created Kubernetes cluster listed there. 76 | 77 | You can also navigate to the Compute Engine section to see the three virtual machines that were provisioned to power the new Kubernetes cluster https://console.cloud.google.com/compute/instances 78 | 79 | ## 4. Connect To The Cluster 80 | 81 | Make sure the Kubernetes command-line client (`kubectl`) is up to date 82 | 83 | ``` 84 | gcloud components install kubectl 85 | ``` 86 | 87 | Configure `kubectl` with the workshop cluster context/credentials 88 | 89 | ``` 90 | gcloud container clusters get-credentials k8sworkshop 91 | ``` 92 | 93 | Now verify that `kubectl` can connect to the cluster 94 | 95 | ``` 96 | kubectl cluster-info 97 | ``` 98 | 99 | Since we are in the Cloud Shell (essentially a VM in Google Cloud) we need to modify the Kubernetes Dashboard UI Service to expose it to the Internet 100 | 101 | ``` 102 | kubectl get svc kubernetes-dashboard -n kube-system -o yaml | \ 103 | sed "s/ClusterIP/LoadBalancer/" | \ 104 | kubectl apply -f - -n kube-system 105 | ``` 106 | 107 | Get the IP address of the Dashboard 108 | 109 | ``` 110 | kubectl get svc kubernetes-dashboard -n kube-system 111 | ``` 112 | 113 | The `EXTERNAL-IP` column may show `<pending>` for a short while as the Google Load Balancer is configured. You can simply use the `Up Arrow` to show the previous command and run it again until you see an IP address has been allocated. 114 | 115 | Grab the `EXTERNAL-IP` address (sample highlighted below) and paste it into a new browser tab or window (your IP will be different from the one shown below) 116 | 117 | ![IP Address](http://i.imgur.com/i1hlPV2.png) 118 | 119 | ## 5. Run "Hello World" 120 | 121 | Deploy a "Hello, World!" application to get something up and running on your new cluster 122 | 123 | ``` 124 | kubectl run hello-world \ 125 | --replicas=5 --labels="run=load-balancer-example" \ 126 | --image=gcr.io/google-samples/node-hello:1.0 \ 127 | --port=8080 128 | ``` 129 | 130 | Navigate to the "Workloads" section in the Kubernetes Dashboard you previously opened and you should see the new Deployment, ReplicaSet and Pods being created 131 | 132 | ![Imgur](http://i.imgur.com/j8oVACv.png) 133 | 134 |
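You can also verify the rollout from the command line. A quick sketch, using the label applied by the `kubectl run` command above:

```
kubectl get deployment hello-world
kubectl get pods -l run=load-balancer-example
```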
## 6. Tour of Dashboard (the official UI of Kubernetes) 135 | 136 | The instructor will give a tour of the Kubernetes Dashboard and cover the constructs of Kubernetes. 137 | 138 | They will then cover the demo apps found here https://github.com/ajeetraina/hands-on-with-kubernetes-gke/tree/master/docs/demos 139 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes For You 2 | 3 | Are you new to Kubernetes? Want to build your career in Kubernetes? 4 | 5 | Then welcome! You are in the right place. 6 | 7 | This repository brings you tutorials that help you get hands-on experience using Kubernetes. Here you will find a mix of labs and tutorials that will help you whether you are a beginner, SysAdmin, IT pro, or developer. Yes, you read that correctly! It's a $0 learning platform. You don't need any infrastructure. Most of the tutorials run on the Play with K8s platform, a free, browser-based learning environment. Kubernetes tools like kubeadm, kompose & kubectl are already installed for you. All you need to do is get started. 8 | 9 | We recommend you start with one of our Beginners Guides, and then move to intermediate and expert level tutorials that cover most of the features of Kubernetes. For a comprehensive approach to understanding Kubernetes, I have categorized the content as shown below: 10 | 11 | [Kubernetes for Beginners](https://github.com/ajeetraina/docker101/blob/master/play-with-kubernetes/beginners/README.md)
12 | 13 | [Kubernetes for Intermediate](https://github.com/ajeetraina/docker101/blob/master/play-with-kubernetes/intermediate/README.md)
14 | 15 | [Kubernetes for Advanced Users](https://github.com/ajeetraina/docker101/blob/master/play-with-kubernetes/advanced/README.md)
16 | 17 | ## Getting Started with Kubernetes 18 | 19 | To get started with Kubernetes, follow the below steps: 20 | 21 | - Open https://labs.play-with-k8s.com/ in your browser 22 | 23 | 24 | Click on Add New Instance to set up the first k8s cluster node 25 | 26 | ## Cloning the Repository 27 | 28 | ``` 29 | git clone https://github.com/ajeetraina/kubernetes101/ 30 | cd kubernetes101/install 31 | 32 | ``` 33 | 34 | ## Bootstrapping the First Cluster Node 35 | 36 | ``` 37 | sh bootstrap.sh 38 | ``` 39 | 40 | ## Adding a New K8s Cluster Node 41 | 42 | Click on Add New Instance to set up another k8s cluster node 43 | 44 | Wait for a minute till it completes. 45 | 46 | Copy the command starting with `kubeadm join ...`. We will need to run it on the worker node. 47 | 48 | 49 | ## Setting up the Worker Node 50 | 51 | Click on "Add New Instance" and paste the kubeadm join command on this fresh new worker node. 52 | 53 | ``` 54 | [node2 ~]$ kubeadm join --token 4f924f.14eb7618a20d2ece 192.168.0.8:6443 --discovery-token-ca-cert-hash sha256:a5c25aa4573e06a0c11b11df23c8f85c95bae36cbb07d5e7879d9341a3ec67b3 55 | ``` 56 | 57 | You will see the below output: 58 | 59 | ``` 60 | [kubeadm] WARNING: kubeadm is in beta, please do not use it for production clusters. 61 | [preflight] Skipping pre-flight checks
[discovery] Trying to connect to API Server "192.168.0.8:6443" 62 | [discovery] Created cluster-info discovery client, requesting info from "https://192.168.0.8:6443" 63 | [discovery] Requesting info from "https://192.168.0.8:6443" again to validate TLS against the pinned public key 64 | [discovery] Cluster info signature and contents are valid and TLS certificate validates against pinned roots, will use API Server "192.168.0.8:6443"
[discovery] Successfully established connection with API Server "192.168.0.8:6443" 65 | [bootstrap] Detected server version: v1.8.15 66 | [bootstrap] The server supports the Certificates API (certificates.k8s.io/v1beta1) 67 | Node join complete: 68 | * Certificate signing request sent to master and response 69 | received. 70 | * Kubelet informed of new secure connection details. 71 | 72 | Run 'kubectl get nodes' on the master to see this machine join. 73 | [node2 ~]$ 74 | 75 | ``` 76 | 77 | # Verifying the Kubernetes Cluster 78 | 79 | Run the below command on the master node 80 | 81 | ``` 82 | [node1 ~]$ kubectl get nodes 83 | NAME STATUS ROLES AGE VERSION 84 | node1 Ready master 15m v1.11.3 85 | node2 Ready <none> 1m v1.11.3 86 | [node1 ~]$ 87 | ``` 88 | 89 | ## Adding Worker Nodes 90 | 91 | ``` 92 | [node1 ~]$ kubectl get nodes 93 | NAME STATUS ROLES AGE VERSION 94 | node1 Ready master 18m v1.11.3 95 | node2 Ready <none> 4m v1.11.3 96 | node3 Ready <none> 39s v1.11.3 97 | node4 NotReady <none> 22s v1.11.3 98 | node5 NotReady <none> 4s v1.11.3 99 | [node1 ~]$ 100 | ``` 101 | 102 | ``` 103 | [node1 ]$ kubectl get po 104 | No resources found. 105 | ``` 106 | 107 | ``` 108 | [node1 ]$ kubectl get svc 109 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 110 | kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 1h 111 | [node1 istio]$ 112 | ``` 113 | 114 | 115 | # Learn Kubernetes in 10 Days 116 | 117 | [Day-0: What is Kubernetes? What are its components](https://github.com/ajeetraina/kubernetes101/blob/master/architecture/README.md)
117 | [Day-1: Getting Started with Pods, Services & Replicasets](https://github.com/ajeetraina/kubernetes101/blob/master/concept/day-1/getting-started.adoc)
118 | [Day-2: Kubernetes on AWS](https://github.com/ajeetraina/kubernetes-aws-workshop)
119 | [Day-3: Kubernetes on GCP](https://github.com/ajeetraina/kubernetes101/blob/master/labs/kubernetes-gce-lab/README.md)
120 | [Day-4: Kubernetes on Azure](https://github.com/ajeetraina/hands-on-with-kubernetes-azure)
121 | [Day-5: Kubernetes on Vagrant](https://github.com/ajeetraina/vagrant-kubernetes-lab)
122 | [Day-6: Kubernetes & Networking](https://collabnix.github.io/kubelabs/ClusterNetworking101/#Cluster-Networking)
123 | [Day-7: Kubernetes & Network Policy](https://collabnix.github.io/kubelabs/Network_Policies101/)
124 | [Day-8: Kubernetes & Monitoring](https://collabnix.github.io/kubelabs/Monitoring101/#Monitoring-in-Kubernetes)
125 | [Day-9: Kubernetes & Service Catalog](https://collabnix.github.io/kubelabs/ServiceCatalog101/what-is-service-catalog.html)
126 | [Day-10: Kubernetes & RBAC](https://collabnix.github.io/kubelabs/RBAC101/#role-based-access-control-rbac)
127 | [Day-11: Kubernetes & Ingress](https://collabnix.github.io/kubelabs/Ingress101/)
128 | [Day-12: Kubernetes and Jobs](https://collabnix.github.io/kubelabs/Jobs101/#creating-your-first-kubernetes-job)
129 | 130 | # Kubernetes on Collabnix.com 131 | 132 | [5 Minutes to Bootstrap Kubernetes Cluster on GKE using Docker for Mac 18.03.0](http://collabnix.com/bootstrapping-kubernetes-cluster-using-docker-for-mac-18-03-0-ce-edition/)
133 | [Context Switching Made Easy under Kubernetes powered Docker for Mac 18.02.0](http://collabnix.com/namespace-context-toggling-made-easy-under-docker-for-mac-18-02-release/)
134 | [2-minutes to Kubernetes Cluster on Docker for Mac 18.01 using Swarm CLI](http://collabnix.com/running-kubernetes-cluster-on-docker-for-mac-18-01-using-swarm-cli/)
135 | [3 Minutes to Single Node Kubernetes cluster on Docker for Mac Platform](http://collabnix.com/3-minutes-to-single-node-kubernetes-cluster-on-docker-for-mac-platform/)
136 | [When Kubernetes Meet Docker Swarm for the First time under Docker for Mac 17.12 Release](http://collabnix.com/integration-of-docker-swarm-kubernetes-under-docker-for-mac-platform/)
137 | [A First Look at Kubernetes Integrated Docker For Mac Platform](http://collabnix.com/a-first-look-at-kubernetes-integrated-docker-for-mac-platform/)
138 | [When Moby Meet Kubernetes for the first time](http://collabnix.com/when-linuxkit-meet-kubernetes-for-the-first-time/)
139 | [How to Build Kubernetes Cluster using CRI-containerd & Moby](http://collabnix.com/building-multi-node-kubernetes-cluster-using-linuxkit-cri-containerd/)
140 | [Getting Started with Multi-Node Kubernetes Cluster using LinuxKit](http://collabnix.com/getting-started-with-multi-node-kubernetes-cluster-using-linuxkit/) 141 | 142 | 143 | 144 | 145 | -------------------------------------------------------------------------------- /labs/helm/prometheus/README.md: -------------------------------------------------------------------------------- 1 | # Installing Prometheus using Helm on Play with Kubernetes Platform 2 | 3 | ``` 4 | [node1 ~]$ helm search prometheus 5 | NAME CHART VERSION APP VERSION DESCRIPTION 6 | stable/prometheus 7.3.4 2.4.3 Prometheus is a monitoring system and time series database. 7 | stable/prometheus-adapter v0.2.0 v0.2.1 A Helm chart for k8s prometheus adapter 8 | stable/prometheus-blackbox-exporter 0.1.3 0.12.0 Prometheus Blackbox Exporter 9 | stable/prometheus-cloudwatch-exporter 0.2.1 0.5.0 A Helm chart for prometheus cloudwatch-exporter 10 | stable/prometheus-couchdb-exporter 0.1.0 1.0 A Helm chart to export the metrics from couchdb in Promet... 11 | stable/prometheus-mysql-exporter 0.2.1 v0.11.0 A Helm chart for prometheus mysql exporter with cloudsqlp... 12 | stable/prometheus-node-exporter 0.5.0 0.16.0 A Helm chart for prometheus node-exporter 13 | stable/prometheus-operator 0.1.7 0.24.0 Provides easy monitoring definitions for Kubernetes servi... 14 | stable/prometheus-postgres-exporter 0.5.0 0.4.6 A Helm chart for prometheus postgres-exporter 15 | stable/prometheus-pushgateway 0.1.3 0.6.0 A Helm chart for prometheus pushgateway 16 | stable/prometheus-rabbitmq-exporter 0.1.4 v0.28.0 Rabbitmq metrics exporter for prometheus 17 | stable/prometheus-redis-exporter 0.3.2 0.21.1 Prometheus exporter for Redis metrics 18 | stable/prometheus-to-sd 0.1.1 0.2.2 Scrape metrics stored in prometheus format and push them ... 19 | stable/elasticsearch-exporter 0.4.0 1.0.2 Elasticsearch stats exporter for Prometheus 20 | stable/karma 1.1.2 v0.14 A Helm chart for Karma - an UI for Prometheus Alertmanager 21 | stable/stackdriver-exporter 0.0.4 0.5.1 Stackdriver exporter for Prometheus 22 | stable/weave-cloud 0.3.0 1.1.0 Weave Cloud is a add-on to Kubernetes which provides Cont... 23 | stable/kube-state-metrics 0.9.0 1.4.0 Install kube-state-metrics to generate and expose cluster... 24 | stable/mariadb 5.2.2 10.1.36 Fast, reliable, scalable, and easy to use open-source rel... 25 | [node1 ~]$ 26 | ``` 27 | 28 | ## Update the Repo 29 | 30 | ``` 31 | [node1 ~]$ helm repo update 32 | Hang tight while we grab the latest from your chart repositories... 33 | ...Skip local chart repository 34 | ...Successfully got an update from the "stable" chart repository 35 | Update Complete. ⎈ Happy Helming!⎈ 36 | ``` 37 | 38 | ## Installing Prometheus 39 | 40 | ``` 41 | $helm install stable/prometheus 42 | ``` 43 | 44 | Error: 45 | namespaces "default" is forbidden: User "system:serviceaccount:kube-system:default" cannot get namespaces in the namespace "default" 46 | 47 | 48 | ## How to fix? 
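Tiller is running under the default service account here, which has no RBAC permissions to manage cluster resources, so chart installs are rejected. A sketch of the declarative fix (assuming the rbac-config.yaml shown earlier, with both namespaces changed to kube-system):

```
kubectl apply -f rbac-config.yaml
helm init --service-account tiller --upgrade
```

Or, imperatively: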
49 | 50 | ``` 51 | kubectl --namespace kube-system create serviceaccount tiller 52 | kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller 53 | helm init --service-account tiller --upgrade 54 | ``` 55 | 56 | ## Listing Helm 57 | 58 | ``` 59 | [node1 ~]$ helm list 60 | NAME REVISION UPDATED STATUS CHART APP VERSION NAMESPACE 61 | excited-elk 1 Sun Oct 28 10:00:02 2018 DEPLOYED prometheus-7.3.4 2.4.3 default 62 | ``` 63 | 64 | ``` 65 | [node1 ~]$ helm repo update 66 | Hang tight while we grab the latest from your chart repositories... 67 | ...Skip local chart repository 68 | ...Successfully got an update from the "stable" chart repository 69 | Update Complete. ⎈ Happy Helming!⎈ 70 | [node1 ~]$ helm install stable/prometheus 71 | NAME: excited-elk 72 | LAST DEPLOYED: Sun Oct 28 10:00:02 2018 73 | NAMESPACE: default 74 | STATUS: DEPLOYED 75 | 76 | RESOURCES: 77 | ==> v1beta1/DaemonSet 78 | NAME AGE 79 | excited-elk-prometheus-node-exporter 1s 80 | 81 | ==> v1/Pod(related) 82 | 83 | NAME READY STATUS RESTARTS AGE 84 | excited-elk-prometheus-node-exporter-7bjqc 0/1 ContainerCreating 0 1s 85 | excited-elk-prometheus-node-exporter-gbcd7 0/1 ContainerCreating 0 1s 86 | excited-elk-prometheus-node-exporter-tk56q 0/1 ContainerCreating 0 1s 87 | excited-elk-prometheus-node-exporter-tkk9b 0/1 ContainerCreating 0 1s 88 | excited-elk-prometheus-alertmanager-68f4f57c97-wrfjz 0/2 Pending 0 1s 89 | excited-elk-prometheus-kube-state-metrics-858d44dfdc-vt4wj 0/1 ContainerCreating 0 1s 90 | excited-elk-prometheus-pushgateway-58bfd54d6d-m4n69 0/1 ContainerCreating 0 1s 91 | excited-elk-prometheus-server-5958586794-b97xn 0/2 Pending 0 1s 92 | 93 | ==> v1/ConfigMap 94 | 95 | NAME AGE 96 | excited-elk-prometheus-alertmanager 1s 97 | excited-elk-prometheus-server 1s 98 | 99 | ==> v1/ServiceAccount 100 | excited-elk-prometheus-alertmanager 1s 101 | excited-elk-prometheus-kube-state-metrics 1s 102 | excited-elk-prometheus-node-exporter 1s 103 | excited-elk-prometheus-pushgateway 1s 104 | excited-elk-prometheus-server 1s 105 | 106 | ==> v1beta1/ClusterRole 107 | excited-elk-prometheus-kube-state-metrics 1s 108 | excited-elk-prometheus-server 1s 109 | 110 | ==> v1beta1/Deployment 111 | excited-elk-prometheus-alertmanager 1s 112 | excited-elk-prometheus-kube-state-metrics 1s 113 | excited-elk-prometheus-pushgateway 1s 114 | excited-elk-prometheus-server 1s 115 | 116 | ==> v1/PersistentVolumeClaim 117 | excited-elk-prometheus-alertmanager 1s 118 | excited-elk-prometheus-server 1s 119 | 120 | ==> v1beta1/ClusterRoleBinding 121 | excited-elk-prometheus-kube-state-metrics 1s 122 | excited-elk-prometheus-server 1s 123 | 124 | ==> v1/Service 125 | excited-elk-prometheus-alertmanager 1s 126 | excited-elk-prometheus-kube-state-metrics 1s 127 | excited-elk-prometheus-node-exporter 1s 128 | excited-elk-prometheus-pushgateway 1s 129 | excited-elk-prometheus-server 1s 130 | 131 | 132 | NOTES: 133 | The Prometheus server can be accessed via port 80 on the following DNS name from within your cluster: 134 | excited-elk-prometheus-server.default.svc.cluster.local 135 | 136 | 137 | Get the Prometheus server URL by running these commands in the same shell: 138 | export POD_NAME=$(kubectl get pods --namespace default -l "app=prometheus,component=server" -o jsonpath="{.items[0].metadata.name}") 139 | kubectl --namespace default port-forward $POD_NAME 9090 140 | 141 | 142 | The Prometheus alertmanager can be accessed via port 80 on the following DNS name from within your cluster: 143 | 
excited-elk-prometheus-alertmanager.default.svc.cluster.local 144 | 145 | 146 | Get the Alertmanager URL by running these commands in the same shell: 147 | export POD_NAME=$(kubectl get pods --namespace default -l "app=prometheus,component=alertmanager" -o jsonpath="{.items[0].metadata.name}") 148 | kubectl --namespace default port-forward $POD_NAME 9093 149 | 150 | 151 | The Prometheus PushGateway can be accessed via port 9091 on the following DNS name from within your cluster: 152 | excited-elk-prometheus-pushgateway.default.svc.cluster.local 153 | 154 | 155 | Get the PushGateway URL by running these commands in the same shell: 156 | export POD_NAME=$(kubectl get pods --namespace default -l "app=prometheus,component=pushgateway" -o jsonpath="{.items[0].metadata.name}") 157 | kubectl --namespace default port-forward $POD_NAME 9091 158 | 159 | For more information on running Prometheus, visit: 160 | https://prometheus.io/ 161 | ``` 162 | 163 | ``` 164 | [node1 ~]$ kubectl get all 165 | NAME READY STATUS RESTARTS AGE 166 | pod/excited-elk-prometheus-alertmanager-68f4f57c97-wrfjz 0/2 Pending 0 3m 167 | pod/excited-elk-prometheus-kube-state-metrics-858d44dfdc-vt4wj 1/1 Running 0 3m 168 | pod/excited-elk-prometheus-node-exporter-7bjqc 1/1 Running 0 3m 169 | pod/excited-elk-prometheus-node-exporter-gbcd7 1/1 Running 0 3m 170 | pod/excited-elk-prometheus-node-exporter-tk56q 1/1 Running 0 3m 171 | pod/excited-elk-prometheus-node-exporter-tkk9b 1/1 Running 0 3m 172 | pod/excited-elk-prometheus-pushgateway-58bfd54d6d-m4n69 1/1 Running 0 3m 173 | pod/excited-elk-prometheus-server-5958586794-b97xn 0/2 Pending 0 3m 174 | 175 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 176 | service/excited-elk-prometheus-alertmanager ClusterIP 10.106.159.46 80/TCP 3m 177 | service/excited-elk-prometheus-kube-state-metrics ClusterIP None 80/TCP 3m 178 | service/excited-elk-prometheus-node-exporter ClusterIP None 9100/TCP 3m 179 | service/excited-elk-prometheus-pushgateway ClusterIP 10.106.88.15 9091/TCP 3m 180 | service/excited-elk-prometheus-server ClusterIP 10.107.15.64 80/TCP 3m 181 | service/kubernetes ClusterIP 10.96.0.1 443/TCP 37m 182 | 183 | NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE 184 | daemonset.apps/excited-elk-prometheus-node-exporter 4 4 4 4 4 3m 185 | 186 | NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE 187 | deployment.apps/excited-elk-prometheus-alertmanager 1 1 1 0 3m 188 | deployment.apps/excited-elk-prometheus-kube-state-metrics 1 1 1 1 3m 189 | deployment.apps/excited-elk-prometheus-pushgateway 1 1 1 1 3m 190 | deployment.apps/excited-elk-prometheus-server 1 1 1 0 3m 191 | 192 | NAME DESIRED CURRENT READY AGE 193 | replicaset.apps/excited-elk-prometheus-alertmanager-68f4f57c97 1 1 0 3m 194 | replicaset.apps/excited-elk-prometheus-kube-state-metrics-858d44dfdc 1 1 1 3m 195 | replicaset.apps/excited-elk-prometheus-pushgateway-58bfd54d6d 1 1 1 3m 196 | replicaset.apps/excited-elk-prometheus-server-5958586794 1 1 0 3m 197 | [node1 ~]$ 198 | ``` 199 | 200 | 201 | 202 | -------------------------------------------------------------------------------- /concept/day-1/introduction.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## What is Kubernetes? 4 | 5 | Kubernetes (often abbreviated to K8S), is a container orchestration platform for applications that run on containers. 6 | Kubernetes is an open-source system for automating deployment, scaling, and management of containerized applications. 
7 | It groups containers that make up an application into logical units for easy management and discovery. 8 | 9 | At a minimum, Kubernetes can schedule and run application containers on clusters of physical or virtual machines. 10 | However, Kubernetes also allows developers to 'cut the cord' to physical and virtual machines, moving from a host-centric infrastructure to a container-centric infrastructure, which provides the full advantages and benefits inherent to containers. 11 | Kubernetes provides the infrastructure to build a truly container-centric development environment. 12 | 13 | K8s provides a rich set of features for container grouping, container orchestration, health checking, service discovery, load balancing, horizontal autoscaling, secrets & configuration management, storage orchestration, resource usage monitoring, CLI, and dashboard. 14 | 15 | ## IMPORTANT NOTES: 16 | 17 | - Kubernetes operates at the application level rather than at the hardware level 18 | - Kubernetes is not monolithic; its default solutions are optional and pluggable. 19 | - Additionally, Kubernetes is not a mere orchestration system. 20 | 21 | In fact, it eliminates the need for orchestration. 22 | The technical definition of orchestration is execution of a defined workflow: first do A, then B, then C. 23 | In contrast, Kubernetes is composed of a set of independent, composable control processes that continuously drive the 24 | current state towards the provided desired state. 25 | It shouldn't matter how you get from A to C. 26 | Centralized control is also not required; the approach is more akin to choreography. 27 | This results in a system that is easier to use and more powerful, robust, resilient, and extensible. 28 | 29 | - Kubernetes aims to support an extremely diverse variety of workloads, including stateless, stateful, and 30 | data-processing workloads. If an application can run in a container, it should run great on Kubernetes. 31 | 32 | - Allows users to choose their logging, monitoring, and alerting systems. (It provides some integrations as proof of concept.) 33 | - Kubernetes is designed to serve as a platform for building an ecosystem of components and tools to make it easier to deploy, 34 | scale, and manage applications. 35 | - Kubernetes is not a traditional, all-inclusive PaaS (Platform as a Service) system 36 | - Kubernetes operates on a declarative model: object specifications provided in so-called manifest files declare how you want the cluster to look. There's no need for a list of commands; it's up to Kubernetes to do anything and everything it needs to get there. 37 | 38 | ## A Look at High Level Kubernetes Architecture 39 | 40 | - There is a Kubernetes client called kubectl which talks to the Kube API server running on your master node. 41 | - All state and configuration is stored in etcd, which can run on the master or outside the cluster. 42 | - You have nodes, where your workloads run 43 | 44 | # Kubernetes Master: 45 | 46 | This is the Kubernetes control plane. This is where decisions are made about the cluster, such as scheduling, and detecting/responding to cluster events. The components of the master can be run on any node in the cluster.
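A quick way to see this layout on a kubeadm-style cluster (a sketch; component pod names vary by setup) is to list the cluster endpoints and the system pods:

```
kubectl cluster-info
kubectl get pods -n kube-system
```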
Below is a breakdown of each of the key components of the master: 47 | 48 | It consists of: 49 | 50 | - Kube-apiserver 51 | - kube-controller-manager 52 | - kube-scheduler 53 | 54 | # Kube-apiserver 55 | 56 | This is the only component of the Kubernetes control plane with a user-accessible API and the sole master component that you'll interact with. The API server exposes a restful Kubernetes API and consumes JSON manifest files. 57 | 58 | Whenever you run a command through kubectl, it hits the API server, which then reads from or writes to the etcd store. 59 | 60 | Kubernetes uses "etcd." This is a strongly consistent and highly available key/value store that Kubernetes uses for persistent storage of all API objects. Think of it as the "source of truth" for the cluster. 61 | 62 | The API server exposes four APIs: Kubernetes API, Extensions API, Autoscaling API, and Batch API. These are used for communicating with the Kubernetes cluster and executing container cluster operations. 63 | 64 | # Kube-Controller Manager 65 | 66 | Also known as the "kube-controller manager," this runs all the controllers that handle routine tasks in the cluster. These include the Node Controller, Replication Controller, Endpoints Controller, and Service Account and Token Controllers. Each of these controllers works separately to maintain the desired state. 67 | 68 | The kube-controller manager continuously watches your cluster through the API server and reacts to the changes you submit, driving the current state towards the desired state. 69 | 70 | Controller manager monitors the current state of the applications deployed on Kubernetes via the API server and makes sure that it meets the desired state. 71 | 72 | # Kube-Scheduler 73 | 74 | The scheduler watches for newly-created pods (groups of one or more containers), finds nodes with free capacity, and assigns the pods to those nodes. 75 | 76 | Please note that all Kubernetes nodes have a kubelet that ensures that any pods assigned to the node are running and configured in the desired state. 77 | 78 | The Scheduler's responsibility is to monitor the resource usage of each node and to schedule containers according to resource availability. 79 | 80 | # etcd 81 | 82 | etcd is a key/value store implemented by CoreOS. Kubernetes uses it as the persistent storage for all of its API objects. 83 | 84 | ## Kubernetes Worker Nodes 85 | 86 | The second important component under the hood is the set of nodes. Whereas the master handles and manages the cluster, worker nodes run the containers and provide the Kubernetes runtime environment. 87 | 88 | Every worker node runs a kubelet. This is the primary node agent. It watches the API server for pods that have been assigned to its node. Kubelet carries out tasks and maintains a reporting backchannel of pod status to the master node. 89 | 90 | Inside each pod there are containers; kubelet runs these via Docker (pulling images, starting and stopping containers, etc.). It also periodically executes any requested container liveness probes. In addition to Docker, rkt is also supported and the community is actively working to support OCI. 91 | 92 | 93 | 94 | On each Kubernetes node, the following components are installed: 95 | 96 | # Kubelet 97 | 98 | Kubelet is the agent that runs on each node. It makes use of the pod specification for creating containers and managing them.
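Everything the kubelet runs is described by a pod specification. A minimal sketch of one, reusing the nginx image from the earlier examples:

```
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod
  labels:
    app: nginx
spec:
  containers:
  - name: nginx
    image: nginx:1.12.1
    ports:
    - containerPort: 80
```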
101 | 102 | 103 | # Kube-Proxy 104 | 105 | Another component of worker nodes is kube-proxy. This is the network brain of the node, maintaining network rules on the host and performing connection forwarding. It's also responsible for load balancing across all pods in the service. 106 | 107 | Kube-proxy runs on each node to load balance pods. It uses iptables rules for simple TCP/UDP stream forwarding or round-robin TCP/UDP forwarding. 108 | 109 | 110 | # Important Note: 111 | 112 | A Kubernetes production deployment may need multiple master nodes and a separate etcd cluster for high availability. Kubernetes makes use of an overlay network for providing networking capabilities similar to a virtual machine-based environment. It allows container-to-container communication throughout the cluster and will provide unique IP addresses for each container. If such a software defined network (SDN) is not used, the container runtimes in each node will have an isolated network and subsequently the above networking features will not be available. This is one of the key advantages of Kubernetes over other container cluster management solutions, such as Apache Mesos. 113 | 114 | 115 | 116 | 117 | # Pod - Container Grouping 118 | 119 | - Pods are a set of containers on a single Docker host 120 | - Each pod is assigned an IP address 121 | - Communication between pods is performed via a proxy, which is the abstraction layer offering the pod's IP address from outside 122 | - A pod consists of one or more containers 123 | - Most Pods contain a single container, to keep things simple 124 | 125 | - A pod is a group of one or more containers (such as Docker containers), with shared storage/network. 126 | - Each pod contains specific information on how the containers should be run. Think of pods as a ring-fenced environment to run containers. 127 | - Pods are also a unit for scaling. If you need to scale an app component up or down, this can be achieved by adding or removing pods. 128 | - It's possible to run more than one container in a pod (where each shares the same IP address and mounted volumes), if they're tightly coupled. 129 | - Pods are deployed on a single node and have a definite lifecycle. They can be pending, running, succeeding, or failing, but once gone, they are never brought back to life. If a pod dies, a replication controller or other controller must be used to create a new one. 130 | 131 | A pod [2] is a group of containers that share the storage, users, network interfaces, etc. using Linux namespaces (ipc, uts, mount, pid, network and user), cgroups, and other kernel features. This facilitates creating composite applications while preserving the one application per container model. Containers in a pod share an IP address and the port space. They can find each other using localhost and communicate using IPC technologies like SystemV semaphores or POSIX shared memory. A sample composition of a pod would be an application server container running in parallel with a Logstash container monitoring the server logs using the same filesystem. 132 | 133 | 134 | # ReplicaSets (formerly called Replication Controller) 135 | 136 | - A replication controller is a logical entity that creates and manages pods. 137 | - It uses a pod template for defining the container image identifiers, ports, and labels. 138 | - Replication controllers auto heal pods according to the given health checks. 139 | - These health checks are called liveness probes. 140 | - Replication controllers support manual scaling of pods, and this is handled by the replica count.
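A minimal sketch of a ReplicaSet that keeps three nginx pods running (names and labels are illustrative):

```
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: nginx-replicaset
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.12.1
```

In practice you rarely create ReplicaSets directly; a Deployment (see below) manages them for you.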
141 | 142 | 143 | 144 | # Services: 145 | 146 | - Help us discover pods. 147 | - You don't need to reach Pods by IP address; instead you go to the service, and the service routes to a specific Pod. 148 | Services are more stable, while pods keep changing 149 | - An object that describes a set of pods that provide a useful service. 150 | - Services are typically used to define clusters of uniform pods. 151 | 152 | # Volume 153 | - Allows us to maintain state in the cluster 154 | - Keeps any info we want 155 | - A Kubernetes abstraction for persistent storage. Kubernetes supports many types of volumes, such as NFS, Ceph, GlusterFS, local directory, etc. 156 | 157 | # namespace 158 | - Separates different workloads from each other 159 | - You can have 10 developers and give each of them a namespace, where each namespace shouldn't consume more than 2GB RAM. 160 | - This is a tool used to group, separate, and isolate groups of objects. Namespaces are used for access control, network access control, resource management, and quotas 161 | 162 | # Ingress rules 163 | These specify how incoming network traffic should be routed to services and pods. 164 | 165 | 166 | # Network policies 167 | 168 | These define the network access rules between pods inside the cluster. 169 | 170 | # ConfigMaps and Secrets 171 | 172 | Used to separate configuration information from application definition. 173 | 174 | # Controllers 175 | 176 | These implement different policies for automatic pod management. There are three main types: 177 | 178 | 1. Deployment: Responsible for maintaining a set of running pods of the same type. 179 | 180 | 2. DaemonSet: Runs a specific type of pod on each node based on a condition. 181 | 182 | 3. StatefulSet: Used when several pods of the same type are needed to run in parallel, but each of the pods is required to have a specific identity. 183 | 184 | Advanced Features: 185 | 186 | 187 | 188 | # ReplicaSet 189 | 190 | - Ensures that when you launch pods ("I want 5 pods in the cluster"), there really are 5 pods in the cluster 191 | - Superseded the Replication Controller 192 | - A more expressive, set-based way of selecting Pods 193 | 194 | 195 | 196 | # Deployments: 197 | 198 | - Builds on ReplicaSets 199 | - Rolls out new images and rolls back images 200 | - Enables nice continuous deployment scenarios 201 | 202 | # StatefulSet: 203 | 204 | - For stateful workloads such as databases in a cluster 205 | 206 | # DaemonSets: 207 | 208 | - Ensure that one Pod runs on every node (or selected nodes) in the cluster 209 | 210 | # Jobs 211 | 212 | - Cron jobs or one-off jobs you want to run 213 | - Batch processing: the pod runs to completion and finishes up 214 | 215 | # What is Kubectl? 216 | 217 | kubectl is the client talking to a REST API, which in turn talks to the kubelet info service, which in turn talks to the pods via local kubelet agents. 218 | 219 | # Health Checking 220 | 221 | In reality, software applications fail due to many reasons; undiscovered bugs in the code, resource limitations, networking issues, infrastructure problems, etc. Therefore, monitoring software application deployments is essential. Kubernetes provides two main mechanisms for monitoring applications. This is done via the Kubelet agent: 222 | 223 | 1. Process Health Checking: Kubelet continuously checks the health of the containers via the Docker daemon. If a container process is not responding, it will get restarted. This feature is enabled by default and it's not customizable. 224 | 225 | 2. Application Health Checking: Kubernetes provides three methods for monitoring the application health, and these are known as health checking probes: 226 | 227 | HTTP GET: If the application exposes an HTTP endpoint, an HTTP GET request can be used for checking the health status. The HTTP endpoint needs to return an HTTP status code between 200 and 399 for the application to be considered healthy. 228 | 229 | Container Exec: If not, a shell command can be used for this purpose. This command needs to return zero for the application to be considered healthy. 230 | 231 | TCP Socket: If none of the above works, a simple TCP socket can also be used for checking the health status. If Kubelet can establish a connection to the given socket, the application is considered healthy.
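A minimal sketch of an HTTP GET liveness probe on a container (the path and timings are illustrative):

```
apiVersion: v1
kind: Pod
metadata:
  name: liveness-demo
spec:
  containers:
  - name: app
    image: nginx:1.12.1
    livenessProbe:
      httpGet:
        path: /
        port: 80
      initialDelaySeconds: 5
      periodSeconds: 10
```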
# Service Discovery and Load Balancing

- A Kubernetes service provides a mechanism for load balancing pods.
- It is implemented using kube-proxy and internally uses iptables rules for load balancing at the network layer.
- Each Kubernetes service exposes a DNS entry via SkyDNS for accessing the service within the Kubernetes internal network.
- A Kubernetes service can be implemented as one of the following types:

ClusterIP: This type makes the service visible only to the internal network, for routing internal traffic.

NodePort: This type exposes the service via node ports to the external network. Each port in a service is mapped to a node port, and those become accessible via `<node-ip>:<node-port>`.

LoadBalancer: If services need to be exposed via a dynamic load balancer, the service type can be set to LoadBalancer. This feature is enabled by the underlying cloud provider (for example, GCE).

# Automated Rollouts and Rollbacks

This is one of the distinguishing features of Kubernetes: it allows users to roll out a new application version without a service outage. Once an application is deployed using a replication controller, a rolling update can be triggered by packaging the new version of the application into a new container image. The rolling update process creates a new replication controller and rolls out one pod at a time using the newly created replication controller. The time interval between pod replacements can be configured. Once all the pods are replaced, the existing replication controller is removed.

A kubectl CLI command can be executed to update an existing WSO2 ESB deployment via a rolling update. The following example updates an ESB cluster created using the Docker image wso2esb:4.9.0-v1 to wso2esb:4.9.0-v2:

```
$ kubectl rolling-update my-wso2esb --image=wso2esb:4.9.0-v2
```

Similarly, an application update done via a rolling update can be rolled back if needed. The following sample command rolls back wso2esb:4.9.0-v2 to wso2esb:4.9.0-v1, assuming that its previous state was 4.9.0-v1:

```
$ kubectl rolling-update my-wso2esb --rollback
```

# Horizontal Autoscaling

Horizontal Pod Autoscalers provide autoscaling capabilities for pods. They do this by monitoring health statistics sent by cAdvisor. A cAdvisor instance runs in each node and provides information on the CPU, memory, and disk usage of containers. These statistics are aggregated by Heapster and made accessible via the Kubernetes API server. Currently, horizontal autoscaling is only available based on CPU usage, and an initiative is in progress to support custom metrics.
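An autoscaler can be attached to an existing controller straight from the CLI. A sketch (the deployment name and the thresholds are illustrative):

```
$ kubectl autoscale deployment nginx-deployment --min=2 --max=5 --cpu-percent=80
```

This keeps between 2 and 5 replicas running, scaling out when average CPU utilization goes above 80%.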
# Secret and Configuration Management

Applications that run in pods may need passwords, keys, and other sensitive information. Packaging these with the container image may lead to security threats: technically, anyone who gets access to the container image will be able to see all of the above. Kubernetes provides a much more secure mechanism to send this sensitive information to the pods at container startup without packaging it into the container image. These entries are called secrets. For example, a secret can be created via the secret API for storing the database password of a web application. Then the secret name can be given in the replication controller to let the pods access the actual value of the secret at container startup.

Kubernetes uses the same method for sending the token needed for accessing the Kubernetes API server to the pods. Similarly, Kubernetes supports sending configuration parameters to the pods via the ConfigMap API. Both secrets and config key/value pairs can be accessed inside the container either using a virtual volume mount or using environment variables.

# Storage Orchestration

Docker supports mounting storage systems to containers using container host storage or network storage systems [11]. Kubernetes provides the same functionality via the Kubernetes API and supports NFS, iSCSI, Gluster, Ceph, Cinder, and Flocker.

# Providing Well Known Ports for Kubernetes Services

- Kubernetes provides a mechanism for adding a proxy server for Kubernetes services. This feature is known as Ingress [3].
- The main advantage of this is the ability to expose Kubernetes services via well-known ports, such as 80 and 443. An ingress controller listens to the Kubernetes API, generates a proxy configuration at runtime whenever a service is changed, and reloads the Nginx configuration.
- It can expose any given port via a Docker host port. Clients can send requests to one of the Kubernetes node IPs on the Nginx port, and those get redirected to the relevant service.
- The service does round-robin load balancing at the network layer. The service can be identified using a URL context or hostname: https://node-ip/foo/, https://foo.bar.com/
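An Ingress resource expressing such host- and path-based routing might look like the following sketch (the host, path, and backend service name are illustrative; the API group matches the `extensions/v1beta1` manifests used elsewhere in this repo):

```
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: foo-ingress                # hypothetical name
spec:
  rules:
  - host: foo.bar.com              # route by hostname
    http:
      paths:
      - path: /foo                 # route by URL context
        backend:
          serviceName: foo-service # hypothetical backend service
          servicePort: 80
```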
# Sticky Session Management Using Service Load Balancers

Similar to ingress controllers, Kubernetes provides another mechanism for load balancing pods using third-party load balancers. These are known as service load balancers. Unlike ingress, service load balancers don't route requests to services; rather, they dispatch requests directly to the pods. The main advantage of this feature is the ability to provide sticky session management at the load balancer.

# Resource Usage Monitoring

Kubernetes uses cAdvisor [5] for monitoring containers in each node. It provides information on CPU usage, memory consumption, disk usage, network statistics, etc. A component called Heapster [6] aggregates the above data and makes it available via the Kubernetes API. Optionally, the data can be written to a data store and visualized via a UI. InfluxDB, Grafana, and Kube-UI can be used for this purpose [7].

# Kubernetes Dashboard

The Kubernetes dashboard provides features for deploying and monitoring applications. Any server cluster can be deployed by specifying a Docker image ID and the required service ports. Once deployed, server logs can be viewed via the same UI.
--------------------------------------------------------------------------------
/concept/day-1/getting-started.adoc:
--------------------------------------------------------------------------------
= Kubernetes Developer Concepts =
:toc:
:imagesdir: ../images

Kubernetes has a number of abstractions that map to API objects. These Kubernetes API objects can be used to describe your cluster's desired state, which includes information such as the applications and workloads running, replicas, container images, networking resources, and more. This section explains the key concepts relevant from an application developer perspective.

== Pod

A Pod is the smallest deployable unit that can be created, scheduled, and managed. It's a logical collection of containers that belong to an application. Pods are created in a namespace. All containers in a pod share the namespace, volumes, and networking stack. This allows containers in the pod to "`find`" each other and communicate using `localhost`.

=== Create a Pod

Each resource in Kubernetes can be defined using a configuration file. For example, an NGINX pod can be defined with the configuration file shown below:

    apiVersion: v1
    kind: Pod
    metadata:
      name: nginx-pod
      labels:
        name: nginx-pod
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        ports:
        - containerPort: 80

Create the pod as shown below:

    $ kubectl create -f templates/pod.yaml
    pod "nginx-pod" created

Get the list of pods:

    $ kubectl get pods
    NAME        READY     STATUS    RESTARTS   AGE
    nginx-pod   1/1       Running   0          22s

Verify that the pod came up fine:

    kubectl -n default port-forward $(kubectl -n default get pod -l name=nginx-pod -o jsonpath='{.items[0].metadata.name}') 8080:80 & open http://localhost:8080/

This opens up a browser window and shows the NGINX main page:

image::nginx-pod-default-page.png[]

If the containers in the pod generate logs, then they can be seen using the command shown:

    $ kubectl logs nginx-pod
    127.0.0.1 - - [03/Nov/2017:17:33:30 +0000] "GET / HTTP/1.1" 200 612 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36" "-"
    127.0.0.1 - - [03/Nov/2017:17:33:32 +0000] "GET /favicon.ico HTTP/1.1" 404 571 "http://localhost:8080/" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36" "-"
    2017/11/03 17:33:32 [error] 5#5: *2 open() "/usr/share/nginx/html/favicon.ico" failed (2: No such file or directory), client: 127.0.0.1, server: localhost, request: "GET /favicon.ico HTTP/1.1", host: "localhost:8080", referrer: "http://localhost:8080/"

=== Delete a Pod

Delete the pod as shown below:

    $ kubectl delete -f templates/pod.yaml
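=== Pods with Multiple Containers

Because the containers in a pod share the same network namespace, a second container can reach the first over `localhost`. The following is a minimal sketch of such a pod (the pod name and the busybox polling loop are illustrative and not part of this repo's templates):

    apiVersion: v1
    kind: Pod
    metadata:
      name: web-with-sidecar            # hypothetical name
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        ports:
        - containerPort: 80
      - name: sidecar                   # shares the pod's network namespace
        image: busybox
        command: ["sh", "-c", "while true; do wget -q -O- http://localhost:80/ > /dev/null; sleep 10; done"]

Here the busybox container polls the NGINX container every 10 seconds over `localhost`, with no service or pod IP involved.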
== Deployment

A "`desired state`", such as 4 replicas of a pod, can be described in a Deployment object. The Deployment controller in the Kubernetes cluster then ensures that the desired and the actual state match. A Deployment ensures the recreation of a pod when the worker node fails or reboots. If a pod dies, then a new pod is started to ensure the desired state matches the actual state. It also allows both up- and down-scaling of the number of replicas. This is achieved using a ReplicaSet. The Deployment manages the ReplicaSets and provides updates to those pods.

=== Create a Deployment

The following example will create a Deployment with 3 replicas of the NGINX base image. Let's begin with the template:

    apiVersion: extensions/v1beta1
    kind: Deployment # kubernetes object type
    metadata:
      name: nginx-deployment # deployment name
    spec:
      replicas: 3 # number of replicas
      template:
        metadata:
          labels:
            app: nginx # pod labels
        spec:
          containers:
          - name: nginx # container name
            image: nginx:1.12.1 # nginx image
            imagePullPolicy: IfNotPresent # if exists, will not pull new image
            ports: # container and host port assignments
            - containerPort: 80
            - containerPort: 443

This deployment will create 3 instances of the NGINX image.

Run the following command to create the Deployment:

    $ kubectl create -f templates/deployment.yaml --record
    deployment "nginx-deployment" created

The `--record` flag will track changes made through each revision.

To monitor the deployment rollout status:

    $ kubectl rollout status deployment/nginx-deployment
    deployment "nginx-deployment" successfully rolled out

A Deployment creates a ReplicaSet to manage the number of replicas. Let's take a look at the existing deployments and replica sets.

Get the deployments:

    $ kubectl get deployments
    NAME               DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
    nginx-deployment   3         3         3            3           25s

Get the replica set for the deployment:

    $ kubectl get replicaset
    NAME                          DESIRED   CURRENT   READY     AGE
    nginx-deployment-3441592026   3         3         3         1m

Get the list of running pods:

    $ kubectl get pods
    NAME                                READY     STATUS    RESTARTS   AGE
    nginx-deployment-3441592026-ddpf0   1/1       Running   0          2m
    nginx-deployment-3441592026-kkp8h   1/1       Running   0          2m
    nginx-deployment-3441592026-lx304   1/1       Running   0          2m

=== Scaling a Deployment

The number of replicas for a Deployment can be scaled using the following command:

    $ kubectl scale --replicas=5 deployment/nginx-deployment
    deployment "nginx-deployment" scaled

Verify the deployment:

    $ kubectl get deployments
    NAME               DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
    nginx-deployment   5         5         5            5           2m

Verify the pods in the deployment:

    $ kubectl get pods
    NAME                                READY     STATUS    RESTARTS   AGE
    nginx-deployment-3441592026-36957   1/1       Running   0          44s
    nginx-deployment-3441592026-8wch5   1/1       Running   0          44s
    nginx-deployment-3441592026-ddpf0   1/1       Running   0          3m
    nginx-deployment-3441592026-kkp8h   1/1       Running   0          3m
    nginx-deployment-3441592026-lx304   1/1       Running   0          3m

=== Update a Deployment

A more general update to a Deployment can be made by editing the pod spec. In this example, let's change to the latest nginx image.

First, type the following to open up a text editor:

    $ kubectl edit deployment/nginx-deployment

Next, change the image from `nginx:1.12.1` to `nginx:latest`.

This should perform a rolling update of the deployment.
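The same image change can also be applied non-interactively. A sketch using the standard `kubectl set image` command (equivalent in effect to the edit above):

    $ kubectl set image deployment/nginx-deployment nginx=nginx:latest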
To track deployment details such as revision, image version, and ports, type the following:

```
$ kubectl describe deployments
Name:                   nginx-deployment
Namespace:              default
CreationTimestamp:      Mon, 23 Oct 2017 09:14:36 -0400
Labels:                 app=nginx
Annotations:            deployment.kubernetes.io/revision=2
                        kubernetes.io/change-cause=kubectl edit deployment/nginx-deployment
Selector:               app=nginx
Replicas:               5 desired | 5 updated | 5 total | 5 available | 0 unavailable
StrategyType:           RollingUpdate
MinReadySeconds:        0
RollingUpdateStrategy:  1 max unavailable, 1 max surge
Pod Template:
  Labels:  app=nginx
  Containers:
   nginx:
    Image:        nginx:latest
    Ports:        80/TCP, 443/TCP
    Environment:  <none>
    Mounts:       <none>
  Volumes:        <none>
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Available      True    MinimumReplicasAvailable
OldReplicaSets:  <none>
NewReplicaSet:   nginx-deployment-886641336 (5/5 replicas created)
Events:
  Type    Reason             Age                From                   Message
  ----    ------             ----               ----                   -------
  Normal  ScalingReplicaSet  4m                 deployment-controller  Scaled up replica set nginx-deployment-3441592026 to 3
  Normal  ScalingReplicaSet  1m                 deployment-controller  Scaled up replica set nginx-deployment-3441592026 to 5
  Normal  ScalingReplicaSet  32s                deployment-controller  Scaled up replica set nginx-deployment-886641336 to 1
  Normal  ScalingReplicaSet  32s                deployment-controller  Scaled down replica set nginx-deployment-3441592026 to 4
  Normal  ScalingReplicaSet  32s                deployment-controller  Scaled up replica set nginx-deployment-886641336 to 2
  Normal  ScalingReplicaSet  29s                deployment-controller  Scaled down replica set nginx-deployment-3441592026 to 3
  Normal  ScalingReplicaSet  29s                deployment-controller  Scaled up replica set nginx-deployment-886641336 to 3
  Normal  ScalingReplicaSet  28s                deployment-controller  Scaled down replica set nginx-deployment-3441592026 to 2
  Normal  ScalingReplicaSet  28s                deployment-controller  Scaled up replica set nginx-deployment-886641336 to 4
  Normal  ScalingReplicaSet  25s (x3 over 26s)  deployment-controller  (combined from similar events): Scaled down replica set nginx-deployment-3441592026 to 0
```

=== Rollback a Deployment

To roll back to a previous version, first check the revision history:

    $ kubectl rollout history deployment/nginx-deployment
    deployments "nginx-deployment"
    REVISION  CHANGE-CAUSE
    1         kubectl scale deployment/nginx-deployment --replicas=5
    2         kubectl edit deployment/nginx-deployment

If you only want to roll back to the previous revision, enter the following command:

    $ kubectl rollout undo deployment/nginx-deployment
    deployment "nginx-deployment" rolled back

In our case, the deployment will roll back to use the `nginx:1.12.1` image. Check the image name:

    $ kubectl describe deployments | grep Image
        Image:        nginx:1.12.1

To roll back to a specific revision, enter:

    $ kubectl rollout undo deployment/nginx-deployment --to-revision=<revision>

=== Delete a Deployment

Run the following command to delete the Deployment:

    $ kubectl delete -f templates/deployment.yaml
    deployment "nginx-deployment" deleted
== Service

A pod is ephemeral. Each pod is assigned a unique IP address. If a pod that belongs to a replication controller dies, then it is recreated and may be given a different IP address. Further, additional pods may be created using a Deployment or ReplicaSet. This makes it difficult for an application server, such as WildFly, to access a database, such as MySQL, using its IP address.

A Service is an abstraction that defines a logical set of pods and a policy by which to access them. The IP address assigned to a service does not change over time, and thus can be relied upon by other pods. Typically, the pods belonging to a service are defined by a label selector. This is a similar mechanism to how pods belong to a replica set.

This abstraction of selecting pods using labels enables loose coupling. The number of pods in the deployment may scale up or down, but the application server can continue to access the database using the service.

A Kubernetes service defines a logical set of pods and enables access to them as a microservice.

=== Create a Deployment for Service

Pods belong to a service via a loosely coupled model where labels are attached to a pod and a service picks the pods by using those labels.

Let's create a Deployment first that will create 3 replicas of a pod:

    apiVersion: extensions/v1beta1
    kind: Deployment
    metadata:
      name: echo-deployment
    spec:
      replicas: 3
      template:
        metadata:
          labels:
            app: echo-pod
        spec:
          containers:
          - name: echoheaders
            image: gcr.io/google_containers/echoserver:1.4
            imagePullPolicy: IfNotPresent
            ports:
            - containerPort: 8080

This example creates an echo app that responds with the HTTP headers it receives; we will later expose it through an Elastic Load Balancer.
Type the following to create the deployment:

    $ kubectl create -f templates/echo-deployment.yaml --record

Use the `kubectl describe deployment` command to confirm that `echo-app` has been deployed:

```
$ kubectl describe deployment
Name:                   echo-deployment
Namespace:              default
CreationTimestamp:      Mon, 23 Oct 2017 10:07:47 -0400
Labels:                 app=echo-pod
Annotations:            deployment.kubernetes.io/revision=1
                        kubernetes.io/change-cause=kubectl create --filename=templates/echo-deployment.yaml --record=true
Selector:               app=echo-pod
Replicas:               3 desired | 3 updated | 3 total | 3 available | 0 unavailable
StrategyType:           RollingUpdate
MinReadySeconds:        0
RollingUpdateStrategy:  1 max unavailable, 1 max surge
Pod Template:
  Labels:  app=echo-pod
  Containers:
   echoheaders:
    Image:        gcr.io/google_containers/echoserver:1.4
    Port:         8080/TCP
    Environment:  <none>
    Mounts:       <none>
  Volumes:        <none>
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Available      True    MinimumReplicasAvailable
OldReplicaSets:  <none>
NewReplicaSet:   echo-deployment-3396249933 (3/3 replicas created)
Events:
  Type    Reason             Age   From                   Message
  ----    ------             ----  ----                   -------
  Normal  ScalingReplicaSet  10s   deployment-controller  Scaled up replica set echo-deployment-3396249933 to 3
```

Get the list of pods:

```
$ kubectl get pods
NAME                               READY     STATUS    RESTARTS   AGE
echo-deployment-3396249933-8slzp   1/1       Running   0          1m
echo-deployment-3396249933-bjwqj   1/1       Running   0          1m
echo-deployment-3396249933-r05nr   1/1       Running   0          1m
```

Check the label for a pod:

```
$ kubectl describe pods/echo-deployment-3396249933-8slzp | grep Label
Labels:         app=echo-pod
```

Each pod in this deployment has the `app=echo-pod` label attached to it.

=== Create a Service

In the following example, we create a service `echo-service`:

    apiVersion: v1
    kind: Service
    metadata:
      name: echo-service
    spec:
      selector:
        app: echo-pod
      ports:
      - name: http
        protocol: TCP
        port: 80
        targetPort: 8080
      type: LoadBalancer

The set of pods targeted by the service is determined by the label `app: echo-pod` attached to them. The service also maps an inbound port 80 to the target port 8080 on the container.

Kubernetes supports both TCP and UDP protocols.

=== Publish a Service

A service can be published to an external IP using the `type` attribute. This attribute can take one of the following values (a NodePort sketch follows the list):

. `ClusterIP`: Service exposed on an IP address inside the cluster. This is the default behavior.
. `NodePort`: Service exposed on each node's IP address at a defined port.
. `LoadBalancer`: If deployed in the cloud, exposed externally using a cloud-specific load balancer.
. `ExternalName`: Service is attached to the `externalName` field. It is mapped to a CNAME record with the given value.
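For example, a NodePort variant of the echo service would differ only in its `type` (the service name here is illustrative):

    apiVersion: v1
    kind: Service
    metadata:
      name: echo-nodeport           # hypothetical name
    spec:
      type: NodePort                # expose on every node's IP at an allocated port
      selector:
        app: echo-pod
      ports:
      - name: http
        protocol: TCP
        port: 80
        targetPort: 8080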
To publish the service using a load balancer instead, set the `type` field to `LoadBalancer`. This template will expose the `echo-service` service on an Elastic Load Balancer (ELB):

    apiVersion: v1
    kind: Service
    metadata:
      name: echo-service
    spec:
      selector:
        app: echo-pod
      ports:
      - name: http
        protocol: TCP
        port: 80
        targetPort: 8080
      type: LoadBalancer

Run the following command to create the service:

    $ kubectl create -f templates/service.yaml --record

Get more details about the service:

```
$ kubectl get svc
NAME           TYPE           CLUSTER-IP       EXTERNAL-IP       PORT(S)        AGE
echo-service   LoadBalancer   100.66.161.199   ad0b47976b7fe...  80:30125/TCP   40s
kubernetes     ClusterIP      100.64.0.1       <none>            443/TCP        1h
$ kubectl describe service echo-service
Name:                     echo-service
Namespace:                default
Labels:                   <none>
Annotations:              kubernetes.io/change-cause=kubectl create --filename=templates/service.yaml --record=true
Selector:                 app=echo-pod
Type:                     LoadBalancer
IP:                       100.66.161.199
LoadBalancer Ingress:     ad0b47976b7fe11e7a8870e55a29a6a9-1770422890.us-east-1.elb.amazonaws.com
Port:                     http  80/TCP
TargetPort:               8080/TCP
NodePort:                 http  30125/TCP
Endpoints:                100.96.3.8:8080,100.96.4.9:8080,100.96.5.9:8080
Session Affinity:         None
External Traffic Policy:  Cluster
Events:
  Type    Reason                Age   From                Message
  ----    ------                ----  ----                -------
  Normal  CreatingLoadBalancer  58s   service-controller  Creating load balancer
  Normal  CreatedLoadBalancer   56s   service-controller  Created load balancer
```

The output shows `LoadBalancer Ingress` as the address of an Elastic Load Balancer (ELB). It takes about 2-3 minutes for the ELB to be provisioned and become available. Wait for a couple of minutes, and then access the service:

```
$ curl http://ad0b47976b7fe11e7a8870e55a29a6a9-1770422890.us-east-1.elb.amazonaws.com
CLIENT VALUES:
client_address=172.20.45.253
command=GET
real path=/
query=nil
request_version=1.1
request_uri=http://ad0b47976b7fe11e7a8870e55a29a6a9-1770422890.us-east-1.elb.amazonaws.com:8080/

SERVER VALUES:
server_version=nginx: 1.10.0 - lua: 10001

HEADERS RECEIVED:
accept=*/*
host=ad0b47976b7fe11e7a8870e55a29a6a9-1770422890.us-east-1.elb.amazonaws.com
user-agent=curl/7.51.0
BODY:
-no body in request-
```

Note the `client_address` value shown in the output. This is the IP address of the pod serving the request. Multiple invocations of this command will show different values for this attribute.

Now, the number of pods in the deployment can be scaled up and down, or the pods may terminate and restart on a different host. But the service will still be able to target those pods because of the labels attached to the pods and used by the service.

=== Delete a Service

Run the following command to delete the Service:

    $ kubectl delete -f templates/service.yaml

The backend Deployment needs to be explicitly deleted as well:

    $ kubectl delete -f templates/echo-deployment.yaml

== Daemon Set

A DaemonSet ensures that a copy of a pod runs on a selected set of nodes. By default, all nodes in the cluster are selected. A selection criteria may be specified to select a limited number of nodes.

As new nodes are added to the cluster, pods are started on them. As nodes are removed, pods are removed through garbage collection.
=== Create a DaemonSet

The following is an example DaemonSet that runs a Prometheus node-exporter container. Let's begin with the template:

    apiVersion: extensions/v1beta1
    kind: DaemonSet
    metadata:
      name: prometheus-daemonset
    spec:
      template:
        metadata:
          labels:
            tier: monitoring
            name: prometheus-exporter
        spec:
          containers:
          - name: prometheus
            image: prom/node-exporter
            ports:
            - containerPort: 80

Run the following command to create the DaemonSet and its pods:

    $ kubectl create -f templates/daemonset.yaml --record

The `--record` flag will track changes made through each revision.

Get basic details about the DaemonSet:

    $ kubectl get daemonsets/prometheus-daemonset
    NAME                   DESIRED   CURRENT   READY     UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
    prometheus-daemonset   5         5         5         5            5           <none>          7s

Get more details about the DaemonSet:

```
$ kubectl describe daemonset/prometheus-daemonset
Name:           prometheus-daemonset
Selector:       name=prometheus-exporter,tier=monitoring
Node-Selector:  <none>
Labels:         name=prometheus-exporter
                tier=monitoring
Annotations:    kubernetes.io/change-cause=kubectl create --filename=templates/daemonset.yaml --record=true
Desired Number of Nodes Scheduled: 5
Current Number of Nodes Scheduled: 5
Number of Nodes Scheduled with Up-to-date Pods: 5
Number of Nodes Scheduled with Available Pods: 5
Number of Nodes Misscheduled: 0
Pods Status:  5 Running / 0 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  name=prometheus-exporter
           tier=monitoring
  Containers:
   prometheus:
    Image:        prom/node-exporter
    Port:         80/TCP
    Environment:  <none>
    Mounts:       <none>
  Volumes:        <none>
Events:
  Type    Reason            Age   From        Message
  ----    ------            ----  ----        -------
  Normal  SuccessfulCreate  28s   daemon-set  Created pod: prometheus-daemonset-pzfl8
  Normal  SuccessfulCreate  28s   daemon-set  Created pod: prometheus-daemonset-sjcgh
  Normal  SuccessfulCreate  28s   daemon-set  Created pod: prometheus-daemonset-ctrg4
  Normal  SuccessfulCreate  28s   daemon-set  Created pod: prometheus-daemonset-rxg79
  Normal  SuccessfulCreate  28s   daemon-set  Created pod: prometheus-daemonset-cnbkh
```

Get pods in the DaemonSet:

```
$ kubectl get pods -lname=prometheus-exporter
NAME                         READY     STATUS    RESTARTS   AGE
prometheus-daemonset-cnbkh   1/1       Running   0          57s
prometheus-daemonset-ctrg4   1/1       Running   0          57s
prometheus-daemonset-pzfl8   1/1       Running   0          57s
prometheus-daemonset-rxg79   1/1       Running   0          57s
prometheus-daemonset-sjcgh   1/1       Running   0          57s
```

=== Limit DaemonSets to specific nodes

Verify that the Prometheus pods were successfully deployed to the cluster nodes:

    kubectl get pods -o wide

The output should look as shown:

    $ kubectl get pods -o wide
    NAME                         READY     STATUS    RESTARTS   AGE       IP            NODE
    prometheus-daemonset-sjcgh   1/1       Running   0          1m        100.96.7.10   ip-172-20-52-200.ec2.internal
    prometheus-daemonset-cnbkh   1/1       Running   0          1m        100.96.3.10   ip-172-20-57-5.ec2.internal
    prometheus-daemonset-ctrg4   1/1       Running   0          1m        100.96.6.10   ip-172-20-64-152.ec2.internal
    prometheus-daemonset-pzfl8   1/1       Running   0          1m        100.96.5.10   ip-172-20-125-181.ec2.internal
    prometheus-daemonset-rxg79   1/1       Running   0          1m        100.96.4.9    ip-172-20-107-81.ec2.internal

Label one of the nodes as follows:

    $ kubectl label node ip-172-20-52-200.ec2.internal app=prometheus-node
    node "ip-172-20-52-200.ec2.internal" labeled

Next, edit the DaemonSet template using the command shown:

    $ kubectl edit ds/prometheus-daemonset

Change the `spec.template.spec` to include a `nodeSelector` that matches the added label:

```
      nodeSelector:
        app: prometheus-node
```

After the update is performed, Prometheus is now configured to run on a specific node:

    $ kubectl get ds/prometheus-daemonset
    NAME                   DESIRED   CURRENT   READY     UP-TO-DATE   AVAILABLE   NODE SELECTOR         AGE
    prometheus-daemonset   1         1         1         0            1           app=prometheus-node   2m

=== Delete a DaemonSet

Run the following command to delete the DaemonSet:

    $ kubectl delete -f templates/daemonset.yaml

== Job

A Job creates one or more pods and ensures that a specified number of them successfully complete. A Job keeps track of the successful completions of its pods; when the specified number of pods have successfully completed, the Job itself is complete. The Job will start a new pod if a pod fails or is deleted due to hardware failure.

This is different from a ReplicaSet or a Deployment, which ensure that a certain number of pods are always running: if a pod managed by a ReplicaSet or Deployment terminates, it is restarted. This makes ReplicaSets and Deployments long-running processes, well suited for a web server such as NGINX. A Job, on the other hand, completes once the specified number of pods have run successfully, which makes it well suited for tasks that need to run only once. For example, a job may convert an image from one format to another; restarting such a pod in a replication controller would not only cause redundant work but may be harmful in certain cases.

Jobs are complementary to ReplicaSets. A ReplicaSet manages pods which are not expected to terminate (e.g. web servers), and a Job manages pods that are expected to terminate (e.g. batch jobs).

A Job is only appropriate for pods with `RestartPolicy` equal to `OnFailure` or `Never`.

=== Non-parallel Job

Only one pod per job is started, unless the pod fails. The job is complete as soon as the pod terminates successfully.

Here is the job specification:

    apiVersion: batch/v1
    kind: Job
    metadata:
      name: wait
    spec:
      template:
        metadata:
          name: wait
        spec:
          containers:
          - name: wait
            image: ubuntu
            command: ["sleep", "20"]
          restartPolicy: Never

It creates an Ubuntu container, sleeps for 20 seconds, and that's it!

Create a job using the command:

    $ kubectl apply -f templates/job.yaml
    job "wait" created

Look at the job:

    $ kubectl get jobs
    NAME      DESIRED   SUCCESSFUL   AGE
    wait      1         0            0s

The output shows that the job is not successful yet. Watch the pod status to confirm:

    $ kubectl get -w pods
    NAME         READY     STATUS      RESTARTS   AGE
    wait-lk49x   1/1       Running     0          7s
    wait-lk49x   0/1       Completed   0          24s

To begin with, it shows that the pod for the job is running.
The pod successfully exits after a few seconds and then shows the `Completed` status.

Now, watch the job status again:

    $ kubectl get jobs
    NAME      DESIRED   SUCCESSFUL   AGE
    wait      1         1            1m

The output shows that the job was successfully executed.

The completed pod is not shown in the `kubectl get pods` command. Instead, it can be shown by passing an additional option as shown below:

    $ kubectl get pods --show-all
    NAME         READY     STATUS      RESTARTS   AGE
    wait-lk49x   0/1       Completed   0          1m

To delete the job, you can run this command:

    $ kubectl delete -f templates/job.yaml

=== Parallel Job

A non-parallel job runs only one pod per job. The parallel-job API is used to run multiple pods in parallel for a job. The number of pods to complete is defined by the `.spec.completions` attribute in the configuration file. The number of pods to run in parallel is defined by the `.spec.parallelism` attribute in the configuration file. The default value for both of these attributes is 1.

The job is complete when there is one successful pod for each value in the range 1 to `.spec.completions`. For that reason, it is also called a _fixed completion count_ job.

Here is a job specification:

    apiVersion: batch/v1
    kind: Job
    metadata:
      name: wait
    spec:
      completions: 6
      parallelism: 2
      template:
        metadata:
          name: wait
        spec:
          containers:
          - name: wait
            image: ubuntu
            command: ["sleep", "20"]
          restartPolicy: Never

This job specification is similar to the non-parallel job specification, with two new attributes added: `.spec.completions` and `.spec.parallelism`. This means the job is complete when six pods have successfully completed, and a maximum of two pods run in parallel at any given time.

Create a parallel job using the command:

    $ kubectl apply -f templates/job-parallel.yaml

Watch the status of the job as shown:

    $ kubectl get -w jobs
    NAME      DESIRED   SUCCESSFUL   AGE
    wait      6         0            2s
    wait      6         1            22s
    wait      6         2            22s
    wait      6         3            43s
    wait      6         4            43s
    wait      6         5            1m
    wait      6         6            1m

The output shows that 2 pods are created about every 20 seconds.

In another terminal window, watch the status of the pods created:

    $ kubectl get -w pods -l job-name=wait
    NAME         READY     STATUS              RESTARTS   AGE
    wait-f7kgb   1/1       Running             0          5s
    wait-smp4t   1/1       Running             0          5s
    wait-smp4t   0/1       Completed           0          22s
    wait-jbdp7   0/1       Pending             0          0s
    wait-jbdp7   0/1       Pending             0          0s
    wait-jbdp7   0/1       ContainerCreating   0          0s
    wait-f7kgb   0/1       Completed           0          22s
    wait-r5v8n   0/1       Pending             0          0s
    wait-r5v8n   0/1       Pending             0          0s
    wait-r5v8n   0/1       ContainerCreating   0          0s
    wait-r5v8n   1/1       Running             0          1s
    wait-jbdp7   1/1       Running             0          1s
    wait-r5v8n   0/1       Completed           0          21s
    wait-ngrgl   0/1       Pending             0          0s
    wait-ngrgl   0/1       Pending             0          0s
    wait-ngrgl   0/1       ContainerCreating   0          0s
    wait-jbdp7   0/1       Completed           0          21s
    wait-6l22s   0/1       Pending             0          0s
    wait-6l22s   0/1       Pending             0          0s
    wait-6l22s   0/1       ContainerCreating   0          0s
    wait-ngrgl   1/1       Running             0          1s
    wait-6l22s   1/1       Running             0          1s
    wait-ngrgl   0/1       Completed           0          21s
    wait-6l22s   0/1       Completed           0          21s

After all the pods have completed, `kubectl get pods` will not show the list of completed pods.
The command to show the list of completed pods is shown below:

    $ kubectl get pods -a
    NAME         READY     STATUS      RESTARTS   AGE
    wait-6l22s   0/1       Completed   0          1m
    wait-f7kgb   0/1       Completed   0          2m
    wait-jbdp7   0/1       Completed   0          2m
    wait-ngrgl   0/1       Completed   0          1m
    wait-r5v8n   0/1       Completed   0          2m
    wait-smp4t   0/1       Completed   0          2m

Similarly, `kubectl get jobs` shows the status of the job after it has completed:

    $ kubectl get jobs
    NAME      DESIRED   SUCCESSFUL   AGE
    wait      6         6            3m

Deleting a job deletes all of its pods as well. Delete the job as follows:

    $ kubectl delete -f templates/job-parallel.yaml

== Cron Job

=== Pre-requisites

For Kubernetes cluster versions < 1.8, a Cron Job can be created with API version `batch/v2alpha1`. You can check the cluster version using this command:

    $ kubectl version
    Client Version: version.Info{Major:"1", Minor:"8", GitVersion:"v1.8.1", GitCommit:"f38e43b221d08850172a9a4ea785a86a3ffa3b3a", GitTreeState:"clean", BuildDate:"2017-10-12T00:45:05Z", GoVersion:"go1.9.1", Compiler:"gc", Platform:"darwin/amd64"}
    Server Version: version.Info{Major:"1", Minor:"7", GitVersion:"v1.7.4", GitCommit:"793658f2d7ca7f064d2bdf606519f9fe1229c381", GitTreeState:"clean", BuildDate:"2017-08-17T08:30:51Z", GoVersion:"go1.8.3", Compiler:"gc", Platform:"linux/amd64"}

Notice that the server version is at v1.7.4. In this case, you need to explicitly enable API version `batch/v2alpha1` in the Kubernetes cluster and perform a rolling update. These steps are explained in link:../cluster-install#turn-on-an-api-version-for-your-cluster[Turn on an API version for your cluster].

NOTE: Once you switch API versions, you need to perform a rolling update of the cluster, which generally takes 30-45 minutes to complete for a cluster with 3 master nodes and 5 worker nodes.

If your cluster version is >= 1.8, the `batch/v2alpha1` API is deprecated, but you can switch to `batch/v1beta1` to create Cron Jobs.

=== Create Cron Job

A Cron Job is a job that runs on a given schedule, written in cron format. There are two primary use cases:

. Run a job once at a specified point in time
. Run a job repeatedly at specified points in time

Here is the job specification:

    apiVersion: batch/v2alpha1
    kind: CronJob
    metadata:
      name: hello
    spec:
      schedule: "*/1 * * * *"
      jobTemplate:
        spec:
          template:
            spec:
              containers:
              - name: hello
                image: busybox
                args:
                - /bin/sh
                - -c
                - date; echo Hello World!
              restartPolicy: OnFailure

This job prints the current timestamp and the message "`Hello World`" every minute.

Create the Cron Job as shown in the command:

    $ kubectl create -f templates/cronjob.yaml --validate=false

`--validate=false` is required because the kubectl CLI version is 1.8.
Without this option, you'll get the error:

    error: error validating "templates/cronjob.yaml": error validating data: unknown object type schema.GroupVersionKind{Group:"batch", Version:"v2alpha1", Kind:"CronJob"}; if you choose to ignore these errors, turn validation off with --validate=false

Watch the status of the job as shown:

    $ kubectl get -w cronjobs
    NAME      SCHEDULE      SUSPEND   ACTIVE    LAST SCHEDULE                     AGE
    hello     */1 * * * *   False     0
    hello     */1 * * * *   False     0
    hello     */1 * * * *   False     1         Tue, 24 Oct 2017 15:41:00 -0700
    hello     */1 * * * *   False     0         Tue, 24 Oct 2017 15:41:00 -0700
    hello     */1 * * * *   False     1         Tue, 24 Oct 2017 15:42:00 -0700
    hello     */1 * * * *   False     0         Tue, 24 Oct 2017 15:42:00 -0700

In another terminal window, watch the status of the pods created:

    $ kubectl get -w pods -l app=hello-cronpod
    NAME                     READY     STATUS              RESTARTS   AGE
    hello-1508884860-cq004   0/1       Pending             0          0s
    hello-1508884860-cq004   0/1       Pending             0          0s
    hello-1508884860-cq004   0/1       ContainerCreating   0          0s
    hello-1508884860-cq004   0/1       Completed           0          1s
    hello-1508884920-wl5bx   0/1       Pending             0          0s
    hello-1508884920-wl5bx   0/1       Pending             0          0s
    hello-1508884920-wl5bx   0/1       ContainerCreating   0          0s
    hello-1508884920-wl5bx   0/1       Completed           0          2s
    hello-1508884980-45ktd   0/1       Pending             0          0s
    hello-1508884980-45ktd   0/1       Pending             0          0s
    hello-1508884980-45ktd   0/1       ContainerCreating   0          0s
    hello-1508884980-45ktd   0/1       Completed           0          2s

Get logs from one of the pods:

    $ kubectl logs hello-1508884860-cq004
    Tue Oct 24 22:41:02 UTC 2017
    Hello World!

=== Delete Cron Job

Delete the Cron Job as shown in the following command:

    $ kubectl delete -f templates/cronjob.yaml
    cronjob "hello" deleted
--------------------------------------------------------------------------------
/install/baremetal/README.md:
--------------------------------------------------------------------------------
# Installing Kubernetes on Bare Metal System

```
root@ubuntu:~# curl -sS https://get.k8s.io | bash
Downloading kubernetes release v1.11.2
  from https://dl.k8s.io/v1.11.2/kubernetes.tar.gz
  to /root/kubernetes.tar.gz
Is this ok? [Y]/n
[... curl progress output trimmed ...]
Unpacking kubernetes release v1.11.2
Kubernetes release: v1.11.2
Server: linux/amd64 (to override, set KUBERNETES_SERVER_ARCH)
Client: linux/amd64 (autodetected)

Will download kubernetes-server-linux-amd64.tar.gz from https://dl.k8s.io/v1.11.2
Will download and extract kubernetes-client-linux-amd64.tar.gz from https://dl.k8s.io/v1.11.2
Is this ok?
[Y]/n
[... curl progress output trimmed ...]
```
415M 77 322M 0 0 846k 0 0:08:22 0:06:30 0 77 415M 77 323M 0 0 846k 0 0:08:22 0:06:31 0 78 415M 78 324M 0 0 846k 0 0:08:22 0:06:32 0 78 415M 78 325M 0 0 846k 0 0:08:22 0:06:33 0 78 415M 78 326M 0 0 846k 0 0:08:22 0:06:34 0 78 415M 78 327M 0 0 846k 0 0:08:22 0:06:35 0 78 415M 78 327M 0 0 847k 0 0:08:22 0:06:36 0 79 415M 79 328M 0 0 847k 0 0:08:22 0:06:37 0 79 415M 79 329M 0 0 847k 0 0:08:22 0:06:38 0 79 415M 79 330M 0 0 847k 0 0:08:22 0:06:39 0 79 415M 79 331M 0 0 847k 0 0:08:22 0:06:40 0 79 415M 79 332M 0 0 847k 0 0:08:22 0:06:41 0 80 415M 80 332M 0 0 846k 0 0:08:22 0:06:42 0 80 415M 80 333M 0 0 846k 0 0:08:22 0:06:43 0 80 415M 80 334M 0 0 846k 0 0:08:22 0:06:44 0 80 415M 80 335M 0 0 846k 0 0:08:22 0:06:45 0 80 415M 80 336M 0 0 846k 0 0:08:22 0:06:46 0 81 415M 81 336M 0 0 846k 0 0:08:22 0:06:47 0 81 415M 81 337M 0 0 846k 0 0:08:22 0:06:48 0 81 415M 81 338M 0 0 846k 0 0:08:22 0:06:49 0 81 415M 81 339M 0 0 846k 0 0:08:22 0:06:50 0 81 415M 81 340M 0 0 847k 0 0:08:22 0:06:51 0 82 415M 82 341M 0 0 847k 0 0:08:22 0:06:52 0 82 415M 82 342M 0 0 847k 0 0:08:22 0:06:53 0 82 415M 82 342M 0 0 847k 0 0:08:22 0:06:54 0 82 415M 82 343M 0 0 847k 0 0:08:22 0:06:55 0 82 415M 82 344M 0 0 847k 0 0:08:22 0:06:56 0 83 415M 83 345M 0 0 847k 0 0:08:22 0:06:57 0 83 415M 83 346M 0 0 847k 0 0:08:22 0:06:58 0 83 415M 83 347M 0 0 847k 0 0:08:22 0:06:59 0 83 415M 83 348M 0 0 847k 0 0:08:21 0:07:00 0 83 415M 83 348M 0 0 847k 0 0:08:21 0:07:01 0 84 415M 84 349M 0 0 847k 0 0:08:21 0:07:02 0 84 415M 84 350M 0 0 847k 0 0:08:21 0:07:03 0 84 415M 84 351M 0 0 848k 0 0:08:21 0:07:04 0 84 415M 84 351M 0 0 846k 0 0:08:22 0:07:05 0 84 415M 84 352M 0 0 847k 0 0:08:22 0:07:06 0 85 415M 85 353M 0 0 847k 0 0:08:22 0:07:07 0 85 415M 85 354M 0 0 847k 0 0:08:22 0:07:08 0 85 415M 85 355M 0 0 847k 0 0:08:22 0:07:09 0 85 415M 85 356M 0 0 847k 0 0:08:22 0:07:10 0 85 415M 85 357M 0 0 847k 0 0:08:22 0:07:11 0 86 415M 86 357M 0 0 847k 0 0:08:22 0:07:12 0 86 415M 86 358M 0 0 847k 0 0:08:22 0:07:13 0 86 415M 86 359M 0 0 847k 0 0:08:21 0:07:14 0 86 415M 86 360M 0 0 847k 0 0:08:21 0:07:15 0 86 415M 86 361M 0 0 847k 0 0:08:22 0:07:16 0 87 415M 87 361M 0 0 846k 0 0:08:22 0:07:17 0 87 415M 87 362M 0 0 846k 0 0:08:22 0:07:18 0 87 415M 87 363M 0 0 846k 0 0:08:22 0:07:19 0 87 415M 87 364M 0 0 847k 0 0:08:22 0:07:20 0 87 415M 87 365M 0 0 847k 0 0:08:22 0:07:21 0 88 415M 88 366M 0 0 847k 0 0:08:22 0:07:22 0 88 415M 88 366M 0 0 847k 0 0:08:22 0:07:23 0 88 415M 88 367M 0 0 847k 0 0:08:22 0:07:24 0 88 415M 88 368M 0 0 847k 0 0:08:22 0:07:25 0 88 415M 88 369M 0 0 847k 0 0:08:22 0:07:26 0 89 415M 89 370M 0 0 847k 0 0:08:22 0:07:27 0 89 415M 89 371M 0 0 847k 0 0:08:22 0:07:28 0 89 415M 89 372M 0 0 847k 0 0:08:21 0:07:29 0 89 415M 89 372M 0 0 847k 0 0:08:21 0:07:30 0 89 415M 89 373M 0 0 847k 0 0:08:21 0:07:31 0 90 415M 90 374M 0 0 847k 0 0:08:21 0:07:32 0 90 415M 90 375M 0 0 848k 0 0:08:21 0:07:33 0 90 415M 90 376M 0 0 848k 0 0:08:21 0:07:34 0 90 415M 90 377M 0 0 848k 0 0:08:21 0:07:35 0 90 415M 90 378M 0 0 848k 0 0:08:21 0:07:36 0 91 415M 91 378M 0 0 848k 0 0:08:21 0:07:37 0 91 415M 91 379M 0 0 847k 0 0:08:21 0:07:38 0 91 415M 91 380M 0 0 847k 0 0:08:22 0:07:39 0 91 415M 91 381M 0 0 847k 0 0:08:22 0:07:40 0 91 415M 91 381M 0 0 847k 0 0:08:22 0:07:41 0 92 415M 92 382M 0 0 847k 0 0:08:22 0:07:42 0 92 415M 92 383M 0 0 847k 0 0:08:21 0:07:43 0 92 415M 92 384M 0 0 847k 0 0:08:21 0:07:44 0 92 415M 92 385M 0 0 847k 0 0:08:21 0:07:45 0 92 415M 92 386M 0 0 847k 0 0:08:21 0:07:46 0 93 415M 93 387M 0 0 847k 0 0:08:21 0:07:47 0 93 415M 93 387M 0 0 848k 0 0:08:21 0:07:48 0 93 
415M 93 388M 0 0 848k 0 0:08:21 0:07:49 0 93 415M 93 389M 0 0 848k 0 0:08:21 0:07:50 0 93 415M 93 390M 0 0 848k 0 0:08:21 0:07:51 0 94 415M 94 391M 0 0 848k 0 0:08:21 0:07:52 0 94 415M 94 392M 0 0 848k 0 0:08:21 0:07:53 0 94 415M 94 393M 0 0 848k 0 0:08:21 0:07:54 0 94 415M 94 393M 0 0 848k 0 0:08:21 0:07:55 0 95 415M 95 394M 0 0 848k 0 0:08:21 0:07:56 0 95 415M 95 395M 0 0 847k 0 0:08:22 0:07:57 0 95 415M 95 396M 0 0 847k 0 0:08:21 0:07:58 0 95 415M 95 396M 0 0 847k 0 0:08:21 0:07:59 0 95 415M 95 397M 0 0 847k 0 0:08:21 0:08:00 0 95 415M 95 398M 0 0 847k 0 0:08:21 0:08:01 0 96 415M 96 399M 0 0 847k 0 0:08:21 0:08:02 0 96 415M 96 400M 0 0 848k 0 0:08:21 0:08:03 0 96 415M 96 401M 0 0 848k 0 0:08:21 0:08:04 0 96 415M 96 402M 0 0 848k 0 0:08:21 0:08:05 0 96 415M 96 402M 0 0 848k 0 0:08:21 0:08:06 0 97 415M 97 403M 0 0 848k 0 0:08:21 0:08:07 0 97 415M 97 404M 0 0 848k 0 0:08:21 0:08:08 0 97 415M 97 405M 0 0 848k 0 0:08:21 0:08:09 0 97 415M 97 406M 0 0 848k 0 0:08:21 0:08:10 0 98 415M 98 407M 0 0 848k 0 0:08:21 0:08:11 0 98 415M 98 408M 0 0 848k 0 0:08:21 0:08:12 0 98 415M 98 408M 0 0 848k 0 0:08:21 0:08:13 0 98 415M 98 409M 0 0 848k 0 0:08:21 0:08:14 0 98 415M 98 410M 0 0 848k 0 0:08:21 0:08:15 0 99 415M 99 411M 0 0 848k 0 0:08:21 0:08:16 0 99 415M 99 412M 0 0 848k 0 0:08:21 0:08:17 0 99 415M 99 413M 0 0 849k 0 0:08:21 0:08:18 0 99 415M 99 413M 0 0 848k 0 0:08:21 0:08:19 0 99 415M 99 414M 0 0 848k 0 0:08:21 0:08:20 0 99 415M 99 415M 0 0 848k 0 0:08:21 0:08:21 -- 100 415M 100 415M 0 0 848k 0 0:08:21 0:08:21 -- :--:-- 763k 25 | 26 | md5sum(kubernetes-server-linux-amd64.tar.gz)=89fa567b0e20bb2ac9525 12a985419fe 27 | sha1sum(kubernetes-server-linux-amd64.tar.gz)=2f4bb5e579f038d4f71a b88a68653dd64dacb924 28 | 29 | % Total % Received % Xferd Average Speed Time Time Time Current 30 | Dload Upload Total Spent Left Speed 31 | 0 0 0 0 0 0 0 0 --:--:-- --:--:-- -- 0 0 0 0 0 0 0 0 --:--:-- 0:00:01 -- 0 0 0 0 0 0 0 0 --:--:-- 0:00:01 -- 0 0 0 0 0 0 0 0 --:--:-- 0:00:02 -- 100 161 100 161 0 0 54 0 0:00:02 0:00:02 -- :--:-- 54 32 | 0 0 0 0 0 0 0 0 --:--:-- 0:00:03 -- 0 13.2M 0 621 0 0 130 0 29:42:29 0:00:04 29 0 13.2M 0 123k 0 0 22536 0 0:10:16 0:00:05 0 6 13.2M 6 917k 0 0 139k 0 0:01:37 0:00:06 0 13 13.2M 13 1870k 0 0 246k 0 0:00:55 0:00:07 0 20 13.2M 20 2830k 0 0 329k 0 0:00:41 0:00:08 0 27 13.2M 27 3790k 0 0 395k 0 0:00:34 0:00:09 0 34 13.2M 34 4750k 0 0 448k 0 0:00:30 0:00:10 0 42 13.2M 42 5704k 0 0 492k 0 0:00:27 0:00:11 0 49 13.2M 49 6657k 0 0 529k 0 0:00:25 0:00:12 0 55 13.2M 55 7598k 0 0 559k 0 0:00:24 0:00:13 0 63 13.2M 63 8555k 0 0 586k 0 0:00:23 0:00:14 0 69 13.2M 69 9504k 0 0 609k 0 0:00:22 0:00:15 0 76 13.2M 76 10.2M 0 0 630k 0 0:00:21 0:00:16 0 80 13.2M 80 10.6M 0 0 620k 0 0:00:21 0:00:17 0 87 13.2M 87 11.5M 0 0 638k 0 0:00:21 0:00:18 0 94 13.2M 94 12.4M 0 0 653k 0 0:00:20 0:00:19 0 100 13.2M 100 13.2M 0 0 665k 0 0:00:20 0:00:20 -- :--:-- 846k 33 | 34 | md5sum(kubernetes-client-linux-amd64.tar.gz)=aa489d572dab2869942bc 8392d21f0a4 35 | sha1sum(kubernetes-client-linux-amd64.tar.gz)=40ef1a51abeb60b32416 7e534d2c8fe94e8f352b 36 | 37 | Extracting /root/kubernetes/client/kubernetes-client-linux-amd64.t ar.gz into /root/kubernetes/platforms/linux/amd64 38 | Add '/root/kubernetes/client/bin' to your PATH to use newly-instal led binaries. 39 | root@ubuntu:~# 40 | 41 | ``` 42 | --------------------------------------------------------------------------------
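42 | 
43 | Before using the freshly downloaded binaries, it is worth confirming that the tarballs match the checksums printed by the installer, and then putting the client binaries on the PATH as the installer suggests. The sketch below reuses the paths and hash values from the run above; substitute whatever your own download reports.
44 | 
45 | ```
46 | cd /root/kubernetes/client
47 | 
48 | # Verify the client tarball against the md5/sha1 values printed by the installer
49 | echo "aa489d572dab2869942bc8392d21f0a4  kubernetes-client-linux-amd64.tar.gz" | md5sum -c -
50 | echo "40ef1a51abeb60b324167e534d2c8fe94e8f352b  kubernetes-client-linux-amd64.tar.gz" | sha1sum -c -
51 | 
52 | # Make kubectl and the other client binaries available in the current shell
53 | export PATH=$PATH:/root/kubernetes/client/bin
54 | kubectl version --client
55 | ```
--------------------------------------------------------------------------------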