├── README.md ├── getting-started-with-kubeadm ├── assets │ └── dashboard.yaml ├── courseBase.sh ├── finish.md ├── index.json ├── intro.md ├── set-env.sh ├── step1.md ├── step2.md ├── step3.md ├── step4.md ├── step5.md ├── step6.md └── step7.md ├── helm-classic-package-manager ├── courseBase.sh ├── finish.md ├── index.json ├── intro.md ├── set-env.sh ├── step1.md ├── step2.md ├── step3.md └── step4.md ├── helm-package-manager ├── courseBase.sh ├── finish.md ├── index.json ├── intro.md ├── set-env.sh ├── step1.md ├── step2.md ├── step3.md └── step4.md └── playground ├── courseBase.sh ├── finish.md ├── index.json ├── intro.md ├── set-env.sh └── step1.md /README.md: -------------------------------------------------------------------------------- 1 | # kubernetes-scenarios 2 | Katacoda Kubernetes Interactive Scenarios - https://katacoda.com/courses/kubernetes/ 3 | -------------------------------------------------------------------------------- /getting-started-with-kubeadm/assets/dashboard.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2015 Google Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # Configuration to deploy release version of the Dashboard UI compatible with 16 | # Kubernetes 1.6 (RBAC enabled). 
17 | # 18 | # Example usage: kubectl create -f 19 | 20 | apiVersion: v1 21 | kind: ServiceAccount 22 | metadata: 23 | labels: 24 | k8s-app: kubernetes-dashboard 25 | name: kubernetes-dashboard 26 | namespace: kube-system 27 | --- 28 | apiVersion: rbac.authorization.k8s.io/v1beta1 29 | kind: ClusterRoleBinding 30 | metadata: 31 | name: kubernetes-dashboard 32 | labels: 33 | k8s-app: kubernetes-dashboard 34 | roleRef: 35 | apiGroup: rbac.authorization.k8s.io 36 | kind: ClusterRole 37 | name: cluster-admin 38 | subjects: 39 | - kind: ServiceAccount 40 | name: kubernetes-dashboard 41 | namespace: kube-system 42 | --- 43 | kind: Deployment 44 | apiVersion: extensions/v1beta1 45 | metadata: 46 | labels: 47 | k8s-app: kubernetes-dashboard 48 | name: kubernetes-dashboard 49 | namespace: kube-system 50 | spec: 51 | replicas: 1 52 | revisionHistoryLimit: 10 53 | selector: 54 | matchLabels: 55 | k8s-app: kubernetes-dashboard 56 | template: 57 | metadata: 58 | labels: 59 | k8s-app: kubernetes-dashboard 60 | spec: 61 | containers: 62 | - name: kubernetes-dashboard 63 | image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.0 64 | ports: 65 | - containerPort: 9090 66 | protocol: TCP 67 | args: 68 | # Uncomment the following line to manually specify Kubernetes API server Host 69 | # If not specified, Dashboard will attempt to auto discover the API server and connect 70 | # to it. Uncomment only if the default does not work. 
71 | # - --apiserver-host=http://my-address:port 72 | livenessProbe: 73 | httpGet: 74 | path: / 75 | port: 9090 76 | initialDelaySeconds: 30 77 | timeoutSeconds: 30 78 | serviceAccountName: kubernetes-dashboard 79 | # Comment the following tolerations if Dashboard must not be deployed on master 80 | tolerations: 81 | - key: node-role.kubernetes.io/master 82 | effect: NoSchedule 83 | --- 84 | kind: Service 85 | apiVersion: v1 86 | metadata: 87 | labels: 88 | k8s-app: kubernetes-dashboard 89 | name: kubernetes-dashboard 90 | namespace: kube-system 91 | spec: 92 | type: NodePort 93 | ports: 94 | - port: 9090 95 | nodePort: 30000 96 | selector: 97 | k8s-app: kubernetes-dashboard 98 | -------------------------------------------------------------------------------- /getting-started-with-kubeadm/courseBase.sh: -------------------------------------------------------------------------------- 1 | ssh root@[[HOST2_IP]] "kubeadm reset" 2 | -------------------------------------------------------------------------------- /getting-started-with-kubeadm/finish.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/katacoda-scenarios/kubernetes-scenarios/c068ff465d63a8c5aa8868725cc87e29254c4bd3/getting-started-with-kubeadm/finish.md -------------------------------------------------------------------------------- /getting-started-with-kubeadm/index.json: -------------------------------------------------------------------------------- 1 | { 2 | "pathwayTitle": "Kubernetes", 3 | "title": "Getting Started With Kubeadm", 4 | "description": "Learn how to deploy a Kubeadm cluster", 5 | "difficulty": "intermediate", 6 | "time": "10-15 minutes", 7 | "details": { 8 | "steps": [ 9 | { 10 | "title": "Step 1 - Initialise Master", 11 | "text": "step1.md" 12 | }, 13 | { 14 | "title": "Step 2 - Join Cluster", 15 | "text": "step2.md" 16 | }, 17 | { 18 | "title": "Step 3 - View Nodes", 19 | "text": "step3.md" 20 | }, 21 | { 22 | "title": 
"Step 4 - Deploy Container Networking Interface (CNI)", 23 | "text": "step4.md" 24 | }, 25 | { 26 | "title": "Step 5 - Deploy Pod", 27 | "text": "step5.md" 28 | }, 29 | { 30 | "title": "Step 6 - Work Remotely", 31 | "text": "step6.md" 32 | }, 33 | { 34 | "title": "Step 7 - Deploy Dashboard", 35 | "text": "step7.md" 36 | } 37 | ], 38 | "intro": { 39 | "text": "intro.md", 40 | "courseData": "courseBase.sh", 41 | "credits": "" 42 | }, 43 | "finish": { 44 | "text": "finish.md" 45 | }, 46 | "assets": { 47 | "host01": [{"file": "dashboard.yaml", "target": "/root"}] 48 | } 49 | }, 50 | "environment": { 51 | "uilayout": "terminal-terminal", 52 | "uimessage1": "\u001b[32mYour Interactive Bash Terminal. A safe place to learn and execute commands.\u001b[m\r\n" 53 | }, 54 | "backend": { 55 | "imageid": "kubernetes-cluster" 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /getting-started-with-kubeadm/intro.md: -------------------------------------------------------------------------------- 1 | In this scenario you'll learn how to bootstrap a Kubernetes cluster using Kubeadm. 2 | 3 | Kubeadm solves the problem of handling TLS encryption configuration, deploying the core Kubernetes components and ensuring that additional nodes can easily join the cluster. The resulting cluster is secured out of the box via mechanisms such as RBAC. 
4 | 5 | More details on Kubeadm can be found at https://github.com/kubernetes/kubeadm 6 | -------------------------------------------------------------------------------- /getting-started-with-kubeadm/set-env.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/katacoda-scenarios/kubernetes-scenarios/c068ff465d63a8c5aa8868725cc87e29254c4bd3/getting-started-with-kubeadm/set-env.sh -------------------------------------------------------------------------------- /getting-started-with-kubeadm/step1.md: -------------------------------------------------------------------------------- 1 | Kubeadm has been installed on the nodes. Packages are available for Ubuntu 16.04+, CentOS 7 or HypriotOS v1.0.1+. 2 | 3 | The first stage of initialising the cluster is to launch the master node. The master is responsible for running the control plane components, etcd and the API server. Clients will communicate to the API to schedule workloads and manage the state of the cluster. 4 | 5 | ## Task 6 | 7 | The command below will initialise the cluster with a known token to simplify the following steps. 8 | 9 | `kubeadm init --token=102952.1a7dd4cc8d1f4cc5`{{execute HOST1}} 10 | 11 | In production, it's recommended to exclude the token, causing kubeadm to generate one on your behalf. 12 | -------------------------------------------------------------------------------- /getting-started-with-kubeadm/step2.md: -------------------------------------------------------------------------------- 1 | Once the Master has initialised, additional nodes can join the cluster as long as they have the correct token. The tokens can be managed via `kubeadm token`, for example `kubeadm token list`{{execute}}. 2 | 3 | ## Task 4 | 5 | On the second node, run the command to join the cluster providing the IP address of the Master node.
6 | 7 | `kubeadm join --token=102952.1a7dd4cc8d1f4cc5 [[HOST_IP]]:6443`{{execute HOST2}} 8 | 9 | This is the same command provided after the Master has been initialised. 10 | -------------------------------------------------------------------------------- /getting-started-with-kubeadm/step3.md: -------------------------------------------------------------------------------- 1 | The cluster has now been initialised. The Master node will manage the cluster, while our one worker node will run our container workloads. 2 | 3 | ## Task 4 | 5 | To manage the Kubernetes cluster, the client configuration and certificates are required. This configuration is created when _kubeadm_ initialises the cluster. The command copies the configuration to the user's home directory and sets the environment variable for use with the CLI. 6 | 7 | ``` 8 | sudo cp /etc/kubernetes/admin.conf $HOME/ 9 | sudo chown $(id -u):$(id -g) $HOME/admin.conf 10 | export KUBECONFIG=$HOME/admin.conf 11 | ```{{execute HOST1}} 12 | 13 | The Kubernetes CLI, known as _kubectl_, can now use the configuration to access the cluster. For example, the command below will return the two nodes in our cluster. 14 | 15 | `kubectl get nodes`{{execute HOST1}} 16 | 17 | At this point, the Nodes may not be ready. This is because the Container Network Interface has not been deployed. This will be fixed within the next step. 18 | -------------------------------------------------------------------------------- /getting-started-with-kubeadm/step4.md: -------------------------------------------------------------------------------- 1 | The Container Network Interface (CNI) defines how the different nodes and their workloads should communicate. There are multiple network providers available, some are listed [here](https://kubernetes.io/docs/admin/addons/). 2 | 3 | ## Task 4 | 5 | In this scenario we'll use WeaveWorks.
The deployment definition can be viewed at `cat /opt/weave-kube`{{execute HOST1}} 6 | 7 | This can be deployed using `kubectl apply`. 8 | 9 | `kubectl apply -f /opt/weave-kube`{{execute HOST1}} 10 | 11 | Weave will now deploy as a series of Pods on the cluster. The status of this can be viewed using the command `kubectl get pod -n kube-system`{{execute HOST1}} 12 | 13 | When installing Weave on your cluster, visit https://www.weave.works/docs/net/latest/kube-addon/ for details. 14 | -------------------------------------------------------------------------------- /getting-started-with-kubeadm/step5.md: -------------------------------------------------------------------------------- 1 | The state of the two nodes in the cluster should now be Ready. This means that our deployments can be scheduled and launched. 2 | 3 | Using Kubectl, it's possible to deploy pods. Commands are always issued for the Master with each node only responsible for executing the workloads. 4 | 5 | The command below creates a Pod based on the Docker Image _katacoda/docker-http-server_. 6 | 7 | `kubectl run http --image=katacoda/docker-http-server:latest --replicas=1`{{execute HOST1}} 8 | 9 | The status of the Pod creation can be viewed using `kubectl get pods`{{execute HOST1}} 10 | 11 | Once running, you can see the Docker Container running on the node.
12 | 13 | `docker ps | grep docker-http-server`{{execute HOST2}} 14 | -------------------------------------------------------------------------------- /getting-started-with-kubeadm/step6.md: -------------------------------------------------------------------------------- 1 | `cat /etc/kubernetes/admin.conf`{{execute HOST1}} 2 | 3 | `scp root@:/etc/kubernetes/admin.conf .` 4 | 5 | `kubectl --kubeconfig ./admin.conf get nodes` 6 | -------------------------------------------------------------------------------- /getting-started-with-kubeadm/step7.md: -------------------------------------------------------------------------------- 1 | Kubernetes has a web-based dashboard UI giving visibility into the Kubernetes cluster. 2 | 3 | ## Task 4 | 5 | Deploy the dashboard yaml with the command `kubectl apply -f dashboard.yaml`{{execute HOST1}} 6 | 7 | The dashboard is deployed into the _kube-system_ namespace. View the status of the deployment with `kubectl get pods -n kube-system`{{execute HOST1}} 8 | 9 | When the dashboard was deployed, it was assigned a NodePort of 30000. This makes the dashboard available outside of the cluster and viewable at https://[[HOST2_SUBDOMAIN]]-30000-[[KATACODA_HOST]].environments.katacoda.com/ 10 | 11 | For your cluster, the dashboard yaml definition can be downloaded from https://github.com/kubernetes/dashboard/blob/master/src/deploy/kubernetes-dashboard.yaml.
12 | -------------------------------------------------------------------------------- /helm-classic-package-manager/courseBase.sh: -------------------------------------------------------------------------------- 1 | echo 'apiVersion: v1' >> ~/skydns-rc.yaml 2 | echo 'kind: ReplicationController' >> ~/skydns-rc.yaml 3 | echo 'metadata:' >> ~/skydns-rc.yaml 4 | echo ' name: kube-dns-v9' >> ~/skydns-rc.yaml 5 | echo ' namespace: kube-system' >> ~/skydns-rc.yaml 6 | echo ' labels:' >> ~/skydns-rc.yaml 7 | echo ' k8s-app: kube-dns' >> ~/skydns-rc.yaml 8 | echo ' version: v9' >> ~/skydns-rc.yaml 9 | echo ' kubernetes.io/cluster-service: "true"' >> ~/skydns-rc.yaml 10 | echo 'spec:' >> ~/skydns-rc.yaml 11 | echo ' replicas: 1' >> ~/skydns-rc.yaml 12 | echo ' selector:' >> ~/skydns-rc.yaml 13 | echo ' k8s-app: kube-dns' >> ~/skydns-rc.yaml 14 | echo ' version: v9' >> ~/skydns-rc.yaml 15 | echo ' template:' >> ~/skydns-rc.yaml 16 | echo ' metadata:' >> ~/skydns-rc.yaml 17 | echo ' labels:' >> ~/skydns-rc.yaml 18 | echo ' k8s-app: kube-dns' >> ~/skydns-rc.yaml 19 | echo ' version: v9' >> ~/skydns-rc.yaml 20 | echo ' kubernetes.io/cluster-service: "true"' >> ~/skydns-rc.yaml 21 | echo ' spec:' >> ~/skydns-rc.yaml 22 | echo ' containers:' >> ~/skydns-rc.yaml 23 | echo ' - name: etcd' >> ~/skydns-rc.yaml 24 | echo ' image: gcr.io/google_containers/etcd:2.2.1' >> ~/skydns-rc.yaml 25 | echo ' resources:' >> ~/skydns-rc.yaml 26 | echo ' limits:' >> ~/skydns-rc.yaml 27 | echo ' cpu: 100m' >> ~/skydns-rc.yaml 28 | echo ' memory: 50Mi' >> ~/skydns-rc.yaml 29 | echo ' command:' >> ~/skydns-rc.yaml 30 | echo ' - /usr/local/bin/etcd' >> ~/skydns-rc.yaml 31 | echo ' - -data-dir' >> ~/skydns-rc.yaml 32 | echo ' - /var/etcd/data' >> ~/skydns-rc.yaml 33 | echo ' - -listen-client-urls' >> ~/skydns-rc.yaml 34 | echo ' - http://127.0.0.1:2379,http://127.0.0.1:4001' >> ~/skydns-rc.yaml 35 | echo ' - -advertise-client-urls' >> ~/skydns-rc.yaml 36 | echo ' - 
http://127.0.0.1:2379,http://127.0.0.1:4001' >> ~/skydns-rc.yaml 37 | echo ' - -initial-cluster-token' >> ~/skydns-rc.yaml 38 | echo ' - skydns-etcd' >> ~/skydns-rc.yaml 39 | echo ' volumeMounts:' >> ~/skydns-rc.yaml 40 | echo ' - name: etcd-storage' >> ~/skydns-rc.yaml 41 | echo ' mountPath: /var/etcd/data' >> ~/skydns-rc.yaml 42 | echo ' - name: kube2sky' >> ~/skydns-rc.yaml 43 | echo ' image: gcr.io/google_containers/kube2sky:1.11' >> ~/skydns-rc.yaml 44 | echo ' resources:' >> ~/skydns-rc.yaml 45 | echo ' limits:' >> ~/skydns-rc.yaml 46 | echo ' cpu: 100m' >> ~/skydns-rc.yaml 47 | echo ' memory: 50Mi' >> ~/skydns-rc.yaml 48 | echo ' args:' >> ~/skydns-rc.yaml 49 | echo ' # command = "/kube2sky"' >> ~/skydns-rc.yaml 50 | echo ' - -domain=cluster.local' >> ~/skydns-rc.yaml 51 | echo ' - -kube_master_url=http://[[HOST_IP]]:8080' >> ~/skydns-rc.yaml 52 | echo ' - name: skydns' >> ~/skydns-rc.yaml 53 | echo ' image: gcr.io/google_containers/skydns:2015-10-13-8c72f8c' >> ~/skydns-rc.yaml 54 | echo ' resources:' >> ~/skydns-rc.yaml 55 | echo ' limits:' >> ~/skydns-rc.yaml 56 | echo ' cpu: 100m' >> ~/skydns-rc.yaml 57 | echo ' memory: 50Mi' >> ~/skydns-rc.yaml 58 | echo ' args:' >> ~/skydns-rc.yaml 59 | echo ' # command = "/skydns"' >> ~/skydns-rc.yaml 60 | echo ' - -machines=http://127.0.0.1:4001' >> ~/skydns-rc.yaml 61 | echo ' - -addr=0.0.0.0:53' >> ~/skydns-rc.yaml 62 | echo ' - -ns-rotate=false' >> ~/skydns-rc.yaml 63 | echo ' - -domain=cluster.local' >> ~/skydns-rc.yaml 64 | echo ' ports:' >> ~/skydns-rc.yaml 65 | echo ' - containerPort: 53' >> ~/skydns-rc.yaml 66 | echo ' name: dns' >> ~/skydns-rc.yaml 67 | echo ' protocol: UDP' >> ~/skydns-rc.yaml 68 | echo ' - containerPort: 53' >> ~/skydns-rc.yaml 69 | echo ' name: dns-tcp' >> ~/skydns-rc.yaml 70 | echo ' protocol: TCP' >> ~/skydns-rc.yaml 71 | echo ' livenessProbe:' >> ~/skydns-rc.yaml 72 | echo ' httpGet:' >> ~/skydns-rc.yaml 73 | echo ' path: /healthz' >> ~/skydns-rc.yaml 74 | echo ' port: 8080' >> 
~/skydns-rc.yaml 75 | echo ' scheme: HTTP' >> ~/skydns-rc.yaml 76 | echo ' initialDelaySeconds: 30' >> ~/skydns-rc.yaml 77 | echo ' timeoutSeconds: 5' >> ~/skydns-rc.yaml 78 | echo ' readinessProbe:' >> ~/skydns-rc.yaml 79 | echo ' httpGet:' >> ~/skydns-rc.yaml 80 | echo ' path: /healthz' >> ~/skydns-rc.yaml 81 | echo ' port: 8080' >> ~/skydns-rc.yaml 82 | echo ' scheme: HTTP' >> ~/skydns-rc.yaml 83 | echo ' initialDelaySeconds: 1' >> ~/skydns-rc.yaml 84 | echo ' timeoutSeconds: 5' >> ~/skydns-rc.yaml 85 | echo ' - name: healthz' >> ~/skydns-rc.yaml 86 | echo ' image: gcr.io/google_containers/exechealthz:1.0' >> ~/skydns-rc.yaml 87 | echo ' resources:' >> ~/skydns-rc.yaml 88 | echo ' limits:' >> ~/skydns-rc.yaml 89 | echo ' cpu: 10m' >> ~/skydns-rc.yaml 90 | echo ' memory: 20Mi' >> ~/skydns-rc.yaml 91 | echo ' args:' >> ~/skydns-rc.yaml 92 | echo ' - -cmd=nslookup kubernetes.default.svc.cluster.local localhost >/dev/null' >> ~/skydns-rc.yaml 93 | echo ' - -port=8080' >> ~/skydns-rc.yaml 94 | echo ' ports:' >> ~/skydns-rc.yaml 95 | echo ' - containerPort: 8080' >> ~/skydns-rc.yaml 96 | echo ' protocol: TCP' >> ~/skydns-rc.yaml 97 | echo ' volumes:' >> ~/skydns-rc.yaml 98 | echo ' - name: etcd-storage' >> ~/skydns-rc.yaml 99 | echo ' emptyDir: {}' >> ~/skydns-rc.yaml 100 | echo ' dnsPolicy: Default' >> ~/skydns-rc.yaml 101 | echo 'apiVersion: v1' >> ~/skydns-svc.yaml 102 | echo 'kind: Service' >> ~/skydns-svc.yaml 103 | echo 'metadata:' >> ~/skydns-svc.yaml 104 | echo ' name: kube-dns' >> ~/skydns-svc.yaml 105 | echo ' namespace: kube-system' >> ~/skydns-svc.yaml 106 | echo ' labels:' >> ~/skydns-svc.yaml 107 | echo ' k8s-app: kube-dns' >> ~/skydns-svc.yaml 108 | echo ' kubernetes.io/cluster-service: "true"' >> ~/skydns-svc.yaml 109 | echo ' kubernetes.io/name: "KubeDNS"' >> ~/skydns-svc.yaml 110 | echo 'spec:' >> ~/skydns-svc.yaml 111 | echo ' selector:' >> ~/skydns-svc.yaml 112 | echo ' k8s-app: kube-dns' >> ~/skydns-svc.yaml 113 | echo ' clusterIP: 10.0.0.10' >> 
~/skydns-svc.yaml 114 | echo ' ports:' >> ~/skydns-svc.yaml 115 | echo ' - name: dns' >> ~/skydns-svc.yaml 116 | echo ' port: 53' >> ~/skydns-svc.yaml 117 | echo ' protocol: UDP' >> ~/skydns-svc.yaml 118 | echo ' - name: dns-tcp' >> ~/skydns-svc.yaml 119 | echo ' port: 53' >> ~/skydns-svc.yaml 120 | echo ' protocol: TCP' >> ~/skydns-svc.yaml 121 | echo '{' >> ~/kube-system.json 122 | echo ' "kind": "Namespace",' >> ~/kube-system.json 123 | echo ' "apiVersion": "v1",' >> ~/kube-system.json 124 | echo ' "metadata": {' >> ~/kube-system.json 125 | echo ' "name": "kube-system"' >> ~/kube-system.json 126 | echo ' }' >> ~/kube-system.json 127 | echo '}' >> ~/kube-system.json 128 | echo 'kind: List' >> ~/dashboard.yaml 129 | echo 'apiVersion: v1' >> ~/dashboard.yaml 130 | echo 'items:' >> ~/dashboard.yaml 131 | echo '- kind: ReplicationController' >> ~/dashboard.yaml 132 | echo ' apiVersion: v1' >> ~/dashboard.yaml 133 | echo ' metadata:' >> ~/dashboard.yaml 134 | echo ' labels:' >> ~/dashboard.yaml 135 | echo ' app: kubernetes-dashboard' >> ~/dashboard.yaml 136 | echo ' version: v1.0.1' >> ~/dashboard.yaml 137 | echo ' name: kubernetes-dashboard' >> ~/dashboard.yaml 138 | echo ' namespace: kube-system' >> ~/dashboard.yaml 139 | echo ' spec:' >> ~/dashboard.yaml 140 | echo ' replicas: 1' >> ~/dashboard.yaml 141 | echo ' selector:' >> ~/dashboard.yaml 142 | echo ' app: kubernetes-dashboard' >> ~/dashboard.yaml 143 | echo ' template:' >> ~/dashboard.yaml 144 | echo ' metadata:' >> ~/dashboard.yaml 145 | echo ' labels:' >> ~/dashboard.yaml 146 | echo ' app: kubernetes-dashboard' >> ~/dashboard.yaml 147 | echo ' spec:' >> ~/dashboard.yaml 148 | echo ' containers:' >> ~/dashboard.yaml 149 | echo ' - name: kubernetes-dashboard' >> ~/dashboard.yaml 150 | echo ' image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.0.1' >> ~/dashboard.yaml 151 | echo ' imagePullPolicy: Always' >> ~/dashboard.yaml 152 | echo ' ports:' >> ~/dashboard.yaml 153 | echo ' - containerPort: 9090' 
>> ~/dashboard.yaml 154 | echo ' protocol: TCP' >> ~/dashboard.yaml 155 | echo ' args:' >> ~/dashboard.yaml 156 | echo ' - --apiserver-host=[[HOST_IP]]:8080' >> ~/dashboard.yaml 157 | echo ' livenessProbe:' >> ~/dashboard.yaml 158 | echo ' httpGet:' >> ~/dashboard.yaml 159 | echo ' path: /' >> ~/dashboard.yaml 160 | echo ' port: 9090' >> ~/dashboard.yaml 161 | echo ' initialDelaySeconds: 30' >> ~/dashboard.yaml 162 | echo ' timeoutSeconds: 30' >> ~/dashboard.yaml 163 | echo '- kind: Service' >> ~/dashboard.yaml 164 | echo ' apiVersion: v1' >> ~/dashboard.yaml 165 | echo ' metadata:' >> ~/dashboard.yaml 166 | echo ' labels:' >> ~/dashboard.yaml 167 | echo ' app: kubernetes-dashboard' >> ~/dashboard.yaml 168 | echo ' kubernetes.io/cluster-service: "true"' >> ~/dashboard.yaml 169 | echo ' name: kubernetes-dashboard' >> ~/dashboard.yaml 170 | echo ' namespace: kube-system' >> ~/dashboard.yaml 171 | echo ' spec:' >> ~/dashboard.yaml 172 | echo ' type: NodePort' >> ~/dashboard.yaml 173 | echo ' ports:' >> ~/dashboard.yaml 174 | echo ' - port: 80' >> ~/dashboard.yaml 175 | echo ' targetPort: 9090' >> ~/dashboard.yaml 176 | echo ' selector:' >> ~/dashboard.yaml 177 | echo ' app: kubernetes-dashboard' >> ~/dashboard.yaml 178 | echo 'echo "Starting Kubernetes v1.2.2..."' >> ~/.bin/launch.sh 179 | echo 'docker run -d --net=host gcr.io/google_containers/etcd:2.2.1 /usr/local/bin/etcd --listen-client-urls=http://0.0.0.0:4001 --advertise-client-urls=http://0.0.0.0:4001 --data-dir=/var/etcd/data' >> ~/.bin/launch.sh 180 | echo 'docker run -d --name=api --net=host --pid=host --privileged=true gcr.io/google_containers/hyperkube:v1.2.2 /hyperkube apiserver --insecure-bind-address=0.0.0.0 --service-cluster-ip-range=10.0.0.1/24 --etcd_servers=http://127.0.0.1:4001 --v=2' >> ~/.bin/launch.sh 181 | echo 'docker run -d --name=kubs --volume=/:/rootfs:ro --volume=/sys:/sys:ro --volume=/dev:/dev --volume=/var/lib/docker/:/var/lib/docker:rw --volume=/var/lib/kubelet/:/var/lib/kubelet:rw 
--volume=/var/run:/var/run:rw --net=host --pid=host --privileged=true gcr.io/google_containers/hyperkube:v1.2.2 /hyperkube kubelet --hostname-override="127.0.0.1" --address="0.0.0.0" --api-servers=http://0.0.0.0:8080 --cluster_dns=10.0.0.10 --cluster_domain=cluster.local --config=/etc/kubernetes/manifests-multi' >> ~/.bin/launch.sh 182 | echo 'docker run -d --name=proxy --net=host --privileged gcr.io/google_containers/hyperkube:v1.2.2 /hyperkube proxy --master=http://0.0.0.0:8080 --v=2' >> ~/.bin/launch.sh 183 | echo 'echo "Downloading Kubectl..."' >> ~/.bin/launch.sh 184 | echo 'curl -o ~/.bin/kubectl http://storage.googleapis.com/kubernetes-release/release/v1.2.2/bin/linux/amd64/kubectl' >> ~/.bin/launch.sh 185 | echo 'chmod u+x ~/.bin/kubectl' >> ~/.bin/launch.sh 186 | echo 'export KUBERNETES_MASTER=http://docker:8080' >> ~/.bin/launch.sh 187 | echo 'echo "Waiting for Kubernetes to start..."' >> ~/.bin/launch.sh 188 | echo 'until $(kubectl cluster-info &> /dev/null); do' >> ~/.bin/launch.sh 189 | echo ' sleep 1' >> ~/.bin/launch.sh 190 | echo 'done' >> ~/.bin/launch.sh 191 | echo 'echo "Kubernetes started"' >> ~/.bin/launch.sh 192 | echo 'echo "Starting Kubernetes DNS..."' >> ~/.bin/launch.sh 193 | echo 'kubectl -s http://docker:8080 create -f ~/kube-system.json' >> ~/.bin/launch.sh 194 | echo 'kubectl -s http://docker:8080 create -f ~/skydns-rc.yaml' >> ~/.bin/launch.sh 195 | echo 'kubectl -s http://docker:8080 create -f ~/skydns-svc.yaml' >> ~/.bin/launch.sh 196 | echo 'echo "Starting Kubernetes UI..."' >> ~/.bin/launch.sh 197 | echo 'kubectl -s http://docker:8080 create -f ~/dashboard.yaml' >> ~/.bin/launch.sh 198 | echo 'kubectl -s http://docker:8080 cluster-info' >> ~/.bin/launch.sh 199 | chmod +x ~/.bin/launch.sh 200 | curl -L http://assets.joinscrapbook.com/unzip -o ~/.bin/unzip 201 | chmod +x ~/.bin/unzip 202 | launch.sh 203 | 204 | docker pull deis/redis:v0.0.5 205 | -------------------------------------------------------------------------------- 
/helm-classic-package-manager/finish.md: -------------------------------------------------------------------------------- 1 | Congratulations! You've successfully deployed Redis to Kubernetes using Helm. 2 | 3 | More details can be found on their website at http://www.helm.sh/ 4 | -------------------------------------------------------------------------------- /helm-classic-package-manager/index.json: -------------------------------------------------------------------------------- 1 | { 2 | "pathwayTitle": "Kubernetes", 3 | "title": "Helm (Classic) Package Manager", 4 | "description": "Use Helm (Classic) Package Manager for Kubernetes to deploy Redis", 5 | "details": { 6 | "steps": [ 7 | { 8 | "title": "Install Helm", 9 | "code": "set-env.sh", 10 | "text": "step1.md" 11 | }, 12 | { 13 | "title": "Search For Chart", 14 | "text": "step2.md" 15 | }, 16 | { 17 | "title": "Deploy Redis", 18 | "text": "step3.md" 19 | }, 20 | { 21 | "title": "See Results", 22 | "text": "step4.md" 23 | } 24 | ], 25 | "intro": { 26 | "text": "intro.md", 27 | "courseData": "courseBase.sh", 28 | "credits": "" 29 | }, 30 | "finish": { 31 | "text": "finish.md" 32 | } 33 | }, 34 | "environment": { 35 | "uilayout": "terminal", 36 | "uimessage1": "\u001b[32mYour Interactive Hands On Lab Terminal\u001b[m\r\n" 37 | }, 38 | "backend": { 39 | "port": 8080, 40 | "imageid": "kubernetes-cluster-running" 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /helm-classic-package-manager/intro.md: -------------------------------------------------------------------------------- 1 | This scenario teaches you how to use Helm, the package manager for Kubernetes, to deploy Redis. Helm simplifies discovering and deploying services to a Kubernetes cluster. 2 | 3 | ## "Helm is the best way to find, share, and use software built for Kubernetes." 
4 | 5 | More details can be found on their website at http://www.helm.sh/ 6 | -------------------------------------------------------------------------------- /helm-classic-package-manager/set-env.sh: -------------------------------------------------------------------------------- 1 | export KUBERNETES_MASTER=http://host01:8080 2 | -------------------------------------------------------------------------------- /helm-classic-package-manager/step1.md: -------------------------------------------------------------------------------- 1 | Helm is a single binary that manages deploying Charts to Kubernetes. A chart is a packaged unit of kubernetes software. 2 | 3 | `curl -Ls https://get.helm.sh | bash`{{execute}} 4 | 5 | Once installed, update the local cache to sync the latest available packages with the environment. 6 | 7 | `./helmc update`{{execute}} 8 | -------------------------------------------------------------------------------- /helm-classic-package-manager/step2.md: -------------------------------------------------------------------------------- 1 | You can now start deploying software. To find available charts you can use the search command. 2 | 3 | For example, to deploy Redis we need to find a Redis chart. 4 | 5 | `./helmc search redis`{{execute}} 6 | 7 | We can identify more information with the info command. 8 | 9 | `./helmc info redis-cluster`{{execute}} 10 | -------------------------------------------------------------------------------- /helm-classic-package-manager/step3.md: -------------------------------------------------------------------------------- 1 | Use the _install_ command to deploy the chart to your cluster. 2 | 3 | `./helmc install redis-cluster`{{execute}} 4 | 5 | Helm will now launch the required pods. In the next step we'll verify the deployment status. 
6 | -------------------------------------------------------------------------------- /helm-classic-package-manager/step4.md: -------------------------------------------------------------------------------- 1 | Helm deploys all the pods, replication controllers and services. The pod will be in a _pending_ state while the Docker Image is downloaded. Once complete it will move into a _running_ state. You'll now have a Redis Cluster running on top of Kubernetes. 2 | 3 | Use _kubectl_ to find out what was deployed. 4 | 5 | `kubectl get pods`{{execute}} 6 | 7 | `kubectl get rc`{{execute}} 8 | 9 | `kubectl get svc`{{execute}} 10 | -------------------------------------------------------------------------------- /helm-package-manager/courseBase.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/katacoda-scenarios/kubernetes-scenarios/c068ff465d63a8c5aa8868725cc87e29254c4bd3/helm-package-manager/courseBase.sh -------------------------------------------------------------------------------- /helm-package-manager/finish.md: -------------------------------------------------------------------------------- 1 | Congratulations! You've successfully deployed Redis to Kubernetes using Helm. 
2 | 3 | More details can be found on their website at http://www.helm.sh/ 4 | -------------------------------------------------------------------------------- /helm-package-manager/index.json: -------------------------------------------------------------------------------- 1 | { 2 | "pathwayTitle": "Kubernetes", 3 | "title": "Helm Package Manager", 4 | "description": "Use Helm Package Manager for Kubernetes to deploy Redis", 5 | "details": { 6 | "steps": [ 7 | { 8 | "title": "Install Helm", 9 | "code": "set-env.sh", 10 | "text": "step1.md" 11 | }, 12 | { 13 | "title": "Search For Chart", 14 | "text": "step2.md" 15 | }, 16 | { 17 | "title": "Deploy Redis", 18 | "text": "step3.md" 19 | }, 20 | { 21 | "title": "See Results", 22 | "text": "step4.md" 23 | } 24 | ], 25 | "intro": { 26 | "text": "intro.md", 27 | "courseData": "courseBase.sh", 28 | "credits": "" 29 | }, 30 | "finish": { 31 | "text": "finish.md" 32 | } 33 | }, 34 | "environment": { 35 | "uilayout": "terminal", 36 | "uimessage1": "\u001b[32mYour Interactive Hands On Lab Terminal\u001b[m\r\n" 37 | }, 38 | "backend": { 39 | "port": 8080, 40 | "imageid": "kubernetes-cluster-running" 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /helm-package-manager/intro.md: -------------------------------------------------------------------------------- 1 | This scenario teaches you how to use Helm, the package manager for Kubernetes, to deploy Redis. Helm simplifies discovering and deploying services to a Kubernetes cluster. 2 | 3 | ## "Helm is the best way to find, share, and use software built for Kubernetes." 
4 | 5 | More details can be found on their website at http://www.helm.sh/ 6 | -------------------------------------------------------------------------------- /helm-package-manager/set-env.sh: -------------------------------------------------------------------------------- 1 | launch.sh 2 | -------------------------------------------------------------------------------- /helm-package-manager/step1.md: -------------------------------------------------------------------------------- 1 | Helm is a single binary that manages deploying Charts to Kubernetes. A chart is a packaged unit of kubernetes software. It can be downloaded from https://github.com/kubernetes/helm/releases 2 | 3 | `curl -LO https://storage.googleapis.com/kubernetes-helm/helm-v2.3.0-linux-amd64.tar.gz 4 | tar -xvf helm-v2.3.0-linux-amd64.tar.gz 5 | mv linux-amd64/helm /usr/local/bin/`{{execute}} 6 | 7 | 8 | Once installed, initialise Helm and update the local cache to sync the latest available packages with the environment. 9 | 10 | `helm init 11 | helm repo update`{{execute}} 12 | -------------------------------------------------------------------------------- /helm-package-manager/step2.md: -------------------------------------------------------------------------------- 1 | You can now start deploying software. To find available charts you can use the search command. 2 | 3 | For example, to deploy Redis we need to find a Redis chart. 4 | 5 | `helm search redis`{{execute}} 6 | 7 | We can identify more information with the _inspect_ command. 8 | 9 | `helm inspect stable/redis`{{execute}} 10 | -------------------------------------------------------------------------------- /helm-package-manager/step3.md: -------------------------------------------------------------------------------- 1 | Use the _install_ command to deploy the chart to your cluster. 2 | 3 | `helm install stable/redis`{{execute}} 4 | 5 | Helm will now launch the required pods.
You can view all deployed releases using `helm ls`{{execute}} 6 | 7 | If you receive an error that Helm _could not find a ready tiller pod_, it means that Helm is still deploying. Wait a few moments for the tiller Docker Image to finish downloading. 8 | 9 | In the next step we'll verify the deployment status. 10 | -------------------------------------------------------------------------------- /helm-package-manager/step4.md: -------------------------------------------------------------------------------- 1 | Helm deploys all the pods, replication controllers and services. The pod will be in a _pending_ state while the Docker Image is downloaded and until a Persistent Volume is available. Once complete it will move into a _running_ state. You'll now have a Redis Cluster running on top of Kubernetes. 2 | 3 | Use _kubectl_ to find out what was deployed. 4 | 5 | `kubectl get pods`{{execute}} 6 | 7 | `kubectl get deploy`{{execute}} 8 | 9 | `kubectl get svc`{{execute}} 10 | -------------------------------------------------------------------------------- /playground/courseBase.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/katacoda-scenarios/kubernetes-scenarios/c068ff465d63a8c5aa8868725cc87e29254c4bd3/playground/courseBase.sh -------------------------------------------------------------------------------- /playground/finish.md: -------------------------------------------------------------------------------- 1 | Thank you for trying the playground. 
More courses and scenarios are available on our [homepage](/) 2 | -------------------------------------------------------------------------------- /playground/index.json: -------------------------------------------------------------------------------- 1 | { 2 | "pathwayTitle": "Kubernetes", 3 | "icon": "fa-kubernetes", 4 | "title": "Kubernetes Playground", 5 | "description": "Use Kubernetes in a hosted sandboxed interactive environment", 6 | "details": { 7 | "steps": [ 8 | { 9 | "code": "set-env.sh", 10 | "text": "step1.md" 11 | } 12 | ], 13 | "intro": { 14 | "text": "intro.md", 15 | "courseData": "courseBase.sh", 16 | "credits": "" 17 | }, 18 | "finish": { 19 | "text": "finish.md" 20 | } 21 | }, 22 | "environment": { 23 | "showdashboard": true, 24 | "dashboard": "Dashboard", 25 | "uilayout": "terminal-terminal", 26 | "uimessage1": "\u001b[32mYour Interactive Bash Terminal.\u001b[m\r\n" 27 | }, 28 | "backend": { 29 | "port": 8080, 30 | "imageid": "kubernetes-cluster-running" 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /playground/intro.md: -------------------------------------------------------------------------------- 1 | This is a Kubernetes playground. From here you can play with a Kubernetes host and explore its API. 2 | 3 | #### What are playgrounds? 4 | 5 | Playgrounds give you a configured environment to start playing and exploring using an unstructured learning approach. 6 | 7 | Playgrounds are great for experimenting and trying samples. 
To learn more about the technology, start with one of our [labs](/learn) 8 | -------------------------------------------------------------------------------- /playground/set-env.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/katacoda-scenarios/kubernetes-scenarios/c068ff465d63a8c5aa8868725cc87e29254c4bd3/playground/set-env.sh -------------------------------------------------------------------------------- /playground/step1.md: -------------------------------------------------------------------------------- 1 | #### Launch Cluster 2 | 3 | `launch.sh`{{execute}} 4 | 5 | This will create a two-node Kubernetes cluster using WeaveNet for networking. 6 | 7 | #### Health Check 8 | 9 | ` 10 | kubectl cluster-info 11 | `{{execute}} 12 | 13 | 14 | Interested in writing your own Kubernetes scenarios and demos? Visit [www.katacoda.com/teach](http://www.katacoda.com/teach) 15 | --------------------------------------------------------------------------------