├── yum_install_kube.sh ├── kubeadm_init.sh ├── set_up_bridging.sh ├── turn_off_selinux.sh ├── install_flannel.sh ├── create-admin-sa.yaml ├── enable_start_docker_kube.sh ├── create-admin-sa-dashboard.yaml ├── turn_off_swap.sh ├── yum_install_docker_ce.sh ├── ks-demo-pod.yaml ├── kubernetes.repo ├── create-admin-rb.yaml ├── create-admin-rb-dashboard.yaml ├── psp-deploy.yaml ├── ks-setup-step2.sh ├── README.md ├── ks-setup-step1.sh ├── k8s-create.sh ├── k8s-up.sh ├── wget_shell_files.sh ├── kube-aws-config.sh ├── Security_Contexts_Exercise_Command_Summary ├── Authentication_Exercise_Command_summary ├── Authorization_Exercise_Commands_Summary ├── Dashboard_Setup_Command_Summary.md ├── Web UI (Dashboard) Setup.md └── Playground_Setup_Command_Summary.md /yum_install_kube.sh: -------------------------------------------------------------------------------- 1 | yum install -y kubelet kubeadm kubectl 2 | -------------------------------------------------------------------------------- /kubeadm_init.sh: -------------------------------------------------------------------------------- 1 | kubeadm init --pod-network-cidr=10.244.0.0/16 2 | -------------------------------------------------------------------------------- /set_up_bridging.sh: -------------------------------------------------------------------------------- 1 | modprobe br_netfilter 2 | echo '1' > /proc/sys/net/bridge/bridge-nf-call-iptables 3 | -------------------------------------------------------------------------------- /turn_off_selinux.sh: -------------------------------------------------------------------------------- 1 | setenforce 0 2 | sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config 3 | -------------------------------------------------------------------------------- /install_flannel.sh: -------------------------------------------------------------------------------- 1 | kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml 2 | -------------------------------------------------------------------------------- /create-admin-sa.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: admin-user 5 | namespace: kube-system 6 | -------------------------------------------------------------------------------- /enable_start_docker_kube.sh: -------------------------------------------------------------------------------- 1 | systemctl enable docker 2 | systemctl enable kubelet 3 | systemctl start docker 4 | systemctl start kubelet 5 | -------------------------------------------------------------------------------- /create-admin-sa-dashboard.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: admin-user 5 | namespace: kubernetes-dashboard 6 | -------------------------------------------------------------------------------- /turn_off_swap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # swapoff -a to disable swapping 3 | swapoff -a 4 | # sed to comment the swap partition in /etc/fstab 5 | sed -i.bak -r 's/(.+ swap .+)/#\1/' /etc/fstab 6 | -------------------------------------------------------------------------------- /yum_install_docker_ce.sh: -------------------------------------------------------------------------------- 1 | yum install -y yum-utils device-mapper-persistent-data lvm2 2 | yum-config-manager --add-repo 
https://download.docker.com/linux/centos/docker-ce.repo 3 | yum install -y docker-ce 4 | -------------------------------------------------------------------------------- /ks-demo-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: ks-demo-pod 5 | spec: 6 | serviceAccountName: jenkins 7 | containers: 8 | - name: shell 9 | image: alpine:3.7 10 | command: 11 | - "sh" 12 | - "-c" 13 | - "sleep 10000" 14 | -------------------------------------------------------------------------------- /kubernetes.repo: -------------------------------------------------------------------------------- 1 | [kubernetes] 2 | name=Kubernetes 3 | baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 4 | enabled=1 5 | gpgcheck=0 6 | repo_gpgcheck=0 7 | gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg 8 | https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg 9 | -------------------------------------------------------------------------------- /create-admin-rb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: admin-user 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: cluster-admin 9 | subjects: 10 | - kind: ServiceAccount 11 | name: admin-user 12 | namespace: kube-system 13 | -------------------------------------------------------------------------------- /create-admin-rb-dashboard.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: admin-user 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: cluster-admin 9 | subjects: 10 | - kind: ServiceAccount 11 | name: admin-user 12 | namespace: kubernetes-dashboard 13 | -------------------------------------------------------------------------------- /psp-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: psp-deploy 5 | labels: 6 | app: paused 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: paused 12 | template: 13 | metadata: 14 | labels: 15 | app: paused 16 | spec: 17 | containers: 18 | - name: paused 19 | image: k8s.gcr.io/pause 20 | -------------------------------------------------------------------------------- /ks-setup-step2.sh: -------------------------------------------------------------------------------- 1 | chmod +x *.sh &>> ./setup-step2.out 2 | echo "1...\n" >> ./setup-step2.out 3 | ./yum_install_docker_ce.sh &>> ./setup-step2.out 4 | echo "12...\n" >> ./setup-step2.out 5 | ./yum_install_kube.sh &>> ./setup-step2.out 6 | echo "123...\n" >> ./setup-step2.out 7 | ./enable_start_docker_kube.sh &>> ./setup-step2.out 8 | echo "1234...\n" >> ./setup-step2.out 9 | ./set_up_bridging.sh &>> ./setup-step2.out 10 | echo "Complete.\n" >> ./setup-step2.out 11 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![la logo](https://user-images.githubusercontent.com/42839573/67322755-818e9400-f4df-11e9-97c1-388bf357353d.png) 2 | 3 | ## content-kubernetes-security 4 | 5 | This repository is a group of scripts and documents to assist students of Linux Academy in 
taking the Kubernetes Security Course provided at linuxacademy.com 6 | 7 | All scripts and documents in this repository are copyrighted work product of Linux Academy Inc. 8 | 9 | (c) 2019 Linux Academy, All Rights Reserved 10 | -------------------------------------------------------------------------------- /ks-setup-step1.sh: -------------------------------------------------------------------------------- 1 | chmod +x ./*.sh &>> ./setup-step1.out 2 | echo "1...\n" >> ./setup-step1.out 3 | ./turn_off_selinux.sh &>> ./setup-step1.out 4 | echo "12...\n" >> ./setup-step1.out 5 | ./turn_off_swap.sh &>> ./setup-step1.out 6 | echo "123...\n" >> ./setup-step1.out 7 | ./set_up_bridging.sh &>> ./setup-step1.out 8 | echo "1234\n" >> ./setup-step1.out 9 | cp ./kubernetes.repo /etc/yum.repos.d/kubernetes.repo &>> ./setup-step1.out 10 | echo "Rebooting now...\n" >> ./setup-step1.out 11 | reboot 12 | -------------------------------------------------------------------------------- /k8s-create.sh: -------------------------------------------------------------------------------- 1 | export AWS_HOSTED_ZONE="$(aws route53 list-hosted-zones | jq '.HostedZones[0] .Name' | tr -d '"' | sed 's/\.$//' )" 2 | echo "Hosted Zone:" $AWS_HOSTED_ZONE 3 | echo "CREATING S3 BUCKET" 4 | echo "==================" 5 | aws s3 mb s3://k8s3.$AWS_HOSTED_ZONE 6 | export KOPS_STATE_STORE=s3://k8s3.$AWS_HOSTED_ZONE 7 | export KOPS_CLUSTER_NAME=k8s.$AWS_HOSTED_ZONE 8 | echo "CREATING RSA KEY" 9 | echo "=================" 10 | ssh-keygen -q -f ./.ssh/id_rsa -N '' 11 | echo "CREATING CLUSTER" 12 | echo "=================" 13 | kops create cluster --master-size=t2.medium --zones=us-east-1c --name=$KOPS_CLUSTER_NAME 14 | -------------------------------------------------------------------------------- /k8s-up.sh: -------------------------------------------------------------------------------- 1 | export AWS_HOSTED_ZONE="$(aws route53 list-hosted-zones | jq '.HostedZones[0] .Name' | tr -d '"' | sed 's/\.$//' )" 2 | echo "Hosted Zone:" $AWS_HOSTED_ZONE 3 | echo "CREATING S3 BUCKET" 4 | echo "==================" 5 | aws s3 mb s3://k8s3.$AWS_HOSTED_ZONE 6 | export KOPS_STATE_STORE=s3://k8s3.$AWS_HOSTED_ZONE 7 | export KOPS_CLUSTER_NAME=k8s.$AWS_HOSTED_ZONE 8 | echo "CREATING RSA KEY" 9 | echo "=================" 10 | ssh-keygen -q -f ./.ssh/id_rsa -N '' 11 | echo "CREATING CLUSTER" 12 | echo "=================" 13 | kops create cluster --master-size=t2.medium --zones=us-east-1c --name=$KOPS_CLUSTER_NAME 14 | echo "UPDATE CLUSTER" 15 | echo "==============" 16 | kops update cluster --name=$KOPS_CLUSTER_NAME --yes 17 | -------------------------------------------------------------------------------- /wget_shell_files.sh: -------------------------------------------------------------------------------- 1 | wget https://raw.github.com/linuxacademy/content-kubernetes-security/master/enable_start_docker_kube.sh 2 | wget https://raw.github.com/linuxacademy/content-kubernetes-security/master/install_flannel.sh 3 | wget https://raw.github.com/linuxacademy/content-kubernetes-security/master/kubeadm_init.sh 4 | wget https://raw.github.com/linuxacademy/content-kubernetes-security/master/kubernetes.repo 5 | wget https://raw.github.com/linuxacademy/content-kubernetes-security/master/set_cgroup_driver.sh 6 | wget https://raw.github.com/linuxacademy/content-kubernetes-security/master/set_up_bridging.sh 7 | wget https://raw.github.com/linuxacademy/content-kubernetes-security/master/turn_off_selinux.sh 8 | wget 
https://raw.github.com/linuxacademy/content-kubernetes-security/master/turn_off_swap.sh 9 | wget https://raw.github.com/linuxacademy/content-kubernetes-security/master/yum_install_docker_ce.sh 10 | wget https://raw.github.com/linuxacademy/content-kubernetes-security/master/yum_install_kube.sh 11 | wget https://raw.github.com/linuxacademy/content-kubernetes-security/master/ks-setup-step1.sh 12 | wget https://raw.github.com/linuxacademy/content-kubernetes-security/master/ks-setup-step2.sh 13 | -------------------------------------------------------------------------------- /kube-aws-config.sh: -------------------------------------------------------------------------------- 1 | aws ec2 create-key-pair --key-name ks-lab-key --query 'KeyMaterial' --output text > ks-lab-key.pem 2 | chmod 400 ks-lab-key.pem 3 | export AWS_KMS_KEY=$(aws kms create-key | grep Arn | awk '{print $2}' | sed 's/,*$//g' | sed 's/\"//g') 4 | echo "AWS Key:" $AWS_KMS_KEY 5 | export AWS_HOSTED_ZONE="$(aws route53 list-hosted-zones | jq '.HostedZones[0] .Name' | tr -d '"' | sed 's/\.$//' )" 6 | echo "Hosted Zone:" $AWS_HOSTED_ZONE 7 | export AWS_HOSTED_ZONE_ID="$(aws route53 list-hosted-zones | jq '.HostedZones[0] .Id' | tr -d '"' | sed 's/\.$//' )" 8 | echo "Hosted Zone ID:" $AWS_HOSTED_ZONE_ID 9 | aws s3 mb s3://k8s3.$AWS_HOSTED_ZONE 10 | export AWS_S3_BUCKET=s3://k8s3.$AWS_HOSTED_ZONE 11 | echo "kube-aws init \\ " 12 | echo " --cluster-name=k8s-ks-lab \\ " 13 | echo " --region=us-east-1 \\ " 14 | echo " --availability-zone=us-east-1c \\ " 15 | echo " --hosted-zone-id=$AWS_HOSTED_ZONE_ID \\ " 16 | echo " --external-dns-name=k8s.$AWS_HOSTED_ZONE \\ " 17 | echo " --key-name=ks-lab-key \\ " 18 | echo " --kms-key-arn=$AWS_KMS_KEY \\ " 19 | echo " --s3-uri=$AWS_S3_BUCKET" 20 | kube-aws init \ 21 | --cluster-name=k8s-ks-lab \ 22 | --region=us-east-1 \ 23 | --availability-zone=us-east-1c \ 24 | --hosted-zone-id=$AWS_HOSTED_ZONE_ID \ 25 | --external-dns-name=k8s.$AWS_HOSTED_ZONE \ 26 | --key-name=ks-lab-key \ 27 | --kms-key-arn=$AWS_KMS_KEY \ 28 | --s3-uri=$AWS_S3_BUCKET 29 | -------------------------------------------------------------------------------- /Security_Contexts_Exercise_Command_Summary: -------------------------------------------------------------------------------- 1 | Kubernetes Security 2 | Security Contexts Exercise 3 | Commands Summary 4 | 5 | The command to run a pod from the repository specified in the lesson is: 6 | 7 | $ kubectl create -f https://k8s.io/examples/pods/security/security-context.yaml 8 | 9 | NOTE: OPTIONAL: You may use wget to bring the file down to your present working directory by entering: 10 | 11 | $ wget https://k8s.io/examples/pods/security/security-context.yaml 12 | 13 | Then you could look at its contents using an editor or with the Linux cat command. 
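For example, assuming the downloaded file is sitting in your present working directory under its default name, you could view it with: $ cat security-context.yaml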
14 | 15 | If you bring the file down using wget, then you can run the pod with: 16 | 17 | $ kubectl create -f security-context.yaml 18 | 19 | To see which pods are running on your system, enter: 20 | 21 | $ kubectl get pods 22 | 23 | Once the pod is running, you may execute a shell inside the container instance by entering: 24 | 25 | $ kubectl exec -it security-context-demo -- sh 26 | 27 | Once inside the container, you may list the directory contents: 28 | 29 | $ ls -l 30 | 31 | To change directory to the data directory, input: 32 | 33 | $ cd data 34 | 35 | Once the data directory is your present working directory, you can create a file with: 36 | 37 | $ echo "hello" > test.file 38 | 39 | Then list the file with ls to see its owner and group settings: 40 | 41 | $ ls -l 42 | 43 | You may also run a process status command to see the processes running and note the user under which they are running. 44 | 45 | $ ps aux 46 | 47 | You may exit the container shell by entering: 48 | 49 | $ exit 50 | -------------------------------------------------------------------------------- /Authentication_Exercise_Command_summary: -------------------------------------------------------------------------------- 1 | Kubernetes Security 2 | Authentication Exercise Commands Summary 3 | 4 | To create a serviceaccount called jenkins, enter the following command: 5 | 6 | $ kubectl create serviceaccount jenkins 7 | 8 | You may display detailed information about the service account with the following command: 9 | 10 | $ kubectl get serviceaccounts jenkins -o yaml 11 | 12 | To see all secrets in the current namespace, input: 13 | 14 | $ kubectl get secrets 15 | 16 | To interrogate further, you may output yaml for a particular secret with this command: 17 | 18 | $ kubectl get secret -o yaml 19 | 20 | To see all serviceaccounts in the current namespace: 21 | 22 | $ kubectl get serviceaccounts 23 | 24 | To interrogate a serviceaccount: 25 | 26 | $ kubectl describe serviceaccount jenkins 27 | 28 | To run a pod under a specific serviceaccount, create a yaml file as follows: 29 | 30 | You may use the editor to input the file or use wget to download it from the course github site.
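For example, since ks-demo-pod.yaml is included in this repository, it could likely be pulled down with a command along these lines (the exact raw URL is an assumption based on the pattern used elsewhere in this course): $ wget https://raw.github.com/linuxacademy/content-kubernetes-security/master/ks-demo-pod.yaml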
31 | 32 | apiVersion: v1 33 | kind: Pod 34 | metadata: 35 | name: ks-demo-pod 36 | spec: 37 | serviceAccountName: jenkins 38 | containers: 39 | - name: shell 40 | image: alpine:3.7 41 | command: 42 | - "sh" 43 | - "-c" 44 | - "sleep 10000" 45 | 46 | To run the pod, enter this command: 47 | 48 | $ kubectl apply -f ks-demo-pod.yaml 49 | 50 | You may interrogate the pod with the describe verb: 51 | 52 | $ kubectl describe po/ks-demo-pod 53 | 54 | To run a shell inside the container, input: 55 | 56 | $ kubectl exec -it ks-demo-pod -- sh 57 | 58 | Once inside the pod container, you may output the token with the following command: 59 | 60 | / # cat /var/run/secrets/kubernetes.io/serviceaccount/token 61 | -------------------------------------------------------------------------------- /Authorization_Exercise_Commands_Summary: -------------------------------------------------------------------------------- 1 | Kubernetes Security 2 | Authorization Exercise Commands Summary 3 | 4 | To view the clusterroles on your system, enter: 5 | 6 | $ kubectl get clusterroles 7 | 8 | To interrogate a particular clusterrole, input: 9 | 10 | $ kubectl get clusterroles -o yaml 11 | 12 | You may also use the describe verb with clusterroles: 13 | 14 | $ kubectl describe clusterrole view 15 | 16 | To create a namespace called 'qa-test', input the following: 17 | 18 | $ kubectl create namespace qa-test 19 | 20 | To create a serviceaccount called qa-test-account within the namespace qa-test, input: 21 | 22 | $ kubectl --namespace=qa-test create serviceaccount qa-test-account 23 | 24 | To create a role within the qa-test namespace called 'qa-tester-view' that grants the get and list verb actions on the pods resource, input: 25 | 26 | $ kubectl --namespace=qa-test create role qa-tester-view --verb=get --verb=list --resource=pods 27 | 28 | To set up a Linux alias so you don't have to specify the namespace on every command, input: 29 | 30 | $ alias kubeqa='kubectl --namespace=qa-test' 31 | 32 | If you set up the above alias, the command to create the role would be: 33 | 34 | $ kubeqa create role qa-tester-view --verb=get --verb=list --resource=pods 35 | 36 | To describe the role, input either of the following: 37 | 38 | $ kubectl --namespace=qa-test describe role/qa-tester-view 39 | 40 | or 41 | 42 | $ kubeqa describe role/qa-tester-view 43 | 44 | Now the role must be bound to the serviceaccount. To create the rolebinding, input: 45 | 46 | $ kubeqa create rolebinding qa-viewer --role=qa-tester-view --serviceaccount=qa-test:qa-test-account 47 | 48 | To describe the rolebinding, enter: 49 | 50 | $ kubeqa describe rolebinding/qa-viewer 51 | 52 | To test authorization, you may use the 'can-i' argument as follows. To test whether the serviceaccount may list pods, input: 53 | 54 | $ kubeqa auth can-i --as=system:serviceaccount:qa-test:qa-test-account list pods 55 | 56 | To test whether the serviceaccount may list services, input: 57 | 58 | $ kubeqa auth can-i --as=system:serviceaccount:qa-test:qa-test-account list services 59 | -------------------------------------------------------------------------------- /Dashboard_Setup_Command_Summary.md: -------------------------------------------------------------------------------- 1 | ## Kubernetes Security 2 | ## Dashboard Setup Command Summary 3 | 4 | ### THIS LESSON IS BEING DEPRECATED AND THE WEB UI (DASHBOARD) SETUP IS ITS REPLACEMENT 5 | 6 | The following is a command summary to install the Kubernetes Dashboard on your playground server.
7 | 8 | 9 | 1) Create the admin-user service account for the dashboard 10 | 11 | The file create-admin-sa.yaml is available on the course github site. You may download it by entering: 12 | 13 | ```$ wget https://raw.github.com/linuxacademy/content-kubernetes-security/master/create-admin-sa.yaml``` 14 | 15 | ...or, you may simply use vi or the editor of your choice to create the following file. 16 | 17 | ```apiVersion: v1 18 | kind: ServiceAccount 19 | metadata: 20 | name: admin-user 21 | namespace: kube-system 22 | ``` 23 | 24 | After creating the file, enter the following command: 25 | 26 | ```$ kubectl create -f create-admin-sa.yaml``` 27 | 28 | After creating the service account, it is necessary to create the role binding. To do so, you will need to download or create the following file. 29 | 30 | To download: 31 | 32 | ```$ wget https://raw.github.com/linuxacademy/content-kubernetes-security/master/create-admin-rb.yaml``` 33 | 34 | ...or use an editor to create the following file: 35 | 36 | ``` 37 | apiVersion: rbac.authorization.k8s.io/v1 38 | kind: ClusterRoleBinding 39 | metadata: 40 | name: admin-user 41 | roleRef: 42 | apiGroup: rbac.authorization.k8s.io 43 | kind: ClusterRole 44 | name: cluster-admin 45 | subjects: 46 | - kind: ServiceAccount 47 | name: admin-user 48 | namespace: kube-system 49 | ``` 50 | 51 | Once the file has been created, you may input the following command: 52 | 53 | ```$ kubectl create -f create-admin-rb.yaml``` 54 | 55 | After creating the service account and role binding, you may use the following command to query the secret for the admin-user. 56 | 57 | ```$ kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')``` 58 | 59 | The command will provide the following output: 60 | ``` 61 | Name: admin-user-token-ff2mm 62 | Namespace: kube-system 63 | Labels: 64 | Annotations: kubernetes.io/service-account.name: admin-user 65 | kubernetes.io/service-account.uid: c6facdce-2578-11e9-8dc9-062d4745d730 66 | 67 | Type: kubernetes.io/service-account-token 68 | 69 | Data 70 | ==== 71 | ca.crt: 1025 bytes 72 | namespace: 11 bytes 73 | token: 74 | ``` 75 | The token data displayed should be copied off to a document or notepad so that you may use it when it is time to log into the dashboard. 76 | 77 | To install the dashboard software, input the following four commands: 78 | 79 | ``` 80 | $ kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml 81 | $ kubectl apply -f https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/heapster.yaml 82 | $ kubectl apply -f https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/influxdb.yaml 83 | $ kubectl apply -f https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/rbac/heapster-rbac.yaml 84 | ``` 85 | The dashboard is accessed through localhost:8001. To expose this port to a client, you will need to run the Kubernetes proxy and tunnel into the server with ssh. 86 | 87 | To run the proxy, input the following command. You may elect to place an '&' ampersand after the command to run it in background. If you do so, note the PID (Process ID) so you may terminate it later. 88 | 89 | ```$ kubectl proxy``` 90 | 91 | In a terminal emulator window, you may start the tunnel with the following command. Again an '&' ampersand may be used to run it in background.
92 | 93 | ```$ ssh -g -L 8001:localhost:8001 -f -N cloud_user@``` 94 | 95 | Once the tunnel has been established, you may enter the following URL address from the same system that is running the ssh tunnel. 96 | 97 | http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login 98 | 99 | For more information, the following tutorial is available: 100 | https://docs.aws.amazon.com/eks/latest/userguide/dashboard-tutorial.html 101 | 102 | 103 | -------------------------------------------------------------------------------- /Web UI (Dashboard) Setup.md: -------------------------------------------------------------------------------- 1 | ![la logo](https://user-images.githubusercontent.com/42839573/67322755-818e9400-f4df-11e9-97c1-388bf357353d.png) 2 | 3 | ## Kubernetes Security 4 | ### Dashboard Setup Command Summary 5 | 6 | The following is a command summary to install the Kubernetes Dashboard on your playground server. 7 | 8 | 1) Install the Web UI Dashboard using the recommended yaml file provided by Kubernetes: 9 | 10 | ``` $ kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta4/aio/deploy/recommended.yaml``` 11 | 12 | 2) Use the commands below to download the yaml file to create a service account. Then use cat to look at the file contents, and finally apply the file with the kubectl command. 13 | 14 | ``` 15 | $ wget https://raw.githubusercontent.com/linuxacademy/content-kubernetes-security/master/create-admin-sa-dashboard.yaml 16 | $ cat create-admin-sa-dashboard.yaml 17 | $ kubectl apply -f create-admin-sa-dashboard.yaml 18 | ``` 19 | 20 | For your reference, these are the contents of the file. 21 | ``` 22 | apiVersion: v1 23 | kind: ServiceAccount 24 | metadata: 25 | name: admin-user 26 | namespace: kubernetes-dashboard 27 | ``` 28 | 29 | 3) After creating the service account, it is necessary to create the cluster role binding. To do so, you may download the file, examine its contents, and apply it as before with the service account yaml. 30 | 31 | ``` 32 | $ wget https://raw.githubusercontent.com/linuxacademy/content-kubernetes-security/master/create-admin-rb-dashboard.yaml 33 | $ cat create-admin-rb-dashboard.yaml 34 | $ kubectl apply -f create-admin-rb-dashboard.yaml 35 | ``` 36 | 37 | For your reference, the yaml is as follows: 38 | 39 | ``` 40 | apiVersion: rbac.authorization.k8s.io/v1 41 | kind: ClusterRoleBinding 42 | metadata: 43 | name: admin-user 44 | roleRef: 45 | apiGroup: rbac.authorization.k8s.io 46 | kind: ClusterRole 47 | name: cluster-admin 48 | subjects: 49 | - kind: ServiceAccount 50 | name: admin-user 51 | namespace: kubernetes-dashboard 52 | ``` 53 | 54 | 4) After creating the service account and cluster role binding, you may use the following command to query the secret for the admin-user. The reason we do this is to obtain the token that has been created for that user. We will use the token to log in to the dashboard.
55 | 56 | ```$ kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')``` 57 | 58 | The command will provide output similar to the following: 59 | 60 | ``` 61 | Name: admin-user-token-ff2mm 62 | Namespace: kubernetes-dashboard 63 | Labels: 64 | Annotations: kubernetes.io/service-account.name: admin-user 65 | kubernetes.io/service-account.uid: c6facdce-2578-11e9-8dc9-062d4745d730 66 | 67 | Type: kubernetes.io/service-account-token 68 | 69 | Data 70 | ==== 71 | ca.crt: 1025 bytes 72 | namespace: 11 bytes 73 | token: 74 | ``` 75 | 76 | The token data displayed should be copied off to a document or notepad so that you may use it when it is time to log into the dashboard. 77 | 78 | 5) To deploy the dashboard, enter the following command: 79 | 80 | ``` 81 | kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta6/aio/deploy/recommended.yaml 82 | ``` 83 | 84 | 6) The dashboard is accessed through localhost:8001. To expose this port to a client, you will need to run the Kubernetes proxy and tunnel into the server with ssh. 85 | 86 | To run the proxy, input the following command. You may elect to place an '&' ampersand after the command to run it in background. If you do so, note the PID (Process ID) so you may terminate it later. 87 | 88 | ``` 89 | $ kubectl proxy 90 | ``` 91 | 92 | In a terminal emulator window, you may start the tunnel with the following command. Again an '&' ampersand may be used to run it in background. 93 | 94 | ``` 95 | $ ssh -g -L 8001:localhost:8001 -f -N cloud_user@ 96 | ``` 97 | 98 | Once the tunnel has been established, you may enter the following URL address from the same system that is running the ssh tunnel. 99 | 100 | http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/. 101 | 102 | For more information, the following tutorial is available: 103 | https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/ 104 | 105 | Information on creating the service account and cluster role binding may be found here: 106 | https://github.com/kubernetes/dashboard/blob/master/docs/user/access-control/creating-sample-user.md 107 | -------------------------------------------------------------------------------- /Playground_Setup_Command_Summary.md: -------------------------------------------------------------------------------- 1 | ![la logo](https://user-images.githubusercontent.com/42839573/67322755-818e9400-f4df-11e9-97c1-388bf357353d.png) 2 | 3 | ### Kubernetes Security 4 | ### Linux Commands Summary and Guide 5 | 6 | #### Preparing the Playground Servers 7 | 8 | From linuxacademy.com, Cloud Servers -> Playground 9 | 10 | Set up Medium Servers with CentOS in our Cloud Playgrounds. Label one as Master, a second as Worker Node1, and a third as Worker Node2. 11 | 12 | You may change the Zone from North America but please use the same Zone for all three servers. 13 | 14 | Copy off the Public IP Address and the cloud_user credentials: the login and temporary password. 15 | 16 | #### Setting Up The Master Node 17 | 18 | The URL for the course assets in GitHub is: 19 | https://github.com/linuxacademy/content-kubernetes-security 20 | 21 | Access your servers through the SSH utility on your client or our terminal session provided through linuxacademy.com.
22 | 23 | ` $ ssh cloud_user@ ` 24 | 25 | Use the temporary password for first login, and when it prompts you to change the password, set the password to whatever you choose. 26 | 27 | #### On the Master Node: 28 | 29 | Run a wget command to pull down the script that will pull the others. 30 | 31 | ` $ wget https://raw.github.com/linuxacademy/content-kubernetes-security/master/wget_shell_files.sh ` 32 | 33 | Change the permissions to add execute permission to the shell script file. 34 | 35 | ` $ chmod +x wget_shell_files.sh ` 36 | 37 | Execute the shell script from your current working directory. 38 | 39 | ` $ ./wget_shell_files.sh ` 40 | 41 | #### On the Worker Nodes 42 | 43 | Repeat the wget of the main shell script on Node1 and Node2 servers. 44 | 45 | ` $ wget https://raw.github.com/linuxacademy/content-kubernetes-security/master/wget_shell_files.sh ` 46 | 47 | Change the permissions to add execute permission to the shell script file. 48 | 49 | ` $ chmod +x wget_shell_files.sh ` 50 | 51 | Execute the shell script from your current working directory. 52 | 53 | ` $ ./wget_shell_files.sh ` 54 | 55 | #### Back On the Master Node 56 | 57 | Use the sudo command to become the super user. 58 | 59 | ` $ sudo su ` 60 | 61 | Use the chmod command to grant execute permission to all of the shell script files. 62 | 63 | ` # chmod +x *.sh ` 64 | 65 | Then, from the present working directory that contains all of the downloaded scripts, run the first script. 66 | 67 | ` # ./ks-setup-step1.sh ` 68 | 69 | > NOTE: The above script will make some configuration edits to your server and reboot the server, so your ssh connection will be closed. 70 | 71 | After your servers reboot, you will need to reestablish your ssh session. YOU MAY NEED TO VERIFY in linuxacademy.com whether the Public IP Address for your servers has changed. If so, make note of the new IPs as before. Your passwords will not have changed. 72 | 73 | After you reestablish your session as cloud_user, use the sudo command to become super user. 74 | 75 | ` $ sudo su ` 76 | 77 | From the directory containing the scripts, run the second setup script. 78 | 79 | ` # ./ks-setup-step2.sh ` 80 | 81 | Verify that docker is installed. 82 | 83 | ` # docker version ` 84 | 85 | Verify that kubeadm has been installed. 86 | 87 | ` # kubeadm version ` 88 | 89 | Enter the command (on the master node) to initialize a cluster. 90 | 91 | ` # kubeadm init --pod-network-cidr=10.244.0.0/16 ` 92 | 93 | > NOTE: The --pod-network-cidr address pool being supplied is intended to facilitate the use of the flannel network overlay which will be installed in a subsequent step. 94 | 95 | > VERY IMPORTANT 96 | Copy off the kubeadm join command that is presented when the kubeadm init completes. That join command has a token that is needed by the worker nodes to join the master. 97 | 98 | > ALSO NOTE: kubeadm creates tokens for the worker nodes to join and they expire within 24 hours. So you will want to join your nodes to the cluster soon after configuring the master node. 99 | 100 | Copy off the commands presented that are necessary to copy the config file for kubectl. 101 | 102 | Now exit the super user shell session. 103 | 104 | ` $ exit ` 105 | 106 | Use cd to change to your home directory if necessary. 107 | 108 | ` $ cd ` 109 | 110 | Create the hidden directory called .kube. 111 | 112 | ` $ mkdir -p .kube ` 113 | 114 | Copy the kubernetes admin configuration from /etc/kubernetes to the .kube directory in our home path, and name it config.
115 | 116 | ` $ sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config ` 117 | 118 | Change ownership and group ownership of the config file to cloud_user. 119 | 120 | ` $ sudo chown $(id -u):$(id -g) $HOME/.kube/config ` 121 | 122 | Use kubectl to see the status of the master node. 123 | 124 | ` $ kubectl get nodes ` 125 | 126 | Install the flannel network overlay with this command. 127 | 128 | ` $ kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml ` 129 | 130 | Continue to execute a get nodes command until your master is ready. 131 | 132 | ` $ kubectl get nodes ` 133 | 134 | #### Setting Up The Worker Nodes (Perform these commands on both worker nodes) 135 | 136 | On the worker nodes, establish a super user session. 137 | 138 | ` $ sudo su` 139 | 140 | Change the shell files to add execute permission. 141 | 142 | `# chmod +x *.sh` 143 | 144 | Execute the first setup script. 145 | 146 | ` # ./ks-setup-step1.sh` 147 | 148 | > NOTE: This script will disconnect you and reboot the server, just as it did on the master node. 149 | 150 | After your servers have rebooted, reestablish an ssh session on both worker nodes. 151 | 152 | Execute the script to do the step2 installs. 153 | 154 | ` $ sudo su ` 155 | 156 | ` # ./ks-setup-step2.sh ` 157 | 158 | Verify that docker and kubeadm are installed. 159 | 160 | ` # docker version ` 161 | 162 | ` # kubeadm version ` 163 | 164 | Using the kubeadm join command that you copied off before, join the worker node to your master. 165 | 166 | ` # kubeadm join :6443 --token --discovery-token-ca-cert-hash sha ` 167 | 168 | Now, back on the master, use kubectl to determine that the worker nodes have joined the cluster. 169 | 170 | ` $ kubectl get nodes ` 171 | 172 | #### Validating The Cluster 173 | 174 | On the master node, use kubectl to exercise a few commands to validate the cluster. 175 | 176 | ` $ kubectl get namespaces ` 177 | 178 | ` $ kubectl get roles --all-namespaces ` 179 | 180 | ` $ kubectl get serviceaccounts --all-namespaces ` 181 | 182 | Test your cluster by doing a deployment. 183 | 184 | ` $ kubectl create deployment hello-node --image=gcr.io/hello-minikube-zero-install/hello-node ` 185 | 186 | Verify the deployment is ready. 187 | 188 | ` $ kubectl get deployments ` 189 | 190 | Verify the deployment launched the pod. 191 | 192 | ` $ kubectl get pods ` 193 | 194 | Use the get events command to review the events that have occurred on your cluster. 195 | 196 | ` $ kubectl get events ` 197 | 198 | Then, when you are ready, delete the deployment to clean up. 199 | 200 | ` $ kubectl delete deployment/hello-node ` 201 | 202 | #### Some Other Helpful Commands 203 | 204 | Throughout the course it is useful to be able to interrogate your cluster for a variety of resources that may exist. You may also use abbreviations or ‘shortnames’ of most resource names. To view all of the resources and their abbreviations, use the command: 205 | 206 | ` $ kubectl api-resources ` 207 | 208 | 209 | To Re-Instantiate Your Cluster WITHOUT recreating your Cloud Playground Server Images 210 | 211 | On the Master and Worker Nodes 212 | 213 | ` # kubeadm reset ` 214 | 215 | Then just repeat the init command on the master. 216 | 217 | ` # kubeadm init --pod-network-cidr=10.244.0.0/16 ` 218 | 219 | > NOTE: Copy off the join commands 220 | 221 | Configure kubectl as before in .kube, just with the copy and chown if needed.
222 | 223 | ` $ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config ` 224 | ` $ sudo chown $(id -u):$(id -g) $HOME/.kube/config ` 225 | 226 | Use kubectl to see the status of the master node. 227 | 228 | ` $ kubectl get nodes ` 229 | 230 | Install the flannel network overlay with this command. 231 | 232 | ` $ kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml ` 233 | 234 | And use the join commands to rejoin the worker nodes. 235 | 236 | --------------------------------------------------------------------------------
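> NOTE (supplemental, not part of the original lesson steps): because the join tokens created by kubeadm init expire within 24 hours, a fresh join command for the worker nodes may be generated on the master at any time with ` # kubeadm token create --print-join-command `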