├── .gitignore ├── 00-cluster-prep ├── 00-weave-network │ ├── master │ │ ├── destroy_master.sh │ │ └── init_master.sh │ └── worker │ │ ├── destroy_worker.sh │ │ └── init_worker.sh ├── 01-calico │ ├── master │ │ ├── destroy_master.sh │ │ └── init_master.sh │ └── worker │ │ ├── destroy_worker.sh │ │ └── init_worker.sh ├── cleanup_images.sh ├── refresh_node.sh └── setup_node.sh ├── 01-dashboard ├── dashboard-insecure.yaml └── dashboard.yaml ├── 02-ingress ├── banana.yaml ├── cert.pem ├── create-tls-secret.sh ├── ingress.yaml ├── key.pem ├── mango.yaml ├── orange.yaml ├── pineapple.yaml ├── remove_ingress.sh └── setup_ingress.sh ├── 03-rbac ├── clusterrole-binding │ └── deploy-deleter.yaml ├── csr │ ├── create-csr.sh │ ├── jane.conf │ └── template.yaml └── role-rolebinding │ ├── blue-get-secrets.yaml │ └── red-get-secrets.yaml ├── 04-sa ├── disable-sa-automount.yaml ├── pod.yaml └── sa.yaml ├── 05-restrict-api-access ├── disable-anonymous-api-access.yaml └── insecure-apiserver.yaml ├── 06-manage-secrets ├── mysecret-01.yaml ├── mysecret-02.yaml └── secret-pod.yaml ├── 07-encrypt-etcd ├── encrypt-config.yaml └── reencrypt-all-secrets.sh ├── 08-runtime-class ├── gvisor.yaml └── runtimeclass.yaml ├── 09-microservices-os-level-security ├── allow-priv-esc.yaml ├── pod-user-group.yaml ├── priv-containers.yaml └── run-as-nonroot.yaml ├── 10-psp ├── allow-priv-esc.yaml ├── example-psp.yaml ├── kube-apiserver.yaml ├── priv-containers.yaml └── psp-role.yaml ├── 11-sidecar └── app.yaml ├── 12-opa ├── deny-all │ ├── all_pod_always_deny.yaml │ └── alwaysdeny_template.yaml ├── deployment-replica-count │ ├── all_deployment_must_have_min_replicacount.yaml │ └── k8sminreplicacount_template.yaml ├── namespace-labels │ ├── all_ns_must_have_cks.yaml │ ├── all_pod_must_have_cks.yaml │ └── k8srequiredlabels_template.yaml ├── opa-gatekeeper.yaml └── whitelist-registries │ ├── all_pod_must_have_trusted_images.yaml │ └── k8strustedimages_template.yaml ├── 13-image-footprint ├── default │ ├── Dockerfile │ └── app.go ├── multi-stage │ ├── Dockerfile │ └── app.go ├── no-root │ ├── Dockerfile │ └── app.go ├── no-shell │ ├── Dockerfile │ └── app.go ├── pkg-versions │ ├── Dockerfile │ └── app.go └── read-only-fs │ ├── Dockerfile │ └── app.go ├── 14-networkpolicies ├── db.yaml ├── default-deny-allow-dns.yaml ├── default-deny.yaml ├── merged.yaml └── pod-selector.yaml ├── 15-conftest ├── docker │ ├── Dockerfile │ ├── policy │ │ ├── base.rego │ │ └── commands.rego │ └── run.sh └── kubernetes │ ├── deploy.yaml │ ├── policy │ └── deployment.rego │ └── run.sh ├── 16-imgpolicy-webhook ├── admission_config.yaml ├── apiserver-client-cert.pem ├── apiserver-client-key.pem ├── external-cert.pem ├── external-key.pem └── kubeconf ├── 17-immutability ├── immutable-pod.yaml └── readonly-fs.yaml ├── 18-auditing └── policy │ ├── all-metadata.yaml │ └── generic.yaml ├── 19-upgrade-scenario ├── base │ └── 1.18.0 │ │ ├── master │ │ ├── destroy_master.sh │ │ └── init_master.sh │ │ ├── setup_node.sh │ │ └── worker │ │ ├── destroy_worker.sh │ │ └── init_worker.sh └── upgrade │ └── 1.19.6 │ ├── upgrade_master.sh │ └── upgrade_worker.sh ├── LICENSE └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | **.key 2 | **.crt 3 | **.csr 4 | **.pem 5 | -------------------------------------------------------------------------------- /00-cluster-prep/00-weave-network/master/destroy_master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Use this 
script to destroy master prior to shut down 3 | 4 | kubeadm reset -f 5 | rm -rf /etc/cni/net.d /etc/kubernetes /var/lib/etcd /var/lib/kubelet /var/run/kubernetes /var/lib/cni ~/.kube /opt/cni 6 | iptables -F 7 | -------------------------------------------------------------------------------- /00-cluster-prep/00-weave-network/master/init_master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Use this script to initialize master 3 | 4 | KUBE_VERSION=1.21.1 5 | HOST_IP=`/sbin/ifconfig enp0s8 | egrep -o 'inet [0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | cut -d' ' -f2` 6 | ### init k8s 7 | kubeadm init --apiserver-advertise-address=${HOST_IP} --kubernetes-version=${KUBE_VERSION} --ignore-preflight-errors=NumCPU --skip-token-print 8 | ip route add 10.96.0.0/16 dev enp0s8 src ${HOST_IP} 9 | 10 | mkdir -p $HOME/.kube 11 | sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 12 | sudo chown $(id -u):$(id -g) $HOME/.kube/config 13 | 14 | kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" 15 | 16 | echo 17 | echo "### COMMAND TO ADD A WORKER NODE ###" 18 | kubeadm token create --print-join-command --ttl 0 19 | -------------------------------------------------------------------------------- /00-cluster-prep/00-weave-network/worker/destroy_worker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Use this script to destroy worker node prior to shut down 3 | 4 | kubeadm reset -f 5 | rm -rf ~/.kube /etc/cni/net.d /etc/kubernetes /var/lib/etcd /var/lib/kubelet /var/run/kubernetes /var/lib/cni 6 | iptables -F 7 | -------------------------------------------------------------------------------- /00-cluster-prep/00-weave-network/worker/init_worker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Use this script to initialize worker node 3 | 4 | HOST_IP=`/sbin/ifconfig enp0s8 | egrep -o 'inet [0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | cut -d' ' -f2` 5 | ip route add 10.96.0.0/16 dev enp0s8 src ${HOST_IP} 6 | 7 | echo 8 | echo "EXECUTE ON MASTER: kubeadm token create --print-join-command --ttl 0" 9 | echo "THEN RUN THE OUTPUT AS COMMAND HERE TO ADD AS WORKER" 10 | echo 11 | -------------------------------------------------------------------------------- /00-cluster-prep/01-calico/master/destroy_master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Use this script to destroy master prior to shut down 3 | 4 | kubeadm reset -f 5 | rm -rf ~/.kube /etc/cni/net.d /etc/kubernetes /var/lib/etcd /var/lib/kubelet /var/run/kubernetes /var/lib/cni /opt/cni 6 | iptables -F 7 | -------------------------------------------------------------------------------- /00-cluster-prep/01-calico/master/init_master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Use this script to initialize master 3 | 4 | KUBE_VERSION=1.21.1 5 | HOST_IP=`/sbin/ifconfig enp0s8 | egrep -o 'inet [0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | cut -d' ' -f2` 6 | ### init k8s 7 | kubeadm init --apiserver-advertise-address=${HOST_IP} --kubernetes-version=${KUBE_VERSION} --pod-network-cidr=192.168.0.0/16 --ignore-preflight-errors=NumCPU --skip-token-print 8 | ip route add 10.96.0.0/16 dev enp0s8 src ${HOST_IP} 9 | 10 | mkdir -p $HOME/.kube 11 | sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 12 | sudo 
chown $(id -u):$(id -g) $HOME/.kube/config 13 | 14 | kubectl taint nodes --all node-role.kubernetes.io/master- 15 | 16 | kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml 17 | 18 | echo 19 | echo "### COMMAND TO ADD A WORKER NODE ###" 20 | kubeadm token create --print-join-command --ttl 0 21 | -------------------------------------------------------------------------------- /00-cluster-prep/01-calico/worker/destroy_worker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Use this script to destroy worker node prior to shut down 3 | 4 | kubeadm reset -f 5 | rm -rf ~/.kube /etc/cni/net.d /etc/kubernetes /var/lib/etcd /var/lib/kubelet /var/run/kubernetes /var/lib/cni 6 | iptables -F 7 | -------------------------------------------------------------------------------- /00-cluster-prep/01-calico/worker/init_worker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Use this script to initialize worker node 3 | 4 | HOST_IP=`/sbin/ifconfig enp0s8 | egrep -o 'inet [0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | cut -d' ' -f2` 5 | ip route add 10.96.0.0/16 dev enp0s8 src ${HOST_IP} 6 | 7 | echo 8 | echo "EXECUTE ON MASTER: kubeadm token create --print-join-command --ttl 0" 9 | echo "THEN RUN THE OUTPUT AS COMMAND HERE TO ADD AS WORKER" 10 | echo 11 | -------------------------------------------------------------------------------- /00-cluster-prep/cleanup_images.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Clean up images with a search string. 3 | # name/tag provided as initial argument to the script 4 | 5 | KUBE_VERSION=$1 6 | docker rmi $(docker images | grep $KUBE_VERSION | tr -s ' ' | cut -d ' ' -f 3) 7 | -------------------------------------------------------------------------------- /00-cluster-prep/refresh_node.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Use this script to set up any node in your Kubernetes cluster 3 | # Either master or worker 4 | # Source: http://kubernetes.io/docs/getting-started-guides/kubeadm/ 5 | 6 | ### setup terminal 7 | KUBE_VERSION=1.21.1 8 | 9 | #Turn Off Swap 10 | sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab 11 | swapoff -a 12 | 13 | ### install k8s and docker 14 | apt-get remove -y kubelet kubernetes-cni 15 | apt-get autoremove -y 16 | systemctl daemon-reload 17 | 18 | apt-get update 19 | apt-get -y install linux-headers-$(uname -r) 20 | apt-get install -y etcd-client vim build-essential bash-completion binutils apparmor-utils falco kubelet=${KUBE_VERSION}-00 kubeadm=${KUBE_VERSION}-00 kubectl=${KUBE_VERSION}-00 kubernetes-cni=0.8.7-00 trivy 21 | 22 | cat > /etc/docker/daemon.json <> ~/.vimrc 10 | echo 'set tabstop=2' >> ~/.vimrc 11 | echo 'set shiftwidth=2' >> ~/.vimrc 12 | echo 'set expandtab' >> ~/.vimrc 13 | echo 'source <(kubectl completion bash)' >> ~/.bashrc 14 | echo 'alias k=kubectl' >> ~/.bashrc 15 | echo 'alias c=clear' >> ~/.bashrc 16 | echo 'complete -F __start_kubectl k' >> ~/.bashrc 17 | sed -i '1s/^/force_color_prompt=yes\n/' ~/.bashrc 18 | 19 | #Turn Off Swap 20 | sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab 21 | swapoff -a 22 | 23 | apt-get update 24 | apt-get install wget apt-transport-https gnupg lsb-release -y 25 | 26 | curl -s https://falco.org/repo/falcosecurity-3672BA8F.asc | apt-key add - 27 | echo "deb https://download.falco.org/packages/deb stable main" > 
/etc/apt/sources.list.d/falcosecurity.list 28 | 29 | curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - 30 | cat <<EOF > /etc/apt/sources.list.d/kubernetes.list 31 | deb http://apt.kubernetes.io/ kubernetes-xenial main 32 | EOF 33 | 34 | wget -qO - https://aquasecurity.github.io/trivy-repo/deb/public.key | sudo apt-key add - 35 | echo "deb https://aquasecurity.github.io/trivy-repo/deb $(lsb_release -sc) main" > /etc/apt/sources.list.d/trivy.list 36 | 37 | ### install k8s and docker 38 | apt-get remove -y docker.io kubelet kubeadm kubectl kubernetes-cni docker-ce 39 | apt-get autoremove -y 40 | systemctl daemon-reload 41 | 42 | apt-get update -y 43 | apt-get -y install linux-headers-$(uname -r) 44 | apt-get install -y etcd-client vim build-essential bash-completion binutils apparmor-utils falco docker.io kubelet=${KUBE_VERSION}-00 kubeadm=${KUBE_VERSION}-00 kubectl=${KUBE_VERSION}-00 kubernetes-cni=0.8.7-00 trivy 45 | 46 | ### install kube-bench 47 | curl -L https://github.com/aquasecurity/kube-bench/releases/download/v0.3.1/kube-bench_0.3.1_linux_amd64.tar.gz -o /tmp/kube-bench_0.3.1_linux_amd64.tar.gz 48 | mkdir -p /tmp/kube-bench 49 | tar -xzf /tmp/kube-bench_0.3.1_linux_amd64.tar.gz -C /tmp/kube-bench 50 | cp /tmp/kube-bench/kube-bench /usr/local/bin/kube-bench 51 | mkdir -p /etc/kube-bench 52 | cp -Rf /tmp/kube-bench/cfg /etc/kube-bench/ 53 | rm -rf /tmp/kube-bench* 54 | 55 | 56 | ### install kubesec 57 | curl -L https://github.com/controlplaneio/kubesec/releases/download/v2.8.0/kubesec_linux_386.tar.gz -o /tmp/kubesec_linux_386.tar.gz 58 | mkdir -p /tmp/kubesec 59 | tar -xvf /tmp/kubesec_linux_386.tar.gz -C /tmp/kubesec/ 60 | cp /tmp/kubesec/kubesec /usr/bin/kubesec 61 | rm -rf /tmp/kubesec* 62 | 63 | cat > /etc/docker/daemon.json < jane-csr.yaml 7 | 8 | -------------------------------------------------------------------------------- /03-rbac/csr/jane.conf: -------------------------------------------------------------------------------- 1 | [req] 2 | default_bits = 2048 3 | distinguished_name = dn 4 | prompt = no 5 | 6 | [dn] 7 | C="IN" 8 | ST="India" 9 | L="MH" 10 | O="PNQ" 11 | OU="PNQ" 12 | emailAddress="name@mail.com" 13 | CN="jane" 14 | -------------------------------------------------------------------------------- /03-rbac/csr/template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: certificates.k8s.io/v1 2 | kind: CertificateSigningRequest 3 | metadata: 4 | name: jane 5 | spec: 6 | groups: 7 | - system:authenticated 8 | request: CSR_VAL 9 | signerName: kubernetes.io/kube-apiserver-client 10 | usages: 11 | - client auth 12 | -------------------------------------------------------------------------------- /03-rbac/role-rolebinding/blue-get-secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | creationTimestamp: null 5 | name: secret-manager 6 | namespace: blue 7 | rules: 8 | - apiGroups: 9 | - "" 10 | resources: 11 | - secrets 12 | verbs: 13 | - get 14 | - list 15 | --- 16 | apiVersion: rbac.authorization.k8s.io/v1 17 | kind: RoleBinding 18 | metadata: 19 | creationTimestamp: null 20 | name: secret-binding 21 | namespace: blue 22 | roleRef: 23 | apiGroup: rbac.authorization.k8s.io 24 | kind: Role 25 | name: secret-manager 26 | subjects: 27 | - apiGroup: rbac.authorization.k8s.io 28 | kind: User 29 | name: jane 30 | 
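Note on blue-get-secrets.yaml above: the Role/RoleBinding pair grants the user jane get/list access to Secrets only inside the blue namespace. A minimal way to verify the binding after applying the manifest (user name and namespaces taken from the YAML above) is kubectl's impersonation check:

kubectl auth can-i get secrets --as jane -n blue       # expected: yes
kubectl auth can-i delete secrets --as jane -n blue    # expected: no (verb not granted)
kubectl auth can-i get secrets --as jane -n default    # expected: no (Role is namespaced to blue)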
-------------------------------------------------------------------------------- /03-rbac/role-rolebinding/red-get-secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | creationTimestamp: null 5 | name: secret-manager 6 | namespace: red 7 | rules: 8 | - apiGroups: 9 | - "" 10 | resources: 11 | - secrets 12 | verbs: 13 | - get 14 | --- 15 | apiVersion: rbac.authorization.k8s.io/v1 16 | kind: RoleBinding 17 | metadata: 18 | creationTimestamp: null 19 | name: secret-binding 20 | namespace: red 21 | roleRef: 22 | apiGroup: rbac.authorization.k8s.io 23 | kind: Role 24 | name: secret-manager 25 | subjects: 26 | - apiGroup: rbac.authorization.k8s.io 27 | kind: User 28 | name: jane 29 | -------------------------------------------------------------------------------- /04-sa/disable-sa-automount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | run: pod 7 | name: pod 8 | spec: 9 | automountServiceAccountToken: false 10 | containers: 11 | - image: nginx 12 | name: pod 13 | resources: {} 14 | dnsPolicy: ClusterFirst 15 | restartPolicy: Always 16 | status: {} 17 | -------------------------------------------------------------------------------- /04-sa/pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | run: pod 7 | name: pod 8 | spec: 9 | containers: 10 | - image: nginx 11 | name: pod 12 | resources: {} 13 | automountServiceAccountToken: true 14 | dnsPolicy: ClusterFirst 15 | restartPolicy: Always 16 | status: {} 17 | -------------------------------------------------------------------------------- /04-sa/sa.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | creationTimestamp: null 5 | name: podaccess 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1 8 | kind: RoleBinding 9 | metadata: 10 | creationTimestamp: null 11 | name: podaccess 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: ClusterRole 15 | name: edit 16 | subjects: 17 | - kind: ServiceAccount 18 | name: podaccess 19 | namespace: default 20 | -------------------------------------------------------------------------------- /05-restrict-api-access/disable-anonymous-api-access.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | annotations: 5 | kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: 172.16.99.21:6443 6 | creationTimestamp: null 7 | labels: 8 | component: kube-apiserver 9 | tier: control-plane 10 | name: kube-apiserver 11 | namespace: kube-system 12 | spec: 13 | containers: 14 | - command: 15 | - kube-apiserver 16 | - --anonymous-auth=false # Disabled anonymous access 17 | - --advertise-address={IP} 18 | - --allow-privileged=true 19 | - --authorization-mode=Node,RBAC 20 | - --client-ca-file=/etc/kubernetes/pki/ca.crt 21 | - --enable-admission-plugins=NodeRestriction 22 | - --enable-bootstrap-token-auth=true 23 | - --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt 24 | - --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt 25 | - --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key 26 | - --etcd-servers=https://127.0.0.1:2379 27 | - 
--insecure-port=0 28 | - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt 29 | - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key 30 | - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname 31 | - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt 32 | - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key 33 | - --requestheader-allowed-names=front-proxy-client 34 | - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt 35 | - --requestheader-extra-headers-prefix=X-Remote-Extra- 36 | - --requestheader-group-headers=X-Remote-Group 37 | - --requestheader-username-headers=X-Remote-User 38 | - --secure-port=6443 39 | - --service-account-key-file=/etc/kubernetes/pki/sa.pub 40 | - --service-cluster-ip-range=10.96.0.0/12 41 | - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt 42 | - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key 43 | image: k8s.gcr.io/kube-apiserver:v1.19.3 44 | imagePullPolicy: IfNotPresent 45 | livenessProbe: 46 | failureThreshold: 8 47 | httpGet: 48 | host: {IP} 49 | path: /livez 50 | port: 6443 51 | scheme: HTTPS 52 | initialDelaySeconds: 10 53 | periodSeconds: 10 54 | timeoutSeconds: 15 55 | name: kube-apiserver 56 | readinessProbe: 57 | failureThreshold: 3 58 | httpGet: 59 | host: {IP} 60 | path: /readyz 61 | port: 6443 62 | scheme: HTTPS 63 | periodSeconds: 1 64 | timeoutSeconds: 15 65 | resources: 66 | requests: 67 | cpu: 250m 68 | startupProbe: 69 | failureThreshold: 24 70 | httpGet: 71 | host: {IP} 72 | path: /livez 73 | port: 6443 74 | scheme: HTTPS 75 | initialDelaySeconds: 10 76 | periodSeconds: 10 77 | timeoutSeconds: 15 78 | volumeMounts: 79 | - mountPath: /etc/ssl/certs 80 | name: ca-certs 81 | readOnly: true 82 | - mountPath: /etc/ca-certificates 83 | name: etc-ca-certificates 84 | readOnly: true 85 | - mountPath: /etc/kubernetes/pki 86 | name: k8s-certs 87 | readOnly: true 88 | - mountPath: /usr/local/share/ca-certificates 89 | name: usr-local-share-ca-certificates 90 | readOnly: true 91 | - mountPath: /usr/share/ca-certificates 92 | name: usr-share-ca-certificates 93 | readOnly: true 94 | hostNetwork: true 95 | priorityClassName: system-node-critical 96 | volumes: 97 | - hostPath: 98 | path: /etc/ssl/certs 99 | type: DirectoryOrCreate 100 | name: ca-certs 101 | - hostPath: 102 | path: /etc/ca-certificates 103 | type: DirectoryOrCreate 104 | name: etc-ca-certificates 105 | - hostPath: 106 | path: /etc/kubernetes/pki 107 | type: DirectoryOrCreate 108 | name: k8s-certs 109 | - hostPath: 110 | path: /usr/local/share/ca-certificates 111 | type: DirectoryOrCreate 112 | name: usr-local-share-ca-certificates 113 | - hostPath: 114 | path: /usr/share/ca-certificates 115 | type: DirectoryOrCreate 116 | name: usr-share-ca-certificates 117 | status: {} 118 | -------------------------------------------------------------------------------- /05-restrict-api-access/insecure-apiserver.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | annotations: 5 | kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: 172.16.99.21:6443 6 | creationTimestamp: null 7 | labels: 8 | component: kube-apiserver 9 | tier: control-plane 10 | name: kube-apiserver 11 | namespace: kube-system 12 | spec: 13 | containers: 14 | - command: 15 | - kube-apiserver 16 | - --anonymous-auth=false 17 | - --advertise-address=172.16.99.21 18 | - --allow-privileged=true 19 | - 
--authorization-mode=Node,RBAC 20 | - --client-ca-file=/etc/kubernetes/pki/ca.crt 21 | - --enable-admission-plugins=NodeRestriction 22 | - --enable-bootstrap-token-auth=true 23 | - --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt 24 | - --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt 25 | - --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key 26 | - --etcd-servers=https://127.0.0.1:2379 27 | - --insecure-port=8080 28 | - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt 29 | - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key 30 | - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname 31 | - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt 32 | - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key 33 | - --requestheader-allowed-names=front-proxy-client 34 | - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt 35 | - --requestheader-extra-headers-prefix=X-Remote-Extra- 36 | - --requestheader-group-headers=X-Remote-Group 37 | - --requestheader-username-headers=X-Remote-User 38 | - --secure-port=6443 39 | - --service-account-key-file=/etc/kubernetes/pki/sa.pub 40 | - --service-cluster-ip-range=10.96.0.0/12 41 | - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt 42 | - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key 43 | image: k8s.gcr.io/kube-apiserver:v1.19.3 44 | imagePullPolicy: IfNotPresent 45 | livenessProbe: 46 | failureThreshold: 8 47 | httpGet: 48 | host: 172.16.99.21 49 | path: /livez 50 | port: 6443 51 | scheme: HTTPS 52 | initialDelaySeconds: 10 53 | periodSeconds: 10 54 | timeoutSeconds: 15 55 | name: kube-apiserver 56 | readinessProbe: 57 | failureThreshold: 3 58 | httpGet: 59 | host: 172.16.99.21 60 | path: /readyz 61 | port: 6443 62 | scheme: HTTPS 63 | periodSeconds: 1 64 | timeoutSeconds: 15 65 | resources: 66 | requests: 67 | cpu: 250m 68 | startupProbe: 69 | failureThreshold: 24 70 | httpGet: 71 | host: 172.16.99.21 72 | path: /livez 73 | port: 6443 74 | scheme: HTTPS 75 | initialDelaySeconds: 10 76 | periodSeconds: 10 77 | timeoutSeconds: 15 78 | volumeMounts: 79 | - mountPath: /etc/ssl/certs 80 | name: ca-certs 81 | readOnly: true 82 | - mountPath: /etc/ca-certificates 83 | name: etc-ca-certificates 84 | readOnly: true 85 | - mountPath: /etc/kubernetes/pki 86 | name: k8s-certs 87 | readOnly: true 88 | - mountPath: /usr/local/share/ca-certificates 89 | name: usr-local-share-ca-certificates 90 | readOnly: true 91 | - mountPath: /usr/share/ca-certificates 92 | name: usr-share-ca-certificates 93 | readOnly: true 94 | hostNetwork: true 95 | priorityClassName: system-node-critical 96 | volumes: 97 | - hostPath: 98 | path: /etc/ssl/certs 99 | type: DirectoryOrCreate 100 | name: ca-certs 101 | - hostPath: 102 | path: /etc/ca-certificates 103 | type: DirectoryOrCreate 104 | name: etc-ca-certificates 105 | - hostPath: 106 | path: /etc/kubernetes/pki 107 | type: DirectoryOrCreate 108 | name: k8s-certs 109 | - hostPath: 110 | path: /usr/local/share/ca-certificates 111 | type: DirectoryOrCreate 112 | name: usr-local-share-ca-certificates 113 | - hostPath: 114 | path: /usr/share/ca-certificates 115 | type: DirectoryOrCreate 116 | name: usr-share-ca-certificates 117 | status: {} 118 | -------------------------------------------------------------------------------- /06-manage-secrets/mysecret-01.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | user: YWRtaW4= 4 | kind: Secret 5 | 
metadata: 6 | creationTimestamp: null 7 | name: mysecret 8 | -------------------------------------------------------------------------------- /06-manage-secrets/mysecret-02.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | password: MTIzNDU2Nzg= 4 | kind: Secret 5 | metadata: 6 | creationTimestamp: null 7 | name: mysecret2 8 | -------------------------------------------------------------------------------- /06-manage-secrets/secret-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | run: secret-pod 7 | name: secret-pod 8 | spec: 9 | containers: 10 | - image: nginx 11 | env: 12 | - name: SECRET_PASSWORD 13 | valueFrom: 14 | secretKeyRef: 15 | name: mysecret2 16 | key: password 17 | name: secret-pod 18 | resources: {} 19 | volumeMounts: 20 | - name: secret1 21 | mountPath: "/etc/secret1" 22 | readOnly: true 23 | volumes: 24 | - name: secret1 25 | secret: 26 | secretName: mysecret 27 | dnsPolicy: ClusterFirst 28 | restartPolicy: Always 29 | status: {} 30 | -------------------------------------------------------------------------------- /07-encrypt-etcd/encrypt-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiserver.config.k8s.io/v1 2 | kind: EncryptionConfiguration 3 | resources: 4 | - resources: 5 | - secrets 6 | providers: 7 | - identity: {} 8 | - aescbc: 9 | keys: 10 | - name: key1 11 | secret: cGFzc3dvcmRwYXNzd29yZA== 12 | 13 | -------------------------------------------------------------------------------- /07-encrypt-etcd/reencrypt-all-secrets.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | kubectl get secrets -A -o yaml | kubectl replace -f - 3 | -------------------------------------------------------------------------------- /08-runtime-class/gvisor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | run: gvisor 7 | name: gvisor 8 | spec: 9 | runtimeClassName: gvisor 10 | containers: 11 | - image: nginx 12 | name: gvisor 13 | resources: {} 14 | dnsPolicy: ClusterFirst 15 | restartPolicy: Always 16 | status: {} 17 | -------------------------------------------------------------------------------- /08-runtime-class/runtimeclass.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: node.k8s.io/v1beta1 # RuntimeClass is defined in the node.k8s.io API group 2 | kind: RuntimeClass 3 | metadata: 4 | name: gvisor # The name the RuntimeClass will be referenced by 5 | # RuntimeClass is a non-namespaced resource 6 | handler: runsc #The name of the corresponding CRI configuration 7 | -------------------------------------------------------------------------------- /09-microservices-os-level-security/allow-priv-esc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: allow-priv-esc 5 | spec: 6 | volumes: 7 | - name: sec-ctx-vol 8 | emptyDir: {} 9 | containers: 10 | - name: sec-ctx-demo 11 | image: busybox 12 | command: [ "sh", "-c", "sleep 1h" ] 13 | volumeMounts: 14 | - name: sec-ctx-vol 15 | mountPath: /data/demo 16 | securityContext: 17 | allowPrivilegeEscalation: false 18 | 
-------------------------------------------------------------------------------- /09-microservices-os-level-security/pod-user-group.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: security-context-demo 5 | spec: 6 | securityContext: 7 | runAsUser: 1000 8 | runAsGroup: 3000 9 | fsGroup: 2000 10 | volumes: 11 | - name: sec-ctx-vol 12 | emptyDir: {} 13 | containers: 14 | - name: sec-ctx-demo 15 | image: busybox 16 | command: [ "sh", "-c", "sleep 1h" ] 17 | volumeMounts: 18 | - name: sec-ctx-vol 19 | mountPath: /data/demo 20 | securityContext: 21 | allowPrivilegeEscalation: false 22 | -------------------------------------------------------------------------------- /09-microservices-os-level-security/priv-containers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: priv-containers 5 | spec: 6 | volumes: 7 | - name: sec-ctx-vol 8 | emptyDir: {} 9 | containers: 10 | - name: sec-ctx-demo 11 | image: busybox 12 | command: [ "sh", "-c", "sleep 1h" ] 13 | volumeMounts: 14 | - name: sec-ctx-vol 15 | mountPath: /data/demo 16 | securityContext: 17 | runAsNonRoot: false 18 | privileged: true 19 | -------------------------------------------------------------------------------- /09-microservices-os-level-security/run-as-nonroot.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: run-as-non-root 5 | spec: 6 | # securityContext: 7 | # runAsUser: 1000 8 | # runAsGroup: 3000 9 | # fsGroup: 2000 10 | volumes: 11 | - name: sec-ctx-vol 12 | emptyDir: {} 13 | containers: 14 | - name: sec-ctx-demo 15 | image: busybox 16 | command: [ "sh", "-c", "sleep 1h" ] 17 | volumeMounts: 18 | - name: sec-ctx-vol 19 | mountPath: /data/demo 20 | securityContext: 21 | allowPrivilegeEscalation: false 22 | runAsNonRoot: true 23 | -------------------------------------------------------------------------------- /10-psp/allow-priv-esc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: allow-priv-esc 5 | spec: 6 | volumes: 7 | - name: sec-ctx-vol 8 | emptyDir: {} 9 | containers: 10 | - name: sec-ctx-demo 11 | image: busybox 12 | command: [ "sh", "-c", "sleep 1h" ] 13 | volumeMounts: 14 | - name: sec-ctx-vol 15 | mountPath: /data/demo 16 | securityContext: 17 | allowPrivilegeEscalation: true 18 | -------------------------------------------------------------------------------- /10-psp/example-psp.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodSecurityPolicy 3 | metadata: 4 | name: default 5 | spec: 6 | allowPrivilegeEscalation: false 7 | privileged: false # Don't allow privileged pods! 8 | # The rest fills in some required fields. 
9 | seLinux: 10 | rule: RunAsAny 11 | supplementalGroups: 12 | rule: RunAsAny 13 | runAsUser: 14 | rule: RunAsAny 15 | fsGroup: 16 | rule: RunAsAny 17 | volumes: 18 | - '*' 19 | -------------------------------------------------------------------------------- /10-psp/kube-apiserver.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | annotations: 5 | kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: 172.16.99.21:6443 6 | creationTimestamp: null 7 | labels: 8 | component: kube-apiserver 9 | tier: control-plane 10 | name: kube-apiserver 11 | namespace: kube-system 12 | spec: 13 | containers: 14 | - command: 15 | - kube-apiserver 16 | - --advertise-address=172.16.99.21 17 | - --allow-privileged=true 18 | - --authorization-mode=Node,RBAC 19 | - --client-ca-file=/etc/kubernetes/pki/ca.crt 20 | - --enable-admission-plugins=NodeRestriction,PodSecurityPolicy 21 | - --enable-bootstrap-token-auth=true 22 | - --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt 23 | - --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt 24 | - --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key 25 | - --etcd-servers=https://127.0.0.1:2379 26 | - --insecure-port=0 27 | - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt 28 | - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key 29 | - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname 30 | - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt 31 | - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key 32 | - --requestheader-allowed-names=front-proxy-client 33 | - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt 34 | - --requestheader-extra-headers-prefix=X-Remote-Extra- 35 | - --requestheader-group-headers=X-Remote-Group 36 | - --requestheader-username-headers=X-Remote-User 37 | - --secure-port=6443 38 | - --service-account-key-file=/etc/kubernetes/pki/sa.pub 39 | - --service-cluster-ip-range=10.96.0.0/12 40 | - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt 41 | - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key 42 | image: k8s.gcr.io/kube-apiserver:v1.19.3 43 | imagePullPolicy: IfNotPresent 44 | livenessProbe: 45 | failureThreshold: 8 46 | httpGet: 47 | host: 172.16.99.21 48 | path: /livez 49 | port: 6443 50 | scheme: HTTPS 51 | initialDelaySeconds: 10 52 | periodSeconds: 10 53 | timeoutSeconds: 15 54 | name: kube-apiserver 55 | readinessProbe: 56 | failureThreshold: 3 57 | httpGet: 58 | host: 172.16.99.21 59 | path: /readyz 60 | port: 6443 61 | scheme: HTTPS 62 | periodSeconds: 1 63 | timeoutSeconds: 15 64 | resources: 65 | requests: 66 | cpu: 250m 67 | startupProbe: 68 | failureThreshold: 24 69 | httpGet: 70 | host: 172.16.99.21 71 | path: /livez 72 | port: 6443 73 | scheme: HTTPS 74 | initialDelaySeconds: 10 75 | periodSeconds: 10 76 | timeoutSeconds: 15 77 | volumeMounts: 78 | - mountPath: /etc/ssl/certs 79 | name: ca-certs 80 | readOnly: true 81 | - mountPath: /etc/ca-certificates 82 | name: etc-ca-certificates 83 | readOnly: true 84 | - mountPath: /etc/kubernetes/pki 85 | name: k8s-certs 86 | readOnly: true 87 | - mountPath: /usr/local/share/ca-certificates 88 | name: usr-local-share-ca-certificates 89 | readOnly: true 90 | - mountPath: /usr/share/ca-certificates 91 | name: usr-share-ca-certificates 92 | readOnly: true 93 | hostNetwork: true 94 | priorityClassName: system-node-critical 95 | volumes: 96 | - hostPath: 97 | path: 
/etc/ssl/certs 98 | type: DirectoryOrCreate 99 | name: ca-certs 100 | - hostPath: 101 | path: /etc/ca-certificates 102 | type: DirectoryOrCreate 103 | name: etc-ca-certificates 104 | - hostPath: 105 | path: /etc/kubernetes/pki 106 | type: DirectoryOrCreate 107 | name: k8s-certs 108 | - hostPath: 109 | path: /usr/local/share/ca-certificates 110 | type: DirectoryOrCreate 111 | name: usr-local-share-ca-certificates 112 | - hostPath: 113 | path: /usr/share/ca-certificates 114 | type: DirectoryOrCreate 115 | name: usr-share-ca-certificates 116 | status: {} 117 | -------------------------------------------------------------------------------- /10-psp/priv-containers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: priv-containers 5 | spec: 6 | volumes: 7 | - name: sec-ctx-vol 8 | emptyDir: {} 9 | containers: 10 | - name: sec-ctx-demo 11 | image: busybox 12 | command: [ "sh", "-c", "sleep 1h" ] 13 | volumeMounts: 14 | - name: sec-ctx-vol 15 | mountPath: /data/demo 16 | securityContext: 17 | runAsNonRoot: false 18 | privileged: true 19 | -------------------------------------------------------------------------------- /10-psp/psp-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | creationTimestamp: null 5 | name: psp-access 6 | rules: 7 | - apiGroups: 8 | - policy 9 | resources: 10 | - podsecuritypolicies 11 | verbs: 12 | - use 13 | --- 14 | apiVersion: rbac.authorization.k8s.io/v1 15 | kind: RoleBinding 16 | metadata: 17 | creationTimestamp: null 18 | name: psp-access 19 | roleRef: 20 | apiGroup: rbac.authorization.k8s.io 21 | kind: Role 22 | name: psp-access 23 | subjects: 24 | - kind: ServiceAccount 25 | name: default 26 | namespace: default 27 | -------------------------------------------------------------------------------- /11-sidecar/app.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | run: app 7 | name: app 8 | spec: 9 | containers: 10 | - name: proxy 11 | image: ubuntu 12 | command: 13 | - sh 14 | - -c 15 | - 'apt-get update && apt-get install iptables -y && iptables -L && sleep 1d' 16 | securityContext: 17 | capabilities: 18 | add: ["NET_ADMIN"] 19 | - name: app 20 | command: 21 | - sh 22 | - -c 23 | - ping google.com 24 | image: bash 25 | resources: {} 26 | dnsPolicy: ClusterFirst 27 | restartPolicy: Always 28 | status: {} 29 | -------------------------------------------------------------------------------- /12-opa/deny-all/all_pod_always_deny.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: K8sAlwaysDeny 3 | metadata: 4 | name: pod-always-deny 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: [""] 9 | kinds: ["Pod"] 10 | parameters: 11 | message: "ACCESS DENIED!" 
12 | -------------------------------------------------------------------------------- /12-opa/deny-all/alwaysdeny_template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: templates.gatekeeper.sh/v1beta1 2 | kind: ConstraintTemplate 3 | metadata: 4 | name: k8salwaysdeny 5 | spec: 6 | crd: 7 | spec: 8 | names: 9 | kind: K8sAlwaysDeny 10 | validation: 11 | # Schema for the `parameters` field 12 | openAPIV3Schema: 13 | properties: 14 | message: 15 | type: string 16 | targets: 17 | - target: admission.k8s.gatekeeper.sh 18 | rego: | 19 | package k8salwaysdeny 20 | 21 | violation[{"msg": msg}] { 22 | 1 < 0 23 | msg := input.parameters.message 24 | } 25 | -------------------------------------------------------------------------------- /12-opa/deployment-replica-count/all_deployment_must_have_min_replicacount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: K8sMinReplicaCount 3 | metadata: 4 | name: deployment-must-have-min-replicas 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: ["apps"] 9 | kinds: ["Deployment"] 10 | parameters: 11 | min: 2 12 | -------------------------------------------------------------------------------- /12-opa/deployment-replica-count/k8sminreplicacount_template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: templates.gatekeeper.sh/v1beta1 2 | kind: ConstraintTemplate 3 | metadata: 4 | name: k8sminreplicacount 5 | spec: 6 | crd: 7 | spec: 8 | names: 9 | kind: K8sMinReplicaCount 10 | validation: 11 | # Schema for the `parameters` field 12 | openAPIV3Schema: 13 | properties: 14 | min: 15 | type: integer 16 | targets: 17 | - target: admission.k8s.gatekeeper.sh 18 | rego: | 19 | package k8sminreplicacount 20 | 21 | violation[{"msg": msg, "details": {"missing_replicas": missing}}] { 22 | provided := input.review.object.spec.replicas 23 | required := input.parameters.min 24 | missing := required - provided 25 | missing > 0 26 | msg := sprintf("you must provide %v more replicas", [missing]) 27 | } 28 | -------------------------------------------------------------------------------- /12-opa/namespace-labels/all_ns_must_have_cks.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: K8sRequiredLabels 3 | metadata: 4 | name: ns-must-have-cks 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: [""] 9 | kinds: ["Namespace"] 10 | parameters: 11 | labels: ["cks"] 12 | -------------------------------------------------------------------------------- /12-opa/namespace-labels/all_pod_must_have_cks.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: K8sRequiredLabels 3 | metadata: 4 | name: pod-must-have-cks 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: [""] 9 | kinds: ["Pod"] 10 | parameters: 11 | labels: ["cks"] 12 | -------------------------------------------------------------------------------- /12-opa/namespace-labels/k8srequiredlabels_template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: templates.gatekeeper.sh/v1beta1 2 | kind: ConstraintTemplate 3 | metadata: 4 | name: k8srequiredlabels 5 | spec: 6 | crd: 7 | spec: 8 | names: 9 | kind: K8sRequiredLabels 10 | validation: 11 | # Schema for the `parameters` field 12 | openAPIV3Schema: 13 | 
properties: 14 | labels: 15 | type: array 16 | items: string 17 | targets: 18 | - target: admission.k8s.gatekeeper.sh 19 | rego: | 20 | package k8srequiredlabels 21 | 22 | violation[{"msg": msg, "details": {"missing_labels": missing}}] { 23 | provided := {label | input.review.object.metadata.labels[label]} 24 | required := {label | label := input.parameters.labels[_]} 25 | missing := required - provided 26 | count(missing) > 0 27 | msg := sprintf("you must provide labels: %v", [missing]) 28 | } 29 | -------------------------------------------------------------------------------- /12-opa/opa-gatekeeper.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | labels: 5 | admission.gatekeeper.sh/ignore: no-self-managing 6 | control-plane: controller-manager 7 | gatekeeper.sh/system: "yes" 8 | name: gatekeeper-system 9 | --- 10 | apiVersion: apiextensions.k8s.io/v1beta1 11 | kind: CustomResourceDefinition 12 | metadata: 13 | annotations: 14 | controller-gen.kubebuilder.io/version: v0.3.0 15 | creationTimestamp: null 16 | labels: 17 | gatekeeper.sh/system: "yes" 18 | name: configs.config.gatekeeper.sh 19 | spec: 20 | group: config.gatekeeper.sh 21 | names: 22 | kind: Config 23 | listKind: ConfigList 24 | plural: configs 25 | singular: config 26 | scope: Namespaced 27 | validation: 28 | openAPIV3Schema: 29 | description: Config is the Schema for the configs API 30 | properties: 31 | apiVersion: 32 | description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 33 | type: string 34 | kind: 35 | description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 36 | type: string 37 | metadata: 38 | type: object 39 | spec: 40 | description: ConfigSpec defines the desired state of Config 41 | properties: 42 | match: 43 | description: Configuration for namespace exclusion 44 | items: 45 | properties: 46 | excludedNamespaces: 47 | items: 48 | type: string 49 | type: array 50 | processes: 51 | items: 52 | type: string 53 | type: array 54 | type: object 55 | type: array 56 | readiness: 57 | description: Configuration for readiness tracker 58 | properties: 59 | statsEnabled: 60 | type: boolean 61 | type: object 62 | sync: 63 | description: Configuration for syncing k8s objects 64 | properties: 65 | syncOnly: 66 | description: If non-empty, only entries on this list will be replicated into OPA 67 | items: 68 | properties: 69 | group: 70 | type: string 71 | kind: 72 | type: string 73 | version: 74 | type: string 75 | type: object 76 | type: array 77 | type: object 78 | validation: 79 | description: Configuration for validation 80 | properties: 81 | traces: 82 | description: List of requests to trace. Both "user" and "kinds" must be specified 83 | items: 84 | properties: 85 | dump: 86 | description: Also dump the state of OPA with the trace. Set to `All` to dump everything. 
87 | type: string 88 | kind: 89 | description: Only trace requests of the following GroupVersionKind 90 | properties: 91 | group: 92 | type: string 93 | kind: 94 | type: string 95 | version: 96 | type: string 97 | type: object 98 | user: 99 | description: Only trace requests from the specified user 100 | type: string 101 | type: object 102 | type: array 103 | type: object 104 | type: object 105 | status: 106 | description: ConfigStatus defines the observed state of Config 107 | type: object 108 | type: object 109 | version: v1alpha1 110 | versions: 111 | - name: v1alpha1 112 | served: true 113 | storage: true 114 | status: 115 | acceptedNames: 116 | kind: "" 117 | plural: "" 118 | conditions: [] 119 | storedVersions: [] 120 | --- 121 | apiVersion: apiextensions.k8s.io/v1beta1 122 | kind: CustomResourceDefinition 123 | metadata: 124 | annotations: 125 | controller-gen.kubebuilder.io/version: v0.3.0 126 | creationTimestamp: null 127 | labels: 128 | gatekeeper.sh/system: "yes" 129 | name: constraintpodstatuses.status.gatekeeper.sh 130 | spec: 131 | group: status.gatekeeper.sh 132 | names: 133 | kind: ConstraintPodStatus 134 | listKind: ConstraintPodStatusList 135 | plural: constraintpodstatuses 136 | singular: constraintpodstatus 137 | scope: Namespaced 138 | validation: 139 | openAPIV3Schema: 140 | description: ConstraintPodStatus is the Schema for the constraintpodstatuses API 141 | properties: 142 | apiVersion: 143 | description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 144 | type: string 145 | kind: 146 | description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 147 | type: string 148 | metadata: 149 | type: object 150 | status: 151 | description: ConstraintPodStatusStatus defines the observed state of ConstraintPodStatus 152 | properties: 153 | constraintUID: 154 | description: Storing the constraint UID allows us to detect drift, such as when a constraint has been recreated after its CRD was deleted out from under it, interrupting the watch 155 | type: string 156 | enforced: 157 | type: boolean 158 | errors: 159 | items: 160 | description: Error represents a single error caught while adding a constraint to OPA 161 | properties: 162 | code: 163 | type: string 164 | location: 165 | type: string 166 | message: 167 | type: string 168 | required: 169 | - code 170 | - message 171 | type: object 172 | type: array 173 | id: 174 | type: string 175 | observedGeneration: 176 | format: int64 177 | type: integer 178 | operations: 179 | items: 180 | type: string 181 | type: array 182 | type: object 183 | type: object 184 | version: v1beta1 185 | versions: 186 | - name: v1beta1 187 | served: true 188 | storage: true 189 | status: 190 | acceptedNames: 191 | kind: "" 192 | plural: "" 193 | conditions: [] 194 | storedVersions: [] 195 | --- 196 | apiVersion: apiextensions.k8s.io/v1beta1 197 | kind: CustomResourceDefinition 198 | metadata: 199 | annotations: 200 | controller-gen.kubebuilder.io/version: v0.3.0 201 | creationTimestamp: null 202 | labels: 203 | gatekeeper.sh/system: "yes" 204 | name: constrainttemplatepodstatuses.status.gatekeeper.sh 205 | spec: 206 | group: status.gatekeeper.sh 207 | names: 208 | kind: ConstraintTemplatePodStatus 209 | listKind: ConstraintTemplatePodStatusList 210 | plural: constrainttemplatepodstatuses 211 | singular: constrainttemplatepodstatus 212 | scope: Namespaced 213 | validation: 214 | openAPIV3Schema: 215 | description: ConstraintTemplatePodStatus is the Schema for the constrainttemplatepodstatuses API 216 | properties: 217 | apiVersion: 218 | description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 219 | type: string 220 | kind: 221 | description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 222 | type: string 223 | metadata: 224 | type: object 225 | status: 226 | description: ConstraintTemplatePodStatusStatus defines the observed state of ConstraintTemplatePodStatus 227 | properties: 228 | errors: 229 | items: 230 | description: CreateCRDError represents a single error caught during parsing, compiling, etc. 
231 | properties: 232 | code: 233 | type: string 234 | location: 235 | type: string 236 | message: 237 | type: string 238 | required: 239 | - code 240 | - message 241 | type: object 242 | type: array 243 | id: 244 | description: 'Important: Run "make" to regenerate code after modifying this file' 245 | type: string 246 | observedGeneration: 247 | format: int64 248 | type: integer 249 | operations: 250 | items: 251 | type: string 252 | type: array 253 | templateUID: 254 | description: UID is a type that holds unique ID values, including UUIDs. Because we don't ONLY use UUIDs, this is an alias to string. Being a type captures intent and helps make sure that UIDs and names do not get conflated. 255 | type: string 256 | type: object 257 | type: object 258 | version: v1beta1 259 | versions: 260 | - name: v1beta1 261 | served: true 262 | storage: true 263 | status: 264 | acceptedNames: 265 | kind: "" 266 | plural: "" 267 | conditions: [] 268 | storedVersions: [] 269 | --- 270 | apiVersion: apiextensions.k8s.io/v1beta1 271 | kind: CustomResourceDefinition 272 | metadata: 273 | creationTimestamp: null 274 | labels: 275 | controller-tools.k8s.io: "1.0" 276 | gatekeeper.sh/system: "yes" 277 | name: constrainttemplates.templates.gatekeeper.sh 278 | spec: 279 | group: templates.gatekeeper.sh 280 | names: 281 | kind: ConstraintTemplate 282 | plural: constrainttemplates 283 | scope: Cluster 284 | subresources: 285 | status: {} 286 | validation: 287 | openAPIV3Schema: 288 | properties: 289 | apiVersion: 290 | description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 291 | type: string 292 | kind: 293 | description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 294 | type: string 295 | metadata: 296 | type: object 297 | spec: 298 | properties: 299 | crd: 300 | properties: 301 | spec: 302 | properties: 303 | names: 304 | properties: 305 | kind: 306 | type: string 307 | shortNames: 308 | items: 309 | type: string 310 | type: array 311 | type: object 312 | validation: 313 | type: object 314 | type: object 315 | type: object 316 | targets: 317 | items: 318 | properties: 319 | libs: 320 | items: 321 | type: string 322 | type: array 323 | rego: 324 | type: string 325 | target: 326 | type: string 327 | type: object 328 | type: array 329 | type: object 330 | status: 331 | properties: 332 | byPod: 333 | items: 334 | properties: 335 | errors: 336 | items: 337 | properties: 338 | code: 339 | type: string 340 | location: 341 | type: string 342 | message: 343 | type: string 344 | required: 345 | - code 346 | - message 347 | type: object 348 | type: array 349 | id: 350 | description: a unique identifier for the pod that wrote the status 351 | type: string 352 | observedGeneration: 353 | format: int64 354 | type: integer 355 | type: object 356 | type: array 357 | created: 358 | type: boolean 359 | type: object 360 | version: v1beta1 361 | versions: 362 | - name: v1beta1 363 | served: true 364 | storage: true 365 | - name: v1alpha1 366 | served: true 367 | storage: false 368 | status: 369 | acceptedNames: 370 | kind: "" 371 | plural: "" 372 | conditions: [] 373 | storedVersions: [] 374 | --- 375 | apiVersion: v1 376 | kind: ServiceAccount 377 | metadata: 378 | labels: 379 | gatekeeper.sh/system: "yes" 380 | name: gatekeeper-admin 381 | namespace: gatekeeper-system 382 | --- 383 | apiVersion: rbac.authorization.k8s.io/v1 384 | kind: Role 385 | metadata: 386 | creationTimestamp: null 387 | labels: 388 | gatekeeper.sh/system: "yes" 389 | name: gatekeeper-manager-role 390 | namespace: gatekeeper-system 391 | rules: 392 | - apiGroups: 393 | - "" 394 | resources: 395 | - events 396 | verbs: 397 | - create 398 | - patch 399 | - apiGroups: 400 | - "" 401 | resources: 402 | - secrets 403 | verbs: 404 | - create 405 | - delete 406 | - get 407 | - list 408 | - patch 409 | - update 410 | - watch 411 | --- 412 | apiVersion: rbac.authorization.k8s.io/v1 413 | kind: ClusterRole 414 | metadata: 415 | creationTimestamp: null 416 | labels: 417 | gatekeeper.sh/system: "yes" 418 | name: gatekeeper-manager-role 419 | rules: 420 | - apiGroups: 421 | - '*' 422 | resources: 423 | - '*' 424 | verbs: 425 | - get 426 | - list 427 | - watch 428 | - apiGroups: 429 | - apiextensions.k8s.io 430 | resources: 431 | - customresourcedefinitions 432 | verbs: 433 | - create 434 | - delete 435 | - get 436 | - list 437 | - patch 438 | - update 439 | - watch 440 | - apiGroups: 441 | - config.gatekeeper.sh 442 | resources: 443 | - configs 444 | verbs: 445 | - create 446 | - delete 447 | - get 448 | - list 449 | - patch 450 | - update 451 | - watch 452 | - apiGroups: 453 | - config.gatekeeper.sh 454 | resources: 455 | - configs/status 456 | verbs: 457 | - get 458 | - patch 459 | - update 460 | - apiGroups: 461 | - constraints.gatekeeper.sh 462 | resources: 463 | - '*' 464 | verbs: 465 | - create 466 | - delete 467 | - get 468 | - list 469 | - patch 470 | - update 471 | - watch 472 | - apiGroups: 473 | - policy 474 | resources: 475 | - podsecuritypolicies 476 | verbs: 477 | - use 478 | - apiGroups: 479 | - status.gatekeeper.sh 480 | resources: 481 | - '*' 482 | verbs: 483 | - create 484 | - delete 
485 | - get 486 | - list 487 | - patch 488 | - update 489 | - watch 490 | - apiGroups: 491 | - templates.gatekeeper.sh 492 | resources: 493 | - constrainttemplates 494 | verbs: 495 | - create 496 | - delete 497 | - get 498 | - list 499 | - patch 500 | - update 501 | - watch 502 | - apiGroups: 503 | - templates.gatekeeper.sh 504 | resources: 505 | - constrainttemplates/finalizers 506 | verbs: 507 | - delete 508 | - get 509 | - patch 510 | - update 511 | - apiGroups: 512 | - templates.gatekeeper.sh 513 | resources: 514 | - constrainttemplates/status 515 | verbs: 516 | - get 517 | - patch 518 | - update 519 | - apiGroups: 520 | - admissionregistration.k8s.io 521 | resourceNames: 522 | - gatekeeper-validating-webhook-configuration 523 | resources: 524 | - validatingwebhookconfigurations 525 | verbs: 526 | - create 527 | - delete 528 | - get 529 | - list 530 | - patch 531 | - update 532 | - watch 533 | --- 534 | apiVersion: rbac.authorization.k8s.io/v1 535 | kind: RoleBinding 536 | metadata: 537 | labels: 538 | gatekeeper.sh/system: "yes" 539 | name: gatekeeper-manager-rolebinding 540 | namespace: gatekeeper-system 541 | roleRef: 542 | apiGroup: rbac.authorization.k8s.io 543 | kind: Role 544 | name: gatekeeper-manager-role 545 | subjects: 546 | - kind: ServiceAccount 547 | name: gatekeeper-admin 548 | namespace: gatekeeper-system 549 | --- 550 | apiVersion: rbac.authorization.k8s.io/v1 551 | kind: ClusterRoleBinding 552 | metadata: 553 | labels: 554 | gatekeeper.sh/system: "yes" 555 | name: gatekeeper-manager-rolebinding 556 | roleRef: 557 | apiGroup: rbac.authorization.k8s.io 558 | kind: ClusterRole 559 | name: gatekeeper-manager-role 560 | subjects: 561 | - kind: ServiceAccount 562 | name: gatekeeper-admin 563 | namespace: gatekeeper-system 564 | --- 565 | apiVersion: v1 566 | kind: Secret 567 | metadata: 568 | labels: 569 | gatekeeper.sh/system: "yes" 570 | name: gatekeeper-webhook-server-cert 571 | namespace: gatekeeper-system 572 | --- 573 | apiVersion: v1 574 | kind: Service 575 | metadata: 576 | labels: 577 | gatekeeper.sh/system: "yes" 578 | name: gatekeeper-webhook-service 579 | namespace: gatekeeper-system 580 | spec: 581 | ports: 582 | - port: 443 583 | targetPort: 8443 584 | selector: 585 | control-plane: controller-manager 586 | gatekeeper.sh/operation: webhook 587 | gatekeeper.sh/system: "yes" 588 | --- 589 | apiVersion: apps/v1 590 | kind: Deployment 591 | metadata: 592 | labels: 593 | control-plane: controller-manager 594 | gatekeeper.sh/operation: audit 595 | gatekeeper.sh/system: "yes" 596 | name: gatekeeper-audit 597 | namespace: gatekeeper-system 598 | spec: 599 | replicas: 1 600 | selector: 601 | matchLabels: 602 | control-plane: audit-controller 603 | gatekeeper.sh/operation: audit 604 | gatekeeper.sh/system: "yes" 605 | template: 606 | metadata: 607 | annotations: 608 | container.seccomp.security.alpha.kubernetes.io/manager: runtime/default 609 | labels: 610 | control-plane: audit-controller 611 | gatekeeper.sh/operation: audit 612 | gatekeeper.sh/system: "yes" 613 | spec: 614 | containers: 615 | - args: 616 | - --operation=audit 617 | - --operation=status 618 | - --logtostderr 619 | command: 620 | - /manager 621 | env: 622 | - name: POD_NAMESPACE 623 | valueFrom: 624 | fieldRef: 625 | apiVersion: v1 626 | fieldPath: metadata.namespace 627 | - name: POD_NAME 628 | valueFrom: 629 | fieldRef: 630 | fieldPath: metadata.name 631 | image: openpolicyagent/gatekeeper:469f747 632 | imagePullPolicy: Always 633 | livenessProbe: 634 | httpGet: 635 | path: /healthz 636 | port: 9090 
637 | name: manager 638 | ports: 639 | - containerPort: 8888 640 | name: metrics 641 | protocol: TCP 642 | - containerPort: 9090 643 | name: healthz 644 | protocol: TCP 645 | readinessProbe: 646 | httpGet: 647 | path: /readyz 648 | port: 9090 649 | resources: 650 | limits: 651 | cpu: 1000m 652 | memory: 512Mi 653 | requests: 654 | cpu: 100m 655 | memory: 256Mi 656 | securityContext: 657 | allowPrivilegeEscalation: false 658 | capabilities: 659 | drop: 660 | - all 661 | runAsGroup: 999 662 | runAsNonRoot: true 663 | runAsUser: 1000 664 | nodeSelector: 665 | kubernetes.io/os: linux 666 | serviceAccountName: gatekeeper-admin 667 | terminationGracePeriodSeconds: 60 668 | --- 669 | apiVersion: apps/v1 670 | kind: Deployment 671 | metadata: 672 | labels: 673 | control-plane: controller-manager 674 | gatekeeper.sh/operation: webhook 675 | gatekeeper.sh/system: "yes" 676 | name: gatekeeper-controller-manager 677 | namespace: gatekeeper-system 678 | spec: 679 | replicas: 1 680 | selector: 681 | matchLabels: 682 | control-plane: controller-manager 683 | gatekeeper.sh/operation: webhook 684 | gatekeeper.sh/system: "yes" 685 | template: 686 | metadata: 687 | annotations: 688 | container.seccomp.security.alpha.kubernetes.io/manager: runtime/default 689 | labels: 690 | control-plane: controller-manager 691 | gatekeeper.sh/operation: webhook 692 | gatekeeper.sh/system: "yes" 693 | spec: 694 | affinity: 695 | podAntiAffinity: 696 | preferredDuringSchedulingIgnoredDuringExecution: 697 | - podAffinityTerm: 698 | labelSelector: 699 | matchExpressions: 700 | - key: gatekeeper.sh/operation 701 | operator: In 702 | values: 703 | - webhook 704 | topologyKey: kubernetes.io/hostname 705 | weight: 100 706 | containers: 707 | - args: 708 | - --port=8443 709 | - --logtostderr 710 | - --exempt-namespace=gatekeeper-system 711 | - --operation=webhook 712 | command: 713 | - /manager 714 | env: 715 | - name: POD_NAMESPACE 716 | valueFrom: 717 | fieldRef: 718 | apiVersion: v1 719 | fieldPath: metadata.namespace 720 | - name: POD_NAME 721 | valueFrom: 722 | fieldRef: 723 | fieldPath: metadata.name 724 | image: openpolicyagent/gatekeeper:469f747 725 | imagePullPolicy: Always 726 | livenessProbe: 727 | httpGet: 728 | path: /healthz 729 | port: 9090 730 | name: manager 731 | ports: 732 | - containerPort: 8443 733 | name: webhook-server 734 | protocol: TCP 735 | - containerPort: 8888 736 | name: metrics 737 | protocol: TCP 738 | - containerPort: 9090 739 | name: healthz 740 | protocol: TCP 741 | readinessProbe: 742 | httpGet: 743 | path: /readyz 744 | port: 9090 745 | resources: 746 | limits: 747 | cpu: 1000m 748 | memory: 512Mi 749 | requests: 750 | cpu: 100m 751 | memory: 256Mi 752 | securityContext: 753 | allowPrivilegeEscalation: false 754 | capabilities: 755 | drop: 756 | - all 757 | runAsGroup: 999 758 | runAsNonRoot: true 759 | runAsUser: 1000 760 | volumeMounts: 761 | - mountPath: /certs 762 | name: cert 763 | readOnly: true 764 | nodeSelector: 765 | kubernetes.io/os: linux 766 | serviceAccountName: gatekeeper-admin 767 | terminationGracePeriodSeconds: 60 768 | volumes: 769 | - name: cert 770 | secret: 771 | defaultMode: 420 772 | secretName: gatekeeper-webhook-server-cert 773 | --- 774 | apiVersion: admissionregistration.k8s.io/v1beta1 775 | kind: ValidatingWebhookConfiguration 776 | metadata: 777 | creationTimestamp: null 778 | labels: 779 | gatekeeper.sh/system: "yes" 780 | name: gatekeeper-validating-webhook-configuration 781 | webhooks: 782 | - clientConfig: 783 | caBundle: Cg== 784 | service: 785 | name: 
gatekeeper-webhook-service 786 | namespace: gatekeeper-system 787 | path: /v1/admit 788 | failurePolicy: Ignore 789 | name: validation.gatekeeper.sh 790 | namespaceSelector: 791 | matchExpressions: 792 | - key: admission.gatekeeper.sh/ignore 793 | operator: DoesNotExist 794 | rules: 795 | - apiGroups: 796 | - '*' 797 | apiVersions: 798 | - '*' 799 | operations: 800 | - CREATE 801 | - UPDATE 802 | resources: 803 | - '*' 804 | sideEffects: None 805 | timeoutSeconds: 5 806 | - clientConfig: 807 | caBundle: Cg== 808 | service: 809 | name: gatekeeper-webhook-service 810 | namespace: gatekeeper-system 811 | path: /v1/admitlabel 812 | failurePolicy: Fail 813 | name: check-ignore-label.gatekeeper.sh 814 | rules: 815 | - apiGroups: 816 | - "" 817 | apiVersions: 818 | - '*' 819 | operations: 820 | - CREATE 821 | - UPDATE 822 | resources: 823 | - namespaces 824 | sideEffects: None 825 | timeoutSeconds: 5 826 | -------------------------------------------------------------------------------- /12-opa/whitelist-registries/all_pod_must_have_trusted_images.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: K8sTrustedImages 3 | metadata: 4 | name: pod-trusted-images 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: [""] 9 | kinds: ["Pod"] 10 | -------------------------------------------------------------------------------- /12-opa/whitelist-registries/k8strustedimages_template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: templates.gatekeeper.sh/v1beta1 2 | kind: ConstraintTemplate 3 | metadata: 4 | name: k8strustedimages 5 | spec: 6 | crd: 7 | spec: 8 | names: 9 | kind: K8sTrustedImages 10 | targets: 11 | - target: admission.k8s.gatekeeper.sh 12 | rego: | 13 | package k8strustedimages 14 | 15 | violation[{"msg": msg}] { 16 | image := input.review.object.spec.containers[_].image 17 | not startswith(image, "docker.io/") 18 | not startswith(image, "k8s.gcr.io/") 19 | msg := "not trusted image!" 20 | } 21 | -------------------------------------------------------------------------------- /13-image-footprint/default/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu 2 | ARG DEBIAN_FRONTEND=noninteractive 3 | RUN apt-get update && apt-get install -y golang-go 4 | COPY app.go . 5 | RUN CGO_ENABLED=0 go build app.go 6 | CMD ["./app"] 7 | -------------------------------------------------------------------------------- /13-image-footprint/default/app.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | "os/user" 7 | ) 8 | 9 | func main () { 10 | user, err := user.Current() 11 | if err != nil { 12 | panic(err) 13 | } 14 | 15 | for { 16 | fmt.Println("user: " + user.Username + " id: " + user.Uid) 17 | time.Sleep(1 * time.Second) 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /13-image-footprint/multi-stage/Dockerfile: -------------------------------------------------------------------------------- 1 | # build container stage 1 2 | FROM ubuntu 3 | ARG DEBIAN_FRONTEND=noninteractive 4 | RUN apt-get update && apt-get install -y golang-go 5 | COPY app.go . 6 | RUN CGO_ENABLED=0 go build app.go 7 | 8 | # app container stage 2 9 | FROM alpine 10 | COPY --from=0 /app . 
11 | CMD ["./app"] 12 | -------------------------------------------------------------------------------- /13-image-footprint/multi-stage/app.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | "os/user" 7 | ) 8 | 9 | func main () { 10 | user, err := user.Current() 11 | if err != nil { 12 | panic(err) 13 | } 14 | 15 | for { 16 | fmt.Println("user: " + user.Username + " id: " + user.Uid) 17 | time.Sleep(1 * time.Second) 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /13-image-footprint/no-root/Dockerfile: -------------------------------------------------------------------------------- 1 | # build container stage 1 2 | FROM ubuntu:20.04 3 | ARG DEBIAN_FRONTEND=noninteractive 4 | RUN apt-get update && apt-get install -y golang-go=2:1.13~1ubuntu2 5 | COPY app.go . 6 | RUN pwd 7 | RUN CGO_ENABLED=0 go build app.go 8 | 9 | # app container stage 2 10 | FROM alpine:3.12.0 11 | RUN addgroup -S appgroup && adduser -S appuser -G appgroup -h /home/appuser 12 | COPY --from=0 /app /home/appuser/ 13 | USER appuser 14 | CMD ["/home/appuser/app"] 15 | -------------------------------------------------------------------------------- /13-image-footprint/no-root/app.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | "os/user" 7 | ) 8 | 9 | func main () { 10 | user, err := user.Current() 11 | if err != nil { 12 | panic(err) 13 | } 14 | 15 | for { 16 | fmt.Println("user: " + user.Username + " id: " + user.Uid) 17 | time.Sleep(1 * time.Second) 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /13-image-footprint/no-shell/Dockerfile: -------------------------------------------------------------------------------- 1 | # build container stage 1 2 | FROM ubuntu:20.04 3 | ARG DEBIAN_FRONTEND=noninteractive 4 | RUN apt-get update && apt-get install -y golang-go=2:1.13~1ubuntu2 5 | COPY app.go . 6 | RUN pwd 7 | RUN CGO_ENABLED=0 go build app.go 8 | 9 | # app container stage 2 10 | FROM alpine:3.12.0 11 | RUN addgroup -S appgroup && adduser -S appuser -G appgroup -h /home/appuser 12 | RUN rm -rf /bin/* 13 | COPY --from=0 /app /home/appuser/ 14 | USER appuser 15 | CMD ["/home/appuser/app"] 16 | -------------------------------------------------------------------------------- /13-image-footprint/no-shell/app.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | "os/user" 7 | ) 8 | 9 | func main () { 10 | user, err := user.Current() 11 | if err != nil { 12 | panic(err) 13 | } 14 | 15 | for { 16 | fmt.Println("user: " + user.Username + " id: " + user.Uid) 17 | time.Sleep(1 * time.Second) 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /13-image-footprint/pkg-versions/Dockerfile: -------------------------------------------------------------------------------- 1 | # build container stage 1 2 | FROM ubuntu 3 | ARG DEBIAN_FRONTEND=noninteractive 4 | RUN apt-get update && apt-get install -y golang-go 5 | COPY app.go . 6 | RUN CGO_ENABLED=0 go build app.go 7 | 8 | # app container stage 2 9 | FROM alpine:3.11.6 10 | COPY --from=0 /app . 
11 | CMD ["./app"] 12 | -------------------------------------------------------------------------------- /13-image-footprint/pkg-versions/app.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | "os/user" 7 | ) 8 | 9 | func main () { 10 | user, err := user.Current() 11 | if err != nil { 12 | panic(err) 13 | } 14 | 15 | for { 16 | fmt.Println("user: " + user.Username + " id: " + user.Uid) 17 | time.Sleep(1 * time.Second) 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /13-image-footprint/read-only-fs/Dockerfile: -------------------------------------------------------------------------------- 1 | # build container stage 1 2 | FROM ubuntu:20.04 3 | ARG DEBIAN_FRONTEND=noninteractive 4 | RUN apt-get update && apt-get install -y golang-go=2:1.13~1ubuntu2 5 | COPY app.go . 6 | RUN pwd 7 | RUN CGO_ENABLED=0 go build app.go 8 | 9 | # app container stage 2 10 | FROM alpine:3.12.0 11 | RUN chmod a-w /etc 12 | RUN addgroup -S appgroup && adduser -S appuser -G appgroup -h /home/appuser 13 | COPY --from=0 /app /home/appuser/ 14 | USER appuser 15 | CMD ["/home/appuser/app"] 16 | -------------------------------------------------------------------------------- /13-image-footprint/read-only-fs/app.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | "os/user" 7 | ) 8 | 9 | func main () { 10 | user, err := user.Current() 11 | if err != nil { 12 | panic(err) 13 | } 14 | 15 | for { 16 | fmt.Println("user: " + user.Username + " id: " + user.Uid) 17 | time.Sleep(1 * time.Second) 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /14-networkpolicies/db.yaml: -------------------------------------------------------------------------------- 1 | # allows cassandra pods having incoming connection from backend namespace 2 | apiVersion: networking.k8s.io/v1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: cassandra 6 | namespace: cassandra 7 | spec: 8 | podSelector: 9 | matchLabels: 10 | run: cassandra 11 | policyTypes: 12 | - Ingress 13 | ingress: 14 | - from: 15 | - namespaceSelector: 16 | matchLabels: 17 | ns: default 18 | -------------------------------------------------------------------------------- /14-networkpolicies/default-deny-allow-dns.yaml: -------------------------------------------------------------------------------- 1 | # deny all incoming and outgoing traffic from all pods in namespace default 2 | # but allow DNS traffic. 
This way you can do for example: kubectl exec frontend -- curl backend 3 | apiVersion: networking.k8s.io/v1 4 | kind: NetworkPolicy 5 | metadata: 6 | name: deny 7 | namespace: default 8 | spec: 9 | podSelector: {} 10 | policyTypes: 11 | - Egress 12 | - Ingress 13 | egress: 14 | - to: 15 | ports: 16 | - port: 53 17 | protocol: TCP 18 | - port: 53 19 | protocol: UDP 20 | -------------------------------------------------------------------------------- /14-networkpolicies/default-deny.yaml: -------------------------------------------------------------------------------- 1 | # deny all incoming and outgoing traffic from all pods in namespace default 2 | apiVersion: networking.k8s.io/v1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: deny 6 | namespace: default 7 | spec: 8 | podSelector: {} 9 | policyTypes: 10 | - Egress 11 | - Ingress 12 | -------------------------------------------------------------------------------- /14-networkpolicies/merged.yaml: -------------------------------------------------------------------------------- 1 | # all outgoing traffic if: 2 | # (destination namespace label=id=ns1 AND port=80) OR (destination pod label=id=backend in default namespace) 3 | apiVersion: networking.k8s.io/v1 4 | kind: NetworkPolicy 5 | metadata: 6 | name: example 7 | namespace: default 8 | spec: 9 | podSelector: 10 | matchLabels: 11 | id: frontend 12 | policyTypes: 13 | - Egress 14 | egress: 15 | - to: 16 | - namespaceSelector: 17 | matchLabels: 18 | id: ns1 19 | ports: 20 | - protocol: TCP 21 | port: 80 22 | 23 | - to: 24 | - podSelector: 25 | matchLabels: 26 | id: backend 27 | 28 | --- 29 | 30 | # the following two NPs combined/merged are the same as the top one 31 | apiVersion: networking.k8s.io/v1 32 | kind: NetworkPolicy 33 | metadata: 34 | name: example2a 35 | namespace: default 36 | spec: 37 | podSelector: 38 | matchLabels: 39 | id: frontend 40 | policyTypes: 41 | - Egress 42 | egress: 43 | - to: 44 | - namespaceSelector: 45 | matchLabels: 46 | id: ns1 47 | ports: 48 | - protocol: TCP 49 | port: 80 50 | --- 51 | apiVersion: networking.k8s.io/v1 52 | kind: NetworkPolicy 53 | metadata: 54 | name: example2b 55 | namespace: default 56 | spec: 57 | podSelector: 58 | matchLabels: 59 | id: frontend 60 | policyTypes: 61 | - Egress 62 | egress: 63 | - to: 64 | - podSelector: 65 | matchLabels: 66 | id: backend 67 | -------------------------------------------------------------------------------- /14-networkpolicies/pod-selector.yaml: -------------------------------------------------------------------------------- 1 | # allows frontend pods to communicate with backend pods 2 | apiVersion: networking.k8s.io/v1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: frontend 6 | namespace: default 7 | spec: 8 | podSelector: 9 | matchLabels: 10 | run: frontend 11 | policyTypes: 12 | - Egress 13 | egress: 14 | - to: 15 | - podSelector: 16 | matchLabels: 17 | run: backend 18 | --- 19 | # allows backend pods to have incoming traffic from frontend pods 20 | apiVersion: networking.k8s.io/v1 21 | kind: NetworkPolicy 22 | metadata: 23 | name: backend 24 | namespace: default 25 | spec: 26 | podSelector: 27 | matchLabels: 28 | run: backend 29 | policyTypes: 30 | - Ingress 31 | ingress: 32 | - from: 33 | - podSelector: 34 | matchLabels: 35 | run: frontend 36 | -------------------------------------------------------------------------------- /15-conftest/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu 2 | ARG DEBIAN_FRONTEND=noninteractive 3 | RUN 
apt-get update && apt-get install -y golang-go 4 | COPY app.go . 5 | RUN go build app.go 6 | CMD ["./app"] 7 | -------------------------------------------------------------------------------- /15-conftest/docker/policy/base.rego: -------------------------------------------------------------------------------- 1 | # from https://www.conftest.dev 2 | package main 3 | 4 | denylist = [ 5 | "ubuntu" 6 | ] 7 | 8 | deny[msg] { 9 | input[i].Cmd == "from" 10 | val := input[i].Value 11 | contains(val[i], denylist[_]) 12 | 13 | msg = sprintf("unallowed image found %s", [val]) 14 | } 15 | -------------------------------------------------------------------------------- /15-conftest/docker/policy/commands.rego: -------------------------------------------------------------------------------- 1 | # from https://www.conftest.dev 2 | 3 | package commands 4 | 5 | denylist = [ 6 | "apk", 7 | "apt", 8 | "pip", 9 | "curl", 10 | "wget", 11 | ] 12 | 13 | deny[msg] { 14 | input[i].Cmd == "run" 15 | val := input[i].Value 16 | contains(val[_], denylist[_]) 17 | 18 | msg = sprintf("unallowed commands found %s", [val]) 19 | } 20 | -------------------------------------------------------------------------------- /15-conftest/docker/run.sh: -------------------------------------------------------------------------------- 1 | docker run --rm -v $(pwd):/project instrumenta/conftest test Dockerfile --all-namespaces 2 | -------------------------------------------------------------------------------- /15-conftest/kubernetes/deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | app: test 7 | name: test 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: test 13 | strategy: {} 14 | template: 15 | metadata: 16 | creationTimestamp: null 17 | labels: 18 | app: test 19 | spec: 20 | containers: 21 | - image: httpd 22 | name: httpd 23 | resources: {} 24 | status: {} 25 | -------------------------------------------------------------------------------- /15-conftest/kubernetes/policy/deployment.rego: -------------------------------------------------------------------------------- 1 | # from https://www.conftest.dev 2 | package main 3 | 4 | deny[msg] { 5 | input.kind = "Deployment" 6 | not input.spec.template.spec.securityContext.runAsNonRoot = true 7 | msg = "Containers must not run as root" 8 | } 9 | 10 | deny[msg] { 11 | input.kind = "Deployment" 12 | not input.spec.selector.matchLabels.app 13 | msg = "Containers must provide app label for pod selectors" 14 | } 15 | -------------------------------------------------------------------------------- /15-conftest/kubernetes/run.sh: -------------------------------------------------------------------------------- 1 | docker run --rm -v $(pwd):/project instrumenta/conftest test deploy.yaml 2 | -------------------------------------------------------------------------------- /16-imgpolicy-webhook/admission_config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiserver.config.k8s.io/v1 2 | kind: AdmissionConfiguration 3 | plugins: 4 | - name: ImagePolicyWebhook 5 | configuration: 6 | imagePolicy: 7 | kubeConfigFile: /etc/kubernetes/admission/kubeconf 8 | allowTTL: 50 9 | denyTTL: 50 10 | retryBackoff: 500 11 | defaultAllow: false 12 | -------------------------------------------------------------------------------- /16-imgpolicy-webhook/apiserver-client-cert.pem: 
-------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDGDCCAgCgAwIBAgITRRSRf+ctQj9YO8ZK8cFXdeU75DANBgkqhkiG9w0BAQsF 3 | ADAcMRowGAYDVQQDDBFib3VuY2VyLmxvY2FsLmxhbjAeFw0yMDEwMzEwOTUxMTNa 4 | Fw0yMDExMzAwOTUxMTNaMBwxGjAYBgNVBAMMEWJvdW5jZXIubG9jYWwubGFuMIIB 5 | IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwfKtOY/2mszPovJ3emsuPudz 6 | RqKMXQ4KDgQOdkQIltxsyoLjwOq96vhUUOPqZxx5UoUtVEKo/ada3re/ibF8aXBY 7 | bBlbpyGmLIyHOeZG1rzu58wRLRyYBKiOiHg25KCG5+itaHZTKeMQFoWZTJtSbBUF 8 | UCJcf/B33qP2aUvYX9mKR59KcPmFZ+3unJ24hQ0lsgDDdfvWEYjfE8JCbmU5n+zF 9 | pzIsRF9pPpg2WonjIRS+CZU08yUggNz9cesB2D0LqbxSeslpIe1hguN+zWCe9FUq 10 | BzxEaX5140Ls6oZ0wstV0lDODdGtuE+4AD7vOvASK7/9TusOH39q4z/R8OFz6wID 11 | AQABo1MwUTAdBgNVHQ4EFgQU1FLcD6xjFWKm1dWnDdR08LF5t4owHwYDVR0jBBgw 12 | FoAU1FLcD6xjFWKm1dWnDdR08LF5t4owDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG 13 | 9w0BAQsFAAOCAQEAc8psO+1tBy1KfD9/+EEmb8LbZfAKXMLbfL9/pRIGdt1oiDSR 14 | OUZqmU0yja1O15Fh4AWw+h3oOxjvurP14RkH/KCL6KGj0JGWqUDcUqUa6DH7JiJu 15 | VzGMJXESXmwFZYyfkO/86cI3MLyNu+nV7NcJzQlPofGgi1Hr8wCgTGAYNbet1FOp 16 | ABiT28TELBpTT39U3kB2aZ/0qfUsoYjrouaJFTEroL6LSiuutksdoE1wmDp7rLRW 17 | qOkCOlU3mnSIRESbOn1MPLLzXWNM8P0BOK9MlK4yL27/ytgyWm/ETlg2/z/LtTjw 18 | FVWJRdIudJBc7gGBMeiSIeAESpOSQdr07t+8YA== 19 | -----END CERTIFICATE----- 20 | -------------------------------------------------------------------------------- /16-imgpolicy-webhook/apiserver-client-key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDB8q05j/aazM+i 3 | 8nd6ay4+53NGooxdDgoOBA52RAiW3GzKguPA6r3q+FRQ4+pnHHlShS1UQqj9p1re 4 | t7+JsXxpcFhsGVunIaYsjIc55kbWvO7nzBEtHJgEqI6IeDbkoIbn6K1odlMp4xAW 5 | hZlMm1JsFQVQIlx/8Hfeo/ZpS9hf2YpHn0pw+YVn7e6cnbiFDSWyAMN1+9YRiN8T 6 | wkJuZTmf7MWnMixEX2k+mDZaieMhFL4JlTTzJSCA3P1x6wHYPQupvFJ6yWkh7WGC 7 | 437NYJ70VSoHPERpfnXjQuzqhnTCy1XSUM4N0a24T7gAPu868BIrv/1O6w4ff2rj 8 | P9Hw4XPrAgMBAAECggEAKpgaVQ758MtUOowXk0ogsO3x0ix2uSURLjzT1ENiw4cL 9 | WXpffInRRd5d3hn/679EIcxfxSaSqIptmYYvSZUyJpHmyW7UQyNPtG70b9HT8usx 10 | aLTXfNnPxQ4jp3MHUzSyDBJ8qvl5kDRu3xNeNIXUFyqSrSOuvuZX3QyTXFKs5Xem 11 | PDKJ9q7XjNV/F9Ak18UicEWHk+THJTLF6KC1qDCqc/i6eXj7J1x9rs5uT2mOkUeC 12 | thd6j8e32ERZ9ZANHhMYaEWh2c8smPQpI7WiAIlEiOYt7NLcAHd9UK8M6Sw8x77F 13 | szvG/G0vEM8k3qjnv6+LxE5eTWPyXlTR+bORoIZYIQKBgQDpn0uXjTVkEHKks2N9 14 | XTmvHhXJAezVr+XyOGNlOACflJ+rJu/oLS+ZVMBiv2age8/W99cz3UhPqWEzepBS 15 | 0PvIABoRBdgn13XUsUuPmQ7ves6xE8X8B1cwbw8CnVg8fBs8RVt2UnYV5MvqWDhN 16 | 0c8V3yAgctmRTcLZ0tf2EgZ2+wKBgQDUhoTbi6fYfootYEkbTTanhNFZzIEQrJcB 17 | pGG5AEfrKBypxF9ssN4nZkrro2SbptnK/dqFpJyGSAPHp9ArMXDH85bMjnu3Ef/Y 18 | hSI6naU+q5TUSMUCMT/x6HLHmKC/WLansxoMy0YLgl6VApShSx4vXeSGeatcukXf 19 | VTYxfiIj0QKBgQC76zJ22VMVBlXxPYrNkGuR/PUxFqdWy4J/b/QjuwRlWLuGhG99 20 | prse7xlBVQxMEst/8yPbyCceTPSu3+uvcgLVQZ+2CalxgtJ5H5PUEh39bB9OOMWM 21 | kKsFfo/oXoBXRszxkiib6ICr47pS6lt/3UBiK3RAJaH0S0RsyjiE6sLgeQKBgDvx 22 | mUWndXct8eFsmBI4TCMh5j6y3PCMDaer6thYiPB+Bt+ngNbSaEaqppUiJ/1zKVkF 23 | Ev7A+FkztMU6ww97yXjMbwtRYPm4/PXgT1BDKNmtYbQitlaw931O+BjuJuDxr8tL 24 | TByxtRcH9Y+IA84etuvbStrXNItaCt08VqXNEuoRAoGAY4adOhNppw960sgMbasr 25 | z3jDs7/uoO9mhgKNAthNPtIEBkjPyFEp926iqiDkO4sr20QC30smY6vhwH6hfHsW 26 | J9KKcb6ghzkA+5FZbp9i0cpKBz91lx+qaEarKmid59jp1Qa66SiIDdMIYrUDfPWx 27 | hfBaRj6zF8ABh5H5AH37nUA= 28 | -----END PRIVATE KEY----- -------------------------------------------------------------------------------- /16-imgpolicy-webhook/external-cert.pem: -------------------------------------------------------------------------------- 
1 | -----BEGIN CERTIFICATE----- 2 | MIIDFzCCAf+gAwIBAgIUaRt+PAzRXodqMXEF5sbPyb6nV10wDQYJKoZIhvcNAQEL 3 | BQAwGzEZMBcGA1UEAwwQZXh0ZXJuYWwtc2VydmljZTAeFw0yMDEwMzExMDUxNDda 4 | Fw0yMDExMzAxMDUxNDdaMBsxGTAXBgNVBAMMEGV4dGVybmFsLXNlcnZpY2UwggEi 5 | MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsDTI216EC4/cjDb5SEVYifJ3D 6 | VbV3fgg/LLAAEk/7UmRRX8aVe8ckaF6lEWRmwwUmZzuKigfQS3CDYPbBVhfmQHW5 7 | nfKEOfbhVdrX3UXChLPd8x79WUWq/zQ4gGgjhob1989BughNOlut8JxyBf7n1Kxm 8 | qa+RVtSH9ywaw4I14OUjEEl/d1TofjKlDuGPfS87S+wKhliKUgCKHxbaFlWd3rNP 9 | oOonY09Bm6f/bPxn6P6m3n7MtRvdRWnTAeDAvCpSIt0Q8qRY/3fpg3gROvBC17eh 10 | 2l6aD43BfiPG/YIW3E9seYDNBv9T8ah7h+zLQnBrJiSvBvYKv9nhSRlqCnyNAgMB 11 | AAGjUzBRMB0GA1UdDgQWBBRuzRw1Y5BO03wGeIv6qdYA/F2ZRDAfBgNVHSMEGDAW 12 | gBRuzRw1Y5BO03wGeIv6qdYA/F2ZRDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3 13 | DQEBCwUAA4IBAQAgAngjNhpInyOZCmVflgKYXgPcniAmG1Ow7qELTzKFE6afdSy+ 14 | IQk5bQCYiuDbN7tA0pvKdrqiXUNBpikNGUFYrdWDf4oQxdo2CP/wcfDrC8Y6AKLq 15 | m8+RaFs5iosV6+ZWwJdawSGuqxLOTLcD83xM1goU919IhwntGFYepthHjbtkg+2X 16 | 3Ydhz21afrkXZYFQBXKKvzebK0mYWeIgVeqDR4zL0uRQmL1TCxIhokDhcGxHDOyZ 17 | 82BomsT2mg2APXtCCGZ+dCd1pnNoYRunCn2ZXzTSvdfc7kzWMPe4fN8tBwLVK7+/ 18 | vL1/34t0+adU3beD/3RUKDxD6gjTB6qe3fn0 19 | -----END CERTIFICATE----- 20 | ~ -------------------------------------------------------------------------------- /16-imgpolicy-webhook/external-key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PRIVATE KEY----- 2 | MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCsDTI216EC4/cj 3 | Db5SEVYifJ3DVbV3fgg/LLAAEk/7UmRRX8aVe8ckaF6lEWRmwwUmZzuKigfQS3CD 4 | YPbBVhfmQHW5nfKEOfbhVdrX3UXChLPd8x79WUWq/zQ4gGgjhob1989BughNOlut 5 | 8JxyBf7n1Kxmqa+RVtSH9ywaw4I14OUjEEl/d1TofjKlDuGPfS87S+wKhliKUgCK 6 | HxbaFlWd3rNPoOonY09Bm6f/bPxn6P6m3n7MtRvdRWnTAeDAvCpSIt0Q8qRY/3fp 7 | g3gROvBC17eh2l6aD43BfiPG/YIW3E9seYDNBv9T8ah7h+zLQnBrJiSvBvYKv9nh 8 | SRlqCnyNAgMBAAECggEAE8gEoPeTxJBPRtF6s1bY72gif2XoUk/ERPcvWSLB3WRw 9 | skZvv5oWoGH7l8DzKTl86xhtaRVLprA1xHUuibYLU2bP4kJ9sqQzdV74pV8EeUPm 10 | pjsgsgJEl092QpTI5GTAOaF/S9BGyhJOniYO5rE3yJHULI2P6QbxspxBjuYxn14G 11 | 1BuVa85TPGwALlwkRlHWz5K3K/p6jSVQFYg77YOJK9kgE4Ht7YM6bsUucKnTVaqV 12 | +YNfoL9UG3CBXwgj/vi1JaJPpxduUz3Vt01Cv1KbdNLGgaopZ7X8qrWCKaYtSlRy 13 | CdRjcfHhUgFFTqJRb19L1ZRSN36ix5Mk27oZnam5QQKBgQDXHVY0eBxjzqt2swHw 14 | 7xb+FDKHKcN15oJ6hfKmZQKm4G5HsH6LkUQpQXdZRN0qVimLJOXV5+n7BabzajC4 15 | pUb8/B3YnaTgDL5V881chUaHws+U0WwtZd6a4JKY3QO9GnG3Ht3pTaduK3qxw2+Y 16 | q3e/6laNIoiOfabMO5LvHqfTUQKBgQDMwJL+QJyC/0fkiHrcXKZMSNos90OzHzYv 17 | /1aezrh4kjaQXF+lK8Z54mSdWgfJHgP04VxWQ1ATmZgdnXXmQqQGOCyWmKOkESID 18 | YLuTbIQYVsdNB6aWhWCrpNAPt07GxPp/5mC89d3GOwdhRGxypOFPRYUGNf+8uAK3 19 | l/QFirPufQKBgC+T1oAuXKEakcPkHbbLOruhffd1Hj0SaaNtwthYmPiNxPZoK+T0 20 | kS3Kw/njiEMX4YJb+9WOwKp7eox13SbJr2S+4l8JpvDDAMoD7VTcSVg3ly/kDdgQ 21 | ouaAawwSPgRyN9p3oj86pKByT2XHwwPyUL8Ktm/qxmc1lVGwj3QBxL6xAoGBAK6p 22 | sDsYHutYnZmEG1qT5oC4vPUS0Tuh6nWzWc8LBS5FfCpVwnmCCEO9LptUS8PyyOlC 23 | vaQQs+SkZ7ELu6F7Q7TSgqbM+vtgaiV7hUfTkDO7AzTgy+knSjnh1GMzug2xxwcI 24 | HYnUKyWJAVimJ+T8Zf9bjIip08jHJ/o5s75ufeN5AoGBAL+vf85J4+8jjxDDFIFl 25 | Wq3tmUkDXqdn2kHpbUkZycuqveKjBr/fqM9vvY0sKLlzQpcVblgtVMXQJqKgQXfR 26 | oqYnTRMvDoW+ZWAE12sv2ga6Zs/zde800XlHdqggX0cE/OdeK0hUz7BkmaGmDkNQ 27 | KMJ1t5WbE93W0XmnT2a3ydDX 28 | -----END PRIVATE KEY----- -------------------------------------------------------------------------------- /16-imgpolicy-webhook/kubeconf: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Config 3 | 4 | # clusters refers to the remote service. 
5 | clusters: 6 | - cluster: 7 | certificate-authority: /etc/kubernetes/admission/external-cert.pem # CA for verifying the remote service. 8 | server: https://external-service:1234/check-image # URL of remote service to query. Must use 'https'. 9 | name: image-checker 10 | 11 | contexts: 12 | - context: 13 | cluster: image-checker 14 | user: api-server 15 | name: image-checker 16 | current-context: image-checker 17 | preferences: {} 18 | 19 | # users refers to the API server's webhook configuration. 20 | users: 21 | - name: api-server 22 | user: 23 | client-certificate: /etc/kubernetes/admission/apiserver-client-cert.pem # cert for the webhook admission controller to use 24 | client-key: /etc/kubernetes/admission/apiserver-client-key.pem # key matching the cert 25 | -------------------------------------------------------------------------------- /17-immutability/immutable-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | run: impod 7 | name: impod 8 | spec: 9 | containers: 10 | - image: nginx 11 | name: impod 12 | resources: {} 13 | startupProbe: 14 | exec: 15 | command: 16 | - rm 17 | - /bin/bash 18 | initialDelaySeconds: 5 19 | periodSeconds: 5 20 | dnsPolicy: ClusterFirst 21 | restartPolicy: Always 22 | status: {} 23 | -------------------------------------------------------------------------------- /17-immutability/readonly-fs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | run: ro-pod 7 | name: ro-pod 8 | spec: 9 | containers: 10 | - image: nginx 11 | name: ro-pod 12 | resources: {} 13 | securityContext: 14 | readOnlyRootFilesystem: true 15 | volumeMounts: 16 | - name: etc-nginx-conf 17 | mountPath: /etc/nginx/conf.d/ 18 | # nginx also needs these paths writable when the root filesystem is read-only 19 | - name: var-cache-nginx 20 | mountPath: /var/cache/nginx/ 21 | - name: var-run 22 | mountPath: /var/run/ 23 | volumes: 24 | - name: etc-nginx-conf 25 | emptyDir: {} 26 | - name: var-cache-nginx 27 | emptyDir: {} 28 | - name: var-run 29 | emptyDir: {} 30 | dnsPolicy: ClusterFirst 31 | restartPolicy: Always 32 | status: {} 33 | -------------------------------------------------------------------------------- /18-auditing/policy/all-metadata.yaml: -------------------------------------------------------------------------------- 1 | # Log all requests at the Metadata level. 2 | apiVersion: audit.k8s.io/v1 3 | kind: Policy 4 | rules: 5 | - level: Metadata 6 | -------------------------------------------------------------------------------- /18-auditing/policy/generic.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: audit.k8s.io/v1 # This is required. 2 | kind: Policy 3 | # Don't generate audit events for all requests in RequestReceived stage. 4 | omitStages: 5 | - "RequestReceived" 6 | rules: 7 | # Log pod changes at RequestResponse level 8 | - level: RequestResponse 9 | resources: 10 | - group: "" 11 | # Resource "pods" doesn't match requests to any subresource of pods, 12 | # which is consistent with the RBAC policy.
13 | resources: ["pods"] 14 | # Log "pods/log", "pods/status" at Metadata level 15 | - level: Metadata 16 | resources: 17 | - group: "" 18 | resources: ["pods/log", "pods/status"] 19 | 20 | # Don't log requests to a configmap called "controller-leader" 21 | - level: None 22 | resources: 23 | - group: "" 24 | resources: ["configmaps"] 25 | resourceNames: ["controller-leader"] 26 | 27 | # Don't log watch requests by the "system:kube-proxy" on endpoints or services 28 | - level: None 29 | users: ["system:kube-proxy"] 30 | verbs: ["watch"] 31 | resources: 32 | - group: "" # core API group 33 | resources: ["endpoints", "services"] 34 | 35 | # Don't log authenticated requests to certain non-resource URL paths. 36 | - level: None 37 | userGroups: ["system:authenticated"] 38 | nonResourceURLs: 39 | - "/api*" # Wildcard matching. 40 | - "/version" 41 | 42 | # Log the request body of configmap changes in kube-system. 43 | - level: Request 44 | resources: 45 | - group: "" # core API group 46 | resources: ["configmaps"] 47 | # This rule only applies to resources in the "kube-system" namespace. 48 | # The empty string "" can be used to select non-namespaced resources. 49 | namespaces: ["kube-system"] 50 | 51 | # Log configmap and secret changes in all other namespaces at the Metadata level. 52 | - level: Metadata 53 | resources: 54 | - group: "" # core API group 55 | resources: ["secrets", "configmaps"] 56 | 57 | # Log all other resources in core and extensions at the Request level. 58 | - level: Request 59 | resources: 60 | - group: "" # core API group 61 | - group: "extensions" # Version of group should NOT be included. 62 | 63 | # A catch-all rule to log all other requests at the Metadata level. 64 | - level: Metadata 65 | # Long-running requests like watches that fall under this rule will not 66 | # generate an audit event in RequestReceived. 
67 | omitStages: 68 | - "RequestReceived" 69 | 70 | -------------------------------------------------------------------------------- /19-upgrade-scenario/base/1.18.0/master/destroy_master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Use this script to destroy master prior to shut down 3 | 4 | kubeadm reset -f 5 | rm -rf /etc/cni/net.d /etc/kubernetes /var/lib/etcd /var/lib/kubelet /var/run/kubernetes /var/lib/cni ~/.kube 6 | iptables -F 7 | init 0 8 | -------------------------------------------------------------------------------- /19-upgrade-scenario/base/1.18.0/master/init_master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Use this script to initialize master 3 | 4 | KUBE_VERSION=1.18.0 5 | HOST_IP=`/sbin/ifconfig enp0s8 | egrep -o 'inet [0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | cut -d' ' -f2` 6 | ### init k8s 7 | kubeadm init --apiserver-advertise-address=${HOST_IP} --kubernetes-version=${KUBE_VERSION} --ignore-preflight-errors=NumCPU --skip-token-print 8 | ip route add 10.96.0.0/16 dev enp0s8 src ${HOST_IP} 9 | 10 | mkdir -p $HOME/.kube 11 | sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 12 | sudo chown $(id -u):$(id -g) $HOME/.kube/config 13 | 14 | kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" 15 | 16 | echo 17 | echo "### COMMAND TO ADD A WORKER NODE ###" 18 | kubeadm token create --print-join-command --ttl 0 19 | -------------------------------------------------------------------------------- /19-upgrade-scenario/base/1.18.0/setup_node.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Use this script to setup any node in your Kubernetes cluster 3 | # Either master or worker 4 | # Source: http://kubernetes.io/docs/getting-started-guides/kubeadm/ 5 | 6 | ### setup terminal 7 | KUBE_VERSION=1.18.0 8 | apt-get install -y bash-completion binutils apparmor-utils 9 | echo 'colorscheme ron' >> ~/.vimrc 10 | echo 'set tabstop=2' >> ~/.vimrc 11 | echo 'set shiftwidth=2' >> ~/.vimrc 12 | echo 'set expandtab' >> ~/.vimrc 13 | echo 'source <(kubectl completion bash)' >> ~/.bashrc 14 | echo 'alias k=kubectl' >> ~/.bashrc 15 | echo 'alias c=clear' >> ~/.bashrc 16 | echo 'complete -F __start_kubectl k' >> ~/.bashrc 17 | sed -i '1s/^/force_color_prompt=yes\n/' ~/.bashrc 18 | 19 | 20 | ### install k8s and docker 21 | apt-get remove -y docker.io kubelet kubeadm kubectl kubernetes-cni docker-ce 22 | apt-get autoremove -y 23 | apt-get install -y etcd-client vim build-essential 24 | 25 | ### 26 | docker rmi -f $(docker images -aq) 27 | 28 | ### install kube-bench 29 | curl -L https://github.com/aquasecurity/kube-bench/releases/download/v0.3.1/kube-bench_0.3.1_linux_amd64.deb -o /tmp/kube-bench_0.3.1_linux_amd64.deb 30 | sudo apt install /tmp/kube-bench_0.3.1_linux_amd64.deb -f 31 | rm -rf /tmp/kube-bench* 32 | 33 | ### install falco 34 | curl -s https://falco.org/repo/falcosecurity-3672BA8F.asc | apt-key add - 35 | echo "deb https://dl.bintray.com/falcosecurity/deb stable main" | tee -a /etc/apt/sources.list.d/falcosecurity.list 36 | 37 | ### install kubesec 38 | 39 | curl -L https://github.com/controlplaneio/kubesec/releases/download/v2.8.0/kubesec_linux_386.tar.gz -o /tmp/kubesec_linux_386.tar.gz 40 | mkdir -p /tmp/kubesec && tar -xvf /tmp/kubesec_linux_386.tar.gz -C /tmp/kubesec/ 41 | cp /tmp/kubesec/kubesec /usr/bin/kubesec 42 | rm -rf /tmp/kubesec* 43 | 44 | apt-get update -y
45 | apt-get -y install linux-headers-$(uname -r) 46 | apt-get install -y falco 47 | 48 | systemctl daemon-reload 49 | curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - 50 | cat <<EOF > /etc/apt/sources.list.d/kubernetes.list 51 | deb http://apt.kubernetes.io/ kubernetes-xenial main 52 | EOF 53 | apt-get update 54 | apt-get install -y docker.io kubelet=${KUBE_VERSION}-00 kubeadm=${KUBE_VERSION}-00 kubectl=${KUBE_VERSION}-00 kubernetes-cni=0.8.7-00 55 | 56 | cat > /etc/docker/daemon.json <