├── logout.sh ├── prometheus ├── scripts │ ├── cleanup.sh │ ├── 1-deploy-prometheus.sh │ └── 2-deploy-rpc-app.sh ├── prometheus-service.yaml ├── rpc-app-service.yaml ├── rpc-app-deployment.yaml ├── prometheus-deployment.yaml ├── config │ └── prometheus.yml └── rbac.yaml ├── sanity ├── scripts │ ├── 1-configmap.sh │ ├── cleanup.sh │ ├── 2-deploy-pod.sh │ ├── 3-deployment.sh │ ├── 7-daemonset.sh │ ├── 4-service-clusterip.sh │ ├── 6-stateful-app.sh │ ├── 5-service-loadbalancer.sh │ ├── 8-ingress-controller-nginx.sh │ └── 9-ingress-test.sh ├── 1-configmap.yaml ├── 4-service-clusterip.yaml ├── 5-service-loadbalancer.yaml ├── 3-deployment.yaml ├── 9-ingress-test.yaml ├── README.md ├── 6-stateful-app.yaml └── 7-daemonset.yaml ├── login-k8s-admin.sh ├── util ├── debian-shell.sh ├── ssh-to-node.sh ├── wait-for-pod.sh ├── curl-in-cluster.sh ├── wait-for-external-IP.sh ├── wait-for-ingress-IP.sh ├── wait-for-deployment.sh └── wait-for-daemonset.sh ├── login-bob-simple-cluster.sh ├── roles └── clusterrole-storageclass-view.yaml ├── login-k8s-admin-simple-cluster.sh ├── rolebindings ├── rolebinding-def-svc-acc-psp.yaml ├── ns1 │ ├── rolebinding-def-svc-acc-psp-ns1.yaml │ └── rolebinding-bob-ns1.yaml ├── rolebinding-ingress-nginx-psp.yaml ├── clusterrolebinding-tkg-dev.yaml └── psp-vmware-system-restricted-with-root.yaml ├── README.md ├── gateway ├── configure-gw-10.sh └── configure-gw-20.sh └── tkc ├── simple-cluster-upgrade-1.18.5.yaml └── simple-cluster-1.17.yaml /logout.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | kubectl vsphere logout 3 | -------------------------------------------------------------------------------- /prometheus/scripts/cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | kubectl delete -f prometheus 4 | kubectl delete configmap prometheus-config 5 | -------------------------------------------------------------------------------- /sanity/scripts/1-configmap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | namespace=${1:-default} 4 | 5 | kubectl -n "$namespace" apply -f sanity/1-configmap.yaml -------------------------------------------------------------------------------- /login-k8s-admin.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | kubectl vsphere login --server 192.168.20.128 -u k8s-cluster-admin@vsphere.local 3 | kubectl config use-context test-cluster 4 | -------------------------------------------------------------------------------- /sanity/1-configmap.yaml: -------------------------------------------------------------------------------- 1 | # An example ConfigMap 2 | 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | name: cm-test 7 | data: 8 | foo: "bar" 9 | 10 | -------------------------------------------------------------------------------- /util/debian-shell.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Creates a pod in the cluster with a Debian shell. 
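# (For example, assuming the pod has outbound network access, one might run
#  "apt-get update && apt-get install -y curl dnsutils" inside the shell and then
#  curl or nslookup a Service by its cluster DNS name, e.g.
#  my-clusterip-service.default.svc.cluster.local, to verify in-cluster networking.)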
Useful for debugging 4 | 5 | kubectl run -i --tty busybox --image=docker.io/library/debian:latest --restart=Never -- sh 6 | kubectl delete pod busybox 7 | -------------------------------------------------------------------------------- /login-bob-simple-cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | kubectl vsphere login --server 192.168.20.128 -u bob@vsphere.local --tanzu-kubernetes-cluster-name simple-cluster --tanzu-kubernetes-cluster-namespace test-cluster 3 | kubectl config use-context simple-cluster 4 | -------------------------------------------------------------------------------- /roles/clusterrole-storageclass-view.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: storageclass-view 5 | rules: 6 | - apiGroups: ["storage.k8s.io"] 7 | resources: ["storageclasses"] 8 | verbs: ["get", "watch", "list"] 9 | -------------------------------------------------------------------------------- /login-k8s-admin-simple-cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | kubectl vsphere login --server 192.168.20.128 -u k8s-cluster-admin@vsphere.local --tanzu-kubernetes-cluster-name simple-cluster --tanzu-kubernetes-cluster-namespace test-cluster 3 | kubectl config use-context simple-cluster 4 | -------------------------------------------------------------------------------- /prometheus/prometheus-service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: prometheus-service 5 | spec: 6 | selector: 7 | app: prometheus 8 | ports: 9 | - protocol: TCP 10 | port: 9090 11 | targetPort: 9090 12 | type: LoadBalancer 13 | -------------------------------------------------------------------------------- /prometheus/rpc-app-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: rpc-app-service 5 | labels: 6 | app: rpc-app 7 | spec: 8 | ports: 9 | - port: 8081 10 | targetPort: 8081 11 | protocol: TCP 12 | selector: 13 | app: rpc-app 14 | type: LoadBalancer 15 | -------------------------------------------------------------------------------- /sanity/scripts/cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Note that this doesn't clean up any rolebindings 4 | 5 | namespace=${1:-default} 6 | 7 | kubectl -n "$namespace" delete -f sanity 8 | kubectl delete -f sanity/7-daemonset.yaml 9 | kubectl -n "$namespace" delete pod kuard 10 | helm delete ingress-nginx -------------------------------------------------------------------------------- /util/ssh-to-node.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "$#" -ne 1 ]; then 4 | echo "Usage: ./ssh-to-node.sh " 5 | exit 1 6 | fi 7 | 8 | secret=$(kubectl get secret tkg-cluster-1-ssh-password -o jsonpath='{.data.ssh-passwordkey}' | base64 -D) 9 | ssh -t root@192.168.20.6 sshpass -p "$secret" ssh vmware-system-user@$1 10 | -------------------------------------------------------------------------------- /rolebindings/rolebinding-def-svc-acc-psp.yaml: -------------------------------------------------------------------------------- 1 | kind: RoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 
| metadata: 4 | name: rolebinding-def-svc-acc-psp 5 | roleRef: 6 | kind: ClusterRole 7 | name: psp:vmware-system-privileged 8 | apiGroup: rbac.authorization.k8s.io 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | namespace: default 13 | -------------------------------------------------------------------------------- /util/wait-for-pod.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Waits for a Pod to be ready 4 | 5 | if [ "$#" -ne 2 ]; then 6 | echo "Usage: ./wait-for-pod.sh " 7 | exit 1 8 | fi 9 | 10 | while [[ $(kubectl -n "$1" get pod "$2" -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" ]]; do 11 | echo "waiting for pod" && sleep 1; 12 | done 13 | -------------------------------------------------------------------------------- /util/curl-in-cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Runs a pod in the cluster and runs curl, passing through whatever parameters are given 4 | 5 | if [ "$#" -ne 1 ]; then 6 | echo "Usage: ./curl-in-cluster.sh " 7 | exit 1 8 | fi 9 | 10 | kubectl run -i --tty curl --image=docker.io/curlimages/curl:latest --restart=Never -- curl "$1" 11 | kubectl delete pod curl 12 | -------------------------------------------------------------------------------- /sanity/4-service-clusterip.yaml: -------------------------------------------------------------------------------- 1 | # See https://kubernetes.io/docs/concepts/services-networking/service/ 2 | # Adds a ClusterIP service to 3-deployment.yaml 3 | 4 | apiVersion: v1 5 | kind: Service 6 | metadata: 7 | name: my-clusterip-service 8 | spec: 9 | selector: 10 | app: nginx 11 | ports: 12 | - protocol: TCP 13 | port: 80 14 | targetPort: 80 15 | 16 | -------------------------------------------------------------------------------- /rolebindings/ns1/rolebinding-def-svc-acc-psp-ns1.yaml: -------------------------------------------------------------------------------- 1 | kind: RoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: rolebinding-def-svc-acc-psp 5 | namespace: ns1 6 | roleRef: 7 | kind: ClusterRole 8 | name: psp:vmware-system-restricted-with-root 9 | apiGroup: rbac.authorization.k8s.io 10 | subjects: 11 | - kind: ServiceAccount 12 | name: default 13 | namespace: ns1 14 | -------------------------------------------------------------------------------- /sanity/5-service-loadbalancer.yaml: -------------------------------------------------------------------------------- 1 | # See https://kubernetes.io/docs/concepts/services-networking/service/ 2 | # Adds a LoadBalancer service to 3-deployment.yaml 3 | 4 | apiVersion: v1 5 | kind: Service 6 | metadata: 7 | name: my-loadbalanced-service 8 | spec: 9 | selector: 10 | app: nginx 11 | ports: 12 | - protocol: TCP 13 | port: 80 14 | targetPort: 80 15 | type: LoadBalancer 16 | 17 | -------------------------------------------------------------------------------- /util/wait-for-external-IP.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Waits for an external IP of a service to be ready 4 | 5 | if [ "$#" -ne 2 ]; then 6 | echo "Usage: ./wait-for-external-IP.sh " 7 | exit 1 8 | fi 9 | 10 | filter='jsonpath={..status.loadBalancer.ingress[0].ip}' 11 | while [[ $(kubectl -n "$1" get service "$2" -o "$filter") == "" ]]; do 12 | echo "waiting for external IP" && sleep 1; 13 | done 14 | 
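# Example (the Service created by sanity/5-service-loadbalancer.yaml in the default namespace):
#   util/wait-for-external-IP.sh default my-loadbalanced-service
# The other wait-for-* helpers below take the same <namespace> <resource-name> argument pair.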
-------------------------------------------------------------------------------- /util/wait-for-ingress-IP.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Waits for an external IP of an ingress to be ready 4 | 5 | if [ "$#" -ne 2 ]; then 6 | echo "Usage: ./wait-for-ingress-IP.sh " 7 | exit 1 8 | fi 9 | 10 | filter='jsonpath={..status.loadBalancer.ingress[0].ip}' 11 | while [[ $(kubectl -n "$1" get ingress "$2" -o "$filter") == "" ]]; do 12 | echo "waiting for ingress IP" && sleep 1; 13 | done 14 | -------------------------------------------------------------------------------- /util/wait-for-deployment.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Waits for a Deployment to be ready 4 | 5 | if [ "$#" -ne 2 ]; then 6 | echo "Usage: ./wait-for-deployment.sh " 7 | exit 1 8 | fi 9 | 10 | while [[ $(kubectl -n "$1" get deployment "$2" -o 'jsonpath={..spec.replicas}') != $(kubectl -n "$1" get deployment "$2" -o 'jsonpath={..status.readyReplicas}') ]]; do 11 | echo "waiting for replicas to be ready" && sleep 1; 12 | done 13 | -------------------------------------------------------------------------------- /util/wait-for-daemonset.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Waits for a DaemonSet to be ready 4 | 5 | if [ "$#" -ne 2 ]; then 6 | echo "Usage: ./wait-for-daemonset.sh " 7 | exit 1 8 | fi 9 | 10 | while [[ $(kubectl -n "$1" get daemonset "$2" -o 'jsonpath={..status.numberReady}') != $(kubectl -n "$1" get daemonset "$2" -o 'jsonpath={..status.desiredNumberScheduled}') ]]; do 11 | echo "waiting for replicas to be ready" && sleep 1; 12 | done 13 | -------------------------------------------------------------------------------- /sanity/scripts/2-deploy-pod.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # See https://docs.vmware.com/en/VMware-vSphere/7.0/vmware-vsphere-with-tanzu/GUID-5DFC347C-694B-4288-96DA-EAEB5818D951.html 3 | 4 | namespace=${1:-default} 5 | name="kuard" 6 | 7 | kubectl -n "$namespace" run --restart=Never --image=gcr.io/kuar-demo/kuard-amd64:blue "$name" 8 | util/wait-for-pod.sh "$namespace" "$name" 9 | echo "Pod Started - now open http://localhost:8080 in your browser" 10 | kubectl -n "$namespace" port-forward "$name" 8080:8080 11 | -------------------------------------------------------------------------------- /sanity/scripts/3-deployment.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # See https://kubernetes.io/docs/tasks/run-application/run-stateless-application-deployment/ 4 | 5 | # Note that this requires a PSP binding for the default service account 6 | # see rolebindings/rolebinding-def-svc-acc-psp.yaml 7 | 8 | namespace=${1:-default} 9 | name="nginx-deployment" 10 | 11 | kubectl -n "$namespace" apply -f sanity/3-deployment.yaml 12 | util/wait-for-deployment.sh "$namespace" "$name" 13 | kubectl -n "$namespace" get deployment "$name" -------------------------------------------------------------------------------- /prometheus/rpc-app-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: rpc-app-deployment 5 | labels: 6 | app: rpc-app 7 | spec: 8 | replicas: 2 9 | selector: 10 | matchLabels: 11 | app: rpc-app 12 | template: 13 | metadata: 14 | 
labels: 15 | app: rpc-app 16 | spec: 17 | containers: 18 | - name: rpc-app-cont 19 | image: supergiantkir/prometheus-test-app 20 | ports: 21 | - name: web 22 | containerPort: 8081 23 | -------------------------------------------------------------------------------- /sanity/scripts/7-daemonset.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # See https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ 4 | 5 | # Note that this requires a PSP binding for the default service account 6 | # see rolebindings/rolebinding-def-svc-acc-psp.yaml 7 | 8 | # Note that this YAML prescribes the kube-system namespace 9 | 10 | namespace="kube-system" 11 | name="fluentd-elasticsearch" 12 | 13 | kubectl apply -f sanity/7-daemonset.yaml 14 | util/wait-for-daemonset.sh "$namespace" "$name" 15 | kubectl -n "$namespace" get daemonset "$name" -------------------------------------------------------------------------------- /sanity/scripts/4-service-clusterip.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # See https://kubernetes.io/docs/tasks/run-application/run-stateless-application-deployment/ 4 | 5 | # Note that this requires a PSP binding for the default service account 6 | # see rolebindings/rolebinding-def-svc-acc-psp.yaml 7 | 8 | namespace=${1:-default} 9 | 10 | sanity/scripts/3-deployment.sh "$namespace" 11 | kubectl -n "$namespace" apply -f sanity/4-service-clusterip.yaml 12 | kubectl -n "$namespace" get endpoints,services 13 | echo -e "\nNow use util/curl-in-cluster.sh to validate" 14 | -------------------------------------------------------------------------------- /sanity/scripts/6-stateful-app.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # See https://docs.vmware.com/en/VMware-vSphere/7.0/vmware-vsphere-with-tanzu/GUID-A19F6480-40DC-4343-A5A9-A5D3BFC0742E.html 4 | 5 | # Note that this requires a PSP binding for the default service account 6 | # see rolebindings/rolebinding-def-svc-acc-psp.yaml 7 | 8 | namespace=${1:-default} 9 | name="mysql" 10 | 11 | kubectl -n "$namespace" apply -f sanity/6-stateful-app.yaml 12 | util/wait-for-deployment.sh "$namespace" "$name" 13 | kubectl -n "$namespace" get pvc 14 | kubectl -n "$namespace" get deployment "$name" 15 | 16 | -------------------------------------------------------------------------------- /sanity/scripts/5-service-loadbalancer.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # See https://kubernetes.io/docs/concepts/services-networking/service/ 4 | 5 | # Note that this requires a PSP binding for the default service account 6 | # see rolebindings/rolebinding-def-svc-acc-psp.yaml 7 | 8 | namespace=${1:-default} 9 | 10 | sanity/scripts/3-deployment.sh "$namespace" 11 | kubectl -n "$namespace" apply -f sanity/5-service-loadbalancer.yaml 12 | util/wait-for-external-IP.sh "$namespace" my-loadbalanced-service 13 | kubectl -n "$namespace" get endpoints,services 14 | echo -e "\nNow use curl :80 to validate" 15 | -------------------------------------------------------------------------------- /sanity/3-deployment.yaml: -------------------------------------------------------------------------------- 1 | # See https://kubernetes.io/docs/tasks/run-application/run-stateless-application-deployment/ 2 | # An example Deployment 3 | 4 | apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2 5 | kind: 
Deployment 6 | metadata: 7 | name: nginx-deployment 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: nginx 12 | replicas: 2 # tells deployment to run 2 pods matching the template 13 | template: 14 | metadata: 15 | labels: 16 | app: nginx 17 | spec: 18 | containers: 19 | - name: nginx 20 | image: nginx:1.14.2 21 | ports: 22 | - containerPort: 80 23 | -------------------------------------------------------------------------------- /prometheus/scripts/1-deploy-prometheus.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script will deploy Prometheus to a TKG cluster. 4 | # Example taken from https://medium.com/kubernetes-tutorials/monitoring-your-kubernetes-deployments-with-prometheus-5665eda54045 5 | 6 | kubectl apply -f prometheus/rbac.yaml 7 | kubectl create configmap prometheus-config --from-file prometheus/config/prometheus.yml 8 | kubectl apply -f prometheus/prometheus-deployment.yaml 9 | util/wait-for-deployment.sh "default" "prometheus-deployment" 10 | kubectl apply -f prometheus/prometheus-service.yaml 11 | util/wait-for-external-IP.sh "default" "prometheus-service" 12 | kubectl get services 13 | echo -e "\nNow visit :9090 in a browser" 14 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # vSphere with Tanzu Demos 2 | 3 | This repository is designed to accompany the YouTube Video series of Deep Dive videos on vSphere with Tanzu 4 | 5 | See https://www.youtube.com/playlist?list=PLymLY4xJSThpIfzKDLIMtRlf-butzn-Yq 6 | 7 | Contents are currently: 8 | 1. Login scripts in the root to demonstrate how to log into a TKC as different personas 9 | 2. Gateway configurations to accompany video 1 in showing how to configure Photon Linux as a network gateway 10 | 3. Sanity checks that you can run as part of TKC validation (see sanity/README.md) shown in video 5 11 | 4. Some example TKC cluster definitions to support video 4 12 | 5. An example Prometheus setup 13 | 6. 
Useful utilities for the scripting of the demos -------------------------------------------------------------------------------- /rolebindings/rolebinding-ingress-nginx-psp.yaml: -------------------------------------------------------------------------------- 1 | kind: RoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: rolebinding-ingress-nginx-admission-psp 5 | roleRef: 6 | kind: ClusterRole 7 | name: psp:vmware-system-privileged 8 | apiGroup: rbac.authorization.k8s.io 9 | subjects: 10 | - kind: ServiceAccount 11 | name: ingress-nginx-admission 12 | namespace: default 13 | --- 14 | kind: RoleBinding 15 | apiVersion: rbac.authorization.k8s.io/v1 16 | metadata: 17 | name: rolebinding-ingress-nginx-psp 18 | roleRef: 19 | kind: ClusterRole 20 | name: psp:vmware-system-privileged 21 | apiGroup: rbac.authorization.k8s.io 22 | subjects: 23 | - kind: ServiceAccount 24 | name: ingress-nginx 25 | namespace: default 26 | -------------------------------------------------------------------------------- /prometheus/prometheus-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: prometheus-deployment 5 | spec: 6 | replicas: 2 7 | selector: 8 | matchLabels: 9 | app: prometheus 10 | template: 11 | metadata: 12 | labels: 13 | app: prometheus 14 | spec: 15 | containers: 16 | - name: prometheus-cont 17 | image: prom/prometheus 18 | volumeMounts: 19 | - name: config-volume 20 | mountPath: /etc/prometheus/prometheus.yml 21 | subPath: prometheus.yml 22 | ports: 23 | - containerPort: 9090 24 | volumes: 25 | - name: config-volume 26 | configMap: 27 | name: prometheus-config 28 | serviceAccountName: prometheus 29 | -------------------------------------------------------------------------------- /rolebindings/ns1/rolebinding-bob-ns1.yaml: -------------------------------------------------------------------------------- 1 | kind: RoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: rolebinding-ns1-bob 5 | namespace: ns1 6 | roleRef: 7 | kind: ClusterRole 8 | name: edit 9 | apiGroup: rbac.authorization.k8s.io 10 | subjects: 11 | - kind: User 12 | name: sso:bob@vsphere.local 13 | apiGroup: rbac.authorization.k8s.io 14 | 15 | --- 16 | kind: RoleBinding 17 | apiVersion: rbac.authorization.k8s.io/v1 18 | metadata: 19 | name: rolebinding-ns1-bob-psp 20 | namespace: ns1 21 | roleRef: 22 | kind: ClusterRole 23 | name: psp:vmware-system-restricted-with-root 24 | apiGroup: rbac.authorization.k8s.io 25 | subjects: 26 | - kind: User 27 | name: sso:bob@vsphere.local 28 | apiGroup: rbac.authorization.k8s.io 29 | 30 | -------------------------------------------------------------------------------- /rolebindings/clusterrolebinding-tkg-dev.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: clusterrolebinding-view-group-tkg-dev 5 | roleRef: 6 | kind: ClusterRole 7 | name: view 8 | apiGroup: rbac.authorization.k8s.io 9 | subjects: 10 | - kind: Group 11 | name: sso:tkg-dev@vsphere.local 12 | apiGroup: rbac.authorization.k8s.io 13 | 14 | --- 15 | kind: ClusterRoleBinding 16 | apiVersion: rbac.authorization.k8s.io/v1 17 | metadata: 18 | name: clusterrolebinding-storageclass-view-group-tkg-dev 19 | roleRef: 20 | kind: ClusterRole 21 | name: storageclass-view 22 | apiGroup: rbac.authorization.k8s.io 23 | subjects: 24 | - kind: Group 25 | 
name: sso:tkg-dev@vsphere.local 26 | apiGroup: rbac.authorization.k8s.io 27 | 28 | -------------------------------------------------------------------------------- /sanity/scripts/8-ingress-controller-nginx.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # See https://docs.vmware.com/en/VMware-vSphere/7.0/vmware-vsphere-with-tanzu/GUID-457B1569-DFDC-4849-959C-72EDA72030AD.html 4 | 5 | # Note that this requires PSP bindings for the service accounts used in the helm chart 6 | # see rolebindings/rolebinding-ingress-nginx-psp.yaml 7 | 8 | namespace=${1:-default} 9 | name="ingress-nginx-controller" 10 | 11 | if [[ ! $(type -P "helm") ]]; then 12 | { echo "helm is not in PATH" 1>&2; exit 1; } 13 | fi 14 | 15 | helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx 16 | helm install --debug ingress-nginx ingress-nginx/ingress-nginx 17 | util/wait-for-deployment.sh "$namespace" "$name" 18 | kubectl get deployment "$name" 19 | kubectl get services 20 | echo -e "\nYou should now have a load-balanced service IP for the ingress controller" -------------------------------------------------------------------------------- /sanity/scripts/9-ingress-test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # See https://docs.vmware.com/en/VMware-vSphere/7.0/vmware-vsphere-with-tanzu/GUID-457B1569-DFDC-4849-959C-72EDA72030AD.html 4 | 5 | # Note that this requires a PSP binding for the default service account 6 | # see rolebindings/rolebinding-def-svc-acc-psp.yaml 7 | 8 | namespace=${1:-default} 9 | name="hello" 10 | 11 | kubectl apply -f sanity/9-ingress-test.yaml 12 | util/wait-for-deployment.sh "$namespace" "$name" 13 | kubectl get deployment "$name" 14 | kubectl get service ingress-nginx-controller 15 | echo -e "\nProceeding to wait for ingress IP. This can take a while to sync" 16 | echo -e "The above Service External IP should work if you want to try http:///hello\n" 17 | util/wait-for-ingress-IP.sh "$namespace" "ingress-$name" 18 | kubectl get ingress 19 | echo -e "\nYou should now be able to hit http:///hello in a browser" -------------------------------------------------------------------------------- /prometheus/scripts/2-deploy-rpc-app.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script will deploy a Prometheus test app to a TKG cluster. 4 | # It has a dependency on the deploy-prometheus.sh script being successfully run 5 | # Example taken from https://medium.com/kubernetes-tutorials/monitoring-your-kubernetes-deployments-with-prometheus-5665eda54045 6 | 7 | # Note that this requires a PSP binding for the default service account 8 | # see rolebindings/rolebinding-def-svc-acc-psp.yaml 9 | 10 | kubectl apply -f prometheus/rpc-app-deployment.yaml 11 | util/wait-for-deployment.sh "default" "rpc-app-deployment" 12 | kubectl apply -f prometheus/rpc-app-service.yaml 13 | util/wait-for-external-IP.sh "default" "rpc-app-service" 14 | kubectl get services 15 | echo -e "\nNow use curl http://:8081/metrics to validate" 16 | echo -e "\nYou can also visit the /targets endpoint in the Prometheus URL and see the app" 17 | -------------------------------------------------------------------------------- /prometheus/config/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 15s # By default, scrape targets every 15seconds. 
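# (Note: this file is mounted into the Prometheus pod from the prometheus-config
#  ConfigMap created by prometheus/scripts/1-deploy-prometheus.sh, and the
#  kubernetes-service-endpoints job below relies on the ServiceAccount and
#  ClusterRole defined in prometheus/rbac.yaml for endpoint/pod/node discovery.)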
# Attach these labels to any time series or alerts when #communicating with external systems (federation, remote storage, #Alertmanager). 3 | external_labels: 4 | monitor: 'codelab-monitor' 5 | # Scraping Prometheus itself 6 | scrape_configs: 7 | - job_name: 'prometheus' 8 | scrape_interval: 5s 9 | static_configs: 10 | - targets: ['localhost:9090'] 11 | - job_name: 'kubernetes-service-endpoints' 12 | kubernetes_sd_configs: 13 | - role: endpoints 14 | relabel_configs: 15 | - action: labelmap 16 | regex: __meta_kubernetes_service_label_(.+) 17 | - source_labels: [__meta_kubernetes_namespace] 18 | action: replace 19 | target_label: kubernetes_namespace 20 | - source_labels: [__meta_kubernetes_service_name] 21 | action: replace 22 | target_label: kubernetes_name 23 | -------------------------------------------------------------------------------- /gateway/configure-gw-10.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | iptables -P INPUT ACCEPT 4 | iptables -P OUTPUT ACCEPT 5 | iptables -P FORWARD ACCEPT 6 | 7 | # make sure forwarding is enabled in the kernel 8 | echo 1 > /proc/sys/net/ipv4/ip_forward 9 | 10 | # where is iptables located? 11 | iptables=`which iptables` 12 | 13 | # flush all existing rules 14 | $iptables -F 15 | 16 | # this is for NAT 17 | # enable masquerading 18 | /sbin/iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE 19 | 20 | # don't forward packets from off-lan to lan if 21 | # they are a brand new connection being initiated 22 | $iptables -A FORWARD -i eth0 -o eth1 -m state --state NEW -j REJECT 23 | 24 | # if the packets come from off-lan but they are 25 | # related to a connection that was established from 26 | # within the lan, go ahead and forward them 27 | $iptables -A FORWARD -i eth0 -o eth1 -m state --state RELATED,ESTABLISHED -j ACCEPT 28 | 29 | # whatever traffic comes from the lan to go to 30 | # the world allow thru 31 | $iptables -A FORWARD -i eth1 -o eth0 -j ACCEPT 32 | 33 | -------------------------------------------------------------------------------- /sanity/9-ingress-test.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: ingress-hello 5 | spec: 6 | rules: 7 | - http: 8 | paths: 9 | - path: /hello 10 | backend: 11 | serviceName: hello 12 | servicePort: 80 13 | --- 14 | kind: Service 15 | apiVersion: v1 16 | metadata: 17 | name: hello 18 | spec: 19 | selector: 20 | app: hello 21 | tier: backend 22 | ports: 23 | - protocol: TCP 24 | port: 80 25 | targetPort: http 26 | --- 27 | apiVersion: apps/v1 28 | kind: Deployment 29 | metadata: 30 | name: hello 31 | spec: 32 | replicas: 3 33 | selector: 34 | matchLabels: 35 | app: hello 36 | tier: backend 37 | track: stable 38 | template: 39 | metadata: 40 | labels: 41 | app: hello 42 | tier: backend 43 | track: stable 44 | spec: 45 | containers: 46 | - name: hello 47 | image: "gcr.io/google-samples/hello-go-gke:1.0" 48 | ports: 49 | - name: http 50 | containerPort: 80 -------------------------------------------------------------------------------- /gateway/configure-gw-20.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | iptables -P INPUT ACCEPT 4 | iptables -P OUTPUT ACCEPT 5 | iptables -P FORWARD ACCEPT 6 | 7 | # make sure forwarding is enabled in the kernel 8 | echo 1 > /proc/sys/net/ipv4/ip_forward 9 | 10 | # where is iptables located? 
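# (the assignment below records the full path; unlike configure-gw-10.sh, the
#  rules in this script invoke iptables directly rather than via $iptables)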
11 | iptables=`which iptables` 12 | 13 | # flush all existing rules 14 | iptables -F 15 | 16 | # this is for NAT 17 | # enable masquerading 18 | /sbin/iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE 19 | 20 | # don't forward packets from off-lan to lan if 21 | # they are a brand new connection being initiated 22 | iptables -A FORWARD -i eth0 -o eth1 -m state --state NEW -j REJECT 23 | 24 | # if the packets come from off-lan but they are 25 | # related to a connection that was established from 26 | # within the lan, go ahead and forward them 27 | iptables -A FORWARD -i eth0 -o eth1 -m state --state RELATED,ESTABLISHED -j ACCEPT 28 | 29 | # whatever traffic comes from the lan to go to 30 | # the world allow thru 31 | iptables -A FORWARD -i eth1 -o eth0 -j ACCEPT 32 | 33 | # configure firewall to prevent routing to management IPs 34 | iptables -I FORWARD -d 192.168.1.128/25 -j DROP 35 | 36 | -------------------------------------------------------------------------------- /sanity/README.md: -------------------------------------------------------------------------------- 1 | # Sanity checks for a TKC cluster 2 | 3 | These configuration files and scripts provide a convenient way to test a new TKC cluster 4 | 5 | The simplest way to use these scripts is to run them one after the other from the root. 6 | 7 | E.g. sanity/scripts/1-configmap.sh 8 | 9 | The checks comprise: 10 | 11 | 1. Create a ConfigMap - validates permission to create a simple object 12 | 2. Run a Pod - validates K8S admin permissions & validates registry settings 13 | 3. Run a Deployment - validates permissions of the default system user 14 | 4. Create a ClusterIP Service - validates cluster networking 15 | 5. Create a Load-Balanced Service - validates HAProxy load balancing 16 | 6. Create a Stateful App - validates persistent volumes are working 17 | 7. Run a DaemonSet - validates that all nodes can be scheduled to 18 | 8. Deploy an ingress controller - validates a more complex setup 19 | 9. Create and test an ingress - validates the ingress controller 20 | 21 | Note that you can also specify a namespace for tests 1-6 by appending it to the script 22 | 23 | E.g.
sanity/scripts/1-configmap.sh mynamespace 24 | 25 | Each script has more information about the specific test and its prerequisites -------------------------------------------------------------------------------- /prometheus/rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1beta1 2 | kind: ClusterRole 3 | metadata: 4 | name: prometheus 5 | rules: 6 | - apiGroups: [""] 7 | resources: 8 | - nodes 9 | - services 10 | - endpoints 11 | - pods 12 | verbs: ["get", "list", "watch"] 13 | - apiGroups: [""] 14 | resources: 15 | - configmaps 16 | verbs: ["get"] 17 | - nonResourceURLs: ["/metrics"] 18 | verbs: ["get"] 19 | --- 20 | apiVersion: v1 21 | kind: ServiceAccount 22 | metadata: 23 | name: prometheus 24 | --- 25 | apiVersion: rbac.authorization.k8s.io/v1beta1 26 | kind: ClusterRoleBinding 27 | metadata: 28 | name: prometheus 29 | roleRef: 30 | apiGroup: rbac.authorization.k8s.io 31 | kind: ClusterRole 32 | name: prometheus 33 | subjects: 34 | - kind: ServiceAccount 35 | name: prometheus 36 | namespace: default 37 | --- 38 | kind: RoleBinding 39 | apiVersion: rbac.authorization.k8s.io/v1 40 | metadata: 41 | name: rolebinding-prometheus-psp 42 | namespace: default 43 | roleRef: 44 | kind: ClusterRole 45 | name: psp:vmware-system-privileged 46 | apiGroup: rbac.authorization.k8s.io 47 | subjects: 48 | - kind: ServiceAccount 49 | name: prometheus 50 | namespace: default 51 | 52 | -------------------------------------------------------------------------------- /tkc/simple-cluster-upgrade-1.18.5.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: run.tanzu.vmware.com/v1alpha1 #TKG API endpoint 2 | kind: TanzuKubernetesCluster #required parameter 3 | metadata: 4 | name: simple-cluster #cluster name, user defined 5 | namespace: test-cluster #supervisor namespace 6 | spec: 7 | distribution: 8 | fullVersion: null 9 | version: v1.18.5 10 | topology: 11 | controlPlane: 12 | count: 1 #number of control plane nodes 13 | class: best-effort-small #vmclass for control plane nodes 14 | storageClass: k8s-default-storage #storageclass for control plane 15 | workers: 16 | count: 3 #number of worker nodes 17 | class: best-effort-small #vmclass for worker nodes 18 | storageClass: k8s-default-storage 19 | settings: 20 | network: 21 | cni: 22 | name: antrea 23 | services: 24 | cidrBlocks: ["10.96.1.0/24"] #Cannot overlap with Supervisor Cluster 25 | pods: 26 | cidrBlocks: ["172.16.0.0/16"] #Cannot overlap with Supervisor Cluster 27 | storage: 28 | classes: ["k8s-default-storage"] #Named PVC storage classes 29 | -------------------------------------------------------------------------------- /rolebindings/psp-vmware-system-restricted-with-root.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: psp:vmware-system-restricted-with-root 5 | rules: 6 | - apiGroups: 7 | - policy 8 | resourceNames: 9 | - vmware-system-restricted-with-root 10 | resources: 11 | - podsecuritypolicies 12 | verbs: 13 | - use 14 | --- 15 | apiVersion: policy/v1beta1 16 | kind: PodSecurityPolicy 17 | metadata: 18 | annotations: 19 | apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default 20 | apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default 21 | seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default,runtime/default 22 | 
seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default 23 | name: vmware-system-restricted-with-root 24 | spec: 25 | allowPrivilegeEscalation: false 26 | fsGroup: 27 | ranges: 28 | - max: 65535 29 | min: 1 30 | rule: MustRunAs 31 | runAsUser: 32 | rule: RunAsAny 33 | seLinux: 34 | rule: RunAsAny 35 | supplementalGroups: 36 | ranges: 37 | - max: 65535 38 | min: 1 39 | rule: MustRunAs 40 | volumes: 41 | - configMap 42 | - emptyDir 43 | - projected 44 | - secret 45 | - downwardAPI 46 | - persistentVolumeClaim 47 | -------------------------------------------------------------------------------- /tkc/simple-cluster-1.17.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: run.tanzu.vmware.com/v1alpha1 #TKG API endpoint 2 | kind: TanzuKubernetesCluster #required parameter 3 | metadata: 4 | name: simple-cluster #cluster name, user defined 5 | namespace: test-cluster #supervisor namespace 6 | spec: 7 | distribution: 8 | version: v1.17 #Resolves to the latest v1.17 image (v1.17.8+vmware.1-tkg.1.5417466) 9 | topology: 10 | controlPlane: 11 | count: 1 #number of control plane nodes 12 | class: best-effort-small #vmclass for control plane nodes 13 | storageClass: k8s-default-storage #storageclass for control plane 14 | workers: 15 | count: 3 #number of worker nodes 16 | class: best-effort-small #vmclass for worker nodes 17 | storageClass: k8s-default-storage 18 | settings: 19 | network: 20 | cni: 21 | name: antrea 22 | services: 23 | cidrBlocks: ["10.96.1.0/24"] #Cannot overlap with Supervisor Cluster 24 | pods: 25 | cidrBlocks: ["172.16.0.0/16"] #Cannot overlap with Supervisor Cluster 26 | storage: 27 | classes: ["k8s-default-storage"] #Named PVC storage classes 28 | -------------------------------------------------------------------------------- /sanity/6-stateful-app.yaml: -------------------------------------------------------------------------------- 1 | # See https://docs.vmware.com/en/VMware-vSphere/7.0/vmware-vsphere-with-tanzu/GUID-A19F6480-40DC-4343-A5A9-A5D3BFC0742E.html 2 | # An example stateful app 3 | 4 | apiVersion: v1 5 | kind: PersistentVolumeClaim 6 | metadata: 7 | name: my-pvc 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | storageClassName: k8s-default-storage 12 | resources: 13 | requests: 14 | storage: 2Gi 15 | --- 16 | apiVersion: v1 17 | kind: Service 18 | metadata: 19 | name: mysql 20 | spec: 21 | ports: 22 | - port: 3306 23 | selector: 24 | app: mysql 25 | clusterIP: None 26 | --- 27 | apiVersion: apps/v1 28 | kind: Deployment 29 | metadata: 30 | name: mysql 31 | spec: 32 | selector: 33 | matchLabels: 34 | app: mysql 35 | strategy: 36 | type: Recreate 37 | template: 38 | metadata: 39 | labels: 40 | app: mysql 41 | spec: 42 | containers: 43 | - image: mysql:5.6 44 | name: mysql 45 | env: 46 | # Use secret in real usage 47 | - name: MYSQL_ROOT_PASSWORD 48 | value: password 49 | ports: 50 | - containerPort: 3306 51 | name: mysql 52 | volumeMounts: 53 | - name: mysql-persistent-storage 54 | mountPath: /var/lib/mysql 55 | volumes: 56 | - name: mysql-persistent-storage 57 | persistentVolumeClaim: 58 | claimName: my-pvc 59 | -------------------------------------------------------------------------------- /sanity/7-daemonset.yaml: -------------------------------------------------------------------------------- 1 | # See https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ 2 | # An example DaemonSet 3 | 4 | apiVersion: apps/v1 5 | kind: DaemonSet 6 | metadata: 7 | name: fluentd-elasticsearch 8 | 
namespace: kube-system 9 | labels: 10 | k8s-app: fluentd-logging 11 | spec: 12 | selector: 13 | matchLabels: 14 | name: fluentd-elasticsearch 15 | template: 16 | metadata: 17 | labels: 18 | name: fluentd-elasticsearch 19 | spec: 20 | tolerations: 21 | # this toleration is to have the daemonset runnable on master nodes 22 | # remove it if your masters can't run pods 23 | - key: node-role.kubernetes.io/master 24 | effect: NoSchedule 25 | containers: 26 | - name: fluentd-elasticsearch 27 | image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2 28 | resources: 29 | limits: 30 | memory: 200Mi 31 | requests: 32 | cpu: 100m 33 | memory: 200Mi 34 | volumeMounts: 35 | - name: varlog 36 | mountPath: /var/log 37 | - name: varlibdockercontainers 38 | mountPath: /var/lib/docker/containers 39 | readOnly: true 40 | terminationGracePeriodSeconds: 30 41 | volumes: 42 | - name: varlog 43 | hostPath: 44 | path: /var/log 45 | - name: varlibdockercontainers 46 | hostPath: 47 | path: /var/lib/docker/containers 48 | --------------------------------------------------------------------------------
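The sanity checks above are designed to be run in order from the repository root (see sanity/README.md); a minimal driver sketch, assuming the kubectl context has been set with one of the login-*.sh scripts, the PSP bindings under rolebindings/ have been applied, and helm is on the PATH for step 8:

#!/bin/bash
# Run the TKC sanity checks in sequence against a target namespace (default: "default").
namespace=${1:-default}

sanity/scripts/1-configmap.sh "$namespace"
sanity/scripts/2-deploy-pod.sh "$namespace"            # blocks on port-forward; Ctrl-C to continue
sanity/scripts/3-deployment.sh "$namespace"
sanity/scripts/4-service-clusterip.sh "$namespace"
sanity/scripts/5-service-loadbalancer.sh "$namespace"
sanity/scripts/6-stateful-app.sh "$namespace"
sanity/scripts/7-daemonset.sh                          # always targets kube-system
sanity/scripts/8-ingress-controller-nginx.sh
sanity/scripts/9-ingress-test.sh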