├── .gitignore ├── LICENSE ├── README.md ├── TROUBLESHOOTING.md ├── chapter1 ├── README.md └── install-docker.sh ├── chapter10 └── kubectl-secure-my-dashboard.go ├── chapter11 ├── defaultUser │ ├── addDefaultUser.yaml │ └── setUnprivileged.yaml ├── enforce-ingress-vap │ ├── vap-binding-ingress.yaml │ └── vap-ingress.yaml ├── enforce-ingress │ ├── json │ │ └── test-data.json │ ├── rego │ │ ├── enforceingress.rego │ │ └── enforceingress_test.rego │ └── yaml │ │ ├── config.yaml │ │ ├── gatekeeper-policy-template.yaml │ │ ├── gatekeeper-policy.yaml │ │ └── namespaces.yaml ├── example_admission_request.json ├── parameter-opa-policy-fail │ └── rego │ │ ├── limitregistries.rego │ │ └── limitregistries_test.rego ├── parameter-opa-policy │ ├── rego │ │ ├── limitregistries.rego │ │ └── limitregistries_test.rego │ └── yaml │ │ ├── gatekeeper-policy-template.yaml │ │ └── gatekeeper-policy.yaml └── simple-opa-policy │ ├── rego │ ├── limitregistries.rego │ └── limitregistries_test.rego │ └── yaml │ ├── gatekeeper-policy-template.yaml │ └── gatekeeper-policy.yaml ├── chapter12 ├── default_mutations.yaml ├── delete_all_pods_except_gatekeeper.sh ├── deploy_gatekeeper_psp_policies.sh ├── enforce_node_policies.yaml ├── make_cluster_work_policies.yaml ├── minimal_gatekeeper_constraints.yaml ├── multi-tenant │ ├── opa │ │ ├── all-namespaces-psp.rego │ │ └── all-namespaces-psp_test.rego │ ├── sourcedata │ │ ├── cache_data.json │ │ └── pod_data.json │ └── yaml │ │ ├── check-new-pods-psp.yaml │ │ ├── gatekeeper-config.yaml │ │ ├── minimal_gatekeeper_constraints.yaml │ │ ├── require-psp-for-namespaceconstraint.yaml │ │ └── require-psp-for-namespaceconstrainttemplate.yaml └── show_constraint_violations.sh ├── chapter13 ├── bin-deny.yaml ├── cluster │ ├── calico │ │ ├── custom-resources.yaml │ │ └── tigera-operator.yaml │ ├── cluster01-kind.yaml │ ├── create-cluster.sh │ ├── get_helm.sh │ ├── nginx-ingress │ │ └── nginx-deploy.yaml │ └── pvc-test │ │ └── test-pvc.yaml ├── get-kubearmor-bin.sh ├── get-kubearmor-bin.sh-old ├── kubearmor-patch.sh ├── nginx-secrets │ ├── create-nginx-vault.sh │ ├── nginx-ingress.yaml │ ├── nginx-secrets-block.yaml │ ├── nginx-secrets.yaml │ ├── redeploy-nginx-vault.sh │ ├── volume-secrets-watch.yaml │ ├── volume-secrets.yaml │ └── volume-vault-watch.yaml ├── nginx │ ├── ingress.yaml │ ├── ngnix-ingress-remove.sh │ └── ngnix-ingress.sh ├── patch-relay.yaml ├── vault │ ├── api-server-ingress.yaml │ ├── deploy_vault.sh │ ├── install_vault.sh │ ├── unseal.sh │ ├── vault-ingress.yaml │ ├── vault_cli.sh │ └── vault_integrate_cluster.sh └── vault_cli.sh ├── chapter14 ├── create-backup-objects.sh ├── create-minio-ingress.sh ├── credentials-velero ├── etcd │ └── install-etcd-tools.sh ├── install-velero-binary.sh ├── minio-deployment.yaml ├── minio-ingress.yaml ├── pvc-example │ └── busybox-pvc.yaml └── velero-cluster │ ├── calico │ ├── custom-resources.yaml │ └── tigera-operator.yaml │ ├── create-velero-cluster.sh │ ├── nginx-ingress │ └── nginx-deploy.yaml │ └── velero-cluster.yaml ├── chapter15 ├── alertmanager-webhook │ ├── alertmanager-webhook.yaml │ └── critical-alerts.yaml ├── opensearch │ ├── deploy_opensearch.sh │ ├── fluentbit.yaml │ └── opensearch-sso.yaml ├── simple │ ├── alertmanager-ingress.yaml │ ├── deploy-prometheus-charts.sh │ ├── grafana-ingress.yaml │ ├── prometheus-ingress.yaml │ └── values.yaml └── user-auth │ ├── apacheds.yaml │ ├── deploy_openunison_imp_impersonation.sh │ ├── grafana-custom-dashboard.yaml │ ├── grafana-result-group.yaml │ ├── 
kubernetes-dashboard-values.yaml │ ├── monitor-openunison.yaml │ ├── myvd-book.yaml │ ├── openunison-values-impersonation.yaml │ └── prom-openunison-values.yaml ├── chapter16 ├── add-ons │ ├── deploy-add-ons.sh │ ├── grafana-deployment.yaml │ ├── grafana-vs.yaml │ ├── gw.yaml │ ├── jaeger-deployment.yaml │ ├── jaeger-vs.yaml │ ├── prometheus-deployment.yaml │ └── prometheus-vs.yaml ├── example-app │ ├── deploy-example.sh │ ├── gw.yaml │ ├── istio-manifests.yaml │ ├── kubernetes-manifests.yaml │ └── vs.yaml ├── expose_istio.sh ├── install-istio.sh ├── kiali │ ├── deploy-kiali.sh │ ├── gw.yaml │ └── vs.yaml └── testapp │ ├── create-gw-vs.sh │ ├── deploy-testapp.sh │ ├── example1-policy.yaml │ ├── example2-policy.yaml │ ├── gw.yaml │ ├── remove-testapp.sh │ └── vs.yaml ├── chapter17 ├── authentication │ ├── deploy-auth.sh │ ├── patch-istiod.json │ └── service-auth.yaml ├── coursed-grained-authorization │ └── coursed-grained-az.yaml ├── hello-world │ ├── deploy_helloworld.sh │ └── hello-world.yaml ├── kiali │ ├── grafana-deployment.yaml │ ├── integrate-openunison-kiali.sh │ ├── jaeger-deployment.yaml │ ├── openunison-values-impersonation.yaml │ └── prometheus-deployment.yaml ├── opa │ ├── deploy_opa_istio.sh │ ├── opa-istio.yaml │ └── rego │ │ ├── istio.authz.rego │ │ └── istio.authz_test.rego ├── openunison-istio │ ├── apacheds.yaml │ ├── deploy_openunison_istio.sh │ ├── kubernetes-dashboard-values.yaml │ ├── myvd-book.yaml │ └── openunison-values-impersonation.yaml ├── openunison-service-auth │ ├── .helmignore │ ├── Chart.yaml │ ├── templates │ │ ├── authentication_chains.yaml │ │ ├── getusertoken.yaml │ │ └── sts.yaml │ └── values.yaml └── write-checks │ ├── call_service.sh │ ├── deploy_write_checks.sh │ └── write_checks.yaml ├── chapter19 ├── examples │ ├── myapp │ │ ├── .gitlab-ci.yml │ │ └── source │ │ │ ├── Dockerfile │ │ │ ├── helloworld.py │ │ │ └── requirements.txt │ └── ops │ │ └── python-hello.yaml ├── pulumi │ ├── .gitignore │ ├── Pulumi.yaml │ ├── __main__.py │ ├── requirements.txt │ └── src │ │ ├── devplatform │ │ ├── argocd │ │ │ ├── __init__.py │ │ │ └── deploy.py │ │ ├── cert_manager │ │ │ ├── __init__.py │ │ │ └── deploy.py │ │ ├── gitlab │ │ │ ├── __init__.py │ │ │ ├── deploy.py │ │ │ └── initialize.py │ │ ├── harbor │ │ │ ├── __init.py__ │ │ │ └── deploy.py │ │ ├── mysql │ │ │ ├── __init__.py │ │ │ └── deploy.py │ │ ├── openunison │ │ │ ├── __init__.py │ │ │ └── deploy.py │ │ ├── openunison_idp │ │ │ ├── __init__.py │ │ │ └── deploy.py │ │ ├── openunison_sat │ │ │ ├── __init__.py │ │ │ └── deploy.py │ │ ├── smtp_blackhole │ │ │ ├── __init__.py │ │ │ └── deploy.py │ │ └── vault │ │ │ ├── __init__.py │ │ │ └── deploy.py │ │ ├── hcl │ │ ├── vault-admin.hcl │ │ └── vault-ou-admins.hcl │ │ ├── helm │ │ └── kube-enterprise-guide-openunison-idp │ │ │ ├── .helmignore │ │ │ ├── Chart.yaml │ │ │ ├── templates │ │ │ ├── applications │ │ │ │ ├── get-target-token.yaml │ │ │ │ └── newproject.yaml │ │ │ ├── configmaps │ │ │ │ ├── vcluster-scripts.yaml │ │ │ │ └── vcluster-vault-policy-template.yaml │ │ │ ├── portalurls │ │ │ │ ├── argocd.yaml │ │ │ │ ├── gitlab.yaml │ │ │ │ ├── harbor.yaml │ │ │ │ └── vault.yaml │ │ │ ├── targets │ │ │ │ ├── dev-mysql.yaml │ │ │ │ ├── gitlab.yaml │ │ │ │ └── prod-mysql.yaml │ │ │ ├── trusts │ │ │ │ ├── argocd.yaml │ │ │ │ ├── gitlab.yaml │ │ │ │ ├── harbor.yaml │ │ │ │ └── vault.yaml │ │ │ └── workflows │ │ │ │ ├── initialization │ │ │ │ └── init-openunison.yaml │ │ │ │ ├── jit-gitlab.yaml │ │ │ │ └── new-project │ │ │ │ ├── argocd │ │ │ │ └── 
create-argocd-tenant.yaml │ │ │ │ ├── create-new-project.yaml │ │ │ │ ├── gitlab │ │ │ │ ├── new-application-production.yaml │ │ │ │ ├── new-ops-dev.yaml │ │ │ │ ├── new-ops-production.yaml │ │ │ │ └── setup-gitlab.yaml │ │ │ │ ├── harbor │ │ │ │ ├── create-harbor-project.yaml │ │ │ │ ├── onboard-harbor-gitlab.yaml │ │ │ │ ├── onboard-harbor-tenant.yaml │ │ │ │ └── sync-harbor-pull-secret.yaml │ │ │ │ ├── new-project.yaml │ │ │ │ ├── vault │ │ │ │ ├── deploy-external-secret-operator.yaml │ │ │ │ └── integrate-vault-vcluster.yaml │ │ │ │ └── vcluster │ │ │ │ ├── create-vcluster-db.yaml │ │ │ │ ├── deploy-dev-vcluster.yaml │ │ │ │ ├── deploy-openunison-to-vcluster.yaml │ │ │ │ ├── deploy-prod-vcluster.yaml │ │ │ │ └── deploy-vcluster.yaml │ │ │ └── values.yaml │ │ ├── lib │ │ ├── __init__.py │ │ ├── helm_chart_versions.py │ │ ├── kubernetes_api_endpoint.py │ │ └── namespace.py │ │ └── yaml │ │ ├── argocd-helm-support.yaml │ │ ├── mysql.yaml │ │ ├── mysql_node.yaml │ │ └── vaultintegration.yaml ├── scripts │ ├── get_gitlab_root_pwd.sh │ ├── harbor-get-root-password.sh │ ├── patch-nginx.txt │ ├── patch_nginx_ssh.sh │ └── pulumi-initialize.sh └── vault │ ├── unseal.sh │ └── unseal_after_init.sh ├── chapter2 ├── .create-cluster.sh.swp ├── HAdemo │ ├── create-multinode.sh │ └── multinode.yaml ├── calico │ ├── custom-resources.yaml │ └── tigera-operator.yaml ├── cluster01-kind.yaml ├── create-cluster.sh ├── get_helm.sh ├── nginx-ingress │ └── nginx-deploy.yaml └── pvc-test │ └── test-pvc.yaml ├── chapter4 ├── ingress │ ├── ingress.yaml │ ├── ngnix-ingress-remove.sh │ └── ngnix-ingress.sh ├── metallb │ ├── dns-multi.yaml │ ├── install-metallb.sh │ ├── l2advertisement.yaml │ ├── metallb-deploy.yaml │ ├── metallb-pool-2.yaml │ ├── metallb-pool-template.yaml │ ├── metallb-pool-template2.yaml │ ├── metallb-pool.yaml │ ├── nginx-lb-2.yaml │ ├── nginx-lb.sh │ ├── nginx-lb.yaml │ └── nginx-loadbalancer.sh └── netpol │ ├── backend-db-netpol.yaml │ ├── netpol.sh │ └── remove-all.sh ├── chapter5 ├── coredns │ └── values.yaml ├── etcd │ ├── deploy-etcd.sh │ └── values.yaml ├── externaldns │ ├── coredns-add-template.txt │ ├── coredns-add.txt │ ├── coredns-cm-template.txt │ ├── coredns-cm.yaml │ ├── deploy-externaldns.sh │ ├── deployment-externaldns.yaml │ ├── deployment-template.yaml │ └── nginx-lb.yaml └── k8gb-example │ ├── README.md │ ├── get_helm.sh │ ├── k8gb │ ├── coredns-dual-svc.yaml │ ├── deploy-k8gb-buf.sh │ ├── deploy-k8gb-nyc.sh │ ├── get_helm.sh │ ├── k8gb-buf-values.yaml │ ├── k8gb-example-buf.yaml │ ├── k8gb-example-nyc.yaml │ ├── k8gb-nyc-values.yaml │ ├── nginx-fe-buff.yaml │ └── nginx-fe-nyc.yaml │ ├── kubeadm │ ├── calico.yaml │ ├── create-kubeadm-single.sh │ ├── get_helm.sh │ ├── kubeadm-config.yaml │ └── nginx-deploy.yaml │ └── metallb │ ├── install-metallb-buf.sh │ ├── install-metallb-nyc.sh │ ├── l2advertisement.yaml │ ├── metallb-config-buf.sh │ ├── metallb-config-nyc.sh │ ├── metallb-deploy.yaml │ ├── metallb-pool-buf.yaml │ └── metallb-pool-nyc.yaml ├── chapter6 ├── README.md ├── pipelines │ ├── cicd-proxy │ │ ├── cicd-proxy_template.yaml │ │ ├── deploy-proxy.sh │ │ ├── run_workflow.sh │ │ ├── run_workflow.yaml │ │ └── target-ns.yaml │ ├── sa-cluster-admins.yaml │ └── token-login │ │ ├── .helmignore │ │ ├── Chart.yaml │ │ ├── templates │ │ ├── applications.yaml │ │ ├── authentication_chains.yaml │ │ └── authentication_mechanisms.yaml │ │ └── values.yaml └── user-auth │ ├── apacheds.yaml │ ├── deploy_openunison_imp_impersonation.sh │ ├── deploy_openunison_imp_noimpersonation.sh │ ├── 
kubernetes-dashboard-values.yaml │ ├── myvd-book.yaml │ ├── openunison-values-impersonation.yaml │ └── openunison-values-noimpersonation.yaml ├── chapter7 ├── cm │ └── k8s-audit-policy.yaml └── enable-auditing.sh ├── chapter8 ├── external-secrets │ ├── ext-secret-template.yaml │ └── install_external_secrets.sh ├── integration │ ├── envvars │ │ ├── envars-secrets-watch.yaml │ │ ├── envars-secrets.yaml │ │ └── envars-vault-watch.yaml │ └── volumes │ │ ├── create-vault.sh │ │ ├── volume-secrets-watch.yaml │ │ ├── volume-secrets.yaml │ │ └── volume-vault-watch.yaml └── vault │ ├── api-server-ingress.yaml │ ├── deploy_vault.sh │ ├── install_vault.sh │ ├── unseal.sh │ ├── vault-ingress.yaml │ ├── vault_cli.sh │ └── vault_integrate_cluster.sh ├── chapter9 ├── deploy_vcluster_cli.sh ├── ha │ ├── create-ha-db.sql │ ├── deploy_mysql.sh │ ├── mysql.yaml │ ├── postgresql.yaml │ ├── vcluster-ha-tenant1-vaules-upgrade.yaml │ ├── vcluster-ha-tenant1-vaules.yaml │ └── vcluster-tenant1.yaml ├── host │ ├── apacheds.yaml │ ├── deploy_openunison_imp_impersonation.sh │ ├── deploy_openunison_vcluster.sh │ ├── kubernetes-dashboard-values.yaml │ ├── myvd-book.yaml │ ├── openunison-values-impersonation.yaml │ ├── openunison-values-vcluster.yaml │ ├── vcluster-values-secrets.yaml │ └── vcluster-values.yaml ├── multitenant │ ├── examples │ │ ├── create-vault.sh │ │ └── volume-vault-watch.yaml │ ├── setup │ │ ├── apacheds.yaml │ │ ├── deploy_openunison.sh │ │ ├── kubernetes-dashboard-values.yaml │ │ ├── mysql.yaml │ │ ├── myvd-book.yaml │ │ ├── openunison-values-impersonation.yaml │ │ └── vault │ │ │ ├── api-server-ingress.yaml │ │ │ ├── deploy_vault.sh │ │ │ ├── install_vault.sh │ │ │ ├── integrate_cp.sh │ │ │ ├── unseal.sh │ │ │ ├── vault-admin.hcl │ │ │ ├── vault-ingress.yaml │ │ │ ├── vault_cli.sh │ │ │ ├── vault_integrate_cluster.sh │ │ │ ├── vcluster-values-secrets.yaml │ │ │ └── vcluster-values.yaml │ └── vcluster-multitenant │ │ ├── .helmignore │ │ ├── Chart.yaml │ │ ├── templates │ │ ├── configmaps │ │ │ ├── vcluster-scripts.yaml │ │ │ └── vcluster-vault-policy-template.yaml │ │ └── workflows │ │ │ ├── create-vcluster-db.yaml │ │ │ ├── deploy-openunison-to-vcluster.yaml │ │ │ ├── deploy-vcluster.yaml │ │ │ ├── integrate-vault-vcluster.yaml │ │ │ └── onboard-vcluster.yaml │ │ └── values.yaml └── simple │ └── virtual-pod.yaml └── excursions ├── DNSMasq ├── README.md └── dnsmasq.conf ├── README.md └── kind-multi-cluster ├── README.md └── kind-multi-cluster.sh /.gitignore: -------------------------------------------------------------------------------- 1 | chapter19/pulumi/Pulumi.bookv3-platform.yaml 2 | */__pycache__ 3 | */stack.json 4 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Packt 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes-An-Enterprise-Guide-Third-Edition 2 | Welcome to the repo for the book Kubernetes – An Enterprise Guide, Third Edition, published by Packt. 3 | Written by Marc Boorshtein and Scott Surovich. 4 | 5 | # Extras Directory 6 | We have created a directory called Extras. It contains content that we wanted to add for readers that is outside of the book content. 7 | It contains add-ons that we find useful for additional testing; they are provided as-is, without any support. 8 | 9 | Currently, it contains one add-on, dnsmasq. You can use dnsmasq to create your own domain for Ingress and Istio VirtualServices rather than using nip.io as the book exercises do. 10 | If you decide to use dnsmasq for a domain, the scripts in the exercises will still create nip.io URLs, so you will need to replace the nip.io domains with your dnsmasq domain after the scripts create them. 11 | 12 | dnsmasq was added for readers who may want to use the scripts on other Kubernetes clusters, like a kubeadm cluster - or who use their KinD cluster offline, where nip.io domains will not resolve without an Internet connection. 13 | 14 | ## Troubleshooting and Getting Help 15 | 16 | If labs don't work, take a look at [TROUBLESHOOTING.md](TROUBLESHOOTING.md). If you're still having issues, please open an issue in this repo and we'll be happy to help! -------------------------------------------------------------------------------- /TROUBLESHOOTING.md: -------------------------------------------------------------------------------- 1 | # Troubleshooting 2 | 3 | If the labs aren't working, even though you followed all the steps, take a look here to see if it's a common issue before opening an issue on GitHub. If it's still not working, please open an issue so we can help out! 4 | 5 | ## Invalid Login from OpenUnison 6 | 7 | When you log in to any of the OpenUnison labs, you may get a screen that says "Invalid Credentials". First, make sure you're typing in the right password. If that fails, sometimes the simulated "Active Directory" will fail to start properly. The easiest way to fix this is to just delete the `Pod` and let ApacheDS restart: 8 | 9 | ``` 10 | kubectl delete pods -l app=apacheds -n activedirectory 11 | ``` 12 | 13 | Once the pod restarts, try again. If it still doesn't work, look at the logs for OpenUnison in the `openunison` namespace: 14 | 15 | ``` 16 | kubectl logs -f -l app=openunison-orchestra -n openunison 17 | ``` 18 | 19 | If you don't see any exceptions while trying to log in, please open an issue. 20 | 21 | ## MySQL Won't Start 22 | 23 | Depending on where you're running your Ubuntu VM, you may get the message `Fatal glibc error: CPU does not support x86-64-v2`. This is caused by the CPU model your VM is configured with. If you set the CPU to passthrough mode and reboot, this error should be eliminated.
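 24 | 25 | If your Ubuntu VM runs under KVM/libvirt, one way to do this is to switch the CPU model to host passthrough. A minimal sketch, assuming `virsh` is available and using a hypothetical VM name of `k8s-vm` (substitute your own): 26 | 27 | ``` 28 | # edit the VM definition and replace the existing <cpu .../> element with: <cpu mode='host-passthrough'/> 29 | virsh edit k8s-vm 30 | # restart the VM so the new CPU model takes effect 31 | virsh shutdown k8s-vm && virsh start k8s-vm 32 | ``` 33 | 34 | Other hypervisors have an equivalent CPU model setting; the goal is for the guest to see the host's real CPU feature flags.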
-------------------------------------------------------------------------------- /chapter1/README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes-and-Docker-The-Complete-Guide 2 | Kubernetes and Docker: The Complete Guide, published by Packt 3 | 4 | # Chapter 1 Scripts 5 | This directory contains the script to install Docker. 6 | -------------------------------------------------------------------------------- /chapter10/kubectl-secure-my-dashboard.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "crypto/tls" 6 | "encoding/json" 7 | "fmt" 8 | "io/ioutil" 9 | "net/http" 10 | "os" 11 | ) 12 | 13 | type csrfToken struct { 14 | Token string `json:"token"` 15 | } 16 | 17 | func main() { 18 | if len(os.Args) < 2 { 19 | fmt.Println("usage: kubectl-secure-my-dashboard <dashboard-url>") 20 | os.Exit(1) 21 | } 22 | baseURL := os.Args[1] 23 | fmt.Println("Running analysis on " + baseURL) 24 | 25 | // the lab dashboard uses a self-signed certificate, so skip TLS verification 26 | http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true} 27 | url := baseURL + "/api/v1/csrftoken/appdeployment" 28 | 29 | resp, err := http.DefaultClient.Get(url) 30 | if err != nil { 31 | panic(err) 32 | } 33 | defer resp.Body.Close() 34 | 35 | body, err := ioutil.ReadAll(resp.Body) 36 | if err != nil { 37 | panic(err) 38 | } 39 | 40 | token := csrfToken{} 41 | err = json.Unmarshal(body, &token) 42 | if err != nil { 43 | panic(err) 44 | } 45 | 46 | // deploy a workload through the dashboard's API to demonstrate the dashboard is not secured 47 | jsonStr := []byte(`{"containerImage":"busybox","imagePullSecret":null,"containerCommand":"sh -c echo \"Hello, Kubernetes!\" && sleep 3600","containerCommandArgs":null,"isExternal":false,"name":"not-a-bitcoin-miner","description":null,"portMappings":[],"variables":[],"replicas":1,"namespace":"default","cpuRequirement":null,"memoryRequirement":null,"labels":[{"editable":false,"key":"k8s-app","value":"not-a-bitcoin-miner"}],"runAsPrivileged":false}`) 48 | req, err := http.NewRequest(http.MethodPost, baseURL+"/api/v1/appdeployment", bytes.NewBuffer(jsonStr)) 49 | if err != nil { 50 | panic(err) 51 | } 52 | req.Header.Set("Content-Type", "application/json") 53 | req.Header.Set("X-CSRF-TOKEN", token.Token) 54 | resp, err = http.DefaultClient.Do(req) 55 | if err != nil { 56 | panic(err) 57 | } 58 | resp.Body.Close() 59 | 60 | fmt.Println("Your dashboard has been secured!") 61 | } 62 | -------------------------------------------------------------------------------- /chapter11/defaultUser/addDefaultUser.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: mutations.gatekeeper.sh/v1 2 | kind: Assign 3 | metadata: 4 | name: default-user 5 | spec: 6 | applyTo: 7 | - groups: [""] 8 | kinds: ["Pod"] 9 | versions: ["v1"] 10 | match: 11 | scope: Namespaced 12 | excludedNamespaces: 13 | - kube-system 14 | location: "spec.securityContext.runAsUser" 15 | parameters: 16 | assign: 17 | value: 70391 18 | pathTests: 19 | - subPath: "spec.securityContext.runAsUser" 20 | condition: MustNotExist -------------------------------------------------------------------------------- /chapter11/defaultUser/setUnprivileged.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: mutations.gatekeeper.sh/v1 2 | kind: Assign 3 | metadata: 4 | name: set-unprivileged 5 | spec: 6 | applyTo: 7 | - groups: [""] 8 | kinds: ["Pod"] 9 | versions: ["v1"] 10 | match: 11 | scope: Namespaced 12 | excludedNamespaces: 13 | - kube-system 14 | location: "spec.containers[image:*].securityContext.privileged" 15 | parameters: 16 | assign: 17 | value: false 18 | pathTests: 19 | - subPath: "spec.containers[image:*].securityContext.privileged" 20 | condition: MustNotExist
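 21 | # A quick way to verify both Assign mutations are working (a sketch - assumes Gatekeeper is 22 | # installed with these two mutations applied; the pod name below is hypothetical): 23 | #   kubectl run mutation-test --image=nginx 24 | #   kubectl get pod mutation-test -o jsonpath='{.spec.securityContext.runAsUser}'                 # expect 70391 25 | #   kubectl get pod mutation-test -o jsonpath='{.spec.containers[0].securityContext.privileged}'  # expect false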
-------------------------------------------------------------------------------- /chapter11/enforce-ingress-vap/vap-binding-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: admissionregistration.k8s.io/v1beta1 2 | kind: ValidatingAdmissionPolicyBinding 3 | metadata: 4 | name: "vap-binding-ingress" 5 | spec: 6 | policyName: "vap-ingress" 7 | validationActions: [Deny] 8 | -------------------------------------------------------------------------------- /chapter11/enforce-ingress-vap/vap-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: admissionregistration.k8s.io/v1beta1 2 | kind: ValidatingAdmissionPolicy 3 | metadata: 4 | name: "vap-ingress" 5 | spec: 6 | failurePolicy: Fail 7 | matchConstraints: 8 | resourceRules: 9 | - apiGroups: ["networking.k8s.io"] 10 | apiVersions: ["v1"] 11 | operations: ["CREATE", "UPDATE"] 12 | resources: ["ingresses"] 13 | validations: 14 | - expression: |- 15 | namespaceObject.metadata.labels.allowingress == "true" -------------------------------------------------------------------------------- /chapter11/enforce-ingress/json/test-data.json: -------------------------------------------------------------------------------- 1 | { 2 | "cluster": { 3 | "v1": { 4 | "Namespace": { 5 | 6 | "ns-with-ingress": { 7 | "metadata": { 8 | "labels": { 9 | "allowingress": "true" 10 | } 11 | } 12 | }, 13 | "ns-without-ingress": { 14 | "metadata": { 15 | } 16 | } 17 | 18 | } 19 | } 20 | } 21 | } -------------------------------------------------------------------------------- /chapter11/enforce-ingress/rego/enforceingress.rego: -------------------------------------------------------------------------------- 1 | package k8senforceingress 2 | 3 | violation[{"msg":msg,"details":{}}] { 4 | missingIngressLabel 5 | msg := "Missing label allowingress: \"true\"" 6 | } 7 | 8 | missingIngressLabel { 9 | data.inventory.cluster["v1"].Namespace[input.review.object.metadata.namespace].metadata.labels["allowingress"] != "true" 10 | } 11 | 12 | missingIngressLabel { 13 | not data.inventory.cluster["v1"].Namespace[input.review.object.metadata.namespace].metadata.labels["allowingress"] 14 | } -------------------------------------------------------------------------------- /chapter11/enforce-ingress/yaml/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: config.gatekeeper.sh/v1alpha1 2 | kind: Config 3 | metadata: 4 | name: config 5 | namespace: "gatekeeper-system" 6 | spec: 7 | sync: 8 | syncOnly: 9 | - group: "" 10 | version: "v1" 11 | kind: "Namespace" 12 | 13 | -------------------------------------------------------------------------------- /chapter11/enforce-ingress/yaml/gatekeeper-policy-template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: templates.gatekeeper.sh/v1beta1 2 | kind: ConstraintTemplate 3 | metadata: 4 | name: k8senforceingress 5 | spec: 6 | crd: 7 | spec: 8 | names: 9 | kind: K8sEnforceIngress 10 | validation: {} 11 | targets: 12 | - target: admission.k8s.gatekeeper.sh 13 | rego: |- 14 | package k8senforceingress 15 | 16 | violation[{"msg":msg,"details":{}}] { 17 | missingIngressLabel 18 | msg := "Missing label allowingress: \"true\"" 19 | } 20 | 21 | missingIngressLabel { 22 | 
data.inventory.cluster["v1"].Namespace[input.review.object.metadata.namespace].metadata.labels["allowingress"] != "true" 23 | } 24 | 25 | missingIngressLabel { 26 | not data.inventory.cluster["v1"].Namespace[input.review.object.metadata.namespace].metadata.labels["allowingress"] 27 | } 28 | 29 | 30 | -------------------------------------------------------------------------------- /chapter11/enforce-ingress/yaml/gatekeeper-policy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: K8sEnforceIngress 3 | metadata: 4 | name: require-ingress-label 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: ["networking.k8s.io"] 9 | kinds: ["Ingress"] 10 | -------------------------------------------------------------------------------- /chapter11/enforce-ingress/yaml/namespaces.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: ns-with-ingress 5 | labels: 6 | allowingress: "true" 7 | spec: {} 8 | --- 9 | apiVersion: v1 10 | kind: Namespace 11 | metadata: 12 | name: ns-without-ingress 13 | spec: {} 14 | -------------------------------------------------------------------------------- /chapter11/parameter-opa-policy-fail/rego/limitregistries.rego: -------------------------------------------------------------------------------- 1 | package k8sallowedregistries 2 | 3 | violation[{"msg": msg, "details": {}}] { 4 | invalidRegistry 5 | msg := "Invalid registry" 6 | } 7 | 8 | # returns true if a valid registry is not specified 9 | invalidRegistry { 10 | trace(sprintf("input_images : %v",[input_images])) 11 | ok_images = [image | 12 | trace(sprintf("image %v",[input_images[j]])) 13 | startswith(input_images[j],input.parameters.registries[_]) ; 14 | image = input_images[j] 15 | ] 16 | trace(sprintf("ok_images %v",[ok_images])) 17 | trace(sprintf("ok_images size %v / input_images size %v",[count(ok_images),count(input_images)])) 18 | count(ok_images) != count(input_images) 19 | } 20 | 21 | input_images[image] { 22 | image := input.review.object.spec.template.spec.containers[_].image 23 | } 24 | 25 | input_images[image] { 26 | image := input.review.object.spec.containers[_].image 27 | } 28 | 29 | input_images[image] { 30 | image := input.review.object.spec.jobTemplate.spec.template.spec.containers[_].image 31 | } 32 | 33 | 34 | 35 | -------------------------------------------------------------------------------- /chapter11/parameter-opa-policy/rego/limitregistries.rego: -------------------------------------------------------------------------------- 1 | package k8sallowedregistries 2 | 3 | violation[{"msg": msg, "details": {}}] { 4 | invalidRegistry 5 | msg := "Invalid registry" 6 | } 7 | 8 | # returns true if a valid registry is not specified 9 | invalidRegistry { 10 | ok_images = [image | startswith(input_images[i],input.parameters.registries[_]) ; image = input_images[i] ] 11 | count(ok_images) != count(input_images) 12 | } 13 | 14 | input_images[image] { 15 | image := input.review.object.spec.template.spec.containers[_].image 16 | } 17 | 18 | input_images[image] { 19 | image := input.review.object.spec.containers[_].image 20 | } 21 | 22 | input_images[image] { 23 | image := input.review.object.spec.jobTemplate.spec.template.spec.containers[_].image 24 | } 25 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /chapter11/parameter-opa-policy/yaml/gatekeeper-policy-template.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: templates.gatekeeper.sh/v1beta1 2 | kind: ConstraintTemplate 3 | metadata: 4 | name: k8sallowedregistries 5 | spec: 6 | crd: 7 | spec: 8 | names: 9 | kind: K8sAllowedRegistries 10 | validation: 11 | openAPIV3Schema: 12 | properties: 13 | registries: 14 | type: array 15 | items: {type: string} 16 | targets: 17 | - target: admission.k8s.gatekeeper.sh 18 | rego: | 19 | package k8sallowedregistries 20 | 21 | violation[{"msg": msg, "details": {}}] { 22 | invalidRegistry 23 | msg := "Invalid registry" 24 | } 25 | 26 | # returns true if a valid registry is not specified 27 | invalidRegistry { 28 | ok_images = [image | startswith(input_images[i],input.parameters.registries[_]) ; image = input_images[i] ] 29 | count(ok_images) != count(input_images) 30 | } 31 | 32 | input_images[image] { 33 | image := input.review.object.spec.template.spec.containers[_].image 34 | } 35 | 36 | input_images[image] { 37 | image := input.review.object.spec.containers[_].image 38 | } 39 | 40 | input_images[image] { 41 | image := input.review.object.spec.jobTemplate.spec.template.spec.containers[_].image 42 | } -------------------------------------------------------------------------------- /chapter11/parameter-opa-policy/yaml/gatekeeper-policy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: K8sAllowedRegistries 3 | metadata: 4 | name: restrict-openunison-registries 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: [""] 9 | kinds: ["Pod"] 10 | - apiGroups: ["apps"] 11 | kinds: 12 | - StatefulSet 13 | - Deployment 14 | - apiGroups: ["batch"] 15 | kinds: 16 | - CronJob 17 | namespaces: ["testpolicy"] 18 | parameters: 19 | registries: ["docker.io/nginx/"] -------------------------------------------------------------------------------- /chapter11/simple-opa-policy/rego/limitregistries.rego: -------------------------------------------------------------------------------- 1 | package k8sallowedregistries 2 | 3 | violation[{"msg": msg, "details": {}}] { 4 | invalidRegistry 5 | msg := "Invalid registry" 6 | } 7 | 8 | 9 | # returns true if a valid registry is not specified 10 | invalidRegistry { 11 | input_images[image] 12 | not startswith(image, "quay.io/") 13 | } 14 | 15 | # load images from Pod objects 16 | input_images[image] { 17 | image := input.review.object.spec.containers[_].image 18 | } 19 | 20 | # load images from Deployment and StatefulSet objects 21 | input_images[image] { 22 | image := input.review.object.spec.template.spec.containers[_].image 23 | } 24 | 25 | # load images from CronJob objects 26 | # Uncomment in chapter 11 27 | #input_images[image] { 28 | # image := input.review.object.spec.jobTemplate.spec.template.spec.containers[_].image 29 | #} -------------------------------------------------------------------------------- /chapter11/simple-opa-policy/yaml/gatekeeper-policy-template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: templates.gatekeeper.sh/v1beta1 2 | kind: ConstraintTemplate 3 | metadata: 4 | name: k8sallowedregistries 5 | spec: 6 | crd: 7 | spec: 8 | names: 9 | kind: K8sAllowedRegistries 10 | validation: {} 11 | targets: 12 | - target: admission.k8s.gatekeeper.sh 13 | rego: | 14 | package k8sallowedregistries 15 | 16 | violation[{"msg": msg, "details": {}}] { 17 | invalidRegistry 18 | msg := "Invalid registry" 19 | } 20 | 21 | 22 | # returns true if a valid 
registry is not specified 23 | invalidRegistry { 24 | input_images[image] 25 | not startswith(image, "quay.io/") 26 | } 27 | 28 | # load images from Pod objects 29 | input_images[image] { 30 | image := input.review.object.spec.containers[_].image 31 | } 32 | 33 | # load images from Deployment and StatefulSet objects 34 | input_images[image] { 35 | image := input.review.object.spec.template.spec.containers[_].image 36 | } 37 | 38 | # load images from CronJob objects 39 | # Uncomment in chapter 11 40 | input_images[image] { 41 | image := input.review.object.spec.jobTemplate.spec.template.spec.containers[_].image 42 | } -------------------------------------------------------------------------------- /chapter11/simple-opa-policy/yaml/gatekeeper-policy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: K8sAllowedRegistries 3 | metadata: 4 | name: restrict-openunison-registries 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: [""] 9 | kinds: ["Pod"] 10 | - apiGroups: ["apps"] 11 | kinds: 12 | - StatefulSet 13 | - Deployment 14 | - apiGroups: ["batch"] 15 | kinds: 16 | - CronJob 17 | namespaces: ["testpolicy"] -------------------------------------------------------------------------------- /chapter12/delete_all_pods_except_gatekeeper.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | for each in $(kubectl get ns | tail -n +2 | awk '{print $1}' | grep -v gatekeeper-system); 4 | do 5 | echo "$each" 6 | kubectl delete pods --all -n "$each" 7 | done -------------------------------------------------------------------------------- /chapter12/deploy_gatekeeper_psp_policies.sh: -------------------------------------------------------------------------------- 1 | kubectl apply -f https://raw.githubusercontent.com/open-policy-agent/gatekeeper-library/master/library/pod-security-policy/allow-privilege-escalation/template.yaml 2 | kubectl apply -f https://raw.githubusercontent.com/open-policy-agent/gatekeeper-library/master/library/pod-security-policy/capabilities/template.yaml 3 | kubectl apply -f https://raw.githubusercontent.com/open-policy-agent/gatekeeper-library/master/library/pod-security-policy/forbidden-sysctls/template.yaml 4 | kubectl apply -f https://raw.githubusercontent.com/open-policy-agent/gatekeeper-library/master/library/pod-security-policy/host-filesystem/template.yaml 5 | kubectl apply -f https://raw.githubusercontent.com/open-policy-agent/gatekeeper-library/master/library/pod-security-policy/host-namespaces/template.yaml 6 | kubectl apply -f https://raw.githubusercontent.com/open-policy-agent/gatekeeper-library/master/library/pod-security-policy/host-network-ports/template.yaml 7 | kubectl apply -f https://raw.githubusercontent.com/open-policy-agent/gatekeeper-library/master/library/pod-security-policy/privileged-containers/template.yaml 8 | kubectl apply -f https://raw.githubusercontent.com/open-policy-agent/gatekeeper-library/master/library/pod-security-policy/proc-mount/template.yaml 9 | kubectl apply -f https://raw.githubusercontent.com/open-policy-agent/gatekeeper-library/master/library/pod-security-policy/read-only-root-filesystem/template.yaml 10 | kubectl apply -f https://raw.githubusercontent.com/open-policy-agent/gatekeeper-library/master/library/pod-security-policy/users/template.yaml 11 | 
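12 | # After these apply, each template registers as a CRD-backed resource. A quick check 13 | # (a sketch - assumes Gatekeeper is already running in the cluster): 14 | #   kubectl get constrainttemplates 15 | # All ten templates (k8spspallowprivilegeescalationcontainer, k8spspcapabilities, and so on) 16 | # should be listed before creating constraints that reference them.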
 -------------------------------------------------------------------------------- /chapter12/enforce_node_policies.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: constraints.gatekeeper.sh/v1beta1 3 | kind: K8sPSPHostFilesystem 4 | metadata: 5 | name: psp-tigera-operator-allow-host-filesystem 6 | spec: 7 | match: 8 | kinds: 9 | - apiGroups: [""] 10 | kinds: ["Pod"] 11 | namespaces: ["tigera-operator"] 12 | parameters: 13 | allowedHostPaths: 14 | - pathPrefix: "/var/lib/calico" 15 | readOnly: false 16 | --- 17 | apiVersion: constraints.gatekeeper.sh/v1beta1 18 | kind: K8sPSPCapabilities 19 | metadata: 20 | name: capabilities-ingress-nginx 21 | spec: 22 | match: 23 | kinds: 24 | - apiGroups: [""] 25 | kinds: ["Pod"] 26 | namespaces: ["ingress-nginx"] 27 | parameters: 28 | requiredDropCapabilities: ["all"] 29 | allowedCapabilities: ["NET_BIND_SERVICE"] 30 | 31 | -------------------------------------------------------------------------------- /chapter12/make_cluster_work_policies.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: constraints.gatekeeper.sh/v1beta1 3 | kind: K8sPSPHostNetworkingPorts 4 | metadata: 5 | name: psp-deny-all-host-network-ports 6 | spec: 7 | match: 8 | kinds: 9 | - apiGroups: [""] 10 | kinds: ["Pod"] 11 | excludedNamespaces: ["kube-system","calico-system","tigera-operator","ingress-nginx"] 12 | parameters: 13 | hostNetwork: false 14 | --- 15 | apiVersion: constraints.gatekeeper.sh/v1beta1 16 | kind: K8sPSPHostFilesystem 17 | metadata: 18 | name: psp-deny-host-filesystem 19 | spec: 20 | match: 21 | kinds: 22 | - apiGroups: [""] 23 | kinds: ["Pod"] 24 | excludedNamespaces: ["kube-system","calico-system","tigera-operator"] 25 | parameters: 26 | allowedHostPaths: [] 27 | --- 28 | apiVersion: constraints.gatekeeper.sh/v1beta1 29 | kind: K8sPSPCapabilities 30 | metadata: 31 | name: capabilities-drop-all 32 | spec: 33 | match: 34 | kinds: 35 | - apiGroups: [""] 36 | kinds: ["Pod"] 37 | excludedNamespaces: ["kube-system","calico-system","ingress-nginx","local-path-storage","tigera-operator"] 38 | parameters: 39 | requiredDropCapabilities: ["all"] 40 | allowedCapabilities: [] 41 | --- 42 | apiVersion: constraints.gatekeeper.sh/v1beta1 43 | kind: K8sPSPAllowPrivilegeEscalationContainer 44 | metadata: 45 | name: privilege-escalation-deny-all 46 | spec: 47 | match: 48 | kinds: 49 | - apiGroups: [""] 50 | kinds: ["Pod"] 51 | excludedNamespaces: ["kube-system","calico-system","ingress-nginx","local-path-storage","tigera-operator"] 52 | -------------------------------------------------------------------------------- /chapter12/multi-tenant/yaml/gatekeeper-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: config.gatekeeper.sh/v1alpha1 2 | kind: Config 3 | metadata: 4 | name: config 5 | namespace: "gatekeeper-system" 6 | spec: 7 | sync: 8 | syncOnly: 9 | - group: "constraints.gatekeeper.sh" 10 | version: "v1beta1" 11 | kind: "K8sPSPAllowPrivilegeEscalationContainer" 12 | - group: "constraints.gatekeeper.sh" 13 | version: "v1beta1" 14 | kind: "K8sPSPCapabilities" 15 | - group: "constraints.gatekeeper.sh" 16 | version: "v1beta1" 17 | kind: "K8sPSPForbiddenSysctls" 18 | - group: "constraints.gatekeeper.sh" 19 | version: "v1beta1" 20 | kind: "K8sPSPHostFilesystem" 21 | - group: "constraints.gatekeeper.sh" 22 | version: "v1beta1" 23 | kind: "K8sPSPHostNamespace" 24 | - group: "constraints.gatekeeper.sh" 25 | version: "v1beta1" 26 | kind: "K8sPSPHostNetworkingPorts" 27 | - group: "constraints.gatekeeper.sh" 28 | version: 
"v1beta1" 29 | kind: "K8sPSPPrivilegedContainer" 30 | - group: "constraints.gatekeeper.sh" 31 | version: "v1beta1" 32 | kind: "K8sPSPProcMount" 33 | - group: "constraints.gatekeeper.sh" 34 | version: "v1beta1" 35 | kind: "K8sPSPAllowedUsers" 36 | -------------------------------------------------------------------------------- /chapter12/multi-tenant/yaml/require-psp-for-namespaceconstraint.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: K8sRequirePSPForNamespace 3 | metadata: 4 | name: k8srequirepspfornamespace 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: [""] 9 | kinds: ["Pod"] 10 | excludedNamespaces: 11 | - calico-system 12 | - gatekeeper-system 13 | - ingress-nginx 14 | - kube-node-lease 15 | - kube-public 16 | - kube-system 17 | - kubernetes-dashboard 18 | - local-path-storage 19 | - tigera-operator 20 | - activedirectory 21 | - cert-manager 22 | - docker-registry 23 | - gitlab 24 | - docker-registry -------------------------------------------------------------------------------- /chapter12/show_constraint_violations.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | for constraint in $(kubectl get crds | grep 'constraints.gatekeeper.sh' | awk '{print $1}'); 4 | do 5 | echo "$constraint $(kubectl get $constraint -o json | jq -r '.items[0].status.totalViolations')" 6 | done -------------------------------------------------------------------------------- /chapter13/bin-deny.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: security.kubearmor.com/v1 2 | kind: KubeArmorPolicy 3 | metadata: 4 | name: block-write-bin 5 | namespace: demo 6 | spec: 7 | action: Block 8 | file: 9 | # /bin in the KinD image is a link to /usr/bin - KubeArmor secures the final endpoint, so we need to have /usr/bin in the policy 10 | matchDirectories: 11 | - dir: /usr/bin/ 12 | readOnly: true 13 | recursive: true 14 | - dir: /bin/ 15 | readOnly: true 16 | recursive: true 17 | message: Alert! An attempt to write to the /bin directory denied. 18 | 19 | -------------------------------------------------------------------------------- /chapter13/cluster/calico/custom-resources.yaml: -------------------------------------------------------------------------------- 1 | # This section includes base Calico installation configuration. 2 | # For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation 3 | apiVersion: operator.tigera.io/v1 4 | kind: Installation 5 | metadata: 6 | name: default 7 | spec: 8 | # Configures Calico networking. 9 | calicoNetwork: 10 | # Note: The ipPools section cannot be modified post-install. 11 | ipPools: 12 | - blockSize: 26 13 | cidr: 10.240.0.0/16 14 | encapsulation: VXLANCrossSubnet 15 | natOutgoing: Enabled 16 | nodeSelector: all() 17 | 18 | --- 19 | 20 | # This section configures the Calico API server. 
21 | # For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer 22 | apiVersion: operator.tigera.io/v1 23 | kind: APIServer 24 | metadata: 25 | name: default 26 | spec: {} 27 | 28 | -------------------------------------------------------------------------------- /chapter13/cluster/cluster01-kind.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | runtimeConfig: 4 | "authentication.k8s.io/v1beta1": "true" 5 | networking: 6 | apiServerAddress: "0.0.0.0" 7 | disableDefaultCNI: true 8 | apiServerPort: 6443 9 | podSubnet: "10.240.0.0/16" 10 | serviceSubnet: "10.96.0.0/16" 11 | nodes: 12 | - role: control-plane 13 | extraMounts: 14 | - hostPath: /sys/kernel/security 15 | containerPath: /sys/kernel/security 16 | - role: worker 17 | extraPortMappings: 18 | - containerPort: 80 19 | hostPort: 80 20 | - containerPort: 443 21 | hostPort: 443 22 | - containerPort: 2222 23 | hostPort: 2222 24 | extraMounts: 25 | - hostPath: /sys/kernel/security 26 | containerPath: /sys/kernel/security 27 | -------------------------------------------------------------------------------- /chapter13/cluster/pvc-test/test-pvc.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: test-claim 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | resources: 9 | requests: 10 | storage: 1Mi 11 | --- 12 | kind: Pod 13 | apiVersion: v1 14 | metadata: 15 | name: test-pvc-claim 16 | spec: 17 | containers: 18 | - name: test-pod 19 | image: busybox 20 | command: 21 | - "/bin/sh" 22 | args: 23 | - "-c" 24 | - "touch /mnt/test && exit 0 || exit 1" 25 | volumeMounts: 26 | - name: test-pvc 27 | mountPath: "/mnt" 28 | restartPolicy: "Never" 29 | volumes: 30 | - name: test-pvc 31 | persistentVolumeClaim: 32 | claimName: test-claim 33 | 34 | -------------------------------------------------------------------------------- /chapter13/nginx-secrets/nginx-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: nginx-secrets 5 | namespace: my-ext-secret 6 | spec: 7 | rules: 8 | - http: 9 | paths: 10 | - backend: 11 | service: 12 | name: nginx-web 13 | port: 14 | number: 80 15 | path: "/" 16 | pathType: Prefix 17 | host: secret.IPADDR.nip.io 18 | -------------------------------------------------------------------------------- /chapter13/nginx-secrets/nginx-secrets-block.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: security.kubearmor.com/v1 2 | kind: KubeArmorPolicy 3 | metadata: 4 | name: nginx-secret 5 | namespace: my-ext-secret 6 | spec: 7 | selector: 8 | matchLabels: 9 | app: nginx-web 10 | file: 11 | matchDirectories: 12 | - dir: / 13 | recursive: true 14 | - dir: /etc/nginx/ 15 | recursive: true 16 | fromSource: 17 | - path: /usr/sbin/nginx 18 | - dir: /etc/secrets/ 19 | recursive: true 20 | fromSource: 21 | - path: /usr/sbin/nginx 22 | - dir: /etc/nginx/ 23 | recursive: true 24 | action: Block 25 | - dir: /etc/secrets/ 26 | recursive: true 27 | action: Block 28 | process: 29 | matchPaths: 30 | - path: /usr/sbin/nginx 31 | action: 32 | Allow 33 | -------------------------------------------------------------------------------- /chapter13/nginx-secrets/nginx-secrets.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: nginx-html 6 | namespace: my-ext-secret 7 | labels: 8 | app: nginx 9 | data: 10 | default.conf: |- 11 | 12 | server { 13 | listen 80; 14 | listen [::]:80; 15 | server_name localhost; 16 | 17 | location /secrets { 18 | alias /etc/secrets/; 19 | autoindex on; 20 | default_type text/html; 21 | } 22 | 23 | 24 | location / { 25 | root /usr/share/nginx/html; 26 | index index.html index.htm; 27 | } 28 | 29 | error_page 500 502 503 504 /50x.html; 30 | location = /50x.html { 31 | root /usr/share/nginx/html; 32 | } 33 | } 34 | --- 35 | apiVersion: v1 36 | kind: Pod 37 | metadata: 38 | labels: 39 | app: nginx-web 40 | name: nginx-secrets 41 | namespace: my-ext-secret 42 | annotations: 43 | #container.apparmor.security.beta.kubernetes.io/vault-agent: "unconfined" 44 | container.apparmor.security.beta.kubernetes.io/vault-agent-init: "unconfined" 45 | vault.hashicorp.com/service: "https://vault.apps.IPADDR.nip.io" 46 | vault.hashicorp.com/agent-inject: "true" 47 | vault.hashicorp.com/log-level: trace 48 | vault.hashicorp.com/role: extsecret 49 | vault.hashicorp.com/tls-skip-verify: "true" 50 | vault.hashicorp.com/agent-inject-secret-myenv: 'secret/data/extsecret/config' 51 | vault.hashicorp.com/secret-volume-path-myenv: '/etc/secrets' 52 | spec: 53 | containers: 54 | - image: nginx 55 | name: nginx-web 56 | volumeMounts: 57 | - mountPath: /etc/nginx/conf.d 58 | name: sources 59 | dnsPolicy: ClusterFirst 60 | restartPolicy: Never 61 | serviceAccountName: ext-secret-vault 62 | serviceAccount: ext-secret-vault 63 | volumes: 64 | - name: sources 65 | configMap: 66 | name: nginx-html 67 | defaultMode: 0777 68 | --- 69 | apiVersion: v1 70 | kind: Service 71 | metadata: 72 | creationTimestamp: null 73 | labels: 74 | app: nginx 75 | name: nginx-web 76 | namespace: my-ext-secret 77 | spec: 78 | ports: 79 | - port: 80 80 | protocol: TCP 81 | targetPort: 80 82 | name: http-service 83 | selector: 84 | app: nginx-web 85 | 86 | -------------------------------------------------------------------------------- /chapter13/nginx-secrets/redeploy-nginx-vault.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | clear 3 | 4 | tput setaf 5 5 | echo -e "\n \n*******************************************************************************************************************" 6 | echo -e "Deleting NGINX pod and namespace" 7 | echo -e "*******************************************************************************************************************" 8 | tput setaf 3 9 | kubectl delete pods nginx-secrets -n my-ext-secret 10 | kubectl delete ns my-ext-secret 11 | tput setaf 7 12 | echo -e "\nExecuting script to re-create namespace and NGINX pod" 13 | sleep 5 14 | 15 | tput setaf 5 16 | echo -e "\n \n*******************************************************************************************************************" 17 | echo -e "Redeploying new NGINX pod" 18 | echo -e "*******************************************************************************************************************" 19 | tput setaf 3 20 | ./create-nginx-vault.sh 21 | 22 | tput setaf 5 23 | echo -e "*******************************************************************************************************************" 24 | echo -e "Creating Kubearmor policy to secure Vault secret in pod" 25 | echo -e 
"*******************************************************************************************************************" 26 | tput setaf 3 27 | kubectl apply -f nginx-secrets-block.yaml 28 | 29 | tput setaf 7 30 | echo -e "\n\n*******************************************************************************************************************" 31 | echo -e "Done deploying new NGINX pod and Kubearmor policy to protect the Vault secret" 32 | echo -e "*******************************************************************************************************************\n\n" 33 | tput setaf 2 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /chapter13/nginx-secrets/volume-secrets-watch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | labels: 5 | run: watch-volume 6 | name: test-volumes-secrets-watch 7 | namespace: my-ext-secret 8 | spec: 9 | containers: 10 | - image: ubuntu:22.04 11 | name: test 12 | resources: {} 13 | command: 14 | - bash 15 | - -c 16 | - 'while [[ 1 == 1 ]]; do date && cat /etc/secrets/somepassword && echo "" && echo "----------" && sleep 1; done' 17 | volumeMounts: 18 | - name: mypassword 19 | mountPath: /etc/secrets 20 | volumes: 21 | - name: mypassword 22 | secret: 23 | secretName: secret-to-be-created 24 | dnsPolicy: ClusterFirst 25 | restartPolicy: Never -------------------------------------------------------------------------------- /chapter13/nginx-secrets/volume-secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | labels: 5 | run: test-volume 6 | name: test-volume-secrets 7 | namespace: my-ext-secret 8 | spec: 9 | containers: 10 | - image: busybox 11 | name: test 12 | resources: {} 13 | command: 14 | - sh 15 | - -c 16 | - 'cat /etc/secrets/somepassword' 17 | volumeMounts: 18 | - name: mypassword 19 | mountPath: /etc/secrets 20 | volumes: 21 | - name: mypassword 22 | secret: 23 | secretName: secret-to-be-created 24 | dnsPolicy: ClusterFirst 25 | restartPolicy: Never -------------------------------------------------------------------------------- /chapter13/nginx-secrets/volume-vault-watch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | labels: 5 | run: watch-vault-volume 6 | name: test-vault-vault-watch 7 | namespace: my-ext-secret 8 | annotations: 9 | vault.hashicorp.com/service: "https://vault.apps.IPADDR.nip.io" 10 | vault.hashicorp.com/agent-inject: "true" 11 | vault.hashicorp.com/log-level: trace 12 | vault.hashicorp.com/role: extsecret 13 | vault.hashicorp.com/tls-skip-verify: "true" 14 | vault.hashicorp.com/agent-inject-secret-myenv: 'secret/data/extsecret/config' 15 | vault.hashicorp.com/secret-volume-path-myenv: '/etc/secrets' 16 | vault.hashicorp.com/agent-inject-template-myenv: | 17 | {{- with secret "secret/data/extsecret/config" -}} 18 | MY_SECRET_PASSWORD="{{ index .Data "some-password" }}" 19 | {{- end }} 20 | spec: 21 | containers: 22 | - image: ubuntu:22.04 23 | name: test 24 | resources: {} 25 | command: 26 | - bash 27 | - -c 28 | - 'while [[ 1 == 1 ]]; do date && cat /etc/secrets/myenv && echo "" && echo "----------" && sleep 1; done' 29 | dnsPolicy: ClusterFirst 30 | restartPolicy: Never 31 | serviceAccountName: ext-secret-vault 32 | serviceAccount: ext-secret-vault 
-------------------------------------------------------------------------------- /chapter13/nginx/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: nginx-ingress 5 | spec: 6 | rules: 7 | - host: "webserver.$hostip.nip.io" 8 | http: 9 | paths: 10 | - path: / 11 | pathType: Prefix 12 | backend: 13 | service: 14 | name: nginx-web 15 | port: 16 | number: 8080 17 | -------------------------------------------------------------------------------- /chapter13/nginx/ngnix-ingress-remove.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | clear 3 | 4 | # Create a simple NGINX deployment using kubectl and name it nginx-web 5 | tput setaf 5 6 | echo -e "\n*******************************************************************************************************************" 7 | echo -e "Cleaning up the NGINX resources" 8 | echo -e "*******************************************************************************************************************" 9 | tput setaf 2 10 | kubectl delete deployment nginx-web -n demo 11 | kubectl delete svc nginx-web -n demo 12 | kubectl delete ingress nginx-ingress -n demo 13 | 14 | tput setaf 7 15 | echo -e "\n \n*******************************************************************************************************************" 16 | echo -e "Done. The ingress example has been removed from the cluster." 17 | echo -e "******************************************************************************************************************* \n\n" 18 | tput setaf 2 19 | 20 | -------------------------------------------------------------------------------- /chapter13/patch-relay.yaml: -------------------------------------------------------------------------------- 1 | spec: 2 | template: 3 | spec: 4 | containers: 5 | - name: kubearmor-relay-server 6 | env: 7 | - name: ENABLE_STDOUT_LOGS 8 | value: "true" 9 | - name: ENABLE_STDOUT_ALERTS 10 | value: "true" 11 | 12 | -------------------------------------------------------------------------------- /chapter13/vault/api-server-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: api-server-ingress 5 | namespace: default 6 | annotations: 7 | kubernetes.io/ingress.class: "nginx" 8 | nginx.ingress.kubernetes.io/force-ssl-redirect: "true" 9 | nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" 10 | cert-manager.io/cluster-issuer: "enterprise-ca" 11 | spec: 12 | rules: 13 | - http: 14 | paths: 15 | - backend: 16 | service: 17 | name: kubernetes 18 | port: 19 | number: 443 20 | path: "/" 21 | pathType: Prefix 22 | host: kube-api.IPADDR.nip.io 23 | tls: 24 | - hosts: 25 | - kube-api.IPADDR.nip.io 26 | secretName: api-web-tls -------------------------------------------------------------------------------- /chapter13/vault/install_vault.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if which vault > /dev/null; then 4 | echo "vault already installed" 5 | else 6 | echo "install the vault binary" 7 | sudo apt update && sudo apt install gpg 8 | wget -O- https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg 9 | gpg --no-default-keyring --keyring /usr/share/keyrings/hashicorp-archive-keyring.gpg --fingerprint 10 | echo "deb [arch=$(dpkg 
--print-architecture) signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list 11 | sudo apt update 12 | sudo apt install vault 13 | fi -------------------------------------------------------------------------------- /chapter13/vault/unseal.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | KEYS=$(jq -r '@sh "\(.unseal_keys_hex)\n"'< $1) 5 | echo $KEYS 6 | 7 | for KEY in $KEYS 8 | do 9 | echo $KEY 10 | KEY2=$(echo -n $KEY | cut -d "'" -f 2) 11 | kubectl exec -i vault-0 -n vault -- vault operator unseal $KEY2 12 | done 13 | -------------------------------------------------------------------------------- /chapter13/vault/vault-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: vault-ingress 5 | namespace: vault 6 | annotations: 7 | kubernetes.io/ingress.class: "nginx" 8 | nginx.ingress.kubernetes.io/force-ssl-redirect: "true" 9 | nginx.ingress.kubernetes.io/backend-protocol: "HTTP" 10 | cert-manager.io/cluster-issuer: "enterprise-ca" 11 | spec: 12 | rules: 13 | - http: 14 | paths: 15 | - backend: 16 | service: 17 | name: vault-ui 18 | port: 19 | number: 8200 20 | path: "/" 21 | pathType: Prefix 22 | host: vault.apps.IPADDR.nip.io 23 | tls: 24 | - hosts: 25 | - vault.apps.IPADDR.nip.io 26 | secretName: vault-web-tls -------------------------------------------------------------------------------- /chapter13/vault/vault_cli.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | kubectl get secret root-ca -n cert-manager -o json | jq -r '.data["tls.crt"]' | base64 -d > /tmp/root-ca.crt 4 | 5 | export hostip=$(hostname -I | cut -f1 -d' ' | sed 's/[.]/-/g') 6 | 7 | export VAULT_ADDR="https://vault.apps.$hostip.nip.io/" 8 | export VAULT_CACERT="/tmp/root-ca.crt" 9 | export VAULT_TOKEN=$(jq -r '.root_token' < ~/unseal-keys.json) -------------------------------------------------------------------------------- /chapter13/vault/vault_integrate_cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | export hostip=$(hostname -I | cut -f1 -d' ' | sed 's/[.]/-/g') 5 | 6 | kubectl create ns vault-integration 7 | kubectl create sa vault-client -n vault-integration 8 | 9 | kubectl create -f - < -------------------------------------------------------------------------------- /chapter13/vault_cli.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | kubectl get secret root-ca -n cert-manager -o json | jq -r '.data["tls.crt"]' | base64 -d > /tmp/root-ca.crt 4 | 5 | export hostip=$(hostname -I | cut -f1 -d' ' | sed 's/[.]/-/g') 6 | 7 | export VAULT_ADDR="https://vault.apps.$hostip.nip.io/" 8 | export VAULT_CACERT="/tmp/root-ca.crt" 9 | export VAULT_TOKEN=$(jq -r '.root_token' < ~/unseal-keys.json) -------------------------------------------------------------------------------- /chapter14/create-backup-objects.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | clear 3 | 4 | tput setaf 6 5 | echo -e "\n \n*******************************************************************************************************************" 6 | echo -e "Creating objects for Velero tests" 7 | echo -e "*******************************************************************************************************************" 8 | 9 | tput setaf 6 10 | echo -e "\n \n*******************************************************************************************************************" 11 | echo -e "Creating Demo Namespaces" 12 | 
echo -e "*******************************************************************************************************************" 13 | tput setaf 2 14 | kubectl create ns demo1 15 | kubectl create ns demo2 16 | kubectl create ns demo3 17 | kubectl create ns demo4 18 | 19 | tput setaf 6 20 | echo -e "\n \n*******************************************************************************************************************" 21 | echo -e "Creating Objects in each namespace" 22 | echo -e "*******************************************************************************************************************" 23 | tput setaf 2 24 | kubectl run nginx --image=bitnami/nginx -n demo1 25 | kubectl run nginx --image=bitnami/nginx -n demo2 26 | kubectl run nginx --image=bitnami/nginx -n demo3 27 | kubectl run nginx --image=bitnami/nginx -n demo4 28 | 29 | echo -e "\n" 30 | tput setaf 9 31 | -------------------------------------------------------------------------------- /chapter14/create-minio-ingress.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | clear 3 | 4 | tput setaf 6 5 | echo -e "\n \n*******************************************************************************************************************" 6 | echo -e "Deploying the Ingress Rule for the Minio Console" 7 | echo -e "*******************************************************************************************************************" 8 | 9 | tput setaf 6 10 | echo -e "\n \n*******************************************************************************************************************" 11 | echo -e "Creating Ingress rule for the Minio Console" 12 | echo -e "*******************************************************************************************************************" 13 | tput setaf 2 14 | export hostip=$(hostname -I | cut -f1 -d' ') 15 | envsubst < minio-ingress.yaml | kubectl apply -f - --namespace velero 16 | 17 | tput setaf 5 18 | echo -e "\n\nThe Minio dashboard ingress rule has been created, you can open the UI using http://minio-console.$hostip.nip.io/" 19 | 20 | echo -e "\n\n" 21 | tput setaf 9 22 | -------------------------------------------------------------------------------- /chapter14/credentials-velero: -------------------------------------------------------------------------------- 1 | [default] 2 | aws_access_key_id = packt 3 | aws_secret_access_key = packt123 4 | 5 | -------------------------------------------------------------------------------- /chapter14/etcd/install-etcd-tools.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | clear 4 | tput setaf 5 5 | echo -e "\n*******************************************************************************************************************" 6 | echo -e "Downloading etcdctl and extracting contents" 7 | echo -e "*******************************************************************************************************************" 8 | tput setaf 2 9 | wget https://github.com/etcd-io/etcd/releases/download/v3.5.10/etcd-v3.5.10-linux-amd64.tar.gz 10 | tar xvf etcd-v3.5.10-linux-amd64.tar.gz 11 | 12 | tput setaf 5 13 | echo -e "\n*******************************************************************************************************************" 14 | echo -e "Moving the etcdctl binary to /usr/bin" 15 | echo -e "*******************************************************************************************************************" 16 | tput setaf 2 17 | sudo cp
etcd-v3.5.10-linux-amd64/etcdctl /usr/bin 18 | 19 | tput setaf 5 20 | echo -e "\n*******************************************************************************************************************" 21 | echo -e "Removing archive and extra files" 22 | echo -e "*******************************************************************************************************************" 23 | tput setaf 2 24 | rm etcd-v3.5.10-linux-amd64.tar.gz 25 | rm -rf etcd-v3.5.10-linux-amd64 26 | 27 | tput setaf 5 28 | echo -e "\n*******************************************************************************************************************" 29 | echo -e "Copying the etcd certificates from the control plane into the ./certs directory" 30 | echo -e "*******************************************************************************************************************" 31 | tput setaf 2 32 | 33 | docker cp cluster01-control-plane:/etc/kubernetes/pki/etcd ./certs 34 | 35 | tput setaf 3 36 | echo -e "\n*******************************************************************************************************************" 37 | echo -e "etcdctl download complete" 38 | echo -e "*******************************************************************************************************************" 39 | tput setaf 2 40 | 41 | -------------------------------------------------------------------------------- /chapter14/install-velero-binary.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | clear 4 | tput setaf 5 5 | echo -e "\n*******************************************************************************************************************" 6 | echo -e "Downloading the Velero Binary" 7 | echo -e "*******************************************************************************************************************" 8 | tput setaf 2 9 | wget https://github.com/vmware-tanzu/velero/releases/download/v1.12.1/velero-v1.12.1-linux-amd64.tar.gz 10 | 11 | tput setaf 5 12 | echo -e "\n*******************************************************************************************************************" 13 | echo -e "Extracting archive" 14 | echo -e "*******************************************************************************************************************" 15 | tput setaf 2 16 | tar xvf velero-v1.12.1-linux-amd64.tar.gz 17 | 18 | tput setaf 5 19 | echo -e "\n*******************************************************************************************************************" 20 | echo -e "Moving Velero binary to /usr/bin" 21 | echo -e "*******************************************************************************************************************" 22 | tput setaf 2 23 | sudo mv velero-v1.12.1-linux-amd64/velero /usr/bin 24 | 25 | tput setaf 5 26 | echo -e "\n*******************************************************************************************************************" 27 | echo -e "Removing extra files" 28 | echo -e "*******************************************************************************************************************" 29 | tput setaf 2 30 | rm -rf velero-v1.12.1-linux-amd64 31 | 32 | tput setaf 3 33 | echo -e "\n*******************************************************************************************************************" 34 | echo -e "Velero binary install complete, you should see the version below" 35 | echo -e "*******************************************************************************************************************" 36 | tput setaf 2 37 | velero 
version 38 | 39 | echo -e "\n\n" 40 | 41 | -------------------------------------------------------------------------------- /chapter14/minio-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: minio-ingress 5 | namespace: velero 6 | spec: 7 | rules: 8 | - host: "minio-console.$hostip.nip.io" 9 | http: 10 | paths: 11 | - path: / 12 | pathType: Prefix 13 | backend: 14 | service: 15 | name: console 16 | port: 17 | number: 9001 18 | - host: "minio.$hostip.nip.io" 19 | http: 20 | paths: 21 | - path: / 22 | pathType: Prefix 23 | backend: 24 | service: 25 | name: minio 26 | port: 27 | number: 9000 28 | 29 | -------------------------------------------------------------------------------- /chapter14/pvc-example/busybox-pvc.yaml: -------------------------------------------------------------------------------- 1 | kind: Namespace 2 | apiVersion: v1 3 | metadata: 4 | name: demo 5 | --- 6 | kind: PersistentVolumeClaim 7 | apiVersion: v1 8 | metadata: 9 | name: test-claim 10 | namespace: demo 11 | annotations: 12 | volumeType: local 13 | spec: 14 | accessModes: 15 | - ReadWriteOnce 16 | resources: 17 | requests: 18 | storage: 1Gi 19 | --- 20 | apiVersion: apps/v1 21 | kind: Deployment 22 | metadata: 23 | name: busybox-pvc 24 | namespace: demo 25 | spec: 26 | selector: 27 | matchLabels: 28 | app: busybox-pvc 29 | template: 30 | metadata: 31 | labels: 32 | app: busybox-pvc 33 | spec: 34 | containers: 35 | - name: busybox-pvc 36 | image: busybox 37 | command: 38 | - "/bin/sh" 39 | args: 40 | - "-c" 41 | - "touch /mnt/original-data && sleep 20000" 42 | volumeMounts: 43 | - name: test-pvc 44 | mountPath: "/mnt" 45 | volumes: 46 | - name: test-pvc 47 | persistentVolumeClaim: 48 | claimName: test-claim 49 | 50 | -------------------------------------------------------------------------------- /chapter14/velero-cluster/calico/custom-resources.yaml: -------------------------------------------------------------------------------- 1 | # This section includes base Calico installation configuration. 2 | # For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation 3 | apiVersion: operator.tigera.io/v1 4 | kind: Installation 5 | metadata: 6 | name: default 7 | spec: 8 | # Configures Calico networking. 9 | calicoNetwork: 10 | # Note: The ipPools section cannot be modified post-install. 11 | ipPools: 12 | - blockSize: 26 13 | cidr: 10.25.0.0/16 14 | encapsulation: VXLANCrossSubnet 15 | natOutgoing: Enabled 16 | nodeSelector: all() 17 | 18 | --- 19 | 20 | # This section configures the Calico API server. 
21 | # For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer 22 | apiVersion: operator.tigera.io/v1 23 | kind: APIServer 24 | metadata: 25 | name: default 26 | spec: {} 27 | 28 | -------------------------------------------------------------------------------- /chapter14/velero-cluster/velero-cluster.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | runtimeConfig: 4 | "authentication.k8s.io/v1beta1": "true" 5 | "admissionregistration.k8s.io/v1beta1": true 6 | featureGates: 7 | "ValidatingAdmissionPolicy": true 8 | networking: 9 | apiServerAddress: "0.0.0.0" 10 | disableDefaultCNI: true 11 | apiServerPort: 6444 12 | podSubnet: "10.25.0.0/16" 13 | serviceSubnet: "10.97.0.0/16" 14 | -------------------------------------------------------------------------------- /chapter15/alertmanager-webhook/critical-alerts.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1alpha1 2 | kind: AlertmanagerConfig 3 | metadata: 4 | name: critical-alerts 5 | namespace: kube-system 6 | labels: 7 | alertmanagerConfig: critical 8 | spec: 9 | receivers: 10 | - name: nginx-webhook 11 | webhookConfigs: 12 | - sendResolved: true 13 | url: http://nginx-alerts.alert-manager-webhook.svc/webhook 14 | route: 15 | repeatInterval: 30s 16 | receiver: 'nginx-webhook' 17 | matchers: 18 | - name: severity 19 | matchType: "=" 20 | value: critical 21 | groupBy: ['namespace'] 22 | groupWait: 30s 23 | groupInterval: 5m -------------------------------------------------------------------------------- /chapter15/opensearch/fluentbit.yaml: -------------------------------------------------------------------------------- 1 | config: 2 | outputs: | 3 | [OUTPUT] 4 | Name opensearch 5 | Match kube.* 6 | Host my-cluster.opensearch-cp.svc 7 | HTTP_User logstash 8 | HTTP_Passwd logstash 9 | tls On 10 | tls.verify Off 11 | Logstash_Format On 12 | Retry_Limit False 13 | Suppress_Type_Name On 14 | 15 | [OUTPUT] 16 | Name opensearch 17 | Type opensearch 18 | Match host.* 19 | Host my-cluster.opensearch-cp.svc 20 | HTTP_User logstash 21 | HTTP_Passwd logstash 22 | tls On 23 | tls.verify Off 24 | Logstash_Format On 25 | Logstash_Prefix node 26 | Retry_Limit False 27 | Suppress_Type_Name On -------------------------------------------------------------------------------- /chapter15/simple/alertmanager-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: alertmanager 5 | namespace: monitoring 6 | annotations: 7 | kubernetes.io/ingress.class: "nginx" 8 | nginx.ingress.kubernetes.io/force-ssl-redirect: "true" 9 | nginx.ingress.kubernetes.io/backend-protocol: "HTTP" 10 | spec: 11 | rules: 12 | - http: 13 | paths: 14 | - backend: 15 | service: 16 | name: prometheus-kube-prometheus-alertmanager 17 | port: 18 | number: 9093 19 | path: "/" 20 | pathType: Prefix 21 | host: alertmanager.apps.IPADDR.nip.io 22 | tls: 23 | - hosts: 24 | - alertmanager.apps.IPADDR.nip.io 25 | secretName: alertmanager-web-tls -------------------------------------------------------------------------------- /chapter15/simple/grafana-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: grafana 5 | 
namespace: monitoring 6 | annotations: 7 | kubernetes.io/ingress.class: "nginx" 8 | nginx.ingress.kubernetes.io/force-ssl-redirect: "true" 9 | nginx.ingress.kubernetes.io/backend-protocol: "HTTP" 10 | spec: 11 | rules: 12 | - http: 13 | paths: 14 | - backend: 15 | service: 16 | name: prometheus-grafana 17 | port: 18 | number: 80 19 | path: "/" 20 | pathType: Prefix 21 | host: grafana.apps.IPADDR.nip.io 22 | tls: 23 | - hosts: 24 | - grafana.apps.IPADDR.nip.io 25 | secretName: grafana-web-tls -------------------------------------------------------------------------------- /chapter15/simple/prometheus-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: prometheus 5 | namespace: monitoring 6 | annotations: 7 | kubernetes.io/ingress.class: "nginx" 8 | nginx.ingress.kubernetes.io/force-ssl-redirect: "true" 9 | nginx.ingress.kubernetes.io/backend-protocol: "HTTP" 10 | spec: 11 | rules: 12 | - http: 13 | paths: 14 | - backend: 15 | service: 16 | name: prometheus-kube-prometheus-prometheus 17 | port: 18 | number: 9090 19 | path: "/" 20 | pathType: Prefix 21 | host: prometheus.apps.IPADDR.nip.io 22 | tls: 23 | - hosts: 24 | - prometheus.apps.IPADDR.nip.io 25 | secretName: prometheus-web-tls -------------------------------------------------------------------------------- /chapter15/user-auth/grafana-result-group.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: openunison.tremolo.io/v1 2 | kind: ResultGroup 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: openunison-resultgroups 6 | app.kubernetes.io/instance: openunison-orchestra-login-portal 7 | app.kubernetes.io/name: openunison 8 | app.kubernetes.io/part-of: openunison 9 | name: grafana 10 | namespace: openunison 11 | spec: 12 | - resultType: header 13 | source: static 14 | value: X-WEBAUTH-GROUPS=Admin 15 | - resultType: header 16 | source: user 17 | value: X-WEBAUTH-USER=uid -------------------------------------------------------------------------------- /chapter15/user-auth/kubernetes-dashboard-values.yaml: -------------------------------------------------------------------------------- 1 | nginx: 2 | enabled: false 3 | 4 | kong: 5 | enabled: false 6 | 7 | api: 8 | scaling: 9 | replicas: 1 10 | containers: 11 | ports: 12 | - name: api-tls 13 | containerPort: 8001 14 | protocol: TCP 15 | volumeMounts: 16 | - mountPath: /tmp 17 | name: tmp-volume 18 | - mountPath: /certs 19 | name: tls 20 | volumes: 21 | - name: tmp-volume 22 | emptyDir: {} 23 | - name: tls 24 | secret: 25 | secretName: kubernetes-dashboard-certs 26 | optional: true 27 | 28 | web: 29 | scaling: 30 | replicas: 1 31 | containers: 32 | ports: 33 | - name: api-tls 34 | containerPort: 8001 35 | protocol: TCP 36 | volumeMounts: 37 | - mountPath: /tmp 38 | name: tmp-volume 39 | - mountPath: /certs 40 | name: tls 41 | volumes: 42 | - name: tmp-volume 43 | emptyDir: {} 44 | - name: tls 45 | secret: 46 | secretName: kubernetes-dashboard-certs 47 | optional: true 48 | 49 | auth: 50 | scaling: 51 | replicas: 0 52 | volumeMounts: 53 | - mountPath: /tmp 54 | name: tmp-volume 55 | - mountPath: /certs 56 | name: tls 57 | volumes: 58 | - name: tmp-volume 59 | emptyDir: {} 60 | - name: tls 61 | secret: 62 | secretName: kubernetes-dashboard-certs 63 | optional: false -------------------------------------------------------------------------------- /chapter15/user-auth/prom-openunison-values.yaml: 
-------------------------------------------------------------------------------- 1 | grafana: 2 | grafana.ini: 3 | users: 4 | allow_sign_up: false 5 | auto_assign_org: true 6 | auto_assign_org_role: Admin 7 | 8 | 9 | auth.proxy: 10 | enabled: true 11 | header_name: X-WEBAUTH-USER 12 | auto_sign_up: true 13 | headers: "Groups:X-WEBAUTH-GROUPS" -------------------------------------------------------------------------------- /chapter16/add-ons/deploy-add-ons.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | clear 3 | 4 | tput setaf 6 5 | echo -e "\n \n*******************************************************************************************************************" 6 | echo -e "Deploying Stateful Istio Add-Ons: Prometheus, Grafana and Jaeger" 7 | echo -e "*******************************************************************************************************************" 8 | tput setaf 2 9 | kubectl apply -f prometheus-deployment.yaml -n istio-system 10 | kubectl apply -f grafana-deployment.yaml -n istio-system 11 | kubectl apply -f jaeger-deployment.yaml -n istio-system 12 | 13 | tput setaf 6 14 | echo -e "\n \n*******************************************************************************************************************" 15 | echo -e "Getting the Host IP address to create the nip.io name and deploying new Gateway for Add-Ons" 16 | echo -e "*******************************************************************************************************************" 17 | tput setaf 2 18 | export hostip=$(hostname -I | cut -f1 -d' ') 19 | envsubst < gw.yaml | kubectl apply -f - 20 | 21 | tput setaf 6 22 | echo -e "\n \n*******************************************************************************************************************" 23 | echo -e "Creating VirtualServices for Each Add-On" 24 | echo -e "*******************************************************************************************************************" 25 | tput setaf 2 26 | envsubst < grafana-vs.yaml | kubectl apply -f - 27 | envsubst < prometheus-vs.yaml | kubectl apply -f - 28 | envsubst < jaeger-vs.yaml | kubectl apply -f - 29 | 30 | tput setaf 5 31 | echo -e "\n\nThe Istio objects have been created for Grafana, Prometheus and Jaeger, you can open the UI using the following URLs\n" 32 | tput setaf 7 33 | echo -e "Grafana : http://grafana.$hostip.nip.io/" 34 | echo -e "Prometheus : http://prom.$hostip.nip.io/" 35 | echo -e "Jaeger : http://jaeger.$hostip.nip.io/" 36 | 37 | echo -e "\n\n" 38 | tput setaf 9 39 | -------------------------------------------------------------------------------- /chapter16/add-ons/grafana-vs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.istio.io/v1beta1 2 | kind: VirtualService 3 | metadata: 4 | name: grafana-vs 5 | namespace: istio-system 6 | spec: 7 | hosts: 8 | - grafana.$hostip.nip.io 9 | gateways: 10 | - grafana-gateway 11 | http: 12 | - route: 13 | - destination: 14 | port: 15 | number: 3000 16 | host: grafana 17 | 18 | -------------------------------------------------------------------------------- /chapter16/add-ons/gw.yaml: -------------------------------------------------------------------------------- 1 | kind: Gateway 2 | apiVersion: networking.istio.io/v1alpha3 3 | metadata: 4 | name: grafana-gateway 5 | namespace: istio-system 6 | spec: 7 | servers: 8 | - hosts: 9 | - grafana.$hostip.nip.io 10 | - prom.$hostip.nip.io 11 | - jaeger.$hostip.nip.io 12 | port: 13 | name: 
http 14 | number: 80 15 | protocol: HTTP 16 | selector: 17 | istio: ingressgateway 18 | -------------------------------------------------------------------------------- /chapter16/add-ons/jaeger-vs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.istio.io/v1beta1 2 | kind: VirtualService 3 | metadata: 4 | name: jaeger-vs 5 | namespace: istio-system 6 | spec: 7 | hosts: 8 | - jaeger.$hostip.nip.io 9 | gateways: 10 | - grafana-gateway 11 | http: 12 | - route: 13 | - destination: 14 | port: 15 | number: 80 16 | host: tracing 17 | 18 | -------------------------------------------------------------------------------- /chapter16/add-ons/prometheus-vs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.istio.io/v1beta1 2 | kind: VirtualService 3 | metadata: 4 | name: prometheus-vs 5 | namespace: istio-system 6 | spec: 7 | hosts: 8 | - prom.$hostip.nip.io 9 | gateways: 10 | - grafana-gateway 11 | http: 12 | - route: 13 | - destination: 14 | port: 15 | number: 9090 16 | host: prometheus 17 | 18 | -------------------------------------------------------------------------------- /chapter16/example-app/gw.yaml: -------------------------------------------------------------------------------- 1 | kind: Gateway 2 | apiVersion: networking.istio.io/v1alpha3 3 | metadata: 4 | name: frontend-gateway 5 | namespace: demo 6 | spec: 7 | servers: 8 | - hosts: 9 | - demo.$hostip.nip.io 10 | port: 11 | name: http 12 | number: 80 13 | protocol: HTTP 14 | selector: 15 | istio: ingressgateway 16 | -------------------------------------------------------------------------------- /chapter16/example-app/vs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.istio.io/v1beta1 2 | kind: VirtualService 3 | metadata: 4 | name: frontend-vs 5 | namespace: demo 6 | spec: 7 | hosts: 8 | - demo.$hostip.nip.io 9 | gateways: 10 | - frontend-gateway 11 | http: 12 | - route: 13 | - destination: 14 | port: 15 | number: 80 16 | host: frontend 17 | 18 | -------------------------------------------------------------------------------- /chapter16/expose_istio.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | clear 3 | 4 | tput setaf 6 5 | echo -e "\n \n*******************************************************************************************************************" 6 | echo -e "Deleting ingress-nginx namespace" 7 | echo -e "*******************************************************************************************************************" 8 | tput setaf 2 9 | kubectl delete -f ../chapter2/nginx-ingress/nginx-deploy.yaml 10 | 11 | tput setaf 6 12 | echo -e "\n \n*******************************************************************************************************************" 13 | echo -e "Patching istio-ingressgateway" 14 | echo -e "*******************************************************************************************************************" 15 | tput setaf 2 16 | kubectl patch deployments istio-ingressgateway -n istio-system -p 
'{"spec":{"template":{"spec":{"containers":[{"name":"istio-proxy","ports":[{"containerPort":15021,"protocol":"TCP"},{"containerPort":8080,"hostPort":80,"protocol":"TCP"},{"containerPort":8443,"hostPort":443,"protocol":"TCP"},{"containerPort":31400,"protocol":"TCP"},{"containerPort":15443,"protocol":"TCP"},{"containerPort":15090,"name":"http-envoy-prom","protocol":"TCP"}]}]}}}}' 17 | 18 | tput setaf 5 19 | echo -e "\n\nIstio ingress-gateway has replaced NGINX as the Ingress for KinD\n\n" 20 | -------------------------------------------------------------------------------- /chapter16/kiali/gw.yaml: -------------------------------------------------------------------------------- 1 | kind: Gateway 2 | apiVersion: networking.istio.io/v1alpha3 3 | metadata: 4 | name: kiali-gateway 5 | namespace: istio-system 6 | spec: 7 | servers: 8 | - hosts: 9 | - kiali.$hostip.nip.io 10 | port: 11 | name: http 12 | number: 80 13 | protocol: HTTP 14 | selector: 15 | istio: ingressgateway 16 | -------------------------------------------------------------------------------- /chapter16/kiali/vs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.istio.io/v1beta1 2 | kind: VirtualService 3 | metadata: 4 | name: kiali-vs 5 | namespace: istio-system 6 | spec: 7 | hosts: 8 | - kiali.$hostip.nip.io 9 | gateways: 10 | - kiali-gateway 11 | http: 12 | - route: 13 | - destination: 14 | port: 15 | number: 20001 16 | host: kiali 17 | 18 | -------------------------------------------------------------------------------- /chapter16/testapp/create-gw-vs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | tput setaf 6 4 | echo -e "\n \n*******************************************************************************************************************" 5 | echo -e "Deploying the Gateway and VirtualService for the test application" 6 | echo -e "*******************************************************************************************************************" 7 | 8 | tput setaf 6 9 | echo -e "\n \n*******************************************************************************************************************" 10 | echo -e "Getting the Host IP address to create the nip.ip name" 11 | echo -e "*******************************************************************************************************************" 12 | export hostip=$(hostname -I | cut -f1 -d' ') 13 | 14 | tput setaf 6 15 | echo -e "\n \n*******************************************************************************************************************" 16 | echo -e "Creating Gateway for the testapp" 17 | echo -e "*******************************************************************************************************************" 18 | envsubst < gw.yaml | kubectl apply -f - --namespace testapp 19 | 20 | tput setaf 6 21 | echo -e "\n \n*******************************************************************************************************************" 22 | echo -e "Creating VirtualService" 23 | echo -e "*******************************************************************************************************************" 24 | envsubst < vs.yaml | kubectl apply -f - --namespace testapp 25 | 26 | tput setaf 5 27 | echo -e "\n \nIt may take 3-5 minutes for the application pods to become ready" 28 | echo -e "\n \nOnce all pods are running, the Boutique application can be accessed using using http://testapp.$hostip.nip.io/" 29 | 30 | echo -e "\n\n" 31 | tput setaf 9 
32 | -------------------------------------------------------------------------------- /chapter16/testapp/example1-policy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: security.istio.io/v1beta1 2 | kind: AuthorizationPolicy 3 | metadata: 4 | name: testapp-policy-deny 5 | namespace: testapp 6 | spec: 7 | {} 8 | 9 | -------------------------------------------------------------------------------- /chapter16/testapp/example2-policy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: security.istio.io/v1beta1 2 | kind: AuthorizationPolicy 3 | metadata: 4 | name: testapp-get-allow 5 | namespace: testapp 6 | spec: 7 | selector: 8 | matchLabels: 9 | app: nginx-web 10 | action: ALLOW 11 | rules: 12 | - to: 13 | - operation: 14 | methods: ["GET"] 15 | 16 | -------------------------------------------------------------------------------- /chapter16/testapp/gw.yaml: -------------------------------------------------------------------------------- 1 | kind: Gateway 2 | apiVersion: networking.istio.io/v1alpha3 3 | metadata: 4 | name: frontend-gateway 5 | namespace: testapp 6 | spec: 7 | servers: 8 | - hosts: 9 | - testapp.$hostip.nip.io 10 | port: 11 | name: http 12 | number: 8080 13 | protocol: HTTP 14 | selector: 15 | istio: ingressgateway 16 | -------------------------------------------------------------------------------- /chapter16/testapp/remove-testapp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | clear 3 | 4 | # Remove the testapp namespace and the nginx-web example it contains 5 | tput setaf 5 6 | echo -e "\n*******************************************************************************************************************" 7 | echo -e "Cleaning up the NGINX resources" 8 | echo -e "*******************************************************************************************************************" 9 | tput setaf 2 10 | kubectl delete ns testapp 11 | 12 | tput setaf 7 13 | echo -e "\n \n*******************************************************************************************************************" 14 | echo -e "Done. The testapp example has been removed from the cluster."
15 | echo -e "******************************************************************************************************************* \n\n" 16 | tput setaf 2 17 | 18 | -------------------------------------------------------------------------------- /chapter16/testapp/vs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.istio.io/v1beta1 2 | kind: VirtualService 3 | metadata: 4 | name: testapp-vs 5 | namespace: testapp 6 | spec: 7 | hosts: 8 | - testapp.$hostip.nip.io 9 | gateways: 10 | - frontend-gateway 11 | http: 12 | - route: 13 | - destination: 14 | port: 15 | number: 8080 16 | host: nginx-web 17 | 18 | -------------------------------------------------------------------------------- /chapter17/authentication/deploy-auth.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | kubectl apply -f - <<EOF [...] 38 | sed "s/IPADDR/$hostip/g" < ./service-auth.yaml > /tmp/service-auth.yaml 39 | 40 | kubectl create -f /tmp/service-auth.yaml -------------------------------------------------------------------------------- /chapter17/authentication/patch-istiod.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "op": "add", 4 | "path":"/spec/template/spec/volumes/-", 5 | "value": { 6 | "name": "extracerts", 7 | "configMap": { 8 | "name": "jwks-certs", 9 | "defaultMode": 420, 10 | "optional": true 11 | } 12 | } 13 | }, 14 | { 15 | "op": "add", 16 | "path": "/spec/template/spec/containers/0/volumeMounts/-", 17 | "value": { 18 | "name": "extracerts", 19 | "readOnly": true, 20 | "mountPath": "/cacerts" 21 | } 22 | } 23 | ] -------------------------------------------------------------------------------- /chapter17/authentication/service-auth.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: security.istio.io/v1 3 | kind: RequestAuthentication 4 | metadata: 5 | name: hello-world-auth 6 | namespace: istio-hello-world 7 | spec: 8 | selector: 9 | matchLabels: 10 | app: run-service 11 | jwtRules: 12 | - issuer: https://k8sou.apps.IPADDR.nip.io/auth/idp/k8sIdp 13 | jwksUri: https://k8sou.apps.IPADDR.nip.io/auth/idp/k8sIdp/certs 14 | audiences: 15 | - kubernetes 16 | outputPayloadToHeader: User-Info 17 | --- 18 | apiVersion: security.istio.io/v1 19 | kind: AuthorizationPolicy 20 | metadata: 21 | name: simple-hellow-world 22 | namespace: istio-hello-world 23 | spec: 24 | action: ALLOW 25 | selector: 26 | matchLabels: 27 | app: run-service 28 | rules: 29 | - from: 30 | - source: 31 | requestPrincipals: ["*"] 32 | -------------------------------------------------------------------------------- /chapter17/coursed-grained-authorization/coursed-grained-az.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: security.istio.io/v1 3 | kind: AuthorizationPolicy 4 | metadata: 5 | name: service-level-az 6 | namespace: istio-hello-world 7 | spec: 8 | action: ALLOW 9 | selector: 10 | matchLabels: 11 | app: run-service 12 | rules: 13 | - when: 14 | - key: request.auth.claims[groups] 15 | values: ["cn=group2,ou=Groups,DC=domain,DC=com"] -------------------------------------------------------------------------------- /chapter17/hello-world/deploy_helloworld.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export hostip=$(hostname -I | cut -f1 -d' ' | sed 's/[.]/-/g') 4 | 5 | sed "s/IPADDR/$hostip/g" < ./hello-world.yaml > /tmp/hello-world.yaml 6 | 7 | 
kubectl create -f /tmp/hello-world.yaml -------------------------------------------------------------------------------- /chapter17/opa/rego/istio.authz.rego: -------------------------------------------------------------------------------- 1 | package istio.authz 2 | 3 | import input.attributes.request.http as http_request 4 | import input.parsed_path 5 | 6 | default allow = false 7 | 8 | 9 | # helper function to check that an array contains a value 10 | contains_element(arr, elem) = true { 11 | arr[_] = elem 12 | } else = false { true } 13 | 14 | # checks that the authorization header is properly formatted and returns the parsed payload 15 | verify_headers() = payload { 16 | # verify that there is an authorization header 17 | startswith(http_request.headers["authorization"], "Bearer ") 18 | 19 | # parse the JWT into its header, payload, and signature 20 | # we aren't going to validate the JWT, Istio did that in 21 | # the RequestAuthentication 22 | [header, payload, signature] := io.jwt.decode(trim_prefix(http_request.headers["authorization"], "Bearer ")) 23 | 24 | } else = false {true} 25 | 26 | # Test case for when the groups claim is a list: the user must be in k8s-cluster-admins and must not be in group2 27 | allow { 28 | 29 | # verify headers and retrieve the jwt payload 30 | payload := verify_headers() 31 | 32 | # make sure there are groups 33 | payload.groups 34 | 35 | # make sure the user is an admin 36 | contains_element(payload.groups,"cn=k8s-cluster-admins,ou=Groups,DC=domain,DC=com") 37 | 38 | # make sure the user is not in group2 39 | not contains_element(payload.groups,"cn=group2,ou=Groups,DC=domain,DC=com") 40 | } 41 | 42 | # Test case for when the groups claim is a single string, only checking for the admin group 43 | allow { 44 | # verify headers and retrieve the jwt payload 45 | payload := verify_headers() 46 | 47 | # make sure there are groups 48 | payload.groups 49 | 50 | # make sure the user is an admin 51 | payload.groups == "cn=k8s-cluster-admins,ou=Groups,DC=domain,DC=com" 52 | } 53 | 54 | -------------------------------------------------------------------------------- /chapter17/openunison-istio/kubernetes-dashboard-values.yaml: -------------------------------------------------------------------------------- 1 | nginx: 2 | enabled: false 3 | 4 | kong: 5 | enabled: false 6 | 7 | api: 8 | scaling: 9 | replicas: 1 10 | containers: 11 | ports: 12 | - name: api-tls 13 | containerPort: 8001 14 | protocol: TCP 15 | volumeMounts: 16 | - mountPath: /tmp 17 | name: tmp-volume 18 | - mountPath: /certs 19 | name: tls 20 | volumes: 21 | - name: tmp-volume 22 | emptyDir: {} 23 | - name: tls 24 | secret: 25 | secretName: kubernetes-dashboard-certs 26 | optional: true 27 | 28 | web: 29 | scaling: 30 | replicas: 1 31 | containers: 32 | ports: 33 | - name: api-tls 34 | containerPort: 8001 35 | protocol: TCP 36 | volumeMounts: 37 | - mountPath: /tmp 38 | name: tmp-volume 39 | - mountPath: /certs 40 | name: tls 41 | volumes: 42 | - name: tmp-volume 43 | emptyDir: {} 44 | - name: tls 45 | secret: 46 | secretName: kubernetes-dashboard-certs 47 | optional: true 48 | 49 | auth: 50 | scaling: 51 | replicas: 0 52 | volumeMounts: 53 | - mountPath: /tmp 54 | name: tmp-volume 55 | - mountPath: /certs 56 | name: tls 57 | volumes: 58 | - name: tmp-volume 59 | emptyDir: {} 60 | - name: tls 61 | secret: 62 | secretName: kubernetes-dashboard-certs 63 | optional: false -------------------------------------------------------------------------------- /chapter17/openunison-service-auth/.helmignore: 
-------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /chapter17/openunison-service-auth/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: openunison-service-auth 3 | description: A Helm chart for Kubernetes 4 | 5 | # A chart can be either an 'application' or a 'library' chart. 6 | # 7 | # Application charts are a collection of templates that can be packaged into versioned archives 8 | # to be deployed. 9 | # 10 | # Library charts provide useful utilities or functions for the chart developer. They're included as 11 | # a dependency of application charts to inject those utilities and functions into the rendering 12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed. 13 | type: application 14 | 15 | # This is the chart version. This version number should be incremented each time you make changes 16 | # to the chart and its templates, including the app version. 17 | # Versions are expected to follow Semantic Versioning (https://semver.org/) 18 | version: 0.1.0 19 | 20 | # This is the version number of the application being deployed. This version number should be 21 | # incremented each time you make changes to the application. Versions are not expected to 22 | # follow Semantic Versioning. They should reflect the version the application is using. 23 | # It is recommended to use it with quotes. 
24 | appVersion: "1.16.0" 25 | -------------------------------------------------------------------------------- /chapter17/openunison-service-auth/templates/authentication_chains.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: openunison.tremolo.io/v1 3 | kind: AuthenticationChain 4 | metadata: 5 | name: service-api-idp 6 | namespace: {{ .Release.Namespace }} 7 | spec: 8 | level: 20 9 | root: o=Data 10 | authMechs: 11 | - name: basic 12 | required: required 13 | params: 14 | realmName: service-api 15 | uidAttr: uid 16 | secretParams: [] 17 | - name: map 18 | required: required 19 | params: 20 | map: 21 | - "uid|composite|${uid}" 22 | - "mail|composite|${mail}" 23 | - "givenName|composite|${givenName}" 24 | - "sn|composite|${sn}" 25 | - "displayName|composite|${displayName}" 26 | - "memberOf|user|memberOf" 27 | - name: jit 28 | required: required 29 | params: 30 | nameAttr: uid 31 | workflowName: jitdb 32 | - name: genoidctoken 33 | required: required 34 | params: 35 | idpName: service-idp 36 | trustName: users -------------------------------------------------------------------------------- /chapter17/openunison-service-auth/templates/getusertoken.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: openunison.tremolo.io/v1 3 | kind: Application 4 | metadata: 5 | name: getusertoken 6 | namespace: {{ .Release.Namespace }} 7 | spec: 8 | azTimeoutMillis: 3000 9 | isApp: true 10 | urls: 11 | - hosts: 12 | - "#[OU_HOST]" 13 | filterChain: 14 | - className: com.tremolosecurity.scalejs.token.ws.ScaleToken 15 | params: 16 | displayNameAttribute: "sub" 17 | frontPage.title: "token" 18 | frontPage.text: "token" 19 | uidAttributeName: uid 20 | logoutURL: "/logout" 21 | homeURL: "/scale/" 22 | warnMinutesLeft: "5" 23 | tokenClassName: "com.tremolosecurity.idp.providers.oidc.scalejs.IdTokenLoader" 24 | uri: /get-user-token/token 25 | azRules: 26 | - scope: dn 27 | constraint: o=Tremolo 28 | authChain: service-api-idp 29 | results: {} 30 | cookieConfig: 31 | sessionCookieName: tremolosession 32 | domain: "#[OU_HOST]" 33 | secure: true 34 | httpOnly: true 35 | logoutURI: "/logout" 36 | keyAlias: session-unison 37 | timeout: 900 38 | scope: -1 39 | cookiesEnabled: false 40 | -------------------------------------------------------------------------------- /chapter17/openunison-service-auth/values.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Kubernetes-An-Enterprise-Guide-Third-Edition/7be4e8ded572e7e305ee6456a549ae58224d2e85/chapter17/openunison-service-auth/values.yaml -------------------------------------------------------------------------------- /chapter17/write-checks/call_service.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export hostip=$(hostname -I | cut -f1 -d' ' | sed 's/[.]/-/g') 4 | 5 | curl -H "Authorization: Bearer $(curl --insecure -u 'mmosley:start123' https://k8sou.apps.$hostip.nip.io/get-user-token/token/user 2>/dev/null| jq -r '.token.id_token')" http://write-checks.$hostip.nip.io/write-check 2>/dev/null | jq -r -------------------------------------------------------------------------------- /chapter17/write-checks/deploy_write_checks.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export hostip=$(hostname -I | cut -f1 -d' ' | sed 's/[.]/-/g') 4 | 5 | echo 
"getting oidc config" 6 | export oidc_config=$(curl --insecure https://k8sou.apps.$hostip.nip.io/auth/idp/service-idp/.well-known/openid-configuration 2>/dev/null | jq -r '.jwks_uri') 7 | 8 | 9 | sed "s/IPADDR/$hostip/g" < ./write_checks.yaml | sed "s/JWKS_FROM_SERVER/$jwks/g" > /tmp/write_checks.yaml 10 | 11 | kubectl apply -f /tmp/write_checks.yaml -------------------------------------------------------------------------------- /chapter19/examples/myapp/.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | stages: 2 | - build 3 | - deploy 4 | 5 | 6 | build-job: 7 | stage: build 8 | image: 9 | name: gcr.io/kaniko-project/executor:v1.14.0-debug 10 | entrypoint: 11 | - "" 12 | script: 13 | - mkdir -p /kaniko/.docker 14 | - cp $config_json /kaniko/.docker/config.json 15 | - /kaniko/executor 16 | --dockerfile source/Dockerfile 17 | --context source 18 | --destination $HARBOR_HOST/$NAMESPACE/mypython:${CI_COMMIT_SHORT_SHA} 19 | --registry-certificate $HARBOR_HOST=$ca_certificate 20 | 21 | deploy-job: 22 | stage: deploy 23 | environment: production 24 | image: docker.io/mlbiam/vcluster-onboard:1.0.0 25 | script: |- 26 | mkdir ~/.ssh 27 | cp $PATCH_KEY ~/.ssh/id_rsa 28 | chmod go-rwx ~/.ssh/id_rsa 29 | ssh-keyscan $GITLAB_HOST > ~/.ssh/known_hosts 30 | cd /tmp 31 | mkdir remote 32 | cd remote 33 | git clone git@$GITLAB_HOST:$NAMESPACE-dev/$NAMESPACE-ops.git 34 | cd $NAMESPACE-ops 35 | git config user.name gitlab-cicd 36 | git config user.email gitlab-cicd@$GITLAB_HOST 37 | kubectl patch --local -f yaml/namespaces/default/deployments/python-hello.yaml -p "{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"python-hello\",\"image\":\"$HARBOR_HOST/$NAMESPACE/mypython:${CI_COMMIT_SHORT_SHA}\"}]}}}}" -o yaml > /tmp/python-hello.yaml 38 | cp /tmp/python-hello.yaml yaml/namespaces/default/deployments/python-hello.yaml 39 | git add yaml/namespaces/default/deployments/python-hello.yaml 40 | git commit -m "commit automated build from commit ${CI_COMMIT_SHORT_SHA}" 41 | git push 42 | 43 | 44 | 45 | 46 | -------------------------------------------------------------------------------- /chapter19/examples/myapp/source/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:buster 2 | 3 | RUN groupadd -r microsvc -g 433 && \ 4 | mkdir /usr/local/microsvc && \ 5 | useradd -u 431 -r -g microsvc -d /usr/local/microsvc -s /sbin/nologin -c "Micro Service User" microsvc 6 | 7 | ADD requirements.txt /usr/local/microsvc/requirements.txt 8 | ADD helloworld.py /usr/local/microsvc/helloworld.py 9 | 10 | WORKDIR /usr/local/microsvc 11 | 12 | 13 | RUN pip3 install -r ./requirements.txt 14 | 15 | USER 431 16 | 17 | CMD ["python3","helloworld.py"] -------------------------------------------------------------------------------- /chapter19/examples/myapp/source/helloworld.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | import os 3 | import socket 4 | import json 5 | 6 | app = Flask(__name__) 7 | 8 | @app.route('/') 9 | def hello(): 10 | retVal = { 11 | "msg":"hello world!", 12 | "host":"%s" % socket.gethostname() 13 | 14 | } 15 | return json.dumps(retVal) 16 | 17 | if __name__ == "__main__": 18 | app.run(host="0.0.0.0", port=8080, debug=True) -------------------------------------------------------------------------------- /chapter19/examples/myapp/source/requirements.txt: 
-------------------------------------------------------------------------------- 1 | flask -------------------------------------------------------------------------------- /chapter19/examples/ops/python-hello.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: python-hello 5 | namespace: default 6 | spec: 7 | progressDeadlineSeconds: 600 8 | replicas: 1 9 | revisionHistoryLimit: 10 10 | selector: 11 | matchLabels: 12 | app: python-hello 13 | strategy: 14 | rollingUpdate: 15 | maxSurge: 25% 16 | maxUnavailable: 25% 17 | type: RollingUpdate 18 | template: 19 | metadata: 20 | labels: 21 | app: python-hello 22 | spec: 23 | containers: 24 | - image: harbor.idp-cp.tremolo.dev/myapp/mypython:a79bad2a 25 | imagePullPolicy: Always 26 | name: python-hello 27 | resources: {} 28 | terminationMessagePath: /dev/termination-log 29 | terminationMessagePolicy: File 30 | imagePullSecrets: 31 | - name: pull-secret 32 | dnsPolicy: ClusterFirst 33 | 34 | restartPolicy: Always 35 | schedulerName: default-scheduler 36 | securityContext: {} 37 | serviceAccount: default 38 | serviceAccountName: default 39 | terminationGracePeriodSeconds: 30 40 | -------------------------------------------------------------------------------- /chapter19/pulumi/.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | venv/ 3 | .pulumi 4 | -------------------------------------------------------------------------------- /chapter19/pulumi/Pulumi.yaml: -------------------------------------------------------------------------------- 1 | name: kube-enterprise-3-idp 2 | runtime: 3 | name: python 4 | options: 5 | virtualenv: /Users/marcboorshtein/Documents/bookv3/venv 6 | description: A minimal Python Pulumi program 7 | config: 8 | pulumi:tags: 9 | value: 10 | pulumi:template: python 11 | -------------------------------------------------------------------------------- /chapter19/pulumi/requirements.txt: -------------------------------------------------------------------------------- 1 | pulumi>=3.0.0,<4.0.0 2 | pulumi_kubernetes>=4.7.1 3 | kubernetes>=4.7.1 4 | beautifulsoup4 5 | pyyaml 6 | packaging 7 | pulumi_vault 8 | pulumiverse_harbor 9 | pulumi_gitlab -------------------------------------------------------------------------------- /chapter19/pulumi/src/devplatform/argocd/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Kubernetes-An-Enterprise-Guide-Third-Edition/7be4e8ded572e7e305ee6456a549ae58224d2e85/chapter19/pulumi/src/devplatform/argocd/__init__.py -------------------------------------------------------------------------------- /chapter19/pulumi/src/devplatform/cert_manager/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Kubernetes-An-Enterprise-Guide-Third-Edition/7be4e8ded572e7e305ee6456a549ae58224d2e85/chapter19/pulumi/src/devplatform/cert_manager/__init__.py -------------------------------------------------------------------------------- /chapter19/pulumi/src/devplatform/gitlab/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Kubernetes-An-Enterprise-Guide-Third-Edition/7be4e8ded572e7e305ee6456a549ae58224d2e85/chapter19/pulumi/src/devplatform/gitlab/__init__.py
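Note: the initialize_gitlab() function in the initialize.py module that follows creates its GitLab groups through the stack's default pulumi_gitlab provider. A minimal sketch of how that default provider could be configured before running the stack; the GitLab URL and token below are illustrative assumptions, not values from this repository:

# hypothetical default-provider settings for pulumi_gitlab;
# gitlab.example.com and glpat-example-token are placeholders
pulumi config set gitlab:baseUrl https://gitlab.example.com/api/v4
pulumi config set --secret gitlab:token glpat-example-token
pulumi up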
-------------------------------------------------------------------------------- /chapter19/pulumi/src/devplatform/gitlab/initialize.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import pulumi 3 | from pulumi_kubernetes import helm, Provider 4 | import pulumi_kubernetes as k8s 5 | from pulumi_kubernetes.apiextensions.CustomResource import CustomResource 6 | from ...lib.helm_chart_versions import get_latest_helm_chart_version 7 | import logging 8 | import kubernetes 9 | from kubernetes import config as kube_config, dynamic 10 | from kubernetes import client as k8s_client 11 | from kubernetes.dynamic.exceptions import ResourceNotFoundError 12 | from kubernetes.client import api_client 13 | import secrets 14 | import pulumi_gitlab as gitlab 15 | 16 | def initialize_gitlab(): 17 | # create a "group" (namespace) in gitlab 18 | cluster_ops_group = gitlab.Group("cluster-operations",path="cluster-operations",request_access_enabled=False) 19 | 20 | # create group that maps to cluster admins 21 | cluster_administrators_group = gitlab.Group("k8s-cluster-k8s-administrators",path="k8s-cluster-k8s-administrators",request_access_enabled=False) 22 | 23 | -------------------------------------------------------------------------------- /chapter19/pulumi/src/devplatform/harbor/__init.py__: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Kubernetes-An-Enterprise-Guide-Third-Edition/7be4e8ded572e7e305ee6456a549ae58224d2e85/chapter19/pulumi/src/devplatform/harbor/__init.py__ -------------------------------------------------------------------------------- /chapter19/pulumi/src/devplatform/mysql/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Kubernetes-An-Enterprise-Guide-Third-Edition/7be4e8ded572e7e305ee6456a549ae58224d2e85/chapter19/pulumi/src/devplatform/mysql/__init__.py -------------------------------------------------------------------------------- /chapter19/pulumi/src/devplatform/openunison/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Kubernetes-An-Enterprise-Guide-Third-Edition/7be4e8ded572e7e305ee6456a549ae58224d2e85/chapter19/pulumi/src/devplatform/openunison/__init__.py -------------------------------------------------------------------------------- /chapter19/pulumi/src/devplatform/openunison_idp/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Kubernetes-An-Enterprise-Guide-Third-Edition/7be4e8ded572e7e305ee6456a549ae58224d2e85/chapter19/pulumi/src/devplatform/openunison_idp/__init__.py -------------------------------------------------------------------------------- /chapter19/pulumi/src/devplatform/openunison_sat/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Kubernetes-An-Enterprise-Guide-Third-Edition/7be4e8ded572e7e305ee6456a549ae58224d2e85/chapter19/pulumi/src/devplatform/openunison_sat/__init__.py -------------------------------------------------------------------------------- /chapter19/pulumi/src/devplatform/smtp_blackhole/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/Kubernetes-An-Enterprise-Guide-Third-Edition/7be4e8ded572e7e305ee6456a549ae58224d2e85/chapter19/pulumi/src/devplatform/smtp_blackhole/__init__.py -------------------------------------------------------------------------------- /chapter19/pulumi/src/devplatform/vault/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Kubernetes-An-Enterprise-Guide-Third-Edition/7be4e8ded572e7e305ee6456a549ae58224d2e85/chapter19/pulumi/src/devplatform/vault/__init__.py -------------------------------------------------------------------------------- /chapter19/pulumi/src/hcl/vault-admin.hcl: -------------------------------------------------------------------------------- 1 | # Read system health check 2 | path "sys/health" 3 | { 4 | capabilities = ["read", "sudo"] 5 | } 6 | 7 | # Create and manage ACL policies broadly across Vault 8 | 9 | # List existing policies 10 | path "sys/policies/acl" 11 | { 12 | capabilities = ["list"] 13 | } 14 | 15 | # Create and manage ACL policies 16 | path "sys/policies/acl/*" 17 | { 18 | capabilities = ["create", "read", "update", "delete", "list", "sudo"] 19 | } 20 | 21 | # Enable and manage authentication methods broadly across Vault 22 | 23 | # Manage auth methods broadly across Vault 24 | path "auth/*" 25 | { 26 | capabilities = ["create", "read", "update", "delete", "list", "sudo"] 27 | } 28 | 29 | # Create, update, and delete auth methods 30 | path "sys/auth/*" 31 | { 32 | capabilities = ["create", "update", "delete", "sudo", "read"] 33 | } 34 | 35 | # List auth methods 36 | path "sys/auth" 37 | { 38 | capabilities = ["read"] 39 | } 40 | 41 | # Enable and manage the key/value secrets engine at `secret/` path 42 | 43 | # List, create, update, and delete key/value secrets 44 | path "secret/*" 45 | { 46 | capabilities = ["create", "read", "update", "delete", "list", "sudo"] 47 | } 48 | 49 | # Manage secrets engines 50 | path "sys/mounts/*" 51 | { 52 | capabilities = ["create", "read", "update", "delete", "list", "sudo"] 53 | } 54 | 55 | # List existing secrets engines. 56 | path "sys/mounts" 57 | { 58 | capabilities = ["read"] 59 | } 60 | -------------------------------------------------------------------------------- /chapter19/pulumi/src/hcl/vault-ou-admins.hcl: -------------------------------------------------------------------------------- 1 | path "*" { 2 | capabilities = ["create", "read", "update", "delete", "list", "sudo"] 3 | } -------------------------------------------------------------------------------- /chapter19/pulumi/src/helm/kube-enterprise-guide-openunison-idp/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /chapter19/pulumi/src/helm/kube-enterprise-guide-openunison-idp/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: kube-enterprise-guide-openunison-idp 3 | description: A Helm chart for Kubernetes 4 | 5 | # A chart can be either an 'application' or a 'library' chart. 6 | # 7 | # Application charts are a collection of templates that can be packaged into versioned archives 8 | # to be deployed. 9 | # 10 | # Library charts provide useful utilities or functions for the chart developer. They're included as 11 | # a dependency of application charts to inject those utilities and functions into the rendering 12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed. 13 | type: application 14 | 15 | # This is the chart version. This version number should be incremented each time you make changes 16 | # to the chart and its templates, including the app version. 17 | # Versions are expected to follow Semantic Versioning (https://semver.org/) 18 | version: 3.0.10 19 | 20 | # This is the version number of the application being deployed. This version number should be 21 | # incremented each time you make changes to the application. Versions are not expected to 22 | # follow Semantic Versioning. They should reflect the version the application is using. 23 | # It is recommended to use it with quotes. 24 | appVersion: "1.16.0" 25 | -------------------------------------------------------------------------------- /chapter19/pulumi/src/helm/kube-enterprise-guide-openunison-idp/templates/applications/get-target-token.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: openunison.tremolo.io/v1 3 | kind: Application 4 | metadata: 5 | name: get-target-token 6 | namespace: openunison 7 | spec: 8 | azTimeoutMillis: 3000 9 | isApp: true 10 | urls: 11 | - hosts: 12 | - "#[OU_HOST]" 13 | filterChain: 14 | - className: com.tremolosecurity.proxy.filters.JavaScriptFilter 15 | params: 16 | javaScript: |- 17 | GlobalEntries = Java.type("com.tremolosecurity.server.GlobalEntries"); 18 | HashMap = Java.type("java.util.HashMap"); 19 | 20 | function initFilter(config) { 21 | 22 | } 23 | 24 | function doFilter(request,response,chain) { 25 | var targetName = request.getParameter("targetName").getValues().get(0); 26 | var k8s = GlobalEntries.getGlobalEntries().getConfigManager().getProvisioningEngine().getTarget(targetName).getProvider() 27 | 28 | 29 | response.getWriter().print(k8s.getAuthToken()); 30 | } 31 | 32 | uri: /api/get-target-token 33 | azRules: 34 | - scope: filter 35 | constraint: (sub=system:serviceaccount:argocd:argocd-application-controller) 36 | - scope: filter 37 | constraint: (sub=system:serviceaccount:openunison:openunison-{{ .Values.openunison.orchestra_name }}) 38 | authChain: oauth2k8s 39 | results: {} 40 | cookieConfig: 41 | sessionCookieName: tremolosession 42 | domain: "#[OU_HOST]" 43 | secure: true 44 | httpOnly: true 45 | logoutURI: "/logout" 46 | keyAlias: session-unison 47 | timeout: 1 48 | scope: -1 49 | cookiesEnabled: false 
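Note: the get-target-token Application above returns OpenUnison's stored token for a named provisioning target from /api/get-target-token, and its azRules limit callers to the argocd-application-controller and openunison orchestra service accounts. A minimal sketch of how the endpoint could be called; the host and targetName are illustrative assumptions, and kubectl create token assumes Kubernetes v1.24 or later:

# hypothetical invocation; OU_HOST is a placeholder OpenUnison hostname
OU_HOST=k8sou.apps.192-168-2-14.nip.io
SA_TOKEN=$(kubectl create token argocd-application-controller -n argocd)
curl -H "Authorization: Bearer $SA_TOKEN" "https://$OU_HOST/api/get-target-token?targetName=gitlab"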
-------------------------------------------------------------------------------- /chapter19/pulumi/src/helm/kube-enterprise-guide-openunison-idp/templates/configmaps/vcluster-vault-policy-template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: vcluster-vault-policy-template 5 | namespace: {{ .Release.Namespace }} 6 | data: 7 | template.hcl: |- 8 | path "secret/data/vclusters/$nsName$-$env$/ns/{{ "{{" }}identity.entity.aliases.$authMount$.metadata.service_account_namespace{{ "}}" }}" { 9 | capabilities = ["read"] 10 | } 11 | 12 | path "secret/data/vclusters/$nsName$-$env$/ns/{{ "{{" }}identity.entity.aliases.$authMount$.metadata.service_account_namespace{{ "}}" }}/*" { 13 | capabilities = ["read"] 14 | } -------------------------------------------------------------------------------- /chapter19/pulumi/src/helm/kube-enterprise-guide-openunison-idp/templates/targets/dev-mysql.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: openunison.tremolo.io/v1 2 | kind: Target 3 | metadata: 4 | name: dev-mysql 5 | namespace: {{ .Release.Namespace }} 6 | spec: 7 | className: com.tremolosecurity.provisioning.core.providers.BasicDB 8 | params: 9 | - name: driver 10 | value: com.mysql.jdbc.Driver 11 | - name: url 12 | value: jdbc:mysql://mysql.{{ .Values.dev_dns_suffix }} 13 | - name: user 14 | value: root 15 | - name: maxCons 16 | value: "10" 17 | - name: maxIdleCons 18 | value: "10" 19 | - name: validationQuery 20 | value: SELECT 1 21 | - name: userTable 22 | value: localUsers 23 | - name: userPrimaryKey 24 | value: userId 25 | - name: userName 26 | value: sub 27 | - name: groupMode 28 | value: ManyToMany 29 | - name: groupTable 30 | value: localGroups 31 | - name: groupName 32 | value: name 33 | - name: groupUserKey 34 | value: userId 35 | - name: groupLinkTableName 36 | value: userGroups 37 | - name: groupGroupKey 38 | value: groupId 39 | - name: groupPrimaryKey 40 | value: groupId 41 | - name: userSQL 42 | value: "" 43 | - name: groupSQL 44 | value: "" 45 | - name: customProvider 46 | value: "" 47 | - name: readOnly 48 | value: "true" 49 | secretParams: 50 | - name: password 51 | secretKey: dev 52 | secretName: mysql-passwords 53 | targetAttributes: 54 | - name: sub 55 | source: sub 56 | sourceType: user 57 | - name: mail 58 | source: mail 59 | sourceType: user 60 | - name: firstName 61 | source: firstName 62 | sourceType: user 63 | - name: lastName 64 | source: lastName 65 | sourceType: user -------------------------------------------------------------------------------- /chapter19/pulumi/src/helm/kube-enterprise-guide-openunison-idp/templates/targets/gitlab.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: openunison.tremolo.io/v1 2 | kind: Target 3 | metadata: 4 | name: gitlab 5 | namespace: {{ .Release.Namespace }} 6 | labels: 7 | app.kubernetes.io/name: openunison 8 | app.kubernetes.io/instance: openunison-{{ .Release.Name }} 9 | app.kubernetes.io/component: openunison-targets 10 | app.kubernetes.io/part-of: kube-enterprise-guide 11 | spec: 12 | className: com.tremolosecurity.unison.gitlab.provisioning.targets.GitlabUserProvider 13 | params: 14 | - name: url 15 | value: "{{ .Values.gitlab_url }}" 16 | secretParams: 17 | - name: token 18 | secretName: gitlab-target 19 | secretKey: gitlab.root.token 20 | targetAttributes: 21 | - name: username 22 | source: username 23 | sourceType: user 24 | - 
name: name 25 | source: name 26 | sourceType: user 27 | - name: email 28 | source: email 29 | sourceType: user 30 | - name: isAdmin 31 | source: isAdmin 32 | sourceType: user 33 | - name: skipConfirmation 34 | source: "true" 35 | sourceType: static 36 | - name: projectsLimit 37 | source: "100000" 38 | sourceType: static -------------------------------------------------------------------------------- /chapter19/pulumi/src/helm/kube-enterprise-guide-openunison-idp/templates/targets/prod-mysql.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: openunison.tremolo.io/v1 2 | kind: Target 3 | metadata: 4 | name: prod-mysql 5 | namespace: {{ .Release.Namespace }} 6 | spec: 7 | className: com.tremolosecurity.provisioning.core.providers.BasicDB 8 | params: 9 | - name: driver 10 | value: com.mysql.jdbc.Driver 11 | - name: url 12 | value: jdbc:mysql://mysql.{{ .Values.prod_dns_suffix }} 13 | - name: user 14 | value: root 15 | - name: maxCons 16 | value: "10" 17 | - name: maxIdleCons 18 | value: "10" 19 | - name: validationQuery 20 | value: SELECT 1 21 | - name: userTable 22 | value: localUsers 23 | - name: userPrimaryKey 24 | value: userId 25 | - name: userName 26 | value: sub 27 | - name: groupMode 28 | value: ManyToMany 29 | - name: groupTable 30 | value: localGroups 31 | - name: groupName 32 | value: name 33 | - name: groupUserKey 34 | value: userId 35 | - name: groupLinkTableName 36 | value: userGroups 37 | - name: groupGroupKey 38 | value: groupId 39 | - name: groupPrimaryKey 40 | value: groupId 41 | - name: userSQL 42 | value: "" 43 | - name: groupSQL 44 | value: "" 45 | - name: customProvider 46 | value: "" 47 | - name: readOnly 48 | value: "true" 49 | secretParams: 50 | - name: password 51 | secretKey: prod 52 | secretName: mysql-passwords 53 | targetAttributes: 54 | - name: sub 55 | source: sub 56 | sourceType: user 57 | - name: mail 58 | source: mail 59 | sourceType: user 60 | - name: firstName 61 | source: firstName 62 | sourceType: user 63 | - name: lastName 64 | source: lastName 65 | sourceType: user -------------------------------------------------------------------------------- /chapter19/pulumi/src/helm/kube-enterprise-guide-openunison-idp/templates/trusts/argocd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: openunison.tremolo.io/v1 2 | kind: Trust 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: openunison 6 | app.kubernetes.io/instance: openunison-{{ .Release.Name }} 7 | app.kubernetes.io/component: argocd-sso 8 | app.kubernetes.io/part-of: kube-enterprise-guide 9 | name: argocd 10 | namespace: {{ .Release.Namespace }} 11 | spec: 12 | accessTokenSkewMillis: 120000 13 | accessTokenTimeToLive: 1200000 14 | authChainName: login-service 15 | clientId: argocd 16 | codeLastMileKeyName: lastmile-oidc 17 | codeTokenSkewMilis: 60000 18 | publicEndpoint: true 19 | redirectURI: 20 | - {{ .Values.argocd_url }}/auth/callback 21 | - http://localhost:8085/auth/callback 22 | signedUserInfo: true 23 | verifyRedirect: true 24 | -------------------------------------------------------------------------------- /chapter19/pulumi/src/helm/kube-enterprise-guide-openunison-idp/templates/trusts/gitlab.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: openunison.tremolo.io/v1 2 | kind: Trust 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: openunison 6 | app.kubernetes.io/instance: openunison-{{ .Release.Name }} 7 | 
app.kubernetes.io/component: gitlab-sso 8 | app.kubernetes.io/part-of: kube-enterprise-guide 9 | name: gitlab 10 | namespace: {{ .Release.Namespace }} 11 | spec: 12 | accessTokenSkewMillis: 120000 13 | accessTokenTimeToLive: 60000 14 | authChainName: login-service 15 | clientId: gitlab 16 | clientSecret: 17 | keyName: gitlab.oidc.client_secret 18 | secretName: gitlab-oidc 19 | codeLastMileKeyName: lastmile-oidc 20 | codeTokenSkewMilis: 60000 21 | publicEndpoint: false 22 | redirectURI: 23 | - {{ .Values.gitlab_url }}/users/auth/openid_connect/callback 24 | signedUserInfo: false 25 | verifyRedirect: true -------------------------------------------------------------------------------- /chapter19/pulumi/src/helm/kube-enterprise-guide-openunison-idp/templates/trusts/harbor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: openunison.tremolo.io/v1 2 | kind: Trust 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: openunison 6 | app.kubernetes.io/instance: openunison-orchestra 7 | app.kubernetes.io/component: harbor-sso 8 | app.kubernetes.io/part-of: kube-enterprise-guide 9 | name: harbor 10 | namespace: {{ .Release.Namespace }} 11 | spec: 12 | accessTokenSkewMillis: 120000 13 | accessTokenTimeToLive: 60000 14 | authChainName: login-service 15 | clientId: harbor # this is your harbor client id 16 | clientSecret: 17 | keyName: harbor.oidc.client_secret 18 | secretName: harbor-oidc 19 | codeLastMileKeyName: lastmile-oidc 20 | codeTokenSkewMilis: 60000 21 | publicEndpoint: true 22 | redirectURI: 23 | - {{ .Values.harbor_url }}/c/oidc/callback 24 | signedUserInfo: false 25 | verifyRedirect: true -------------------------------------------------------------------------------- /chapter19/pulumi/src/helm/kube-enterprise-guide-openunison-idp/templates/trusts/vault.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: openunison.tremolo.io/v1 2 | kind: Trust 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: openunison 6 | app.kubernetes.io/instance: openunison-orchestra 7 | app.kubernetes.io/component: vault-sso 8 | app.kubernetes.io/part-of: kube-enterprise-guide 9 | name: vault 10 | namespace: {{ .Release.Namespace }} 11 | spec: 12 | accessTokenSkewMillis: 120000 13 | accessTokenTimeToLive: 60000 14 | authChainName: login-service 15 | clientId: vault # this is your vault client id 16 | clientSecret: 17 | keyName: vault.oidc.client_secret 18 | secretName: vault-oidc 19 | codeLastMileKeyName: lastmile-oidc 20 | codeTokenSkewMilis: 60000 21 | publicEndpoint: true 22 | redirectURI: 23 | - {{ .Values.vault_url }}/ui/vault/auth/oidc/oidc/callback 24 | - {{ .Values.vault_url }}/oidc/oidc/callback 25 | signedUserInfo: false 26 | verifyRedirect: true -------------------------------------------------------------------------------- /chapter19/pulumi/src/helm/kube-enterprise-guide-openunison-idp/values.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Kubernetes-An-Enterprise-Guide-Third-Edition/7be4e8ded572e7e305ee6456a549ae58224d2e85/chapter19/pulumi/src/helm/kube-enterprise-guide-openunison-idp/values.yaml -------------------------------------------------------------------------------- /chapter19/pulumi/src/lib/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/PacktPublishing/Kubernetes-An-Enterprise-Guide-Third-Edition/7be4e8ded572e7e305ee6456a549ae58224d2e85/chapter19/pulumi/src/lib/__init__.py -------------------------------------------------------------------------------- /chapter19/pulumi/src/lib/kubernetes_api_endpoint.py: -------------------------------------------------------------------------------- 1 | import pulumi 2 | from pulumi_kubernetes import core, meta, Provider 3 | 4 | class KubernetesApiEndpointIp(pulumi.ComponentResource): 5 | """ 6 | Represents a Kubernetes API endpoint IP address. 7 | 8 | Args: 9 | name (str): The name of the resource. 10 | k8s_provider (Provider): The Kubernetes provider. 11 | 12 | Attributes: 13 | endpoint (core.v1.Endpoints): The Kubernetes endpoint. 14 | ips (pulumi.Output[str]): The comma-separated string of IP addresses. 15 | 16 | """ 17 | def __init__(self, name: str, k8s_provider: Provider): 18 | super().__init__('custom:x:KubernetesApiEndpointIp', name, {}, opts=pulumi.ResourceOptions(provider=k8s_provider)) 19 | 20 | self.endpoint = core.v1.Endpoints.get( 21 | "kubernetes", 22 | "kubernetes", 23 | opts=pulumi.ResourceOptions(provider=k8s_provider) 24 | ) 25 | 26 | self.ips = self.endpoint.subsets.apply( 27 | lambda subsets: ','.join([address.ip for subset in subsets for address in subset.addresses]) if subsets else '' 28 | ) 29 | 30 | self.register_outputs({"ip_string": self.ips}) 31 | -------------------------------------------------------------------------------- /chapter19/pulumi/src/lib/namespace.py: -------------------------------------------------------------------------------- 1 | import pulumi 2 | from pulumi_kubernetes import core as k8s_core, meta as k8s_meta 3 | from typing import List 4 | 5 | def create_namespaces(namespaces: List[str], provider): 6 | """ 7 | Create Kubernetes namespaces. 8 | 9 | Args: 10 | namespaces (List[str]): List of namespace names. 11 | provider: The provider for the namespaces. 12 | 13 | Returns: 14 | List[k8s_core.v1.Namespace]: List of created namespace objects. 
15 | """ 16 | namespace_objects = [] 17 | for ns_name in namespaces: 18 | ns = k8s_core.v1.Namespace( 19 | ns_name, 20 | metadata=k8s_meta.v1.ObjectMetaArgs(name=ns_name), 21 | opts=pulumi.ResourceOptions(provider=provider) 22 | ) 23 | namespace_objects.append(ns) 24 | return namespace_objects 25 | -------------------------------------------------------------------------------- /chapter19/pulumi/src/yaml/argocd-helm-support.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: argocd-remote-tokens 6 | namespace: argocd 7 | data: 8 | remote-token.sh: |- 9 | #!/bin/bash 10 | 11 | REMOTE_TOKEN=$(/custom-tools/curl --insecure -H "Authorization: Bearer $(<$3)" https://$1/api/get-target-token?targetName=$2 2>/dev/null) 12 | 13 | echo -n "{\"apiVersion\": \"client.authentication.k8s.io/v1\",\"kind\": \"ExecCredential\",\"status\": {\"token\": \"$REMOTE_TOKEN\"}}" -------------------------------------------------------------------------------- /chapter19/pulumi/src/yaml/mysql.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: mysql 6 | --- 7 | apiVersion: apps/v1 8 | kind: StatefulSet 9 | metadata: 10 | labels: 11 | app: mysql 12 | name: mysql 13 | namespace: mysql 14 | spec: 15 | serviceName: mysql 16 | replicas: 1 17 | revisionHistoryLimit: 10 18 | selector: 19 | matchLabels: 20 | app: "mysql" 21 | template: 22 | metadata: 23 | creationTimestamp: null 24 | labels: 25 | app: mysql 26 | spec: 27 | containers: 28 | - env: 29 | - name: MYSQL_ROOT_PASSWORD 30 | value: start123 31 | - name: MYSQL_DATABASE 32 | value: unison 33 | - name: MYSQL_USER 34 | value: unison 35 | - name: MYSQL_PASSWORD 36 | value: startt123 37 | image: mysql 38 | imagePullPolicy: Always 39 | name: mysql 40 | resources: {} 41 | terminationMessagePath: /dev/termination-log 42 | terminationMessagePolicy: File 43 | volumeMounts: 44 | - mountPath: /var/lib/mysql 45 | name: mysql-data 46 | dnsPolicy: ClusterFirst 47 | restartPolicy: Always 48 | schedulerName: default-scheduler 49 | securityContext: {} 50 | terminationGracePeriodSeconds: 30 51 | volumes: [] 52 | volumeClaimTemplates: 53 | - metadata: 54 | name: mysql-data 55 | spec: 56 | accessModes: [ "ReadWriteOnce" ] 57 | resources: 58 | requests: 59 | storage: 1Gi 60 | --- 61 | apiVersion: v1 62 | kind: Service 63 | metadata: 64 | labels: 65 | app: mysql 66 | name: mysql 67 | namespace: mysql 68 | spec: 69 | ports: 70 | - port: 3306 71 | protocol: TCP 72 | targetPort: 3306 73 | selector: 74 | app: mysql 75 | sessionAffinity: None 76 | type: ClusterIP -------------------------------------------------------------------------------- /chapter19/pulumi/src/yaml/vaultintegration.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | labels: 6 | kubernetes.io/metadata.name: vault-integration 7 | name: vault-integration 8 | spec: {} 9 | --- 10 | apiVersion: v1 11 | kind: ServiceAccount 12 | metadata: 13 | name: vault-client 14 | namespace: vault-integration 15 | --- 16 | apiVersion: rbac.authorization.k8s.io/v1 17 | kind: ClusterRoleBinding 18 | metadata: 19 | name: role-tokenreview-binding 20 | roleRef: 21 | apiGroup: rbac.authorization.k8s.io 22 | kind: ClusterRole 23 | name: system:auth-delegator 24 | subjects: 25 | - kind: ServiceAccount 26 | name: vault-client 27 | namespace: 
vault-integration -------------------------------------------------------------------------------- /chapter19/scripts/get_gitlab_root_pwd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | kubectl get secret $(kubectl get secrets -n gitlab | grep root | awk '{print $1}') -n gitlab -o json | jq -r '.data.password' | base64 -d -------------------------------------------------------------------------------- /chapter19/scripts/harbor-get-root-password.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | pulumi config set harbor:password $(kubectl get secret harbor-admin -n harbor -o json | jq -r '.data["harbor-admin"]' | base64 -d) --secret 4 | pulumi config set harbor.configured true -------------------------------------------------------------------------------- /chapter19/scripts/patch-nginx.txt: -------------------------------------------------------------------------------- 1 | kubectl patch deployments ingress-nginx-controller -n ingress-nginx -p '{"spec":{"template":{"spec":{"containers":[{"name":"controller","ports":[{"containerPort":80,"hostPort":80,"protocol":"TCP"},{"containerPort":443,"hostPort":443,"protocol":"TCP"},{"containerPort":22,"hostPort":22,"protocol":"TCP"},{"containerPort":3306,"hostPort":3306,"protocol":"TCP"}]}]}}}}' -------------------------------------------------------------------------------- /chapter19/scripts/patch_nginx_ssh.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | helm upgrade --install ingress-nginx ingress-nginx --repo https://kubernetes.github.io/ingress-nginx --namespace ingress-nginx --create-namespace --set tcp.3306=mysql/mysql:3306 --set tcp.22=gitlab/$(kubectl get svc -n gitlab | grep shell | awk '{print $1}'):22 -------------------------------------------------------------------------------- /chapter19/scripts/pulumi-initialize.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | pulumi config set openunison.cp.dns_suffix idp-cp.tremolo.dev 4 | pulumi config set kube.cp.context kubernetes-admin@kubernetes 5 | pulumi config set harbor:url https://harbor.idp-cp.tremolo.dev 6 | pulumi config set kube.cp.path /Users/marcboorshtein/.kube/idp-cp 7 | pulumi config set harbor:username admin 8 | pulumi config set openunison.dev.dns_suffix idp-dev.tremolo.dev 9 | pulumi config set openunison.prod.dns_suffix idp-prod.tremolo.dev -------------------------------------------------------------------------------- /chapter19/vault/unseal.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | 5 | 6 | pod_name=$(kubectl get pods -lapp.kubernetes.io/name=vault -n vault -o json | jq -r '.items[0].metadata.name') 7 | 8 | # wait for vault to be running 9 | while [[ $(kubectl get pod $pod_name -n vault -o 'jsonpath={..status.containerStatuses[0].started}') != "true" ]]; do echo "waiting for vault pod" && sleep 1; done 10 | 11 | # get the seal secrets 12 | path_to_secrets=$(mktemp) 13 | echo $path_to_secrets 14 | 15 | kubectl exec --stdin=true --tty=true $pod_name -n vault -- vault operator init --format=json > $path_to_secrets 16 | 17 | # unseal the vault pod 18 | KEYS=$(jq -r '@sh "\(.unseal_keys_hex)\n"'< $path_to_secrets) 19 | echo $KEYS 20 | 21 | for KEY in $KEYS 22 | do 23 | echo $KEY 24 | KEY2=$(echo -n $KEY | cut -d "'" -f 2) 25 | kubectl exec -i $pod_name -n vault -- vault operator unseal $KEY2 26 | done 27 | 28 | # get the root token and set the secret 29 | pulumi config set vault.key $(jq -r '.root_token' < $path_to_secrets) --secret 30 | 31 | # save all tokens 32 | pulumi config set vault.tokens --secret < $path_to_secrets
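Once unseal.sh has run, every unseal key has been applied and the root token and key material are stored as Pulumi secrets. A quick way to confirm the pod actually left the sealed state, reusing the script's own pod lookup (a sketch, not one of the chapter's scripts):

#!/bin/bash
# Verify the Vault pod is unsealed; "false" means the unseal loop above succeeded.
pod_name=$(kubectl get pods -lapp.kubernetes.io/name=vault -n vault -o json | jq -r '.items[0].metadata.name')
kubectl exec -i $pod_name -n vault -- vault status -format=json | jq '.sealed'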
-------------------------------------------------------------------------------- /chapter19/vault/unseal_after_init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | pod_name=$(kubectl get pods -lapp.kubernetes.io/name=vault -n vault -o json | jq -r '.items[0].metadata.name') 3 | 4 | # wait for vault to be running 5 | while [[ $(kubectl get pod $pod_name -n vault -o 'jsonpath={..status.containerStatuses[0].started}') != "true" ]]; do echo "waiting for vault pod" && sleep 1; done 6 | 7 | # get the seal secrets 8 | path_to_secrets=$(mktemp) 9 | echo $path_to_secrets 10 | 11 | pulumi config --show-secrets -j | jq -r '.["kube-enterprise-3-idp:vault.tokens"].value' > $path_to_secrets 12 | 13 | # unseal the vault pod 14 | KEYS=$(jq -r '@sh "\(.unseal_keys_hex)\n"'< $path_to_secrets) 15 | echo $KEYS 16 | 17 | for KEY in $KEYS 18 | do 19 | echo $KEY 20 | KEY2=$(echo -n $KEY | cut -d "'" -f 2) 21 | kubectl exec -i $pod_name -n vault -- vault operator unseal $KEY2 22 | done 23 | 24 | -------------------------------------------------------------------------------- /chapter2/.create-cluster.sh.swp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Kubernetes-An-Enterprise-Guide-Third-Edition/7be4e8ded572e7e305ee6456a549ae58224d2e85/chapter2/.create-cluster.sh.swp -------------------------------------------------------------------------------- /chapter2/HAdemo/multinode.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | networking: 4 | apiServerAddress: "0.0.0.0" 5 | disableDefaultCNI: true 6 | apiServerPort: 6443 7 | podSubnet: "10.240.0.0/16" 8 | serviceSubnet: "10.96.0.0/16" 9 | nodes: 10 | - role: control-plane 11 | - role: control-plane 12 | - role: control-plane 13 | - role: worker 14 | - role: worker 15 | - role: worker 16 |
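The multinode.yaml configuration above gives kind three control-plane and three worker nodes with the default CNI disabled. A hedged example of launching it; the cluster name "hademo" is an arbitrary assumption:

#!/bin/bash
# Create the HA demo cluster from the config above.
kind create cluster --name hademo --config multinode.yaml

# Nodes will stay NotReady until a CNI such as Calico is installed,
# because the config sets disableDefaultCNI: true.
kubectl get nodes -o wide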
-------------------------------------------------------------------------------- /chapter2/calico/custom-resources.yaml: -------------------------------------------------------------------------------- 1 | # This section includes base Calico installation configuration. 2 | # For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.Installation 3 | apiVersion: operator.tigera.io/v1 4 | kind: Installation 5 | metadata: 6 | name: default 7 | spec: 8 | # Configures Calico networking. 9 | calicoNetwork: 10 | # Note: The ipPools section cannot be modified post-install. 11 | ipPools: 12 | - blockSize: 26 13 | cidr: 10.240.0.0/16 14 | encapsulation: VXLANCrossSubnet 15 | natOutgoing: Enabled 16 | nodeSelector: all() 17 | 18 | --- 19 | 20 | # This section configures the Calico API server. 21 | # For more information, see: https://projectcalico.docs.tigera.io/master/reference/installation/api#operator.tigera.io/v1.APIServer 22 | apiVersion: operator.tigera.io/v1 23 | kind: APIServer 24 | metadata: 25 | name: default 26 | spec: {} 27 | 28 | -------------------------------------------------------------------------------- /chapter2/cluster01-kind.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | runtimeConfig: 4 | "authentication.k8s.io/v1beta1": "true" 5 | "admissionregistration.k8s.io/v1beta1": true 6 | featureGates: 7 | "ValidatingAdmissionPolicy": true 8 | networking: 9 | apiServerAddress: "0.0.0.0" 10 | disableDefaultCNI: true 11 | apiServerPort: 6443 12 | podSubnet: "10.240.0.0/16" 13 | serviceSubnet: "10.96.0.0/16" 14 | nodes: 15 | - role: control-plane 16 | extraPortMappings: 17 | - containerPort: 2379 18 | hostPort: 2379 19 | extraMounts: 20 | - hostPath: /sys/kernel/security 21 | containerPath: /sys/kernel/security 22 | - role: worker 23 | extraPortMappings: 24 | - containerPort: 80 25 | hostPort: 80 26 | - containerPort: 443 27 | hostPort: 443 28 | - containerPort: 2222 29 | hostPort: 2222 30 | extraMounts: 31 | - hostPath: /sys/kernel/security 32 | containerPath: /sys/kernel/security 33 | 34 | -------------------------------------------------------------------------------- /chapter2/pvc-test/test-pvc.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: test-claim 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | resources: 9 | requests: 10 | storage: 1Mi 11 | --- 12 | kind: Pod 13 | apiVersion: v1 14 | metadata: 15 | name: test-pvc-claim 16 | spec: 17 | containers: 18 | - name: test-pod 19 | image: busybox 20 | command: 21 | - "/bin/sh" 22 | args: 23 | - "-c" 24 | - "touch /mnt/test && exit 0 || exit 1" 25 | volumeMounts: 26 | - name: test-pvc 27 | mountPath: "/mnt" 28 | restartPolicy: "Never" 29 | volumes: 30 | - name: test-pvc 31 | persistentVolumeClaim: 32 | claimName: test-claim 33 | 34 | -------------------------------------------------------------------------------- /chapter4/ingress/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: nginx-ingress 5 | spec: 6 | rules: 7 | - host: "webserver.$hostip.nip.io" 8 | http: 9 | paths: 10 | - path: / 11 | pathType: Prefix 12 | backend: 13 | service: 14 | name: nginx-web 15 | port: 16 | number: 8080 17 | -------------------------------------------------------------------------------- /chapter4/ingress/ngnix-ingress-remove.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | clear 3 | 4 | # Remove the NGINX deployment, service, and ingress created in the ingress example 5 | tput setaf 5 6 | echo -e "\n*******************************************************************************************************************" 7 | echo -e "Cleaning up the NGINX resources" 8 | echo -e "*******************************************************************************************************************" 9 | tput setaf 2 10 | kubectl delete deployment nginx-web 11 | kubectl delete svc nginx-web 12 | kubectl delete ingress nginx-ingress 13 | 14 | tput setaf 7 15 | echo -e "\n
\n*******************************************************************************************************************" 16 | echo -e "Done. The ingress example has been removed from the cluster." 17 | echo -e "******************************************************************************************************************* \n\n" 18 | tput setaf 2 19 | 20 | -------------------------------------------------------------------------------- /chapter4/metallb/dns-multi.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: coredns-tcp 5 | namespace: kube-system 6 | spec: 7 | selector: 8 | k8s-app: kube-dns 9 | ports: 10 | - name: dns-tcp 11 | port: 53 12 | protocol: TCP 13 | targetPort: 53 14 | - name: dns-udp 15 | port: 53 16 | protocol: UDP 17 | targetPort: 53 18 | type: LoadBalancer 19 | 20 | -------------------------------------------------------------------------------- /chapter4/metallb/l2advertisement.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: L2Advertisement 3 | metadata: 4 | name: l2-all-pools 5 | namespace: metallb-system 6 | -------------------------------------------------------------------------------- /chapter4/metallb/metallb-pool-2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: IPAddressPool 3 | metadata: 4 | name: pool-02 5 | namespace: metallb-system 6 | spec: 7 | addresses: 8 | - 9 | 10 | -------------------------------------------------------------------------------- /chapter4/metallb/metallb-pool-template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: IPAddressPool 3 | metadata: 4 | name: pool-01 5 | namespace: metallb-system 6 | spec: 7 | addresses: 8 | - 9 | 10 | -------------------------------------------------------------------------------- /chapter4/metallb/metallb-pool-template2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: IPAddressPool 3 | metadata: 4 | name: pool-02 5 | namespace: metallb-system 6 | spec: 7 | addresses: 8 | - 9 | 10 | -------------------------------------------------------------------------------- /chapter4/metallb/metallb-pool.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: IPAddressPool 3 | metadata: 4 | name: pool-01 5 | namespace: metallb-system 6 | spec: 7 | addresses: 8 | - 9 | 10 | -------------------------------------------------------------------------------- /chapter4/metallb/nginx-lb-2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-lb-pool02 5 | annotations: 6 | metallb.universe.tf/address-pool: pool-02 7 | namespace: default 8 | spec: 9 | ports: 10 | - port: 80 11 | protocol: TCP 12 | targetPort: 8080 13 | selector: 14 | run: nginx-lb 15 | type: LoadBalancer 16 | 17 | -------------------------------------------------------------------------------- /chapter4/metallb/nginx-lb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | clear 3 | 4 | # Deploy a NGINX pod that we will expose as a LoadBalancer service 5 | tput setaf 3 6 | echo -e 
"\n*******************************************************************************************************************" 7 | echo -e "Deploying a NGINX pod and LoadBalancer service in the default namespace" 8 | echo -e "*******************************************************************************************************************\n" 9 | tput setaf 2 10 | kubectl run nginx-lb --image bitnami/nginx 11 | kubectl create -f nginx-lb.yaml 12 | 13 | echo -e "\n\n" 14 | -------------------------------------------------------------------------------- /chapter4/metallb/nginx-lb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-lb 5 | namespace: default 6 | spec: 7 | ports: 8 | - port: 80 9 | protocol: TCP 10 | targetPort: 8080 11 | selector: 12 | run: nginx-lb 13 | type: LoadBalancer 14 | 15 | -------------------------------------------------------------------------------- /chapter4/metallb/nginx-loadbalancer.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | clear 3 | 4 | # Create a simple NGINX deployment using kubectl and name it nginx-web 5 | tput setaf 5 6 | echo -e "\n*******************************************************************************************************************" 7 | echo -e "Deploying the NGINX pod" 8 | echo -e "*******************************************************************************************************************" 9 | tput setaf 2 10 | kubectl create deployment nginx-web --image bitnami/nginx 11 | 12 | # Create a LoadBalancer service that exposes the Deployment on port 8080 called nginx-web 13 | tput setaf 5 14 | echo -e "\n*******************************************************************************************************************" 15 | echo -e "Creating the NGINX LoadBalancer service" 16 | echo -e "*******************************************************************************************************************" 17 | tput setaf 2 18 | kubectl expose deployment nginx-web --port 80 --target-port 8080 --type=LoadBalancer --name nginx-web-lb 19 | 20 | tput setaf 5 21 | echo -e "\n*******************************************************************************************************************" 22 | echo -e "Getting the LoadBalancer IP address for the NGINX service" 23 | echo -e "*******************************************************************************************************************" 24 | tput setaf 2 25 | svc_ip=$(kubectl get service nginx-web-lb -o jsonpath='{.status.loadBalancer.ingress[0].ip}') 26 | 27 | tput setaf 7 28 | echo -e "\n \n*******************************************************************************************************************" 29 | echo -e "The NGINX server has been exposed on a LoadBalancer service with IP address: $svc_ip \n" 30 | echo -e "\n\n Due to how networking works with KinD, we can only test the LoadBalancer address from the Docker host itself" 31 | echo -e "You can do this by using a simple curl command: curl $svc_ip" 32 | echo -e "******************************************************************************************************************* \n\n" 33 | tput setaf 2 34 | 35 | -------------------------------------------------------------------------------- /chapter4/netpol/backend-db-netpol.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | 
metadata: 4 | name: backend-netpol 5 | namespace: sales 6 | spec: 7 | podSelector: 8 | matchLabels: 9 | app.kubernetes.io/name: postgresql 10 | ingress: 11 | - from: 12 | - podSelector: 13 | matchLabels: 14 | app: frontend 15 | ports: 16 | - protocol: TCP 17 | port: 5432 18 | policyTypes: 19 | - Ingress 20 | -------------------------------------------------------------------------------- /chapter4/netpol/remove-all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | clear 3 | 4 | tput setaf 3 5 | echo -e "*******************************************************************************************************************" 6 | echo -e "Deleting Network Policy" 7 | echo -e "*******************************************************************************************************************" 8 | kubectl delete -f ./backend-db-netpol.yaml -n sales 9 | 10 | tput setaf 5 11 | echo -e "\n*******************************************************************************************************************" 12 | echo -e "Deleting DB StatefulSet" 13 | echo -e "*******************************************************************************************************************" 14 | tput setaf 2 15 | kubectl delete statefulset.apps/db-postgresql -n sales 16 | 17 | tput setaf 5 18 | echo -e "\n*******************************************************************************************************************" 19 | echo -e "Deleting Sales Namespace" 20 | echo -e "*******************************************************************************************************************" 21 | tput setaf 2 22 | kubectl delete ns sales 23 | 24 | -------------------------------------------------------------------------------- /chapter5/etcd/deploy-etcd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | clear 3 | 4 | tput setaf 6 5 | echo -e "\n \n*******************************************************************************************************************" 6 | echo -e "Deploying ETCD into your cluster" 7 | echo -e "*******************************************************************************************************************" 8 | 9 | # Create a new namespace called etcd-dns 10 | # You can have Helm create the namespace as well using the --create-namespace option. We are creating the namespace using 11 | # kubectl as an example only. There are advantages to creating a namespace using kubectl over Helm in most Enterprise 12 | # clusters. When you use the --create-namespace option in Helm, you can only create a namespace - you cannot set any options. 13 | # Many enterprises will create labels on namespaces, at a minimum. Also, without --create-namespace, the Helm deployment 14 | # will fail if the namespace does not already exist, and you would need to create it before running the Helm command. 15 | tput setaf 6 16 | echo -e "\n \n*******************************************************************************************************************" 17 | echo -e "Creating the etcd-dns namespace" 18 | echo -e "*******************************************************************************************************************" 19 | tput setaf 2 20 | kubectl create ns etcd-dns 21 | 22 | # This section will deploy the ETCD chart in our cluster using a values.yaml file.
23 | tput setaf 6 24 | echo -e "\n \n*******************************************************************************************************************" 25 | echo -e "Deploying Bitnami's ETCD Helm Chart" 26 | echo -e "*******************************************************************************************************************" 27 | tput setaf 2 28 | helm install etcd-dns oci://registry-1.docker.io/bitnamicharts/etcd -f values.yaml -n etcd-dns 29 | 30 | tput setaf 6 31 | echo -e "\n\nETCD has been deployed in the etcd-dns namespace" 32 | tput setaf 2 33 | 34 | -------------------------------------------------------------------------------- /chapter5/externaldns/coredns-add-template.txt: -------------------------------------------------------------------------------- 1 | etcd foowidgets.k8s { 2 | stubzones 3 | path /skydns 4 | endpoint http://:2379 5 | } 6 | 7 | -------------------------------------------------------------------------------- /chapter5/externaldns/coredns-add.txt: -------------------------------------------------------------------------------- 1 | etcd foowidgets.k8s { 2 | stubzones 3 | path /skydns 4 | endpoint http://10.96.113.214:2379 5 | } 6 | 7 | -------------------------------------------------------------------------------- /chapter5/externaldns/coredns-cm-template.txt: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | Corefile: | 4 | .:53 { 5 | errors 6 | health { 7 | lameduck 5s 8 | } 9 | ready 10 | kubernetes cluster.local in-addr.arpa ip6.arpa { 11 | pods insecure 12 | fallthrough in-addr.arpa ip6.arpa 13 | ttl 30 14 | } 15 | prometheus :9153 16 | forward . /etc/resolv.conf 17 | 18 | cache 30 19 | loop 20 | reload 21 | loadbalance 22 | } 23 | kind: ConfigMap 24 | metadata: 25 | name: coredns 26 | namespace: kube-system 27 | -------------------------------------------------------------------------------- /chapter5/externaldns/coredns-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | Corefile: | 4 | .:53 { 5 | errors 6 | health { 7 | lameduck 5s 8 | } 9 | ready 10 | kubernetes cluster.local in-addr.arpa ip6.arpa { 11 | pods insecure 12 | fallthrough in-addr.arpa ip6.arpa 13 | ttl 30 14 | } 15 | prometheus :9153 16 | forward . 
/etc/resolv.conf 17 | 18 | etcd foowidgets.k8s { 19 | stubzones 20 | path /skydns 21 | endpoint http://10.96.113.214:2379 22 | } 23 | 24 | cache 30 25 | loop 26 | reload 27 | loadbalance 28 | } 29 | kind: ConfigMap 30 | metadata: 31 | name: coredns 32 | namespace: kube-system 33 | -------------------------------------------------------------------------------- /chapter5/externaldns/deployment-externaldns.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: external-dns 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["services","endpoints","pods"] 9 | verbs: ["get","watch","list"] 10 | - apiGroups: ["extensions","networking.k8s.io"] 11 | resources: ["ingresses"] 12 | verbs: ["get","watch","list"] 13 | - apiGroups: [""] 14 | resources: ["nodes"] 15 | verbs: ["list"] 16 | --- 17 | apiVersion: rbac.authorization.k8s.io/v1 18 | kind: ClusterRoleBinding 19 | metadata: 20 | name: external-dns-viewer 21 | roleRef: 22 | apiGroup: rbac.authorization.k8s.io 23 | kind: ClusterRole 24 | name: external-dns 25 | subjects: 26 | - kind: ServiceAccount 27 | name: external-dns 28 | namespace: external-dns 29 | --- 30 | apiVersion: v1 31 | kind: ServiceAccount 32 | metadata: 33 | name: external-dns 34 | namespace: external-dns 35 | --- 36 | apiVersion: apps/v1 37 | kind: Deployment 38 | metadata: 39 | name: external-dns 40 | namespace: external-dns 41 | spec: 42 | strategy: 43 | type: Recreate 44 | selector: 45 | matchLabels: 46 | app: external-dns 47 | template: 48 | metadata: 49 | labels: 50 | app: external-dns 51 | spec: 52 | serviceAccountName: external-dns 53 | containers: 54 | - name: external-dns 55 | image: registry.k8s.io/external-dns/external-dns:v0.13.5 56 | args: 57 | - --source=service 58 | - --provider=coredns 59 | - --publish-internal-services 60 | - --log-level=debug # debug only 61 | env: 62 | - name: ETCD_URLS 63 | value: http://10.96.113.214:2379 64 | -------------------------------------------------------------------------------- /chapter5/externaldns/deployment-template.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: external-dns 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["services","endpoints","pods"] 9 | verbs: ["get","watch","list"] 10 | - apiGroups: ["extensions","networking.k8s.io"] 11 | resources: ["ingresses"] 12 | verbs: ["get","watch","list"] 13 | - apiGroups: [""] 14 | resources: ["nodes"] 15 | verbs: ["list"] 16 | --- 17 | apiVersion: rbac.authorization.k8s.io/v1 18 | kind: ClusterRoleBinding 19 | metadata: 20 | name: external-dns-viewer 21 | roleRef: 22 | apiGroup: rbac.authorization.k8s.io 23 | kind: ClusterRole 24 | name: external-dns 25 | subjects: 26 | - kind: ServiceAccount 27 | name: external-dns 28 | namespace: external-dns 29 | --- 30 | apiVersion: v1 31 | kind: ServiceAccount 32 | metadata: 33 | name: external-dns 34 | namespace: external-dns 35 | --- 36 | apiVersion: apps/v1 37 | kind: Deployment 38 | metadata: 39 | name: external-dns 40 | namespace: external-dns 41 | spec: 42 | strategy: 43 | type: Recreate 44 | selector: 45 | matchLabels: 46 | app: external-dns 47 | template: 48 | metadata: 49 | labels: 50 | app: external-dns 51 | spec: 52 | serviceAccountName: external-dns 53 | containers: 54 | - name: external-dns 55 | image: registry.k8s.io/external-dns/external-dns:v0.13.5 56 | args: 57 | 
- --source=service 58 | - --provider=coredns 59 | - --publish-internal-services 60 | - --log-level=info 61 | env: 62 | - name: ETCD_URLS 63 | value: http://:2379 64 | -------------------------------------------------------------------------------- /chapter5/externaldns/nginx-lb.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: nginx-lb 6 | namespace: default 7 | spec: 8 | selector: 9 | matchLabels: 10 | app: nginx 11 | replicas: 1 12 | template: 13 | metadata: 14 | labels: 15 | app: nginx 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx:latest 20 | ports: 21 | - containerPort: 80 22 | --- 23 | apiVersion: v1 24 | kind: Service 25 | metadata: 26 | annotations: 27 | external-dns.alpha.kubernetes.io/hostname: nginx.foowidgets.k8s 28 | name: nginx-ext-dns 29 | namespace: default 30 | spec: 31 | ports: 32 | - port: 80 33 | protocol: TCP 34 | targetPort: 80 35 | selector: 36 | app: nginx 37 | type: LoadBalancer 38 | 39 | -------------------------------------------------------------------------------- /chapter5/k8gb-example/k8gb/coredns-dual-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: k8gb-coredns-dual 5 | namespace: k8gb 6 | spec: 7 | externalTrafficPolicy: Cluster 8 | internalTrafficPolicy: Cluster 9 | ipFamilies: 10 | - IPv4 11 | ipFamilyPolicy: SingleStack 12 | ports: 13 | - name: udp-5353 14 | port: 53 15 | protocol: UDP 16 | targetPort: 5353 17 | - name: tcp-5353 18 | port: 53 19 | protocol: TCP 20 | targetPort: 5353 21 | selector: 22 | app.kubernetes.io/instance: k8gb 23 | app.kubernetes.io/name: coredns 24 | sessionAffinity: None 25 | type: LoadBalancer 26 | -------------------------------------------------------------------------------- /chapter5/k8gb-example/k8gb/k8gb-buf-values.yaml: -------------------------------------------------------------------------------- 1 | k8gb: 2 | dnsZone: "gb.foowidgets.k8s" # dnsZone controlled by gslb 3 | edgeDNSZone: "foowidgets.k8s" # main zone which would contain gslb zone to delegate 4 | edgeDNSServers: 5 | - 10.2.1.14 # use this DNS server as a main resolver to enable cross k8gb DNS based communication 6 | clusterGeoTag: "us-buf" # used for places where we need to distinguish between different Gslb instances 7 | extGslbClustersGeoTags: "us-nyc" # comma-separated list of external gslb geo tags to pair with 8 | 9 | log: 10 | # -- log format (simple,json) 11 | format: simple # log format (simple,json) 12 | # -- log level (panic,fatal,error,warn,info,debug,trace) 13 | level: trace # log level (panic,fatal,error,warn,info,debug,trace) 14 | 15 | coredns: 16 | # -- service: refer to https://www.k8gb.io/docs/service_upgrade.html for upgrading CoreDNS service steps 17 | isClusterService: false 18 | deployment: 19 | # -- Skip CoreDNS creation and uses the one shipped by k8gb instead 20 | skipConfig: true 21 | image: 22 | # -- CoreDNS CRD plugin image 23 | repository: absaoss/k8s_crd 24 | # -- image tag 25 | tag: v0.1.1 26 | # -- Creates serviceAccount for coredns 27 | serviceAccount: 28 | create: true 29 | name: coredns 30 | serviceType: LoadBalancer 31 | loadBalancerIP: 10.3.1.120 32 |
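The values file above pins the Buffalo cluster's geo tag, its NYC peer, and a fixed load-balancer IP for the k8gb CoreDNS service. A sketch of how such a file is typically fed to the public k8gb chart; the repo alias and namespace are assumptions, not taken from the chapter's scripts:

#!/bin/bash
# Install k8gb on the Buffalo cluster using the values file above.
helm repo add k8gb https://www.k8gb.io
helm repo update
helm upgrade --install k8gb k8gb/k8gb -n k8gb --create-namespace -f k8gb-buf-values.yaml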
-------------------------------------------------------------------------------- /chapter5/k8gb-example/k8gb/k8gb-example-buf.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8gb.absa.oss/v1beta1 2 | kind: Gslb 3 | metadata: 4 | name: gslb-failover-buf 5 | namespace: demo 6 | spec: 7 | ingress: 8 | ingressClassName: nginx 9 | rules: 10 | - host: fe.gb.foowidgets.k8s # Desired GSLB enabled FQDN 11 | http: 12 | paths: 13 | - backend: 14 | service: 15 | name: nginx # Service name to enable GSLB for 16 | port: 17 | number: 80 18 | path: / 19 | pathType: Prefix 20 | strategy: 21 | type: failover # Global load balancing strategy 22 | primaryGeoTag: us-buf # Primary cluster geo tag 23 | -------------------------------------------------------------------------------- /chapter5/k8gb-example/k8gb/k8gb-example-nyc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8gb.absa.oss/v1beta1 2 | kind: Gslb 3 | metadata: 4 | name: gslb-failover-nyc 5 | namespace: demo 6 | spec: 7 | ingress: 8 | ingressClassName: nginx 9 | rules: 10 | - host: fe.gb.foowidgets.k8s # Desired GSLB enabled FQDN 11 | http: 12 | paths: 13 | - backend: 14 | service: 15 | name: nginx # Service name to enable GSLB for 16 | port: 17 | number: 80 18 | path: / 19 | pathType: Prefix 20 | strategy: 21 | type: failover # Global load balancing strategy 22 | primaryGeoTag: us-buf # Primary cluster geo tag 23 | -------------------------------------------------------------------------------- /chapter5/k8gb-example/k8gb/k8gb-nyc-values.yaml: -------------------------------------------------------------------------------- 1 | k8gb: 2 | dnsZone: "gb.foowidgets.k8s" # dnsZone controlled by gslb 3 | edgeDNSZone: "foowidgets.k8s" # main zone which would contain gslb zone to delegate 4 | edgeDNSServers: 5 | - 10.2.1.14 # use this DNS server as a main resolver to enable cross k8gb DNS based communication 6 | clusterGeoTag: "us-nyc" # used for places where we need to distinguish between different Gslb instances 7 | extGslbClustersGeoTags: "us-buf" # comma-separated list of external gslb geo tags to pair with 8 | 9 | log: 10 | # -- log format (simple,json) 11 | format: simple # log format (simple,json) 12 | # -- log level (panic,fatal,error,warn,info,debug,trace) 13 | level: trace # log level (panic,fatal,error,warn,info,debug,trace) 14 | 15 | coredns: 16 | # -- service: refer to https://www.k8gb.io/docs/service_upgrade.html for upgrading CoreDNS service steps 17 | isClusterService: false 18 | deployment: 19 | # -- Skip CoreDNS creation and uses the one shipped by k8gb instead 20 | skipConfig: true 21 | image: 22 | # -- CoreDNS CRD plugin image 23 | repository: absaoss/k8s_crd 24 | # -- image tag 25 | tag: v0.0.11 26 | # -- Creates serviceAccount for coredns 27 | serviceAccount: 28 | create: true 29 | name: coredns 30 | serviceType: LoadBalancer 31 |
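With both clusters installed and paired through clusterGeoTag/extGslbClustersGeoTags, each k8gb CoreDNS should answer authoritatively for the delegated gb.foowidgets.k8s zone. A quick resolution check against the Buffalo instance, whose service IP is pinned to 10.3.1.120 in the Buffalo values file above; the FQDN comes from the Gslb resources earlier in this chapter:

#!/bin/bash
# Ask the Buffalo k8gb CoreDNS directly for the GSLB-managed name.
dig @10.3.1.120 fe.gb.foowidgets.k8s +short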
-------------------------------------------------------------------------------- /chapter5/k8gb-example/k8gb/nginx-fe-buff.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: demo 5 | --- 6 | apiVersion: v1 7 | kind: ConfigMap 8 | metadata: 9 | name: index 10 | namespace: demo 11 | data: 12 | index.html: | 13 | <html> 14 | <head><title>Welcome</title></head> 15 | <body> 16 | <h1>Hi! This is a webserver in Buffalo for our K8GB example...</h1></body></html>
17 | 18 | --- 19 | apiVersion: apps/v1 20 | kind: Deployment 21 | metadata: 22 | name: nginx 23 | namespace: demo 24 | spec: 25 | selector: 26 | matchLabels: 27 | name: nginx 28 | replicas: 1 29 | template: 30 | metadata: 31 | labels: 32 | name: nginx 33 | spec: 34 | containers: 35 | - name: nginx 36 | image: nginx:latest 37 | ports: 38 | - containerPort: 80 39 | volumeMounts: 40 | - name: index-file 41 | mountPath: /usr/share/nginx/html/ 42 | volumes: 43 | - name: index-file 44 | configMap: 45 | name: index 46 | --- 47 | apiVersion: v1 48 | kind: Service 49 | metadata: 50 | name: nginx 51 | namespace: demo 52 | spec: 53 | selector: 54 | name: nginx 55 | ports: 56 | - protocol: TCP 57 | port: 80 58 | targetPort: 80 59 | type: ClusterIP 60 | -------------------------------------------------------------------------------- /chapter5/k8gb-example/k8gb/nginx-fe-nyc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: demo 5 | --- 6 | apiVersion: v1 7 | kind: ConfigMap 8 | metadata: 9 | name: index 10 | namespace: demo 11 | data: 12 | index.html: | 13 | 14 |
<html> 14 | <head><title>Welcome</title></head> 15 | <body> 16 | <h1>Hi! This is a webserver in NYC for our K8GB example...</h1></body></html> 17 | 18 | --- 19 | apiVersion: apps/v1 20 | kind: Deployment 21 | metadata: 22 | name: nginx 23 | namespace: demo 24 | spec: 25 | selector: 26 | matchLabels: 27 | app: nginx 28 | replicas: 1 29 | template: 30 | metadata: 31 | labels: 32 | app: nginx 33 | spec: 34 | containers: 35 | - name: nginx 36 | image: nginx:latest 37 | ports: 38 | - containerPort: 80 39 | volumeMounts: 40 | - name: index-file 41 | mountPath: /usr/share/nginx/html/ 42 | volumes: 43 | - name: index-file 44 | configMap: 45 | name: index 46 | --- 47 | apiVersion: v1 48 | kind: Service 49 | metadata: 50 | name: nginx 51 | namespace: demo 52 | spec: 53 | selector: 54 | app: nginx 55 | ports: 56 | - protocol: TCP 57 | port: 80 58 | targetPort: 80 59 | type: ClusterIP 60 |
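The Buffalo and NYC manifests above deploy identical frontends whose index pages identify their cluster, which makes the failover strategy easy to observe. A hedged test sketch: resolve the GSLB name through the edge DNS server from the values files, scale the primary (Buffalo) deployment to zero, and resolve again. The kubectl context name is an assumption for illustration:

#!/bin/bash
# Observe k8gb failover between the two demo clusters.
dig @10.2.1.14 fe.gb.foowidgets.k8s +short

# Take the primary (us-buf) frontend offline and give k8gb time to react.
kubectl --context buf scale deployment nginx -n demo --replicas=0
sleep 60

# The answer should now point at the NYC cluster's ingress addresses.
dig @10.2.1.14 fe.gb.foowidgets.k8s +short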
-------------------------------------------------------------------------------- /chapter5/k8gb-example/kubeadm/calico.yaml: -------------------------------------------------------------------------------- 1 | # This section includes base Calico installation configuration. 2 | # For more information, see: https://docs.projectcalico.org/v3.19/reference/installation/api#operator.tigera.io/v1.Installation 3 | apiVersion: operator.tigera.io/v1 4 | kind: Installation 5 | metadata: 6 | name: default 7 | spec: 8 | # Configures Calico networking. 9 | calicoNetwork: 10 | # Note: The ipPools section cannot be modified post-install. 11 | ipPools: 12 | - blockSize: 26 13 | cidr: 10.240.0.0/16 14 | encapsulation: VXLANCrossSubnet 15 | natOutgoing: Enabled 16 | nodeSelector: all() 17 | -------------------------------------------------------------------------------- /chapter5/k8gb-example/kubeadm/kubeadm-config.yaml: -------------------------------------------------------------------------------- 1 | 2 | # kubeadm-config.yaml 3 | kind: ClusterConfiguration 4 | apiVersion: kubeadm.k8s.io/v1beta4 5 | kubernetesVersion: v1.32.2 6 | --- 7 | kind: KubeletConfiguration 8 | apiVersion: kubelet.config.k8s.io/v1beta1 9 | cgroupDriver: cgroupfs 10 | -------------------------------------------------------------------------------- /chapter5/k8gb-example/metallb/install-metallb-buf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | clear 3 | 4 | # Deploy MetalLB using the downloaded manifest from the MetalLB repository.
5 | tput setaf 5 6 | echo -e "\n*******************************************************************************************************************" 7 | echo -e "Installing MetalLB - Buffalo Cluster - Version v0.13.10" 8 | echo -e "*******************************************************************************************************************" 9 | tput setaf 2 10 | kubectl create -f metallb-deploy.yaml 11 | 12 | # Wait for MetalLB to deploy before creating custom resource 13 | tput setaf 5 14 | echo -e "\n*******************************************************************************************************************" 15 | echo -e "Waiting for MetalLB to Deploy before continuing - This will take a minute or two" 16 | echo -e "*******************************************************************************************************************" 17 | kubectl wait deployment/controller --for=condition=available --timeout=300s -n metallb-system 18 | 19 | tput setaf 5 20 | echo -e "\n*******************************************************************************************************************" 21 | echo -e "Deploying the MetalLB Pool and L2Advertisement Resources" 22 | echo -e "*******************************************************************************************************************" 23 | tput setaf 2 24 | sleep 5 25 | kubectl apply -f metallb-pool-buf.yaml 26 | kubectl apply -f l2advertisement.yaml 27 | 28 | # Show the pods from the metallb-system namespace 29 | tput setaf 3 30 | echo -e "\n*******************************************************************************************************************" 31 | echo -e "MetalLB installation complete" 32 | echo -e "*******************************************************************************************************************\n" 33 | tput setaf 2 34 | 35 | kubectl get pods -n metallb-system 36 | 37 | echo -e "\n\n" 38 | 39 | -------------------------------------------------------------------------------- /chapter5/k8gb-example/metallb/install-metallb-nyc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | clear 3 | 4 | # Deploy MetalLB using the downloaded manifest from the MetalLB repository. 
5 | tput setaf 5 6 | echo -e "\n*******************************************************************************************************************" 7 | echo -e "Installing MetalLB - NYC Cluster - Version v0.13.10" 8 | echo -e "*******************************************************************************************************************" 9 | tput setaf 2 10 | kubectl create -f metallb-deploy.yaml 11 | 12 | # Wait for MetalLB to deploy before creating custom resource 13 | tput setaf 5 14 | echo -e "\n*******************************************************************************************************************" 15 | echo -e "Waiting for MetalLB to Deploy before continuing - This will take a minute or two" 16 | echo -e "*******************************************************************************************************************" 17 | kubectl wait deployment/controller --for=condition=available --timeout=300s -n metallb-system 18 | 19 | tput setaf 5 20 | echo -e "\n*******************************************************************************************************************" 21 | echo -e "Deploying the MetalLB Pool and L2Advertisement Resources" 22 | echo -e "*******************************************************************************************************************" 23 | tput setaf 2 24 | sleep 5 25 | kubectl apply -f metallb-pool-nyc.yaml 26 | kubectl apply -f l2advertisement.yaml 27 | 28 | # Show the pods from the metallb-system namespace 29 | tput setaf 3 30 | echo -e "\n*******************************************************************************************************************" 31 | echo -e "MetalLB installation complete" 32 | echo -e "*******************************************************************************************************************\n" 33 | tput setaf 2 34 | 35 | kubectl get pods -n metallb-system 36 | 37 | echo -e "\n\n" 38 | 39 | -------------------------------------------------------------------------------- /chapter5/k8gb-example/metallb/l2advertisement.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: L2Advertisement 3 | metadata: 4 | name: l2-all-pools 5 | namespace: metallb-system 6 | -------------------------------------------------------------------------------- /chapter5/k8gb-example/metallb/metallb-config-buf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | clear 3 | 4 | tput setaf 5 5 | echo -e "\n*******************************************************************************************************************" 6 | echo -e "Installing MetalLB for the Buffalo Cluster" 7 | echo -e "*******************************************************************************************************************" 8 | tput setaf 2 9 | kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.10.2/manifests/namespace.yaml 10 | kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.10.2/manifests/metallb.yaml 11 | kubectl create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)" 12 | 13 | tput setaf 5 14 | echo -e "\n*******************************************************************************************************************" 15 | echo -e "Configuring MetalLB IP Pool for Buffalo Cluster" 16 | echo -e "*******************************************************************************************************************" 17 | tput setaf 
kubectl apply -f metallb-config-buf.yaml

tput setaf 3
echo -e "\n*******************************************************************************************************************"
echo -e "MetalLB installation complete"
echo -e "*******************************************************************************************************************\n"
tput setaf 2

kubectl get pods -n metallb-system

echo -e "\n\n"

--------------------------------------------------------------------------------
/chapter5/k8gb-example/metallb/metallb-config-nyc.sh:
--------------------------------------------------------------------------------
#!/bin/bash
clear

tput setaf 5
echo -e "\n*******************************************************************************************************************"
echo -e "Installing MetalLB for the NYC Cluster"
echo -e "*******************************************************************************************************************"
tput setaf 2
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.10.2/manifests/namespace.yaml
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.10.2/manifests/metallb.yaml
kubectl create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"

tput setaf 5
echo -e "\n*******************************************************************************************************************"
echo -e "Configuring MetalLB IP Pool for NYC Cluster"
echo -e "*******************************************************************************************************************"
tput setaf 2
kubectl apply -f metallb-config-nyc.yaml

tput setaf 3
echo -e "\n*******************************************************************************************************************"
echo -e "MetalLB installation complete"
echo -e "*******************************************************************************************************************\n"
tput setaf 2

kubectl get pods -n metallb-system

echo -e "\n\n"

--------------------------------------------------------------------------------
/chapter5/k8gb-example/metallb/metallb-pool-buf.yaml:
--------------------------------------------------------------------------------
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: pool-01
  namespace: metallb-system
spec:
  addresses:
  - 10.3.1.91-10.3.1.120

--------------------------------------------------------------------------------
/chapter5/k8gb-example/metallb/metallb-pool-nyc.yaml:
--------------------------------------------------------------------------------
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: pool-01
  namespace: metallb-system
spec:
  addresses:
  - 10.3.1.60-10.3.1.90

--------------------------------------------------------------------------------
/chapter6/pipelines/cicd-proxy/cicd-proxy_template.yaml:
--------------------------------------------------------------------------------
cicd_proxy:
  image: docker.io/tremolosecurity/kube-oidc-proxy:latest
  replicas: 1
  explicit_certificate_trust: true
  oidc:
    audience: https://cicd-proxy.apps.IPADDR.nip.io/
    issuer: kubernetes.default.svc.cluster.local
    claims:
      user: sub
    ca: CERTPEM
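  # The issuer above must match the API server's service account token issuer,
  # and the audience must match what pipelines request in their projected
  # tokens. A quick way to confirm the issuer your cluster advertises
  # (assuming your kubeconfig user is allowed to read the discovery document):
  #   kubectl get --raw /.well-known/openid-configuration | jq -r .issuer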
  network:
    ingress_type: nginx
    ingress_annotations: {}
    api_server_host: cicd-proxy.apps.IPADDR.nip.io
    secure_from_ingress: true
  network_policies:
    enabled: true
    ingress:
      labels:
        kubernetes.io/metadata.name: ingress-nginx
  services:
    enable_tokenrequest: false
    token_request_audience: https://kubernetes.default.svc.cluster.local/
    expirationSeconds: 600
    node_selectors: []
  impersonation:
    serviceaccounts:
    - namespace: cicd-ns
      name: default

--------------------------------------------------------------------------------
/chapter6/pipelines/cicd-proxy/run_workflow.sh:
--------------------------------------------------------------------------------
#!/bin/bash

export hostip=$(hostname -I | cut -f1 -d' ' | sed 's/[.]/-/g')

sed "s/IPADDR/$hostip/g" < ./run_workflow.yaml > /tmp/run_workflow.yaml

kubectl create -f /tmp/run_workflow.yaml

--------------------------------------------------------------------------------
/chapter6/pipelines/cicd-proxy/target-ns.yaml:
--------------------------------------------------------------------------------
---
apiVersion: v1
kind: Namespace
metadata:
  name: cicd-ns
---
apiVersion: v1
kind: Namespace
metadata:
  name: target-ns
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: delete-pods
  namespace: target-ns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: delete-pods
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:serviceaccount:cicd-ns:default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: delete-pods
  namespace: target-ns
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - delete
  - get
  - list
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test-pods
  namespace: target-ns
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: test-pods
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: test-pods
    spec:
      containers:
      - args:
        - while true; do sleep 30; done;
        command:
        - /bin/bash
        - -c
        - --
        image: ubuntu:22.04
        imagePullPolicy: Always
        name: test-pod
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext:
        fsGroup: 1000
        runAsGroup: 1000
        runAsUser: 1000
      terminationGracePeriodSeconds: 30

--------------------------------------------------------------------------------
/chapter6/pipelines/sa-cluster-admins.yaml:
--------------------------------------------------------------------------------
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: sa-cluster-admins
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: sa-cluster-admins
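Because kube-oidc-proxy impersonates the pipeline's identity as the user
system:serviceaccount:cicd-ns:default, the RoleBinding in target-ns.yaml grants
that user pod get/list/delete rights only in target-ns. A quick sanity check of
the bindings (a sketch, run as a cluster-admin):

  kubectl auth can-i delete pods -n target-ns --as system:serviceaccount:cicd-ns:default   # expect: yes
  kubectl auth can-i delete pods -n cicd-ns   --as system:serviceaccount:cicd-ns:default   # expect: no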
--------------------------------------------------------------------------------
/chapter6/pipelines/token-login/.helmignore:
--------------------------------------------------------------------------------
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
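Before installing the token-login chart below, it can be rendered locally to
confirm the templates produce valid OpenUnison custom resources (a sketch,
assuming you run it from the repo root and the openunison namespace exists):

  helm lint chapter6/pipelines/token-login
  helm template token-login chapter6/pipelines/token-login -n openunison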
--------------------------------------------------------------------------------
/chapter6/pipelines/token-login/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: token-login
description: Creates a token generation API

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

--------------------------------------------------------------------------------
/chapter6/pipelines/token-login/templates/authentication_chains.yaml:
--------------------------------------------------------------------------------
---
apiVersion: openunison.tremolo.io/v1
kind: AuthenticationChain
metadata:
  name: api-idp
  namespace: {{ .Release.Namespace }}
spec:
  level: 20
  root: o=Data
  authMechs:
  - name: basic
    required: required
    params:
      realmName: k8s-api
      uidAttr: uid
    secretParams: []
  - name: map
    required: required
    params:
      map:
      - "uid|composite|${uid}"
      - "mail|composite|${mail}"
      - "givenName|composite|${givenName}"
      - "sn|composite|${sn}"
      - "displayName|composite|${displayName}"
      - "memberOf|user|memberOf"
  - name: jit
    required: required
    params:
      nameAttr: uid
      workflowName: jitdb
  - name: genoidctoken
    required: required
    params:
      idpName: k8sidp
      trustName: kubernetes

--------------------------------------------------------------------------------
/chapter6/pipelines/token-login/templates/authentication_mechanisms.yaml:
--------------------------------------------------------------------------------
---
apiVersion: openunison.tremolo.io/v1
kind: AuthenticationMechanism
metadata:
  name: basic
  namespace: {{ .Release.Namespace }}
spec:
  className: com.tremolosecurity.proxy.auth.BasicAuth
  uri: "/auth/basic"
  init: {}
  secretParams: []

--------------------------------------------------------------------------------
/chapter6/pipelines/token-login/values.yaml:
--------------------------------------------------------------------------------
# none

--------------------------------------------------------------------------------
/chapter6/user-auth/deploy_openunison_imp_noimpersonation.sh:
--------------------------------------------------------------------------------
#!/bin/bash

if [[ -z "${TS_REPO_NAME}" ]]; then
  REPO_NAME="tremolo"
else
  REPO_NAME=$TS_REPO_NAME
fi

echo "Helm Repo Name $REPO_NAME"

if [[ -z "${TS_REPO_URL}" ]]; then
  REPO_URL="https://nexus.tremolo.io/repository/helm"
else
  REPO_URL=$TS_REPO_URL
fi

echo "Helm Repo URL $REPO_URL"

echo "Deploying the Kubernetes Dashboard"

helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
helm upgrade --install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard --create-namespace --namespace kubernetes-dashboard -f ./kubernetes-dashboard-values.yaml

echo "Deploying ActiveDirectory (ApacheDS)"

kubectl apply -f ./apacheds.yaml

while [[ $(kubectl get pods -l app=apacheds -n activedirectory -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" ]]; do echo "waiting for apacheds to be running" && sleep 1; done

echo "Adding helm repo"

helm repo add $REPO_NAME $REPO_URL
helm repo update

echo "Creating openunison namespace"

kubectl create ns openunison

echo "Pre-configuring OpenUnison LDAP"

kubectl create -f ./myvd-book.yaml

echo "Downloading the ouctl utility to /tmp/ouctl"

wget https://nexus.tremolo.io/repository/ouctl/ouctl-0.0.11-linux -O /tmp/ouctl
chmod +x /tmp/ouctl

echo "Generating helm chart values to /tmp/openunison-values.yaml"
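# The sed below replaces the IPADDR placeholder with this host's primary IP,
# dots swapped for dashes, so hostnames resolve through nip.io wildcard DNS.
# For example (illustrative IP, not from this repo), 192.168.2.10 becomes
# 192-168-2-10, and a host such as k8sou.apps.192-168-2-10.nip.io resolves
# back to 192.168.2.10 without any local DNS setup.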
export hostip=$(hostname -I | cut -f1 -d' ' | sed 's/[.]/-/g')

sed "s/IPADDR/$hostip/g" < ./openunison-values-noimpersonation.yaml > /tmp/openunison-values.yaml

echo "Deploying Orchestra"
echo -n 'start123' > /tmp/ldaps
/tmp/ouctl install-auth-portal -s /tmp/ldaps -o $REPO_NAME/openunison-operator -c $REPO_NAME/orchestra -l $REPO_NAME/orchestra-login-portal /tmp/openunison-values.yaml

echo "OpenUnison is deployed!"

--------------------------------------------------------------------------------
/chapter6/user-auth/kubernetes-dashboard-values.yaml:
--------------------------------------------------------------------------------
nginx:
  enabled: false

kong:
  enabled: false

api:
  scaling:
    replicas: 1
  containers:
    ports:
    - name: api-tls
      containerPort: 8001
      protocol: TCP
    volumeMounts:
    - mountPath: /tmp
      name: tmp-volume
    - mountPath: /certs
      name: tls
  volumes:
  - name: tmp-volume
    emptyDir: {}
  - name: tls
    secret:
      secretName: kubernetes-dashboard-certs
      optional: true

web:
  scaling:
    replicas: 1
  containers:
    ports:
    - name: api-tls
      containerPort: 8001
      protocol: TCP
    volumeMounts:
    - mountPath: /tmp
      name: tmp-volume
    - mountPath: /certs
      name: tls
  volumes:
  - name: tmp-volume
    emptyDir: {}
  - name: tls
    secret:
      secretName: kubernetes-dashboard-certs
      optional: true

auth:
  scaling:
    replicas: 0
  volumeMounts:
  - mountPath: /tmp
    name: tmp-volume
  - mountPath: /certs
    name: tls
  volumes:
  - name: tmp-volume
    emptyDir: {}
  - name: tls
    secret:
      secretName: kubernetes-dashboard-certs
      optional: false
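enable-auditing.sh (next) copies cm/k8s-audit-policy.yaml into the kind
control-plane container. That policy file is not reproduced in this listing; a
minimal policy of the same kind the script expects could be generated like this
(a sketch only - the book's actual policy is more granular):

  cat <<EOF > /tmp/k8s-audit-policy-example.yaml
  apiVersion: audit.k8s.io/v1
  kind: Policy
  rules:
  # Log request metadata (user, verb, resource) for everything; no bodies.
  - level: Metadata
  EOF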
"*******************************************************************************************************************" 23 | docker cp cm/k8s-audit-policy.yaml cluster01-control-plane:/etc/kubernetes/audit/ 24 | 25 | tput setaf 3 26 | echo -e "\n\n*******************************************************************************************************************" 27 | echo -e "Initial setup complete." 28 | echo -e "\nYou must edit your API manifest fil manually, follow the steps in chapter 7 to enable auditing" 29 | echo -e "*******************************************************************************************************************\n" 30 | tput setaf 2 31 | 32 | 33 | -------------------------------------------------------------------------------- /chapter8/external-secrets/ext-secret-template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1beta1 2 | kind: SecretStore 3 | metadata: 4 | name: vault-backend 5 | namespace: my-ext-secret 6 | spec: 7 | provider: 8 | vault: 9 | server: "https://vault.apps.IPADDR.nip.io" 10 | path: "secret" 11 | version: "v1" 12 | caProvider: 13 | # Can be Secret or ConfigMap 14 | type: "ConfigMap" 15 | name: "cacerts" 16 | key: "tls.crt" 17 | namespace: "my-ext-secret" 18 | auth: 19 | # Authenticate against Vault using a Kubernetes ServiceAccount 20 | # token stored in a Secret. 21 | # https://www.vaultproject.io/docs/auth/kubernetes 22 | kubernetes: 23 | # Path where the Kubernetes authentication backend is mounted in Vault 24 | mountPath: "kubernetes" 25 | # A required field containing the Vault Role to assume. 26 | role: "extsecret" 27 | # Optional service account field containing the name 28 | # of a kubernetes ServiceAccount 29 | serviceAccountRef: 30 | name: "ext-secret-vault" 31 | --- 32 | apiVersion: external-secrets.io/v1beta1 33 | kind: ExternalSecret 34 | metadata: 35 | name: my-external-secret 36 | namespace: my-ext-secret 37 | spec: 38 | refreshInterval: 1m # rate SecretManager pulls GCPSM 39 | secretStoreRef: 40 | kind: SecretStore 41 | name: vault-backend # name of the SecretStore (or kind specified) 42 | target: 43 | name: secret-to-be-created # name of the k8s Secret to be created 44 | creationPolicy: Owner 45 | data: 46 | - secretKey: somepassword 47 | remoteRef: 48 | key: /data/extsecret/config 49 | property: some-password -------------------------------------------------------------------------------- /chapter8/external-secrets/install_external_secrets.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | helm repo add external-secrets https://charts.external-secrets.io 4 | helm repo update 5 | 6 | helm install external-secrets \ 7 | external-secrets/external-secrets \ 8 | -n external-secrets \ 9 | --create-namespace 10 | 11 | while [[ $(kubectl get pods -l app.kubernetes.io/name=external-secrets-webhook -n external-secrets -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" ]]; do echo "waiting for external-secrets webhook" && sleep 1; done 12 | while [[ $(kubectl get pods -l app.kubernetes.io/name=external-secrets -n external-secrets -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" ]]; do echo "waiting for external-secrets controller" && sleep 1; done 13 | while [[ $(kubectl get pods -l app.kubernetes.io/name=external-secrets-cert-controller -n external-secrets -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" ]]; do echo "waiting for 
kubectl create ns my-ext-secret
kubectl create sa ext-secret-vault -n my-ext-secret

kubectl create secret generic secret-to-be-created -n my-ext-secret

mkdir /tmp/cabundle
kubectl get secret root-ca -n cert-manager -o json | jq -r '.data["tls.crt"]' | base64 -d > /tmp/cabundle/tls.crt
kubectl create configmap cacerts --from-file=/tmp/cabundle -n my-ext-secret

. ../vault/vault_cli.sh

vault kv put secret/data/extsecret/config some-password=mysupersecretp@ssw0rd

vault policy write extsecret - < /tmp/ext-secret-template.yaml
kubectl create -f /tmp/ext-secret-template.yaml

--------------------------------------------------------------------------------
/chapter8/integration/envvars/envars-secrets-watch.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: watch-env
  name: test-envvars-secrets-watch
  namespace: my-ext-secret
spec:
  containers:
  - image: ubuntu:22.04
    name: test
    resources: {}
    command:
    - bash
    - -c
    - 'while [[ 1 == 1 ]]; do date && env | grep MY_SECRET_PASSWORD && sleep 1; done'
    env:
    - name: MY_SECRET_PASSWORD
      valueFrom:
        secretKeyRef:
          name: secret-to-be-created
          key: somepassword
  dnsPolicy: ClusterFirst
  restartPolicy: Never

--------------------------------------------------------------------------------
/chapter8/integration/envvars/envars-secrets.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: test
  name: test-envvars-secrets
  namespace: my-ext-secret
spec:
  containers:
  - image: busybox
    name: test
    resources: {}
    command:
    - env
    env:
    - name: MY_SECRET_PASSWORD
      valueFrom:
        secretKeyRef:
          name: secret-to-be-created
          key: somepassword
  dnsPolicy: ClusterFirst
  restartPolicy: Never

--------------------------------------------------------------------------------
/chapter8/integration/envvars/envars-vault-watch.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: watch-vault-env
  name: test-envvars-vault-watch
  namespace: my-ext-secret
  annotations:
    vault.hashicorp.com/agent-inject: "true"
    vault.hashicorp.com/log-level: trace
    vault.hashicorp.com/role: extsecret
    vault.hashicorp.com/tls-skip-verify: "true"
    vault.hashicorp.com/agent-inject-secret-myenv: 'secret/data/extsecret/config'
    vault.hashicorp.com/agent-inject-template-myenv: |
      {{- with secret "secret/data/extsecret/config" -}}
      export MY_SECRET_PASSWORD="{{ index .Data "some-password" }}"
      {{- end }}
spec:
  containers:
  - image: ubuntu:22.04
    name: test
    resources: {}
    command:
    - bash
    - -c
    - 'echo "sleeping 5 seconds"; sleep 5;source /vault/secrets/myenv ; env | grep MY_SECRET_PASSWORD'
  dnsPolicy: ClusterFirst
  restartPolicy: Never
  serviceAccountName: ext-secret-vault
  serviceAccount: ext-secret-vault
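Once the operator reconciles the ExternalSecret defined in
ext-secret-template.yaml, the target Kubernetes Secret should hold the value
seeded into Vault. A quick way to confirm the sync worked (a sketch, using the
names from that template):

  kubectl get externalsecret my-external-secret -n my-ext-secret
  kubectl get secret secret-to-be-created -n my-ext-secret -o jsonpath='{.data.somepassword}' | base64 -d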
--------------------------------------------------------------------------------
/chapter8/integration/volumes/create-vault.sh:
--------------------------------------------------------------------------------
#!/bin/bash

export hostip=$(hostname -I | cut -f1 -d' ' | sed 's/[.]/-/g')
sed "s/IPADDR/$hostip/g" < ./volume-vault-watch.yaml > /tmp/volume-vault-watch.yaml
kubectl create -f /tmp/volume-vault-watch.yaml

--------------------------------------------------------------------------------
/chapter8/integration/volumes/volume-secrets-watch.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: watch-volume
  name: test-volumes-secrets-watch
  namespace: my-ext-secret
spec:
  containers:
  - image: ubuntu:22.04
    name: test
    resources: {}
    command:
    - bash
    - -c
    - 'while [[ 1 == 1 ]]; do date && cat /etc/secrets/somepassword && echo "" && echo "----------" && sleep 1; done'
    volumeMounts:
    - name: mypassword
      mountPath: /etc/secrets
  volumes:
  - name: mypassword
    secret:
      secretName: secret-to-be-created
  dnsPolicy: ClusterFirst
  restartPolicy: Never

--------------------------------------------------------------------------------
/chapter8/integration/volumes/volume-secrets.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: test-volume
  name: test-volume-secrets
  namespace: my-ext-secret
spec:
  containers:
  - image: busybox
    name: test
    resources: {}
    command:
    - sh
    - -c
    - 'cat /etc/secrets/somepassword'
    volumeMounts:
    - name: mypassword
      mountPath: /etc/secrets
  volumes:
  - name: mypassword
    secret:
      secretName: secret-to-be-created
  dnsPolicy: ClusterFirst
  restartPolicy: Never

--------------------------------------------------------------------------------
/chapter8/integration/volumes/volume-vault-watch.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: watch-vault-volume
  name: test-vault-vault-watch
  namespace: my-ext-secret
  annotations:
    vault.hashicorp.com/service: "https://vault.apps.IPADDR.nip.io"
    vault.hashicorp.com/agent-inject: "true"
    vault.hashicorp.com/log-level: trace
    vault.hashicorp.com/role: extsecret
    vault.hashicorp.com/tls-skip-verify: "true"
    vault.hashicorp.com/agent-inject-secret-myenv: 'secret/data/extsecret/config'
    vault.hashicorp.com/secret-volume-path-myenv: '/etc/secrets'
    vault.hashicorp.com/agent-inject-template-myenv: |
      {{- with secret "secret/data/extsecret/config" -}}
      MY_SECRET_PASSWORD="{{ index .Data "some-password" }}"
      {{- end }}
spec:
  containers:
  - image: ubuntu:22.04
    name: test
    resources: {}
    command:
    - bash
    - -c
    - 'while [[ 1 == 1 ]]; do date && cat /etc/secrets/myenv && echo "" && echo "----------" && sleep 1; done'
  dnsPolicy: ClusterFirst
  restartPolicy: Never
  serviceAccountName: ext-secret-vault
  serviceAccount: ext-secret-vault
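If the Vault agent injector is wired up correctly, the rendered template lands
inside the pod at /etc/secrets/myenv. Two quick checks (a sketch; the app
container name comes from the manifest above, and vault-agent is the sidecar
name the injector conventionally adds):

  kubectl exec test-vault-vault-watch -n my-ext-secret -c test -- cat /etc/secrets/myenv
  kubectl logs test-vault-vault-watch -n my-ext-secret -c vault-agent --tail=20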
--------------------------------------------------------------------------------
/chapter8/vault/api-server-ingress.yaml:
--------------------------------------------------------------------------------
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: api-server-ingress
  namespace: default
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    cert-manager.io/cluster-issuer: "enterprise-ca"
spec:
  rules:
  - http:
      paths:
      - backend:
          service:
            name: kubernetes
            port:
              number: 443
        path: "/"
        pathType: Prefix
    host: kube-api.IPADDR.nip.io
  tls:
  - hosts:
    - kube-api.IPADDR.nip.io
    secretName: api-web-tls

--------------------------------------------------------------------------------
/chapter8/vault/install_vault.sh:
--------------------------------------------------------------------------------
#!/bin/bash

if which vault > /dev/null; then
  echo "vault already installed"
else
  echo "install the vault binary"
  sudo apt update && sudo apt install gpg
  wget -O- https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg
  gpg --no-default-keyring --keyring /usr/share/keyrings/hashicorp-archive-keyring.gpg --fingerprint
  echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
  sudo apt update
  sudo apt install vault
fi

--------------------------------------------------------------------------------
/chapter8/vault/unseal.sh:
--------------------------------------------------------------------------------
#!/bin/bash

KEYS=$(jq -r '@sh "\(.unseal_keys_hex)\n"' < $1)
echo $KEYS

for KEY in $KEYS
do
  echo $KEY
  KEY2=$(echo -n $KEY | cut -d "'" -f 2)
  kubectl exec -i vault-0 -n vault -- vault operator unseal $KEY2
done

--------------------------------------------------------------------------------
/chapter8/vault/vault-ingress.yaml:
--------------------------------------------------------------------------------
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: vault-ingress
  namespace: vault
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
    cert-manager.io/cluster-issuer: "enterprise-ca"
spec:
  rules:
  - http:
      paths:
      - backend:
          service:
            name: vault-ui
            port:
              number: 8200
        path: "/"
        pathType: Prefix
    host: vault.apps.IPADDR.nip.io
  tls:
  - hosts:
    - vault.apps.IPADDR.nip.io
    secretName: vault-web-tls

--------------------------------------------------------------------------------
/chapter8/vault/vault_cli.sh:
--------------------------------------------------------------------------------
#!/bin/bash

kubectl get secret root-ca -n cert-manager -o json | jq -r '.data["tls.crt"]' | base64 -d > /tmp/root-ca.crt

export hostip=$(hostname -I | cut -f1 -d' ' | sed 's/[.]/-/g')

export VAULT_ADDR="https://vault.apps.$hostip.nip.io/"
export VAULT_CACERT="/tmp/root-ca.crt"
export VAULT_TOKEN=$(jq -r '.root_token' < ~/unseal-keys.json)

--------------------------------------------------------------------------------
/chapter9/deploy_vcluster_cli.sh:
--------------------------------------------------------------------------------
#!/bin/bash

curl -L -o vcluster "https://github.com/loft-sh/vcluster/releases/latest/download/vcluster-linux-amd64" && sudo install -c -m 0755 vcluster /usr/local/bin && rm -f vcluster
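vault_cli.sh only exports environment variables, so it has to be sourced rather
than executed for those settings to land in your shell. Afterwards the CLI
should reach Vault through the ingress (a sketch, assuming ~/unseal-keys.json
exists from initialization):

  . ./vault_cli.sh
  vault status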
--------------------------------------------------------------------------------
/chapter9/ha/create-ha-db.sql:
--------------------------------------------------------------------------------
CREATE DATABASE vcluster_tenant1;

CREATE USER 'vcluster_tenant1'@'%'
  REQUIRE SUBJECT '/O=k8s-enterprise-guide/CN=vcluster_tenant1'
  AND ISSUER '/CN=enterprise-ca';

GRANT ALL PRIVILEGES ON vcluster_tenant1.* to 'vcluster_tenant1'@'%';

--------------------------------------------------------------------------------
/chapter9/ha/vcluster-ha-tenant1-vaules-upgrade.yaml:
--------------------------------------------------------------------------------
controlPlane:
  backingStore:
    database:
      external:
        enabled: true
        dataSource: mysql://vcluster_tenant1@tcp(mysql.mysql.svc:3306)/vcluster_tenant1
        keyFile: /etc/mysql-tls/tls.key
        certFile: /etc/mysql-tls/tls.crt
        caFile: /etc/mysql-tls/ca.crt
  coredns:
    deployment:
      replicas: 2
  distro:
    k3s:
      enabled: true
      image:
        tag: v1.30.1-k3s1
  statefulSet:
    highAvailability:
      replicas: 2
    persistence:
      addVolumes:
      - name: mysql-tls
        secret:
          secretName: vcluster-client-tls
      addVolumeMounts:
      - name: mysql-tls
        mountPath: /etc/mysql-tls
      volumeClaim:
        enabled: false
    scheduling:
      podManagementPolicy: OrderedReady

--------------------------------------------------------------------------------
/chapter9/ha/vcluster-ha-tenant1-vaules.yaml:
--------------------------------------------------------------------------------
controlPlane:
  backingStore:
    database:
      external:
        enabled: true
        dataSource: mysql://vcluster_tenant1@tcp(mysql.mysql.svc:3306)/vcluster_tenant1
        keyFile: /etc/mysql-tls/tls.key
        certFile: /etc/mysql-tls/tls.crt
        caFile: /etc/mysql-tls/ca.crt
  coredns:
    deployment:
      replicas: 2
  distro:
    k3s:
      enabled: true
      image:
        tag: v1.29.5-k3s1
  statefulSet:
    highAvailability:
      replicas: 2
    persistence:
      addVolumes:
      - name: mysql-tls
        secret:
          secretName: vcluster-client-tls
      addVolumeMounts:
      - name: mysql-tls
        mountPath: /etc/mysql-tls
      volumeClaim:
        enabled: false
    scheduling:
      podManagementPolicy: OrderedReady
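The two values files above differ only in the k3s image tag (v1.29.5-k3s1
versus v1.30.1-k3s1), so upgrading the HA tenant's Kubernetes version is a
matter of re-running the deployment with the -upgrade variant. A sketch of the
flow, assuming the vcluster CLI and a release named tenant1 (flag names are an
assumption; check 'vcluster create --help' for your CLI version):

  vcluster create tenant1 -n tenant1 -f vcluster-ha-tenant1-vaules.yaml
  # later, move to the newer k3s tag:
  vcluster create tenant1 -n tenant1 --upgrade -f vcluster-ha-tenant1-vaules-upgrade.yaml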
--------------------------------------------------------------------------------
/chapter9/ha/vcluster-tenant1.yaml:
--------------------------------------------------------------------------------
---
apiVersion: v1
kind: Namespace
metadata:
  name: tenant1
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: tenant1-admin
  namespace: tenant1
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: admin
subjects:
- kind: ServiceAccount
  name: vc-tenant1
  namespace: tenant1
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: vcluster-tenant1
  namespace: tenant1
spec:
  # Secret names are always required.
  secretName: vcluster-client-tls

  duration: 2160h # 90d
  renewBefore: 360h # 15d
  subject:
    organizations:
    - k8s-enterprise-guide
  # The use of the common name field has been deprecated since 2000 and is
  # discouraged from being used.
  commonName: vcluster_tenant1
  isCA: false
  privateKey:
    algorithm: RSA
    encoding: PKCS1
    size: 2048
  usages:
  - server auth
  - client auth
  # Issuer references are always required.
  issuerRef:
    name: selfsigned-issuer
    # We can reference ClusterIssuers by changing the kind here.
    # The default value is Issuer (i.e. a locally namespaced Issuer)
    kind: ClusterIssuer
    # This is optional since cert-manager will default to this value however
    # if you are using an external issuer, change this to that issuer group.
    group: cert-manager.io

--------------------------------------------------------------------------------
/chapter9/host/deploy_openunison_vcluster.sh:
--------------------------------------------------------------------------------
#!/bin/bash

if [[ -z "${TS_REPO_NAME}" ]]; then
  REPO_NAME="tremolo"
else
  REPO_NAME=$TS_REPO_NAME
fi

echo "Helm Repo Name $REPO_NAME"

if [[ -z "${TS_REPO_URL}" ]]; then
  REPO_URL="https://nexus.tremolo.io/repository/helm"
else
  REPO_URL=$TS_REPO_URL
fi

echo "Helm Repo URL $REPO_URL"

vcluster connect myvcluster -n tenant1

echo "Deploying the Kubernetes Dashboard"

helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
helm upgrade --install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard --create-namespace --namespace kubernetes-dashboard -f ./kubernetes-dashboard-values.yaml

wget https://nexus.tremolo.io/repository/ouctl/ouctl-0.0.11-linux -O /tmp/ouctl
chmod +x /tmp/ouctl

echo "Generating helm chart values to /tmp/openunison-vcluster-values.yaml"

export hostip=$(hostname -I | cut -f1 -d' ' | sed 's/[.]/-/g')

sed "s/IPADDR/$hostip/g" < ./openunison-values-vcluster.yaml > /tmp/openunison-vcluster-values.yaml

echo "Deploying Orchestra"
/tmp/ouctl install-satelite -o $REPO_NAME/openunison-operator -c $REPO_NAME/orchestra -l $REPO_NAME/orchestra-login-portal /tmp/openunison-vcluster-values.yaml kind-cluster01 vcluster_myvcluster_tenant1_kind-cluster01

kubectl create -f - < /tmp/volume-vault-watch.yaml
kubectl create -f /tmp/volume-vault-watch.yaml
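After the satellite integration completes, you can confirm the virtual cluster
is reachable without switching kubeconfig contexts by running a command through
the vcluster CLI (a sketch; the release and namespace names come from the
script above):

  vcluster connect myvcluster -n tenant1 -- kubectl get namespaces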
--------------------------------------------------------------------------------
/chapter9/multitenant/examples/volume-vault-watch.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: watch-vault-volume
  name: test-vault-vault-watch
  namespace: default
  annotations:
    vault.hashicorp.com/service: "https://vault.apps.IPADDR.nip.io"
    vault.hashicorp.com/auth-path: "auth/vcluster-tenant1"
    vault.hashicorp.com/agent-inject: "true"
    vault.hashicorp.com/log-level: trace
    vault.hashicorp.com/role: cluster-read
    vault.hashicorp.com/tls-skip-verify: "true"
    vault.hashicorp.com/agent-inject-secret-myenv: 'secret/data/vclusters/tenant1/ns/default/config'
    vault.hashicorp.com/secret-volume-path-myenv: '/etc/secrets'
    vault.hashicorp.com/agent-inject-template-myenv: |
      {{- with secret "secret/data/vclusters/tenant1/ns/default/config" -}}
      MY_SECRET_PASSWORD="{{ index .Data "somepassword" }}"
      {{- end }}
spec:
  containers:
  - image: ubuntu:22.04
    name: test
    resources: {}
    command:
    - bash
    - -c
    - 'while [[ 1 == 1 ]]; do date && cat /etc/secrets/myenv && echo "" && echo "----------" && sleep 1; done'
  dnsPolicy: ClusterFirst
  restartPolicy: Never
  serviceAccountName: default
  serviceAccount: default

--------------------------------------------------------------------------------
/chapter9/multitenant/setup/kubernetes-dashboard-values.yaml:
--------------------------------------------------------------------------------
nginx:
  enabled: false

kong:
  enabled: false

api:
  scaling:
    replicas: 1
  containers:
    ports:
    - name: api-tls
      containerPort: 8001
      protocol: TCP
    volumeMounts:
    - mountPath: /tmp
      name: tmp-volume
    - mountPath: /certs
      name: tls
  volumes:
  - name: tmp-volume
    emptyDir: {}
  - name: tls
    secret:
      secretName: kubernetes-dashboard-certs
      optional: true

web:
  scaling:
    replicas: 1
  containers:
    ports:
    - name: api-tls
      containerPort: 8001
      protocol: TCP
    volumeMounts:
    - mountPath: /tmp
      name: tmp-volume
    - mountPath: /certs
      name: tls
  volumes:
  - name: tmp-volume
    emptyDir: {}
  - name: tls
    secret:
      secretName: kubernetes-dashboard-certs
      optional: true

auth:
  scaling:
    replicas: 0
  volumeMounts:
  - mountPath: /tmp
    name: tmp-volume
  - mountPath: /certs
    name: tls
  volumes:
  - name: tmp-volume
    emptyDir: {}
  - name: tls
    secret:
      secretName: kubernetes-dashboard-certs
      optional: false

--------------------------------------------------------------------------------
/chapter9/multitenant/setup/vault/api-server-ingress.yaml:
--------------------------------------------------------------------------------
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: api-server-ingress
  namespace: default
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    cert-manager.io/cluster-issuer: "enterprise-ca"
spec:
  rules:
  - http:
      paths:
      - backend:
          service:
            name: kubernetes
            port:
              number: 443
        path: "/"
        pathType: Prefix
    host: kube-api.IPADDR.nip.io
  tls:
  - hosts:
    - kube-api.IPADDR.nip.io
    secretName: api-web-tls
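Exposing the API server through this Ingress is what lets Vault validate
service account JWTs from outside the cluster. A quick check that the endpoint
answers (a sketch, assuming $hostip is set as in the scripts above; whether
discovery is served anonymously depends on your RBAC, so a 401/403 can still
mean the ingress path itself is working):

  curl -k https://kube-api.$hostip.nip.io/.well-known/openid-configuration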
--------------------------------------------------------------------------------
/chapter9/multitenant/setup/vault/deploy_vault.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# deploy vault
kubectl create ns vault
helm repo add hashicorp https://helm.releases.hashicorp.com
helm repo update

helm install vault hashicorp/vault --namespace vault --set ui.enabled=true --set ui.serviceType=ClusterIP

while [[ $(kubectl get pod vault-0 -n vault -o 'jsonpath={..status.containerStatuses[0].started}') != "true" ]]; do echo "waiting for vault pod" && sleep 1; done

kubectl exec --stdin=true --tty=true vault-0 -n vault -- vault operator init --format=json > ~/unseal-keys.json

./unseal.sh ~/unseal-keys.json

# creating ingress

export hostip=$(hostname -I | cut -f1 -d' ' | sed 's/[.]/-/g')
sed "s/IPADDR/$hostip/g" < ./vault-ingress.yaml > /tmp/vault-ingress.yaml
kubectl create -f /tmp/vault-ingress.yaml

# create API Server Ingress

sed "s/IPADDR/$hostip/g" < ./api-server-ingress.yaml > /tmp/api-server-ingress.yaml
kubectl create -f /tmp/api-server-ingress.yaml

echo "sleeping 30 seconds"
sleep 30s

# install CLI
./install_vault.sh

--------------------------------------------------------------------------------
/chapter9/multitenant/setup/vault/install_vault.sh:
--------------------------------------------------------------------------------
#!/bin/bash

if which vault > /dev/null; then
  echo "vault already installed"
else
  echo "install the vault binary"
  sudo apt update && sudo apt install gpg
  wget -O- https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg
  gpg --no-default-keyring --keyring /usr/share/keyrings/hashicorp-archive-keyring.gpg --fingerprint
  echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
  sudo apt update
  sudo apt install vault
fi

--------------------------------------------------------------------------------
/chapter9/multitenant/setup/vault/integrate_cp.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# first, configure our control plane to be able to authenticate pods
export hostip=$(hostname -I | cut -f1 -d' ' | sed 's/[.]/-/g')

kubectl create ns vault-integration
kubectl create sa vault-client -n vault-integration

kubectl create -f - <

--------------------------------------------------------------------------------
/chapter9/multitenant/setup/vault/vault_cli.sh:
--------------------------------------------------------------------------------
#!/bin/bash

kubectl get secret root-ca -n cert-manager -o json | jq -r '.data["tls.crt"]' | base64 -d > /tmp/root-ca.crt

export hostip=$(hostname -I | cut -f1 -d' ' | sed 's/[.]/-/g')

export VAULT_ADDR="https://vault.apps.$hostip.nip.io/"
export VAULT_CACERT="/tmp/root-ca.crt"
export VAULT_TOKEN=$(jq -r '.root_token' < ~/unseal-keys.json)

--------------------------------------------------------------------------------
/chapter9/multitenant/setup/vault/vault_integrate_cluster.sh:
--------------------------------------------------------------------------------
#!/bin/bash

export hostip=$(hostname -I | cut -f1 -d' ' | sed 's/[.]/-/g')

kubectl create ns vault-integration
kubectl create sa vault-client -n vault-integration

kubectl create -f - <