├── .gitignore ├── REDACTED-params.yaml ├── Readme.md ├── acme-fitness ├── acme-fitness-mongodata-pvc.yaml ├── acme-fitness-namespace-settings.yaml ├── acme-fitness-secrets.yaml ├── app-label-overlay.yaml ├── catalog-db-volume-overlay.yaml ├── jaeger-daemonset.yaml └── template │ └── acme-fitness-frontend-ingress.yaml ├── argocd ├── 01-namespace.yaml ├── 02-serviceaccount.yaml ├── 03-clusterrolebinding.yaml ├── fortune-teller │ ├── base │ │ ├── deployment.yaml │ │ ├── kustomization.yaml │ │ └── service.yaml │ ├── dev │ │ ├── deployment.yaml │ │ └── kustomization.yaml │ ├── kustomization.yaml │ └── production │ │ ├── deployment.yaml │ │ ├── kustomization.yaml │ │ └── service.yaml ├── httpproxy.yaml └── values.yaml ├── capi-overrides └── disable-aws-bastion.yaml ├── cluster-overlays ├── Readme.md └── tkg │ └── providers │ ├── infrastructure-vsphere │ └── ytt │ │ ├── mtu-default-values.yaml │ │ ├── mtu.yaml │ │ ├── nameservers-default-values.yaml │ │ ├── nameservers.yaml │ │ ├── search-domain-default-values.yaml │ │ └── search-domain.yaml │ └── ytt │ └── 04_user_customizations │ ├── ntp-servers-default-values.yaml │ ├── ntp-servers.yaml │ ├── trusted-certs-default-values.yaml │ └── trusted-certs.yaml ├── concourse ├── common-secrets.yaml ├── concourse-values-contour-template.yaml └── test-pipeline.yaml ├── config-templates ├── aws-mc-config.yaml ├── aws-workload-cluster-config.yaml ├── azure-mc-config.yaml ├── azure-workload-cluster-config.yaml ├── vsphere-mc-config.yaml └── vsphere-workload-cluster-config.yaml ├── crashd └── .gitkeep ├── dex ├── 01-namespace.yaml ├── dex-values.yaml └── generate-and-apply-dex-yaml.sh ├── dns ├── tkg-aws-lab-record-sets-aws.json.template └── tkg-lab-record-sets.yml ├── docs ├── acme-fitness-lab │ ├── 01_okta_setup.md │ ├── 02_policy_acme.md │ ├── 03-login-kubeconfig.md │ └── 04-deploy-app.md ├── argocd-app-details.png ├── argocd-apps.png ├── avi │ ├── avi-admin-user.png │ ├── avi-cloud-ipam-dns.png │ ├── avi-cloud-type.png │ ├── avi-config-new-cert.png │ ├── avi-create-cert.png │ ├── avi-datacenter.png │ ├── avi-dns-profile.png │ ├── avi-dns.png │ ├── avi-essentials.png │ ├── avi-export-cert.png │ ├── avi-ipam.png │ ├── avi-mgmt-net.png │ ├── avi-net-ready.png │ ├── avi-ntp.png │ ├── avi-ova-resize.png │ ├── avi-ova-setup.png │ ├── avi-routes.png │ ├── avi-smtp.png │ ├── avi-software.png │ ├── avi-systemupdate.png │ ├── avi-tenant.png │ ├── avi-vc.png │ ├── avi-vc2.png │ ├── avi-vc3.png │ ├── avi-vip-pool.png │ ├── net-2.png │ ├── net-3.png │ ├── net-flat.png │ ├── se-group-cluster-designation.png │ ├── session-timeout.png │ ├── setup_avi_ctrl.md │ └── static-route.png ├── baseline-lab-setup │ ├── one-step.md │ └── step-by-step.md ├── bonus-labs │ ├── argocd-kustomize.md │ ├── avi-ldap-auth │ │ ├── assign-auth-profile.png │ │ ├── auth-default-view.png │ │ ├── avi-ldap-auth.md │ │ ├── profile-create.png │ │ └── role-mapping.png │ ├── cluster-autoscaling.md │ ├── concourse.md │ ├── crashd.md │ ├── gitlab.md │ ├── jaeger_tracing.md │ ├── kubeapps.md │ ├── prometheus_grafana.md │ ├── tmc_image_policy.md │ ├── to.md │ └── velero_restore.md ├── guestbook-app.png ├── mgmt-cluster │ ├── 01_install_tkg_mgmt.md │ ├── 02_attach_tmc_mgmt.md │ ├── 03_dns_certs_mgmt.md │ ├── 04_okta_mgmt.md │ ├── 06_contour_mgmt.md │ ├── 07_update_pinniped_config_mgmt.md │ ├── 08_monitoring_mgmt.md │ ├── 09_fluentbit_mgmt.md │ └── 10_velero_mgmt.md ├── misc │ ├── cloud_dns_sample.png │ └── goog_cloud_dns.md ├── shared-services-cluster │ ├── 01_install_tkg_ssc.md │ ├── 02_attach_tmc_ssc.md 
│ ├── 03_policy_ssc.md │ ├── 04_contour_ssc.md │ ├── 06_ek_ssc.md │ ├── 07_fluentbit_ssc.md │ ├── 08_5_minio_ssc.md │ ├── 08_monitoring_ssc.md │ ├── 09_velero_ssc.md │ ├── 10_harbor.md │ ├── harbor-oidc-config.png │ └── tanzu-repo.png ├── tkg-deployment.png ├── tkg-lab-base.png ├── tkg.png ├── troubleshooting │ ├── DomainDNS.png │ ├── HostedZone1Details.png │ ├── HostedZone2Details.png │ ├── HostedZones.png │ ├── Readme.md │ └── dns-setup.md └── workload-cluster │ ├── 01_install_tkg_and_components_wlc.md │ └── tanzu-repo.png ├── elasticsearch-kibana ├── 01-namespace.yaml ├── 02-statefulset.yaml ├── 03-service.yaml ├── curator-cm.yaml ├── curator-cronjob.yaml └── template │ ├── 03b-ingress.yaml │ ├── 04-kibana.yaml │ └── 05-kibana-ingress.yaml ├── gitlab └── values-gitlab.yaml ├── keys └── .gitkeep ├── kuard ├── deployment.yaml ├── ingress.yaml └── svc.yaml ├── kubeapps ├── 01-namespace.yaml ├── generate-and-apply-kubeapps-yaml.sh ├── kubeapps-jwt-authenticator.yaml └── kubeapps-values.yaml ├── local-config └── .gitkeep ├── overlay └── trust-certificate │ ├── configmap.yaml │ ├── overlay.yaml │ └── values.yaml ├── pipeline.yaml ├── scripts ├── 01-prep-aws-objects.sh ├── 01-prep-azure-objects.sh ├── 01-prep-vsphere-objects.sh ├── 02-deploy-aws-mgmt-cluster.sh ├── 02-deploy-azure-mgmt-cluster.sh ├── 02-deploy-vsphere-mgmt-cluster.sh ├── 03-post-deploy-mgmt-cluster.sh ├── add-dockerhub-pull-secret.sh ├── apply-acme-fitness-quota.sh ├── create-dns-zone.sh ├── dataprotection.sh ├── delete-all.sh ├── deploy-all-workload-cluster-components.sh ├── deploy-all.sh ├── deploy-cert-manager.sh ├── deploy-tanzu-standard-package-repo.sh ├── deploy-wavefront-tracing.sh ├── deploy-wavefront.sh ├── deploy-workload-cluster.sh ├── generate-acme-fitness-yaml.sh ├── generate-and-apply-argocd-yaml.sh ├── generate-and-apply-cluster-issuer-yaml.sh ├── generate-and-apply-concourse-yaml.sh ├── generate-and-apply-contour-yaml.sh ├── generate-and-apply-elasticsearch-kibana-yaml.sh ├── generate-and-apply-external-dns-yaml.sh ├── generate-and-apply-fluent-bit-yaml.sh ├── generate-and-apply-grafana-yaml.sh ├── generate-and-apply-harbor-yaml.sh ├── generate-and-apply-minio-yaml.sh ├── generate-and-apply-prometheus-yaml.sh ├── generate-and-apply-tmc-acme-fitness-yaml.sh ├── generate-gitlab.sh ├── inject-dex-client-kubeapps.sh ├── prep-cluster-overlays.sh ├── register-cluster-argocd.sh ├── retrieve-acme-fitness-source.sh ├── retrieve-lets-encrypt-ca-cert.sh ├── set-env.sh ├── tmc-attach.sh ├── tmc-policy.sh ├── tmc-register-mc.sh ├── update-pinniped-configuration.sh └── velero.sh ├── terraform ├── .gitignore └── aws │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── tkg-extensions-mods-examples ├── authentication │ └── pinniped │ │ ├── pinniped-certificate.yaml │ │ └── pinniped-ingress.yaml ├── ingress │ └── contour │ │ ├── contour-cluster-issuer-dns-aws.yaml │ │ ├── contour-cluster-issuer-dns-azure.yaml │ │ └── contour-cluster-issuer-dns-gcloud.yaml ├── monitoring │ ├── grafana-cert.yaml │ └── prometheus-cert.yaml ├── registry │ └── harbor │ │ ├── 02-certs.yaml │ │ └── overlay-timeout-increase.yaml └── service-discovery │ └── external-dns │ ├── external-dns-data-values-aws-with-contour.yaml.example │ ├── external-dns-data-values-azure-with-contour.yaml.example │ ├── external-dns-data-values-google-with-contour.yaml.example │ └── metrics-overlay.yaml ├── tmc └── config │ ├── namespace │ └── tkg-wlc-acme-fitness.yaml │ └── workspace │ └── acme-fitness-dev.yaml └── wavefront └── wf-preprocessor.yml 
/.gitignore: -------------------------------------------------------------------------------- 1 | keys/* 2 | !keys/.gitkeep 3 | config.yaml 4 | .secrets 5 | tkg-extensions 6 | tkg-extensions-manifests*.gz 7 | acme_fitness_demo 8 | tmc.token 9 | sensitive/ 10 | params.yaml 11 | generated/ 12 | .DS_Store 13 | local-config/* 14 | !local-config/.gitkeep 15 | crashd/* 16 | !crashd/.gitkeep 17 | tkg-mgmt.diagnostics.tar.gz 18 | .envrc 19 | .vscode/ 20 | -------------------------------------------------------------------------------- /acme-fitness/acme-fitness-mongodata-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: mongodata 5 | namespace: acme-fitness 6 | labels: 7 | app: acmefit 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 1Gi -------------------------------------------------------------------------------- /acme-fitness/acme-fitness-namespace-settings.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ResourceQuota 3 | metadata: 4 | name: acme-fitness 5 | namespace: acme-fitness 6 | spec: 7 | hard: 8 | requests.cpu: "3" 9 | requests.memory: 4Gi 10 | requests.storage: 10Gi 11 | limits.cpu: "3" 12 | limits.memory: 4Gi 13 | --- 14 | apiVersion: v1 15 | kind: LimitRange 16 | metadata: 17 | name: acme-fitness 18 | namespace: acme-fitness 19 | spec: 20 | limits: 21 | - default: 22 | cpu: "100m" 23 | memory: "100Mi" 24 | defaultRequest: 25 | cpu: "100m" 26 | memory: "100Mi" 27 | type: Container 28 | -------------------------------------------------------------------------------- /acme-fitness/acme-fitness-secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | labels: 5 | app: acmefit 6 | name: order-postgres-pass 7 | namespace: acme-fitness 8 | data: 9 | password: S2VlcEl0U2ltcGxlMSE= 10 | type: Opaque 11 | --- 12 | apiVersion: v1 13 | kind: Secret 14 | metadata: 15 | labels: 16 | app: acmefit 17 | name: cart-redis-pass 18 | namespace: acme-fitness 19 | data: 20 | password: S2VlcEl0U2ltcGxlMSE= 21 | type: Opaque 22 | --- 23 | apiVersion: v1 24 | kind: Secret 25 | metadata: 26 | labels: 27 | app: acmefit 28 | name: catalog-mongo-pass 29 | namespace: acme-fitness 30 | data: 31 | password: S2VlcEl0U2ltcGxlMSE= 32 | type: Opaque 33 | --- 34 | apiVersion: v1 35 | kind: Secret 36 | metadata: 37 | labels: 38 | app: acmefit 39 | name: users-mongo-pass 40 | namespace: acme-fitness 41 | data: 42 | password: S2VlcEl0U2ltcGxlMSE= 43 | type: Opaque 44 | --- 45 | apiVersion: v1 46 | kind: Secret 47 | metadata: 48 | labels: 49 | app: acmefit 50 | name: users-redis-pass 51 | namespace: acme-fitness 52 | data: 53 | password: S2VlcEl0U2ltcGxlMSE= 54 | type: Opaque 55 | -------------------------------------------------------------------------------- /acme-fitness/app-label-overlay.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | 3 | #@overlay/match by=overlay.all, expects="1+" 4 | --- 5 | metadata: 6 | #@overlay/match missing_ok=True 7 | namespace: acme-fitness 8 | #@overlay/match missing_ok=True 9 | labels: 10 | #@overlay/match missing_ok=True 11 | app: acmefit 12 | 13 | #! 
By adding this label to pods, the app will appear in TMC 14 | #@overlay/match by=overlay.subset({"kind": "Deployment"}), expects="1+" 15 | --- 16 | spec: 17 | template: 18 | metadata: 19 | #@overlay/match missing_ok=True 20 | labels: 21 | #@overlay/match missing_ok=True 22 | app.kubernetes.io/part-of: acme-fitness 23 | -------------------------------------------------------------------------------- /acme-fitness/catalog-db-volume-overlay.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | 3 | #@overlay/match by=overlay.subset({"kind": "Deployment","metadata":{"name":"catalog-mongo"}}) 4 | --- 5 | spec: 6 | template: 7 | spec: 8 | volumes: 9 | #@overlay/match by="name" 10 | - name: mongodata 11 | #@overlay/remove 12 | emptyDir: {} 13 | #@overlay/match missing_ok=True 14 | persistentVolumeClaim: 15 | claimName: mongodata 16 | -------------------------------------------------------------------------------- /acme-fitness/jaeger-daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: jaeger-agent 5 | labels: 6 | app: jaeger-agent 7 | jaeger-infra: agent-daemonset 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: jaeger-agent 12 | template: 13 | metadata: 14 | labels: 15 | app: jaeger-agent 16 | spec: 17 | dnsPolicy: ClusterFirstWithHostNet 18 | hostNetwork: true 19 | containers: 20 | - name: jaeger-agent 21 | image: jaegertracing/jaeger-agent:1.16.0 22 | imagePullPolicy: IfNotPresent 23 | args: 24 | - --reporter.tchannel.host-port=wavefront-proxy.wavefront:30001 25 | - --reporter.type=tchannel 26 | - --log-level=debug 27 | ports: 28 | - containerPort: 5778 29 | name: config-rest 30 | protocol: TCP 31 | - containerPort: 6831 32 | name: jg-compact-trft 33 | protocol: UDP 34 | - containerPort: 6832 35 | name: jg-binary-trft 36 | protocol: UDP 37 | - containerPort: 14271 38 | name: admin-http 39 | protocol: TCP 40 | -------------------------------------------------------------------------------- /acme-fitness/template/acme-fitness-frontend-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: frontend 5 | namespace: acme-fitness 6 | labels: 7 | app: acmefit 8 | annotations: 9 | cert-manager.io/cluster-issuer: letsencrypt-contour-cluster-issuer 10 | ingress.kubernetes.io/force-ssl-redirect: "true" 11 | kubernetes.io/ingress.class: contour 12 | kubernetes.io/tls-acme: "true" 13 | spec: 14 | tls: 15 | - secretName: acme-fitness-tls 16 | hosts: 17 | - acme-fitness.wlc-1.tkg-aws-lab.winterfell.live 18 | rules: 19 | - host: acme-fitness.wlc-1.tkg-aws-lab.winterfell.live 20 | http: 21 | paths: 22 | - pathType: Prefix 23 | path: "/" 24 | backend: 25 | service: 26 | name: frontend 27 | port: 28 | number: 80 29 | -------------------------------------------------------------------------------- /argocd/01-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: argocd 5 | -------------------------------------------------------------------------------- /argocd/02-serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: argocd 5 | namespace: argocd 6 | 
-------------------------------------------------------------------------------- /argocd/03-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: argocd 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: cluster-admin 9 | subjects: 10 | - kind: ServiceAccount 11 | name: argocd 12 | namespace: argocd 13 | -------------------------------------------------------------------------------- /argocd/fortune-teller/base/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: fortune-app 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | app: fortune-app 10 | template: 11 | metadata: 12 | name: fortune-app 13 | labels: 14 | app: fortune-app 15 | annotations: 16 | prometheus.io/path: '/actuator/prometheus' 17 | prometheus.io/port: '8080' 18 | prometheus.io/scrape: 'true' 19 | spec: 20 | containers: 21 | - image: azwickey/fortune-demo:latest 22 | name: fortune-app 23 | ports: 24 | - containerPort: 8080 25 | protocol: TCP 26 | env: 27 | - name: SPRING_REDIS_HOST 28 | value: "fortune-redis-master.default.svc.cluster.local" 29 | -------------------------------------------------------------------------------- /argocd/fortune-teller/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - deployment.yaml 3 | - service.yaml 4 | -------------------------------------------------------------------------------- /argocd/fortune-teller/base/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: fortune-service 5 | labels: 6 | app: fortune-service 7 | spec: 8 | ports: 9 | - port: 80 10 | targetPort: 8080 11 | name: boot 12 | selector: 13 | app: fortune-app 14 | type: ClusterIP 15 | -------------------------------------------------------------------------------- /argocd/fortune-teller/dev/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: fortune-app 5 | spec: 6 | replicas: 2 -------------------------------------------------------------------------------- /argocd/fortune-teller/dev/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - ./../base 3 | namePrefix: dev- 4 | namespace: fortune-app-development -------------------------------------------------------------------------------- /argocd/fortune-teller/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - ./dev 3 | - ./production 4 | 5 | namePrefix: cluster-a- 6 | -------------------------------------------------------------------------------- /argocd/fortune-teller/production/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: fortune-app 5 | spec: 6 | replicas: 4 7 | -------------------------------------------------------------------------------- /argocd/fortune-teller/production/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - ./../base 3 | namePrefix: prod- 4 
| patches: 5 | - service.yaml 6 | - deployment.yaml 7 | namespace: fortune-app-production -------------------------------------------------------------------------------- /argocd/fortune-teller/production/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: fortune-service 5 | spec: 6 | type: LoadBalancer 7 | -------------------------------------------------------------------------------- /argocd/httpproxy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: projectcontour.io/v1 2 | kind: HTTPProxy 3 | metadata: 4 | name: argocd 5 | namespace: argocd 6 | spec: 7 | virtualhost: 8 | fqdn: YOUR_VALUE 9 | tls: 10 | passthrough: true 11 | tcpproxy: 12 | services: 13 | - name: argocd-server 14 | port: 443 15 | -------------------------------------------------------------------------------- /argocd/values.yaml: -------------------------------------------------------------------------------- 1 | configs: 2 | secret: 3 | # Argo expects the password in the secret to be bcrypt hashed. You can create this hash with 4 | # `htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/'` 5 | argocdServerAdminPassword: BCRYPTED_PWORD 6 | installCRDs: false 7 | server: 8 | certificate: 9 | enabled: true 10 | domain: ARGOCD_CN 11 | issuer: 12 | name: letsencrypt-contour-cluster-issuer 13 | kind: ClusterIssuer 14 | -------------------------------------------------------------------------------- /capi-overrides/disable-aws-bastion.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | #@overlay/match by=overlay.subset({"kind":"AWSCluster"}) 3 | --- 4 | spec: 5 | #@overlay/match missing_ok=True 6 | bastion: 7 | enabled: false -------------------------------------------------------------------------------- /cluster-overlays/tkg/providers/infrastructure-vsphere/ytt/mtu-default-values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | CUSTOM_MTU: -------------------------------------------------------------------------------- /cluster-overlays/tkg/providers/infrastructure-vsphere/ytt/mtu.yaml: -------------------------------------------------------------------------------- 1 | #! Purpose: In the event you need to manually override the default 1500 mtu settings for your node. 2 | #! Implement: Set the CUSTOM_MTU parameter as an environment variable or in cluster config. 3 | #! CUSTOM_MTU: 1300 4 | #! Test: You can validate by ssh'ing onto the cluster nodes 5 | #!
$ ip addr 6 | 7 | #@ load("@ytt:overlay", "overlay") 8 | #@ load("@ytt:data", "data") 9 | 10 | #@ if data.values.CUSTOM_MTU != None: 11 | 12 | #@overlay/match by=overlay.subset({"kind":"VSphereMachineTemplate"}) 13 | --- 14 | spec: 15 | template: 16 | spec: 17 | network: 18 | devices: 19 | #@overlay/match by=overlay.all, expects="1+" 20 | - 21 | #@overlay/match missing_ok=True 22 | mtu: #@ data.values.CUSTOM_MTU 23 | 24 | #@ end 25 | -------------------------------------------------------------------------------- /cluster-overlays/tkg/providers/infrastructure-vsphere/ytt/nameservers-default-values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | CUSTOM_NAMESERVERS: -------------------------------------------------------------------------------- /cluster-overlays/tkg/providers/infrastructure-vsphere/ytt/nameservers.yaml: -------------------------------------------------------------------------------- 1 | #! Purpose: In the event your nameserver information is not communicated to the nodes via DHCP Option 6 2 | #! you can manually set desired nameservers. 3 | #! Implement: Set the CUSTOM_NAMESERVERS parameter as an environment variable or in cluster config. 4 | #! For multiple servers, separate with a comma. 5 | #! CUSTOM_NAMESERVERS: "8.8.8.8,name.google.com" 6 | #! Test: You can validate by ssh'ing onto the cluster nodes 7 | #! $ resolvectl status 8 | 9 | #@ load("@ytt:overlay", "overlay") 10 | #@ load("@ytt:data", "data") 11 | 12 | #@ if data.values.CUSTOM_NAMESERVERS != None: 13 | 14 | #@overlay/match by=overlay.subset({"kind":"VSphereMachineTemplate"}) 15 | --- 16 | spec: 17 | template: 18 | spec: 19 | network: 20 | devices: 21 | #@overlay/match by=overlay.all, expects="1+" 22 | - 23 | #@overlay/match missing_ok=True 24 | nameservers: #@ data.values.CUSTOM_NAMESERVERS.split(",") 25 | 26 | #@overlay/match by=overlay.subset({"kind":"KubeadmControlPlane"}) 27 | --- 28 | spec: 29 | kubeadmConfigSpec: 30 | preKubeadmCommands: 31 | #! disable dns from being emitted by dhcp client 32 | #@overlay/append 33 | - echo '[DHCPv4]' >> /etc/systemd/network/10-id0.network 34 | #@overlay/append 35 | - echo 'UseDNS=no' >> /etc/systemd/network/10-id0.network 36 | #@overlay/append 37 | - '/usr/bin/systemctl restart systemd-networkd 2>/dev/null' 38 | 39 | #@overlay/match by=overlay.subset({"kind":"KubeadmConfigTemplate"}) 40 | --- 41 | spec: 42 | template: 43 | spec: 44 | #@overlay/match missing_ok=True 45 | preKubeadmCommands: 46 | #! disable dns from being emitted by dhcp client 47 | #@overlay/append 48 | - echo '[DHCPv4]' >> /etc/systemd/network/10-id0.network 49 | #@overlay/append 50 | - echo 'UseDNS=no' >> /etc/systemd/network/10-id0.network 51 | #@overlay/append 52 | - '/usr/bin/systemctl restart systemd-networkd 2>/dev/null' 53 | 54 | #@ end 55 | -------------------------------------------------------------------------------- /cluster-overlays/tkg/providers/infrastructure-vsphere/ytt/search-domain-default-values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | CUSTOM_SEARCH_DOMAIN: -------------------------------------------------------------------------------- /cluster-overlays/tkg/providers/infrastructure-vsphere/ytt/search-domain.yaml: -------------------------------------------------------------------------------- 1 | #! Purpose: In the event your search domain information is not communicated to the nodes via DHCP Option 15 2 | #!
you can manually set desired search domains. 3 | #! Implement: Set the CUSTOM_SEARCH_DOMAIN parameter as an environment variable or in cluster config. 4 | #! For multiple domains, separate with a comma. 5 | #! CUSTOM_SEARCH_DOMAIN: "acme.com,acme.org" 6 | #! Test: You can validate by ssh'ing onto the cluster nodes 7 | #! $ resolvectl status 8 | 9 | #@ load("@ytt:overlay", "overlay") 10 | #@ load("@ytt:data", "data") 11 | 12 | #@ if data.values.CUSTOM_SEARCH_DOMAIN != None: 13 | 14 | #@overlay/match by=overlay.subset({"kind":"VSphereMachineTemplate"}) 15 | --- 16 | spec: 17 | template: 18 | spec: 19 | network: 20 | devices: 21 | #@overlay/match by=overlay.all, expects="1+" 22 | - 23 | #@overlay/match missing_ok=True 24 | searchDomains: #@ data.values.CUSTOM_SEARCH_DOMAIN.split(",") 25 | 26 | #@ end 27 | -------------------------------------------------------------------------------- /cluster-overlays/tkg/providers/ytt/04_user_customizations/ntp-servers-default-values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | CUSTOM_NTP_SERVERS: -------------------------------------------------------------------------------- /cluster-overlays/tkg/providers/ytt/04_user_customizations/ntp-servers.yaml: -------------------------------------------------------------------------------- 1 | #! Purpose: In the event your NTP server information is not communicated to the nodes via DHCP Option 42 2 | #! you can manually set desired ntp servers. 3 | #! Implement: Set the CUSTOM_NTP_SERVERS parameter as an environment variable or in cluster config. 4 | #! For multiple servers, separate with a comma. 5 | #! CUSTOM_NTP_SERVERS: "122.123.134.12,time.google.com" 6 | #! Test: You can validate by ssh'ing onto the cluster nodes 7 | #! $ chronyc sources 8 | #! $ cat /etc/chrony/chrony.conf 9 | #! Notes: As of 6/25/2021, this only works with Ubuntu. Photon does not use chrony. 10 | 11 | #@ load("@ytt:overlay", "overlay") 12 | #@ load("@ytt:data", "data") 13 | 14 | #@ if data.values.CUSTOM_NTP_SERVERS != None: 15 | 16 | #@overlay/match by=overlay.subset({"kind":"KubeadmControlPlane"}) 17 | --- 18 | spec: 19 | #@overlay/match-child-defaults missing_ok=True 20 | kubeadmConfigSpec: 21 | ntp: 22 | enabled: true 23 | servers: #@ data.values.CUSTOM_NTP_SERVERS.split(",") 24 | 25 | #@overlay/match by=overlay.subset({"kind":"KubeadmConfigTemplate"}),expects="1+" 26 | --- 27 | spec: 28 | template: 29 | #@overlay/match-child-defaults missing_ok=True 30 | spec: 31 | ntp: 32 | enabled: true 33 | servers: #@ data.values.CUSTOM_NTP_SERVERS.split(",") 34 | 35 | #@ end 36 | -------------------------------------------------------------------------------- /cluster-overlays/tkg/providers/ytt/04_user_customizations/trusted-certs-default-values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | CUSTOM_TRUSTED_CERTS_B64: -------------------------------------------------------------------------------- /cluster-overlays/tkg/providers/ytt/04_user_customizations/trusted-certs.yaml: -------------------------------------------------------------------------------- 1 | #! Purpose: In the event you need additional custom CA certificates, 2 | #! so that containerd and other tools trust these CA certificates. 3 | #! It works when using Photon or Ubuntu as the TKG node template on all TKG infrastructure providers. 4 | #! Implement: Set the CUSTOM_TRUSTED_CERTS_B64 parameter as an environment variable or in cluster config.
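#! For example, one way to produce the value from a PEM bundle (assumes GNU coreutils base64; the bundle file name below is just a placeholder):
#!   export CUSTOM_TRUSTED_CERTS_B64=$(base64 -w0 < my-ca-bundle.pem)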
5 | #! For multiple CAs, put them together in a file and then base64 the total content 6 | #! CUSTOM_TRUSTED_CERTS_B64: cmVndWxhcg== 7 | #! Test: You can validate by ssh'ing onto the cluster nodes and then attempting to curl a target endpoint 8 | #! You will see that the SSL session is successfully negotiated. 9 | #! $ curl -v https:// 10 | 11 | #@ load("@ytt:overlay", "overlay") 12 | #@ load("@ytt:data", "data") 13 | #@ load("@ytt:base64", "base64") 14 | 15 | #@ if data.values.CUSTOM_TRUSTED_CERTS_B64 != None: 16 | 17 | #! Trust your custom CA certificates on all Control Plane nodes. 18 | #@overlay/match by=overlay.subset({"kind":"KubeadmControlPlane"}) 19 | --- 20 | spec: 21 | kubeadmConfigSpec: 22 | #@overlay/match missing_ok=True 23 | files: 24 | #@overlay/append 25 | - content: #@ base64.decode(data.values.CUSTOM_TRUSTED_CERTS_B64) 26 | owner: root:root 27 | permissions: "0644" 28 | path: /etc/ssl/certs/tkg-custom-ca.pem 29 | #@overlay/match missing_ok=True 30 | preKubeadmCommands: 31 | #@ if data.values.OS_NAME == "photon": 32 | #@overlay/append 33 | - '! which rehash_ca_certificates.sh 2>/dev/null || rehash_ca_certificates.sh' 34 | #@ end 35 | #@ if data.values.OS_NAME == "ubuntu": 36 | #@overlay/append 37 | - '! which update-ca-certificates 2>/dev/null || (mv /etc/ssl/certs/tkg-custom-ca.pem /usr/local/share/ca-certificates/tkg-custom-ca.crt && update-ca-certificates)' 38 | #@ end 39 | 40 | #! Trust your custom CA certificates on all worker nodes. 41 | #@overlay/match by=overlay.subset({"kind":"KubeadmConfigTemplate"}) 42 | --- 43 | spec: 44 | template: 45 | spec: 46 | #@overlay/match missing_ok=True 47 | files: 48 | #@overlay/append 49 | - content: #@ base64.decode(data.values.CUSTOM_TRUSTED_CERTS_B64) 50 | owner: root:root 51 | permissions: "0644" 52 | path: /etc/ssl/certs/tkg-custom-ca.pem 53 | #@overlay/match missing_ok=True 54 | preKubeadmCommands: 55 | #@ if data.values.OS_NAME == "photon": 56 | #@overlay/append 57 | - '! which rehash_ca_certificates.sh 2>/dev/null || rehash_ca_certificates.sh' 58 | #@ end 59 | #@ if data.values.OS_NAME == "ubuntu": 60 | #@overlay/append 61 | - '!
 which update-ca-certificates 2>/dev/null || (mv /etc/ssl/certs/tkg-custom-ca.pem /usr/local/share/ca-certificates/tkg-custom-ca.crt && update-ca-certificates)' 62 | #@ end 63 | 64 | #@ end 65 | -------------------------------------------------------------------------------- /concourse/common-secrets.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | #@ load("@ytt:base64", "base64") 3 | apiVersion: v1 4 | kind: Secret 5 | metadata: 6 | name: common-secrets 7 | namespace: concourse-main 8 | type: Opaque 9 | data: 10 | name: #@ base64.encode("world") 11 | -------------------------------------------------------------------------------- /concourse/concourse-values-contour-template.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | concourse: 3 | web: 4 | externalUrl: https://CONCOURSE_URL # TO BE REPLACED 5 | auth: 6 | mainTeam: 7 | localUser: "admin" 8 | oidc: 9 | group: platform-team 10 | oidc: 11 | displayName: Okta 12 | enabled: true 13 | groupsKey: groups 14 | userNameKey: email 15 | issuer: https://OKTA_AUTH_SERVER_CN/oauth2/default 16 | scope: openid,profile,email,groups,offline_access 17 | skipSslValidation: false 18 | 19 | secrets: 20 | oidcClientId: OKTA_CONCOURSE_APP_CLIENT_ID # TO BE REPLACED 21 | oidcClientSecret: OKTA_CONCOURSE_APP_CLIENT_SECRET # TO BE REPLACED 22 | # CERT WILL BE REPLACED. This did not work as expected through testing, so we additionally use ytt to overlay the Let's Encrypt Cert as a mounted volume on the worker pods 23 | oidcCaCert: OKTA_AUTH_SERVER_CA_CERT 24 | localUsers: "admin:ADMIN_PASSWORD" # TO BE REPLACED 25 | 26 | web: 27 | env: 28 | # Add this env variable for OIDC providers like Okta that don't support the email_verified claim 29 | # https://github.com/concourse/concourse/releases/tag/v6.7.5 30 | - name: CONCOURSE_OIDC_SKIP_EMAIL_VERIFIED_VALIDATION 31 | value: "true" 32 | ingress: 33 | enabled: true 34 | annotations: 35 | kubernetes.io/ingress.class: "contour" 36 | cert-manager.io/cluster-issuer: "letsencrypt-contour-cluster-issuer" 37 | projectcontour.io/websocket-routes: "/" 38 | ingress.kubernetes.io/force-ssl-redirect: true 39 | hosts: 40 | - CONCOURSE_URL # TO BE REPLACED 41 | tls: 42 | - hosts: 43 | - CONCOURSE_URL # TO BE REPLACED 44 | secretName: concourse-cert 45 | -------------------------------------------------------------------------------- /concourse/test-pipeline.yaml: -------------------------------------------------------------------------------- 1 | 2 | jobs: 3 | - name: hello-world 4 | plan: 5 | - do: 6 | - task: hello-world 7 | config: 8 | platform: linux 9 | image_resource: 10 | type: docker-image 11 | source: 12 | repository: ubuntu 13 | run: 14 | path: sh 15 | args: 16 | - -exc 17 | - | 18 | echo "Hello $WORLD_PARAM" 19 | params: 20 | WORLD_PARAM: ((common-secrets.name)) -------------------------------------------------------------------------------- /config-templates/aws-mc-config.yaml: -------------------------------------------------------------------------------- 1 | AWS_PRIVATE_SUBNET_ID: # Added by scripts 2 | AWS_PUBLIC_SUBNET_ID: # Added by scripts 3 | AWS_VPC_ID: # Added by scripts 4 | 5 | AWS_REGION: # Added by scripts 6 | AWS_SSH_KEY_NAME: # Added by scripts 7 | CLUSTER_NAME: # Added by scripts 8 | OIDC_IDENTITY_PROVIDER_CLIENT_ID: # Added by scripts 9 | OIDC_IDENTITY_PROVIDER_CLIENT_SECRET: # Added by scripts 10 | OIDC_IDENTITY_PROVIDER_ISSUER_URL: # Added by scripts 11 | WORKER_MACHINE_COUNT: # Added by
scripts 12 | 13 | INFRASTRUCTURE_PROVIDER: aws 14 | CLUSTER_PLAN: dev # default is blank 15 | CONTROL_PLANE_MACHINE_TYPE: # Added by scripts by pulling aws.control-plane-machine-type in params.yaml 16 | NODE_MACHINE_TYPE: # Added by scripts by pulling aws.node-machine-type in params.yaml 17 | 18 | TKG_HTTP_PROXY_ENABLED: "false" # default is blank 19 | ENABLE_AUDIT_LOGGING: "true" 20 | ENABLE_MHC: "true" # default is blank 21 | ENABLE_CEIP_PARTICIPATION: "false" # default is true 22 | BASTION_HOST_ENABLED: "true" # this is default but good to be explicit 23 | IDENTITY_MANAGEMENT_TYPE: oidc # this is default but good to be explicit 24 | 25 | # Following are explicit opinions set for this lab associated for OIDC 26 | OIDC_IDENTITY_PROVIDER_GROUPS_CLAIM: groups 27 | OIDC_IDENTITY_PROVIDER_NAME: "okta" 28 | OIDC_IDENTITY_PROVIDER_SCOPES: openid,profile,email,groups,offline_access 29 | OIDC_IDENTITY_PROVIDER_USERNAME_CLAIM: email 30 | -------------------------------------------------------------------------------- /config-templates/aws-workload-cluster-config.yaml: -------------------------------------------------------------------------------- 1 | AWS_REGION: # Added by scripts 2 | AWS_SSH_KEY_NAME: # Added by scripts 3 | CLUSTER_NAME: # Added by scripts 4 | AWS_VPC_ID: # Added by scripts 5 | AWS_PUBLIC_SUBNET_ID: # Added by scripts 6 | AWS_PRIVATE_SUBNET_ID: # Added by scripts 7 | WORKER_MACHINE_COUNT: # Added by scripts 8 | 9 | CONTROL_PLANE_MACHINE_TYPE: # Added by scripts by pulling aws.control-plane-machine-type in params.yaml 10 | NODE_MACHINE_TYPE: # Added by scripts by pulling aws.node-machine-type in params.yaml 11 | 12 | CLUSTER_PLAN: dev # default is blank 13 | TKG_HTTP_PROXY_ENABLED: "false" # default is blank 14 | ENABLE_MHC: "true" # default is blank 15 | BASTION_HOST_ENABLED: false # default is true 16 | IDENTITY_MANAGEMENT_TYPE: oidc 17 | -------------------------------------------------------------------------------- /config-templates/azure-mc-config.yaml: -------------------------------------------------------------------------------- 1 | AZURE_CLIENT_ID: 2 | AZURE_CLIENT_SECRET: 3 | AZURE_CONTROL_PLANE_MACHINE_TYPE: # Added by scripts by pulling azure.control-plane-machine-type in params.yaml 4 | AZURE_LOCATION: 5 | AZURE_NODE_MACHINE_TYPE: # Added by scripts by pulling azure.node-machine-type in params.yaml 6 | AZURE_SSH_PUBLIC_KEY_B64: 7 | AZURE_SUBSCRIPTION_ID: 8 | AZURE_TENANT_ID: 9 | CLUSTER_NAME: 10 | 11 | # Defaults 12 | IDENTITY_MANAGEMENT_TYPE: oidc 13 | OIDC_IDENTITY_PROVIDER_GROUPS_CLAIM: groups 14 | OIDC_IDENTITY_PROVIDER_SCOPES: openid,profile,email,groups,offline_access 15 | OIDC_IDENTITY_PROVIDER_USERNAME_CLAIM: email 16 | INFRASTRUCTURE_PROVIDER: azure 17 | CLUSTER_PLAN: dev 18 | TKG_HTTP_PROXY_ENABLED: "false" 19 | ENABLE_AUDIT_LOGGING: "true" 20 | ENABLE_CEIP_PARTICIPATION: "false" 21 | ENABLE_MHC: "true" 22 | AZURE_CUSTOM_TAGS: "created-by=tkg" 23 | -------------------------------------------------------------------------------- /config-templates/azure-workload-cluster-config.yaml: -------------------------------------------------------------------------------- 1 | AZURE_SSH_PUBLIC_KEY_B64: 2 | AZURE_CLIENT_ID: 3 | AZURE_CLIENT_SECRET: 4 | AZURE_LOCATION: 5 | AZURE_SUBSCRIPTION_ID: 6 | AZURE_TENANT_ID: 7 | 8 | AZURE_CONTROL_PLANE_MACHINE_TYPE: # Added by scripts by pulling azure.control-plane-machine-type in params.yaml 9 | AZURE_NODE_MACHINE_TYPE: # Added by scripts by pulling azure.node-machine-type in params.yaml 10 | 11 | CLUSTER_PLAN: dev 12 | 
ENABLE_AUDIT_LOGGING: true 13 | ENABLE_DEFAULT_STORAGE_CLASS: true 14 | IDENTITY_MANAGEMENT_TYPE: oidc 15 | AZURE_CUSTOM_TAGS: "created-by=tkg" 16 | AZURE_ENABLE_ACCELERATED_NETWORKING: "true" 17 | -------------------------------------------------------------------------------- /config-templates/vsphere-mc-config.yaml: -------------------------------------------------------------------------------- 1 | # To be changed by scripts based on params.yaml 2 | AVI_CA_DATA_B64: #"changeme" 3 | AVI_CLOUD_NAME: #Default-Cloud 4 | AVI_CONTROLLER: #192.168.14.190 5 | AVI_DATA_NETWORK: #VIP-VLAN15-PG 6 | AVI_DATA_NETWORK_CIDR: #192.168.15.0/24 7 | AVI_LABELS: #"" 8 | AVI_MANAGEMENT_CLUSTER_VIP_NETWORK_CIDR: #192.168.15.0/24 9 | AVI_MANAGEMENT_CLUSTER_VIP_NETWORK_NAME: #VIP-VLAN15-PG 10 | AVI_PASSWORD: "" #"changeme" # Need this value to be quoted for yq parsing 11 | AVI_SERVICE_ENGINE_GROUP: #Default-Group 12 | AVI_USERNAME: #admin 13 | CLUSTER_NAME: #gerudo 14 | VSPHERE_CONTROL_PLANE_ENDPOINT: #192.168.14.185 15 | VSPHERE_SERVER: #vcenter7.hyrulelab.com 16 | VSPHERE_USERNAME: #administrator@vsphere.local 17 | VSPHERE_PASSWORD: "" #"changeme" # Need this value to be quoted for yq parsing 18 | VSPHERE_DATASTORE: #/Datacenter/datastore/datastore 19 | VSPHERE_FOLDER: #/Datacenter/vm/tkg 20 | VSPHERE_DATACENTER: #/Datacenter 21 | VSPHERE_NETWORK: #TKG-VLAN14-PG 22 | VSPHERE_RESOURCE_POOL: #/Datacenter/host/Cluster/Resources/tkg 23 | VSPHERE_SSH_AUTHORIZED_KEY: #changeme 24 | VSPHERE_TLS_THUMBPRINT: #changeme 25 | OS_NAME: #photon 26 | OS_VERSION: #3 27 | OIDC_IDENTITY_PROVIDER_CLIENT_ID: #changeme 28 | OIDC_IDENTITY_PROVIDER_CLIENT_SECRET: #changeme 29 | OIDC_IDENTITY_PROVIDER_ISSUER_URL: #https://dev-281476.okta.com 30 | 31 | # Defaults 32 | AVI_ENABLE: "true" # "true" # Need this value to be quoted for yq parsing 33 | AVI_CONTROL_PLANE_HA_PROVIDER: "true" 34 | IDENTITY_MANAGEMENT_TYPE: oidc 35 | OIDC_IDENTITY_PROVIDER_GROUPS_CLAIM: groups 36 | OIDC_IDENTITY_PROVIDER_NAME: "okta" 37 | OIDC_IDENTITY_PROVIDER_SCOPES: openid,profile,email,groups,offline_access 38 | OIDC_IDENTITY_PROVIDER_USERNAME_CLAIM: email 39 | INFRASTRUCTURE_PROVIDER: vsphere 40 | CLUSTER_PLAN: dev 41 | TKG_HTTP_PROXY_ENABLED: "false" 42 | ENABLE_AUDIT_LOGGING: "true" 43 | ENABLE_CEIP_PARTICIPATION: "false" 44 | ENABLE_MHC: "true" 45 | VSPHERE_CONTROL_PLANE_DISK_GIB: "40" 46 | VSPHERE_CONTROL_PLANE_MEM_MIB: "4096" 47 | VSPHERE_CONTROL_PLANE_NUM_CPUS: "3" 48 | VSPHERE_WORKER_DISK_GIB: "40" 49 | VSPHERE_WORKER_MEM_MIB: "4096" 50 | VSPHERE_WORKER_NUM_CPUS: "3" 51 | DEPLOY_TKG_ON_VSPHERE7: true 52 | ENABLE_TKGS_ON_VSPHERE7: false 53 | -------------------------------------------------------------------------------- /config-templates/vsphere-workload-cluster-config.yaml: -------------------------------------------------------------------------------- 1 | # To be changed by scripts based on params.yaml 2 | CLUSTER_NAME: # rito 3 | VSPHERE_SERVER: #vcenter7.hyrulelab.com 4 | VSPHERE_USERNAME: #administrator@vsphere.local 5 | VSPHERE_PASSWORD: "" #"changeme" # Need this value to be quoted for yq parsing 6 | VSPHERE_CONTROL_PLANE_ENDPOINT: # 192.168.14.185 7 | VSPHERE_DATASTORE: #/Datacenter/datastore/datastore 8 | VSPHERE_FOLDER: #/Datacenter/vm/tkg 9 | VSPHERE_DATACENTER: #/Datacenter 10 | VSPHERE_RESOURCE_POOL: #/Datacenter/host/Cluster/Resources/tkg 11 | VSPHERE_SSH_AUTHORIZED_KEY: #changeme 12 | VSPHERE_TLS_THUMBPRINT: #changeme 13 | VSPHERE_NETWORK: #changeme 14 | OS_NAME: #photon 15 | OS_VERSION: #3 16 | ANTREA_NODEPORTLOCAL: #changeme 17 | 
 18 | # Defaults 19 | AVI_CONTROL_PLANE_HA_PROVIDER: "true" 20 | INFRASTRUCTURE_PROVIDER: vsphere 21 | CLUSTER_PLAN: dev 22 | ENABLE_AUDIT_LOGGING: "true" 23 | ENABLE_MHC: "true" 24 | VSPHERE_CONTROL_PLANE_DISK_GIB: "40" 25 | VSPHERE_CONTROL_PLANE_MEM_MIB: "4096" 26 | VSPHERE_CONTROL_PLANE_NUM_CPUS: "3" 27 | VSPHERE_WORKER_DISK_GIB: "50" 28 | VSPHERE_WORKER_MEM_MIB: "4096" 29 | VSPHERE_WORKER_NUM_CPUS: "4" 30 | IDENTITY_MANAGEMENT_TYPE: oidc 31 | -------------------------------------------------------------------------------- /crashd/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/crashd/.gitkeep -------------------------------------------------------------------------------- /dex/01-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: dex 5 | -------------------------------------------------------------------------------- /dex/dex-values.yaml: -------------------------------------------------------------------------------- 1 | ingress: 2 | enabled: true 3 | 4 | annotations: 5 | cert-manager.io/cluster-issuer: letsencrypt-contour-cluster-issuer 6 | ingress.kubernetes.io/force-ssl-redirect: "true" 7 | kubernetes.io/ingress.class: contour 8 | kubernetes.io/tls-acme: "true" 9 | 10 | hosts: 11 | - host: # Dynamically Populated 12 | paths: 13 | - path: / 14 | pathType: ImplementationSpecific 15 | 16 | tls: 17 | - secretName: dex-cert 18 | hosts: 19 | - # Dynamically Populated 20 | 21 | config: 22 | issuer: # Dynamically Populated 23 | expiry: 24 | signingKeys: 360m 25 | idTokens: 180m 26 | logger: 27 | level: info 28 | format: json 29 | staticClients: 30 | - redirectURIs: 31 | - # Dynamically Populated 32 | id: kubeapps 33 | name: kubeapps 34 | secret: FOO_SECRET 35 | connectors: 36 | - type: oidc 37 | id: oidc 38 | name: oidc 39 | config: 40 | issuer: # Dynamically Populated 41 | clientID: # Dynamically Populated 42 | clientSecret: # Dynamically Populated 43 | redirectURI: # Dynamically Populated 44 | scopes: 45 | - openid 46 | - profile 47 | - email 48 | - groups 49 | - offline_access 50 | insecureEnableGroups: true 51 | getUserInfo: true 52 | userNameKey: email 53 | claimMapping: 54 | email: "" 55 | email_verified: email_verified 56 | groups: groups 57 | preferred_username: "" 58 | insecureSkipVerify: false 59 | oauth2: 60 | skipApprovalScreen: true 61 | responseTypes: 62 | - code 63 | storage: 64 | type: kubernetes 65 | config: 66 | inCluster: true 67 | enablePasswordDB: false 68 | -------------------------------------------------------------------------------- /dex/generate-and-apply-dex-yaml.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/../scripts/set-env.sh 5 | 6 | if [ !
$# -eq 1 ]; then 7 | echo "Must supply cluster_name as args" 8 | exit 1 9 | fi 10 | 11 | export CLUSTER_NAME=$1 12 | 13 | export DEX_CN=$(yq e .kubeapps.oidc-issuer-fqdn $PARAMS_YAML) 14 | export DEX_URL=https://$DEX_CN 15 | export DEX_CALLBACK_URL=$DEX_URL/callback 16 | 17 | export OKTA_AUTH_SERVER_URL=https://$(yq e .okta.auth-server-fqdn $PARAMS_YAML) 18 | export OKTA_CLIENT_ID=$(yq e .okta.kubeapps-dex-app-client-id $PARAMS_YAML) 19 | export OKTA_CLIENT_SECRET=$(yq e .okta.kubeapps-dex-app-client-secret $PARAMS_YAML) 20 | 21 | export KUBEAPPS_FQDN=$(yq e .kubeapps.server-fqdn $PARAMS_YAML) 22 | export KUBEAPPS_CALLBACK_URL=https://$KUBEAPPS_FQDN/oauth2/callback 23 | 24 | kubectl config use-context $CLUSTER_NAME-admin@$CLUSTER_NAME 25 | 26 | echo "Beginning Dex install..." 27 | 28 | mkdir -p generated/$CLUSTER_NAME/dex 29 | 30 | kubectl apply -f dex/01-namespace.yaml 31 | 32 | 33 | cp dex/dex-values.yaml generated/$CLUSTER_NAME/dex/dex-values.yaml 34 | yq e -i ".ingress.hosts.[0].host = env(DEX_CN)" generated/$CLUSTER_NAME/dex/dex-values.yaml 35 | yq e -i ".ingress.tls.[0].hosts.[0] = env(DEX_CN)" generated/$CLUSTER_NAME/dex/dex-values.yaml 36 | yq e -i ".config.issuer = env(DEX_URL)" generated/$CLUSTER_NAME/dex/dex-values.yaml 37 | yq e -i ".config.staticClients.[0].redirectURIs.[0] = env(KUBEAPPS_CALLBACK_URL)" generated/$CLUSTER_NAME/dex/dex-values.yaml 38 | yq e -i ".config.connectors.[0].config.issuer = env(OKTA_AUTH_SERVER_URL)" generated/$CLUSTER_NAME/dex/dex-values.yaml 39 | yq e -i ".config.connectors.[0].config.clientID = env(OKTA_CLIENT_ID)" generated/$CLUSTER_NAME/dex/dex-values.yaml 40 | yq e -i ".config.connectors.[0].config.clientSecret = env(OKTA_CLIENT_SECRET)" generated/$CLUSTER_NAME/dex/dex-values.yaml 41 | yq e -i ".config.connectors.[0].config.redirectURI = env(DEX_CALLBACK_URL)" generated/$CLUSTER_NAME/dex/dex-values.yaml 42 | 43 | helm repo add dex https://charts.dexidp.io 44 | helm upgrade --install dex --namespace dex dex/dex -f generated/$CLUSTER_NAME/dex/dex-values.yaml 45 | 46 | -------------------------------------------------------------------------------- /dns/tkg-aws-lab-record-sets-aws.json.template: -------------------------------------------------------------------------------- 1 | { 2 | "Comment": "TKG Record for CNAME", 3 | "Changes": [ 4 | { 5 | "Action": "UPSERT", 6 | "ResourceRecordSet": { 7 | "Name": "FQDN", 8 | "Type": "CNAME", 9 | "TTL": 300, 10 | "ResourceRecords": [ 11 | { 12 | "Value": "LBHOST" 13 | } 14 | ] 15 | } 16 | } 17 | ] 18 | } 19 | -------------------------------------------------------------------------------- /dns/tkg-lab-record-sets.yml: -------------------------------------------------------------------------------- 1 | kind: dns#resourceRecordSet 2 | name: tkg-vsp-lab.hyrulelab.com. 3 | rrdatas: 4 | - ns-cloud-b1.googledomains.com. 5 | - ns-cloud-b2.googledomains.com. 6 | - ns-cloud-b3.googledomains.com. 7 | - ns-cloud-b4.googledomains.com. 8 | ttl: 21600 9 | type: NS 10 | --- 11 | kind: dns#resourceRecordSet 12 | name: tkg-vsp-lab.hyrulelab.com. 13 | rrdatas: 14 | - ns-cloud-b1.googledomains.com. cloud-dns-hostmaster.google.com. 1 21600 3600 259200 15 | 300 16 | ttl: 21600 17 | type: SOA 18 | --- 19 | kind: dns#resourceRecordSet 20 | name: '*.mgmt.tkg-vsp-lab.hyrulelab.com.' 21 | rrdatas: 22 | - 192.168.14.200 23 | ttl: 300 24 | type: A 25 | --- 26 | kind: dns#resourceRecordSet 27 | name: '*.wlc-1.tkg-vsp-lab.hyrulelab.com.' 
 28 | rrdatas: 29 | - 192.168.14.221 30 | ttl: 300 31 | type: A 32 | -------------------------------------------------------------------------------- /docs/acme-fitness-lab/01_okta_setup.md: -------------------------------------------------------------------------------- 1 | # Update Okta for Application Team Users and Group 2 | 3 | Go to your Okta Console. 4 | 5 | Once logged in... 6 | 7 | You will need to go to the Admin interface. If you are in the standard interface by default, choose the green Admin button at the top right. 8 | 9 | ## Create Developer User(s) 10 | 11 | Choose Directory (side menu) > People > Add Person (for each user you add): 12 | - Set First Name, Last Name and Email: (e.g., Cody Smith, cody@winterfell.live) 13 | - Password Set by Admin, YOUR_PASSWORD 14 | - Uncheck user must change password on first login 15 | 16 | > Note: Do this for two users: Cody Smith, cody@winterfell.live; and Naomi Smith, naomi@winterfell.live. Feel free to choose a different domain name. 17 | 18 | ## Create Development Group 19 | 20 | Choose Directory (side menu) > Groups and then > Add Group: 21 | - acme-fitness-devs 22 | 23 | Click on the acme-fitness-devs group > Manage People: then add cody and naomi to the acme-fitness-devs group. Save. 24 | 25 | ## Go to Next Step 26 | 27 | [Set policy on Workload Cluster and Namespace](02_policy_acme.md) 28 | -------------------------------------------------------------------------------- /docs/acme-fitness-lab/02_policy_acme.md: -------------------------------------------------------------------------------- 1 | # Set policy on Workload Cluster and Namespace 2 | 3 | ## Prepare Manifests and Execute Yaml 4 | 5 | Prepare the YAML manifests for the acme-fitness TMC workspace and namespace. Manifests will be output into `generated/$CLUSTER_NAME/tmc/` in case you want to inspect them. 6 | 7 | ```bash 8 | ./scripts/generate-and-apply-tmc-acme-fitness-yaml.sh $(yq e .workload-cluster.name $PARAMS_YAML) 9 | ``` 10 | 11 | ## Set Resource Quota for acme-fitness namespace 12 | 13 | We want to limit the resources that the acme-fitness team can consume on the cluster. Use this script: 14 | 15 | ```bash 16 | ./scripts/apply-acme-fitness-quota.sh 17 | ``` 18 | 19 | ## Go to Next Step 20 | 21 | [Log-in to workload cluster and setup kubeconfig](03-login-kubeconfig.md) 22 | -------------------------------------------------------------------------------- /docs/acme-fitness-lab/03-login-kubeconfig.md: -------------------------------------------------------------------------------- 1 | # Log-in to workload cluster as developer and setup kubeconfig 2 | 3 | ## Log out of your alana context 4 | 5 | You may already have a session established as alana, the admin user. If so, perform the following steps. 6 | 7 | 1. Go to Okta using your Okta domain and, if you are logged in, log out 8 | 2. Delete your local tanzu pinniped session 9 | 10 | ```bash 11 | rm -rf ~/.config/tanzu/pinniped/ 12 | ``` 13 | 14 | ## Login As Cody 15 | 16 | ```bash 17 | tanzu cluster kubeconfig get $(yq e .workload-cluster.name $PARAMS_YAML) 18 | kubectl config use-context tanzu-cli-$(yq e .workload-cluster.name $PARAMS_YAML)@$(yq e .workload-cluster.name $PARAMS_YAML) 19 | # At login prompt, login with your development user: cody 20 | kubectl get pods -n acme-fitness 21 | ``` 22 | 23 | >Note: If you get "No resources found in acme-fitness namespace." then you successfully logged in, meaning you have permission to get resources in this namespace.
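For a more explicit confirmation of what the developer account can do, `kubectl auth can-i` offers a quick check. A minimal sketch, assuming the TMC policy applied earlier scopes cody to the acme-fitness namespace only:

```bash
kubectl auth can-i get pods -n acme-fitness   # expect: yes
kubectl auth can-i get pods -n kube-system    # expect: no, if access is limited to acme-fitness
```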
 24 | 25 | ## Go to Next Step 26 | 27 | [Get, update, and deploy Acme-fitness app](04-deploy-app.md) 28 | -------------------------------------------------------------------------------- /docs/acme-fitness-lab/04-deploy-app.md: -------------------------------------------------------------------------------- 1 | # Get, update, and deploy Acme-fitness app 2 | 3 | ## Set configuration parameters 4 | 5 | The scripts that prepare the YAML to deploy acme-fitness depend on a parameter being set. Ensure the following is set in `params.yaml`: 6 | 7 | ```yaml 8 | acme-fitness: 9 | fqdn: acme-fitness.highgarden.tkg-aws-e2-lab.winterfell.live 10 | ``` 11 | 12 | ## Retrieve acme-fitness source code 13 | 14 | ```bash 15 | ./scripts/retrieve-acme-fitness-source.sh 16 | ``` 17 | 18 | ## Prepare Manifests for acme-fitness 19 | 20 | Prepare the YAML manifests for customized acme-fitness K8S objects. Manifests will be output into `generated/$WORKLOAD_CLUSTER_NAME/acme-fitness/` in case you want to inspect them. 21 | 22 | ```bash 23 | ./scripts/generate-acme-fitness-yaml.sh $(yq e .workload-cluster.name $PARAMS_YAML) 24 | ``` 25 | 26 | ## Deploy acme-fitness 27 | 28 | Ensure you are using your non-admin context and logged in with your developer account: `cody`. 29 | 30 | ```bash 31 | kubectl config use-context tanzu-cli-$(yq e .workload-cluster.name $PARAMS_YAML)@$(yq e .workload-cluster.name $PARAMS_YAML) 32 | ytt \ 33 | --ignore-unknown-comments \ 34 | -f acme-fitness/app-label-overlay.yaml \ 35 | -f acme-fitness/acme-fitness-secrets.yaml \ 36 | -f acme-fitness/acme-fitness-mongodata-pvc.yaml \ 37 | -f acme-fitness/catalog-db-volume-overlay.yaml \ 38 | -f acme_fitness_demo/kubernetes-manifests/cart-redis-total.yaml \ 39 | -f acme_fitness_demo/kubernetes-manifests/cart-total.yaml \ 40 | -f acme_fitness_demo/kubernetes-manifests/catalog-db-initdb-configmap.yaml \ 41 | -f acme_fitness_demo/kubernetes-manifests/catalog-db-total.yaml \ 42 | -f acme_fitness_demo/kubernetes-manifests/catalog-total.yaml \ 43 | -f acme_fitness_demo/kubernetes-manifests/frontend-total.yaml \ 44 | -f acme_fitness_demo/kubernetes-manifests/payment-total.yaml \ 45 | -f acme_fitness_demo/kubernetes-manifests/order-db-total.yaml \ 46 | -f acme_fitness_demo/kubernetes-manifests/order-total.yaml \ 47 | -f acme_fitness_demo/kubernetes-manifests/users-db-initdb-configmap.yaml \ 48 | -f acme_fitness_demo/kubernetes-manifests/users-db-total.yaml \ 49 | -f acme_fitness_demo/kubernetes-manifests/users-redis-total.yaml \ 50 | -f acme_fitness_demo/kubernetes-manifests/users-total.yaml \ 52 | -f generated/$(yq e .workload-cluster.name $PARAMS_YAML)/acme-fitness/acme-fitness-frontend-ingress.yaml | \ 53 | kapp deploy -n acme-fitness -a acme-fitness -y -f - 54 | ``` 55 | 56 | ### Validation Step 57 | 58 | Go to the ingress URL to test it out. 59 | 60 | ```bash 61 | open https://$(yq e .acme-fitness.fqdn $PARAMS_YAML) 62 | # login with eric/vmware1! in order to make a purchase.
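# Optionally, an extra sanity check (a sketch; assumes curl is available, and -k skips verification in case the certificate is still pending):
curl -skI https://$(yq e .acme-fitness.fqdn $PARAMS_YAML) | head -n 1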
63 | ``` 64 | -------------------------------------------------------------------------------- /docs/argocd-app-details.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/argocd-app-details.png -------------------------------------------------------------------------------- /docs/argocd-apps.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/argocd-apps.png -------------------------------------------------------------------------------- /docs/avi/avi-admin-user.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/avi-admin-user.png -------------------------------------------------------------------------------- /docs/avi/avi-cloud-ipam-dns.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/avi-cloud-ipam-dns.png -------------------------------------------------------------------------------- /docs/avi/avi-cloud-type.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/avi-cloud-type.png -------------------------------------------------------------------------------- /docs/avi/avi-config-new-cert.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/avi-config-new-cert.png -------------------------------------------------------------------------------- /docs/avi/avi-create-cert.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/avi-create-cert.png -------------------------------------------------------------------------------- /docs/avi/avi-datacenter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/avi-datacenter.png -------------------------------------------------------------------------------- /docs/avi/avi-dns-profile.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/avi-dns-profile.png -------------------------------------------------------------------------------- /docs/avi/avi-dns.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/avi-dns.png -------------------------------------------------------------------------------- /docs/avi/avi-essentials.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/avi-essentials.png -------------------------------------------------------------------------------- /docs/avi/avi-export-cert.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/avi-export-cert.png -------------------------------------------------------------------------------- /docs/avi/avi-ipam.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/avi-ipam.png -------------------------------------------------------------------------------- /docs/avi/avi-mgmt-net.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/avi-mgmt-net.png -------------------------------------------------------------------------------- /docs/avi/avi-net-ready.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/avi-net-ready.png -------------------------------------------------------------------------------- /docs/avi/avi-ntp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/avi-ntp.png -------------------------------------------------------------------------------- /docs/avi/avi-ova-resize.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/avi-ova-resize.png -------------------------------------------------------------------------------- /docs/avi/avi-ova-setup.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/avi-ova-setup.png -------------------------------------------------------------------------------- /docs/avi/avi-routes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/avi-routes.png -------------------------------------------------------------------------------- /docs/avi/avi-smtp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/avi-smtp.png -------------------------------------------------------------------------------- /docs/avi/avi-software.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/avi-software.png -------------------------------------------------------------------------------- /docs/avi/avi-systemupdate.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/avi-systemupdate.png -------------------------------------------------------------------------------- /docs/avi/avi-tenant.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/avi-tenant.png -------------------------------------------------------------------------------- /docs/avi/avi-vc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/avi-vc.png -------------------------------------------------------------------------------- /docs/avi/avi-vc2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/avi-vc2.png -------------------------------------------------------------------------------- /docs/avi/avi-vc3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/avi-vc3.png -------------------------------------------------------------------------------- /docs/avi/avi-vip-pool.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/avi-vip-pool.png -------------------------------------------------------------------------------- /docs/avi/net-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/net-2.png -------------------------------------------------------------------------------- /docs/avi/net-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/net-3.png -------------------------------------------------------------------------------- /docs/avi/net-flat.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/net-flat.png -------------------------------------------------------------------------------- /docs/avi/se-group-cluster-designation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/se-group-cluster-designation.png -------------------------------------------------------------------------------- /docs/avi/session-timeout.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/session-timeout.png -------------------------------------------------------------------------------- 
/docs/avi/static-route.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/avi/static-route.png -------------------------------------------------------------------------------- /docs/baseline-lab-setup/one-step.md: -------------------------------------------------------------------------------- 1 | # One Step Foundation Deployment 2 | 3 | This lab can be used to deploy all three clusters included in the foundational lab setup. You will execute a single script that calls all the scripts included in the step-by-step guides. 4 | 5 | >Note: The labs depend on a master `params.yaml` file that is used for environment-specific configuration data. A sample file, `REDACTED-params.yaml`, is included at the root of this repo. It is recommended that you copy this file, rename it to `params.yaml`, place it in the `local-config/` directory, and then start making your adjustments. `local-config/` is included in the `.gitignore` so your version won't be included in any future commits you make to the repo. 6 | 7 | ## Setup Environment Variable for params.yaml 8 | 9 | Set the PARAMS_YAML environment variable to the path of your `params.yaml` file. If you followed the recommendation, the value would be `local-config/params.yaml`; however, you may choose otherwise, for example if you maintain separate `params.yaml` files for AWS and vSphere deployments. 10 | 11 | ```bash 12 | # Update the path from the default if you have a different params.yaml file name or location. 13 | export PARAMS_YAML=local-config/params.yaml 14 | ``` 15 | 16 | Ensure that your copy of `params.yaml` indicates your IaaS as `aws`, `azure` or `vsphere` appropriately. 17 | 18 | ## Azure Only - Configure the Azure CLI and Accept the TKG Azure Base Image License 19 | 20 | Ensure the `az` CLI is installed and configured. The deploy all script will use `az` to deploy TKG. 21 | 22 | To run management cluster VMs on Azure, [accept the license](https://docs.vmware.com/en/VMware-Tanzu-Kubernetes-Grid/2.1/tkg-deploy-mc-21/mgmt-reqs-prep-azure.html#accept-the-base-image-license-4) for the base Kubernetes version and machine OS. 23 | 24 | ```bash 25 | az vm image terms accept --publisher vmware-inc --offer tkg-capi-2022-06-24 --plan k8s-1dot27dot5-ubuntu-2004 26 | ``` 27 | 28 | ## Execute the Deploy All Script 29 | 30 | Now you can execute the following script to perform all of those tasks: 31 | 32 | ```bash 33 | ./scripts/deploy-all.sh 34 | ``` 35 | 36 | >Note: This process should take about 30 minutes to complete. 37 | 38 | ## Tear Down 39 | 40 | Execute the following script to tear down your environment. 41 | 42 | ```bash 43 | ./scripts/delete-all.sh 44 | ``` 45 | -------------------------------------------------------------------------------- /docs/baseline-lab-setup/step-by-step.md: -------------------------------------------------------------------------------- 1 | # Step by Step Setup 2 | 3 | The following labs guide you through the steps to create the three clusters considered the baseline setup. 4 | 5 | >Note: The labs depend on a master `params.yaml` file that is used for environment-specific configuration data. A sample file, `REDACTED-params.yaml`, is included at the root of this repo.
It is recommended that you copy this file, rename it to `params.yaml`, place it in the `local-config/` directory, and then start making your adjustments. `local-config/` is included in the `.gitignore` so your version won't be included in any future commits you make to the repo. 6 | 7 | ## Setup Environment Variable for params.yaml 8 | 9 | Set the PARAMS_YAML environment variable to the path of your `params.yaml` file. If you followed the recommendation, the value would be `local-config/params.yaml`; however, you may choose otherwise, for example if you maintain separate `params.yaml` files for AWS and vSphere deployments. 10 | 11 | ```bash 12 | # Update the path from the default if you have a different params.yaml file name or location. 13 | export PARAMS_YAML=local-config/params.yaml 14 | ``` 15 | 16 | ## Management Cluster 17 | ### 1. [Install Management Cluster](../mgmt-cluster/01_install_tkg_mgmt.md) 18 | ### 2. [Attach Management Cluster to TMC](../mgmt-cluster/02_attach_tmc_mgmt.md) 19 | ### 3. [Configure DNS and Prep Certificate Signing](../mgmt-cluster/03_dns_certs_mgmt.md) 20 | ### 4. [Configure Okta](../mgmt-cluster/04_okta_mgmt.md) 21 | ### 5. [Install Contour Ingress Controller](../mgmt-cluster/06_contour_mgmt.md) 22 | ### 6. [Update Pinniped Configuration](../mgmt-cluster/07_update_pinniped_config_mgmt.md) 23 | ### 7. [Add monitoring](../mgmt-cluster/08_monitoring_mgmt.md) 24 | 25 | ## Setup Shared Services Cluster 26 | ### 1. [Create new Shared Services Cluster](../shared-services-cluster/01_install_tkg_ssc.md) 27 | ### 2. [Attach Shared Services Cluster to TMC](../shared-services-cluster/02_attach_tmc_ssc.md) 28 | ### 3. [Set policy on Shared Services Cluster and Namespace](../shared-services-cluster/03_policy_ssc.md) 29 | ### 4. [Install Contour Ingress Controller](../shared-services-cluster/04_contour_ssc.md) 30 | ### 5. [Install ElasticSearch and Kibana](../shared-services-cluster/06_ek_ssc.md) 31 | ### 6. [Install FluentBit](../shared-services-cluster/07_fluentbit_ssc.md) 32 | ### 7. [Add monitoring to cluster](../shared-services-cluster/08_monitoring_ssc.md) 33 | ### 8. [Deploy Minio to Shared Services Cluster](../shared-services-cluster/08_5_minio_ssc.md) 34 | ### 9. [Enable Data Protection and Setup Nightly Backup](../shared-services-cluster/09_velero_ssc.md) 35 | ### 10. [Install Harbor](../shared-services-cluster/10_harbor.md) 36 | 37 | ## Finalize Management Cluster 38 | ### 1. [Install FluentBit](../mgmt-cluster/09_fluentbit_mgmt.md) 39 | ### 2. [Enable Data Protection and Setup Nightly Backup](../mgmt-cluster/10_velero_mgmt.md) 40 | 41 | ## Setup Workload Cluster 42 | ### 1. [Create new Workload Cluster](../workload-cluster/01_install_tkg_and_components_wlc.md) 43 | 44 | At this point you have the basis for the lab exercises!
45 | -------------------------------------------------------------------------------- /docs/bonus-labs/avi-ldap-auth/assign-auth-profile.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/bonus-labs/avi-ldap-auth/assign-auth-profile.png -------------------------------------------------------------------------------- /docs/bonus-labs/avi-ldap-auth/auth-default-view.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/bonus-labs/avi-ldap-auth/auth-default-view.png -------------------------------------------------------------------------------- /docs/bonus-labs/avi-ldap-auth/profile-create.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/bonus-labs/avi-ldap-auth/profile-create.png -------------------------------------------------------------------------------- /docs/bonus-labs/avi-ldap-auth/role-mapping.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/bonus-labs/avi-ldap-auth/role-mapping.png -------------------------------------------------------------------------------- /docs/bonus-labs/cluster-autoscaling.md: -------------------------------------------------------------------------------- 1 | # Cluster Autoscaling 2 | 3 | Tanzu Kubernetes Grid supports cluster autoscaling leveragin the Cluster API provider. Depending on your lab configuration, you may have enabled cluster autoscaling when your clusters were provisioned. 4 | 5 | ## Lab Configuration Parameters 6 | 7 | You may have noticed the following keys in `params.yaml` that direct the use of cluster autoscaling for the shared services cluster and workload cluster. 8 | 9 | General purpose information on how cluster autoscaler works can be found in the upstream [FAQ](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md). 10 | 11 | ```yaml 12 | shared-services-cluster.autoscaler-enabled: true 13 | shared-services-cluster.worker-replicas: 2 # initial and minimum number of worker nodes 14 | shared-services-cluster.worker-replicas-max: 4 # maximum number of worker nodes 15 | workload-cluster.autoscaler-enabled: true 16 | workload-cluster.worker-replicas: 1 17 | workload-cluster.worker-replicas-max: 3 18 | ``` 19 | 20 | ## Run the Exercises 21 | 22 | For this lab, we will use the shared services cluster to exercise the capability, however you could equally run the commands on the workload cluster. 23 | 24 | The following series of commands demonstrate cluster autoscaling... 
25 | 26 | ```bash 27 | 28 | # Choosing to use shared services cluster for the command sequence 29 | DEMO_CLUSTER_NAME=$(yq e .shared-services-cluster.name $PARAMS_YAML) 30 | WILD_CARD_FQDN=$(yq e .shared-services-cluster.ingress-fqdn $PARAMS_YAML) 31 | 32 | tanzu cluster get $DEMO_CLUSTER_NAME 33 | # see current number of workers 34 | 35 | kubectl config use-context $DEMO_CLUSTER_NAME-admin@$DEMO_CLUSTER_NAME 36 | 37 | # create new sample app from Kubernetes Up And Running (really could be any app) 38 | kubectl create ns kuard 39 | 40 | mkdir -p generated/$DEMO_CLUSTER_NAME/kuard 41 | cp kuard/* generated/$DEMO_CLUSTER_NAME/kuard 42 | 43 | export KUARD_FQDN=kuard.$(echo "$WILD_CARD_FQDN" | sed -e "s/^*.//") 44 | 45 | yq e -i '.spec.rules[0].host = env(KUARD_FQDN)' generated/$DEMO_CLUSTER_NAME/kuard/ingress.yaml 46 | 47 | kubectl apply -f generated/$DEMO_CLUSTER_NAME/kuard -n kuard 48 | 49 | # open browser accessing sample app 50 | open http://$KUARD_FQDN 51 | 52 | # scale deployment 53 | kubectl get nodes 54 | # notice 2 55 | kubectl scale deployment kuard --replicas 15 -n kuard 56 | kubectl get po -n kuard 57 | # notice pending pods 58 | # switch context to MC 59 | MANAGEMENT_CLUSTER_NAME=$(yq e .management-cluster.name $PARAMS_YAML) 60 | kubectl config use-context $MANAGEMENT_CLUSTER_NAME-admin@$MANAGEMENT_CLUSTER_NAME 61 | # check out the autoscaler pod logs in default namespace 62 | kubectl get pods 63 | # check out the machines. A new one should be provisioning 64 | kubectl get machines 65 | 66 | # switch back to demo cluster 67 | kubectl config use-context $DEMO_CLUSTER_NAME-admin@$DEMO_CLUSTER_NAME 68 | 69 | # wait for additional nodes to be ready 70 | kubectl get nodes 71 | 72 | # when additional nodes are ready, pending pods should now be running 73 | kubectl get pods -n kuard -o wide 74 | 75 | # scale back down. if you wait 10 minutes or so the added nodes should be removed. 76 | kubectl scale deployment kuard --replicas 1 -n kuard 77 | 78 | ``` -------------------------------------------------------------------------------- /docs/bonus-labs/gitlab.md: -------------------------------------------------------------------------------- 1 | # Gitlab for CI/CD 2 | 3 | In this lab we will install Gitlab to the shared cluster via a Helm chart. The following modifications to the default chart values need to be made: 4 | - Use Contour Ingress 5 | - Generate certificate via Let's Encrypt 6 | - Updated URLs 7 | - Scaled down Gitlab resources 8 | 9 | Gitlab will also be managed via Tanzu Mission Control in a dedicated workspace. 10 | 11 | ## Set environment variables 12 | The following section should be added to or exist in your local params.yaml file: 13 | 14 | ```bash 15 | gitlab: 16 | namespace: gitlab 17 | tmc-workspace: gitlab-workspace 18 | ``` 19 | 20 | Once these are in place and correct, run the following to export the following into your shell: 21 | 22 | ```bash 23 | export TMC_CLUSTER_GROUP=$(yq e .tmc.cluster-group $PARAMS_YAML) 24 | export GITLAB_NAMESPACE=$(yq e .gitlab.namespace $PARAMS_YAML) 25 | export GITLAB_TMC_WORKSPACE=$TMC_CLUSTER_GROUP-$(yq e .gitlab.tmc-workspace $PARAMS_YAML) 26 | export CLUSTER_NAME=$(yq e .shared-services-cluster.name $PARAMS_YAML) 27 | export IAAS=$(yq e .iaas $PARAMS_YAML) 28 | export VMWARE_ID=$(yq e .vmware-id $PARAMS_YAML) 29 | ``` 30 | 31 | ## Create Gitlab namespace and prepare deployment file 32 | 33 | In order to deploy the Helm chart for Gitlab to a dedicated namespace, we need to create it first. 
To do this, we can use Tanzu Mission Control, as it is already running on our shared services cluster. This will create a "managed namespace", where we can assert additional control over what is deployed. 34 | 35 | NOTE: if you want to avoid using TMC, simply create the namespace in the shared-services cluster manually using "kubectl create namespace ${GITLAB_NAMESPACE}" 36 | 37 | ```bash 38 | tmc workspace create -n $GITLAB_TMC_WORKSPACE -d "Workspace for Gitlab" 39 | tmc cluster namespace create -c $VMWARE_ID-$CLUSTER_NAME-$IAAS -n $GITLAB_NAMESPACE -d "Gitlab product installation" -k $GITLAB_TMC_WORKSPACE -m attached -p attached 40 | ``` 41 | 42 | Generate the deployment file. This file (generated/$CLUSTER_NAME/gitlab/values-gitlab.yaml) will contain oeverrides to the default chart values. 43 | 44 | ```bash 45 | ./scripts/generate-gitlab.sh 46 | ``` 47 | 48 | ## Add helm repo and install Gitlab 49 | Add the repository to helm and use the generated deployment file to deploy the chart. 50 | 51 | ```bash 52 | CLUSTER_NAME=$(yq e .shared-services-cluster.name $PARAMS_YAML) 53 | helm repo add gitlab https://charts.gitlab.io/ 54 | helm repo update 55 | helm upgrade --install gitlab gitlab/gitlab -f generated/$CLUSTER_NAME/gitlab/values-gitlab.yaml -n $GITLAB_NAMESPACE 56 | ``` 57 | 58 | ## Validation Step 59 | 60 | Check to see if the pods, ingresses, and PVCs are up and running: 61 | 62 | ```bash 63 | kubectl get pod,pvc,ing,cert -n $GITLAB_NAMESPACE 64 | kubectl get -n $GITLAB_NAMESPACE secret gitlab-gitlab-initial-root-password -ojsonpath='{.data.password}' | base64 --decode ; echo 65 | ``` 66 | 67 | Go to the browser and use the FQDN for Gitlab to test it out. 68 | -------------------------------------------------------------------------------- /docs/bonus-labs/kubeapps.md: -------------------------------------------------------------------------------- 1 | # Install Kubeapps 2 | 3 | ## Set environment variables 4 | The following section should be added to or exist in your local params.yaml file: 5 | 6 | ```bash 7 | kubeapps: 8 | server-fqdn: kubeapps. 9 | oidc-issuer-fqdn: dex. 10 | ``` 11 | 12 | ## Prepare Okta for Kubeapps Client 13 | 14 | 1. Log into your Okta account you created as part of the [Okta Setup Lab](../mgmt-cluster/04_okta_mgmt.md). The URL should be in your `params.yaml` file under okta.auth-server-fqdn. 15 | 16 | 2. Choose Applications (side menu) > Applications. Then click Create App Integration button. Then select OIDC - OpenID Connect radio option. For Application Type, choose Web Application radio button. Then click Next button. 17 | 18 | 3. Complete the form as follows, and then click Done. 19 | - Give your app a name: `Kubeapps` 20 | - For Grant type, check Authorization Code 21 | - Sign-in redirect URIs: `https:///callback` 22 | ```bash 23 | echo "https://$(yq e .kubeapps.oidc-issuer-fqdn $PARAMS_YAML)/callback" 24 | ``` 25 | - Sign-out redirect URIs: `https:///logout` 26 | ```bash 27 | echo "https://$(yq e .kubeapps.oidc-issuer-fqdn $PARAMS_YAML)/logout" 28 | ``` 29 | 30 | 4. Capture `Client ID` and `Client Secret` for and put it in your $PARAMS_YAML file 31 | ```bash 32 | okta: 33 | kubeapps-dex-app-client-id: 34 | kubeapps-dex-app-client-secret: 35 | ``` 36 | 37 | 5. 
Choose Sign On tab > Edit **OpenID Connect ID Token** section 38 | - Groups claim type => `Filter` 39 | - Groups claim filter => **groups** Matches regex **.\*** 40 | 41 | ## Prepare Manifests and Deploy Dex 42 | 43 | Due to the way okta provides thin-tokens, if we directly integrated kubeapps with okta, we would not recieve group membership. Dex has the workflow to retrieve the group 44 | membership and generate a new JWT token for kubeapps. As such, we will deploy dex in the workload cluster to perform this mediation. 45 | 46 | ```bash 47 | ./dex/generate-and-apply-dex-yaml.sh $(yq e .workload-cluster.name $PARAMS_YAML) 48 | ``` 49 | 50 | ## Prepare Manifests and Deploy Kubeapps 51 | Kubeapps should be installed in the workload cluster, as it is going to be available to all users. Prepare and deploy the YAML manifests for the related kubeapps K8S objects. Manifest will be output into `generated/$CLUSTER_NAME/kubeapps/` in case you want to inspect. 52 | ```bash 53 | ./kubeapps/generate-and-apply-kubeapps-yaml.sh $(yq e .workload-cluster.name $PARAMS_YAML) 54 | ``` 55 | 56 | ## Validation Step 57 | 1. All kubeapps pods are in a running state: 58 | ```bash 59 | kubectl get po -n kubeapps 60 | ``` 61 | 2. Certificate is True and Ingress created: 62 | ```bash 63 | kubectl get cert,ing -n kubeapps 64 | ``` 65 | 3. Open a browser and navigate to https://<$KUBEAPPS_FQDN>. 66 | ```bash 67 | open https://$(yq e .kubeapps.server-fqdn $PARAMS_YAML) 68 | ``` 69 | 4. Login as `alana`, who is an admin on the cluster. You should be taken to the kubeapps home page 70 | -------------------------------------------------------------------------------- /docs/bonus-labs/prometheus_grafana.md: -------------------------------------------------------------------------------- 1 | # Add Prometheus and Grafana to Workload Cluster 2 | 3 | ## Overview 4 | 5 | Tanzu offers essential cluster monitoring with Prometheus and Grafana through TKG packages. When deployed to a cluster, you have metrics collection and storage, alerting, and dashboards. 6 | 7 | >Note: This is an in-cluster service. Using this approach, you would have to deploy Prometheus and Grafana to each cluster with individual storage and dashboards for each cluster. Alternatively, Tanzu Observability provides multi-cluster observability and is part of the Tanzu Advanced offering. 8 | 9 | In this lab we will be adding monitoring to the workload cluster. 10 | 11 | ## Set configuration parameters 12 | 13 | The scripts to prepare and execute the YAML to deploy prometheus and grafana depend on a parameters to be set. Ensure the following are set in `params.yaml`: 14 | 15 | ```yaml 16 | # Leave prometheus-fqdn blank if you choose not to expose, there is no auth 17 | workload-cluster.prometheus-fqdn: prometheus.highgarden.tkg-vsphere-lab.winterfell.live 18 | # Grafana has auth 19 | workload-cluster.grafana-fqdn: grafana.highgarden.tkg-vsphere-lab.winterfell.live 20 | grafana.admin-password: REDACTED 21 | ``` 22 | 23 | ## Prepare Manifests and Deploy Prometheus 24 | 25 | Prepare the YAML manifests for the related prometheus k8s objects. Manifests will be output into `generated/$CLUSTER_NAME/monitoring/` in case you want to inspect. 26 | 27 | ```bash 28 | ./scripts/generate-and-apply-prometheus-yaml.sh \ 29 | $(yq e .workload-cluster.name $PARAMS_YAML) \ 30 | $(yq e .workload-cluster.prometheus-fqdn $PARAMS_YAML) 31 | ``` 32 | 33 | ## Prometheus Validation Step 34 | 35 | 1. 
(Using Incognito Window) Access prometheus at the configured `workload-cluster.prometheus-fqdn` using `https://` 36 | 2. Enter `container_memory_working_set_bytes` into search box 37 | 3. Choose `Graph` for output 38 | 4. Click `Execute` button 39 | 5. View results 40 | 41 | ```bash 42 | open https://$(yq e .workload-cluster.prometheus-fqdn $PARAMS_YAML) 43 | ``` 44 | 45 | ## Prepare Manifests and Deploy Grafana 46 | 47 | Prepare the YAML manifests for the related grafana k8s objects. Manifests will be output into `generated/$CLUSTER_NAME/monitoring/` in case you want to inspect. 48 | 49 | ```bash 50 | ./scripts/generate-and-apply-grafana-yaml.sh \ 51 | $(yq e .workload-cluster.name $PARAMS_YAML) \ 52 | $(yq e .workload-cluster.grafana-fqdn $PARAMS_YAML) 53 | ``` 54 | 55 | ## Grafana Validation Step 56 | 57 | 1. (Using Incognito Window) Access grafana at the configured `workload-cluster.grafana-fqdn` using `https://` 58 | 2. Login with username `admin` and the password you specified as `grafana.admin-password` 59 | 3. Now we will import a dashboard. Choose `+` from left menu, then `Import` 60 | 4. Enter `13382` into the Import via grafana.com box, and choose `Load` 61 | 5. Choose `Prometheus` from the `Prometheus` dropdown menu, and then click `Import` 62 | 6. View the dashboard! 63 | 64 | ```bash 65 | open https://$(yq e .workload-cluster.grafana-fqdn $PARAMS_YAML) 66 | ``` 67 | -------------------------------------------------------------------------------- /docs/bonus-labs/to.md: -------------------------------------------------------------------------------- 1 | # Tanzu Observability by WaveFront 2 | 3 | You'll need a Wavefront URL and API key to integrate the clusters with Wavefront. 4 | 5 | The scripts to prepare the YAML to deploy TO depend on parameters being set. Ensure the following are set in `params.yaml`: 6 | 7 | ```yaml 8 | wavefront: 9 | # Your API Key 10 | api-key: foo-bar-foo 11 | # References to your wavefront instance 12 | url: https://surf.wavefront.com 13 | # Prefix to add to all your clusters in wavefront 14 | cluster-name-prefix: dpfeffer 15 | ``` 16 | 17 | ## Configuration through Tanzu Mission Control (TMC) 18 | 19 | TMC allows you to directly integrate with Tanzu Observability for clusters under management. This is a new feature for TMC and is only available through the UI. CLI integration is targeted for Q2 2021. As such, we don't have a scripted option. 20 | 21 | 1. Log in to TMC 22 | 2. Select your cluster from the cluster list 23 | 3. Choose Actions->Tanzu Observability by Wavefront->Add... 24 | 4. Add dialog. Either select an existing TO credential from the drop down or set up a new credential. To set up a new credential: for `Tanzu Observability URL` enter the result of `echo $(yq e .wavefront.url $PARAMS_YAML)`. For `Tanzu Observability API token` enter the result of `echo $(yq e .wavefront.api-key $PARAMS_YAML)` and then click the `CONFIRM` button. 25 | 5. It should take about 2 minutes to complete enablement of the cluster, and then a little more to see the data flowing in Tanzu Observability. 26 | 27 | **Validation** 28 | 29 | 1. You have a new namespace created: `tanzu-observability-saas` 30 | 31 | ```bash 32 | kubectl get all -n tanzu-observability-saas 33 | ``` 34 | 35 | 2. Test it out. Choose Actions->Tanzu Observability by Wavefront->Open Tanzu Observability by Wavefront. A new browser tab will open directly on the Kubernetes Cluster Dashboard.
Your cluster will be named $CLUSTER_NAME.attached.attached 36 | 37 | -------------------------------------------------------------------------------- /docs/bonus-labs/velero_restore.md: -------------------------------------------------------------------------------- 1 | # Velero Restore 2 | 3 | What if we accidentally delete our application or the namespace that we used for the application? This lab will show backing up and restoring with Velero. To start, let's perform a namespace-specific backup: 4 | 5 | ```bash 6 | velero backup create wlc-1-acme-fitness --include-namespaces acme-fitness 7 | Backup request "wlc-1-acme-fitness" submitted successfully. 8 | Run `velero backup describe wlc-1-acme-fitness` or `velero backup logs wlc-1-acme-fitness` for more details. 9 | 10 | velero backup describe wlc-1-acme-fitness 11 | 12 | ``` 13 | 14 | The first test is to delete the deployments and PVCs within the namespace. This will remove all pods and persistent data: 15 | 16 | ```bash 17 | kubectl delete deployment -n acme-fitness --all 18 | kubectl delete svc -n acme-fitness --all 19 | kubectl delete pvc -n acme-fitness --all 20 | kubectl get all,pvc -n acme-fitness 21 | ``` 22 | 23 | Now we can run the restore from velero: 24 | 25 | ```bash 26 | velero restore create wlc-1-acme-fitness-04-24-2020 --from-backup wlc-1-acme-fitness 27 | velero restore describe wlc-1-acme-fitness-04-24-2020 28 | kubectl get all,pvc -n acme-fitness 29 | ``` 30 | 31 | That's it! Back in business. 32 | -------------------------------------------------------------------------------- /docs/guestbook-app.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/guestbook-app.png -------------------------------------------------------------------------------- /docs/mgmt-cluster/02_attach_tmc_mgmt.md: -------------------------------------------------------------------------------- 1 | # Register Management Cluster to TMC 2 | 3 | ## Verify Active TMC Context 4 | 5 | The following scripts assume that you have an active `tmc` cli session. In order to test this, execute the `tmc system context current` command to retrieve the current context. If you don't have an active session, log in using the `tmc login` command. 6 | 7 | ## Register Management Cluster 8 | 9 | Execute the following script to register your TMC management cluster. It will create a cluster group as defined in `.tmc.cluster-group` in `params.yaml`. Then it will use the tmc cli to register the management cluster. 10 | 11 | ```bash 12 | ./scripts/tmc-register-mc.sh 13 | ``` 14 | 15 | ## Validation Step 16 | 17 | Go to the TMC UI and find your management cluster. On the left, choose Administration, then Management Clusters in the top nav, and choose your management cluster.
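If you prefer to validate from the command line as well, the short sketch below checks that the TMC agent pods are healthy on the management cluster. It assumes the agent extensions land in the default `vmware-system-tmc` namespace; adjust if your environment differs.

```bash
# Optional CLI spot check (assumes the TMC agents are installed into the
# default vmware-system-tmc namespace).
MGMT_CLUSTER_NAME=$(yq e .management-cluster.name $PARAMS_YAML)
kubectl config use-context $MGMT_CLUSTER_NAME-admin@$MGMT_CLUSTER_NAME
kubectl get pods -n vmware-system-tmc
```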
18 | 19 | ## Additional Notes 20 | 21 | - This lab does not leverage the workload cluster lifecycle management capabilities of TMC due to historical reasons. 22 | 23 | ## Go to Next Step 24 | 25 | [Configure DNS and Prep Certificate Signing](03_dns_certs_mgmt.md) 26 | -------------------------------------------------------------------------------- /docs/mgmt-cluster/04_okta_mgmt.md: -------------------------------------------------------------------------------- 1 | # Setup an account with Okta for OpenID Connect (OIDC) 2 | 3 | Set up a free Okta account: https://developer.okta.com/signup/ 4 | 5 | ## Create Admin User 6 | 7 | Choose Directory (side menu) > People > Add Person: 8 | - Set First Name, Last Name and Email: (e.g.: Alana Smith, alana@winterfell.live) 9 | - Password Set by Admin, YOUR_PASSWORD 10 | - Uncheck user must change password on first login 11 | 12 | ## Create Platform Team 13 | 14 | Choose Directory (side menu) > Groups and then > Add Group: 15 | - platform-team 16 | 17 | Click on the `platform-team` group > Manage People: Then add `alana` to the `platform-team`. Save 18 | 19 | ## Create Application for TKG 20 | 21 | Choose Applications (side menu) > Applications. Then click `Create App Integration` button. Then select `OIDC - OpenID Connect` radio option. For Application Type, choose `Web Application` radio button. Then click `Next` button. 22 | - Give your app a name: TKG 23 | - For Grant type, check Authorization Code and Refresh Token 24 | - Sign-in redirect URIs: https://pinniped.../callback 25 | - Sign-out redirect URIs: https://pinniped.../logout 26 | > Note: Use your management-cluster.pinniped-fqdn domain as defined in your params.yaml 27 | 28 | Click `Save` button 29 | 30 | ## Retrieve Client ID and Client Secret 31 | 32 | Capture the Client ID and Client Secret from the Client Credentials card. Capture the Okta Domain from General Settings. You will need to put these into your params.yaml file. 33 | 34 | ```yaml 35 | okta: 36 | auth-server-fqdn: # Your Okta Domain 37 | tkg-app-client-id: # Client Id 38 | tkg-app-client-secret: # Client Secret 39 | ``` 40 | 41 | ## Setup groups to be returned 42 | 43 | Go to Security (side menu) > API. Choose the Authorization Servers tab and select the `default` authorization server. Select the Scopes tab and click Add Scope. 44 | - name=groups 45 | - mark include in public metadata 46 | 47 | Click on Claims tab > Add Claim 48 | - name=groups 49 | - Include in token type=ID Token 50 | - value type=Groups 51 | - Filter Matches regex => .* 52 | - Include in= The following scopes `groups` 53 | 54 | Now choose Applications (side menu) > Applications > Pick your app > Sign On tab > Edit **OpenID Connect ID Token** section 55 | - Groups claim type => Filter 56 | - Groups claim filter => **groups** Matches regex **.\*** 57 | 58 | ## Go to Next Step 59 | 60 | [Install Contour on Management Cluster](06_contour_mgmt.md) 61 | -------------------------------------------------------------------------------- /docs/mgmt-cluster/06_contour_mgmt.md: -------------------------------------------------------------------------------- 1 | # Install Contour on Management Cluster 2 | 3 | ## Deploy Contour 4 | 5 | Apply the Contour configuration. We explicitly configure contour to use service type=LoadBalancer for Envoy. Use this script to apply the YAML manifests. 6 | ```bash 7 | ./scripts/generate-and-apply-contour-yaml.sh $(yq e .management-cluster.name $PARAMS_YAML) 8 | ``` 9 | 10 | ## Verify Contour 11 | 12 | Once it is deployed, you can see all pods `Running` and the Load Balancer up.
13 | 14 | ```bash 15 | kubectl get pod,svc -n tanzu-system-ingress 16 | ``` 17 | 18 | ## Check out Cloud Load Balancer (for AWS and Azure) 19 | 20 | The EXTERNAL IP for AWS will be set to the name of the newly configured AWS Elastic Load Balancer, which will also be visible in the AWS UI and CLI. 21 | 22 | If using AWS: 23 | 24 | ```bash 25 | aws elb describe-load-balancers 26 | ``` 27 | 28 | If Azure: 29 | 30 | ```bash 31 | az network lb list 32 | ``` 33 | 34 | ## Setup DNS Management 35 | 36 | We will leverage [external-dns](https://github.com/kubernetes-sigs/external-dns) for Kubernetes-managed DNS updates using the user-managed package associated with TKG [Service Discovery with ExternalDNS](https://docs.vmware.com/en/VMware-Tanzu-Packages/2023.9.19/tanzu-packages/packages-externaldns.html). When any HTTPProxy, Ingress, or Service carries the appropriate annotations, `external-dns` will observe the change and make the desired updates in the DNS provider: Route53 (default), Google Cloud DNS, or Azure DNS, depending on the configuration of `dns.provider`. 37 | 38 | If we are leveraging Route53, we require access to AWS. See the [external-dns docs](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/tutorials/aws.md) for the minimum access required for the AWS account you are using. If necessary, create the policy and assign it to the user that owns the access key. Regardless of your TKG IaaS, ensure the following are set in `params.yaml`: 39 | 40 | ```yaml 41 | aws: 42 | region: your-region 43 | access-key-id: your-access-key-id 44 | secret-access-key: your-secret-access-key 45 | ``` 46 | 47 | If we are leveraging Google Cloud DNS, we require access to Google Cloud and permissions to manage DNS zones. The next script will leverage the `gcloud` CLI to create the required service account and permissions, therefore your `gcloud` CLI should have been initialized in the local configuration as detailed in [DNS Certs Management](03_dns_certs_mgmt.md). 48 | 49 | ```yaml 50 | gcloud: 51 | project: your-project-id 52 | ``` 53 | 54 | If we are leveraging Azure DNS, all the required resources and service accounts were created during [DNS Certs Management](03_dns_certs_mgmt.md). 55 | 56 | For any DNS provider, execute the script below to deploy `external-dns` and to apply the annotation. 57 | 58 | ```bash 59 | ./scripts/generate-and-apply-external-dns-yaml.sh $(yq e .management-cluster.name $PARAMS_YAML) 60 | ``` 61 | 62 | ## Prepare and Apply Cluster Issuer Manifests 63 | 64 | Prepare the YAML manifests for the contour cluster issuer. Manifests will be output into `clusters/$CLUSTER_NAME/contour/generated/` in case you want to inspect. It is assumed that if your IaaS is AWS or Azure, then you will use the `http` challenge type, and if your IaaS is vSphere, you will use the `dns` challenge type as a non-internet-facing environment. If using the `dns` challenge, this script assumes Route 53 DNS by default unless `dns.provider` is set to `gcloud-dns` or `azure-dns`.
65 | 66 | ```bash 67 | ./scripts/generate-and-apply-cluster-issuer-yaml.sh $(yq e .management-cluster.name $PARAMS_YAML) 68 | ``` 69 | 70 | ## Verify Cluster Issuer 71 | 72 | Check to see that the ClusterIssuer is valid: 73 | 74 | ```bash 75 | kubectl get clusterissuer letsencrypt-contour-cluster-issuer -o yaml 76 | ``` 77 | 78 | Look for the status to be Ready: True 79 | 80 | ## Go to Next Step 81 | 82 | [Update Pinniped Config](07_update_pinniped_config_mgmt.md) 83 | -------------------------------------------------------------------------------- /docs/mgmt-cluster/07_update_pinniped_config_mgmt.md: -------------------------------------------------------------------------------- 1 | # Update Pinniped Config 2 | 3 | The default deployment approach is to leverage IP addresses and self signed certificates for the Pinniped supervisor endpoint. However, in our lab we will leverage FQDN's managed by your DNS provider and Let's Encrypt to generate valid SSL certificates. The `pinniped-addon` secret within the management cluster contains configuration information that drives this behavior. In addition, there is some patching that is required of key Pinniped resources. 4 | 5 | ## Run Configuration Update Script 6 | 7 | ```bash 8 | ./scripts/update-pinniped-configuration.sh 9 | ``` 10 | 11 | ## Verify Configuration 12 | 13 | There are several key resources that contain the pinniped configuration state. Let's get these resources to verify the specifications are as we expect. 14 | 15 | ```bash 16 | kubectl get cm pinniped-info -n kube-public -oyaml 17 | kubectl get federationdomain -n pinniped-supervisor -oyaml 18 | kubectl get jwtauthenticator -n pinniped-concierge -oyaml 19 | kubectl get oidcidentityprovider -n pinniped-supervisor -oyaml 20 | ``` 21 | 22 | 23 | ## Go to Next Step 24 | 25 | [Add Prometheus and Grafana to Management Cluster](08_monitoring_mgmt.md) 26 | -------------------------------------------------------------------------------- /docs/mgmt-cluster/08_monitoring_mgmt.md: -------------------------------------------------------------------------------- 1 | # Add Prometheus and Grafana to Management Cluster 2 | 3 | ## Overview 4 | 5 | Tanzu offers essential cluster monitoring with Prometheus and Grafana through TKG packages. When deployed to a cluster, you have metrics collection and storage, alerting, and dashboards. 6 | 7 | >Note: This is an in-cluster service. Using this approach, you would have to deploy Prometheus and Grafana to each cluster with individual storage and dashboards for each cluster. Alternatively, Tanzu Observability provides multi-cluster observability and is part of the Tanzu Advanced offering. 8 | 9 | In this lab we will be adding monitoring to the management cluster. 10 | 11 | ## Set configuration parameters 12 | 13 | The scripts to prepare and execute the YAML to deploy prometheus and grafana depend on a parameters to be set. Ensure the following are set in `params.yaml`: 14 | 15 | ```yaml 16 | # Leave prometheus-fqdn blank if you choose not to expose it, there is no auth 17 | management-cluster.prometheus-fqdn: prometheus.dragonstone.tkg-vsphere-lab.winterfell.live 18 | # Grafana has auth 19 | management-cluster.grafana-fqdn: grafana.dragonstone.tkg-vsphere-lab.winterfell.live 20 | grafana.admin-password: REDACTED 21 | ``` 22 | 23 | ## Prepare Manifests and Deploy Prometheus 24 | 25 | Prepare the YAML manifests for the related prometheus k8s objects. Manifests will be output into `generated/$CLUSTER_NAME/monitoring/` in case you want to inspect. 
26 | 27 | ```bash 28 | ./scripts/generate-and-apply-prometheus-yaml.sh \ 29 | $(yq e .management-cluster.name $PARAMS_YAML) \ 30 | $(yq e .management-cluster.prometheus-fqdn $PARAMS_YAML) 31 | ``` 32 | 33 | ## Prometheus Validation Step 34 | 35 | 1. (Using Incognito Window) Access prometheus at the configured `management-cluster.prometheus-fqdn` using `https://` 36 | 2. Enter `container_memory_working_set_bytes` into search box 37 | 3. Choose `Graph` for output 38 | 4. Click `Execute` button 39 | 5. View results 40 | 41 | ```bash 42 | open https://$(yq e .management-cluster.prometheus-fqdn $PARAMS_YAML) 43 | ``` 44 | 45 | ## Prepare Manifests and Deploy Grafana 46 | 47 | Prepare the YAML manifests for the related grafana k8s objects. Manifests will be output into `generated/$CLUSTER_NAME/monitoring/` in case you want to inspect. 48 | 49 | ```bash 50 | ./scripts/generate-and-apply-grafana-yaml.sh \ 51 | $(yq e .management-cluster.name $PARAMS_YAML) \ 52 | $(yq e .management-cluster.grafana-fqdn $PARAMS_YAML) 53 | ``` 54 | 55 | ## Grafana Validation Step 56 | 57 | 1. (Using Incognito Window) Access grafana at the configured `management-cluster.grafana-fqdn` using `https://` 58 | 2. Login with username `admin` and the password you specified as `grafana.admin-password` 59 | 3. Now we will import a dashboard. Choose `+` from left menu, then `Import` 60 | 4. Enter `13382` into the Import via grafana.com box, and choose `Load` 61 | 5. Choose `Prometheus` from the `Prometheus` dropdown menu, and then click `Import` 62 | 6. View the dashboard! 63 | 64 | ```bash 65 | open https://$(yq e .management-cluster.grafana-fqdn $PARAMS_YAML) 66 | ``` 67 | 68 | ## Go to Next Step 69 | 70 | [Create new Shared Services Cluster](../shared-services-cluster/01_install_tkg_ssc.md) 71 | -------------------------------------------------------------------------------- /docs/mgmt-cluster/09_fluentbit_mgmt.md: -------------------------------------------------------------------------------- 1 | # Install FluentBit on Management Cluster 2 | 3 | **You must complete the [Install ElasticSearch and Kibana](../shared-services-cluster/06_ek_ssc.md) lab prior to this lab.** 4 | 5 | ## Prepare Manifests and Deploy Fluent Bit 6 | 7 | Prepare the YAML manifests for the related fluent-bit K8S objects. Manifest will be output into `generated/$CLUSTER_NAME/fluent-bit/` in case you want to inspect. 8 | 9 | ```bash 10 | ./scripts/generate-and-apply-fluent-bit-yaml.sh $(yq e .management-cluster.name $PARAMS_YAML) 11 | ``` 12 | 13 | ## Validation Step 14 | 15 | Ensure that fluent bit pods are running. 16 | 17 | ```bash 18 | kubectl get pods -n tanzu-system-logging 19 | ``` 20 | 21 | Access kibana. This leverages the wildcard DNS entry on the contour ingress. Your base domain will be different from mine. 22 | 23 | ```bash 24 | open http://$(yq e .shared-services-cluster.kibana-fqdn $PARAMS_YAML) 25 | ``` 26 | 27 | You should see the kibana welcome screen. 28 | 29 | We assume you have already configured your kibana index during the configuration of [FluentBit for Shared Services Cluster](../shared-services-cluster/07_fluentbit_ssc.md). 30 | 31 | Click the Discover icon at the top of the left menu bar. You can start searching for management cluster logs.
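If you want to confirm from the command line that management cluster logs are reaching Elasticsearch, the hedged sketch below counts documents for a namespace that exists only on the management cluster. It assumes the standard fluent-bit `kubernetes.namespace_name` metadata field and the `logstash-*` index pattern used in these labs; adjust if your configuration differs.

```bash
# Optional sanity check: count log documents tagged with a management-cluster-only
# namespace. Assumes the fluent-bit kubernetes filter adds kubernetes.namespace_name
# and that logs land in the logstash-* indices.
ELASTICSEARCH_CN=$(yq e .shared-services-cluster.elasticsearch-fqdn $PARAMS_YAML)
curl "http://$ELASTICSEARCH_CN/logstash-*/_count?q=kubernetes.namespace_name:pinniped-supervisor&pretty"
```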
32 | 33 | ## Go to Next Step 34 | 35 | [Install Velero and Setup Nightly Backup on Management Cluster](10_velero_mgmt.md) 36 | -------------------------------------------------------------------------------- /docs/mgmt-cluster/10_velero_mgmt.md: -------------------------------------------------------------------------------- 1 | # Install Velero and Setup Nightly Backup on Management Cluster 2 | 3 | ## Overview 4 | 5 | At this time the management cluster cannot be managed by Tanzu Mission Control, and thus TMC can't manage its Data Protection as it does for our shared services cluster. However, TMC leverages velero under the covers, so we can take on the data protection configuration ourselves. 6 | 7 | ## Install Velero client 8 | 9 | It is assumed you have already downloaded the velero cli as part of [Enable Data Protection and Setup Nightly Backup for Shared Services Cluster](../shared-services-cluster/09_velero_ssc.md). 10 | 11 | ## Target Locations 12 | 13 | Your backup will be stored based upon the IaaS you are using. 14 | 15 | - `vSphere` will target the Minio server you deployed 16 | - `Azure` will create a storage account in your cluster resource group and backups will go there 17 | - `AWS` will target AWS S3 and backups will go there 18 | 19 | Credentials to access the target storage location are stored at `generated/$CLUSTER_NAME/velero/credentials-velero`. 20 | 21 | If using Cloud Gate for AWS, no credentials will be stored and you will use the IAM of the node. 22 | 23 | ## Set configuration parameters 24 | 25 | The scripts to prepare the YAML to deploy velero depend on parameters being set. Ensure the following are set in `params.yaml` based upon your environment: 26 | 27 | ```yaml 28 | velero.bucket: my-bucket 29 | ``` 30 | 31 | ## Prepare Manifests and Deploy Velero 32 | 33 | Prepare the YAML manifests for the related velero K8S objects and then run the following script to install velero and configure a nightly backup. 34 | 35 | ```bash 36 | ./scripts/velero.sh $(yq e .management-cluster.name $PARAMS_YAML) 37 | ``` 38 | 39 | ## Validation Step 40 | 41 | Ensure the schedule is created and the first backup is starting: 42 | 43 | ```bash 44 | velero schedule get 45 | velero backup get | grep daily 46 | ``` 47 | 48 | ## Go to Next Step 49 | 50 | Now that the management cluster steps are complete, on to the workload cluster. 51 | 52 | [Create new Workload Cluster](../workload-cluster/01_install_tkg_and_components_wlc.md) 53 | -------------------------------------------------------------------------------- /docs/misc/cloud_dns_sample.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/misc/cloud_dns_sample.png -------------------------------------------------------------------------------- /docs/shared-services-cluster/01_install_tkg_ssc.md: -------------------------------------------------------------------------------- 1 | # Create new Shared Services Cluster 2 | 3 | Here we will deploy a new workload cluster for use as the Shared Services cluster. The shared services cluster in our opinionated deployment is a special-purpose workload cluster where common services are deployed. All workload clusters will have similar steps for initial setup. 4 | 5 | Here we are pulling the following values from the `params.yaml` file.
See the examples below: 6 | 7 | ```yaml 8 | # the DNS CN to be used for Pinniped service 9 | management-cluster.pinniped-fqdn: pinniped.mgmt.tkg-aws-lab.winterfell.live 10 | shared-services-cluster.name: dorn 11 | shared-services-cluster.worker-replicas: 2 12 | iaas: aws 13 | ``` 14 | 15 | We need to set up a cluster configuration file for our new workload cluster. 16 | 17 | Then we ask the management cluster to create the new workload cluster. 18 | 19 | >Special Note for AWS Deployments: We will deploy the workload clusters in the same VPC and subnets as the management cluster. This lab only generates a VPC and subnets once, prior to the deployment of the MC. 20 | 21 | All of the steps above can be accomplished by running the following script: 22 | 23 | ```bash 24 | ./scripts/deploy-workload-cluster.sh \ 25 | $(yq e .shared-services-cluster.name $PARAMS_YAML) \ 26 | $(yq e .shared-services-cluster.worker-replicas $PARAMS_YAML) \ 27 | $(yq e .shared-services-cluster.controlplane-endpoint $PARAMS_YAML) \ 28 | $(yq e '.shared-services-cluster.kubernetes-version // null' $PARAMS_YAML) 29 | ``` 30 | 31 | >Note: The kubernetes-version parameter is optional for the script; if you don't have it in your configuration, it will default to the tanzu cli's default version. You can get a list of valid options to supply in the kubernetes-version parameter by issuing `tanzu kubernetes-release get` and choosing the appropriate value from the name column. 32 | 33 | >Note: Wait until your cluster has been created. It may take 12 minutes. 34 | 35 | >Note: Once the cluster is created, your kubeconfig will already have the new context as the active one with the necessary credentials. 36 | 37 | >Note: You can view the cluster-config.yaml file generated for this cluster at `generated/$CLUSTER_NAME/cluster-config.yaml`. 38 | 39 | ## Go to Next Step 40 | 41 | [Attach Shared Services Cluster to TMC](02_attach_tmc_ssc.md) 42 | -------------------------------------------------------------------------------- /docs/shared-services-cluster/02_attach_tmc_ssc.md: -------------------------------------------------------------------------------- 1 | # Attach Shared Services Cluster to TMC 2 | 3 | We want to have all kubernetes clusters under TMC management. As such, execute the following script to attach your cluster to TMC. 4 | 5 | >Note: The script leverages values specified in your params.yaml file to use for the cluster name and cluster group. 6 | 7 | ```bash 8 | ./scripts/tmc-attach.sh $(yq e .shared-services-cluster.name $PARAMS_YAML) 9 | ``` 10 | 11 | ## Validation Step 12 | 13 | Go to the TMC UI and find your cluster. It may take a few minutes for the cluster to appear and report a healthy status. 14 | 15 | In the Cluster view, go to `Add-ons > Tanzu Repositories` and disable the `tanzu-standard` repository if using version `v2023.7.13_update.2`. 16 |
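As an additional check, you can confirm the attachment from the CLI as well. The sketch below assumes your `tmc` session from the earlier labs is still active and simply looks for the shared services cluster in the TMC cluster list.

```bash
# Optional CLI check: the newly attached cluster should appear in the TMC cluster list
# (assumes an active tmc cli session).
tmc cluster list | grep $(yq e .shared-services-cluster.name $PARAMS_YAML)
```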
17 | 18 | ## Go to Next Step 19 | 20 | [Set policy on Shared Services Cluster](03_policy_ssc.md) 21 | -------------------------------------------------------------------------------- /docs/shared-services-cluster/03_policy_ssc.md: -------------------------------------------------------------------------------- 1 | # Set policy on Shared Services Cluster 2 | 3 | ## Setup Access Policy for platform-team to have cluster.admin role 4 | 5 | ```bash 6 | ./scripts/tmc-policy.sh \ 7 | $(yq e .shared-services-cluster.name $PARAMS_YAML) \ 8 | cluster.admin \ 9 | platform-team 10 | ``` 11 | 12 | ### Validation Step 13 | 14 | 1. Access TMC UI 15 | 2. Select Policies on the left nav 16 | 3. Choose Access and then select your shared services cluster 17 | 4. Observe direct Access Policy => Set cluster.admin permission to the platform-team group 18 | 19 | ## Go to Next Step 20 | 21 | [Install Contour on Shared Services Cluster](04_contour_ssc.md) 22 | -------------------------------------------------------------------------------- /docs/shared-services-cluster/04_contour_ssc.md: -------------------------------------------------------------------------------- 1 | # Install Contour on Shared Services Cluster 2 | 3 | ## Deploy Cert Manager 4 | 5 | Our solution leverages cert-manager to generate valid SSL certs. Cert-manager was deployed automatically into the management cluster, however it is an optional component for workload clusters. Use this script to deploy cert-manager into the cluster using TKG packages. 6 | 7 | ```bash 8 | ./scripts/deploy-cert-manager.sh $(yq e .shared-services-cluster.name $PARAMS_YAML) 9 | ``` 10 | 11 | ## Deploy Contour 12 | 13 | Generate and apply the Contour configuration. We specifically configure service type=LoadBalancer for Envoy. Use the script to apply the manifests. 14 | 15 | ```bash 16 | ./scripts/generate-and-apply-contour-yaml.sh $(yq e .shared-services-cluster.name $PARAMS_YAML) 17 | ``` 18 | 19 | ## Verify Contour 20 | 21 | Once it is deployed, you can see all pods `Running` and the Load Balancer up. 22 | 23 | ```bash 24 | kubectl get pod,svc -n tanzu-system-ingress 25 | ``` 26 | 27 | ## Setup DNS for Wildcard Domain Contour Ingress 28 | 29 | Just as we did for the management cluster, we will leverage [external-dns](https://github.com/kubernetes-sigs/external-dns) for Kubernetes-managed DNS updates. The same choice of DNS provider will be used. 30 | 31 | Execute the script below to deploy `external-dns` and to apply the annotation to the envoy service. 32 | 33 | ```bash 34 | ./scripts/generate-and-apply-external-dns-yaml.sh $(yq e .shared-services-cluster.name $PARAMS_YAML) 35 | ``` 36 | 37 | ## Prepare and Apply Cluster Issuer Manifests 38 | 39 | Prepare the YAML manifests for the contour cluster issuer. Manifests will be output into `generated/$CLUSTER_NAME/contour/` in case you want to inspect. It is assumed that if your IaaS is AWS, then you will use the `http` challenge type, and if your IaaS is vSphere, you will use the `dns` challenge type as a non-internet-facing environment.
40 | 41 | ```bash 42 | ./scripts/generate-and-apply-cluster-issuer-yaml.sh $(yq e .shared-services-cluster.name $PARAMS_YAML) 43 | ``` 44 | 45 | ## Verify Cluster Issuer 46 | 47 | Check to see that the ClusterIssuer is valid: 48 | 49 | ```bash 50 | kubectl get clusterissuer letsencrypt-contour-cluster-issuer -o yaml 51 | ``` 52 | 53 | Look for the status to be Ready: True 54 | 55 | ## Go to Next Step 56 | 57 | [Install Elasticsearch and Kibana](06_ek_ssc.md) 58 | -------------------------------------------------------------------------------- /docs/shared-services-cluster/06_ek_ssc.md: -------------------------------------------------------------------------------- 1 | # Install Elasticsearch and Kibana 2 | 3 | We will deploy Elasticsearch and Kibana as a target for logs. This is [one of several potential targets for TKG to send logs](https://docs.vmware.com/en/VMware-Tanzu-Packages/2023.9.19/tanzu-packages/packages-fluentbit.html). 4 | 5 | This is a minimalist, POC-quality deployment of Elasticsearch and Kibana. This is not a component of Tanzu. This deployment is just for demonstration purposes. See the notes below if you face issues with the Elasticsearch deployment. 6 | 7 | ## Set configuration parameters 8 | 9 | The scripts to prepare the YAML to deploy elasticsearch and kibana depend on parameters being set. Ensure the following are set in `params.yaml`: 10 | 11 | ```yaml 12 | # the DNS CN to be used for elasticsearch service 13 | shared-services-cluster.elasticsearch-fqdn: elasticsearch.dorn.tkg-aws-e2-lab.winterfell.live 14 | # the DNS CN to be used for kibana service 15 | shared-services-cluster.kibana-fqdn: logs.dorn.tkg-aws-e2-lab.winterfell.live 16 | ``` 17 | 18 | ## Prepare Manifests and Deploy Elasticsearch and Kibana 19 | 20 | Elasticsearch and kibana images are pulled from Docker Hub. Ensure your credentials are in the `params.yaml` file in order to avoid rate limit errors. 21 | 22 | ```yaml 23 | dockerhub: 24 | username: REDACTED # Your dockerhub username 25 | password: REDACTED # Your dockerhub password 26 | email: REDACTED # Your dockerhub email 27 | ``` 28 | 29 | Prepare the YAML manifests for the related elasticsearch and kibana K8S objects. Manifests will be output into `generated/$SHARED_SERVICES_CLUSTER_NAME/elasticsearch-kibana/` in case you want to inspect. 30 | 31 | ```bash 32 | ./scripts/generate-and-apply-elasticsearch-kibana-yaml.sh 33 | ``` 34 | 35 | ## Validation Step 36 | 37 | Get a response back from the elasticsearch REST interface: 38 | 39 | ```bash 40 | curl -v http://$(yq e .shared-services-cluster.elasticsearch-fqdn $PARAMS_YAML) 41 | ``` 42 | 43 | ## Go to Next Step 44 | 45 | [Install FluentBit on Shared Services Cluster](07_fluentbit_ssc.md) 46 | 47 | ## Troubleshooting Steps 48 | 49 | You probably don't need these now, but you may if your lab environment runs for an extended period of time. 50 | 51 | Some notes about this POC-quality Elasticsearch / Kibana deployment: 52 | - As Elasticsearch is used to demonstrate Tanzu capability, there is no effort being placed into deploying elasticsearch with best practices. 53 | - There is only one elasticsearch node created. 54 | - You will notice that the indexes are in yellow status and will not become green. This is because the replica shard cannot run on the same node as the primary shard. 55 | - A curator cronjob is created to delete indexes older than 1 day. This is to ensure we don't fill up our persistent volume disk. 56 | 57 | Here are some troubleshooting commands.
Update the FQDN below with your configuration. 58 | 59 | ```bash 60 | export ELASTICSEARCH_CN=$(yq e .shared-services-cluster.elasticsearch-fqdn $PARAMS_YAML) 61 | 62 | # Get General information from elasticsearch 63 | curl "http://$ELASTICSEARCH_CN" 64 | 65 | # Get Size/Status of Indexes. Notice that logstash-YYYY.MM.DD is in yellow (typically a shard in the index is not active) 66 | curl "http://$ELASTICSEARCH_CN/_cat/indices" 67 | 68 | # Get status of the shards. Notice the replica is unallocated. 69 | curl "http://$ELASTICSEARCH_CN/_cat/shards?v&h=n,index,shard,prirep,state,sto,sc,unassigned.reason,unassigned.details&s=sto,index" 70 | 71 | # Since the index name changes based upon date, let's use this long command to retrieve the unassigned index name, to be used in the next command 72 | export INDEX_NAME=`curl "http://$ELASTICSEARCH_CN/_cat/shards?v&h=n,index,shard,prirep,state,sto,sc,unassigned.reason,unassigned.details&s=sto,index" | grep UNASSIGNED | awk '{print $(1)}'` 73 | 74 | # Get details of the allocation status for the unallocated shard. See that it is not allocated because we only have one node in our cluster and the replica cannot be placed on the same node as the primary 75 | curl -X GET "http://$ELASTICSEARCH_CN/_cluster/allocation/explain?pretty" -H 'Content-Type: application/json' -d"{\"index\": \"$INDEX_NAME\",\"shard\": 0,\"primary\": false }" 76 | ``` 77 | -------------------------------------------------------------------------------- /docs/shared-services-cluster/07_fluentbit_ssc.md: -------------------------------------------------------------------------------- 1 | # Install FluentBit on Shared Services Cluster 2 | 3 | ## Set configuration parameters 4 | 5 | The scripts to prepare the YAML to deploy fluent-bit depend on parameters being set. Ensure the following are set in `params.yaml`: 6 | 7 | ```yaml 8 | shared-services-cluster.elasticsearch-fqdn: elasticsearch.dorn.tkg-aws-e2-lab.winterfell.live 9 | shared-services-cluster.kibana-fqdn: logs.dorn.tkg-aws-e2-lab.winterfell.live 10 | ``` 11 | 12 | ## Prepare Manifests and Deploy Fluent Bit 13 | 14 | Prepare the YAML manifests for the related fluent-bit K8S objects. Manifest will be output into `generated/$SHARED_SERVICES_CLUSTER_NAME/fluent-bit/` in case you want to inspect. 15 | 16 | ```bash 17 | ./scripts/generate-and-apply-fluent-bit-yaml.sh $(yq e .shared-services-cluster.name $PARAMS_YAML) 18 | ``` 19 | 20 | ## Validation Step 21 | 22 | Ensure that fluent bit pods are running. 23 | 24 | ```bash 25 | kubectl get pods -n tanzu-system-logging 26 | ``` 27 | 28 | ## Test Log Access 29 | 30 | 1. Access kibana. This leverages the wildcard DNS entry on the contour ingress. Your base domain will be different from mine. 31 | 32 | ```bash 33 | open http://$(yq e .shared-services-cluster.kibana-fqdn $PARAMS_YAML) 34 | ``` 35 | 36 | 2. You should see the kibana welcome screen. 37 | 38 | 3. Click the Discover icon at the top of the left menu bar. 39 | 40 | 4. You will see a widget to create an index pattern. Enter `logstash-*` and click next step. 41 | 42 | 5. Select `@timestamp` for the Time filter field name, and then click Create index pattern. 43 | 44 | 6. Now click the Discover icon at the top of the left menu bar. You can start searching for logs.
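For a quick non-UI confirmation that fluent-bit is actually shipping logs, the sketch below lists the `logstash-*` indices directly from Elasticsearch. It assumes the elasticsearch-fqdn value configured earlier in this lab and the default `logstash-*` index naming used here.

```bash
# Optional sanity check: list the logstash-* indices that fluent-bit is writing to.
# Assumes the shared-services-cluster.elasticsearch-fqdn value set earlier in this lab.
ELASTICSEARCH_CN=$(yq e .shared-services-cluster.elasticsearch-fqdn $PARAMS_YAML)
curl "http://$ELASTICSEARCH_CN/_cat/indices/logstash-*?v"
```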
## Go to Next Step

[Add Prometheus and Grafana to Shared Services Cluster](08_monitoring_ssc.md)

--------------------------------------------------------------------------------
/docs/shared-services-cluster/08_5_minio_ssc.md:
--------------------------------------------------------------------------------

# Install Minio

We will deploy Minio as a target for Harbor images and Velero backups.

This is a minimalist, POC-quality deployment of Minio. It is not a component of Tanzu and is included for demonstration purposes only.

## Set configuration parameters

The scripts that prepare the YAML to deploy minio depend on a few parameters being set. Ensure the following are set in `params.yaml`:

```yaml
minio:
  server-fqdn: minio.dorn.tkg-aws-e2-lab.winterfell.live
  root-user: foo
  root-password: bar
  persistence-size: 40Gi
```

## Prepare Manifests and Deploy Minio

Minio images are pulled from Docker Hub. Ensure your credentials are in the `params.yaml` file in order to avoid rate limit errors.

```yaml
dockerhub:
  username: REDACTED # Your dockerhub username
  password: REDACTED # Your dockerhub password
  email: REDACTED # Your dockerhub email
```

Prepare the YAML manifests for the related minio K8S objects. Manifests will be output into `generated/$SHARED_SERVICES_CLUSTER_NAME/minio/` in case you want to inspect them.

```bash
./scripts/generate-and-apply-minio-yaml.sh
```

## Validation Step

Visit the Minio UI and log in with your credentials:

```bash
open http://$(yq e .minio.server-fqdn $PARAMS_YAML):9000
```

## Go to Next Step

[Enable Data Protection and Setup Nightly Backup on Shared Services Cluster](09_velero_ssc.md)

--------------------------------------------------------------------------------
/docs/shared-services-cluster/08_monitoring_ssc.md:
--------------------------------------------------------------------------------

# Add Prometheus and Grafana to Shared Services Cluster

## Overview

Just as we did for the management cluster in the [Install Monitoring](../mgmt-cluster/08_monitoring_mgmt.md) step, we will add cluster monitoring to the shared services cluster. The explanation is brief and validation steps are skipped in order to reduce redundancy. Refer to the management cluster step for more details.

## Prepare Manifests and Deploy Prometheus

Prepare the YAML manifests for the related prometheus k8s objects. Manifests will be output into `generated/$CLUSTER_NAME/monitoring/` in case you want to inspect them.

```bash
./scripts/generate-and-apply-prometheus-yaml.sh \
  $(yq e .shared-services-cluster.name $PARAMS_YAML) \
  $(yq e .shared-services-cluster.prometheus-fqdn $PARAMS_YAML)
```

## Prepare Manifests and Deploy Grafana

Prepare the YAML manifests for the related grafana k8s objects. Manifests will be output into `generated/$CLUSTER_NAME/monitoring/` in case you want to inspect them.
```bash
./scripts/generate-and-apply-grafana-yaml.sh \
  $(yq e .shared-services-cluster.name $PARAMS_YAML) \
  $(yq e .shared-services-cluster.grafana-fqdn $PARAMS_YAML)
```

## Go to Next Step

[Install Minio on Shared Services Cluster](08_5_minio_ssc.md)

--------------------------------------------------------------------------------
/docs/shared-services-cluster/09_velero_ssc.md:
--------------------------------------------------------------------------------

# Enable Data Protection and Setup Nightly Backup on Shared Services Cluster

## Install Velero client

Even though Tanzu Mission Control will manage your data protection and the lifecycle of velero on the cluster, at times it may be useful to have the velero cli.

Download and install the Velero cli from the TKG 2.1.0 page at https://www.vmware.com/go/get-tkg.

## Set configuration parameters

The scripts that prepare the YAML to deploy velero depend on a few parameters being set. Ensure the following are set in `params.yaml` based upon your environment. This should match the target location you will create in the next section.

```yaml
tmc.data-protection-backup-location-name: my-tmc-data-protection-target-location
velero.bucket: velero-backups
```

## Setup Your Data Protection Target

We will place our TMC Data Protection backups in the Minio server we deployed.

Follow the Tanzu Mission Control docs:

- [Create an Account Credential](https://docs.vmware.com/en/VMware-Tanzu-Mission-Control/services/tanzumc-using/GUID-30DAD680-FA77-48E3-990B-1DFC250372FA.html). Again, this is IaaS dependent. For AWS, you will provide your S3 credentials; for vSphere, your Minio account credentials; for Azure, your Azure Blob Storage credentials.
- [Create a Target Location](https://docs.vmware.com/en/VMware-Tanzu-Mission-Control/services/tanzumc-using/GUID-867683CE-8AF0-4DC7-9121-81AD507EDB3B.html?hWord=N4IghgNiBcIC5gE4HMCmcAEED2BjMcAltgHYDOIAvkA) with your IaaS-dependent target location. **You must set your bucket name to match `velero.bucket` in the params.yaml file.**

## Enable Data Protection on Your Cluster

Orchestrate commands for the `tmc` cli to enable data protection on the cluster and then set up a daily backup.
30 | 31 | ```bash 32 | ./scripts/dataprotection.sh $(yq e .shared-services-cluster.name $PARAMS_YAML) 33 | ``` 34 | 35 | ## Validation Step 36 | 37 | Ensure schedule is created and the first backup is starting 38 | 39 | ```bash 40 | velero schedule get 41 | velero backup get | grep daily 42 | ``` 43 | 44 | ## Go to Next Step 45 | 46 | [Install Harbor](../shared-services-cluster/10_harbor.md) 47 | -------------------------------------------------------------------------------- /docs/shared-services-cluster/harbor-oidc-config.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/shared-services-cluster/harbor-oidc-config.png -------------------------------------------------------------------------------- /docs/shared-services-cluster/tanzu-repo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/shared-services-cluster/tanzu-repo.png -------------------------------------------------------------------------------- /docs/tkg-deployment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/tkg-deployment.png -------------------------------------------------------------------------------- /docs/tkg-lab-base.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/tkg-lab-base.png -------------------------------------------------------------------------------- /docs/tkg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/tkg.png -------------------------------------------------------------------------------- /docs/troubleshooting/DomainDNS.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/troubleshooting/DomainDNS.png -------------------------------------------------------------------------------- /docs/troubleshooting/HostedZone1Details.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/troubleshooting/HostedZone1Details.png -------------------------------------------------------------------------------- /docs/troubleshooting/HostedZone2Details.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/troubleshooting/HostedZone2Details.png -------------------------------------------------------------------------------- /docs/troubleshooting/HostedZones.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/troubleshooting/HostedZones.png 
--------------------------------------------------------------------------------
/docs/troubleshooting/Readme.md:
--------------------------------------------------------------------------------

Lab Participant Issues Found

1. DNS Setup Challenges. Check out [DNS Setup Deep Dive](dns-setup.md)
2. YQ required an updated version of python
3. Ubuntu challenges with access to config.yml

--------------------------------------------------------------------------------
/docs/troubleshooting/dns-setup.md:
--------------------------------------------------------------------------------

# DNS Setup

In order for the labs to work, you need to have an appropriate domain and DNS set up. What this boils down to is a global domain that you own/control, with delegation of a subdomain for the lab work. Many users will already have it set up for other purposes. An example of this would look like:
* example.com (owned domain name)
  * tkg-aws-lab.example.com (NS Record -> Route53 Hosted Zone #1)
  * tkg-vsphere-lab.example.com (NS Record -> Route53 Hosted Zone #2)
* tkg-aws-lab.example.com (Managed by Route53 as a Public Hosted Zone)
  * \*.tkg-mgmt.tkg-aws-lab.example.com (Route53 Record Set - CNAME -> AWS LB)
  * \*.tkg-shared.tkg-aws-lab.example.com (Route53 Record Set - CNAME -> AWS LB)
  * etc...
* tkg-vsphere-lab.example.com (Managed by Route53 as a Public Hosted Zone)
  * \*.tkg-mgmt.tkg-vsphere-lab.example.com (Route53 Record Set - A Record -> Metal LB IP)
  * \*.tkg-shared.tkg-vsphere-lab.example.com (Route53 Record Set - A Record -> Metal LB IP)
  * etc...
* homelab.example.com (NS Record -> Google Cloud DNS) # Not in scope for lab - for example only
  * opsman.homelab.example.com (Managed by GCloud DNS zone A Record -> 192.168.x.x) # Not in scope for lab - for example only
  * etc...

## Screen Shots

This is what it looks like to have 2 Hosted Zones in AWS Route53. Each zone will independently manage all DNS entries within that subdomain. The lab scripts will create entries as needed. Note that when created, the Zone ID can be obtained from here and pasted into the params file.

![HostedZone](HostedZones.png)

Here is what the initial top-level domain delegation would look like. This is from Google Domains, but the same strategy will work for other providers, such as GoDaddy. The trick is to create a Hosted Zone first (where you want to manage all entries for the lab) and then paste the NS entries created into your domain management's DNS area. This delegates all lookups to the new Hosted Zone.

![DomainDNS](DomainDNS.png)

Once this linkage is set up, you can add entries to each hosted zone manually or via script. The lab will update Route53 automatically, using your AWS Access/Secret key and the Hosted Zone ID that you set into the params file. Once the labs are completed, this is what the Hosted Zone will look like. There are 2 examples here, one for AWS and one for vSphere. This is because of the way K8s LoadBalancers are managed. On AWS, an EC2 Load Balancer is created for each cluster's API endpoint -and- for each Service type LoadBalancer created in the cluster. On vSphere, we are using NSX ALB.
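Before running any lab scripts, you can confirm the delegation is working with a quick `dig` query (shown here with the example subdomain from above; substitute your own). It should return the name servers that Route53 assigned to the hosted zone:

```bash
# Expect the awsdns name servers of the hosted zone if delegation is set up correctly
dig tkg-aws-lab.example.com NS +short
```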
### Hosted Zone for AWS-deployed Lab
![AWSZoneDetails](HostedZone1Details.png)
### Hosted Zone for vSphere-deployed Lab
![vSphereZoneDetails](HostedZone2Details.png)

## About the Ingress Controller

As part of configuring your params.yaml file, note that we ask you to define several FQDNs in advance - these do not need to be manually created in the Hosted Zone as Record Sets, nor are they specifically created by scripts. Rather, we use a wildcard (seen above), such that any name that falls within that wildcard will be resolved. In a Kubernetes cluster that utilizes an Ingress Controller (which the lab does - Contour), all traffic goes there and then is directed to the correct service/pods via Ingress Rules. These will be created as necessary.

A resulting nslookup call will show the IP that was created for the LoadBalancer and populated into the Hosted Zone by the script:

```bash
# Within Shared Cluster
andrew@ubuntu-jump:~/tkg/tkg-lab$ k get svc envoy -n tanzu-system-ingress
NAME    TYPE           CLUSTER-IP      EXTERNAL-IP     PORT(S)                      AGE
envoy   LoadBalancer   100.65.251.92   192.168.1.176   80:30256/TCP,443:30425/TCP   7d18h

# NSLOOKUP for anything in shared cluster:
andrew@ubuntu-jump:~/tkg/tkg-lab$ nslookup logs.tkg-shared.tkg-vsphere-lab.arg-pivotal.com
Server:    8.8.8.8
Address:   8.8.8.8#53

Non-authoritative answer:
Name:   logs.tkg-shared.tkg-vsphere-lab.arg-pivotal.com
Address: 192.168.1.176

```

--------------------------------------------------------------------------------
/docs/workload-cluster/01_install_tkg_and_components_wlc.md:
--------------------------------------------------------------------------------

# Create new Workload Cluster

The example workload cluster leverages common components that were used to create the shared services cluster:

- Creating a workload cluster enabled for OIDC
- Attaching the newly created cluster to TMC
- Applying default policy on the cluster allowing platform-team admin access
- Setting up contour for ingress with a cluster certificate issuer
- Setting up fluent-bit to send logging to the centralized Elasticsearch server on shared services cluster
- Setting up prometheus and grafana for monitoring
- Setting up daily Velero backups

Here we are pulling the following values from the `params.yaml` file. See examples:

```yaml
workload-cluster:
  worker-replicas: 2
  name: highgarden
  ingress-fqdn: '*.highgarden.tkg-aws-e2-lab.winterfell.live'
  prometheus-fqdn: prometheus.highgarden.tkg-aws-e2-lab.winterfell.live
  grafana-fqdn: grafana.highgarden.tkg-aws-e2-lab.winterfell.live
```

Now you can execute the following script to perform all of those tasks:

```bash
./scripts/deploy-all-workload-cluster-components.sh
```

After the script finishes attaching the cluster to TMC, go to the TMC UI and find your cluster. In the Cluster view, go to `Add-ons > Tanzu Repositories` and disable the `tanzu-standard` repository if using version `v2023.7.13_update.2`.
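While the script runs, you can keep an eye on cluster creation from the CLI, for example:

```bash
# Shows the workload cluster moving from creating to running, along with node counts
tanzu cluster list
```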
32 | 33 | >Note: Wait until your cluster has been created and components installed. It may take 12 minutes. 34 | 35 | >Note: Once cluster is created your kubeconfig already has the new context as the active one with the necessary credentials 36 | 37 | ## Validation Step 38 | 39 | There are lots of potential validation steps, but let's focus on the ability to login. 40 | 41 | ```bash 42 | tanzu cluster kubeconfig get $(yq e .workload-cluster.name $PARAMS_YAML) 43 | kubectl config use-context tanzu-cli-$(yq e .workload-cluster.name $PARAMS_YAML)@$(yq e .workload-cluster.name $PARAMS_YAML) 44 | kubectl get pods 45 | ``` 46 | 47 | A browser window will launch and you will be redirected to Okta. Login as `alana`. You should see the results of your pod request. 48 | 49 | ## Congrats, Foundational Lab is Complete 50 | 51 | You are now welcome to continue on with the Acme Fitness lab, or explore our bonus labs. Visit the [Main Readme](../../Readme.md) to continue. 52 | -------------------------------------------------------------------------------- /docs/workload-cluster/tanzu-repo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/docs/workload-cluster/tanzu-repo.png -------------------------------------------------------------------------------- /elasticsearch-kibana/01-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: elasticsearch-kibana -------------------------------------------------------------------------------- /elasticsearch-kibana/02-statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: elasticsearch 5 | namespace: elasticsearch-kibana 6 | labels: 7 | app: elasticsearch 8 | spec: 9 | serviceName: elasticsearch 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app: elasticsearch 14 | template: 15 | metadata: 16 | labels: 17 | app: elasticsearch 18 | spec: 19 | containers: 20 | - name: elasticsearch 21 | image: docker.io/bitnami/elasticsearch:7.2.1 22 | resources: 23 | limits: 24 | cpu: 1000m 25 | requests: 26 | cpu: 100m 27 | ports: 28 | - containerPort: 9200 29 | name: rest 30 | protocol: TCP 31 | - containerPort: 9300 32 | name: inter-node 33 | protocol: TCP 34 | volumeMounts: 35 | - name: data 36 | mountPath: /usr/share/elasticsearch/data 37 | env: 38 | - name: cluster.name 39 | value: k8s-logs 40 | - name: node.name 41 | valueFrom: 42 | fieldRef: 43 | fieldPath: metadata.name 44 | - name: discovery.seed_hosts 45 | value: "elasticsearch-0.elasticsearch" 46 | - name: cluster.initial_master_nodes 47 | value: "elasticsearch-0" 48 | - name: ES_JAVA_OPTS 49 | value: "-Xms512m -Xmx512m" 50 | initContainers: 51 | - name: fix-permissions 52 | image: docker.io/bitnami/bitnami-shell:10-debian-10-r138 53 | command: ["sh", "-c", "chown -R 1000:1000 /usr/share/elasticsearch/data"] 54 | securityContext: 55 | privileged: true 56 | volumeMounts: 57 | - name: data 58 | mountPath: /usr/share/elasticsearch/data 59 | - name: increase-vm-max-map 60 | image: docker.io/bitnami/bitnami-shell:10-debian-10-r138 61 | command: ["sysctl", "-w", "vm.max_map_count=262144"] 62 | securityContext: 63 | privileged: true 64 | - name: increase-fd-ulimit 65 | image: docker.io/bitnami/bitnami-shell:10-debian-10-r138 66 | command: ["sh", "-c", "ulimit -n 
65536"] 67 | securityContext: 68 | privileged: true 69 | volumeClaimTemplates: 70 | - metadata: 71 | name: data 72 | labels: 73 | app: elasticsearch 74 | spec: 75 | accessModes: [ "ReadWriteOnce" ] 76 | # storageClassName: do-block-storage 77 | resources: 78 | requests: 79 | storage: 40Gi -------------------------------------------------------------------------------- /elasticsearch-kibana/03-service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: elasticsearch 5 | namespace: elasticsearch-kibana 6 | labels: 7 | app: elasticsearch 8 | spec: 9 | selector: 10 | app: elasticsearch 11 | type: ClusterIP 12 | ports: 13 | - port: 9200 14 | name: rest 15 | - port: 9300 16 | name: inter-node -------------------------------------------------------------------------------- /elasticsearch-kibana/curator-cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | action_file.yml: |- 4 | --- 5 | actions: 6 | 1: 7 | action: delete_indices 8 | description: "Clean up ES by deleting old indices" 9 | options: 10 | timeout_override: 11 | continue_if_exception: False 12 | disable_action: False 13 | ignore_empty_list: True 14 | filters: 15 | - filtertype: age 16 | source: name 17 | direction: older 18 | timestring: '%Y.%m.%d' 19 | unit: days 20 | unit_count: 1 21 | field: 22 | stats_result: 23 | epoch: 24 | exclude: False 25 | config.yml: |- 26 | --- 27 | client: 28 | hosts: 29 | - elasticsearch 30 | port: 9200 31 | kind: ConfigMap 32 | metadata: 33 | labels: 34 | app.kubernetes.io/component: curator 35 | name: curator-elasticsearch-curator 36 | namespace: elasticsearch-kibana 37 | -------------------------------------------------------------------------------- /elasticsearch-kibana/curator-cronjob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: CronJob 3 | metadata: 4 | labels: 5 | app: curator 6 | name: curator-elasticsearch-curator 7 | namespace: elasticsearch-kibana 8 | spec: 9 | concurrencyPolicy: Allow 10 | failedJobsHistoryLimit: 1 11 | jobTemplate: 12 | metadata: 13 | labels: 14 | app: curator 15 | spec: 16 | template: 17 | metadata: 18 | labels: 19 | app: curator 20 | spec: 21 | containers: 22 | - args: 23 | - --config 24 | - /etc/es-curator/config.yml 25 | - /etc/es-curator/action_file.yml 26 | command: 27 | - curator 28 | image: docker.io/bitnami/elasticsearch-curator:5.8.4-debian-10-r0 29 | imagePullPolicy: IfNotPresent 30 | name: curator-elasticsearch-curator 31 | resources: {} 32 | terminationMessagePath: /dev/termination-log 33 | terminationMessagePolicy: File 34 | volumeMounts: 35 | - mountPath: /etc/es-curator 36 | name: config-volume 37 | dnsPolicy: ClusterFirst 38 | restartPolicy: Never 39 | schedulerName: default-scheduler 40 | terminationGracePeriodSeconds: 30 41 | volumes: 42 | - configMap: 43 | defaultMode: 420 44 | name: curator-elasticsearch-curator 45 | name: config-volume 46 | schedule: '0 * * * *' 47 | successfulJobsHistoryLimit: 3 48 | suspend: false 49 | -------------------------------------------------------------------------------- /elasticsearch-kibana/template/03b-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: elasticsearch 5 | namespace: elasticsearch-kibana 6 | labels: 7 | app: elasticsearch 8 | annotations: 9 | 
kubernetes.io/ingress.class: contour 10 | spec: 11 | rules: 12 | - host: elasticsearch.mgmt.tkg-vsp-lab.hyrulelab.com 13 | http: 14 | paths: 15 | - pathType: Prefix 16 | path: "/" 17 | backend: 18 | service: 19 | name: elasticsearch 20 | port: 21 | number: 9200 22 | -------------------------------------------------------------------------------- /elasticsearch-kibana/template/04-kibana.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kibana 5 | namespace: elasticsearch-kibana 6 | labels: 7 | app: kibana 8 | spec: 9 | ports: 10 | - port: 5601 11 | selector: 12 | app: kibana 13 | --- 14 | apiVersion: apps/v1 15 | kind: Deployment 16 | metadata: 17 | name: kibana 18 | namespace: elasticsearch-kibana 19 | labels: 20 | app: kibana 21 | spec: 22 | replicas: 1 23 | selector: 24 | matchLabels: 25 | app: kibana 26 | template: 27 | metadata: 28 | labels: 29 | app: kibana 30 | spec: 31 | containers: 32 | - name: kibana 33 | image: docker.io/bitnami/kibana:7.2.1 34 | resources: 35 | limits: 36 | cpu: 1000m 37 | requests: 38 | cpu: 100m 39 | env: 40 | - name: ELASTICSEARCH_URL 41 | value: http://elasticsearch:9200 42 | ports: 43 | - containerPort: 5601 44 | 45 | -------------------------------------------------------------------------------- /elasticsearch-kibana/template/05-kibana-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: kibana 5 | namespace: elasticsearch-kibana 6 | labels: 7 | app: kibana 8 | annotations: 9 | kubernetes.io/ingress.class: contour 10 | spec: 11 | rules: 12 | - host: logs.mgmt.tkg-vsp-lab.hyrulelab.com 13 | http: 14 | paths: 15 | - pathType: Prefix 16 | path: "/" 17 | backend: 18 | service: 19 | name: kibana 20 | port: 21 | number: 5601 22 | -------------------------------------------------------------------------------- /gitlab/values-gitlab.yaml: -------------------------------------------------------------------------------- 1 | certmanager: 2 | install: false 3 | certmanager-issuer: 4 | email: CERT_MANAGER_EMAIL 5 | gitlab: 6 | gitlab-shell: 7 | minReplicas: 1 8 | maxReplicas: 1 9 | sidekiq: 10 | minReplicas: 1 11 | maxReplicas: 1 12 | task-runner: 13 | enabled: false 14 | webservice: 15 | minReplicas: 1 16 | maxReplicas: 1 17 | gitlab-runner: 18 | install: false 19 | global: 20 | hosts: 21 | domain: GITLAB_BASE_FQDN 22 | externalIP: EXTERNAL_LB_IP 23 | ingress: 24 | annotations: 25 | cert-manager.io/cluster-issuer: letsencrypt-contour-cluster-issuer 26 | kubernetes.io/tls-acme: true 27 | projectcontour.io/ingress.class: contour 28 | minio: 29 | resources: 30 | requests: 31 | cpu: 10m 32 | memory: 64Mi 33 | nginx-ingress: 34 | enabled: false 35 | controller: 36 | minAvailable: 0 37 | replicaCount: 1 38 | resources: 39 | requests: 40 | cpu: 50m 41 | memory: 100Mi 42 | defaultBackend: 43 | minAvailable: 0 44 | replicaCount: 1 45 | resources: 46 | requests: 47 | cpu: 5m 48 | memory: 5Mi 49 | prometheus: 50 | install: false 51 | redis: 52 | resources: 53 | requests: 54 | cpu: 10m 55 | memory: 64Mi 56 | registry: 57 | hpa: 58 | minReplicas: 1 59 | maxReplicas: 1 60 | -------------------------------------------------------------------------------- /keys/.gitkeep: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/keys/.gitkeep -------------------------------------------------------------------------------- /kuard/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: kuard 5 | labels: 6 | app: kuard 7 | spec: 8 | selector: 9 | matchLabels: 10 | app: kuard 11 | replicas: 1 12 | template: 13 | metadata: 14 | labels: 15 | app: kuard 16 | spec: 17 | containers: 18 | - name: kuard 19 | image: gcr.io/kuar-demo/kuard-amd64:blue 20 | ports: 21 | - containerPort: 8080 22 | resources: 23 | requests: 24 | memory: "768Mi" 25 | cpu: "500m" 26 | limits: 27 | memory: "768Mi" 28 | cpu: "500m" 29 | -------------------------------------------------------------------------------- /kuard/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: kuard 5 | labels: 6 | app: kuard 7 | spec: 8 | rules: 9 | - host: kuard.platform-sandbox-1.tanzu-poc.company.com 10 | http: 11 | paths: 12 | - pathType: Prefix 13 | path: "/" 14 | backend: 15 | service: 16 | name: kuard 17 | port: 18 | number: 8080 19 | -------------------------------------------------------------------------------- /kuard/svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kuard 5 | labels: 6 | app: kuard 7 | spec: 8 | selector: 9 | app: kuard 10 | ports: 11 | - port: 8080 12 | targetPort: 8080 13 | type: ClusterIP 14 | -------------------------------------------------------------------------------- /kubeapps/01-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: kubeapps 5 | -------------------------------------------------------------------------------- /kubeapps/generate-and-apply-kubeapps-yaml.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/../scripts/set-env.sh 5 | 6 | if [ ! $# -eq 1 ]; then 7 | echo "Must supply cluster_name as args" 8 | exit 1 9 | fi 10 | 11 | export CLUSTER_NAME=$1 12 | export OIDC_ISSUER_URL=https://$(yq e .kubeapps.oidc-issuer-fqdn $PARAMS_YAML) 13 | export KUBEAPPS_FQDN=$(yq e .kubeapps.server-fqdn $PARAMS_YAML) 14 | 15 | kubectl config use-context $CLUSTER_NAME-admin@$CLUSTER_NAME 16 | 17 | echo "Beginning Kubeapps install..." 
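# Note: the manifests and values files prepared below are written to generated/$CLUSTER_NAME/kubeapps/
# so they can be inspected before or after they are applied.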
18 | 19 | mkdir -p generated/$CLUSTER_NAME/kubeapps 20 | 21 | # 01-namespace.yaml 22 | 23 | cp kubeapps/01-namespace.yaml generated/$CLUSTER_NAME/kubeapps/01-namespace.yaml 24 | export ISSUER_URL_FLAG=" --oidc-issuer-url=$OIDC_ISSUER_URL" 25 | 26 | # kubeapps-values.yaml 27 | cp kubeapps/kubeapps-values.yaml generated/$CLUSTER_NAME/kubeapps/kubeapps-values.yaml 28 | yq e -i ".ingress.hostname = env(KUBEAPPS_FQDN)" generated/$CLUSTER_NAME/kubeapps/kubeapps-values.yaml 29 | yq e -i '.authProxy.additionalFlags.[0] = env(ISSUER_URL_FLAG)' generated/$CLUSTER_NAME/kubeapps/kubeapps-values.yaml 30 | 31 | # jwt-authenticator 32 | cp kubeapps/kubeapps-jwt-authenticator.yaml generated/$CLUSTER_NAME/kubeapps/kubeapps-jwt-authenticator.yaml 33 | yq e -i ".spec.issuer = env(OIDC_ISSUER_URL)" generated/$CLUSTER_NAME/kubeapps/kubeapps-jwt-authenticator.yaml 34 | 35 | kubectl apply -f generated/$CLUSTER_NAME/kubeapps/kubeapps-jwt-authenticator.yaml 36 | 37 | helm repo add bitnami https://charts.bitnami.com/bitnami 38 | 39 | kubectl apply -f generated/$CLUSTER_NAME/kubeapps/01-namespace.yaml 40 | helm upgrade --install kubeapps --namespace kubeapps bitnami/kubeapps -f generated/$CLUSTER_NAME/kubeapps/kubeapps-values.yaml --version=7.4.0 41 | -------------------------------------------------------------------------------- /kubeapps/kubeapps-jwt-authenticator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: authentication.concierge.pinniped.dev/v1alpha1 2 | kind: JWTAuthenticator 3 | metadata: 4 | name: kubeapps-jwt-authenticator 5 | spec: 6 | audience: kubeapps 7 | claims: 8 | groups: "groups" 9 | username: "email" 10 | issuer: # dynamically populated with oidc issuer url, e.g. https://dev-677945.okta.com -------------------------------------------------------------------------------- /kubeapps/kubeapps-values.yaml: -------------------------------------------------------------------------------- 1 | useHelm3: true 2 | allowNamespaceDiscovery: true 3 | ingress: 4 | enabled: true 5 | certManager: true 6 | hostname: 7 | tls: true 8 | annotations: 9 | ingress.kubernetes.io/force-ssl-redirect: "true" 10 | ingress.kubernetes.io/proxy-body-size: "0" 11 | kubernetes.io/ingress.class: "contour" 12 | cert-manager.io/cluster-issuer: "letsencrypt-contour-cluster-issuer" 13 | kubernetes.io/tls-acme: "true" 14 | 15 | # Auth Proxy for OIDC support 16 | # ref: https://github.com/kubeapps/kubeapps/blob/master/docs/user/using-an-OIDC-provider.md 17 | authProxy: 18 | enabled: true 19 | provider: oidc 20 | cookieSecret: bm90LWdvb2Qtc2VjcmV0Cg== 21 | clientID: kubeapps 22 | clientSecret: FOO_SECRET 23 | 24 | # Pinniped Support 25 | # https://liveandletlearn.net/post/kubeapps-on-tanzu-kubernetes-grid-13-part-2/ 26 | pinnipedProxy: 27 | enabled: true 28 | defaultAuthenticatorName: kubeapps-jwt-authenticator 29 | image: 30 | repository: bitnami/kubeapps-pinniped-proxy 31 | 32 | clusters: 33 | - name: default 34 | pinnipedConfig: 35 | enable: true 36 | 37 | apprepository: 38 | initialRepos: 39 | - name: vac-repo 40 | url: https://charts.app-catalog.vmware.com/demo 41 | -------------------------------------------------------------------------------- /local-config/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tanzu-Solutions-Engineering/tkg-lab/543b938976a6335d382d017466f20bb39c75f4b5/local-config/.gitkeep -------------------------------------------------------------------------------- 
/overlay/trust-certificate/configmap.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | --- 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | name: #@ "{}-ca-cert".format(data.values.ca) 7 | data: 8 | ca.crt: #@ data.values.certificate 9 | 10 | -------------------------------------------------------------------------------- /overlay/trust-certificate/overlay.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | #@ load("@ytt:overlay", "overlay") 3 | 4 | #! Add and trust the Let's Encrypt CA certificate authority in a given deployment 5 | #@overlay/match by=overlay.subset({"kind":"Deployment"}),expects="1+" 6 | --- 7 | spec: 8 | template: 9 | spec: 10 | volumes: 11 | #@overlay/append 12 | - name: letsencrypt 13 | configMap: 14 | name: #@ "{}-ca-cert".format(data.values.ca) 15 | defaultMode: 420 16 | items: 17 | - key: ca.crt 18 | path: #@ "{}.pem".format(data.values.ca) 19 | containers: 20 | #@overlay/match by=overlay.all,expects="1+" 21 | #@overlay/match-child-defaults missing_ok=True 22 | - volumeMounts: 23 | #@overlay/append 24 | - name: letsencrypt 25 | mountPath: /etc/ssl/certs/letsencrypt.pem 26 | subPath: #@ "{}.pem".format(data.values.ca) 27 | -------------------------------------------------------------------------------- /overlay/trust-certificate/values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | ca: 4 | certificate: 5 | -------------------------------------------------------------------------------- /pipeline.yaml: -------------------------------------------------------------------------------- 1 | jobs: 2 | - name: hello-world 3 | plan: 4 | - task: say-hello 5 | config: 6 | platform: linux 7 | image_resource: 8 | type: docker-image 9 | source: {repository: alpine} 10 | params: 11 | HELLO: ((hello)) 12 | run: 13 | path: /bin/sh 14 | args: ["-c", "echo $HELLO"] 15 | -------------------------------------------------------------------------------- /scripts/01-prep-aws-objects.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/set-env.sh 5 | 6 | export AWS_REGION=$(yq e .aws.region $PARAMS_YAML) 7 | 8 | if [ -z "$AWS_ACCESS_KEY_ID" ]; then 9 | export AWS_ACCESS_KEY_ID=$(yq e .aws.access-key-id $PARAMS_YAML) 10 | fi 11 | 12 | if [ -z "$AWS_SECRET_ACCESS_KEY" ]; then 13 | export AWS_SECRET_ACCESS_KEY=$(yq e .aws.secret-access-key $PARAMS_YAML) 14 | fi 15 | 16 | tanzu management-cluster permissions aws set 17 | 18 | TKG_ENVIRONMENT_NAME=$(yq e .environment-name $PARAMS_YAML) 19 | SSH_KEY_FILE_NAME=$TKG_ENVIRONMENT_NAME-ssh.pem 20 | 21 | mkdir -p keys/ 22 | if [[ ! -f ./keys/$SSH_KEY_FILE_NAME ]]; then 23 | aws ec2 delete-key-pair --key-name tkg-$TKG_ENVIRONMENT_NAME-default --region $AWS_REGION 24 | aws ec2 create-key-pair --key-name tkg-$TKG_ENVIRONMENT_NAME-default --region $AWS_REGION --output json | jq .KeyMaterial -r > keys/$SSH_KEY_FILE_NAME 25 | fi 26 | 27 | 28 | # Use Terraform to pave the networking. Pass in cluster names so that the appropriate tags can be put on the subnets. 
29 | MC_NAME=$(yq e .management-cluster.name $PARAMS_YAML) 30 | SSC_NAME=$(yq e .shared-services-cluster.name $PARAMS_YAML) 31 | WLC_NAME=$(yq e .workload-cluster.name $PARAMS_YAML) 32 | terraform -chdir=terraform/aws init 33 | terraform -chdir=terraform/aws fmt 34 | terraform -chdir=terraform/aws apply -auto-approve -var="mc_name=$MC_NAME" -var="ssc_name=$SCC_NAME" -var="wlc_name=$WLC_NAME" -var="aws_region=$AWS_REGION" 35 | -------------------------------------------------------------------------------- /scripts/01-prep-vsphere-objects.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/set-env.sh 5 | 6 | function echo_found() { 7 | message=${1:-"skipping"} 8 | echo -e "\033[1;32mfound\033[0m, $message" 9 | } 10 | 11 | function echo_notfound() { 12 | message=${1:-"creating"} 13 | echo -e "\033[1;32mnot found\033[0m, $message" 14 | } 15 | 16 | function ensure_upload_template() { 17 | template_inventory_folder=$1 18 | template_name=$2 19 | template_ova_path=$3 20 | if [ "$template_inventory_folder" = "" ]; then 21 | template_path=$template_name 22 | else 23 | template_path="$template_inventory_folder/$template_name" 24 | fi 25 | 26 | echo -n "Checking for template at $template_path: " 27 | if [[ "$(govc vm.info $template_path)" == *"$template_name"* ]]; then 28 | echo_found 29 | else 30 | echo_notfound 31 | govc import.ova -folder $template_inventory_folder $template_ova_path 32 | govc vm.markastemplate $template_inventory_folder/$template_name 33 | fi 34 | 35 | } 36 | 37 | # Get cluster name and prepare cluster-config file 38 | export CLUSTER_NAME=$(yq e .management-cluster.name $PARAMS_YAML) 39 | mkdir -p generated/$CLUSTER_NAME 40 | cp config-templates/vsphere-mc-config.yaml generated/$CLUSTER_NAME/cluster-config.yaml 41 | 42 | # Get vSphere configuration vars from params.yaml 43 | export GOVC_URL=$(yq e .vsphere.server $PARAMS_YAML) 44 | export GOVC_USERNAME=$(yq e .vsphere.username $PARAMS_YAML) 45 | export GOVC_PASSWORD=$(yq e .vsphere.password $PARAMS_YAML) 46 | export GOVC_INSECURE=$(yq e .vsphere.insecure $PARAMS_YAML) 47 | export GOVC_DATASTORE=$(yq e .vsphere.datastore $PARAMS_YAML) 48 | export TEMPLATE_FOLDER=$(yq e .vsphere.template-folder $PARAMS_YAML) 49 | export DATACENTER=$(yq e .vsphere.datacenter $PARAMS_YAML) 50 | export NETWORK=$(yq e .vsphere.network $PARAMS_YAML) 51 | export TLS_THUMBPRINT=$(yq e .vsphere.tls-thumbprint $PARAMS_YAML) 52 | export GOVC_RESOURCE_POOL=$(yq e .vsphere.resource-pool $PARAMS_YAML) 53 | export LOCAL_OVA_FOLDER=$(yq e .vsphere.local-ova-folder $PARAMS_YAML) 54 | 55 | # Write vars into cluster-config file 56 | yq e -i '.VSPHERE_SERVER = env(GOVC_URL)' generated/$CLUSTER_NAME/cluster-config.yaml 57 | yq e -i '.VSPHERE_USERNAME = env(GOVC_USERNAME)' generated/$CLUSTER_NAME/cluster-config.yaml 58 | yq e -i '.VSPHERE_PASSWORD = strenv(GOVC_PASSWORD)' generated/$CLUSTER_NAME/cluster-config.yaml 59 | yq e -i '.VSPHERE_DATASTORE = env(GOVC_DATASTORE)' generated/$CLUSTER_NAME/cluster-config.yaml 60 | yq e -i '.VSPHERE_FOLDER = env(TEMPLATE_FOLDER)' generated/$CLUSTER_NAME/cluster-config.yaml 61 | yq e -i '.VSPHERE_DATACENTER = env(DATACENTER)' generated/$CLUSTER_NAME/cluster-config.yaml 62 | yq e -i '.VSPHERE_NETWORK = env(NETWORK)' generated/$CLUSTER_NAME/cluster-config.yaml 63 | yq e -i '.VSPHERE_TLS_THUMBPRINT = strenv(TLS_THUMBPRINT)' generated/$CLUSTER_NAME/cluster-config.yaml 
64 | yq e -i '.VSPHERE_RESOURCE_POOL = env(GOVC_RESOURCE_POOL)' generated/$CLUSTER_NAME/cluster-config.yaml 65 | # The rest of the cluster-config needs to be set manually 66 | 67 | # Create SSH key 68 | mkdir -p keys/ 69 | TKG_ENVIRONMENT_NAME=$(yq e .environment-name $PARAMS_YAML) 70 | tkg_key_file="./keys/$TKG_ENVIRONMENT_NAME-ssh" 71 | echo -n "Checking for existing SSH key at $tkg_key_file: " 72 | if [ -f "$tkg_key_file" ]; then 73 | echo_found "skipping generation" 74 | else 75 | echo_notfound "generating" 76 | ssh-keygen -t rsa -b 4096 -f $tkg_key_file -q -N "" 77 | fi 78 | export VSPHERE_SSH_PUB_KEY=$(cat $tkg_key_file.pub) 79 | yq e -i '.VSPHERE_SSH_AUTHORIZED_KEY = env(VSPHERE_SSH_PUB_KEY)' generated/$CLUSTER_NAME/cluster-config.yaml 80 | 81 | # Upload TKG k8s OVA: Both Ubuntu and Photon 82 | # TODO: Must update exact sha's once GA version is released 83 | ensure_upload_template $TEMPLATE_FOLDER photon-3-kube-v1.27.5 $LOCAL_OVA_FOLDER/photon-3-kube-v1.27.5+vmware.1-tkg.1-cac282289bb29b217b808a2b9b0c0c46.ova 84 | ensure_upload_template $TEMPLATE_FOLDER ubuntu-2004-efi-kube-v1.27.5 $LOCAL_OVA_FOLDER/ubuntu-2004-kube-v1.27.5+vmware.1-tkg.1-0eb96d2f9f4f705ac87c40633d4b69st.ova -------------------------------------------------------------------------------- /scripts/02-deploy-aws-mgmt-cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/set-env.sh 5 | 6 | export CLUSTER=$(yq e .management-cluster.name $PARAMS_YAML) 7 | 8 | mkdir -p generated/$CLUSTER 9 | cp config-templates/aws-mc-config.yaml generated/$CLUSTER/cluster-config.yaml 10 | 11 | export REGION=$(yq e .aws.region $PARAMS_YAML) 12 | export SSH_KEY_NAME=tkg-$(yq e .environment-name $PARAMS_YAML)-default 13 | export OIDC_ISSUER_URL=https://$(yq e .okta.auth-server-fqdn $PARAMS_YAML) 14 | export OIDC_CLIENT_ID=$(yq e .okta.tkg-app-client-id $PARAMS_YAML) 15 | export OIDC_CLIENT_SECRET=$(yq e .okta.tkg-app-client-secret $PARAMS_YAML) 16 | export WORKER_REPLICAS=$(yq e .management-cluster.worker-replicas $PARAMS_YAML) 17 | export AWS_CONTROL_PLANE_MACHINE_TYPE=$(yq e .aws.control-plane-machine-type $PARAMS_YAML) 18 | export AWS_NODE_MACHINE_TYPE=$(yq e .aws.node-machine-type $PARAMS_YAML) 19 | export AWS_VPC_ID=$(terraform -chdir=terraform/aws output -raw vpc_id) 20 | export AWS_PUBLIC_SUBNET_ID=$(terraform -chdir=terraform/aws output -raw public_subnet) 21 | export AWS_PRIVATE_SUBNET_ID=$(terraform -chdir=terraform/aws output -raw private_subnet) 22 | 23 | yq e -i '.CLUSTER_NAME = env(CLUSTER)' generated/$CLUSTER/cluster-config.yaml 24 | yq e -i '.AWS_REGION = env(REGION)' generated/$CLUSTER/cluster-config.yaml 25 | yq e -i '.AWS_SSH_KEY_NAME = env(SSH_KEY_NAME)' generated/$CLUSTER/cluster-config.yaml 26 | yq e -i '.OIDC_IDENTITY_PROVIDER_ISSUER_URL = env(OIDC_ISSUER_URL)' generated/$CLUSTER/cluster-config.yaml 27 | yq e -i '.OIDC_IDENTITY_PROVIDER_CLIENT_ID = env(OIDC_CLIENT_ID)' generated/$CLUSTER/cluster-config.yaml 28 | yq e -i '.OIDC_IDENTITY_PROVIDER_CLIENT_SECRET = env(OIDC_CLIENT_SECRET)' generated/$CLUSTER/cluster-config.yaml 29 | yq e -i '.WORKER_MACHINE_COUNT = env(WORKER_REPLICAS)' generated/$CLUSTER/cluster-config.yaml 30 | yq e -i '.CONTROL_PLANE_MACHINE_TYPE = env(AWS_CONTROL_PLANE_MACHINE_TYPE)' generated/$CLUSTER/cluster-config.yaml 31 | yq e -i '.NODE_MACHINE_TYPE = env(AWS_NODE_MACHINE_TYPE)' generated/$CLUSTER/cluster-config.yaml 32 | 
yq e -i '.AWS_VPC_ID = env(AWS_VPC_ID)' generated/$CLUSTER/cluster-config.yaml 33 | yq e -i '.AWS_PUBLIC_SUBNET_ID = env(AWS_PUBLIC_SUBNET_ID)' generated/$CLUSTER/cluster-config.yaml 34 | yq e -i '.AWS_PRIVATE_SUBNET_ID = env(AWS_PRIVATE_SUBNET_ID)' generated/$CLUSTER/cluster-config.yaml 35 | 36 | tanzu management-cluster create --file=generated/$CLUSTER/cluster-config.yaml -v 6 37 | -------------------------------------------------------------------------------- /scripts/02-deploy-azure-mgmt-cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source "$TKG_LAB_SCRIPTS/set-env.sh" 5 | 6 | ##################### 7 | # get variables 8 | ##################### 9 | 10 | # Get cluster name and prepare cluster-config file 11 | export CLUSTER_NAME=$(yq e .management-cluster.name $PARAMS_YAML) 12 | export CLUSTER_CONFIG="generated/$CLUSTER_NAME/cluster-config.yaml" 13 | 14 | export AZURE_CLIENT_ID=$(yq e .azure.client-id $PARAMS_YAML) 15 | export AZURE_CLIENT_SECRET=$(yq e .azure.client-secret $PARAMS_YAML) 16 | export AZURE_CONTROL_PLANE_MACHINE_TYPE=$(yq e .azure.control-plane-machine-type $PARAMS_YAML) 17 | export AZURE_LOCATION=$(yq e .azure.location $PARAMS_YAML) 18 | export AZURE_NODE_MACHINE_TYPE=$(yq e .azure.node-machine-type $PARAMS_YAML) 19 | export AZURE_SUBSCRIPTION_ID=$(yq e .azure.subscription-id $PARAMS_YAML) 20 | export AZURE_TENANT_ID=$(yq e .azure.tenant-id $PARAMS_YAML) 21 | export OIDC_ISSUER_URL=https://$(yq e .okta.auth-server-fqdn $PARAMS_YAML) 22 | export OIDC_CLIENT_ID=$(yq e .okta.tkg-app-client-id $PARAMS_YAML) 23 | export OIDC_CLIENT_SECRET=$(yq e .okta.tkg-app-client-secret $PARAMS_YAML) 24 | export WORKER_REPLICAS=$(yq e .management-cluster.worker-replicas $PARAMS_YAML) 25 | 26 | ################################### 27 | # set variables into cluster config 28 | ################################### 29 | yq e -i '.CLUSTER_NAME = env(CLUSTER_NAME)' "$CLUSTER_CONFIG" 30 | yq e -i '.AZURE_CLIENT_ID = env(AZURE_CLIENT_ID)' "$CLUSTER_CONFIG" 31 | yq e -i '.AZURE_CLIENT_SECRET = env(AZURE_CLIENT_SECRET)' "$CLUSTER_CONFIG" 32 | yq e -i '.AZURE_CONTROL_PLANE_MACHINE_TYPE = env(AZURE_CONTROL_PLANE_MACHINE_TYPE)' "$CLUSTER_CONFIG" 33 | yq e -i '.AZURE_LOCATION = env(AZURE_LOCATION)' "$CLUSTER_CONFIG" 34 | yq e -i '.AZURE_NODE_MACHINE_TYPE = env(AZURE_NODE_MACHINE_TYPE)' "$CLUSTER_CONFIG" 35 | yq e -i '.AZURE_SUBSCRIPTION_ID = env(AZURE_SUBSCRIPTION_ID)' "$CLUSTER_CONFIG" 36 | yq e -i '.AZURE_TENANT_ID = env(AZURE_TENANT_ID)' "$CLUSTER_CONFIG" 37 | yq e -i '.OIDC_IDENTITY_PROVIDER_ISSUER_URL = env(OIDC_ISSUER_URL)' "$CLUSTER_CONFIG" 38 | yq e -i '.OIDC_IDENTITY_PROVIDER_CLIENT_ID = env(OIDC_CLIENT_ID)' "$CLUSTER_CONFIG" 39 | yq e -i '.OIDC_IDENTITY_PROVIDER_CLIENT_SECRET = env(OIDC_CLIENT_SECRET)' "$CLUSTER_CONFIG" 40 | yq e -i '.WORKER_MACHINE_COUNT = env(WORKER_REPLICAS)' "$CLUSTER_CONFIG" 41 | 42 | # create the cluster 43 | tanzu management-cluster create --file=$CLUSTER_CONFIG -v 6 44 | -------------------------------------------------------------------------------- /scripts/02-deploy-vsphere-mgmt-cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source "$TKG_LAB_SCRIPTS/set-env.sh" 5 | 6 | ##################### 7 | # get variables 8 | ##################### 9 | 10 | # Get 
cluster name and prepare cluster-config file 11 | export CLUSTER_NAME=$(yq e .management-cluster.name $PARAMS_YAML) 12 | export CLUSTER_CONFIG="generated/$CLUSTER_NAME/cluster-config.yaml" 13 | 14 | export CONTROLPLANE_ENDPOINT=$(yq e .management-cluster.controlplane-endpoint $PARAMS_YAML) 15 | export OIDC_IDENTITY_PROVIDER_ISSUER_URL=https://$(yq e .okta.auth-server-fqdn $PARAMS_YAML) 16 | export OIDC_IDENTITY_PROVIDER_CLIENT_ID=$(yq e .okta.tkg-app-client-id $PARAMS_YAML) 17 | export OIDC_IDENTITY_PROVIDER_CLIENT_SECRET=$(yq e .okta.tkg-app-client-secret $PARAMS_YAML) 18 | export WORKER_REPLICAS=$(yq e .management-cluster.worker-replicas $PARAMS_YAML) 19 | export AVI_CA_DATA_B64=$(yq e .avi.avi-ca-data $PARAMS_YAML) 20 | export AVI_CLOUD_NAME=$(yq e .avi.avi-cloud-name $PARAMS_YAML) 21 | export AVI_CONTROLLER=$(yq e .avi.avi-controller $PARAMS_YAML) 22 | export AVI_DATA_NETWORK=$(yq e .avi.avi-data-network $PARAMS_YAML) 23 | export AVI_DATA_NETWORK_CIDR=$(yq e .avi.avi-data-network-cidr $PARAMS_YAML) 24 | export AVI_MANAGEMENT_CLUSTER_VIP_NETWORK_NAME=$(yq e .avi.avi-management-cluster-vip-network $PARAMS_YAML) 25 | export AVI_MANAGEMENT_CLUSTER_VIP_NETWORK_CIDR=$(yq e .avi.avi-management-cluster-vip-network-cidr $PARAMS_YAML) 26 | export AVI_LABELS=$(yq e .avi.avi-labels $PARAMS_YAML) 27 | export AVI_PASSWORD=$(yq e .avi.avi-password $PARAMS_YAML) 28 | export AVI_SERVICE_ENGINE_GROUP=$(yq e .avi.avi-service-engine-group $PARAMS_YAML) 29 | export AVI_USERNAME=$(yq e .avi.avi-username $PARAMS_YAML) 30 | NODE_OS=$(yq e .vsphere.node-os $PARAMS_YAML) 31 | if [ "$NODE_OS" = "photon" ]; 32 | then 33 | export NODE_OS="photon" 34 | export NODE_VERSION="3" 35 | else 36 | export NODE_OS="ubuntu" 37 | export NODE_VERSION="20.04" 38 | fi 39 | 40 | ################################### 41 | # set variables into cluster config 42 | ################################### 43 | yq e -i '.CLUSTER_NAME = env(CLUSTER_NAME)' "$CLUSTER_CONFIG" 44 | yq e -i '.VSPHERE_CONTROL_PLANE_ENDPOINT = env(CONTROLPLANE_ENDPOINT)' "$CLUSTER_CONFIG" 45 | yq e -i '.OIDC_IDENTITY_PROVIDER_ISSUER_URL = env(OIDC_IDENTITY_PROVIDER_ISSUER_URL)' "$CLUSTER_CONFIG" 46 | yq e -i '.OIDC_IDENTITY_PROVIDER_CLIENT_ID = env(OIDC_IDENTITY_PROVIDER_CLIENT_ID)' "$CLUSTER_CONFIG" 47 | yq e -i '.OIDC_IDENTITY_PROVIDER_CLIENT_SECRET = env(OIDC_IDENTITY_PROVIDER_CLIENT_SECRET)' "$CLUSTER_CONFIG" 48 | yq e -i '.WORKER_MACHINE_COUNT = env(WORKER_REPLICAS)' "$CLUSTER_CONFIG" 49 | yq e -i '.AVI_CA_DATA_B64 = strenv(AVI_CA_DATA_B64)' "$CLUSTER_CONFIG" 50 | yq e -i '.AVI_CLOUD_NAME = env(AVI_CLOUD_NAME)' "$CLUSTER_CONFIG" 51 | yq e -i '.AVI_CONTROLLER = env(AVI_CONTROLLER)' "$CLUSTER_CONFIG" 52 | yq e -i '.AVI_DATA_NETWORK = env(AVI_DATA_NETWORK)' "$CLUSTER_CONFIG" 53 | yq e -i '.AVI_DATA_NETWORK_CIDR = env(AVI_DATA_NETWORK_CIDR)' "$CLUSTER_CONFIG" 54 | yq e -i '.AVI_MANAGEMENT_CLUSTER_VIP_NETWORK_NAME = env(AVI_MANAGEMENT_CLUSTER_VIP_NETWORK_NAME)' "$CLUSTER_CONFIG" 55 | yq e -i '.AVI_MANAGEMENT_CLUSTER_VIP_NETWORK_CIDR = env(AVI_MANAGEMENT_CLUSTER_VIP_NETWORK_CIDR)' "$CLUSTER_CONFIG" 56 | yq e -i '.AVI_LABELS = strenv(AVI_LABELS)' "$CLUSTER_CONFIG" 57 | yq e -i '.AVI_PASSWORD = strenv(AVI_PASSWORD)' "$CLUSTER_CONFIG" 58 | yq e -i '.AVI_SERVICE_ENGINE_GROUP = env(AVI_SERVICE_ENGINE_GROUP)' "$CLUSTER_CONFIG" 59 | yq e -i '.AVI_USERNAME = env(AVI_USERNAME)' "$CLUSTER_CONFIG" 60 | yq e -i '.OS_NAME = env(NODE_OS)' "$CLUSTER_CONFIG" 61 | yq e -i '.OS_VERSION = env(NODE_VERSION)' "$CLUSTER_CONFIG" 62 | 63 | tanzu management-cluster create 
--file=$CLUSTER_CONFIG -v 6 -y 64 | -------------------------------------------------------------------------------- /scripts/03-post-deploy-mgmt-cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/set-env.sh 5 | 6 | MANAGEMENT_CLUSTER_NAME=$(yq e .management-cluster.name $PARAMS_YAML) 7 | 8 | kubectl config use-context $MANAGEMENT_CLUSTER_NAME-admin@$MANAGEMENT_CLUSTER_NAME 9 | 10 | # Wait for apps to finish reconciling 11 | while [[ $(kubectl get apps -n tkg-system -oyaml | yq e '.items[] | select(.status.friendlyDescription != "Reconcile succeeded") | .metadata.name' | wc -l) -ne 0 ]] ; do 12 | echo "Waiting for apps to finish reconciling" 13 | sleep 5 14 | done 15 | 16 | # Create namespace and Package Repository for User Managed Packages 17 | kubectl create ns tanzu-user-managed-packages --dry-run=client --output yaml | kubectl apply -f - 18 | $TKG_LAB_SCRIPTS/deploy-tanzu-standard-package-repo.sh $MANAGEMENT_CLUSTER_NAME -------------------------------------------------------------------------------- /scripts/add-dockerhub-pull-secret.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/set-env.sh 5 | 6 | if [ ! $# -eq 1 ]; then 7 | echo "Must supply namespace as args" 8 | exit 1 9 | fi 10 | 11 | NAMESPACE=$1 12 | DOCKER_HUB_USER=$(yq e '.dockerhub.username // null' $PARAMS_YAML) 13 | DOCKER_HUB_PASSWORD=$(yq e '.dockerhub.password // null' $PARAMS_YAML) 14 | DOCKER_HUB_EMAIL=$(yq e '.dockerhub.email // null' $PARAMS_YAML) 15 | 16 | if [ "$DOCKER_HUB_USER" == null ] || [ "$DOCKER_HUB_USER" = "REDACTED" ] || \ 17 | [ "$DOCKER_HUB_PASSWORD" == null ] || [ "$DOCKER_HUB_PASSWORD" = "REDACTED" ] || \ 18 | [ "$DOCKER_HUB_EMAIL" == null ] || [ "$DOCKER_HUB_EMAIL" = "REDACTED" ]; then 19 | echo "Failed. Must set dockerhub settings in param file" 20 | exit 1 21 | fi 22 | 23 | kubectl create secret docker-registry docker-hub-creds \ 24 | --docker-server=docker.io \ 25 | --docker-username=$DOCKER_HUB_USER \ 26 | --docker-password=$DOCKER_HUB_PASSWORD \ 27 | --docker-email=$DOCKER_HUB_EMAIL \ 28 | --namespace=$NAMESPACE \ 29 | --dry-run=client --output yaml | kubectl apply -f - 30 | 31 | kubectl patch serviceaccount default -p '{"imagePullSecrets": [{"name": "docker-hub-creds"}]}' --namespace=$NAMESPACE 32 | -------------------------------------------------------------------------------- /scripts/apply-acme-fitness-quota.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | CLUSTER_NAME=$(yq e .workload-cluster.name $PARAMS_YAML) 4 | 5 | kubectl config use-context $CLUSTER_NAME-admin@$CLUSTER_NAME 6 | 7 | kubectl apply -f acme-fitness/acme-fitness-namespace-settings.yaml 8 | -------------------------------------------------------------------------------- /scripts/dataprotection.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/set-env.sh 5 | 6 | if [ ! $# -eq 1 ]; then 7 | echo "Must supply cluster name" 8 | exit 1 9 | fi 10 | 11 | echo "Enabling TMC data protection (powered by Velero)..." 
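# Note: the TMC cluster name used below is composed as <vmware-id>-<cluster-name>-<iaas>.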
12 | 13 | CLUSTER_NAME=$1 14 | BACKUP_LOCATION=$(yq e .tmc.data-protection-backup-location-name $PARAMS_YAML) 15 | 16 | IAAS=$(yq e .iaas $PARAMS_YAML) 17 | VMWARE_ID=$(yq e .vmware-id $PARAMS_YAML) 18 | 19 | tmc cluster dataprotection create --management-cluster-name attached \ 20 | --provisioner-name attached \ 21 | --cluster-name ${VMWARE_ID}-${CLUSTER_NAME}-${IAAS} \ 22 | --backup-location-names ${BACKUP_LOCATION} 23 | 24 | # Wait for it to be ready 25 | while [[ $(tmc cluster dataprotection get -m attached -p attached --cluster-name ${VMWARE_ID}-${CLUSTER_NAME}-${IAAS} | yq e -o=json | jq .status.phase -r) != "READY" ]] ; do 26 | echo Velero is not yet ready 27 | sleep 5 28 | done 29 | 30 | # Setup the backup schedule 31 | tmc cluster dataprotection schedule create --management-cluster-name attached \ 32 | --provisioner-name attached \ 33 | --cluster-name ${VMWARE_ID}-${CLUSTER_NAME}-${IAAS} \ 34 | --backup-location-name ${BACKUP_LOCATION} \ 35 | --name daily \ 36 | --rate "0 7 * * *" \ 37 | --ttl "72h0m0s" 38 | -------------------------------------------------------------------------------- /scripts/delete-all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/set-env.sh 5 | 6 | VMWARE_ID=$(yq e .vmware-id $PARAMS_YAML) 7 | MC_CLUSTER_NAME=$(yq e .management-cluster.name $PARAMS_YAML) 8 | IAAS=$(yq e .iaas $PARAMS_YAML) 9 | 10 | tmc cluster delete $VMWARE_ID-$(yq e .shared-services-cluster.name $PARAMS_YAML)-$(yq e .iaas $PARAMS_YAML) -m attached -p attached --force 11 | tmc cluster delete $VMWARE_ID-$(yq e .workload-cluster.name $PARAMS_YAML)-$(yq e .iaas $PARAMS_YAML) -m attached -p attached --force 12 | 13 | tanzu login --server $MC_CLUSTER_NAME 14 | tanzu cluster delete $(yq e .workload-cluster.name $PARAMS_YAML) --yes 15 | tanzu cluster delete $(yq e .shared-services-cluster.name $PARAMS_YAML) --yes 16 | 17 | #Wait for clusters to be deleted 18 | while tanzu cluster list | grep deleting ; [ $? 
-eq 0 ]; do 19 | echo "Waiting for clusters to be deleted" 20 | sleep 5 21 | done 22 | 23 | kubectl config use-context $MC_CLUSTER_NAME-admin@$MC_CLUSTER_NAME 24 | tmc managementcluster deregister $MC_CLUSTER_NAME --force --kubeconfig ~/.kube/config 25 | 26 | tanzu management-cluster delete -y 27 | 28 | if [ "$IAAS" = "aws" ]; 29 | then 30 | 31 | terraform -chdir=terraform/aws destroy -y 32 | 33 | fi -------------------------------------------------------------------------------- /scripts/deploy-all-workload-cluster-components.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/set-env.sh 5 | 6 | # Workload Step 1 7 | $TKG_LAB_SCRIPTS/deploy-workload-cluster.sh \ 8 | $(yq e .workload-cluster.name $PARAMS_YAML) \ 9 | $(yq e .workload-cluster.worker-replicas $PARAMS_YAML) \ 10 | $(yq e .workload-cluster.controlplane-endpoint $PARAMS_YAML) \ 11 | $(yq e '.shared-services-cluster.kubernetes-version // null' $PARAMS_YAML) 12 | # Workload Step 2 13 | $TKG_LAB_SCRIPTS/tmc-attach.sh $(yq e .workload-cluster.name $PARAMS_YAML) 14 | # Workload Step 3 15 | $TKG_LAB_SCRIPTS/tmc-policy.sh \ 16 | $(yq e .workload-cluster.name $PARAMS_YAML) \ 17 | cluster.admin \ 18 | platform-team 19 | # Workload Step 4 20 | IAAS=$(yq e .iaas $PARAMS_YAML) 21 | $TKG_LAB_SCRIPTS/deploy-cert-manager.sh $(yq e .workload-cluster.name $PARAMS_YAML) 22 | $TKG_LAB_SCRIPTS/generate-and-apply-contour-yaml.sh $(yq e .workload-cluster.name $PARAMS_YAML) 23 | $TKG_LAB_SCRIPTS/generate-and-apply-external-dns-yaml.sh $(yq e .workload-cluster.name $PARAMS_YAML) 24 | $TKG_LAB_SCRIPTS/generate-and-apply-cluster-issuer-yaml.sh $(yq e .workload-cluster.name $PARAMS_YAML) 25 | # Workload Step 6 26 | $TKG_LAB_SCRIPTS/generate-and-apply-fluent-bit-yaml.sh $(yq e .workload-cluster.name $PARAMS_YAML) 27 | # Workload Step 7 28 | # $TKG_LAB_SCRIPTS/deploy-wavefront.sh $(yq e .workload-cluster.name $PARAMS_YAML) 29 | $TKG_LAB_SCRIPTS/generate-and-apply-prometheus-yaml.sh \ 30 | $(yq e .workload-cluster.name $PARAMS_YAML) \ 31 | $(yq e .workload-cluster.prometheus-fqdn $PARAMS_YAML) 32 | $TKG_LAB_SCRIPTS/generate-and-apply-grafana-yaml.sh \ 33 | $(yq e .workload-cluster.name $PARAMS_YAML) \ 34 | $(yq e .workload-cluster.grafana-fqdn $PARAMS_YAML) 35 | # Workload Step 8 36 | $TKG_LAB_SCRIPTS/dataprotection.sh $(yq e .workload-cluster.name $PARAMS_YAML) 37 | -------------------------------------------------------------------------------- /scripts/deploy-all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/set-env.sh 5 | 6 | IAAS=$(yq e .iaas $PARAMS_YAML) 7 | 8 | # Management Step 1 9 | $TKG_LAB_SCRIPTS/01-prep-$IAAS-objects.sh 10 | $TKG_LAB_SCRIPTS/02-deploy-$IAAS-mgmt-cluster.sh 11 | $TKG_LAB_SCRIPTS/03-post-deploy-mgmt-cluster.sh 12 | # Management Step 2 13 | $TKG_LAB_SCRIPTS/tmc-register-mc.sh 14 | # Management Step 3 15 | $TKG_LAB_SCRIPTS/create-dns-zone.sh 16 | $TKG_LAB_SCRIPTS/retrieve-lets-encrypt-ca-cert.sh 17 | # Management Step 6 18 | $TKG_LAB_SCRIPTS/generate-and-apply-contour-yaml.sh $(yq e .management-cluster.name $PARAMS_YAML) 19 | $TKG_LAB_SCRIPTS/generate-and-apply-external-dns-yaml.sh $(yq e .management-cluster.name $PARAMS_YAML) 20 | 
$TKG_LAB_SCRIPTS/generate-and-apply-cluster-issuer-yaml.sh $(yq e .management-cluster.name $PARAMS_YAML) 21 | # Management Step 7 22 | $TKG_LAB_SCRIPTS/update-pinniped-configuration.sh 23 | # Management Step 8 24 | $TKG_LAB_SCRIPTS/generate-and-apply-prometheus-yaml.sh \ 25 | $(yq e .management-cluster.name $PARAMS_YAML) \ 26 | $(yq e .management-cluster.prometheus-fqdn $PARAMS_YAML) 27 | $TKG_LAB_SCRIPTS/generate-and-apply-grafana-yaml.sh \ 28 | $(yq e .management-cluster.name $PARAMS_YAML) \ 29 | $(yq e .management-cluster.grafana-fqdn $PARAMS_YAML) 30 | 31 | # Shared Services Step 1 32 | $TKG_LAB_SCRIPTS/deploy-workload-cluster.sh \ 33 | $(yq e .shared-services-cluster.name $PARAMS_YAML) \ 34 | $(yq e .shared-services-cluster.worker-replicas $PARAMS_YAML) \ 35 | $(yq e .shared-services-cluster.controlplane-endpoint $PARAMS_YAML) \ 36 | $(yq e '.shared-services-cluster.kubernetes-version // null' $PARAMS_YAML) 37 | # Shared Services Step 2 38 | $TKG_LAB_SCRIPTS/tmc-attach.sh $(yq e .shared-services-cluster.name $PARAMS_YAML) 39 | # Shared Services Step 3 40 | $TKG_LAB_SCRIPTS/tmc-policy.sh \ 41 | $(yq e .shared-services-cluster.name $PARAMS_YAML ) \ 42 | cluster.admin \ 43 | platform-team 44 | # Shared Services Step 4 45 | $TKG_LAB_SCRIPTS/deploy-cert-manager.sh $(yq e .shared-services-cluster.name $PARAMS_YAML) 46 | $TKG_LAB_SCRIPTS/generate-and-apply-contour-yaml.sh $(yq e .shared-services-cluster.name $PARAMS_YAML) 47 | $TKG_LAB_SCRIPTS/generate-and-apply-external-dns-yaml.sh $(yq e .shared-services-cluster.name $PARAMS_YAML) 48 | $TKG_LAB_SCRIPTS/generate-and-apply-cluster-issuer-yaml.sh $(yq e .shared-services-cluster.name $PARAMS_YAML) 49 | # Shared Services Step 5 50 | $TKG_LAB_SCRIPTS/generate-and-apply-elasticsearch-kibana-yaml.sh 51 | # Shared Services Step 6 52 | $TKG_LAB_SCRIPTS/generate-and-apply-fluent-bit-yaml.sh $(yq e .shared-services-cluster.name $PARAMS_YAML) 53 | # Shared Services Step 7 54 | $TKG_LAB_SCRIPTS/generate-and-apply-prometheus-yaml.sh \ 55 | $(yq e .shared-services-cluster.name $PARAMS_YAML) \ 56 | $(yq e .shared-services-cluster.prometheus-fqdn $PARAMS_YAML) 57 | $TKG_LAB_SCRIPTS/generate-and-apply-grafana-yaml.sh \ 58 | $(yq e .shared-services-cluster.name $PARAMS_YAML) \ 59 | $(yq e .shared-services-cluster.grafana-fqdn $PARAMS_YAML) 60 | # Shared Services Step 8 61 | $TKG_LAB_SCRIPTS/generate-and-apply-minio-yaml.sh 62 | # Shared Services Step 9 63 | $TKG_LAB_SCRIPTS/dataprotection.sh $(yq e .shared-services-cluster.name $PARAMS_YAML) 64 | # Shared Services Step 10 65 | $TKG_LAB_SCRIPTS/generate-and-apply-harbor-yaml.sh \ 66 | $(yq e .management-cluster.name $PARAMS_YAML) \ 67 | $(yq e .shared-services-cluster.name $PARAMS_YAML) 68 | 69 | # Management Step 9 70 | $TKG_LAB_SCRIPTS/generate-and-apply-fluent-bit-yaml.sh $(yq e .management-cluster.name $PARAMS_YAML) 71 | # Management Step 10 72 | $TKG_LAB_SCRIPTS/velero.sh $(yq e .management-cluster.name $PARAMS_YAML) 73 | 74 | # Workload Step 1 75 | $TKG_LAB_SCRIPTS/deploy-all-workload-cluster-components.sh 76 | -------------------------------------------------------------------------------- /scripts/deploy-cert-manager.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/set-env.sh 5 | 6 | if [ ! 
$# -eq 1 ]; then 7 | echo "Must supply cluster_name as args" 8 | exit 1 9 | fi 10 | 11 | CLUSTER_NAME=$1 12 | 13 | kubectl config use-context $CLUSTER_NAME-admin@$CLUSTER_NAME 14 | 15 | # Retrieve the most recent version number. There may be more than one version available and we are assuming that the most recent is listed last, 16 | # thus supplying -1 as the index of the array 17 | VERSION=$(tanzu package available list cert-manager.tanzu.vmware.com -n tanzu-user-managed-packages -oyaml --summary=false | yq e '. | sort_by(.released-at)' | yq e ".[-1].version") 18 | 19 | tanzu package install cert-manager \ 20 | --package cert-manager.tanzu.vmware.com \ 21 | --version $VERSION \ 22 | --namespace tanzu-user-managed-packages -------------------------------------------------------------------------------- /scripts/deploy-tanzu-standard-package-repo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/set-env.sh 5 | 6 | if [ ! $# -eq 1 ]; then 7 | echo "Must supply cluster_name as arg" 8 | exit 1 9 | fi 10 | 11 | CLUSTER_NAME=$1 12 | 13 | kubectl config use-context $CLUSTER_NAME-admin@$CLUSTER_NAME 14 | 15 | # Create tanzu-standard package repository 16 | tanzu package repository add tanzu-standard --url projects.registry.vmware.com/tkg/packages/standard/repo:v2023.9.19 --namespace tanzu-user-managed-packages -------------------------------------------------------------------------------- /scripts/deploy-wavefront-tracing.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | if [ ! $# -eq 1 ]; then 4 | echo "Must supply cluster name as args" 5 | exit 1 6 | fi 7 | CLUSTER_NAME=$1 8 | IAAS=$(yq e .iaas params.yaml) 9 | 10 | kubectl config use-context $CLUSTER_NAME-admin@$CLUSTER_NAME 11 | 12 | WAVEFRONT_API_KEY=$(yq e .wavefront.api-key params.yaml) 13 | WAVEFRONT_URL=$(yq e .wavefront.url params.yaml) 14 | WAVEFRONT_PREFIX=$(yq e .wavefront.cluster-name-prefix params.yaml) 15 | WAVEFRONT_JAEGER_NAME=$(yq e .wavefront.jaeger-app-name-prefix params.yaml) 16 | WORKLOAD_CLUSTER_NAME=$(yq e .workload-cluster.name params.yaml) 17 | 18 | # Replace cluster tag in wf-preprocessor.yml 19 | sed -i "s/CLUSTERTAGNAME/$WAVEFRONT_PREFIX-$WORKLOAD_CLUSTER_NAME/g" wavefront/wf-preprocessor.yml 20 | 21 | kubectl create namespace wavefront 22 | helm repo add wavefront https://wavefronthq.github.io/helm/ 23 | helm repo update 24 | helm upgrade --install wavefront wavefront/wavefront -f wavefront/wf-preprocessor.yml \ 25 | --set wavefront.url=$WAVEFRONT_URL \ 26 | --set wavefront.token=$WAVEFRONT_API_KEY \ 27 | --set clusterName=$WAVEFRONT_PREFIX-$CLUSTER_NAME-$IAAS \ 28 | --set proxy.args="--traceJaegerApplicationName $WAVEFRONT_JAEGER_NAME" \ 29 | --namespace wavefront 30 | -------------------------------------------------------------------------------- /scripts/deploy-wavefront.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/set-env.sh 5 | 6 | if [ !
$# -eq 1 ]; then 7 | echo "Must supply cluster name as args" 8 | exit 1 9 | fi 10 | CLUSTER_NAME=$1 11 | IAAS=$(yq e .iaas $PARAMS_YAML) 12 | 13 | kubectl config use-context $CLUSTER_NAME-admin@$CLUSTER_NAME 14 | 15 | WAVEFRONT_API_KEY=$(yq e .wavefront.api-key $PARAMS_YAML) 16 | WAVEFRONT_URL=$(yq e .wavefront.url $PARAMS_YAML) 17 | WAVEFRONT_PREFIX=$(yq e .wavefront.cluster-name-prefix $PARAMS_YAML) 18 | 19 | kubectl create namespace wavefront 20 | helm repo add wavefront https://wavefronthq.github.io/helm/ 21 | helm repo update 22 | helm upgrade --install wavefront wavefront/wavefront \ 23 | --set wavefront.url=$WAVEFRONT_URL \ 24 | --set wavefront.token=$WAVEFRONT_API_KEY \ 25 | --set clusterName=$WAVEFRONT_PREFIX-$CLUSTER_NAME-$IAAS \ 26 | --set events.enabled=true \ 27 | --set kubeStateMetrics.enabled=true \ 28 | --namespace wavefront 29 | -------------------------------------------------------------------------------- /scripts/generate-acme-fitness-yaml.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/set-env.sh 5 | 6 | if [ ! $# -eq 1 ]; then 7 | echo "Must supply cluster_name as args" 8 | exit 1 9 | fi 10 | 11 | CLUSTER_NAME=$1 12 | export ACME_FITNESS_CN=$(yq e .acme-fitness.fqdn $PARAMS_YAML) 13 | 14 | mkdir -p generated/$CLUSTER_NAME/acme-fitness/ 15 | cp acme-fitness/template/acme-fitness-frontend-ingress.yaml generated/$CLUSTER_NAME/acme-fitness/ 16 | 17 | # Create the ingress to access acme fitness website 18 | yq e -i ".spec.tls[0].hosts[0] = env(ACME_FITNESS_CN)" generated/$CLUSTER_NAME/acme-fitness/acme-fitness-frontend-ingress.yaml 19 | yq e -i ".spec.rules[0].host = env(ACME_FITNESS_CN)" generated/$CLUSTER_NAME/acme-fitness/acme-fitness-frontend-ingress.yaml 20 | -------------------------------------------------------------------------------- /scripts/generate-and-apply-argocd-yaml.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/../scripts/set-env.sh 5 | 6 | export CLUSTER_NAME=$(yq e .shared-services-cluster.name $PARAMS_YAML) 7 | export ARGOCD_CN=$(yq e .argocd.server-fqdn $PARAMS_YAML) 8 | export ARGOCD_PASSWORD=$(yq e .argocd.password $PARAMS_YAML) 9 | export ARGOCD_HTPASSWORD=$(htpasswd -nbBC 10 "" $ARGOCD_PASSWORD | tr -d ':\n' | sed 's/$2y/$2a/') 10 | 11 | mkdir -p generated/$CLUSTER_NAME/argocd/ 12 | cp argocd/01-namespace.yaml generated/$CLUSTER_NAME/argocd/ 13 | cp argocd/values.yaml generated/$CLUSTER_NAME/argocd/ 14 | cp argocd/httpproxy.yaml generated/$CLUSTER_NAME/argocd/ 15 | 16 | yq e -i '.spec.virtualhost.fqdn = env(ARGOCD_CN)' generated/$CLUSTER_NAME/argocd/httpproxy.yaml 17 | yq e -i '.server.certificate.domain = env(ARGOCD_CN)' generated/$CLUSTER_NAME/argocd/values.yaml 18 | yq e -i '.configs.secret.argocdServerAdminPassword = env(ARGOCD_HTPASSWORD)' generated/$CLUSTER_NAME/argocd/values.yaml 19 | 20 | echo "Beginning ArgoCD install..." 
21 | 22 | kubectl config use-context $CLUSTER_NAME-admin@$CLUSTER_NAME 23 | 24 | kubectl apply -f generated/$CLUSTER_NAME/argocd/01-namespace.yaml 25 | 26 | helm repo add argo https://argoproj.github.io/argo-helm 27 | helm upgrade --install argocd argo/argo-cd \ 28 | -f generated/$CLUSTER_NAME/argocd/values.yaml \ 29 | --namespace argocd \ 30 | --version "3.5.0" 31 | kubectl apply -f generated/$CLUSTER_NAME/argocd/httpproxy.yaml 32 | -------------------------------------------------------------------------------- /scripts/generate-and-apply-concourse-yaml.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | CLUSTER_NAME=$(yq e .shared-services-cluster.name $PARAMS_YAML) 4 | export CONCOURSE_FQDN=$(yq e .concourse.fqdn $PARAMS_YAML) 5 | export CONCOURSE_URL=https://$CONCOURSE_FQDN 6 | CONCOURSE_NAMESPACE=$(yq e .concourse.namespace $PARAMS_YAML) 7 | OKTA_AUTH_SERVER_CN=$(yq e .okta.auth-server-fqdn $PARAMS_YAML) 8 | export OKTA_AUTH_SERVER_CA_CERT="$(cat keys/letsencrypt-ca.pem)" 9 | export OKTA_CONCOURSE_APP_CLIENT_ID=$(yq e .okta.concourse-app-client-id $PARAMS_YAML) 10 | export OKTA_CONCOURSE_APP_CLIENT_SECRET=$(yq e .okta.concourse-app-client-secret $PARAMS_YAML) 11 | export OIDC_ISSUER=https://$OKTA_AUTH_SERVER_CN/oauth2/default 12 | ADMIN_PASSWORD=$(yq e .concourse.admin-password $PARAMS_YAML) 13 | export ADMIN_CREDS="admin:$ADMIN_PASSWORD" 14 | 15 | mkdir -p generated/$CLUSTER_NAME/concourse/ 16 | 17 | cp concourse/concourse-values-contour-template.yaml generated/$CLUSTER_NAME/concourse/concourse-values-contour.yaml 18 | 19 | yq e -i --unwrapScalar=false ".secrets.oidcCaCert = strenv(OKTA_AUTH_SERVER_CA_CERT)" generated/$CLUSTER_NAME/concourse/concourse-values-contour.yaml 20 | yq e -i ".secrets.localUsers = env(ADMIN_CREDS)" generated/$CLUSTER_NAME/concourse/concourse-values-contour.yaml 21 | yq e -i ".web.ingress.hosts[0] = env(CONCOURSE_FQDN)" generated/$CLUSTER_NAME/concourse/concourse-values-contour.yaml 22 | yq e -i ".web.ingress.tls[0].hosts[0] = env(CONCOURSE_FQDN)" generated/$CLUSTER_NAME/concourse/concourse-values-contour.yaml 23 | yq e -i ".concourse.web.externalUrl = env(CONCOURSE_URL)" generated/$CLUSTER_NAME/concourse/concourse-values-contour.yaml 24 | yq e -i ".concourse.web.auth.oidc.issuer = env(OIDC_ISSUER)" generated/$CLUSTER_NAME/concourse/concourse-values-contour.yaml 25 | yq e -i ".secrets.oidcClientId = env(OKTA_CONCOURSE_APP_CLIENT_ID)" generated/$CLUSTER_NAME/concourse/concourse-values-contour.yaml 26 | yq e -i ".secrets.oidcClientSecret = env(OKTA_CONCOURSE_APP_CLIENT_SECRET)" generated/$CLUSTER_NAME/concourse/concourse-values-contour.yaml 27 | 28 | # generate the helm manifest and make sure the web pod trusts let's encrypt 29 | helm repo add concourse https://concourse-charts.storage.googleapis.com/ 30 | helm repo update 31 | 32 | # NOTE: Through testing setting the OIDC CA in values.yaml did not work as expected, so we are mounting the let's encrypt CA onto the worker pods via ytt overlay 33 | helm template concourse concourse/concourse -f generated/$CLUSTER_NAME/concourse/concourse-values-contour.yaml --namespace $CONCOURSE_NAMESPACE --version=14.5.6 | 34 | ytt -f - -f overlay/trust-certificate --ignore-unknown-comments \ 35 | --data-value certificate="$(cat keys/letsencrypt-ca.pem)" \ 36 | --data-value ca=letsencrypt > generated/$CLUSTER_NAME/concourse/helm-manifest.yaml 37 | 38 | kubectl config use-context $CLUSTER_NAME-admin@$CLUSTER_NAME 39 | 40 | kapp deploy -a concourse \ 41 | -f 
generated/$CLUSTER_NAME/concourse/helm-manifest.yaml \ 42 | -n $CONCOURSE_NAMESPACE \ 43 | -y 44 | -------------------------------------------------------------------------------- /scripts/generate-and-apply-contour-yaml.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/set-env.sh 5 | 6 | if [ ! $# -eq 1 ]; then 7 | echo "Must supply cluster_name as arg" 8 | exit 1 9 | fi 10 | 11 | CLUSTER_NAME=$1 12 | IAAS=$(yq e .iaas $PARAMS_YAML) 13 | 14 | kubectl config use-context $CLUSTER_NAME-admin@$CLUSTER_NAME 15 | 16 | mkdir -p generated/$CLUSTER_NAME/contour/ 17 | 18 | # The default for vSphere is NodePort for envoy, so we must set it to LoadBalancer. The following is not necessary for azure and aws, but it doesn't hurt. 19 | yq e '.envoy.service.type = "LoadBalancer"' --null-input > generated/$CLUSTER_NAME/contour/contour-data-values.yaml 20 | 21 | # See TKG-4309. TKG modified the default for contour to Cluster. Setting it back to the TCE default, which is Local. Amongst other things, this allows the source IP to be preserved and, in cases where there is only an 22 | # SSL HTTPProxy, allows Contour to satisfy the AWS load balancer health check. 23 | yq e -i '.envoy.service.externalTrafficPolicy = "Local"' generated/$CLUSTER_NAME/contour/contour-data-values.yaml 24 | 25 | # Retrieve the most recent version number. There may be more than one version available and we are assuming that the most recent is listed last, 26 | # thus supplying -1 as the index of the array 27 | VERSION=$(tanzu package available list contour.tanzu.vmware.com -n tanzu-user-managed-packages -oyaml --summary=false | yq e '. | sort_by(.released-at)' | yq e ".[-1].version") 28 | tanzu package install contour \ 29 | --package contour.tanzu.vmware.com \ 30 | --version $VERSION \ 31 | --namespace tanzu-user-managed-packages \ 32 | --values-file generated/$CLUSTER_NAME/contour/contour-data-values.yaml 33 | -------------------------------------------------------------------------------- /scripts/generate-and-apply-elasticsearch-kibana-yaml.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/set-env.sh 5 | 6 | export ELASTICSEARCH_CN=$(yq e .shared-services-cluster.elasticsearch-fqdn $PARAMS_YAML) 7 | export KIBANA_CN=$(yq e .shared-services-cluster.kibana-fqdn $PARAMS_YAML) 8 | CLUSTER_NAME=$(yq e .shared-services-cluster.name $PARAMS_YAML) 9 | 10 | kubectl config use-context $CLUSTER_NAME-admin@$CLUSTER_NAME 11 | 12 | mkdir -p generated/$CLUSTER_NAME/ek/ 13 | cp elasticsearch-kibana/*.yaml generated/$CLUSTER_NAME/ek/ 14 | cp elasticsearch-kibana/template/*.yaml generated/$CLUSTER_NAME/ek/ 15 | 16 | yq e -i ".spec.rules[0].host = env(ELASTICSEARCH_CN)" generated/$CLUSTER_NAME/ek/03b-ingress.yaml 17 | yq e -i ".spec.rules[0].host = env(KIBANA_CN)" generated/$CLUSTER_NAME/ek/05-kibana-ingress.yaml 18 | 19 | kubectl apply -f generated/$CLUSTER_NAME/ek/01-namespace.yaml 20 | 21 | # Add image pull secret with dockerhub creds 22 | $TKG_LAB_SCRIPTS/add-dockerhub-pull-secret.sh elasticsearch-kibana 23 | 24 | kubectl apply -f generated/$CLUSTER_NAME/ek/ 25 | 26 | # Wait for pod to be ready 27 | while kubectl get po -n elasticsearch-kibana elasticsearch-0 | grep Running ; [ $?
-ne 0 ]; do 28 | echo Elasticsearch is not yet ready 29 | sleep 5 30 | done 31 | -------------------------------------------------------------------------------- /scripts/generate-and-apply-fluent-bit-yaml.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/set-env.sh 5 | 6 | if [ ! $# -eq 1 ]; then 7 | echo "Must supply cluster_name as args" 8 | exit 1 9 | fi 10 | 11 | export CLUSTER_NAME=$1 12 | kubectl config use-context $CLUSTER_NAME-admin@$CLUSTER_NAME 13 | 14 | export TKG_ENVIRONMENT_NAME=$(yq e .environment-name $PARAMS_YAML) 15 | 16 | if [ $(yq e .shared-services-cluster.name $PARAMS_YAML) = $CLUSTER_NAME ]; 17 | then 18 | export ELASTICSEARCH_CN=elasticsearch.elasticsearch-kibana 19 | export ELASTICSEARCH_PORT="9200" 20 | else 21 | export ELASTICSEARCH_CN=$(yq e .shared-services-cluster.elasticsearch-fqdn $PARAMS_YAML) 22 | export ELASTICSEARCH_PORT="80" 23 | fi 24 | 25 | mkdir -p generated/$CLUSTER_NAME/fluent-bit/ 26 | 27 | export CONFIG_OUTPUTS=$(cat << EOF 28 | [OUTPUT] 29 | Name es 30 | Match * 31 | Host $ELASTICSEARCH_CN 32 | Port $ELASTICSEARCH_PORT 33 | Generate_ID On 34 | Logstash_Format On 35 | Replace_Dots On 36 | Retry_Limit False 37 | Buffer_Size False 38 | tls Off 39 | EOF 40 | ) 41 | export CONFIG_FILTERS=$(cat << EOF 42 | [FILTER] 43 | Name kubernetes 44 | Match kube.* 45 | Kube_URL https://kubernetes.default.svc:443 46 | Kube_CA_File /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 47 | Kube_Token_File /var/run/secrets/kubernetes.io/serviceaccount/token 48 | Kube_Tag_Prefix kube.var.log.containers. 49 | Merge_Log On 50 | Merge_Log_Key log_processed 51 | K8S-Logging.Parser On 52 | K8S-Logging.Exclude On 53 | 54 | [FILTER] 55 | Name record_modifier 56 | Match * 57 | Record tkg_cluster $CLUSTER_NAME 58 | Record tkg_instance $TKG_ENVIRONMENT_NAME 59 | EOF 60 | ) 61 | export POD_ANNOTATIONS=$(cat << EOF 62 | prometheus.io/scrape: "true" 63 | prometheus.io/path: "/api/v1/metrics/prometheus" 64 | prometheus.io/port: "2020" 65 | EOF 66 | ) 67 | yq e ".fluent_bit.config.outputs = strenv(CONFIG_OUTPUTS)" --null-input > generated/$CLUSTER_NAME/fluent-bit/fluent-bit-data-values.yaml 68 | yq e -i ".fluent_bit.config.filters = strenv(CONFIG_FILTERS)" generated/$CLUSTER_NAME/fluent-bit/fluent-bit-data-values.yaml 69 | yq e -i ".fluent_bit.daemonset.podAnnotations = env(POD_ANNOTATIONS)" generated/$CLUSTER_NAME/fluent-bit/fluent-bit-data-values.yaml 70 | 71 | # Retrieve the most recent version number. There may be more than one version available and we are assuming that the most recent is listed last, 72 | # thus supplying -1 as the index of the array 73 | VERSION=$(tanzu package available list fluent-bit.tanzu.vmware.com -n tanzu-user-managed-packages -oyaml --summary=false | yq e '. 
| sort_by(.released-at)' | yq e ".[-1].version") 74 | tanzu package install fluent-bit \ 75 | --package fluent-bit.tanzu.vmware.com \ 76 | --version $VERSION \ 77 | --namespace tanzu-user-managed-packages \ 78 | --values-file generated/$CLUSTER_NAME/fluent-bit/fluent-bit-data-values.yaml 79 | -------------------------------------------------------------------------------- /scripts/generate-and-apply-grafana-yaml.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/set-env.sh 5 | 6 | if [ ! $# -eq 2 ]; then 7 | echo "Must supply cluster name and grafana fqdn as args" 8 | exit 1 9 | fi 10 | CLUSTER_NAME=$1 11 | export GRAFANA_FQDN=$2 12 | GRAFANA_PASSWORD=$(yq e .grafana.admin-password $PARAMS_YAML) 13 | 14 | kubectl config use-context $CLUSTER_NAME-admin@$CLUSTER_NAME 15 | 16 | mkdir -p generated/$CLUSTER_NAME/monitoring/ 17 | 18 | kubectl create ns tanzu-system-dashboards --dry-run=client -oyaml | kubectl apply -f - 19 | 20 | # Create certificate 21 | cp tkg-extensions-mods-examples/monitoring/grafana-cert.yaml generated/$CLUSTER_NAME/monitoring/grafana-cert.yaml 22 | yq e -i ".spec.dnsNames[0] = env(GRAFANA_FQDN)" generated/$CLUSTER_NAME/monitoring/grafana-cert.yaml 23 | kubectl apply -f generated/$CLUSTER_NAME/monitoring/grafana-cert.yaml 24 | # Wait for cert to be ready 25 | while kubectl get certificates -n tanzu-system-dashboards grafana-cert | grep True ; [ $? -ne 0 ]; do 26 | echo Grafana certificate is not yet ready 27 | sleep 5 28 | done 29 | 30 | # TODO: Created https://github.com/vmware-tanzu/community-edition/issues/2947 requesting that a parameter be added to allow you to specify the secret name 31 | # instead of providing the cert in data values.yaml. Once that has been delivered and flows downstream, we can update this section of the code 32 | 33 | # Read Grafana certificate details and store them in variables 34 | export GRAFANA_CERT_CRT=$(kubectl get secret grafana-cert-tls -n tanzu-system-dashboards -o=jsonpath={.data."tls\.crt"} | base64 --decode) 35 | export GRAFANA_CERT_KEY=$(kubectl get secret grafana-cert-tls -n tanzu-system-dashboards -o=jsonpath={.data."tls\.key"} | base64 --decode) 36 | 37 | if [ `uname -s` = 'Darwin' ]; 38 | then 39 | export ADMIN_PASSWORD=$(echo -n $GRAFANA_PASSWORD | base64) 40 | else 41 | export ADMIN_PASSWORD=$(echo -n $GRAFANA_PASSWORD | base64 -w 0) 42 | fi 43 | 44 | yq e ".grafana.secret.admin_password = env(ADMIN_PASSWORD)" --null-input > generated/$CLUSTER_NAME/monitoring/grafana-data-values.yaml 45 | yq e -i '.grafana.service.type = "ClusterIP"' generated/$CLUSTER_NAME/monitoring/grafana-data-values.yaml 46 | yq e -i ".ingress.virtual_host_fqdn = env(GRAFANA_FQDN)" generated/$CLUSTER_NAME/monitoring/grafana-data-values.yaml 47 | yq e -i '.ingress.tlsCertificate."tls.crt" = strenv(GRAFANA_CERT_CRT)' generated/$CLUSTER_NAME/monitoring/grafana-data-values.yaml 48 | yq e -i '.ingress.tlsCertificate."tls.key" = strenv(GRAFANA_CERT_KEY)' generated/$CLUSTER_NAME/monitoring/grafana-data-values.yaml 49 | yq e -i '.namespace = "tanzu-system-dashboards"' generated/$CLUSTER_NAME/monitoring/grafana-data-values.yaml 50 | 51 | # Apply Monitoring 52 | # Retrieve the most recent version number.
There may be more than one version available and we are assuming that the most recent is listed last, 53 | # thus supplying -1 as the index of the array 54 | VERSION=$(tanzu package available list grafana.tanzu.vmware.com -n tanzu-user-managed-packages -oyaml --summary=false | yq e '. | sort_by(.released-at)' | yq e ".[-1].version") 55 | tanzu package install grafana \ 56 | --package grafana.tanzu.vmware.com \ 57 | --version $VERSION \ 58 | --namespace tanzu-user-managed-packages \ 59 | --values-file generated/$CLUSTER_NAME/monitoring/grafana-data-values.yaml 60 | -------------------------------------------------------------------------------- /scripts/generate-and-apply-minio-yaml.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/set-env.sh 5 | 6 | export MINIO_CN=$(yq e .minio.server-fqdn $PARAMS_YAML) 7 | CLUSTER_NAME=$(yq e .shared-services-cluster.name $PARAMS_YAML) 8 | 9 | kubectl config use-context $CLUSTER_NAME-admin@$CLUSTER_NAME 10 | 11 | mkdir -p generated/$CLUSTER_NAME/minio/ 12 | 13 | kubectl create ns minio --dry-run=client --output yaml | kubectl apply -f - 14 | 15 | # Add image pull secret with dockerhub creds 16 | $TKG_LAB_SCRIPTS/add-dockerhub-pull-secret.sh minio 17 | 18 | cat > generated/$CLUSTER_NAME/minio/minio-data-values.yaml << EOF 19 | global: 20 | imagePullSecrets: 21 | - docker-hub-creds 22 | auth: 23 | rootUser: 24 | rootPassword: 25 | service: 26 | type: LoadBalancer 27 | annotations: 28 | defaultBuckets: 29 | persistence: 30 | size: 31 | EOF 32 | 33 | export ROOT_USER=$(yq e .minio.root-user $PARAMS_YAML) 34 | export ROOT_PASSWORD=$(yq e .minio.root-password $PARAMS_YAML) 35 | export PERSISTENCE_SIZE=$(yq e '.minio.persistence-size // "40Gi"' $PARAMS_YAML) 36 | export SERVICE_ANNOTATION='{"external-dns.alpha.kubernetes.io/hostname": "'$MINIO_CN'"}' 37 | export VELERO_BUCKET=$(yq e .velero.bucket $PARAMS_YAML) 38 | 39 | yq e -i ".auth.rootUser = env(ROOT_USER)" generated/$CLUSTER_NAME/minio/minio-data-values.yaml 40 | yq e -i ".auth.rootPassword = env(ROOT_PASSWORD)" generated/$CLUSTER_NAME/minio/minio-data-values.yaml 41 | yq e -i ".defaultBuckets = env(VELERO_BUCKET)" generated/$CLUSTER_NAME/minio/minio-data-values.yaml 42 | yq e -i ".persistence.size = env(PERSISTENCE_SIZE)" generated/$CLUSTER_NAME/minio/minio-data-values.yaml 43 | # yq e -i ".service.annotations = env(SERVICE_ANNOTATION)" generated/$CLUSTER_NAME/minio/minio-data-values.yaml 44 | 45 | helm repo add bitnami https://charts.bitnami.com/bitnami 46 | 47 | helm upgrade --install minio --namespace minio bitnami/minio -f generated/$CLUSTER_NAME/minio/minio-data-values.yaml 48 | 49 | # Wait for pod to be ready 50 | while kubectl get po -n minio | grep Running ; [ $? -ne 0 ]; do 51 | echo Minio is not yet ready 52 | sleep 5 53 | done 54 | 55 | # HACK: I was unable to use the helm chart annotation, or else Avi would not provide an external address. I needed to annotate after the 56 | # service had its address assigned. 57 | kubectl annotate service minio "external-dns.alpha.kubernetes.io/hostname=$MINIO_CN."
-n minio 58 | -------------------------------------------------------------------------------- /scripts/generate-and-apply-prometheus-yaml.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/set-env.sh 5 | 6 | if [ ! $# -eq 2 ]; then 7 | echo "Must supply cluster name and prometheus fqdn as args" 8 | exit 1 9 | fi 10 | CLUSTER_NAME=$1 11 | export PROMETHEUS_FQDN=$2 12 | 13 | kubectl config use-context $CLUSTER_NAME-admin@$CLUSTER_NAME 14 | 15 | mkdir -p generated/$CLUSTER_NAME/monitoring/ 16 | 17 | kubectl create ns tanzu-system-monitoring --dry-run=client --output yaml | kubectl apply -f - 18 | 19 | # Create certificate 20 | cp tkg-extensions-mods-examples/monitoring/prometheus-cert.yaml generated/$CLUSTER_NAME/monitoring/prometheus-cert.yaml 21 | yq e -i ".spec.dnsNames[0] = env(PROMETHEUS_FQDN)" generated/$CLUSTER_NAME/monitoring/prometheus-cert.yaml 22 | kubectl apply -f generated/$CLUSTER_NAME/monitoring/prometheus-cert.yaml 23 | # Wait for cert to be ready 24 | while kubectl get certificates -n tanzu-system-monitoring prometheus-cert | grep True ; [ $? -ne 0 ]; do 25 | echo Prometheus certificate is not yet ready 26 | sleep 5 27 | done 28 | 29 | # TODO: Created https://github.com/vmware-tanzu/community-edition/issues/2946 requesting that a parameter be added to allow you to specify the secret name 30 | # instead of providing the cert in data values.yaml. Once that has been delivered and flows downstream, we can update this section of the code 31 | # Read Prometheus certificate details and store them in variables 32 | export PROMETHEUS_CERT_CRT=$(kubectl get secret prometheus-cert-tls -n tanzu-system-monitoring -o=jsonpath={.data."tls\.crt"} | base64 --decode) 33 | export PROMETHEUS_CERT_KEY=$(kubectl get secret prometheus-cert-tls -n tanzu-system-monitoring -o=jsonpath={.data."tls\.key"} | base64 --decode) 34 | 35 | export TRUE_VALUE=true 36 | yq e ".ingress.enabled = env(TRUE_VALUE)" --null-input > generated/$CLUSTER_NAME/monitoring/prometheus-data-values.yaml 37 | yq e -i ".ingress.virtual_host_fqdn = env(PROMETHEUS_FQDN)" generated/$CLUSTER_NAME/monitoring/prometheus-data-values.yaml 38 | yq e -i '.ingress.tlsCertificate."tls.crt" = strenv(PROMETHEUS_CERT_CRT)' generated/$CLUSTER_NAME/monitoring/prometheus-data-values.yaml 39 | yq e -i '.ingress.tlsCertificate."tls.key" = strenv(PROMETHEUS_CERT_KEY)' generated/$CLUSTER_NAME/monitoring/prometheus-data-values.yaml 40 | yq e -i '.namespace = "tanzu-system-monitoring"' generated/$CLUSTER_NAME/monitoring/prometheus-data-values.yaml 41 | 42 | # Apply Monitoring 43 | # Retrieve the most recent version number. There may be more than one version available and we are assuming that the most recent is listed last, 44 | # thus supplying -1 as the index of the array 45 | VERSION=$(tanzu package available list prometheus.tanzu.vmware.com -n tanzu-user-managed-packages -oyaml --summary=false | yq e '.
| sort_by(.released-at)' | yq e ".[-1].version") 46 | tanzu package install prometheus \ 47 | --package prometheus.tanzu.vmware.com \ 48 | --version $VERSION \ 49 | --namespace tanzu-user-managed-packages \ 50 | --values-file generated/$CLUSTER_NAME/monitoring/prometheus-data-values.yaml 51 | -------------------------------------------------------------------------------- /scripts/generate-and-apply-tmc-acme-fitness-yaml.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/set-env.sh 5 | 6 | if [ ! $# -eq 1 ]; then 7 | echo "Must supply cluster name as args" 8 | exit 1 9 | fi 10 | CLUSTER_NAME=$1 11 | 12 | IAAS=$(yq e .iaas $PARAMS_YAML) 13 | 14 | export TMC_ACME_FITNESS_WORKSPACE_NAME=$(yq e .acme-fitness.tmc-workspace $PARAMS_YAML) 15 | export VMWARE_ID=$(yq e .vmware-id $PARAMS_YAML) 16 | export TMC_CLUSTER_NAME=$VMWARE_ID-$CLUSTER_NAME-$IAAS 17 | 18 | mkdir -p generated/$CLUSTER_NAME/tmc 19 | cp -r tmc/config/* generated/$CLUSTER_NAME/tmc/ 20 | 21 | # acme-fitness-dev.yaml 22 | yq e -i ".fullName.name = env(TMC_ACME_FITNESS_WORKSPACE_NAME)" generated/$CLUSTER_NAME/tmc/workspace/acme-fitness-dev.yaml 23 | yq e -i ".meta.labels.origin = env(VMWARE_ID)" generated/$CLUSTER_NAME/tmc/workspace/acme-fitness-dev.yaml 24 | 25 | # tkg-wlc-acme-fitness.yaml 26 | yq e -i ".fullName.clusterName = env(TMC_CLUSTER_NAME)" generated/$CLUSTER_NAME/tmc/namespace/tkg-wlc-acme-fitness.yaml 27 | yq e -i ".meta.labels.origin = env(VMWARE_ID)" generated/$CLUSTER_NAME/tmc/namespace/tkg-wlc-acme-fitness.yaml 28 | yq e -i ".spec.workspaceName = env(TMC_ACME_FITNESS_WORKSPACE_NAME)" generated/$CLUSTER_NAME/tmc/namespace/tkg-wlc-acme-fitness.yaml 29 | 30 | if tmc workspace list | grep -q $TMC_ACME_FITNESS_WORKSPACE_NAME; then 31 | tmc workspace delete $TMC_ACME_FITNESS_WORKSPACE_NAME 32 | fi 33 | tmc workspace create -f generated/$CLUSTER_NAME/tmc/workspace/acme-fitness-dev.yaml 34 | tmc workspace iam add-binding $TMC_ACME_FITNESS_WORKSPACE_NAME --role workspace.edit --groups acme-fitness-devs 35 | tmc cluster namespace create -f generated/$CLUSTER_NAME/tmc/namespace/tkg-wlc-acme-fitness.yaml 36 | -------------------------------------------------------------------------------- /scripts/generate-gitlab.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | CLUSTER_NAME=$(yq e .shared-services-cluster.name $PARAMS_YAML) 4 | GITLAB_BASE_FQDN="$(yq e .shared-services-cluster.name $PARAMS_YAML).$(yq e .subdomain $PARAMS_YAML)" 5 | IAAS=$(yq e .iaas $PARAMS_YAML) 6 | LETS_ENCRYPT_EMAIL=$(yq e .lets-encrypt-acme-email $PARAMS_YAML) 7 | mkdir -p generated/$CLUSTER_NAME/gitlab/ 8 | 9 | cp gitlab/values-gitlab.yaml generated/$CLUSTER_NAME/gitlab/values-gitlab.yaml 10 | 11 | kubectl config use-context $CLUSTER_NAME-admin@$CLUSTER_NAME 12 | 13 | # Grab the external IP or name 14 | if [ "$IAAS" = "vsphere" ]; then 15 | EXT_NAME=`kubectl get svc envoy -n tanzu-system-ingress -o jsonpath='{.status.loadBalancer.ingress[0].ip}'` 16 | else 17 | EXT_NAME=`kubectl get svc envoy -n tanzu-system-ingress -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'` 18 | fi 19 | 20 | sed -i -e "s/GITLAB_BASE_FQDN/$GITLAB_BASE_FQDN/g" generated/$CLUSTER_NAME/gitlab/values-gitlab.yaml 21 | sed -i -e "s/EXTERNAL_LB_IP/$EXT_NAME/g" generated/$CLUSTER_NAME/gitlab/values-gitlab.yaml 22 | sed -i -e 
"s/CERT_MANAGER_EMAIL/$LETS_ENCRYPT_EMAIL/g" generated/$CLUSTER_NAME/gitlab/values-gitlab.yaml 23 | # Remove original file copy created by sed on mac's. noop for linux 24 | rm generated/$CLUSTER_NAME/gitlab/values-gitlab.yaml-e 25 | -------------------------------------------------------------------------------- /scripts/inject-dex-client-kubeapps.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash -e 2 | 3 | MGMT_CLUSTER_NAME=$(yq e .management-cluster.name $PARAMS_YAML) 4 | 5 | ## Adds additional path to the Shared Services DEX Entry for kubeapps OIDC 6 | export KUBEAPPS_URL="https://$(yq e .kubeapps.server-fqdn $PARAMS_YAML)/oauth2/callback" 7 | 8 | yq e -i '.staticClients[1].redirectURIs[0] = env(KUBEAPPS_URL)' generated/$MGMT_CLUSTER_NAME/pinniped/dex-cm-config.yaml 9 | yq e -i '.staticClients[1].id = "kubeapps"' generated/$MGMT_CLUSTER_NAME/pinniped/dex-cm-config.yaml 10 | yq e -i '.staticClients[1].name = "kubeapps"' generated/$MGMT_CLUSTER_NAME/pinniped/dex-cm-config.yaml 11 | yq e -i '.staticClients[1].secret = "FOO_SECRET"' generated/$MGMT_CLUSTER_NAME/pinniped/dex-cm-config.yaml 12 | 13 | kubectl config use-context $MGMT_CLUSTER_NAME-admin@$MGMT_CLUSTER_NAME 14 | 15 | kubectl create cm dex -n tanzu-system-auth --from-file=config.yaml=generated/$MGMT_CLUSTER_NAME/pinniped/dex-cm-config.yaml -o yaml --dry-run=client | kubectl apply -f- 16 | # And bounce dex 17 | kubectl set env deployment dex --env="LAST_RESTART=$(date)" --namespace tanzu-system-auth 18 | 19 | -------------------------------------------------------------------------------- /scripts/prep-cluster-overlays.sh: -------------------------------------------------------------------------------- 1 | cp -R .tanzu/tkg/providers/ytt/* ~/.tanzu/tkg/providers/ytt 2 | cp -R .tanzu/tkg/providers/infrastructure-vsphere/ytt/* ~/.tanzu/tkg/providers/infrastructure-providers/ytt -------------------------------------------------------------------------------- /scripts/register-cluster-argocd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/../scripts/set-env.sh 5 | 6 | export CLUSTER_NAME=$(yq e .workload-cluster.name $PARAMS_YAML) 7 | export ARGOCD_CN=$(yq e .argocd.server-fqdn $PARAMS_YAML) 8 | export ARGOCD_PASSWORD=$(yq e .argocd.password $PARAMS_YAML) 9 | 10 | # Login with the cli 11 | argocd login $ARGOCD_CN --username admin --password $ARGOCD_PASSWORD 12 | 13 | # Collect the necessary configuration 14 | mkdir -p generated/$CLUSTER_NAME/argocd/ 15 | cp argocd/01-namespace.yaml generated/$CLUSTER_NAME/argocd/ 16 | cp argocd/02-serviceaccount.yaml generated/$CLUSTER_NAME/argocd/ 17 | cp argocd/03-clusterrolebinding.yaml generated/$CLUSTER_NAME/argocd/ 18 | 19 | # Apply the configuration 20 | kubectl config use-context $CLUSTER_NAME-admin@$CLUSTER_NAME 21 | kubectl apply -f generated/$CLUSTER_NAME/argocd/01-namespace.yaml 22 | kubectl apply -f generated/$CLUSTER_NAME/argocd/02-serviceaccount.yaml 23 | kubectl apply -f generated/$CLUSTER_NAME/argocd/03-clusterrolebinding.yaml 24 | 25 | # Create kubeconfig context with Service Account secret 26 | export TOKEN_SECRET=$(kubectl get serviceaccount -n argocd argocd -o jsonpath='{.secrets[0].name}') 27 | export TOKEN=$(kubectl get secret -n argocd $TOKEN_SECRET -o jsonpath='{.data.token}' | base64 --decode) 28 | kubectl config set-credentials 
$CLUSTER_NAME-argocd-token-user --token $TOKEN 29 | kubectl config set-context $CLUSTER_NAME-argocd-token-user@$CLUSTER_NAME \ 30 | --user $CLUSTER_NAME-argocd-token-user \ 31 | --cluster $CLUSTER_NAME 32 | 33 | # Add the config setup with the service account you created 34 | argocd cluster add $CLUSTER_NAME-argocd-token-user@$CLUSTER_NAME 35 | 36 | # See the clusters added 37 | argocd cluster list 38 | -------------------------------------------------------------------------------- /scripts/retrieve-acme-fitness-source.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash -e 2 | rm -rf acme_fitness_demo 3 | git clone https://github.com/vmwarecloudadvocacy/acme_fitness_demo.git 4 | cd acme_fitness_demo 5 | git checkout 158bbe2 6 | cd .. 7 | rm -rf acme_fitness_demo/.git 8 | rm -rf acme_fitness_demo/aws-fargate 9 | rm -rf acme_fitness_demo/docker-compose 10 | -------------------------------------------------------------------------------- /scripts/retrieve-lets-encrypt-ca-cert.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | curl https://letsencrypt.org/certs/isrg-root-x1-cross-signed.pem -o keys/letsencrypt-ca.pem 4 | 5 | # Remove the carriage return character that lets encrypt pem file has as a line ending 6 | if [ `uname -s` = 'Darwin' ]; 7 | then 8 | sed -i '' $'s/\x0D//' keys/letsencrypt-ca.pem 9 | else 10 | sed -i -e $'s/\x0D//' keys/letsencrypt-ca.pem 11 | fi 12 | 13 | chmod 600 keys/letsencrypt-ca.pem -------------------------------------------------------------------------------- /scripts/set-env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | : ${PARAMS_YAML?"Need to set PARAMS_YAML environment variable"} 4 | 5 | # Give some information timestamps to know how long things take 6 | echo $(date) 7 | -------------------------------------------------------------------------------- /scripts/tmc-attach.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/set-env.sh 5 | 6 | if [ ! $# -eq 1 ]; then 7 | echo "Must supply cluster name args" 8 | exit 1 9 | fi 10 | 11 | CLUSTER_NAME=$1 12 | 13 | kubectl config use-context $CLUSTER_NAME-admin@$CLUSTER_NAME 14 | 15 | IAAS=$(yq e .iaas $PARAMS_YAML) 16 | VMWARE_ID=$(yq e .vmware-id $PARAMS_YAML) 17 | TMC_CLUSTER_GROUP=$(yq e .tmc.cluster-group $PARAMS_YAML) 18 | 19 | mkdir -p generated/$CLUSTER_NAME 20 | 21 | if tmc system context current | grep -q IDToken; then 22 | echo "Currently logged into TMC." 23 | else 24 | echo "Please login to tmc before you continue." 25 | exit 1 26 | fi 27 | 28 | if tmc clustergroup list | grep -q $TMC_CLUSTER_GROUP; then 29 | echo "Cluster group $TMC_CLUSTER_GROUP found." 30 | else 31 | echo "Cluster group $TMC_CLUSTER_GROUP not found. Automatically creating it." 32 | tmc clustergroup create -n $TMC_CLUSTER_GROUP 33 | fi 34 | 35 | TMC_CLUSTER_NAME=$VMWARE_ID-$CLUSTER_NAME-$IAAS 36 | ATTACH=true 37 | 38 | if tmc cluster list | grep -q $TMC_CLUSTER_NAME; then 39 | if [ "$(tmc cluster get $TMC_CLUSTER_NAME -p attached -m attached | yq e '.status.health' -)" == "HEALTHY" ]; then 40 | echo "Cluster is already attached and healthy." 41 | ATTACH=false 42 | else 43 | echo "Cluster is already attached and unhealthy, likely an old reference. Will detach and re-attach." 
44 | echo "Detaching cluster." 45 | tmc cluster delete $TMC_CLUSTER_NAME -m attached -p attached --force 46 | 47 | while tmc cluster list | grep -q $TMC_CLUSTER_NAME; do 48 | echo Waiting for cluster to finish detaching. 49 | sleep 5 50 | done 51 | 52 | fi 53 | fi 54 | 55 | if $ATTACH; then 56 | echo "Attaching cluster now." 57 | tmc cluster attach \ 58 | --name $TMC_CLUSTER_NAME \ 59 | --labels origin=$VMWARE_ID \ 60 | --labels iaas=$IAAS \ 61 | --cluster-group $TMC_CLUSTER_GROUP \ 62 | --output generated/$CLUSTER_NAME/tmc.yaml 63 | fi 64 | 65 | kubectl apply -f generated/$CLUSTER_NAME/tmc.yaml 66 | echo "$CLUSTER_NAME registered with TMC" 67 | 68 | # After the script finishes atttaching the cluster to TMC, go to the TMC UI and find your cluster. In the Cluster view, go to `Add-ons > Tanzu Repositories` and disable the `tanzu-standard` repository if using version `v2023.7.13_update.2` -------------------------------------------------------------------------------- /scripts/tmc-policy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/set-env.sh 5 | 6 | if [ ! $# -eq 3 ]; then 7 | echo "Must supply cluster name, role, and groups as args" 8 | exit 1 9 | fi 10 | 11 | CLUSTER_NAME=$1 12 | ROLE=$2 13 | POLICY_GROUPS=$3 14 | IAAS=$(yq e .iaas $PARAMS_YAML) 15 | 16 | VMWARE_ID=$(yq e .vmware-id $PARAMS_YAML) 17 | 18 | tmc cluster iam add-binding $VMWARE_ID-$CLUSTER_NAME-$IAAS \ 19 | --role $ROLE \ 20 | --groups $POLICY_GROUPS \ 21 | --management-cluster-name attached \ 22 | --provisioner-name attached 23 | -------------------------------------------------------------------------------- /scripts/tmc-register-mc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/set-env.sh 5 | 6 | CLUSTER_NAME=$(yq e .management-cluster.name $PARAMS_YAML) 7 | 8 | kubectl config use-context $CLUSTER_NAME-admin@$CLUSTER_NAME 9 | 10 | IAAS=$(yq e .iaas $PARAMS_YAML) 11 | TMC_CLUSTER_GROUP=$(yq e .tmc.cluster-group $PARAMS_YAML) 12 | 13 | if tmc system context current | grep -q IDToken; then 14 | echo "Currently logged into TMC." 15 | else 16 | echo "Please login to tmc before you continue." 17 | exit 1 18 | fi 19 | 20 | if tmc clustergroup list | grep -q $TMC_CLUSTER_GROUP; then 21 | echo "Cluster group $TMC_CLUSTER_GROUP found." 22 | else 23 | echo "Cluster group $TMC_CLUSTER_GROUP not found. Automattically creating it." 24 | tmc clustergroup create -n $TMC_CLUSTER_GROUP 25 | fi 26 | 27 | mkdir -p generated/$CLUSTER_NAME/tmc 28 | 29 | REGISTER=true 30 | 31 | if tmc managementcluster list | grep -q $CLUSTER_NAME; then 32 | if [ "$(tmc managementcluster get $CLUSTER_NAME | yq e '.status.phase' -)" == "READY" ]; then 33 | echo "Management Cluster is already registered and ready." 34 | REGISTER=false 35 | else 36 | echo "Management Cluster is already registered and not READY, likely an old reference. Will deregistery and re-register." 37 | echo "Deregistering managemnet cluster." 38 | 39 | # HACK: Kubeconfig should not be required, OLYMP-26147 has been created address this. 
Set current context to managment cluster 40 | kubectl config use-context $CLUSTER_NAME-admin@$CLUSTER_NAME 41 | tmc managementcluster deregister $CLUSTER_NAME --force --kubeconfig ~/.kube/config 42 | 43 | while tmc managementcluster list | grep -q $CLUSTER_NAME; do 44 | echo Waiting for management cluster to finish deregistering. 45 | sleep 5 46 | done 47 | 48 | fi 49 | fi 50 | 51 | if $REGISTER; then 52 | echo "Registering management cluster now." 53 | tmc managementcluster register $CLUSTER_NAME \ 54 | --default-cluster-group $TMC_CLUSTER_GROUP \ 55 | --kubernetes-provider-type TKG 56 | 57 | TMC_REGISTRATION_URL=$(tmc managementcluster get $CLUSTER_NAME | yq e .status.registrationUrl -) 58 | 59 | # tanzu management-cluster register command has been removed since v1.4.1 60 | kubectl apply -f $TMC_REGISTRATION_URL 61 | 62 | echo "$CLUSTER_NAME registered as management-cluster with TMC" 63 | 64 | mv k8s-register-manifest.yaml generated/$CLUSTER_NAME/tmc/ 65 | 66 | while [ "$(tmc managementcluster get $CLUSTER_NAME | yq e '.status.phase' -)" != "READY" ]; do 67 | echo Waiting for management cluster to have registration status of READY. 68 | sleep 5 69 | done 70 | 71 | fi 72 | -------------------------------------------------------------------------------- /scripts/update-pinniped-configuration.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | source $TKG_LAB_SCRIPTS/set-env.sh 5 | 6 | CLUSTER_NAME=$(yq e .management-cluster.name $PARAMS_YAML) 7 | export PINNIPED_CN=$(yq e .management-cluster.pinniped-fqdn $PARAMS_YAML) 8 | export PINNIPED_ENDPOINT=https://$PINNIPED_CN 9 | 10 | kubectl config use-context $CLUSTER_NAME-admin@$CLUSTER_NAME 11 | 12 | mkdir -p generated/$CLUSTER_NAME/pinniped/ 13 | 14 | cp tkg-extensions-mods-examples/authentication/pinniped/pinniped-ingress.yaml generated/$CLUSTER_NAME/pinniped/pinniped-ingress.yaml 15 | cp tkg-extensions-mods-examples/authentication/pinniped/pinniped-certificate.yaml generated/$CLUSTER_NAME/pinniped/pinniped-certificate.yaml 16 | 17 | yq e -i '.spec.dnsNames[0] = env(PINNIPED_CN)' generated/$CLUSTER_NAME/pinniped/pinniped-certificate.yaml 18 | yq e -i '.spec.virtualhost.fqdn = env(PINNIPED_CN)' generated/$CLUSTER_NAME/pinniped/pinniped-ingress.yaml 19 | 20 | kubectl apply -f generated/$CLUSTER_NAME/pinniped/pinniped-ingress.yaml 21 | 22 | while dig $PINNIPED_CN | grep "ANSWER SECTION" ; [ $? -ne 0 ]; do 23 | echo Waiting for external-dns to complete configuration of DNS to satisfy for $PINNIPED_CN 24 | sleep 5 25 | done 26 | 27 | kubectl apply -f generated/$CLUSTER_NAME/pinniped/pinniped-certificate.yaml 28 | 29 | while kubectl get certificate custom-pinniped-cert -n pinniped-supervisor | grep True ; [ $? 
-ne 0 ]; do 30 | echo Pinniped certificate is not yet ready 31 | sleep 5 32 | done 33 | 34 | if [ `uname -s` = 'Darwin' ]; 35 | then 36 | export CA_BUNDLE=`cat keys/letsencrypt-ca.pem | base64` 37 | else 38 | export CA_BUNDLE=`cat keys/letsencrypt-ca.pem | base64 -w 0` 39 | fi 40 | 41 | # Copy secret generated from certificate to a new one to combine both cert and CA data 42 | kubectl get secret custom-auth-cert-tls -n pinniped-supervisor -oyaml > generated/$CLUSTER_NAME/pinniped/pinniped-secret-with-ca.yaml 43 | yq e -i '.metadata.name = "custom-auth-cert-tls-with-ca"' generated/$CLUSTER_NAME/pinniped/pinniped-secret-with-ca.yaml 44 | yq e -i '.data."ca.crt" = strenv(CA_BUNDLE)' generated/$CLUSTER_NAME/pinniped/pinniped-secret-with-ca.yaml 45 | yq e -i 'del(.metadata.annotations)' generated/$CLUSTER_NAME/pinniped/pinniped-secret-with-ca.yaml 46 | yq e -i 'del(.metadata.creationTimestamp)' generated/$CLUSTER_NAME/pinniped/pinniped-secret-with-ca.yaml 47 | yq e -i 'del(.metadata.uid)' generated/$CLUSTER_NAME/pinniped/pinniped-secret-with-ca.yaml 48 | yq e -i 'del(.metadata.resourceVersion)' generated/$CLUSTER_NAME/pinniped/pinniped-secret-with-ca.yaml 49 | kubectl apply -f generated/$CLUSTER_NAME/pinniped/pinniped-secret-with-ca.yaml 50 | 51 | # Update add-on values 52 | kubectl get secret $CLUSTER_NAME-pinniped-package -n tkg-system -ojsonpath="{.data.values\.yaml}" | base64 --decode > generated/$CLUSTER_NAME/pinniped/pinniped-package-values.yaml 53 | yq e -i '.custom_tls_secret = "custom-auth-cert-tls-with-ca"' generated/$CLUSTER_NAME/pinniped/pinniped-package-values.yaml 54 | yq e -i '.pinniped.supervisor_svc_external_dns = env(PINNIPED_ENDPOINT)' generated/$CLUSTER_NAME/pinniped/pinniped-package-values.yaml 55 | yq e -i '.pinniped.supervisor.service.type = "ClusterIP"' generated/$CLUSTER_NAME/pinniped/pinniped-package-values.yaml 56 | 57 | 58 | # Delete the existing job. It will be recreated when the pinniped-addon secret is updated below, which gives us a chance to wait until the job is completed 59 | kubectl delete job pinniped-post-deploy-job -n pinniped-supervisor 60 | 61 | if [ `uname -s` = 'Darwin' ]; 62 | then 63 | NEW_VALUES=`cat generated/$CLUSTER_NAME/pinniped/pinniped-package-values.yaml | base64` 64 | 65 | else 66 | NEW_VALUES=`cat generated/$CLUSTER_NAME/pinniped/pinniped-package-values.yaml | base64 -w 0` 67 | fi 68 | 69 | kubectl patch secret $CLUSTER_NAME-pinniped-package -n tkg-system -p '{"data": {"values.yaml": "'$NEW_VALUES'"}}' 70 | 71 | # Wait until job is completed. 72 | while kubectl get jobs -n pinniped-supervisor | grep "1/1"; [ $? -ne 0 ]; do 73 | echo "Waiting for pinniped-post-deploy-job to be completed" 74 | sleep 5 75 | done 76 | -------------------------------------------------------------------------------- /terraform/.gitignore: -------------------------------------------------------------------------------- 1 | # Local .terraform directories 2 | **/.terraform/* 3 | 4 | # Terraform lockfile 5 | .terraform.lock.hcl 6 | 7 | # .tfstate files 8 | *.tfstate 9 | *.tfstate.* 10 | 11 | # Crash log files 12 | crash.log 13 | 14 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as 15 | # passwords, private keys, and other secrets. These should not be part of version 16 | # control as they are data points which are potentially sensitive and subject 17 | # to change depending on the environment.
18 | *.tfvars 19 | 20 | # Ignore override files as they are usually used to override resources locally and so 21 | # are not checked in 22 | override.tf 23 | override.tf.json 24 | *_override.tf 25 | *_override.tf.json 26 | 27 | # Ignore CLI configuration files 28 | .terraformrc 29 | terraform.rc 30 | 31 | .DS_Store 32 | -------------------------------------------------------------------------------- /terraform/aws/main.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = var.aws_region 3 | } 4 | 5 | locals { 6 | name = "ex-${replace(basename(path.cwd), "_", "-")}" 7 | 8 | tags = { 9 | Name = "TKGVPC" 10 | } 11 | } 12 | 13 | ################################################################################ 14 | # VPC Module 15 | ################################################################################ 16 | 17 | module "vpc" { 18 | source = "github.com/terraform-aws-modules/terraform-aws-vpc" 19 | 20 | name = local.name 21 | cidr = "172.16.0.0/16" 22 | 23 | azs = ["${var.aws_region}a"] 24 | private_subnets = ["172.16.0.0/24"] 25 | public_subnets = ["172.16.3.0/24"] 26 | 27 | enable_ipv6 = false 28 | 29 | enable_nat_gateway = true 30 | single_nat_gateway = true 31 | map_public_ip_on_launch = true 32 | 33 | igw_tags = { 34 | "Name" = "tkg-inet-gw" 35 | } 36 | 37 | public_subnet_tags = { 38 | "kubernetes.io/role/elb" = "1" 39 | "kubernetes.io/cluster/${var.mc_name}" = "shared" 40 | "kubernetes.io/cluster/${var.ssc_name}" = "shared" 41 | "kubernetes.io/cluster/${var.wlc_name}" = "shared" 42 | } 43 | 44 | private_subnet_tags = { 45 | "kubernetes.io/role/internal-elb" = "1" 46 | "kubernetes.io/cluster/${var.mc_name}" = "shared" 47 | "kubernetes.io/cluster/${var.ssc_name}" = "shared" 48 | "kubernetes.io/cluster/${var.wlc_name}" = "shared" 49 | } 50 | 51 | public_subnet_tags_per_az = { 52 | "${var.aws_region}a" = { 53 | "Name" = "pub-a" 54 | } 55 | } 56 | 57 | private_subnet_tags_per_az = { 58 | "${var.aws_region}a" = { 59 | "Name" = "priv-a" 60 | } 61 | } 62 | 63 | tags = local.tags 64 | 65 | } 66 | 67 | -------------------------------------------------------------------------------- /terraform/aws/outputs.tf: -------------------------------------------------------------------------------- 1 | output "vpc_id" { 2 | description = "The ID of the VPC" 3 | value = module.vpc.vpc_id 4 | } 5 | 6 | output "private_subnet" { 7 | description = "First private subnet id" 8 | value = module.vpc.private_subnets[0] 9 | } 10 | 11 | output "public_subnet" { 12 | description = "First public subnet id" 13 | value = module.vpc.public_subnets[0] 14 | } 15 | -------------------------------------------------------------------------------- /terraform/aws/variables.tf: -------------------------------------------------------------------------------- 1 | variable "mc_name" { 2 | description = "Management cluster name" 3 | default = "default_mc" 4 | } 5 | variable "ssc_name" { 6 | description = "Shared services cluster name" 7 | default = "default_ssc" 8 | } 9 | variable "wlc_name" { 10 | description = "Workload cluster name" 11 | default = "default_wlc" 12 | } 13 | variable "aws_region" { 14 | description = "Aws Region" 15 | default = "us-east-2" 16 | } -------------------------------------------------------------------------------- /terraform/aws/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.13.1" 3 | 4 | required_providers { 5 | aws = { 6 | source = 
"hashicorp/aws" 7 | version = ">= 3.73" 8 | } 9 | } 10 | } -------------------------------------------------------------------------------- /tkg-extensions-mods-examples/authentication/pinniped/pinniped-certificate.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: custom-pinniped-cert 5 | namespace: pinniped-supervisor 6 | spec: 7 | # Secret names are always required. 8 | secretName: custom-auth-cert-tls 9 | duration: 2160h # 90d 10 | renewBefore: 360h # 15d 11 | subject: 12 | organizations: 13 | - vmware 14 | isCA: false 15 | privateKey: 16 | size: 2048 17 | algorithm: RSA 18 | encoding: PKCS1 19 | # At least one of a DNS Name, USI SAN, or IP address is required. 20 | dnsNames: # Populated by scripts 21 | issuerRef: 22 | name: letsencrypt-contour-cluster-issuer 23 | # We can reference ClusterIssuers by changing the kind here. 24 | # The default value is Issuer (i.e. a locally namespaced Issuer) 25 | kind: ClusterIssuer -------------------------------------------------------------------------------- /tkg-extensions-mods-examples/authentication/pinniped/pinniped-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: projectcontour.io/v1 2 | kind: HTTPProxy 3 | metadata: 4 | name: pinniped 5 | namespace: pinniped-supervisor 6 | spec: 7 | virtualhost: 8 | fqdn: # populated by scripts 9 | tls: 10 | passthrough: true 11 | tcpproxy: 12 | services: 13 | - name: pinniped-supervisor 14 | port: 443 15 | -------------------------------------------------------------------------------- /tkg-extensions-mods-examples/ingress/contour/contour-cluster-issuer-dns-aws.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: letsencrypt-contour-cluster-issuer 5 | namespace: tanzu-system-ingress 6 | spec: 7 | acme: 8 | email: "jaguilar@pivotal.io" 9 | privateKeySecretRef: 10 | name: acme-account-key 11 | server: https://acme-v02.api.letsencrypt.org/directory 12 | solvers: 13 | - dns01: 14 | route53: 15 | region: eu-central-1 16 | accessKeyID: AKIAIOSFODNN7EXAMPLE 17 | secretAccessKeySecretRef: 18 | name: prod-route53-credentials-secret 19 | key: secret-access-key 20 | -------------------------------------------------------------------------------- /tkg-extensions-mods-examples/ingress/contour/contour-cluster-issuer-dns-azure.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: letsencrypt-contour-cluster-issuer 5 | namespace: tanzu-system-ingress 6 | spec: 7 | acme: 8 | email: "jaguilar@pivotal.io" 9 | privateKeySecretRef: 10 | name: acme-account-key 11 | server: https://acme-v02.api.letsencrypt.org/directory 12 | solvers: 13 | - dns01: 14 | azureDNS: 15 | clientID: AZURE_CERT_MANAGER_SP_APP_ID 16 | clientSecretSecretRef: 17 | # The following is the secret we created in Kubernetes. Issuer will use this to present challenge to Azure DNS. 
18 | name: azure-dns-service-account 19 | key: client-secret 20 | subscriptionID: AZURE_SUBSCRIPTION_ID 21 | tenantID: AZURE_TENANT_ID 22 | resourceGroupName: AZURE_DNS_ZONE_RESOURCE_GROUP 23 | hostedZoneName: AZURE_DNS_ZONE 24 | # Azure Cloud Environment, default to AzurePublicCloud 25 | environment: AzurePublicCloud 26 | -------------------------------------------------------------------------------- /tkg-extensions-mods-examples/ingress/contour/contour-cluster-issuer-dns-gcloud.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: letsencrypt-contour-cluster-issuer 5 | namespace: tanzu-system-ingress 6 | spec: 7 | acme: 8 | email: "jaguilar@pivotal.io" 9 | privateKeySecretRef: 10 | name: acme-account-key 11 | server: https://acme-v02.api.letsencrypt.org/directory 12 | solvers: 13 | - dns01: 14 | cloudDNS: 15 | # The ID of the GCP project 16 | project: MY_PROJECT_NAME 17 | # This is the secret used to access the service account 18 | serviceAccountSecretRef: 19 | name: gcloud-dns-service-account 20 | key: credentials.json 21 | -------------------------------------------------------------------------------- /tkg-extensions-mods-examples/monitoring/grafana-cert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: grafana-cert 5 | namespace: tanzu-system-dashboards 6 | spec: 7 | # Secret names are always required. 8 | secretName: grafana-cert-tls 9 | duration: 2160h # 90d 10 | renewBefore: 360h # 15d 11 | subject: 12 | organizations: 13 | - vmware 14 | isCA: false 15 | privateKey: 16 | size: 2048 17 | algorithm: RSA 18 | encoding: PKCS1 19 | # At least one of a DNS Name, USI SAN, or IP address is required. 20 | dnsNames: 21 | # 22 | issuerRef: 23 | name: letsencrypt-contour-cluster-issuer 24 | # We can reference ClusterIssuers by changing the kind here. 25 | # The default value is Issuer (i.e. a locally namespaced Issuer) 26 | kind: ClusterIssuer 27 | -------------------------------------------------------------------------------- /tkg-extensions-mods-examples/monitoring/prometheus-cert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: prometheus-cert 5 | namespace: tanzu-system-monitoring 6 | spec: 7 | # Secret names are always required. 8 | secretName: prometheus-cert-tls 9 | duration: 2160h # 90d 10 | renewBefore: 360h # 15d 11 | subject: 12 | organizations: 13 | - vmware 14 | isCA: false 15 | privateKey: 16 | size: 2048 17 | algorithm: RSA 18 | encoding: PKCS1 19 | # At least one of a DNS Name, USI SAN, or IP address is required. 20 | dnsNames: 21 | # 22 | issuerRef: 23 | name: letsencrypt-contour-cluster-issuer 24 | # We can reference ClusterIssuers by changing the kind here. 25 | # The default value is Issuer (i.e. a locally namespaced Issuer) 26 | kind: ClusterIssuer -------------------------------------------------------------------------------- /tkg-extensions-mods-examples/registry/harbor/02-certs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: harbor-cert 5 | namespace: tanzu-system-registry 6 | spec: 7 | # Secret names are always required. 
--------------------------------------------------------------------------------
/tkg-extensions-mods-examples/registry/harbor/02-certs.yaml:
--------------------------------------------------------------------------------
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: harbor-cert
  namespace: tanzu-system-registry
spec:
  # Secret names are always required.
  secretName: harbor-cert-tls
  duration: 2160h # 90d
  renewBefore: 360h # 15d
  subject:
    organizations:
    - vmware
  isCA: false
  privateKey:
    size: 2048
    algorithm: RSA
    encoding: PKCS1
  # At least one of a DNS Name, URI SAN, or IP address is required.
  dnsNames:
  #
  issuerRef:
    name: letsencrypt-contour-cluster-issuer
    # We can reference ClusterIssuers by changing the kind here.
    # The default value is Issuer (i.e. a locally namespaced Issuer).
    kind: ClusterIssuer
--------------------------------------------------------------------------------
/tkg-extensions-mods-examples/registry/harbor/overlay-timeout-increase.yaml:
--------------------------------------------------------------------------------
#@ load("@ytt:overlay", "overlay")

#@overlay/match by=overlay.subset({"kind": "HTTPProxy","metadata":{"name":"harbor-httpproxy"}})
---
spec:
  routes:
  #@overlay/match by=lambda indexOrKey, left, right: left["conditions"][0]["prefix"].endswith("/v2/")
  - timeoutPolicy:
      response: 300s
--------------------------------------------------------------------------------
/tkg-extensions-mods-examples/service-discovery/external-dns/external-dns-data-values-aws-with-contour.yaml.example:
--------------------------------------------------------------------------------
---

# The namespace in which to deploy ExternalDNS.
namespace: tanzu-system-service-discovery

# Deployment-related configuration.
deployment:
  args:
  - --metrics-address=:7979 #! ensures external-dns exposes metrics
  - --source=service
  - --source=ingress
  - --source=contour-httpproxy # Provide this to enable Contour HTTPProxy support. Contour must be installed or external-dns will fail.
  - --domain-filter=external-dns-test.<< DOMAIN >> # makes ExternalDNS see only the hosted zones matching the provided domain; omit to process all available hosted zones
  - --policy=upsert-only # prevents ExternalDNS from deleting any records; omit to enable full synchronization
  - --registry=txt
  - --txt-owner-id=<< HOSTED ZONE ID >>
  - --txt-prefix=txt # disambiguates TXT records from CNAME records
  - --provider=aws
  - --aws-zone-type=public # only look at public hosted zones (valid values are public, private, or no value for both)
  - --aws-prefer-cname
  env:
  - name: AWS_ACCESS_KEY_ID
    valueFrom:
      secretKeyRef:
        name: route53-credentials
        key: aws_access_key_id
  - name: AWS_SECRET_ACCESS_KEY
    valueFrom:
      secretKeyRef:
        name: route53-credentials
        key: aws_secret_access_key
  securityContext: {}
  volumeMounts: []
  volumes: []
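
The AWS data values above read their Route 53 credentials from a Secret named route53-credentials in the same namespace, using the key names referenced by the two secretKeyRef entries (the << DOMAIN >> and << HOSTED ZONE ID >> tokens in the args are placeholders the lab scripts substitute). A minimal sketch of creating that Secret, with placeholder values you replace with your own keys:

    kubectl create secret generic route53-credentials \
      --namespace tanzu-system-service-discovery \
      --from-literal=aws_access_key_id=REPLACE_WITH_ACCESS_KEY_ID \
      --from-literal=aws_secret_access_key=REPLACE_WITH_SECRET_ACCESS_KEY
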
--------------------------------------------------------------------------------
/tkg-extensions-mods-examples/service-discovery/external-dns/external-dns-data-values-azure-with-contour.yaml.example:
--------------------------------------------------------------------------------
---

# The namespace in which to deploy ExternalDNS.
namespace: tanzu-system-service-discovery

deployment:
  args:
  - --metrics-address=:7979 #! ensures external-dns exposes metrics
  - --source=service
  - --source=ingress
  - --source=contour-httpproxy #! configure external-dns to read Contour HTTPProxy resources
  - --domain-filter=my-zone.example.org #! zone where services are deployed
  - --policy=upsert-only #! prevents ExternalDNS from deleting any records; omit to enable full synchronization
  - --provider=azure
  - --azure-resource-group=externaldns
  - --registry=txt
  volumeMounts:
  - name: azure-config-file
    mountPath: /etc/kubernetes
    readOnly: true
  volumes:
  - name: azure-config-file
    secret:
      secretName: azure-config-file
      items:
      - key: externaldns-config.json
        path: azure.json
--------------------------------------------------------------------------------
/tkg-extensions-mods-examples/service-discovery/external-dns/external-dns-data-values-google-with-contour.yaml.example:
--------------------------------------------------------------------------------
---

# The namespace in which to deploy ExternalDNS.
namespace: tanzu-system-service-discovery

deployment:
  args:
  - --metrics-address=:7979 #! ensures external-dns exposes metrics
  - --source=service
  - --source=ingress
  - --source=contour-httpproxy #! configure external-dns to read Contour HTTPProxy resources
  - --domain-filter=my-zone.example.org #! zone where services are deployed
  - --provider=google
  - --policy=upsert-only #! prevents ExternalDNS from deleting any records; omit to enable full synchronization
  - --google-project=REPLACE_ME_WITH_GCP_PROJECT_ID
  - --registry=txt
  env:
  - name: GOOGLE_APPLICATION_CREDENTIALS
    value: /etc/secrets/service-account/credentials.json
  volumeMounts:
  - name: google-service-account
    mountPath: /etc/secrets/service-account/
  volumes:
  - name: google-service-account
    secret:
      secretName: gcloud-dns-credentials
      items:
      - key: credentials.json
        path: credentials.json
--------------------------------------------------------------------------------
/tkg-extensions-mods-examples/service-discovery/external-dns/metrics-overlay.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Secret
metadata:
  name: metrics-overlay
stringData:
  metrics-overlay.yaml: |
    #@ load("@ytt:overlay", "overlay")
    #@overlay/match by=overlay.subset({"metadata":{"name":"external-dns"}, "kind": "Deployment"})
    ---
    spec:
      template:
        #@overlay/match missing_ok=True
        metadata:
          #@overlay/match missing_ok=True
          annotations:
            prometheus.io/scrape: "true"
            prometheus.io/path: "/metrics"
            prometheus.io/port: "7979"
--------------------------------------------------------------------------------
/tmc/config/namespace/tkg-wlc-acme-fitness.yaml:
--------------------------------------------------------------------------------
fullName:
  clusterName: se-dpfeffer-wlc-1
  managementClusterName: attached
  name: acme-fitness
  provisionerName: attached
meta:
  labels:
    origin: dpfeffer
spec:
  workspaceName: dpfeffer-acme-fitness-dev
--------------------------------------------------------------------------------
/tmc/config/workspace/acme-fitness-dev.yaml:
--------------------------------------------------------------------------------
fullName:
  name: dpfeffer-acme-fitness-dev
meta:
  labels:
    origin: dpfeffer
--------------------------------------------------------------------------------
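
The two TMC payloads above are not Kubernetes manifests; they are fed to the Tanzu Mission Control CLI, which creates the workspace first and then the managed namespace that references it through spec.workspaceName. A rough sketch of that flow, assuming the tmc CLI's workspace and cluster namespace subcommands accept a file argument as shown (confirm the exact flags with tmc workspace create --help before relying on them):

    tmc workspace create -f tmc/config/workspace/acme-fitness-dev.yaml
    tmc cluster namespace create -f tmc/config/namespace/tkg-wlc-acme-fitness.yaml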