├── .envrc ├── .gitignore ├── README.md ├── acme_fitnessv2 ├── cluster1.yml ├── cluster2.yml ├── install_cluster1.sh ├── install_cluster1_istio.sh ├── install_cluster2.sh ├── istio.yml ├── secrets.yml └── traffic │ ├── Dockerfile │ ├── build_docker.sh │ ├── launch_docker.sh │ ├── locustfile.py │ ├── requirements.txt │ └── start.sh ├── argo-rollouts ├── install.sh └── namespace.yml ├── avi └── readme.md ├── cert-manager ├── clusterissuer.yml └── install.sh ├── concourse ├── Helper │ ├── .gitignore │ ├── Dockerfile │ └── build-concourse-helper.sh ├── install │ ├── certificate.yml │ ├── helm.yml │ ├── ingress.yml │ ├── install.sh │ ├── values.yml │ └── virtualservice.yml ├── pipeline │ ├── charts-syncer.yml │ ├── demo-app.yml │ ├── fly.sh │ └── tbs-update.yml ├── show-pipeline-variables.sh └── tasks │ ├── build-image │ ├── build-image.sh │ └── build-image.yml │ ├── charts-syncer │ ├── charts-syncer.sh │ └── charts-syncer.yml │ ├── create-wavefront-event │ ├── create-wavefront-event.sh │ └── create-wavefront-event.yml │ ├── deploy-image │ ├── deploy-image.sh │ └── deploy-image.yml │ └── deploy-tbs-dependencies │ ├── deploy-tbs-dependencies.sh │ └── deploy-tbs-dependencies.yml ├── contour ├── install-helm.sh └── install.sh ├── demo-app ├── argorollouts │ └── demo-app.yml ├── deploy │ ├── deployment.yml │ ├── install.sh │ ├── services.yml │ ├── values.yaml │ └── virtualservice.yml ├── gemfire │ ├── README.md │ ├── cluster.yml │ ├── install.sh │ └── service-redis.yml ├── image │ ├── image.yml │ ├── install.sh │ └── values.yaml ├── mysql │ ├── helm.yml │ ├── install.sh │ └── readme ├── postgres │ ├── demo-app.yml │ ├── get-password.sh │ └── install.sh └── traffic.sh ├── demo-magic.sh ├── demo-tbs-only-quiet.sh ├── demo.sh ├── fluent-bit └── grafana-loki.sh ├── harbor ├── certificate.yml ├── helm-goharbor.yml ├── helm.yml ├── install.sh ├── namespace.yml ├── readme.md └── values.yml ├── healthwatch ├── install.sh └── role.yml ├── kubeapps ├── cluster-role-binding.yml ├── configure-kubeapps.sh ├── helm.yml ├── install.sh ├── service-account.yml ├── values.yml └── virtualservice.yml ├── metallb ├── apps.yml ├── databases.yml ├── install.sh ├── namespace.yml └── testing.yml ├── minio ├── helm.yml ├── install.sh ├── values.yml └── virtualservice.yml ├── prometheus ├── clusterrole.yml ├── clusterrolebinding.yml ├── config.yml ├── deployment.yml ├── install.sh ├── namespace.yml ├── readme.md ├── sa.yml ├── values.yml └── virtualservice.yml ├── spring-petclinic └── image │ ├── harbor-docker-creds.yml │ ├── image.yml │ ├── install.sh │ ├── tbs-service-account.yml │ └── values.yaml ├── storage ├── README.md ├── csi-vsphere-template.conf ├── install.sh ├── storageclass.yml ├── vsphere-csi-controller-deployment.yaml ├── vsphere-csi-controller-rbac.yaml └── vsphere-csi-node-ds.yaml ├── tac ├── sync-proxy-cache.sh ├── sync-replication-rules.sh ├── tac-replication-rules.yaml └── tac.yaml ├── tbs ├── README.md ├── install-tbs-dependencies.sh ├── install.sh ├── java-buildpack │ ├── configmap.yml │ ├── image.yml │ └── secret.yml ├── notary.sh ├── tbs-update-stack-100.0.30.sh └── tbs-update-stack-100.0.66.sh ├── tds ├── gemfire │ ├── install.sh │ └── relocate.sh ├── mysql │ └── relocate.sh ├── postgres │ ├── README │ └── install.sh └── rabbitmq │ ├── README.md │ ├── cluster.yml │ ├── install-cluster.sh │ ├── install.sh │ └── values.yml ├── tkgi ├── create_user.sh ├── pull-configs.sh ├── push-configs.sh ├── readme.md └── service-topology │ └── profile.json ├── tkgs ├── add-user.sh ├── app-developer.yml ├── 
create-cluster.sh ├── psp-disable.yml ├── register-management-cluster.yml.template ├── register.sh └── varlib-patchfile.yaml ├── tmc ├── policydemo │ ├── mynodeport.yml │ ├── privileged.yml │ └── quota.yml ├── roles │ ├── apps-user.yaml │ ├── databases-user.yaml │ └── tbs-user.yaml └── tmc-attach-cluster.sh ├── tobs ├── README.md ├── collector │ ├── cm.yaml │ ├── ds.yaml │ ├── role.yaml │ ├── rolebinding.yaml │ └── sa.yaml ├── install.sh ├── namespace.yml ├── prometheus │ ├── adapter.yaml │ ├── clusterrole.yml │ ├── clusterrolebinding.yml │ ├── config.yml │ ├── deployment.yml │ ├── sa.yml │ ├── values.yml │ └── virtualservice.yml └── proxy │ ├── proxy.yaml │ └── values.yaml ├── tsm ├── certificate.yml ├── gateway.yml └── install.sh ├── velero ├── README.md ├── create-backup.sh ├── create-restore.sh ├── credentials-velero ├── demo.sh ├── install-csi.sh └── install-restic.sh └── vsphere └── lab-off.sh /.envrc: -------------------------------------------------------------------------------- 1 | echo "Loading lots of things from Vault" 2 | export APPS_DOMAIN="$(vault kv get --field apps secrets/domain)" 3 | export HARBOR_DOMAIN="$(vault kv get --field harbor secrets/domain)" 4 | export NOTARY_DOMAIN="$(vault kv get --field notary secrets/domain)" 5 | export PRIMARY_DOMAIN="$(vault kv get --field primary secrets/domain)" 6 | 7 | export PIVNET_LOGIN="$(vault kv get --field=user secrets/pivnet)" 8 | export PIVNET_PASSWORD="$(vault kv get --field=password secrets/pivnet)" 9 | export PIVNET_API_TOKEN="$(vault kv get --field=token secrets/pivnet)" 10 | 11 | export WAVEFRONT_API_TOKEN="$(vault kv get --field token secrets/wavefront)" 12 | export WAVEFRONT_URL="$(vault kv get --field url secrets/wavefront)" 13 | 14 | export TKGI_HOSTNAME="$(vault kv get --field host secrets/tkgi)" 15 | export TKGI_USER="$(vault kv get --field user secrets/tkgi)" 16 | export TKGI_PASSWORD="$(vault kv get --field password secrets/tkgi)" 17 | export TKGI_ADMIN_CLIENT_SECRET="$(vault kv get --field admin_client_secret secrets/tkgi)" 18 | 19 | export TMC_CLUSTER_GROUP_NAME="$(vault kv get --field cluster_group_name secrets/tmc)" 20 | 21 | export PROXY_HOST="$(vault kv get --field host secrets/proxy)" 22 | export PROXY_PORT="$(vault kv get --field port secrets/proxy)" 23 | 24 | export VCENTER_USER="$(vault kv get --field user secrets/vsphere)" 25 | export VCENTER_PASSWORD="$(vault kv get --field password secrets/vsphere)" 26 | export VCENTER_DATACENTER="$(vault kv get --field datacenter secrets/vsphere)" 27 | export VCENTER_IP="$(vault kv get --field ip secrets/vsphere)" 28 | 29 | export HARBOR_USERNAME="$(vault kv get --field=user secrets/harbor)" 30 | export HARBOR_PASSWORD="$(vault kv get --field=password secrets/harbor)" 31 | 32 | export GOVC_INSECURE="true" 33 | export GOVC_PASSWORD="$(vault kv get --field=password secrets/esxi)" 34 | export GOVC_USERNAME="$(vault kv get --field=user secrets/esxi)" 35 | 36 | export OM_TARGET="$(vault kv get --field=target secrets/opsman)" 37 | export OM_USERNAME="$(vault kv get --field=username secrets/opsman)" 38 | export OM_PASSWORD="$(vault kv get --field=password secrets/opsman)" 39 | 40 | 41 | # YTT homelab 42 | export YTT_HOMELAB_apps_domain="$APPS_DOMAIN" 43 | export YTT_HOMELAB_primary_domain="$PRIMARY_DOMAIN" 44 | export YTT_HOMELAB_harbor_domain="$HARBOR_DOMAIN" 45 | 46 | # YTT wavefront 47 | export YTT_WAVEFRONT_wavefront_token="$WAVEFRONT_API_TOKEN" 48 | export YTT_WAVEFRONT_wavefront_url="$WAVEFRONT_URL/api" 49 | 50 | 
-------------------------------------------------------------------------------- /.gitignore: --------------------------------------------------------------------------------
pipeline/vars.sh

tbs/build-service-1.1.4.tar
tbs/descriptor-100.0.87.yaml
tbs/tbs-install/

tmc/k8s-attach-manifest.yaml

storage/csi-vsphere.conf

acme_fitness/acme_fitness_demo
emojivoto/emojivoto

tds/gemfire/gemfire-operator-*.tgz
tds/gemfire/gemfire-operator

tds/postgres/postgres-for-kubernetes*

tds/rabbitmq/tanzu-rabbitMQ-for-kubernetes-*.tar
tds/rabbitmq/operator-install

tds/mysql/tanzu-mysql-deployment-templates-*
tds/mysql/tanzu-mysql-deployment-templates-*.tgz

tkgi/bosh.yml
tkgi/tkgi.yml
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
# Tanzu Demo
The goal of this repo is to store ready-to-install manifests for Tanzu products as well as popular open-source Kubernetes tools. From here you can demo any functionality you need.

* Building source-to-image with Tanzu Build Service
* Using Helm in an air-gapped scenario with Tanzu Application Catalog and Harbor
* Harbor is an OCI-compliant registry with tons of features
* Self-service TLS certificates with cert-manager
* CI+CD with Concourse
* Ingress and mesh with Istio
* Quickly explore Helm repos with Kubeapps
* Cluster metrics with Tanzu Observability
* Backup and restore both etcd and PVs with Velero + Restic
* Manage policy across all your clusters with Tanzu Mission Control
* Turn a K8s cluster into a multi-tenant microservice host with Tanzu Application Service
* Store objects, including Velero backups, in MinIO


### Preparation
1. Copy `.envrc.template` to `.envrc` and fill out all the values
1. Use `direnv` to load those values into your environment

### Pre-reqs
* Ability to make DNS entries for a domain you own
* `tkgi` to create and authenticate to K8s clusters
* `direnv` to handle environment variables
* `helm` to install Helm charts
* `kapp` to install non-Helm software
* `bash` to run all the install scripts
* `kubectl` and `kubeseal` to create `SealedSecrets`
* `mkcert` for all TLS certs (via cert-manager)

### Architecture Decisions
* Two clusters: TAS runs in one cluster and everything else runs in the other.
* This repo is full of default usernames and passwords. It's meant to be easy to set up and use as a demo environment. It's not meant to be a production environment.
* I use TKGI for my Kubernetes clusters. Most of this project is not dependent on TKGI, but the Concourse tasks use the `tkgi` CLI to authenticate.
* If a piece of software has a Helm chart, I use the Helm chart.
* If a piece of software does not have a Helm chart, I use `ytt` to template and `kapp` to install (see the sketch after this list).
* I use environment variables heavily.
* Demo environments don't need Let's Encrypt, so this project uses `mkcert`, which is much easier.
* The Concourse tasks are not generic or re-usable. This is to make them easier to read and understand.
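For the non-Helm components, the install scripts in this repo follow a ytt-render-then-kapp-deploy pattern (see `concourse/install/install.sh` for a real instance). Below is a minimal sketch of that pattern for a hypothetical app named `example`, assuming `values.yml` declares the data values and `deployment.yml` is the ytt template; `--data-values-env` maps environment variables such as `YTT_HOMELAB_primary_domain` (exported by `.envrc`) onto the `primary_domain` data value.

```bash
#! /usr/bin/env bash

set -euo pipefail

# Render the ytt templates, pulling data values from any environment
# variables prefixed with YTT_HOMELAB_, then hand the rendered YAML to
# kapp so the whole component is tracked as a single app named "example".
kapp deploy \
  -a example \
  -f <(ytt --data-values-env=YTT_HOMELAB \
    -f values.yml \
    -f deployment.yml)
```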

## Component descriptions

### vSphere Storage
Every cluster that has stateful workloads needs a `StorageClass` so that `PersistentVolumes` can be created automatically via `PersistentVolumeClaims`.

### Service Mesh
Istio can be used both for ingress and for sidecar proxying.

### cert-manager
[cert-manager](https://cert-manager.io/docs/) allows you to create certificates as Kubernetes resources. It supports a variety of backends. In this repo, `mkcert` acts as the CA and cert-manager runs in CA mode.

### Harbor
Harbor is an OCI image registry with lots of great security features. Harbor uses Trivy to scan your images for CVEs and can prevent images with CVEs from being downloaded.

### Tanzu Build Service
Tanzu Build Service (TBS) uses Cloud Native Buildpacks to turn source code into OCI images.

### Concourse
Concourse is a container workflow tool commonly used for "CI/CD". Container workflow tools are the "glue" that connects the pieces of the software delivery chain together. In this repo, Concourse hands a git commit to TBS and then sends the resulting image to the Deployment controller.

### Kubeapps
Kubeapps is a GUI for Helm that makes it easy to explore Helm repos.

### Wavefront
The Concourse pipeline in this project creates a Wavefront Event after a new image is deployed. In order for this to work, you need to set up Wavefront. Follow these steps to get Wavefront ready:
1. Follow the [Spring Boot Wavefront tutorial](https://docs.wavefront.com/wavefront_springboot_tutorial.html) to get Spring-Petclinic integrated with Wavefront
1. Clone the default dashboard Wavefront creates for you
1. Edit the clone
1. Click "Settings"
1. Click "Advanced"
1. Add the following events query: `events(name="tanzu-gitops-spring-petclinic-deploy")`
1. In your dashboard, at the top right where it says "Show Events", change it to "From Dashboard Settings". This makes your events query the source of events for all charts in your dashboard.


## Potentially helpful Prometheus queries
I'll switch to Grafana eventually, but I need to get a better grasp of the metrics first.
* `rate(node_network_receive_bytes_total{device="eth0"}[1m])`
* `rate(node_cpu_seconds_total{mode="user"}[1m])`
* `node_memory_MemFree_bytes`
* `node_memory_Active_bytes`

## TODO
* Concourse Helm chart uses deprecated RBAC and uses Docker Hub. Use a newer Helm chart.
* Switch Rabbit image relocation to use `docker load` instead of `docker import`.
* Add no-op tasks like `unit tests` and `static code analysis` to better illustrate TBS' role.
* How to version control the core platform that gets installed.
* Get MinIO going so Concourse can trigger on a new artifact using the s3 resource.
This can illustrate using TBS with pre-compiled artifacts -------------------------------------------------------------------------------- /acme_fitnessv2/cluster1.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: cart-redis 6 | labels: 7 | app: cart-redis 8 | service: cart-redis 9 | spec: 10 | ports: 11 | - port: 6379 12 | name: tcp-redis-cart 13 | selector: 14 | app: cart-redis 15 | service: cart-redis 16 | --- 17 | apiVersion: apps/v1 18 | kind: Deployment 19 | metadata: 20 | name: cart-redis 21 | labels: 22 | app: cart-redis 23 | service: cart-redis 24 | spec: 25 | selector: 26 | matchLabels: 27 | app: cart-redis # has to match .spec.template.metadata.labels 28 | service: cart-redis 29 | replicas: 1 30 | template: 31 | metadata: 32 | labels: 33 | app: cart-redis # has to match .spec.selector.matchLabels 34 | service: cart-redis 35 | spec: 36 | containers: 37 | - name: cart-redis 38 | image: redis:5.0.3-alpine 39 | command: 40 | - "redis-server" 41 | imagePullPolicy: Always 42 | resources: 43 | requests: 44 | cpu: "100m" 45 | memory: "100Mi" 46 | ports: 47 | - name: tcp-redis 48 | containerPort: 6379 49 | protocol: "TCP" 50 | env: 51 | - name: REDIS-HOST 52 | value: 'cart-redis' 53 | - name: REDIS_PASS 54 | valueFrom: 55 | secretKeyRef: 56 | name: redis-pass 57 | key: password 58 | volumeMounts: 59 | - mountPath: /var/lib/redis 60 | name: redis-data 61 | volumes: 62 | - name: redis-data 63 | emptyDir: {} 64 | --- 65 | apiVersion: v1 66 | kind: Service 67 | metadata: 68 | name: cart 69 | labels: 70 | app: cart 71 | service: cart 72 | spec: 73 | ports: 74 | - name: http-cart 75 | protocol: TCP 76 | port: 5000 77 | selector: 78 | app: cart 79 | service: cart 80 | --- 81 | apiVersion: apps/v1 82 | kind: Deployment 83 | metadata: 84 | name: cart 85 | labels: 86 | app: cart 87 | service: cart 88 | spec: 89 | selector: 90 | matchLabels: 91 | app: cart 92 | service: cart 93 | strategy: 94 | type: Recreate 95 | replicas: 1 96 | template: 97 | metadata: 98 | labels: 99 | app: cart 100 | service: cart 101 | spec: 102 | volumes: 103 | - name: cart-data 104 | emptyDir: {} 105 | containers: 106 | - image: gcr.io/vmwarecloudadvocacy/acmeshop-cart:1.0.0 107 | name: cart 108 | env: 109 | - name: REDIS_HOST 110 | value: 'cart-redis' 111 | - name: REDIS_PASS 112 | valueFrom: 113 | secretKeyRef: 114 | name: redis-pass 115 | key: password 116 | - name: REDIS_PORT 117 | value: '6379' 118 | - name: CART_PORT 119 | value: '5000' 120 | ports: 121 | - containerPort: 5000 122 | name: http-cart 123 | volumeMounts: 124 | - mountPath: "/data" 125 | name: "cart-data" 126 | resources: 127 | requests: 128 | memory: "64Mi" 129 | cpu: "100m" 130 | limits: 131 | memory: "256Mi" 132 | cpu: "500m" 133 | --- 134 | apiVersion: v1 135 | kind: Service 136 | metadata: 137 | name: shopping 138 | labels: 139 | app: shopping 140 | service: shopping 141 | spec: 142 | ports: 143 | - name: http-shopping 144 | protocol: TCP 145 | port: 3000 146 | selector: 147 | app: shopping 148 | service: shopping 149 | --- 150 | apiVersion: apps/v1 # for versions before 1.8.0 use apps/v1beta1 151 | kind: Deployment 152 | metadata: 153 | name: shopping 154 | labels: 155 | app: shopping 156 | service: shopping 157 | spec: 158 | selector: 159 | matchLabels: 160 | app: shopping 161 | service: shopping 162 | strategy: 163 | type: Recreate 164 | replicas: 1 165 | template: 166 | metadata: 167 | labels: 168 | app: shopping 169 | service: shopping 170 | 
spec: 171 | containers: 172 | - image: gcr.io/vmwarecloudadvocacy/acmeshop-front-end:rel1 173 | name: shopping 174 | env: 175 | - name: FRONTEND_PORT 176 | value: '3000' 177 | - name: USERS_HOST 178 | value: 'users' 179 | - name: CATALOG_HOST 180 | value: 'catalog.tsm.demo' 181 | - name: ORDER_HOST 182 | value: 'order' 183 | - name: CART_HOST 184 | value: 'cart' 185 | - name: USERS_PORT 186 | value: '8081' 187 | - name: CATALOG_PORT 188 | value: '8082' 189 | - name: CART_PORT 190 | value: '5000' 191 | - name: ORDER_PORT 192 | value: '6000' 193 | ports: 194 | - containerPort: 3000 195 | name: http-shopping 196 | --- 197 | apiVersion: v1 198 | kind: Service 199 | metadata: 200 | name: order-mongo 201 | labels: 202 | app: order-mongo 203 | service: order-mongo 204 | spec: 205 | ports: 206 | - port: 27017 207 | name: mongo-order 208 | protocol: TCP 209 | selector: 210 | app: order-mongo 211 | service: order-mongo 212 | --- 213 | apiVersion: apps/v1 214 | kind: Deployment 215 | metadata: 216 | name: order-mongo 217 | labels: 218 | app: order-mongo 219 | service: order-mongo 220 | spec: 221 | selector: 222 | matchLabels: 223 | app: order-mongo # has to match .spec.template.metadata.labels 224 | service: order-mongo 225 | replicas: 1 226 | template: 227 | metadata: 228 | labels: 229 | app: order-mongo # has to match .spec.selector.matchLabels 230 | service: order-mongo 231 | spec: 232 | containers: 233 | - name: order-mongo 234 | image: mongo:4 235 | resources: 236 | {} 237 | ports: 238 | - name: mongo-order 239 | containerPort: 27017 240 | protocol: "TCP" 241 | env: 242 | - name: MONGO_INITDB_ROOT_USERNAME 243 | value: 'mongoadmin' 244 | - name: MONGO_INITDB_ROOT_PASSWORD 245 | valueFrom: 246 | secretKeyRef: 247 | name: order-mongo-pass 248 | key: password 249 | volumeMounts: 250 | - mountPath: /data/db 251 | name: mongodata 252 | volumes: 253 | - name: mongodata 254 | emptyDir: {} 255 | --- 256 | apiVersion: v1 257 | kind: Service 258 | metadata: 259 | name: order 260 | labels: 261 | app: order 262 | service: order 263 | spec: 264 | ports: 265 | - name: http-order 266 | protocol: TCP 267 | port: 6000 268 | selector: 269 | app: order 270 | service: order 271 | --- 272 | apiVersion: apps/v1 # for versions before 1.8.0 use apps/v1beta1 273 | kind: Deployment 274 | metadata: 275 | name: order 276 | labels: 277 | app: order 278 | service: order 279 | spec: 280 | selector: 281 | matchLabels: 282 | app: order 283 | service: order 284 | strategy: 285 | type: Recreate 286 | replicas: 1 287 | template: 288 | metadata: 289 | labels: 290 | app: order 291 | service: order 292 | spec: 293 | volumes: 294 | - name: order-data 295 | emptyDir: {} 296 | containers: 297 | - image: gcr.io/vmwarecloudadvocacy/acmeshop-order:1.0.1 298 | name: order 299 | env: 300 | - name: ORDER_DB_HOST 301 | value: 'order-mongo' 302 | - name: ORDER_DB_PASSWORD 303 | valueFrom: 304 | secretKeyRef: 305 | name: order-mongo-pass 306 | key: password 307 | - name: ORDER_DB_PORT 308 | value: '27017' 309 | - name: ORDER_DB_USERNAME 310 | value: 'mongoadmin' 311 | - name: ORDER_PORT 312 | value: '6000' 313 | - name: PAYMENT_PORT 314 | value: '9000' 315 | - name: PAYMENT_HOST 316 | value: 'payment' 317 | ports: 318 | - containerPort: 6000 319 | name: http-order 320 | volumeMounts: 321 | - mountPath: "/data" 322 | name: "order-data" 323 | resources: 324 | requests: 325 | memory: "64Mi" 326 | cpu: "100m" 327 | limits: 328 | memory: "256Mi" 329 | cpu: "500m" 330 | --- 331 | apiVersion: v1 332 | kind: Service 333 | metadata: 334 | name: payment 
335 | labels: 336 | app: payment 337 | service: payment 338 | spec: 339 | ports: 340 | - name: http-payment 341 | protocol: TCP 342 | port: 9000 343 | selector: 344 | app: payment 345 | service: payment 346 | --- 347 | apiVersion: apps/v1 348 | kind: Deployment 349 | metadata: 350 | name: payment 351 | labels: 352 | app: payment 353 | service: payment 354 | spec: 355 | selector: 356 | matchLabels: 357 | app: payment 358 | service: payment 359 | strategy: 360 | type: Recreate 361 | replicas: 1 362 | template: 363 | metadata: 364 | labels: 365 | app: payment 366 | service: payment 367 | spec: 368 | containers: 369 | - image: gcr.io/vmwarecloudadvocacy/acmeshop-payment:1.0.0 370 | name: payment 371 | env: 372 | - name: PAYMENT_PORT 373 | value: '9000' 374 | ports: 375 | - containerPort: 9000 376 | name: http-payment 377 | --- 378 | apiVersion: v1 379 | kind: ConfigMap 380 | metadata: 381 | name: users-initdb-config 382 | data: 383 | seed.js: | 384 | db.users.insertMany([ 385 | {"firstname":"Walter","lastname":"White","email":"walter@acmefitness.com","username":"walter","password":"6837ea9b06409112a824d113927ad74fabc5c76e","salt":""} 386 | ,{"firstname":"Dwight","lastname":"Schrute","email":"dwight@acmefitness.com","username":"dwight","password":"6837ea9b06409112a824d113927ad74fabc5c76e","salt":""} 387 | ,{"firstname":"Eric","lastname":"Cartman","email":"eric@acmefitness.com","username":"eric","password":"6837ea9b06409112a824d113927ad74fabc5c76e","salt":""} 388 | ,{"firstname":"Han","lastname":"Solo","email":"han@acmefitness.com","username":"han","password":"6837ea9b06409112a824d113927ad74fabc5c76e","salt":""} 389 | ,{"firstname":"Phoebe","lastname":"Buffay","email":"phoebe@acmefitness.com","username":"phoebe","password":"6837ea9b06409112a824d113927ad74fabc5c76e","salt":""} 390 | ,{"firstname":"Elaine","lastname":"Benes","email":"elaine@acmefitness.com","username":"elaine","password":"6837ea9b06409112a824d113927ad74fabc5c76e","salt":""} 391 | ]); 392 | --- 393 | apiVersion: v1 394 | kind: Service 395 | metadata: 396 | name: users-mongo 397 | labels: 398 | app: users-mongo 399 | service: users-mongo 400 | spec: 401 | ports: 402 | - port: 27017 403 | name: mongo-users 404 | protocol: TCP 405 | selector: 406 | app: users-mongo 407 | service: users-mongo 408 | --- 409 | apiVersion: apps/v1 410 | kind: Deployment 411 | metadata: 412 | name: users-mongo 413 | labels: 414 | app: users-mongo 415 | service: users-db 416 | spec: 417 | selector: 418 | matchLabels: 419 | app: users-mongo # has to match .spec.template.metadata.labels 420 | service: users-mongo 421 | replicas: 1 422 | template: 423 | metadata: 424 | labels: 425 | app: users-mongo # has to match .spec.selector.matchLabels 426 | service: users-mongo 427 | spec: 428 | containers: 429 | - name: users-mongo 430 | image: mongo:4 431 | resources: 432 | {} 433 | ports: 434 | - name: mongo-users 435 | containerPort: 27017 436 | protocol: "TCP" 437 | env: 438 | - name: MONGO_INITDB_ROOT_USERNAME 439 | value: 'mongoadmin' 440 | - name: MONGO_INITDB_DATABASE 441 | value: 'acmefit' 442 | - name: MONGO_INITDB_ROOT_PASSWORD 443 | valueFrom: 444 | secretKeyRef: 445 | name: users-mongo-pass 446 | key: password 447 | volumeMounts: 448 | - mountPath: /data/db 449 | name: mongodata 450 | - mountPath: /docker-entrypoint-initdb.d 451 | name: mongo-initdb 452 | volumes: 453 | - name: mongodata 454 | emptyDir: {} 455 | - name: mongo-initdb 456 | configMap: 457 | name: users-initdb-config 458 | --- 459 | apiVersion: v1 460 | kind: Service 461 | metadata: 462 | 
name: users 463 | labels: 464 | app: users 465 | service: users 466 | spec: 467 | ports: 468 | - name: http-users 469 | protocol: TCP 470 | port: 8081 471 | selector: 472 | app: users 473 | service: users 474 | --- 475 | apiVersion: apps/v1 476 | kind: Deployment 477 | metadata: 478 | name: users 479 | labels: 480 | app: users 481 | service: users 482 | spec: 483 | selector: 484 | matchLabels: 485 | app: users 486 | service: users 487 | strategy: 488 | type: Recreate 489 | replicas: 1 490 | template: 491 | metadata: 492 | labels: 493 | app: users 494 | service: users 495 | spec: 496 | volumes: 497 | - name: users-data 498 | emptyDir: {} 499 | containers: 500 | - image: gcr.io/vmwarecloudadvocacy/acmeshop-user:1.0.0 501 | name: users 502 | env: 503 | - name: USERS_DB_HOST 504 | value: 'users-mongo' 505 | - name: USERS_DB_PASSWORD 506 | valueFrom: 507 | secretKeyRef: 508 | name: users-mongo-pass 509 | key: password 510 | - name: USERS_DB_PORT 511 | value: '27017' 512 | - name: USERS_DB_USERNAME 513 | value: 'mongoadmin' 514 | - name: USERS_PORT 515 | value: '8081' 516 | ports: 517 | - containerPort: 8081 518 | name: http-users 519 | volumeMounts: 520 | - mountPath: "/data" 521 | name: "users-data" 522 | resources: 523 | requests: 524 | memory: "64Mi" 525 | cpu: "100m" 526 | limits: 527 | memory: "256Mi" 528 | cpu: "500m" 529 | --- -------------------------------------------------------------------------------- /acme_fitnessv2/cluster2.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: catalog-initdb-config 6 | data: 7 | seed.js: | 8 | db.catalog.insertMany([ 9 | {"name":"Yoga Mat","shortdescription":"Magic Yoga Mat!","description":"Our Yoga Mat is magic. You will twist into a human pretzel with the greatest of ease. Never done Yoga before? This mat will turn you into an instant professional with barely any work. It’s the American way!. Namaste!","imageurl1":"/static/images/yogamat_square.jpg","imageurl2":"/static/images/yogamat_thumb2.jpg","imageurl3":"/static/images/yogamat_thumb3.jpg","price":62.5,"tags":["mat"]} 10 | ,{"name":"Water Bottle","shortdescription":"The last Water Bottle you'll ever buy!","description":"Our Water Bottle only has to be filled once! That's right. ONCE. Unlimited water, for the rest of your life. Doesn't that $34.99 seem a lot more reasonable now? Stop buying all those other water bottles that you have to keep refilling like a sucker. Get the ACME bottle today!","imageurl1":"/static/images/bottle_square.jpg","imageurl2":"/static/images/bottle_thumb2.jpg","imageurl3":"/static/images/bottle_thumb3.jpg","price":34.9900016784668,"tags":["bottle"]} 11 | ,{"name":"Fit Bike","shortdescription":"Get Light on our Fit Bike!", "description":"Ride like the wind on your very own ACME Fit Bike. Have you ever wanted to travel as fast as a MotoGP racer on a bicycle with tiny tires?! Me too! Get the Fit Bike, and you'll vroom your way into fitness in 30 seconds flat!","imageurl1":"/static/images/bicycle_square.jpg","imageurl2":"/static/images/bicycle_thumb2.jpg","imageurl3":"/static/images/bicycle_thumb3.jpg", "price":499.99,"tags":["bicycle"]} 12 | ,{"name":"Basket Ball","shortdescription":"World's Roundest Basketball!","description":"That's right. You heard me correctly. The worlds ROUNDEST basketball. Are you tired of your current basketball simply not being round enough. Then it's time to step up to the ACME Basketball. 
Get your round on!","imageurl1":"/static/images/basketball_square.jpg","imageurl2":"/static/images/basketball_thumb2.jpg","imageurl3":"/static/images/basketball_thumb3.jpg","price":110.75,"tags":["basketball"]} 13 | ,{"name":"Smart Watch","shortdescription":"The watch that makes you smarter!","description":"Do you have trouble remembering things? Can you not remember what day it is? Do you need a robot with a cute women's voice to tell you when to stand up and walk around? Then boy do we have the watch for you! Get the ACME Smart Watch, and never have to remember anything ever again!","imageurl1":"/static/images/smartwatch_square.jpg","imageurl2":"/static/images/smartwatch_thumb2.jpg","imageurl3":"/static/images/smartwatch_thumb3.jpg","price":399.5899963378906,"tags":["watch"]} 14 | ,{"name":"Red Pants","shortdescription":"Because who doesn't need red pants??", "description":"Have you found yourself walking around tech conferences in the same old jeans and vendor t-shirt? Do you need to up your pants game? ACME Red Pants are 100% GUARANTEED to take you to a whole new level. Women will want to meet you. Men will want to be you. You are... Fancy Pants. What are you waiting for??","imageurl1":"/static/images/redpants_square.jpg","imageurl2":"/static/images/redpants_thumb2.jpg","imageurl3":"/static/images/redpants_thumb3.jpg", "price":99.0,"tags":["clothing"]} 15 | ,{"name":"Running shoes","shortdescription":"Mama says they was magic shoes!", "description":"And she was right! Are you slow? Out of shape? But still ready to take on Usain Bolt in the 100? Then strap up your ACME Running Shoes and Run Forest, Run! These shoes will make you run the 100 in 2.5 flat!","imageurl1":"/static/images/shoes_square.jpg","imageurl2":"/static/images/shoes_thumb2.jpg","imageurl3":"/static/images/shoes_thumb3.jpg", "price":120.00,"tags":["running"]} 16 | ,{"name":"Weights","shortdescription":"Get ripped without breaking a sweat!","description":"Are you ready to get Pumped Up with Hanz and Franz? Or get swole like Arnold? It's time to hit the Add to Cart button on the ACME Weights. 
Just 45 seconds a day, 3 days a week, and you'll be showing those Muscle Beach clowns how it's done in no time!","imageurl1":"/static/images/weights_square.jpg","imageurl2":"/static/images/weights_thumb2.jpg","imageurl3":"/static/images/weights_thumb3.jpg", "price":49.99,"tags":["weight"]} ]); 17 | --- 18 | apiVersion: v1 19 | kind: Service 20 | metadata: 21 | name: catalog-mongo 22 | labels: 23 | app: catalog-mongo 24 | service: catalog-mongo 25 | spec: 26 | ports: 27 | - port: 27017 28 | name: mongo-catalog 29 | protocol: TCP 30 | selector: 31 | app: catalog-mongo 32 | service: catalog-mongo 33 | --- 34 | apiVersion: apps/v1 35 | kind: Deployment 36 | metadata: 37 | name: catalog-mongo 38 | labels: 39 | app: catalog-mongo 40 | service: catalog-mongo 41 | spec: 42 | selector: 43 | matchLabels: 44 | app: catalog-mongo # has to match .spec.template.metadata.labels 45 | service: catalog-mongo 46 | replicas: 1 47 | template: 48 | metadata: 49 | labels: 50 | app: catalog-mongo # has to match .spec.selector.matchLabels 51 | service: catalog-mongo 52 | spec: 53 | containers: 54 | - name: catalog-mongo 55 | image: mongo:4 56 | resources: 57 | {} 58 | ports: 59 | - name: mongo-catalog 60 | containerPort: 27017 61 | protocol: "TCP" 62 | env: 63 | - name: MONGO_INITDB_ROOT_USERNAME 64 | value: 'mongoadmin' 65 | - name: MONGO_INITDB_DATABASE 66 | value: 'acmefit' 67 | - name: MONGO_INITDB_ROOT_PASSWORD 68 | valueFrom: 69 | secretKeyRef: 70 | name: catalog-mongo-pass 71 | key: password 72 | volumeMounts: 73 | - mountPath: /data/db 74 | name: mongodata 75 | - mountPath: /docker-entrypoint-initdb.d 76 | name: mongo-initdb 77 | volumes: 78 | - name: mongodata 79 | emptyDir: {} 80 | - name: mongo-initdb 81 | configMap: 82 | name: catalog-initdb-config 83 | --- 84 | apiVersion: v1 85 | kind: Service 86 | metadata: 87 | name: catalog 88 | labels: 89 | app: catalog 90 | service: catalog 91 | spec: 92 | ports: 93 | - name: http-catalog 94 | protocol: TCP 95 | port: 8082 96 | selector: 97 | app: catalog 98 | service: catalog 99 | --- 100 | apiVersion: apps/v1 101 | kind: Deployment 102 | metadata: 103 | name: catalog 104 | labels: 105 | app: catalog 106 | service: catalog 107 | version: v1 108 | spec: 109 | selector: 110 | matchLabels: 111 | app: catalog 112 | service: catalog 113 | version: v1 114 | strategy: 115 | type: Recreate 116 | replicas: 1 117 | template: 118 | metadata: 119 | labels: 120 | app: catalog 121 | service: catalog 122 | version: v1 123 | spec: 124 | volumes: 125 | - name: catalog-data 126 | emptyDir: {} 127 | containers: 128 | - image: gcr.io/vmwarecloudadvocacy/acmeshop-catalog:rel1 129 | name: catalog 130 | env: 131 | - name: CATALOG_DB_HOST 132 | value: 'catalog-mongo' 133 | - name: CATALOG_DB_PASSWORD 134 | valueFrom: 135 | secretKeyRef: 136 | name: catalog-mongo-pass 137 | key: password 138 | - name: CATALOG_DB_PORT 139 | value: '27017' 140 | - name: CATALOG_DB_USERNAME 141 | value: 'mongoadmin' 142 | - name: CATALOG_PORT 143 | value: '8082' 144 | ports: 145 | - containerPort: 8082 146 | name: http-catalog 147 | volumeMounts: 148 | - mountPath: "/data" 149 | name: "catalog-data" 150 | resources: 151 | requests: 152 | memory: "64Mi" 153 | cpu: "100m" 154 | limits: 155 | memory: "256Mi" 156 | cpu: "500m" -------------------------------------------------------------------------------- /acme_fitnessv2/install_cluster1.sh: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | kapp deploy \ 6 | --into-ns acme-fitness \ 7 | -a acme-fitness-cluster1 \ 8 | -f cluster1.yml \ 9 | -f secrets.yml -------------------------------------------------------------------------------- /acme_fitnessv2/install_cluster1_istio.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | kapp deploy \ 6 | -a acme-fitness-cluster1-istio \ 7 | -f istio.yml -------------------------------------------------------------------------------- /acme_fitnessv2/install_cluster2.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | kapp deploy \ 6 | --into-ns acme-fitness \ 7 | -a acme-fitness-cluster2 \ 8 | -f cluster2.yml \ 9 | -f secrets.yml -------------------------------------------------------------------------------- /acme_fitnessv2/istio.yml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.istio.io/v1alpha3 2 | kind: Gateway 3 | metadata: 4 | name: acme-gateway 5 | namespace: acme-fitness 6 | spec: 7 | selector: 8 | istio: ingressgateway # use istio default controller 9 | servers: 10 | - port: 11 | number: 443 12 | name: https 13 | protocol: HTTPS 14 | tls: 15 | mode: SIMPLE 16 | credentialName: acme-fitness-tls 17 | hosts: 18 | - "acme-fitness.lab.home" 19 | --- 20 | apiVersion: networking.istio.io/v1alpha3 21 | kind: VirtualService 22 | metadata: 23 | name: acme 24 | namespace: acme-fitness 25 | spec: 26 | hosts: 27 | - "acme-fitness.lab.home" 28 | gateways: 29 | - acme-gateway 30 | http: 31 | - match: 32 | - uri: 33 | prefix: / 34 | route: 35 | - destination: 36 | host: shopping 37 | port: 38 | number: 3000 39 | --- 40 | apiVersion: cert-manager.io/v1alpha2 41 | kind: Certificate 42 | metadata: 43 | name: acme-fitness-tls 44 | namespace: istio-system 45 | spec: 46 | dnsNames: 47 | - "acme-fitness.lab.home" 48 | issuerRef: 49 | kind: ClusterIssuer 50 | name: mkcert 51 | secretName: acme-fitness-tls -------------------------------------------------------------------------------- /acme_fitnessv2/secrets.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: redis-pass 6 | type: Opaque 7 | data: 8 | password: cGFzc3dvcmQ= 9 | --- 10 | apiVersion: v1 11 | kind: Secret 12 | metadata: 13 | name: catalog-mongo-pass 14 | type: Opaque 15 | data: 16 | password: cGFzc3dvcmQ= 17 | --- 18 | apiVersion: v1 19 | kind: Secret 20 | metadata: 21 | name: order-mongo-pass 22 | type: Opaque 23 | data: 24 | password: cGFzc3dvcmQ= 25 | --- 26 | apiVersion: v1 27 | kind: Secret 28 | metadata: 29 | name: users-mongo-pass 30 | type: Opaque 31 | data: 32 | password: cGFzc3dvcmQ= 33 | --- -------------------------------------------------------------------------------- /acme_fitnessv2/traffic/Dockerfile: -------------------------------------------------------------------------------- 1 | from python:2.7.18 2 | 3 | copy locustfile.py /root/ 4 | copy requirements.txt /root/ 5 | copy start.sh /root/ 6 | 7 | run pip install -r /root/requirements.txt 8 | 9 | workdir /root/ 10 | 11 | -------------------------------------------------------------------------------- /acme_fitnessv2/traffic/build_docker.sh: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | docker build -t acme_fitness_traffic . -------------------------------------------------------------------------------- /acme_fitnessv2/traffic/launch_docker.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | docker run --rm -it -p 8089:8089 acme_fitness_traffic /root/start.sh -------------------------------------------------------------------------------- /acme_fitnessv2/traffic/locustfile.py: -------------------------------------------------------------------------------- 1 | # This program will generate traffic for ACME Fitness Shop App. It simulates both Authenticated and Guest user scenarios. You can run this program either from Command line or from 2 | # the web based UI. Refer to the "locust" documentation for further information. 3 | 4 | from locust import HttpLocust, TaskSet, task, TaskSequence, seq_task, Locust 5 | import random 6 | 7 | # List of users (pre-loaded into ACME Fitness shop) 8 | users = ["eric", "phoebe", "dwight", "han"] 9 | 10 | # List of products within the catalog 11 | products = [] 12 | 13 | import logging 14 | 15 | # GuestUserBrowsing simulates traffic for a Guest User (Not logged in) 16 | class GuestUserBrowsing(TaskSequence): 17 | 18 | def on_start(self): 19 | self.getProducts() 20 | 21 | def listCatalogItems(self): 22 | items = self.client.get("/products",verify=False).json()["data"] 23 | for item in items: 24 | products.append(item["id"]) 25 | return products 26 | 27 | @task(1) 28 | def getProducts(self): 29 | logging.info("Guest User - Get Products") 30 | self.client.get("/products",verify=False) 31 | 32 | @task(2) 33 | def getProduct(self): 34 | logging.info("Guest User - Get a product") 35 | products = self.listCatalogItems() 36 | id = random.choice(products) 37 | product = self.client.get("/products/"+ id,verify=False).json() 38 | logging.info("Product info - " + str(product)) 39 | products[:] = [] 40 | 41 | # AuthUserBrowsing simulates traffic for Authenticated Users (Logged in) 42 | class AuthUserBrowsing(TaskSequence): 43 | 44 | def on_start(self): 45 | self.login() 46 | 47 | @seq_task(1) 48 | @task(1) 49 | def login(self): 50 | user = random.choice(users) 51 | logging.info("Auth User - Login user " + user) 52 | body = self.client.post("/login/", json={"username": user, "password":"vmware1!"},verify=False).json() 53 | self.locust.userid = body["token"] 54 | 55 | @seq_task(2) 56 | @task(1) 57 | def getProducts(self): 58 | logging.info("Auth User - Get Catalog") 59 | self.client.get("/products",verify=False) 60 | 61 | @seq_task(3) 62 | @task(2) 63 | def getProduct(self): 64 | logging.info("Auth User - Get a product") 65 | products = self.listCatalogItems() 66 | id = random.choice(products) 67 | product = self.client.get("/products/"+ id,verify=False).json() 68 | logging.info("Product info - " + str(product)) 69 | products[:] = [] 70 | 71 | 72 | @seq_task(4) 73 | @task(2) 74 | def addToCart(self): 75 | self.listCatalogItems() 76 | productid = random.choice(products) 77 | logging.info("Add to Cart for user " + self.locust.userid) 78 | cart = self.client.post("/cart/item/add/" + self.locust.userid, json={ 79 | "name": productid, 80 | "price": "100", 81 | "shortDescription": "Test add to cart", 82 | "quantity": random.randint(1,2), 83 | "itemid": productid 84 | },verify=False) 85 | products[:] = [] 86 | 87 | 88 | @seq_task(5) 89 | @task(1) 90 | def checkout(self): 91 | userCart = self.client.get("/cart/items/" + 
self.locust.userid,verify=False).json() 92 | order = self.client.post("/order/add/"+ self.locust.userid, json={ "userid":"8888", 93 | "firstname":"Eric", 94 | "lastname": "Cartman", 95 | "address":{ 96 | "street":"20 Riding Lane Av", 97 | "city":"San Francisco", 98 | "zip":"10201", 99 | "state": "CA", 100 | "country":"USA"}, 101 | "email":"jblaze@marvel.com", 102 | "delivery":"UPS/FEDEX", 103 | "card":{ 104 | "type":"amex/visa/mastercard/bahubali", 105 | "number":"349834797981", 106 | "expMonth":"12", 107 | "expYear": "2022", 108 | "ccv":"123" 109 | }, 110 | "cart":[ 111 | {"id":"1234", "description":"redpants", "quantity":"1", "price":"4"}, 112 | {"id":"5678", "description":"bluepants", "quantity":"1", "price":"4"} 113 | ], 114 | "total":"100"},verify=False) 115 | 116 | 117 | def listCatalogItems(self): 118 | items = self.client.get("/products",verify=False).json()["data"] 119 | for item in items: 120 | products.append(item["id"]) 121 | return products 122 | 123 | @task(2) 124 | def index(self): 125 | self.client.get("/",verify=False) 126 | 127 | 128 | class UserBehavior(TaskSet): 129 | 130 | tasks = {AuthUserBrowsing:2, GuestUserBrowsing:1} 131 | 132 | 133 | class WebSiteUser(HttpLocust): 134 | 135 | task_set = UserBehavior 136 | userid = "" 137 | min_wait = 2000 138 | max_wait = 10000 139 | 140 | 141 | -------------------------------------------------------------------------------- /acme_fitnessv2/traffic/requirements.txt: -------------------------------------------------------------------------------- 1 | certifi==2019.3.9 2 | chardet==3.0.4 3 | Click==7.0 4 | Flask==1.0.2 5 | gevent==1.4.0 6 | greenlet==0.4.15 7 | idna==2.8 8 | itsdangerous==1.1.0 9 | Jinja2==2.10.1 10 | locustio==0.11.0 11 | MarkupSafe==1.1.1 12 | msgpack==0.6.1 13 | pyzmq==18.0.1 14 | requests==2.21.0 15 | six==1.12.0 16 | urllib3==1.24.2 17 | Werkzeug==0.15.2 -------------------------------------------------------------------------------- /acme_fitnessv2/traffic/start.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | locust --host=https://acme-fitness.lab.home 6 | 7 | -------------------------------------------------------------------------------- /argo-rollouts/install.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | kapp deploy -a argo-rollouts \ 6 | -f namespace.yml \ 7 | --into-ns=argo-rollouts \ 8 | -f https://raw.githubusercontent.com/argoproj/argo-rollouts/stable/manifests/install.yaml 9 | -------------------------------------------------------------------------------- /argo-rollouts/namespace.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: argo-rollouts -------------------------------------------------------------------------------- /avi/readme.md: -------------------------------------------------------------------------------- 1 | First step, get the Avi Controller located on my.vmware.com. It's called "NSX Advanced Load Balancer". The download link passes you through to the Avi website. Download the OVA 2 | 3 | On the download page it mentions a "default password". That is actually the 'sysadmin default login key' which the OVA install process asks you for. 
You can just leave it blank and it fills it in with that default password -------------------------------------------------------------------------------- /cert-manager/clusterissuer.yml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: mkcert 5 | spec: 6 | ca: 7 | secretName: mkcert -------------------------------------------------------------------------------- /cert-manager/install.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | helm upgrade --install cert-manager cert-manager \ 6 | --repo https://charts.jetstack.io \ 7 | --version 1.2.0 \ 8 | --namespace cert-manager \ 9 | --create-namespace \ 10 | --set installCRDs=true \ 11 | --wait 12 | 13 | 14 | kapp deploy -a cert-manager \ 15 | -f clusterissuer.yml \ 16 | -f <(kubectl create secret generic mkcert \ 17 | --from-file=tls.crt="$(mkcert -CAROOT)"/rootCA.pem \ 18 | --from-file=tls.key="$(mkcert -CAROOT)"/rootCA-key.pem \ 19 | --namespace cert-manager \ 20 | --dry-run=client \ 21 | -o yaml) -------------------------------------------------------------------------------- /concourse/Helper/.gitignore: -------------------------------------------------------------------------------- 1 | spring-petclinic/ 2 | kubectl 3 | tkgi 4 | kp 5 | kapp 6 | ytt 7 | pivnet 8 | charts-syncer 9 | helm 10 | rootCA.pem -------------------------------------------------------------------------------- /concourse/Helper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:18.04 2 | 3 | RUN apt-get update \ 4 | && apt-get install -y --no-install-recommends docker.io git wget curl ca-certificates \ 5 | && rm -rf /var/lib/apt/lists/* 6 | 7 | 8 | COPY kp /usr/bin/ 9 | COPY kubectl /usr/bin/ 10 | COPY pivnet /usr/bin/ 11 | COPY tkgi /usr/bin/ 12 | COPY charts-syncer /usr/bin/ 13 | COPY helm /usr/bin 14 | COPY rootCA.pem /tmp/ 15 | 16 | 17 | 18 | RUN cat /tmp/rootCA.pem >> /etc/ssl/certs/ca-certificates.crt -------------------------------------------------------------------------------- /concourse/Helper/build-concourse-helper.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | cp "$(mkcert -CAROOT)/rootCA.pem" . 6 | docker build -t $HARBOR_DOMAIN/library/concourse-helper:1 . 7 | docker push $HARBOR_DOMAIN/library/concourse-helper:1 -------------------------------------------------------------------------------- /concourse/install/certificate.yml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | apiVersion: cert-manager.io/v1alpha2 4 | kind: Certificate 5 | metadata: 6 | name: concourse 7 | namespace: concourse 8 | spec: 9 | dnsNames: 10 | - #@ "concourse." 
+ data.values.primary_domain 11 | issuerRef: 12 | kind: ClusterIssuer 13 | name: mkcert 14 | secretName: concourse-tls -------------------------------------------------------------------------------- /concourse/install/helm.yml: -------------------------------------------------------------------------------- 1 | concourse: 2 | web: 3 | tls: 4 | enabled: false 5 | kubernetes: 6 | keepNamespaces: false 7 | web: 8 | service: 9 | api: 10 | type: ClusterIP 11 | ingress: 12 | enabled: false -------------------------------------------------------------------------------- /concourse/install/ingress.yml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | 4 | apiVersion: networking.k8s.io/v1 5 | kind: Ingress 6 | metadata: 7 | name: concourse 8 | namespace: concourse 9 | spec: 10 | tls: 11 | - hosts: 12 | - #@ "concourse." + data.values.primary_domain 13 | secretName: concourse-tls 14 | rules: 15 | - host: #@ "concourse." + data.values.primary_domain 16 | http: 17 | paths: 18 | - path: / 19 | pathType: Prefix 20 | backend: 21 | service: 22 | name: concourse-web 23 | port: 24 | number: 8080 -------------------------------------------------------------------------------- /concourse/install/install.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | helm upgrade --install concourse concourse \ 6 | --repo https://concourse-charts.storage.googleapis.com \ 7 | --create-namespace \ 8 | --namespace concourse \ 9 | --version 14.6.0 \ 10 | --values helm.yml \ 11 | --set concourse.web.externalUrl="https://concourse.databases.$PRIMARY_DOMAIN" \ 12 | --wait 13 | 14 | 15 | kapp deploy \ 16 | -a concourse \ 17 | -f <(kubectl create secret generic tanzu-gitops \ 18 | --namespace concourse-main \ 19 | --from-literal=tkgi_url="https://${TKGI_HOSTNAME}:9021" \ 20 | --from-literal=tkgi_user="${TKGI_USER}" \ 21 | --from-literal=tkgi_password="${TKGI_PASSWORD}" \ 22 | --from-file=ca_cert="$(mkcert -CAROOT)/rootCA.pem" \ 23 | --from-literal=wavefront_api_token="${WAVEFRONT_API_TOKEN}" \ 24 | --from-literal=wavefront_url="${WAVEFRONT_URL}" \ 25 | --from-literal=pivnet_api_token="${PIVNET_API_TOKEN}" \ 26 | --from-literal=pivnet_username="${PIVNET_LOGIN}" \ 27 | --from-literal=pivnet_password="${PIVNET_PASSWORD}" \ 28 | --dry-run=client \ 29 | -o yaml) \ 30 | -f <(ytt --data-values-env=YTT_HOMELAB \ 31 | -f virtualservice.yml \ 32 | -f values.yml) 33 | -------------------------------------------------------------------------------- /concourse/install/values.yml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | harbor_domain: "" 4 | apps_domain: "" 5 | primary_domain: "" -------------------------------------------------------------------------------- /concourse/install/virtualservice.yml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | apiVersion: networking.istio.io/v1alpha3 4 | kind: VirtualService 5 | metadata: 6 | name: concourse 7 | namespace: concourse 8 | spec: 9 | hosts: 10 | - concourse.databases.lab.home 11 | gateways: 12 | - app-gateway.istio-system.svc.cluster.local 13 | http: 14 | - match: 15 | - uri: 16 | prefix: / 17 | route: 18 | - destination: 19 | host: concourse-web 20 | port: 21 | number: 8080 -------------------------------------------------------------------------------- /concourse/pipeline/charts-syncer.yml: 
-------------------------------------------------------------------------------- 1 | resources: 2 | - name: my-timer 3 | type: time 4 | source: 5 | interval: 720m 6 | - name: concourse-helper 7 | type: docker-image 8 | source: 9 | repository: ((harbordomain))/library/concourse-helper 10 | tag: 1 11 | ca_certs: 12 | - domain: ((harbordomain)) 13 | cert: | 14 | ((tanzu-gitops.ca_cert)) 15 | - name: tanzu-gitops 16 | type: git 17 | source: 18 | uri: https://github.com/techgnosis/tanzu-gitops.git 19 | branch: master 20 | paths: 21 | - "concourse/tasks/**" 22 | 23 | jobs: 24 | - name: sync-helm-charts-to-harbor 25 | public: true 26 | serial: true 27 | plan: 28 | - get: my-timer 29 | trigger: true 30 | - get: concourse-helper 31 | - get: tanzu-gitops 32 | - task: sync-tac-to-harbor 33 | image: concourse-helper 34 | file: tanzu-gitops/concourse/tasks/charts-syncer/charts-syncer.yml 35 | 36 | -------------------------------------------------------------------------------- /concourse/pipeline/demo-app.yml: -------------------------------------------------------------------------------- 1 | resources: 2 | - name: demo-app 3 | type: git 4 | source: 5 | uri: https://github.com/techgnosis/demo-app.git 6 | branch: master 7 | - name: tanzu-gitops 8 | type: git 9 | source: 10 | uri: https://github.com/techgnosis/tanzu-gitops.git 11 | branch: master 12 | paths: 13 | - "concourse/tasks/**" 14 | - name: demo-app-image 15 | type: docker-image 16 | source: 17 | repository: ((harbordomain))/library/demo-app 18 | tag: latest 19 | ca_certs: 20 | - domain: ((harbordomain)) 21 | cert: | 22 | ((tanzu-gitops.ca_cert)) 23 | - name: concourse-helper 24 | type: docker-image 25 | source: 26 | repository: ((harbordomain))/library/concourse-helper 27 | tag: 1 28 | ca_certs: 29 | - domain: ((harbordomain)) 30 | cert: | 31 | ((tanzu-gitops.ca_cert)) 32 | 33 | 34 | 35 | 36 | jobs: 37 | - name: build-with-tanzu-build-service 38 | public: true 39 | serial: true 40 | plan: 41 | - get: demo-app 42 | trigger: true 43 | - get: concourse-helper 44 | - get: tanzu-gitops 45 | - task: handoff-to-tanzu-build-service 46 | image: concourse-helper 47 | file: tanzu-gitops/concourse/tasks/build-image/build-image.yml 48 | input_mapping: 49 | source-code: demo-app 50 | params: 51 | tkgicluster: databases 52 | tkgiapi: ((tanzu-gitops.tkgi_url)) 53 | tkgiuser: ((tanzu-gitops.tkgi_user)) 54 | tkgipassword: ((tanzu-gitops.tkgi_password)) 55 | image: demo-app 56 | 57 | 58 | 59 | - name: deploy-to-cluster 60 | public: true 61 | serial: true 62 | plan: 63 | - get: demo-app-image 64 | trigger: true 65 | - get: tanzu-gitops 66 | - get: concourse-helper 67 | - task: create-wavefront-event 68 | image: concourse-helper 69 | file: tanzu-gitops/concourse/tasks/create-wavefront-event/create-wavefront-event.yml 70 | params: 71 | WAVEFRONT_API_TOKEN: ((tanzu-gitops.wavefront_api_token)) 72 | WAVEFRONT_URL: ((tanzu-gitops.wavefront_url)) 73 | - task: handoff-to-kubernetes 74 | file: tanzu-gitops/concourse/tasks/deploy-image/deploy-image.yml 75 | image: concourse-helper 76 | input_mapping: 77 | image: demo-app-image 78 | params: 79 | tkgicluster: apps 80 | tkgiapi: ((tanzu-gitops.tkgi_url)) 81 | tkgiuser: ((tanzu-gitops.tkgi_user)) 82 | tkgipassword: ((tanzu-gitops.tkgi_password)) 83 | harbordomain: ((harbordomain)) 84 | namespace: demo-app 85 | deployment: demo-app 86 | container: demo-app 87 | image: demo-app 88 | 89 | 90 | 91 | -------------------------------------------------------------------------------- /concourse/pipeline/fly.sh: 
-------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | fly login \ 6 | --target=lab \ 7 | --concourse-url="https://concourse.databases.$PRIMARY_DOMAIN" \ 8 | --username=test \ 9 | --password=test 10 | 11 | fly set-pipeline -t lab \ 12 | -p demo-app \ 13 | -c demo-app.yml \ 14 | -v harbordomain=$HARBOR_DOMAIN 15 | 16 | fly set-pipeline -t lab \ 17 | -p tbs-update \ 18 | -c tbs-update.yml \ 19 | -v harbordomain=$HARBOR_DOMAIN 20 | 21 | fly set-pipeline -t lab \ 22 | -p tanzu-app-catalog-sync \ 23 | -c charts-syncer.yml \ 24 | -v harbordomain=$HARBOR_DOMAIN 25 | -------------------------------------------------------------------------------- /concourse/pipeline/tbs-update.yml: -------------------------------------------------------------------------------- 1 | resource_types: 2 | - name: pivnet 3 | type: docker-image 4 | source: 5 | repository: pivotalcf/pivnet-resource 6 | tag: v2.0.0 7 | 8 | resources: 9 | - name: tbs-dependencies 10 | type: pivnet 11 | source: 12 | api_token: ((tanzu-gitops.pivnet_api_token)) 13 | product_slug: tbs-dependencies 14 | - name: concourse-helper 15 | type: docker-image 16 | source: 17 | repository: ((harbordomain))/library/concourse-helper 18 | tag: 1 19 | ca_certs: 20 | - domain: ((harbordomain)) 21 | cert: | 22 | ((tanzu-gitops.ca_cert)) 23 | - name: tanzu-gitops 24 | type: git 25 | source: 26 | uri: https://github.com/techgnosis/tanzu-gitops.git 27 | branch: master 28 | paths: 29 | - "concourse/tasks/**" 30 | 31 | jobs: 32 | - name: deploy-tbs-dependencies 33 | public: true 34 | serial: true 35 | plan: 36 | - get: concourse-helper 37 | - get: tanzu-gitops 38 | - get: tbs-dependencies 39 | trigger: true 40 | - task: deploy-tbs-dependencies 41 | image: concourse-helper 42 | file: tanzu-gitops/concourse/tasks/deploy-tbs-dependencies/deploy-tbs-dependencies.yml 43 | params: 44 | tkgicluster: cluster1 45 | tkgiapi: ((tanzu-gitops.tkgi_url)) 46 | tkgiuser: ((tanzu-gitops.tkgi_user)) 47 | tkgipassword: ((tanzu-gitops.tkgi_password)) 48 | pivnetusername: ((tanzu-gitops.pivnet_username)) 49 | pivnetpassword: ((tanzu-gitops.pivnet_password)) 50 | harbordomain: ((harbordomain)) 51 | 52 | 53 | -------------------------------------------------------------------------------- /concourse/show-pipeline-variables.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | # Show me everything that looks like (( anything )) 6 | rg '\(\(.+\)\)' -------------------------------------------------------------------------------- /concourse/tasks/build-image/build-image.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -xeuo pipefail 4 | 5 | #tkgi login -a ${tkgiapi} \ 6 | #-u ${tkgiuser} \ 7 | #-p ${tkgipassword} \ 8 | #-k 9 | 10 | #PKS_USER_PASSWORD=${tkgipassword} tkgi get-credentials ${tkgicluster} 11 | 12 | KUBECTL_VSPHERE_PASSWORD=Tkgs-admin1! 
\ 13 | kubectl vsphere login \ 14 | --server=10.0.3.2 \ 15 | --vsphere-username tkgs-admin@vsphere.local \ 16 | --tanzu-kubernetes-cluster-name jmusselwhite-shared-services \ 17 | --tanzu-kubernetes-cluster-namespace myfriendthenamespace \ 18 | --insecure-skip-tls-verify 19 | 20 | kubectl config use-context jmusselwhite-shared-services 21 | cd source-code 22 | REVISION=$(git rev-parse HEAD) 23 | kp -n demo-app image patch ${image} --git-revision ${REVISION} 24 | 25 | -------------------------------------------------------------------------------- /concourse/tasks/build-image/build-image.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | 3 | inputs: 4 | - name: tanzu-gitops 5 | - name: source-code 6 | 7 | params: 8 | tkgicluster: 9 | tkgiapi: 10 | tkgiuser: 11 | tkgipassword: 12 | image: 13 | 14 | run: 15 | path: "tanzu-gitops/concourse/tasks/build-image/build-image.sh" -------------------------------------------------------------------------------- /concourse/tasks/charts-syncer/charts-syncer.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -xeuo pipefail 4 | 5 | DATE="2021-01-01" 6 | 7 | charts-syncer sync --config tanzu-gitops/tac/tac.yaml --from-date $DATE -------------------------------------------------------------------------------- /concourse/tasks/charts-syncer/charts-syncer.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | 3 | 4 | inputs: 5 | - name: tanzu-gitops 6 | 7 | 8 | run: 9 | path: "tanzu-gitops/concourse/tasks/charts-syncer/charts-syncer.sh" -------------------------------------------------------------------------------- /concourse/tasks/create-wavefront-event/create-wavefront-event.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | START_TIME=$(date +%s000) 6 | sleep 1 7 | END_TIME=$(date +%s000) 8 | 9 | 10 | curl \ 11 | -X POST \ 12 | --header "Content-Type: application/json" \ 13 | --header "Accept: application/json" \ 14 | --header "Authorization: Bearer ${WAVEFRONT_API_TOKEN}" \ 15 | -d "{ 16 | \"name\": \"jmusselwhite-demoapp\", 17 | \"annotations\": { 18 | \"severity\": \"info\", 19 | \"type\": \"image deploy\", 20 | \"details\": \"new demoapp image deployed\" 21 | }, 22 | \"startTime\": "${START_TIME}", 23 | \"endTime\": "${END_TIME}" 24 | }" "${WAVEFRONT_URL}/api/v2/event" -------------------------------------------------------------------------------- /concourse/tasks/create-wavefront-event/create-wavefront-event.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | 3 | inputs: 4 | - name: tanzu-gitops 5 | 6 | 7 | params: 8 | WAVEFRONT_API_TOKEN: 9 | WAVEFRONT_URL: 10 | 11 | 12 | run: 13 | path: "tanzu-gitops/concourse/tasks/create-wavefront-event/create-wavefront-event.sh" -------------------------------------------------------------------------------- /concourse/tasks/deploy-image/deploy-image.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -xeuo pipefail 4 | 5 | #tkgi login -a ${tkgiapi} \ 6 | #-u ${tkgiuser} \ 7 | #-p ${tkgipassword} \ 8 | #-k 9 | 10 | #PKS_USER_PASSWORD=${tkgipassword} tkgi get-credentials ${tkgicluster} 11 | 12 | KUBECTL_VSPHERE_PASSWORD=Tkgs-admin1! 
\ 13 | kubectl vsphere login \ 14 | --server=10.0.3.2 \ 15 | --vsphere-username tkgs-admin@vsphere.local \ 16 | --tanzu-kubernetes-cluster-name jmusselwhite-frontend \ 17 | --tanzu-kubernetes-cluster-namespace myfriendthenamespace \ 18 | --insecure-skip-tls-verify 19 | 20 | kubectl config use-context jmusselwhite-frontend 21 | export DIGEST=$(cat image/digest) 22 | kubectl -n ${namespace} set image deployment/${deployment} ${container}=${harbordomain}/library/${image}@${DIGEST} -------------------------------------------------------------------------------- /concourse/tasks/deploy-image/deploy-image.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | 3 | 4 | inputs: 5 | - name: tanzu-gitops 6 | - name: image 7 | 8 | 9 | params: 10 | tkgicluster: 11 | tkgiapi: 12 | tkgiuser: 13 | tkgipassword: 14 | harbordomain: 15 | namespace: 16 | deployment: 17 | container: 18 | image: 19 | 20 | run: 21 | path: "tanzu-gitops/concourse/tasks/deploy-image/deploy-image.sh" -------------------------------------------------------------------------------- /concourse/tasks/deploy-tbs-dependencies/deploy-tbs-dependencies.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -xeuo pipefail 4 | 5 | tkgi login -a ${tkgiapi} \ 6 | -u ${tkgiuser} \ 7 | -p ${tkgipassword} \ 8 | -k 9 | 10 | tkgi get-credentials ${tkgicluster} 11 | 12 | docker login registry.pivotal.io -u ${pivnetusername} -p ${pivnetpassword} 13 | 14 | # Do not delete this echo statement. For some reason 15 | # if you do then the docker login afterward will try 16 | # to login to 'domain' instead of the value of harbordomain. 17 | # It's got to be something about how docker login processes 18 | # text...who knows. 19 | # I know you don't believe me - just try it 20 | echo "${harbordomain}" 21 | docker login "${harbordomain}" -u admin -p "Harbor12345" 22 | 23 | kp import -f ./tbs-dependencies/descriptor-*.yaml -------------------------------------------------------------------------------- /concourse/tasks/deploy-tbs-dependencies/deploy-tbs-dependencies.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | 3 | 4 | inputs: 5 | - name: tanzu-gitops 6 | - name: tbs-dependencies 7 | 8 | 9 | params: 10 | tkgicluster: 11 | tkgiapi: 12 | tkgiuser: 13 | tkgipassword: 14 | pivnetuser: 15 | pivnetpassword: 16 | harbordomain: 17 | 18 | run: 19 | path: "tanzu-gitops/concourse/tasks/deploy-tbs-dependencies/deploy-tbs-dependencies.sh" -------------------------------------------------------------------------------- /contour/install-helm.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | helm upgrade --install contour contour \ 6 | --repo https://charts.trials.tac.bitnami.com/demo \ 7 | --create-namespace \ 8 | --namespace contour \ 9 | --version 4.1.3 \ 10 | --wait -------------------------------------------------------------------------------- /contour/install.sh: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | kapp deploy -a contour \ 6 | -f https://projectcontour.io/quickstart/contour.yaml -------------------------------------------------------------------------------- /demo-app/argorollouts/demo-app.yml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Rollout 3 | metadata: 4 | name: demo-app 5 | namespace: demo-app 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: demo-app 11 | template: 12 | metadata: 13 | labels: 14 | app: demo-app 15 | spec: 16 | containers: 17 | - name: demo-app 18 | image: harbor.lab.home/library/demo-app:latest 19 | ports: 20 | - containerPort: 8080 21 | env: 22 | - name: WAVEFRONT_API_TOKEN 23 | valueFrom: 24 | secretKeyRef: 25 | name: wavefront 26 | key: wavefront_api_token 27 | - name: WAVEFRONT_URL 28 | valueFrom: 29 | secretKeyRef: 30 | name: wavefront 31 | key: wavefront_url 32 | # wait a minimum 30 seconds before sending traffic 33 | minReadySeconds: 30 34 | # Deployment strategy to use during updates 35 | strategy: 36 | canary: 37 | # CanaryService holds the name of a service which selects pods with canary version and don't select any pods with stable version. +optional 38 | canaryService: spring-petclinic-canary 39 | # StableService holds the name of a service which selects pods with stable version and don't select any pods with canary version. +optional 40 | stableService: spring-petclinic-stable 41 | # The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. By default, a fixed value of 1 is used. Example: when this is set to 30%, the old RC can be scaled down by 30% immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that at least 70% of original number of pods are available at all times during the update. +optional 42 | maxUnavailable: 0 43 | # The maximum number of pods that can be scheduled above the original number of pods. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of the update (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. By default, a value of 1 is used. Example: when this is set to 30%, the new RC can be scaled up by 30% immediately when the rolling update starts. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of original pods. +optional 44 | maxSurge: 1 45 | 46 | 47 | steps: 48 | - setWeight: 20 49 | - pause: 50 | duration: 20s 51 | - setWeight: 40 52 | - pause: 53 | duration: 20s 54 | - setWeight: 80 55 | - pause: 56 | duration: 20s 57 | # Anti Affinity configuration between desired and previous replicaset. Only one must be specified 58 | antiAffinity: 59 | preferredDuringSchedulingIgnoredDuringExecution: 60 | weight: 50 # Between 1 - 100 61 | # Traffic routing specifies ingress controller or service mesh configuration to achieve 62 | # advanced traffic splitting. If omitted, will achieve traffic split via a weighted 63 | # replica counts between the canary and stable ReplicaSet. 
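# Aside: this repo fronts its apps with Istio rather than NGINX, so a mesh-based variant of the
# block below may be closer to what this homelab would actually use. The following is only a
# commented-out sketch, not part of this manifest: it assumes the existing demo-app VirtualService
# (demo-app/deploy/virtualservice.yml) is given a named http route ("primary" is a hypothetical
# name) whose weights Argo Rollouts can then adjust during the canary steps above:
#
#   trafficRouting:
#     istio:
#       virtualService:
#         name: demo-app        # VirtualService in the same namespace as the Rollout
#         routes:
#         - primary             # named http route that Argo Rollouts re-weights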
64 | trafficRouting: 65 | 66 | # NGINX Ingress Controller routing configuration 67 | nginx: 68 | stableIngress: spring-petclinic # required -------------------------------------------------------------------------------- /demo-app/deploy/deployment.yml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: demo-app 6 | namespace: demo-app 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: demo-app 12 | template: 13 | metadata: 14 | labels: 15 | app: demo-app 16 | annotations: #! Istio overwrites these annotations but I think they need to be here so Istio knows what to scrape and merge 17 | prometheus.io/port: "8080" 18 | prometheus.io/scrape: "true" 19 | spec: 20 | containers: 21 | - name: demo-app 22 | imagePullPolicy: Always 23 | image: #@ data.values.harbor_domain + "/library/demo-app:latest" 24 | ports: 25 | - containerPort: 8080 26 | env: 27 | - name: DEMO_APP_DB_TYPE 28 | value: mysql 29 | - name: DEMO_APP_MYSQL_HOSTNAME 30 | value: mysql.demo.tanzu 31 | - name: DEMO_APP_MYSQL_DATABASE 32 | value: demo-app 33 | - name: DEMO_APP_MYSQL_USERNAME 34 | value: demo-app 35 | - name: DEMO_APP_MYSQL_PASSWORD 36 | value: demo-app -------------------------------------------------------------------------------- /demo-app/deploy/install.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | kapp deploy -a demo-app \ 6 | -f <(ytt --data-values-env=YTT_HOMELAB \ 7 | -f deployment.yml \ 8 | -f services.yml \ 9 | -f virtualservice.yml \ 10 | -f values.yaml) -------------------------------------------------------------------------------- /demo-app/deploy/services.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: demo-app 5 | namespace: demo-app 6 | spec: 7 | selector: 8 | app: demo-app 9 | ports: 10 | - name: http 11 | protocol: TCP 12 | port: 8080 -------------------------------------------------------------------------------- /demo-app/deploy/values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | harbor_domain: "" 4 | apps_domain: "" 5 | primary_domain: "" -------------------------------------------------------------------------------- /demo-app/deploy/virtualservice.yml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | apiVersion: networking.istio.io/v1alpha3 4 | kind: VirtualService 5 | metadata: 6 | name: demo-app 7 | namespace: demo-app 8 | spec: 9 | hosts: 10 | - demo-app.apps.lab.home 11 | gateways: 12 | - app-gateway.istio-system.svc.cluster.local 13 | http: 14 | - match: 15 | - uri: 16 | prefix: /write 17 | route: 18 | - destination: 19 | host: demo-app 20 | port: 21 | number: 8080 -------------------------------------------------------------------------------- /demo-app/gemfire/README.md: -------------------------------------------------------------------------------- 1 | what's the relationship between the Pods named NAME-server and the server from `gfsh start server` 2 | 3 | 4 | start server \ 5 | --name=server1 \ 6 | --redis-port=11211 \ 7 | --J=-Dgemfireredis.regiontype=PARTITION_PERSISTENT 8 | 9 | 10 | Starting a Geode Server in /data/server1... 11 | ......... 
12 | Server in /data/server1 on gemfire1-locator-0.gemfire1-locator.gemfire-cluster.svc.cluster.local[40404] as server1 is currently online. 13 | Process ID: 11468 14 | Uptime: 6 seconds 15 | Geode Version: 1.13.1 16 | Java Version: 11.0.9.1 17 | Log File: /data/server1/server1.log 18 | JVM Arguments: -Dgemfire.default.locators=10.200.35.135[10334] -Dgemfire.start-dev-rest-api=false -Dgemfire.use-cluster-configuration=true -Dgemfire.redis-port=11211 -Dgemfireredis.regiontype=PARTITION_PERSISTENT -Dgemfire.launcher.registerSignalHandlers=true -Djava.awt.headless=true -Dsun.rmi.dgc.server.gcInterval=9223372036854775806 19 | Class-Path: /gemfire/lib/geode-core-1.13.1.jar:/gemfire/lib/geode-dependencies.jar 20 | -------------------------------------------------------------------------------- /demo-app/gemfire/cluster.yml: -------------------------------------------------------------------------------- 1 | apiVersion: gemfire.tanzu.vmware.com/v1 2 | kind: GemFireCluster 3 | metadata: 4 | name: gemfire1 5 | namespace: demo-app 6 | spec: 7 | image: harbor.lab.home/library/gemfire-k8s:1.0.0 8 | servers: 9 | overrides: 10 | gemfireProperties: 11 | redis-port: "11211" -------------------------------------------------------------------------------- /demo-app/gemfire/install.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | kapp deploy -a gemfire-cluster \ 6 | -n demo-app \ 7 | -f cluster.yml -------------------------------------------------------------------------------- /demo-app/gemfire/service-redis.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: gemfire-redis 5 | namespace: gemfire-cluster 6 | spec: 7 | selector: 8 | app: gemfire1-server 9 | type: ClusterIP 10 | ports: 11 | - name: redis 12 | port: 11211 13 | -------------------------------------------------------------------------------- /demo-app/image/image.yml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | apiVersion: kpack.io/v1alpha1 4 | kind: Image 5 | metadata: 6 | name: demo-app 7 | namespace: demo-app 8 | spec: 9 | tag: #@ data.values.harbor_domain + "/library/demo-app" 10 | builder: 11 | name: base 12 | kind: ClusterBuilder 13 | source: 14 | git: 15 | url: https://github.com/techgnosis/demo-app.git 16 | revision: master -------------------------------------------------------------------------------- /demo-app/image/install.sh: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | kapp deploy -a demo-app-image \ 6 | -n demo-app \ 7 | -f <(REGISTRY_PASSWORD=Harbor12345 kp secret create harbor-creds \ 8 | --registry harbor.lab.home \ 9 | --registry-user admin \ 10 | --dry-run \ 11 | --output yaml \ 12 | -n demo-app) \ 13 | -f <(ytt --data-values-env YTT_HOMELAB \ 14 | -f image.yml \ 15 | -f values.yaml) -------------------------------------------------------------------------------- /demo-app/image/values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | harbor_domain: "" 4 | apps_domain: "" 5 | primary_domain: "" -------------------------------------------------------------------------------- /demo-app/mysql/helm.yml: -------------------------------------------------------------------------------- 1 | auth: 2 | rootPassword: demo-app 3 | database: demo-app 4 | username: demo-app 5 | password: demo-app 6 | metrics: 7 | enabled: true -------------------------------------------------------------------------------- /demo-app/mysql/install.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | helm upgrade --install mysql mysql \ 6 | --repo https://charts.trials.tac.bitnami.com/demo \ 7 | --version 8.4.1 \ 8 | --namespace demo-app \ 9 | --values helm.yml \ 10 | --wait -------------------------------------------------------------------------------- /demo-app/mysql/readme: -------------------------------------------------------------------------------- 1 | mysql --host=localhost --user=demo-app --password=demo-app demo-app 2 | 3 | Metrics come from a separate container that is fronted by a Service. 4 | Istio does not scrape Services so we are relying on TO Collector to get the MySQL metrics -------------------------------------------------------------------------------- /demo-app/postgres/demo-app.yml: -------------------------------------------------------------------------------- 1 | apiVersion: sql.tanzu.vmware.com/v1 2 | kind: Postgres 3 | metadata: 4 | name: demo-app 5 | namespace: demo-app 6 | spec: 7 | memory: "800Mi" 8 | cpu: "0.5" 9 | storageClassName: vsan 10 | serviceType: ClusterIP 11 | storageSize: 300M 12 | pgConfig: 13 | dbname: demo-app 14 | username: demo-app -------------------------------------------------------------------------------- /demo-app/postgres/get-password.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | kubectl get secrets testdb-db-secret -n postgres-dbs -o jsonpath='{.data.password}' | base64 -d -------------------------------------------------------------------------------- /demo-app/postgres/install.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | kapp deploy -a demo-app-postgres-database \ 6 | -n demo-app \ 7 | -f demo-app.yml -------------------------------------------------------------------------------- /demo-app/traffic.sh: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | while true 6 | do 7 | curl https://demo-app.apps.$PRIMARY_DOMAIN/write 8 | sleep 1 9 | done -------------------------------------------------------------------------------- /demo-magic.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ############################################################################### 4 | # 5 | # demo-magic.sh 6 | # 7 | # Copyright (c) 2015 Paxton Hare 8 | # 9 | # This script lets you script demos in bash. It runs through your demo script when you press 10 | # ENTER. It simulates typing and runs commands. 11 | # 12 | ############################################################################### 13 | 14 | # the speed to "type" the text 15 | TYPE_SPEED=20 16 | 17 | # no wait after "p" or "pe" 18 | NO_WAIT=false 19 | 20 | # if > 0, will pause for this amount of seconds before automatically proceeding with any p or pe 21 | PROMPT_TIMEOUT=0 22 | 23 | # don't show command number unless user specifies it 24 | SHOW_CMD_NUMS=false 25 | 26 | 27 | # handy color vars for pretty prompts 28 | BLACK="\033[0;30m" 29 | BLUE="\033[0;34m" 30 | GREEN="\033[0;32m" 31 | GREY="\033[0;90m" 32 | CYAN="\033[0;36m" 33 | RED="\033[0;31m" 34 | PURPLE="\033[0;35m" 35 | BROWN="\033[0;33m" 36 | WHITE="\033[1;37m" 37 | COLOR_RESET="\033[0m" 38 | 39 | C_NUM=0 40 | 41 | # prompt and command color which can be overriden 42 | DEMO_PROMPT="$ " 43 | DEMO_CMD_COLOR=$WHITE 44 | DEMO_COMMENT_COLOR=$GREY 45 | 46 | ## 47 | # prints the script usage 48 | ## 49 | function usage() { 50 | echo -e "" 51 | echo -e "Usage: $0 [options]" 52 | echo -e "" 53 | echo -e "\tWhere options is one or more of:" 54 | echo -e "\t-h\tPrints Help text" 55 | echo -e "\t-d\tDebug mode. Disables simulated typing" 56 | echo -e "\t-n\tNo wait" 57 | echo -e "\t-w\tWaits max the given amount of seconds before proceeding with demo (e.g. '-w5')" 58 | echo -e "" 59 | } 60 | 61 | ## 62 | # wait for user to press ENTER 63 | # if $PROMPT_TIMEOUT > 0 this will be used as the max time for proceeding automatically 64 | ## 65 | function wait() { 66 | if [[ "$PROMPT_TIMEOUT" == "0" ]]; then 67 | read -rs 68 | else 69 | read -rst "$PROMPT_TIMEOUT" 70 | fi 71 | } 72 | 73 | ## 74 | # print command only. 
Useful for when you want to pretend to run a command 75 | # 76 | # takes 1 param - the string command to print 77 | # 78 | # usage: p "ls -l" 79 | # 80 | ## 81 | function p() { 82 | if [[ ${1:0:1} == "#" ]]; then 83 | cmd=$DEMO_COMMENT_COLOR$1$COLOR_RESET 84 | else 85 | cmd=$DEMO_CMD_COLOR$1$COLOR_RESET 86 | fi 87 | 88 | # render the prompt 89 | x=$(PS1="$DEMO_PROMPT" "$BASH" --norc -i </dev/null 2>&1 | sed -n '${s/^\(.*\)exit$/\1/p;}') 90 | 91 | # show command number is selected 92 | if $SHOW_CMD_NUMS; then 93 | printf "[$((++C_NUM))] $x" 94 | else 95 | printf "$x" 96 | fi 97 | 98 | # wait for the user to press a key before typing the command 99 | if !($NO_WAIT); then 100 | wait 101 | fi 102 | 103 | if [[ -z $TYPE_SPEED ]]; then 104 | echo -en "$cmd" 105 | else 106 | echo -en "$cmd" | pv -qL $[$TYPE_SPEED+(-2 + RANDOM%5)]; 107 | fi 108 | 109 | # wait for the user to press a key before moving on 110 | if !($NO_WAIT); then 111 | wait 112 | fi 113 | echo "" 114 | } 115 | 116 | ## 117 | # Prints and executes a command 118 | # 119 | # takes 1 parameter - the string command to run 120 | # 121 | # usage: pe "ls -l" 122 | # 123 | ## 124 | function pe() { 125 | # print the command 126 | p "$@" 127 | 128 | # execute the command 129 | eval "$@" 130 | } 131 | 132 | ## 133 | # Enters script into interactive mode 134 | # 135 | # and allows newly typed commands to be executed within the script 136 | # 137 | # usage : cmd 138 | # 139 | ## 140 | function cmd() { 141 | # render the prompt 142 | x=$(PS1="$DEMO_PROMPT" "$BASH" --norc -i </dev/null 2>&1 | sed -n '${s/^\(.*\)exit$/\1/p;}') 143 | printf "$x\033[0m" 144 | read command 145 | eval "${command}" 146 | } 147 | 148 | 149 | function check_pv() { 150 | command -v pv >/dev/null 2>&1 || { 151 | 152 | echo "" 153 | echo -e "${RED}##############################################################" 154 | echo "# HOLD IT!! I require pv but it's not installed. Aborting." >&2; 155 | echo -e "${RED}##############################################################" 156 | echo "" 157 | echo -e "${COLOR_RESET}Installing pv:" 158 | echo "" 159 | echo -e "${BLUE}Mac:${COLOR_RESET} $ brew install pv" 160 | echo "" 161 | echo -e "${BLUE}Other:${COLOR_RESET} http://www.ivarch.com/programs/pv.shtml" 162 | echo -e "${COLOR_RESET}" 163 | exit 1; 164 | } 165 | } 166 | 167 | check_pv 168 | # 169 | # handle some default params 170 | # -h for help 171 | # -d for disabling simulated typing 172 | # 173 | while getopts ":dhncw:" opt; do 174 | case $opt in 175 | h) 176 | usage 177 | exit 1 178 | ;; 179 | d) 180 | unset TYPE_SPEED 181 | ;; 182 | n) 183 | NO_WAIT=true 184 | ;; 185 | c) 186 | SHOW_CMD_NUMS=true 187 | ;; 188 | w) 189 | PROMPT_TIMEOUT=$OPTARG 190 | ;; 191 | esac 192 | done 193 | -------------------------------------------------------------------------------- /demo-tbs-only-quiet.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | source ./demo-magic.sh 4 | 5 | clear 6 | 7 | pe 'kp build logs spring-petclinic -n images' 8 | pe 'tbs/tbs-update-stack-100.0.30.sh' 9 | pe 'kp build logs spring-petclinic -n images' 10 | -------------------------------------------------------------------------------- /demo.sh: -------------------------------------------------------------------------------- 1 | #!
/usr/bin/env bash 2 | 3 | source ./demo-magic.sh 4 | 5 | # How to reset 6 | # Delete MySQL with Helm and spring-petclinic with kapp 7 | # Delete the PVC in spring-petclinic 8 | # Delete the spring-petclinic namespace from TMC UI 9 | # Reset TBS back to 19 10 | # Delete all the MySQL and common Helm charts 11 | 12 | #1 - secure image pipeline for existing software 13 | #2 - platform operators providing databases and K8s access to development teams with TAC and TMC 14 | #3 - low-K8s "easy mode" 15 | #4 - microservices architecture 16 | #5 - constant backdrop of observability 17 | 18 | 19 | clear 20 | 21 | p 'Welcome. Lets get started by showing an existing pipeline being triggered by a code change' 22 | pe 'kp build logs product-api -n images' 23 | p 'Lets take a look at the image YAML' 24 | pe 'bat images/product-api.yml' 25 | 26 | p 'But what about keeping the images up-to-date?' 27 | pe 'tbs/tbs-update-stack-100.0.30.sh' 28 | pe 'kp build logs product-api -n images' 29 | 30 | 31 | p 'Lets add a new Spring Boot application to our environment. The app is called spring-petclinic' 32 | p 'First we need a database' 33 | p 'Next step is to put the MySQL chart into Harbor' 34 | cd tac 35 | pe './sync.sh' 36 | cd .. 37 | p 'Lets take a look at Harbor Helm repo and our image replication' 38 | 39 | 40 | p 'Helm repo is ready. We need to provision a new namespace for our application. Lets use Tanzu Mission Control' 41 | p 'Lets login as our spring-petclinic developer and begin deploying' 42 | pe 'tkgi get-kubeconfig cluster2 --username spring-petclinic-dev --password spring-petclinic-dev --api https://tkgi.lab.home' 43 | p 'We havent been assigned any permissions yet. Lets confirm' 44 | pe 'kubectl get pods -n product-api' 45 | pe 'kubectl get pods -n spring-petclinic' 46 | cd apps/spring-petclinic/mysql 47 | pe './install.sh' 48 | cd ../../.. 49 | p 'It seems we lack permissons. Lets fix that in Tanzu Mission Control' 50 | pe 'kubectl get pods -n product-api' 51 | pe 'kubectl get pods -n spring-petclinic' 52 | p 'Lets try our install again' 53 | cd apps/spring-petclinic/mysql 54 | pe './install.sh' 55 | p 'And now we can install our app' 56 | cd .. 57 | pe './install.sh' 58 | cd ../.. 59 | 60 | p 'Lets consider a more managed app experience' 61 | cmd 62 | cmd 63 | pe 'cf push test-app -p apps/test-app/test-app' 64 | 65 | p 'Lets talk microservices and service mesh' 66 | 67 | p 'And last but not least, Tanzu Observability is receiving metrics from all of our sources' 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | -------------------------------------------------------------------------------- /fluent-bit/grafana-loki.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | helm upgrade --install fluent-bit fluent-bit \ 6 | --repo https://grafana.github.io/helm-charts \ 7 | --set loki.serviceName=loki.lab.home \ 8 | --namespace fluent-bit \ 9 | --create-namespace -------------------------------------------------------------------------------- /harbor/certificate.yml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | apiVersion: cert-manager.io/v1alpha2 4 | kind: Certificate 5 | metadata: 6 | name: harbor 7 | namespace: harbor 8 | spec: 9 | dnsNames: 10 | - #@ data.values.harbor_domain 11 | - #@ "notary." 
+ data.values.primary_domain 12 | issuerRef: 13 | kind: ClusterIssuer 14 | name: mkcert 15 | secretName: harbor-tls -------------------------------------------------------------------------------- /harbor/helm-goharbor.yml: -------------------------------------------------------------------------------- 1 | expose: 2 | type: loadBalancer 3 | tls: 4 | enabled: true 5 | certSource: secret 6 | secret: 7 | secretName: harbor-tls 8 | notarySecretName: harbor-tls 9 | persistence: 10 | persistentVolumeClaim: 11 | registry: 12 | size: 100Gi 13 | metrics: 14 | enabled: true -------------------------------------------------------------------------------- /harbor/helm.yml: -------------------------------------------------------------------------------- 1 | service: 2 | type: LoadBalancer 3 | tls: 4 | enabled: true 5 | existingSecret: harbor-tls 6 | notaryExistingSecret: harbor-tls 7 | persistence: 8 | persistentVolumeClaim: 9 | registry: 10 | size: 100Gi -------------------------------------------------------------------------------- /harbor/install.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | 6 | kapp deploy -a harbor \ 7 | -f <(ytt --data-values-env=YTT_HOMELAB \ 8 | -f certificate.yml \ 9 | -f namespace.yml \ 10 | -f values.yml) 11 | 12 | helm upgrade --install harbor harbor \ 13 | --repo https://charts.trials.tac.bitnami.com/demo \ 14 | --version 9.8.3 \ 15 | --values helm.yml \ 16 | --set externalURL="https://$HARBOR_DOMAIN" \ 17 | --set harborAdminPassword="$HARBOR_PASSWORD" \ 18 | --namespace harbor -------------------------------------------------------------------------------- /harbor/namespace.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: harbor -------------------------------------------------------------------------------- /harbor/readme.md: -------------------------------------------------------------------------------- 1 | Don't use Harbor behind an ingress gateway of any kind. It just causes problems. -------------------------------------------------------------------------------- /harbor/values.yml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | harbor_domain: "" 4 | apps_domain: "" 5 | primary_domain: "" -------------------------------------------------------------------------------- /healthwatch/install.sh: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | kapp deploy -a hw2 -f role.yml -------------------------------------------------------------------------------- /healthwatch/role.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: healthwatch 6 | rules: 7 | - resources: 8 | - pods/proxy 9 | - pods 10 | - nodes 11 | - nodes/proxy 12 | - namespace/pods 13 | - endpoints 14 | - services 15 | verbs: 16 | - get 17 | - watch 18 | - list 19 | apiGroups: 20 | - "" 21 | - nonResourceURLs: ["/metrics"] 22 | verbs: ["get"] 23 | --- 24 | apiVersion: rbac.authorization.k8s.io/v1 25 | kind: ClusterRoleBinding 26 | metadata: 27 | name: healthwatch 28 | roleRef: 29 | apiGroup: "" 30 | kind: ClusterRole 31 | name: healthwatch 32 | subjects: 33 | - apiGroup: "" 34 | kind: User 35 | name: healthwatch 36 | namespace: pks-system -------------------------------------------------------------------------------- /kubeapps/cluster-role-binding.yml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: kubeapps 5 | subjects: 6 | - kind: ServiceAccount 7 | name: kubeapps 8 | namespace: kubeapps 9 | roleRef: 10 | kind: ClusterRole 11 | name: cluster-admin 12 | apiGroup: rbac.authorization.k8s.io -------------------------------------------------------------------------------- /kubeapps/configure-kubeapps.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | kubectl get secret -n kubeapps $(kubectl -n kubeapps get serviceaccount kubeapps -o jsonpath='{range .secrets[*]}{.name}{"\n"}{end}' | grep kubeapps-token) -o jsonpath='{.data.token}' -o go-template='{{.data.token | base64decode}}' && echo 6 | -------------------------------------------------------------------------------- /kubeapps/helm.yml: -------------------------------------------------------------------------------- 1 | useHelm3: true 2 | apprepository: 3 | initialRepos: 4 | - name: harbor 5 | url: https://harbor.lab.home/chartrepo/library 6 | caCert: | 7 | -----BEGIN CERTIFICATE----- 8 | MIIEwTCCAymgAwIBAgIQCQWTa8uR9Z3HNksvW8FfeTANBgkqhkiG9w0BAQsFADB5 9 | MR4wHAYDVQQKExVta2NlcnQgZGV2ZWxvcG1lbnQgQ0ExJzAlBgNVBAsMHmphbWVz 10 | QEphbWVzcy1NYWNCb29rLVByby5sb2NhbDEuMCwGA1UEAwwlbWtjZXJ0IGphbWVz 11 | QEphbWVzcy1NYWNCb29rLVByby5sb2NhbDAeFw0xOTExMDcwMTU3NTJaFw0yOTEx 12 | MDcwMTU3NTJaMHkxHjAcBgNVBAoTFW1rY2VydCBkZXZlbG9wbWVudCBDQTEnMCUG 13 | A1UECwweamFtZXNASmFtZXNzLU1hY0Jvb2stUHJvLmxvY2FsMS4wLAYDVQQDDCVt 14 | a2NlcnQgamFtZXNASmFtZXNzLU1hY0Jvb2stUHJvLmxvY2FsMIIBojANBgkqhkiG 15 | 9w0BAQEFAAOCAY8AMIIBigKCAYEAtumB922ikHSwPXj6M0f+O7VJ+6v3NtqAgosH 16 | j/ba/BT4NpWwhCySyjou9HBbUhDIK0LWsU6kmQNUvwkR3ZJAeULRkwnTVXWfLF9Y 17 | pGCKn1Vf7baX2DPr5rAU7fHMXZC+yngqRe95TFcHXmZA+1OFCRHq/gbtGFwy8Q/T 18 | WlBHGBwQ9bbWmsVPgVumNLIfBzAiMI6qph+TCWD9Z64XvDoY3CVtrpykS8sU+INm 19 | vW+D4U2d3vwoR0UuU0Z6fJAnjsQwV9GEU09gtALQX+EZ861cwCTW6gtnyaDdE6ks 20 | 888MnjP4RMzEUsMPRbmgE7kXBd5BZs7DolyYrCVHmpu1Nnr7XcgVRjeKVo34/qlq 21 | BGFxNwXhbN4EZaZEJYekRuLAS/PxklYA/WIImXDiFfwVMOWWmmdYDidz3UhVzIg7 22 | vrpe5zLdL6wEVN39DcRjZNWaN75rRcv9B8wz1d1o5YhkGC+sIUUmoK/pa9PWx+3q 23 | OISbnw79JVlOE/gfT4gpwvYWs42fAgMBAAGjRTBDMA4GA1UdDwEB/wQEAwICBDAS 24 | BgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBRq+uJ9dHzPcCVuQYJyh9qBD0SN 25 | 
gTANBgkqhkiG9w0BAQsFAAOCAYEAIG3vcBSbXL9WzX1RsYgTnMlgDGxdcc0e3+66 26 | mlLwP/gX+mOenu9MeocKcpme20TWdx9sXtL9jZsIs0M+Qjh2pPBfZoRRL1noWHgl 27 | HvYH5AeGm5Kb1hkO8djjghMfet47lOOl5Tca4SWPZTbtDzufPttG1gI821VmlWTZ 28 | UZrlWJpLIeEOmchA1QhjzKI3c/0Hf4CqQGr1ozYmmGCyzu0EGvbdVJbkhJw/lSFF 29 | QJMLAoO4j01JU40+PiGQtyLlC8RfhHD5/nhvPB7dQPQ5jQrum8JHheEAsqVI45AL 30 | wlxvLLh8urgNgB5nQK0rpUaa0aQpyxntphcQSjW0NyQJMxdNl33lWW53w9xZmebl 31 | T/cIDjMVkkbC9LiIJt8L6yFxMsRMEmYhat/nKEJFMsHKa51dIXCeF+yYzFiJbK3s 32 | A/ODno9mjhVe23GGcYwqF8qE4jXISkKaUpCu3hs6T4iFciMZVL0sN+y+8dvu/SxD 33 | 9NOtbmZ8XCEJHjvS7z7qQWevPn5+ 34 | -----END CERTIFICATE----- 35 | 36 | clusters: 37 | - name: apps -------------------------------------------------------------------------------- /kubeapps/install.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | 6 | helm upgrade --install kubeapps kubeapps \ 7 | --repo https://charts.trials.tac.bitnami.com/demo \ 8 | --version 5.0.0 \ 9 | --namespace kubeapps \ 10 | --create-namespace \ 11 | --wait \ 12 | --values=helm.yml 13 | 14 | # Note - the service account is for logging in to the dashboard 15 | # It is not something that can be created via the Helm chart 16 | 17 | kapp deploy -a kubeapps -f <(ytt --data-values-env=YTT_HOMELAB \ 18 | -f virtualservice.yml \ 19 | -f values.yml \ 20 | -f service-account.yml \ 21 | -f cluster-role-binding.yml) -------------------------------------------------------------------------------- /kubeapps/service-account.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: kubeapps 5 | namespace: kubeapps -------------------------------------------------------------------------------- /kubeapps/values.yml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | harbor_domain: "" 4 | apps_domain: "" 5 | primary_domain: "" -------------------------------------------------------------------------------- /kubeapps/virtualservice.yml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | apiVersion: networking.istio.io/v1alpha3 4 | kind: VirtualService 5 | metadata: 6 | name: kubeapps-virtual-service 7 | namespace: kubeapps 8 | spec: 9 | hosts: 10 | - #@ "kubeapps." 
+ data.values.apps_domain 11 | gateways: 12 | - app-gateway.istio-system.svc.cluster.local 13 | http: 14 | - match: 15 | - uri: 16 | prefix: / 17 | route: 18 | - destination: 19 | host: kubeapps 20 | port: 21 | number: 80 -------------------------------------------------------------------------------- /metallb/apps.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | namespace: metallb-system 5 | name: config 6 | data: 7 | config: | 8 | address-pools: 9 | - name: vlan502 10 | protocol: layer2 11 | addresses: 12 | - 10.0.2.20-10.0.2.29 13 | -------------------------------------------------------------------------------- /metallb/databases.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | namespace: metallb-system 5 | name: config 6 | data: 7 | config: | 8 | address-pools: 9 | - name: vlan502 10 | protocol: layer2 11 | addresses: 12 | - 10.0.2.30-10.0.2.39 13 | -------------------------------------------------------------------------------- /metallb/install.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | CONFIG=$1 6 | 7 | kapp deploy -a metallb \ 8 | -f "${CONFIG}" \ 9 | -f https://raw.githubusercontent.com/metallb/metallb/v0.9.5/manifests/namespace.yaml \ 10 | -f https://raw.githubusercontent.com/metallb/metallb/v0.9.5/manifests/metallb.yaml \ 11 | -f <(kubectl -n metallb-system create secret generic memberlist \ 12 | --from-literal=secretkey="$(openssl rand -base64 128)" \ 13 | --dry-run=client \ 14 | -o yaml) 15 | 16 | 17 | 18 | 19 | 20 | # helm upgrade --install metallb metallb \ 21 | # --repo https://charts.bitnami.com/bitnami \ 22 | # --version 0.9.5 \ 23 | # --namespace metallb-system \ 24 | # --set existingConfigMap=config \ 25 | # --wait 26 | 27 | -------------------------------------------------------------------------------- /metallb/namespace.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: metallb-system -------------------------------------------------------------------------------- /metallb/testing.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | namespace: metallb-system 5 | name: config 6 | data: 7 | config: | 8 | address-pools: 9 | - name: vlan502 10 | protocol: layer2 11 | addresses: 12 | - 10.0.2.40-10.0.2.49 13 | -------------------------------------------------------------------------------- /minio/helm.yml: -------------------------------------------------------------------------------- 1 | accessKey: 2 | password: accesskey 3 | secretKey: 4 | password: secretkey -------------------------------------------------------------------------------- /minio/install.sh: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | helm upgrade --install minio minio \ 6 | --repo https://charts.trials.tac.bitnami.com/demo \ 7 | --version 3.7.9 \ 8 | --wait \ 9 | --namespace minio \ 10 | --create-namespace \ 11 | --values helm.yml 12 | 13 | kapp deploy -a minio -f <(ytt --data-values-env=YTT_HOMELAB \ 14 | -f virtualservice.yml \ 15 | -f values.yml) -------------------------------------------------------------------------------- /minio/values.yml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | harbor_domain: "" 4 | apps_domain: "" 5 | primary_domain: "" -------------------------------------------------------------------------------- /minio/virtualservice.yml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | apiVersion: networking.istio.io/v1alpha3 4 | kind: VirtualService 5 | metadata: 6 | name: minio-virtual-service 7 | namespace: minio 8 | spec: 9 | hosts: 10 | - #@ "minio." + data.values.apps_domain 11 | gateways: 12 | - app-gateway.istio-system.svc.cluster.local 13 | http: 14 | - match: 15 | - uri: 16 | prefix: / 17 | route: 18 | - destination: 19 | host: minio 20 | port: 21 | number: 9000 -------------------------------------------------------------------------------- /prometheus/clusterrole.yml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1beta1 2 | kind: ClusterRole 3 | metadata: 4 | name: prometheus 5 | rules: 6 | - apiGroups: [""] 7 | resources: 8 | - pods 9 | verbs: ["get", "list", "watch"] -------------------------------------------------------------------------------- /prometheus/clusterrolebinding.yml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1beta1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: prometheus 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: prometheus 9 | subjects: 10 | - kind: ServiceAccount 11 | name: prometheus 12 | namespace: prometheus -------------------------------------------------------------------------------- /prometheus/config.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: prometheus 5 | namespace: prometheus 6 | data: 7 | prometheus.yml: | 8 | global: 9 | scrape_interval: 15s 10 | scrape_configs: 11 | - job_name: 'kubernetes-pods' 12 | 13 | kubernetes_sd_configs: 14 | - role: pod 15 | 16 | relabel_configs: 17 | - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] 18 | action: keep 19 | regex: true 20 | - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] 21 | action: replace 22 | target_label: __metrics_path__ 23 | regex: (.+) 24 | - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] 25 | action: replace 26 | regex: ([^:]+)(?::\d+)?;(\d+) 27 | replacement: $1:$2 28 | target_label: __address__ 29 | - action: labelmap 30 | regex: __meta_kubernetes_pod_label_(.+) 31 | - source_labels: [__meta_kubernetes_namespace] 32 | action: replace 33 | target_label: kubernetes_namespace 34 | - source_labels: [__meta_kubernetes_pod_name] 35 | action: replace 36 | target_label: kubernetes_pod_name 37 | 38 | 39 | -------------------------------------------------------------------------------- /prometheus/deployment.yml: 
-------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: prometheus 5 | namespace: prometheus 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: prometheus 11 | template: 12 | metadata: 13 | labels: 14 | app: prometheus 15 | spec: 16 | serviceAccountName: prometheus 17 | volumes: 18 | - name: prometheus-volume 19 | configMap: 20 | name: prometheus 21 | containers: 22 | - name: prometheus 23 | image: index.docker.io/prom/prometheus:v2.24.1 24 | command: ['prometheus'] 25 | args: ["--config.file", "/tmp/prometheus.yml"] 26 | volumeMounts: 27 | - name: prometheus-volume 28 | mountPath: /tmp/prometheus.yml 29 | subPath: prometheus.yml 30 | ports: 31 | - containerPort: 9090 32 | --- 33 | apiVersion: v1 34 | kind: Service 35 | metadata: 36 | name: prometheus 37 | namespace: prometheus 38 | spec: 39 | type: ClusterIP 40 | selector: 41 | app: prometheus 42 | ports: 43 | - protocol: TCP 44 | port: 9090 45 | -------------------------------------------------------------------------------- /prometheus/install.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | kapp deploy -a prometheus \ 6 | -f <(ytt --data-values-env=YTT_HOMELAB \ 7 | -f namespace.yml \ 8 | -f clusterrole.yml \ 9 | -f clusterrolebinding.yml \ 10 | -f config.yml \ 11 | -f deployment.yml \ 12 | -f sa.yml \ 13 | -f values.yml \ 14 | -f virtualservice.yml) -------------------------------------------------------------------------------- /prometheus/namespace.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: prometheus -------------------------------------------------------------------------------- /prometheus/readme.md: -------------------------------------------------------------------------------- 1 | This is purely standalone Prometheus. It doesn't connect to Tanzu Observability at all. 
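Any workload can opt in to this scrape config with the standard prometheus.io pod annotations that config.yml relabels on (demo-app/deploy/deployment.yml does exactly this). A minimal sketch of the pod-template metadata, with illustrative port/path values only:

    metadata:
      annotations:
        prometheus.io/scrape: "true"   # required; the 'keep' relabel rule drops un-annotated pods
        prometheus.io/port: "8080"     # rewritten into the scrape target address
        prometheus.io/path: "/metrics" # optional; /metrics is already the Prometheus default
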
See the `tobs` folder for more integration -------------------------------------------------------------------------------- /prometheus/sa.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: prometheus 5 | namespace: prometheus -------------------------------------------------------------------------------- /prometheus/values.yml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | harbor_domain: "" 4 | apps_domain: "" 5 | primary_domain: "" -------------------------------------------------------------------------------- /prometheus/virtualservice.yml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | apiVersion: networking.istio.io/v1alpha3 4 | kind: VirtualService 5 | metadata: 6 | name: prometheus-virtual-service 7 | namespace: prometheus 8 | spec: 9 | hosts: 10 | - prometheus.apps.lab.home 11 | - prometheus.databases.lab.home 12 | gateways: 13 | - app-gateway.istio-system.svc.cluster.local 14 | http: 15 | - match: 16 | - uri: 17 | prefix: / 18 | route: 19 | - destination: 20 | host: prometheus 21 | port: 22 | number: 9090 -------------------------------------------------------------------------------- /spring-petclinic/image/harbor-docker-creds.yml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: harbor-creds 6 | namespace: spring-petclinic-image 7 | annotations: 8 | kpack.io/docker: #@ "https://" + data.values.harbor_domain 9 | type: kubernetes.io/basic-auth 10 | stringData: 11 | username: admin 12 | password: Harbor12345 -------------------------------------------------------------------------------- /spring-petclinic/image/image.yml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | apiVersion: kpack.io/v1alpha1 4 | kind: Image 5 | metadata: 6 | name: spring-petclinic 7 | namespace: spring-petclinic-image 8 | spec: 9 | tag: #@ data.values.harbor_domain + "/library/spring-petclinic" 10 | serviceAccount: tbs-service-account 11 | builder: 12 | name: base 13 | kind: ClusterBuilder 14 | source: 15 | git: 16 | url: https://github.com/spring-projects/spring-petclinic.git 17 | revision: main -------------------------------------------------------------------------------- /spring-petclinic/image/install.sh: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | kapp deploy -a spring-petclinic-image \ 6 | -n spring-petclinic-image \ 7 | -f <(ytt --data-values-env YTT_HOMELAB \ 8 | -f image.yml \ 9 | -f tbs-service-account.yml \ 10 | -f harbor-docker-creds.yml \ 11 | -f values.yaml) -------------------------------------------------------------------------------- /spring-petclinic/image/tbs-service-account.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: tbs-service-account 5 | namespace: spring-petclinic-image 6 | secrets: 7 | - name: harbor-creds -------------------------------------------------------------------------------- /spring-petclinic/image/values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | harbor_domain: "" 4 | apps_domain: "" 5 | primary_domain: "" -------------------------------------------------------------------------------- /storage/README.md: -------------------------------------------------------------------------------- 1 | templates are for CNS CSI driver 2.1.0 2 | 3 | https://github.com/kubernetes-sigs/vsphere-csi-driver 4 | https://vsphere-csi-driver.sigs.k8s.io/ -------------------------------------------------------------------------------- /storage/csi-vsphere-template.conf: -------------------------------------------------------------------------------- 1 | [Global] 2 | cluster-id = "" 3 | 4 | [VirtualCenter ""] 5 | insecure-flag = "true" 6 | user = "" 7 | password = "" 8 | port = "443" 9 | datacenters = "" -------------------------------------------------------------------------------- /storage/install.sh: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | CLUSTER_NAME=$1 6 | 7 | cp csi-vsphere-template.conf csi-vsphere.conf 8 | sd "" $CLUSTER_NAME csi-vsphere.conf 9 | sd "" $VCENTER_USER csi-vsphere.conf 10 | sd "" $VCENTER_PASSWORD csi-vsphere.conf 11 | sd "" $VCENTER_DATACENTER csi-vsphere.conf 12 | sd "" $VCENTER_IP csi-vsphere.conf 13 | 14 | kapp deploy -a cns-csi-driver \ 15 | -f <(kubectl create secret generic vsphere-config-secret \ 16 | --from-file=csi-vsphere.conf \ 17 | --namespace=kube-system \ 18 | --dry-run=client \ 19 | -o yaml) \ 20 | -f vsphere-csi-controller-rbac.yaml \ 21 | -f vsphere-csi-controller-deployment.yaml \ 22 | -f vsphere-csi-node-ds.yaml \ 23 | -f storageclass.yml -------------------------------------------------------------------------------- /storage/storageclass.yml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: vsan 5 | annotations: 6 | storageclass.kubernetes.io/is-default-class: "true" 7 | provisioner: csi.vsphere.vmware.com 8 | parameters: 9 | storagepolicyname: "vSAN Default Storage Policy" -------------------------------------------------------------------------------- /storage/vsphere-csi-controller-deployment.yaml: -------------------------------------------------------------------------------- 1 | kind: Deployment 2 | apiVersion: apps/v1 3 | metadata: 4 | name: vsphere-csi-controller 5 | namespace: kube-system 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: vsphere-csi-controller 11 | template: 12 | metadata: 13 | labels: 14 | app: vsphere-csi-controller 15 | role: vsphere-csi 16 | spec: 17 | serviceAccountName: vsphere-csi-controller 18 | tolerations: 19 | - operator: "Exists" 20 | effect: NoSchedule 21 | - operator: "Exists" 22 | effect: NoExecute 23 | dnsPolicy: "Default" 24 | containers: 25 | - name: csi-attacher 26 | image: quay.io/k8scsi/csi-attacher:v3.0.0 27 | args: 28 | - "--v=4" 29 | - "--timeout=300s" 30 | - "--csi-address=$(ADDRESS)" 31 | - "--leader-election" 32 | env: 33 | - name: ADDRESS 34 | value: /csi/csi.sock 35 | volumeMounts: 36 | - mountPath: /csi 37 | name: socket-dir 38 | - name: csi-resizer 39 | image: quay.io/k8scsi/csi-resizer:v1.0.0 40 | args: 41 | - "--v=4" 42 | - "--timeout=300s" 43 | - "--csi-address=$(ADDRESS)" 44 | - "--leader-election" 45 | env: 46 | - name: ADDRESS 47 | value: /csi/csi.sock 48 | volumeMounts: 49 | - mountPath: /csi 50 | name: socket-dir 51 | - name: vsphere-csi-controller 52 | image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.1.0 53 | args: 54 | - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" 55 | - "--fss-namespace=$(CSI_NAMESPACE)" 56 | imagePullPolicy: "Always" 57 | env: 58 | - name: CSI_ENDPOINT 59 | value: unix:///csi/csi.sock 60 | - name: X_CSI_MODE 61 | value: "controller" 62 | - name: VSPHERE_CSI_CONFIG 63 | value: "/etc/cloud/csi-vsphere.conf" 64 | - name: LOGGER_LEVEL 65 | value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION 66 | - name: INCLUSTER_CLIENT_QPS 67 | value: "100" 68 | - name: INCLUSTER_CLIENT_BURST 69 | value: "100" 70 | - name: CSI_NAMESPACE 71 | valueFrom: 72 | fieldRef: 73 | fieldPath: metadata.namespace 74 | volumeMounts: 75 | - mountPath: /etc/cloud 76 | name: vsphere-config-volume 77 | readOnly: true 78 | - mountPath: /csi 79 | name: socket-dir 80 | ports: 81 | - name: healthz 82 | containerPort: 9808 83 | protocol: TCP 84 | livenessProbe: 85 | httpGet: 86 | path: /healthz 87 | port: healthz 88 | 
initialDelaySeconds: 10 89 | timeoutSeconds: 3 90 | periodSeconds: 5 91 | failureThreshold: 3 92 | - name: liveness-probe 93 | image: quay.io/k8scsi/livenessprobe:v2.1.0 94 | args: 95 | - "--v=4" 96 | - "--csi-address=/csi/csi.sock" 97 | volumeMounts: 98 | - name: socket-dir 99 | mountPath: /csi 100 | - name: vsphere-syncer 101 | image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.1.0 102 | args: 103 | - "--leader-election" 104 | - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" 105 | - "--fss-namespace=$(CSI_NAMESPACE)" 106 | imagePullPolicy: "Always" 107 | env: 108 | - name: FULL_SYNC_INTERVAL_MINUTES 109 | value: "30" 110 | - name: VSPHERE_CSI_CONFIG 111 | value: "/etc/cloud/csi-vsphere.conf" 112 | - name: LOGGER_LEVEL 113 | value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION 114 | - name: INCLUSTER_CLIENT_QPS 115 | value: "100" 116 | - name: INCLUSTER_CLIENT_BURST 117 | value: "100" 118 | - name: CSI_NAMESPACE 119 | valueFrom: 120 | fieldRef: 121 | fieldPath: metadata.namespace 122 | volumeMounts: 123 | - mountPath: /etc/cloud 124 | name: vsphere-config-volume 125 | readOnly: true 126 | - name: csi-provisioner 127 | image: quay.io/k8scsi/csi-provisioner:v2.0.0 128 | args: 129 | - "--v=4" 130 | - "--timeout=300s" 131 | - "--csi-address=$(ADDRESS)" 132 | - "--leader-election" 133 | - "--default-fstype=ext4" 134 | env: 135 | - name: ADDRESS 136 | value: /csi/csi.sock 137 | volumeMounts: 138 | - mountPath: /csi 139 | name: socket-dir 140 | volumes: 141 | - name: vsphere-config-volume 142 | secret: 143 | secretName: vsphere-config-secret 144 | - name: socket-dir 145 | emptyDir: {} 146 | --- 147 | apiVersion: v1 148 | data: 149 | "csi-migration": "false" # csi-migration feature is only available for vSphere 7.0U1 150 | kind: ConfigMap 151 | metadata: 152 | name: internal-feature-states.csi.vsphere.vmware.com 153 | namespace: kube-system 154 | --- 155 | apiVersion: storage.k8s.io/v1 156 | kind: CSIDriver 157 | metadata: 158 | name: csi.vsphere.vmware.com 159 | spec: 160 | attachRequired: true 161 | podInfoOnMount: false 162 | --- 163 | 164 | -------------------------------------------------------------------------------- /storage/vsphere-csi-controller-rbac.yaml: -------------------------------------------------------------------------------- 1 | kind: ServiceAccount 2 | apiVersion: v1 3 | metadata: 4 | name: vsphere-csi-controller 5 | namespace: kube-system 6 | --- 7 | kind: ClusterRole 8 | apiVersion: rbac.authorization.k8s.io/v1 9 | metadata: 10 | name: vsphere-csi-controller-role 11 | rules: 12 | - apiGroups: [""] 13 | resources: ["nodes", "persistentvolumeclaims", "pods", "configmaps"] 14 | verbs: ["get", "list", "watch"] 15 | - apiGroups: [""] 16 | resources: ["persistentvolumeclaims/status"] 17 | verbs: ["patch"] 18 | - apiGroups: [""] 19 | resources: ["persistentvolumes"] 20 | verbs: ["get", "list", "watch", "create", "update", "delete", "patch"] 21 | - apiGroups: [""] 22 | resources: ["events"] 23 | verbs: ["get", "list", "watch", "create", "update", "patch"] 24 | - apiGroups: ["coordination.k8s.io"] 25 | resources: ["leases"] 26 | verbs: ["get", "watch", "list", "delete", "update", "create"] 27 | - apiGroups: ["storage.k8s.io"] 28 | resources: ["storageclasses", "csinodes"] 29 | verbs: ["get", "list", "watch"] 30 | - apiGroups: ["storage.k8s.io"] 31 | resources: ["volumeattachments"] 32 | verbs: ["get", "list", "watch", "patch"] 33 | - apiGroups: ["cns.vmware.com"] 34 | resources: ["cnsvspherevolumemigrations"] 35 | verbs: ["create", "get", "list", "watch", 
"update", "delete"] 36 | - apiGroups: ["apiextensions.k8s.io"] 37 | resources: ["customresourcedefinitions"] 38 | verbs: ["get", "create"] 39 | - apiGroups: ["storage.k8s.io"] 40 | resources: ["volumeattachments/status"] 41 | verbs: ["patch"] 42 | --- 43 | kind: ClusterRoleBinding 44 | apiVersion: rbac.authorization.k8s.io/v1 45 | metadata: 46 | name: vsphere-csi-controller-binding 47 | subjects: 48 | - kind: ServiceAccount 49 | name: vsphere-csi-controller 50 | namespace: kube-system 51 | roleRef: 52 | kind: ClusterRole 53 | name: vsphere-csi-controller-role 54 | apiGroup: rbac.authorization.k8s.io 55 | -------------------------------------------------------------------------------- /storage/vsphere-csi-node-ds.yaml: -------------------------------------------------------------------------------- 1 | kind: DaemonSet 2 | apiVersion: apps/v1 3 | metadata: 4 | name: vsphere-csi-node 5 | namespace: kube-system 6 | spec: 7 | selector: 8 | matchLabels: 9 | app: vsphere-csi-node 10 | updateStrategy: 11 | type: "RollingUpdate" 12 | rollingUpdate: 13 | maxUnavailable: 1 14 | template: 15 | metadata: 16 | labels: 17 | app: vsphere-csi-node 18 | role: vsphere-csi 19 | spec: 20 | dnsPolicy: "Default" 21 | containers: 22 | - name: node-driver-registrar 23 | image: quay.io/k8scsi/csi-node-driver-registrar:v2.0.1 24 | args: 25 | - "--v=5" 26 | - "--csi-address=$(ADDRESS)" 27 | - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" 28 | - "--health-port=9809" 29 | env: 30 | - name: ADDRESS 31 | value: /csi/csi.sock 32 | - name: DRIVER_REG_SOCK_PATH 33 | value: /var/vcap/data/kubelet/plugins/csi.vsphere.vmware.com/csi.sock 34 | securityContext: 35 | privileged: true 36 | volumeMounts: 37 | - name: plugin-dir 38 | mountPath: /csi 39 | - name: registration-dir 40 | mountPath: /registration 41 | ports: 42 | - containerPort: 9809 43 | name: healthz 44 | livenessProbe: 45 | httpGet: 46 | path: /healthz 47 | port: healthz 48 | initialDelaySeconds: 5 49 | timeoutSeconds: 5 50 | - name: vsphere-csi-node 51 | image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.1.0 52 | args: 53 | - "--fss-name=internal-feature-states.csi.vsphere.vmware.com" 54 | - "--fss-namespace=$(CSI_NAMESPACE)" 55 | imagePullPolicy: "Always" 56 | env: 57 | - name: NODE_NAME 58 | valueFrom: 59 | fieldRef: 60 | fieldPath: spec.nodeName 61 | - name: CSI_ENDPOINT 62 | value: unix:///csi/csi.sock 63 | - name: X_CSI_MODE 64 | value: "node" 65 | - name: X_CSI_SPEC_REQ_VALIDATION 66 | value: "false" 67 | - name: VSPHERE_CSI_CONFIG 68 | value: "/etc/cloud/csi-vsphere.conf" 69 | - name: X_CSI_DEBUG 70 | value: "true" 71 | - name: LOGGER_LEVEL 72 | value: "PRODUCTION" # Options: DEVELOPMENT, PRODUCTION 73 | - name: CSI_NAMESPACE 74 | valueFrom: 75 | fieldRef: 76 | fieldPath: metadata.namespace 77 | securityContext: 78 | privileged: true 79 | capabilities: 80 | add: ["SYS_ADMIN"] 81 | allowPrivilegeEscalation: true 82 | volumeMounts: 83 | - name: vsphere-config-volume 84 | mountPath: /etc/cloud 85 | readOnly: true 86 | - name: plugin-dir 87 | mountPath: /csi 88 | - name: pods-mount-dir 89 | mountPath: /var/vcap/data/kubelet 90 | # needed so that any mounts setup inside this container are 91 | # propagated back to the host machine. 
92 | mountPropagation: "Bidirectional" 93 | - name: device-dir 94 | mountPath: /dev 95 | ports: 96 | - containerPort: 9808 97 | name: healthz 98 | livenessProbe: 99 | httpGet: 100 | path: /healthz 101 | port: healthz 102 | initialDelaySeconds: 10 103 | timeoutSeconds: 5 104 | - name: liveness-probe 105 | image: quay.io/k8scsi/livenessprobe:v2.1.0 106 | args: 107 | - "--v=4" 108 | - "--csi-address=/csi/csi.sock" 109 | volumeMounts: 110 | - name: plugin-dir 111 | mountPath: /csi 112 | volumes: 113 | - name: vsphere-config-volume 114 | secret: 115 | secretName: vsphere-config-secret 116 | - name: registration-dir 117 | hostPath: 118 | path: /var/vcap/data/kubelet/plugins_registry 119 | type: Directory 120 | - name: plugin-dir 121 | hostPath: 122 | path: /var/vcap/data/kubelet/plugins/csi.vsphere.vmware.com 123 | type: DirectoryOrCreate 124 | - name: pods-mount-dir 125 | hostPath: 126 | path: /var/vcap/data/kubelet 127 | type: Directory 128 | - name: device-dir 129 | hostPath: 130 | path: /dev 131 | tolerations: 132 | - effect: NoExecute 133 | operator: Exists 134 | - effect: NoSchedule 135 | operator: Exists 136 | -------------------------------------------------------------------------------- /tac/sync-proxy-cache.sh: -------------------------------------------------------------------------------- 1 | DATE="2021-03-01" 2 | 3 | charts-syncer sync --config tac.yaml --from-date $DATE 4 | 5 | -------------------------------------------------------------------------------- /tac/sync-replication-rules.sh: -------------------------------------------------------------------------------- 1 | DATE="2021-03-01" 2 | 3 | charts-syncer sync --config tac-replication-rules.yaml --from-date $DATE 4 | 5 | -------------------------------------------------------------------------------- /tac/tac-replication-rules.yaml: -------------------------------------------------------------------------------- 1 | source: 2 | repo: 3 | kind: HELM 4 | url: https://charts.trials.tac.bitnami.com/demo 5 | target: 6 | repoName: harbor 7 | containerRegistry: harbor.lab.home 8 | containerRepository: library 9 | repo: 10 | kind: HARBOR 11 | url: https://harbor.lab.home/chartrepo/library 12 | auth: 13 | username: admin 14 | password: Harbor12345 15 | charts: 16 | - redis -------------------------------------------------------------------------------- /tac/tac.yaml: -------------------------------------------------------------------------------- 1 | source: 2 | repo: 3 | kind: HELM 4 | url: https://charts.trials.tac.bitnami.com/demo 5 | target: 6 | repoName: harbor 7 | containerRegistry: harbor.lab.home 8 | containerRepository: gcr.io/sys-2b0109it/demo/bitnami 9 | repo: 10 | kind: HARBOR 11 | url: https://harbor.lab.home/chartrepo/library 12 | auth: 13 | username: admin 14 | password: Harbor12345 15 | charts: 16 | - redis -------------------------------------------------------------------------------- /tbs/README.md: -------------------------------------------------------------------------------- 1 | Notes learned from real engagements: 2 | 3 | 1. kbld does not actually rely on Docker. At most it uses the credentials from Docker to reach your registries, but even those credentials can be provided by environment variables. 4 | 2. `kp import` does not rely on kpack to do the image importing. `kp` itself does the work. TBS installs that use internal Git repos and internal image registries do NOT need to set any proxy values. 5 | 3. You can put both the registry secret and the git secret in the same service account and it works fine. 
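A minimal sketch of what that service account can look like (assuming the registry credential goes in both `secrets` and `imagePullSecrets` while the git credential goes only in `secrets`; the names `registry-creds` and `git-creds` are placeholders, not taken from this repo):

    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: tbs-service-account
      namespace: images
    secrets:
    - name: registry-creds   # dockerconfigjson secret for the image registry
    - name: git-creds        # basic-auth (or ssh) secret annotated for the git host
    imagePullSecrets:
    - name: registry-creds
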
Make two lists, instead of adding to the existing list. 6 | 4. You need to provide the Java buildpack with a maven settings.xml in order for it to use the on-prem Nexus. -------------------------------------------------------------------------------- /tbs/install-tbs-dependencies.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | kp import -f descriptor-100.0.87.yaml -------------------------------------------------------------------------------- /tbs/install.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | if [ -d "tbs-install" ]; then 6 | rm -rf tbs-install && mkdir tbs-install 7 | fi 8 | 9 | if [ ! -d "tbs-install" ]; then 10 | mkdir tbs-install 11 | fi 12 | 13 | 14 | 15 | tar -xvf build-service-1.1.4.tar -C tbs-install 16 | 17 | 18 | kbld relocate \ 19 | -f ./tbs-install/images.lock \ 20 | --lock-output ./tbs-install/images-relocated.lock \ 21 | --repository "$HARBOR_DOMAIN/library/build-service" 22 | 23 | 24 | kapp deploy -a tanzu-build-service \ 25 | -f <(ytt -f ./tbs-install/values.yaml \ 26 | -f ./tbs-install/manifests/ \ 27 | -f "$(mkcert -CAROOT)"/rootCA.pem \ 28 | -v docker_repository="$HARBOR_DOMAIN/library/build-service" \ 29 | -v docker_username="$HARBOR_USERNAME" \ 30 | -v docker_password="$HARBOR_PASSWORD" \ 31 | | kbld -f ./tbs-install/images-relocated.lock -f-) -------------------------------------------------------------------------------- /tbs/java-buildpack/configmap.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: settings-binding-metadata 5 | namespace: images 6 | data: 7 | kind: maven -------------------------------------------------------------------------------- /tbs/java-buildpack/image.yml: -------------------------------------------------------------------------------- 1 | apiVersion: kpack.io/v1alpha1 2 | kind: Image 3 | metadata: 4 | name: sample-binding-with-secret 5 | spec: 6 | tag: my-registry.com/repo 7 | builder: 8 | kind: ClusterBuilder 9 | name: default 10 | source: 11 | git: 12 | url: https://github.com/buildpack/sample-java-app.git 13 | revision: 0eccc6c2f01d9f055087ebbf03526ed0623e014a 14 | build: 15 | bindings: 16 | - name: settings 17 | secretRef: 18 | name: settings-xml 19 | metadataRef: 20 | name: settings-binding-metadata -------------------------------------------------------------------------------- /tbs/java-buildpack/secret.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: settings-xml 5 | namespace: images 6 | type: Opaque 7 | stringData: 8 | settings.xml: | 9 | <?xml version="1.0" encoding="UTF-8"?> 10 | <settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"> 11 | <mirrors> 12 | <mirror> 13 | <id>nexus</id> 14 | <name>nexus</name> 15 | <url>https://nexus.whatever</url> 16 | <mirrorOf>*</mirrorOf> 17 | </mirror> 18 | </mirrors> 19 | <servers> 20 | <server> 21 | <id>nexus</id> 22 | <username>my_login</username> 23 | <password>my_password</password> 24 | </server> 25 | </servers> 26 | </settings> -------------------------------------------------------------------------------- /tbs/notary.sh: -------------------------------------------------------------------------------- 1 | #!
/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | NOTARY_SERVER_URL="https://notary.lab.home:4443" 6 | IMAGE_REPOSITORY="harbor.lab.home/library/demo-app" 7 | 8 | notary -s "${NOTARY_SERVER_URL}" init "${IMAGE_REPOSITORY}" 9 | notary -s "${NOTARY_SERVER_URL}" key rotate "${IMAGE_REPOSITORY}" snapshot -r 10 | notary -s "${NOTARY_SERVER_URL}" publish "${IMAGE_REPOSITORY}" -------------------------------------------------------------------------------- /tbs/tbs-update-stack-100.0.30.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | kp clusterstack update base \ 6 | --build-image=registry.pivotal.io/tbs-dependencies/build-base:202010201325 \ 7 | --run-image=registry.pivotal.io/tbs-dependencies/run-base:202010201325 -------------------------------------------------------------------------------- /tbs/tbs-update-stack-100.0.66.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | kp clusterstack update base \ 6 | --build-image=registry.pivotal.io/tanzu-base-bionic-stack/build@sha256:3230de1daa9b37ddc5f38ea6f9a208e2f911d4e187baa3fa3c9f0630dbb018bb \ 7 | --run-image=registry.pivotal.io/tanzu-base-bionic-stack/run@sha256:70accedc26b4d9230ce6ae4b1e55a0588bb14447e074d8fc65bd3ced4211630b -------------------------------------------------------------------------------- /tds/gemfire/install.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | 6 | helm upgrade --install gemfire-operator gemfire-operator-1.0.0.tgz \ 7 | --set controllerImage="$HARBOR_DOMAIN/library/gemfire-controller:1.0.0" \ 8 | --namespace gemfire-system \ 9 | --create-namespace \ 10 | --wait -------------------------------------------------------------------------------- /tds/gemfire/relocate.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | docker pull registry.pivotal.io/tanzu-gemfire-for-kubernetes/gemfire-controller:1.0.0 6 | docker tag registry.pivotal.io/tanzu-gemfire-for-kubernetes/gemfire-controller:1.0.0 $HARBOR_DOMAIN/library/gemfire-controller:1.0.0 7 | docker push $HARBOR_DOMAIN/library/gemfire-controller:1.0.0 8 | 9 | 10 | docker pull registry.pivotal.io/tanzu-gemfire-for-kubernetes/gemfire-k8s:1.0.0 11 | docker tag registry.pivotal.io/tanzu-gemfire-for-kubernetes/gemfire-k8s:1.0.0 $HARBOR_DOMAIN/library/gemfire-k8s:1.0.0 12 | docker push $HARBOR_DOMAIN/library/gemfire-k8s:1.0.0 -------------------------------------------------------------------------------- /tds/mysql/relocate.sh: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | docker pull registry.pivotal.io/tanzu-mysql-for-kubernetes/tanzu-mysql-instance:0.2.0 6 | docker tag registry.pivotal.io/tanzu-mysql-for-kubernetes/tanzu-mysql-instance:0.2.0 $HARBOR_DOMAIN/library/tanzu-mysql-instance:0.2.0 7 | docker push $HARBOR_DOMAIN/library/tanzu-mysql-instance:0.2.0 8 | 9 | docker pull registry.pivotal.io/tanzu-mysql-for-kubernetes/tanzu-mysql-operator:0.2.0 10 | docker tag registry.pivotal.io/tanzu-mysql-for-kubernetes/tanzu-mysql-operator:0.2.0 $HARBOR_DOMAIN/library/tanzu-mysql-operator:0.2.0 11 | docker push $HARBOR_DOMAIN/library/tanzu-mysql-operator:0.2.0 -------------------------------------------------------------------------------- /tds/postgres/README: -------------------------------------------------------------------------------- 1 | 1.0.0 - 2 | For some reason the templates in the operator Helm chart are hardcoded to deploy to default namespace. Don't bother with 3 | --create-namespace or --namespace -------------------------------------------------------------------------------- /tds/postgres/install.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | POSTGRES_DIR="postgres-for-kubernetes-v1.1.0" 6 | 7 | if [ -d $POSTGRES_DIR ]; then 8 | echo "Deleting $POSTGRES_DIR" 9 | rm -rf $POSTGRES_DIR 10 | fi 11 | 12 | tar -xvf postgres-for-kubernetes-v1.1.0.tar.gz 13 | 14 | docker load -i $POSTGRES_DIR/images/postgres-instance 15 | docker load -i $POSTGRES_DIR/images/postgres-operator 16 | 17 | docker tag postgres-instance:v1.1.0 $HARBOR_DOMAIN/library/postgres-instance:v1.1.0 18 | docker tag postgres-operator:v1.1.0 $HARBOR_DOMAIN/library/postgres-operator:v1.1.0 19 | 20 | docker push $HARBOR_DOMAIN/library/postgres-instance:v1.1.0 21 | docker push $HARBOR_DOMAIN/library/postgres-operator:v1.1.0 22 | 23 | kapp deploy -a postgres-operator \ 24 | -f <(kubectl create secret docker-registry postgres-harbor \ 25 | --docker-server="$HARBOR_DOMAIN" \ 26 | --docker-username="$HARBOR_USERNAME" \ 27 | --docker-password="$HARBOR_PASSWORD" \ 28 | --dry-run=client \ 29 | -o yaml) 30 | 31 | helm upgrade --install postgres-operator ./$POSTGRES_DIR/operator \ 32 | --wait \ 33 | --set operatorImageRepository="$HARBOR_DOMAIN/library/postgres-operator" \ 34 | --set postgresImageRepository="$HARBOR_DOMAIN/library/postgres-instance" \ 35 | --set dockerRegistrySecretName="postgres-harbor" -------------------------------------------------------------------------------- /tds/rabbitmq/README.md: -------------------------------------------------------------------------------- 1 | Operator image is located in Tanzu Registry as well as in the tar file that contains the deployment manifests 2 | 3 | I'm going to use the image in tar file for now since I have to download it anyway -------------------------------------------------------------------------------- /tds/rabbitmq/cluster.yml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | apiVersion: v1 4 | kind: Namespace 5 | metadata: 6 | name: rabbitmq-cluster 7 | --- 8 | apiVersion: rabbitmq.com/v1beta1 9 | kind: RabbitmqCluster 10 | metadata: 11 | name: rabbitmqcluster 12 | namespace: rabbitmq-cluster 13 | spec: 14 | image: #@ data.values.harbor_domain + "/library/vmware-tanzu-rabbitmq:2020.12" -------------------------------------------------------------------------------- /tds/rabbitmq/install-cluster.sh: 
-------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | kapp deploy -a rabbitmq-cluster \ 6 | -f <(ytt --data-values-env=YTT_HOMELAB \ 7 | -f cluster.yml \ 8 | -f values.yml) -------------------------------------------------------------------------------- /tds/rabbitmq/install.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | FOLDER="operator-install" 6 | 7 | if [ -d "$FOLDER" ]; then 8 | rm -rf "$FOLDER" && mkdir "$FOLDER" 9 | fi 10 | 11 | if [ ! -d "$FOLDER" ]; then 12 | mkdir "$FOLDER" 13 | fi 14 | 15 | 16 | 17 | tar -xvf tanzu-rabbitMQ-for-kubernetes-1.0.0.tar -C "$FOLDER" 18 | 19 | # relocate Tanzu RabbitMQ image 20 | docker pull registry.pivotal.io/rabbitmq/vmware-tanzu-rabbitmq:2020.12 21 | docker tag registry.pivotal.io/rabbitmq/vmware-tanzu-rabbitmq:2020.12 $HARBOR_DOMAIN/library/vmware-tanzu-rabbitmq:2020.12 22 | docker push $HARBOR_DOMAIN/library/vmware-tanzu-rabbitmq:2020.12 23 | 24 | # using the image from the tar file for now 25 | # relocate Tanzu RabbitMQ operator image 26 | # docker pull registry.pivotal.io/p-rabbitmq-for-kubernetes/cluster-operator:1.4.0 27 | # docker tag registry.pivotal.io/p-rabbitmq-for-kubernetes/cluster-operator:1.4.0 $HARBOR_DOMAIN/library/cluster-operator:1.4.0 28 | # docker push $HARBOR_DOMAIN/library/cluster-operator:1.4.0 29 | 30 | # import the Tanzu operator image 31 | docker import "$FOLDER"/release-artifacts/images/cluster-operator.tar $HARBOR_DOMAIN/library/cluster-operator:1.4.0 32 | docker push $HARBOR_DOMAIN/library/cluster-operator:1.4.0 33 | 34 | # Use the Tanzu operator image instead of open-source 35 | sd \ 36 | "image: rabbitmqoperator/cluster-operator:1.4.0" \ 37 | "image: $HARBOR_DOMAIN/library/cluster-operator:1.4.0" \ 38 | "$FOLDER"/release-artifacts/manifests/cluster-operator.yml 39 | 40 | # install 41 | kapp deploy \ 42 | -a tanzu-rabbitmq-operator \ 43 | -f $FOLDER/release-artifacts/manifests/cluster-operator.yml 44 | -------------------------------------------------------------------------------- /tds/rabbitmq/values.yml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | harbor_domain: "" 4 | apps_domain: "" 5 | primary_domain: "" -------------------------------------------------------------------------------- /tkgi/create_user.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | uaa-cli target https://$TKGI_HOSTNAME:8443 6 | uaa-cli get-client-credentials-token admin -s $TKGI_ADMIN_CLIENT_SECRET 7 | 8 | uaa-cli create-user tanzu-gitops \ 9 | --email tanzu-gitops@notreal.com \ 10 | --password tanzu-gitops 11 | 12 | uaa-cli create-user app-developer \ 13 | --email app-developer@notreal.com \ 14 | --password app-developer 15 | 16 | uaa-cli add-member pks.clusters.manage tanzu-gitops -------------------------------------------------------------------------------- /tkgi/pull-configs.sh: -------------------------------------------------------------------------------- 1 | #!
/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | om \ 6 | staged-config \ 7 | --product-name pivotal-container-service \ 8 | --include-credentials > tkgi.yml 9 | 10 | 11 | om \ 12 | staged-director-config --no-redact > bosh.yml 13 | 14 | -------------------------------------------------------------------------------- /tkgi/push-configs.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | om \ 6 | configure-director --config bosh.yml 7 | 8 | om \ 9 | configure-product --config tkgi.yml 10 | -------------------------------------------------------------------------------- /tkgi/readme.md: -------------------------------------------------------------------------------- 1 | Make sure you disable 'Upgrade All clusters' errand 2 | 3 | TKGI clusters expect two worker nodes otherwise the default Pods in kube-system can't deploy -------------------------------------------------------------------------------- /tkgi/service-topology/profile.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "enable-service-topology", 3 | "description": "Enable Service Topology", 4 | "experimental_customizations": [ 5 | { 6 | "component": "kubelet", 7 | "arguments": { 8 | "feature-gates": "ServiceTopology=true,EndpointSlice=true" 9 | } 10 | }, 11 | { 12 | "component": "kube-apiserver", 13 | "arguments": { 14 | "feature-gates": "ServiceTopology=true,EndpointSlice=true" 15 | } 16 | }, 17 | { 18 | "component": "kube-controller-manager", 19 | "arguments": { 20 | "feature-gates": "ServiceTopology=true,EndpointSlice=true" 21 | } 22 | }, 23 | { 24 | "component": "kube-proxy", 25 | "arguments": { 26 | "feature-gates": "ServiceTopology=true,EndpointSlice=true" 27 | } 28 | }, 29 | { 30 | "component": "kube-scheduler", 31 | "arguments": { 32 | "feature-gates": "ServiceTopology=true,EndpointSlice=true" 33 | } 34 | } 35 | ] 36 | } -------------------------------------------------------------------------------- /tkgs/add-user.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | kubectl apply -f app-developer.yml -f psp-disable.yml -------------------------------------------------------------------------------- /tkgs/app-developer.yml: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: rolebinding-cluster-user-app-developer 5 | roleRef: 6 | kind: ClusterRole 7 | name: cluster-admin 8 | apiGroup: rbac.authorization.k8s.io 9 | subjects: 10 | - kind: User 11 | name: sso:app-developer@vsphere.local 12 | apiGroup: rbac.authorization.k8s.io -------------------------------------------------------------------------------- /tkgs/create-cluster.sh: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | NAME=$1 6 | 7 | # best-effort-medium is 2/8 8 | # best-effort-xlarge is 4/32 9 | 10 | tmc cluster create \ 11 | --template tkgs \ 12 | --allowed-storage-classes vsan-default-storage-policy \ 13 | --storage-class vsan-default-storage-policy \ 14 | --default-storage-class vsan-default-storage-policy \ 15 | --version v1.19.7+vmware.1-tkg.1.fc82c41 \ 16 | --cluster-group jmusselwhite \ 17 | --instance-type best-effort-medium \ 18 | --worker-instance-type best-effort-xlarge \ 19 | --worker-node-count 3 \ 20 | --name $NAME -------------------------------------------------------------------------------- /tkgs/psp-disable.yml: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: psp:authenticated 5 | roleRef: 6 | kind: ClusterRole 7 | name: psp:vmware-system-privileged 8 | apiGroup: rbac.authorization.k8s.io 9 | subjects: 10 | - kind: Group 11 | name: system:authenticated 12 | apiGroup: rbac.authorization.k8s.io -------------------------------------------------------------------------------- /tkgs/register-management-cluster.yml.template: -------------------------------------------------------------------------------- 1 | apiVersion: installers.tmc.cloud.vmware.com/v1alpha1 2 | kind: AgentInstall 3 | metadata: 4 | name: tmc-agent-installer-config 5 | namespace: <NAMESPACE> 6 | spec: 7 | operation: INSTALL 8 | registrationLink: <REGISTRATION_LINK> 9 | -------------------------------------------------------------------------------- /tkgs/register.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | NAMESPACE=$1 6 | URL=$2 7 | FILE=register-management-cluster.yml 8 | 9 | cp $FILE.template $FILE 10 | 11 | # replace the placeholder tokens in the copied template 12 | sd "<NAMESPACE>" $NAMESPACE $FILE 13 | sd "<REGISTRATION_LINK>" $URL $FILE 14 | 15 | kapp deploy -a tsm-management-registration -f $FILE -------------------------------------------------------------------------------- /tkgs/varlib-patchfile.yaml: -------------------------------------------------------------------------------- 1 | spec: 2 | topology: 3 | workers: 4 | volumes: 5 | - name: "varlib" 6 | mountPath: "/var/lib" 7 | capacity: 8 | storage: "50Gi" -------------------------------------------------------------------------------- /tmc/policydemo/mynodeport.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: mynodeport 5 | spec: 6 | type: NodePort 7 | ports: 8 | - name: someport 9 | port: 10000 -------------------------------------------------------------------------------- /tmc/policydemo/privileged.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: privileged 5 | namespace: demo 6 | spec: 7 | containers: 8 | - name: web 9 | image: nginx 10 | securityContext: 11 | privileged: true 12 | resources: 13 | requests: 14 | cpu: 1000m 15 | memory: 2Gi 16 | limits: 17 | cpu: 1000m 18 | memory: 2Gi -------------------------------------------------------------------------------- /tmc/policydemo/quota.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: quota 5 | namespace: demo 6 | spec: 7 | containers: 8 | - name: web 9 | image: nginx 10 | resources: 11 | requests: 12 | cpu: 3000m 13 | memory: 12Gi 14 | limits: 15 | cpu: 3000m 16 |
memory: 12Gi -------------------------------------------------------------------------------- /tmc/roles/apps-user.yaml: -------------------------------------------------------------------------------- 1 | type: 2 | kind: Role 3 | package: vmware.tanzu.manage.v1alpha1.iam.role 4 | version: v1alpha1 5 | fullName: 6 | name: apps-user 7 | meta: 8 | description: user can use basic deployment primitives as well as Argo deployment resources 9 | spec: 10 | isDeprecated: false 11 | resources: 12 | - NAMESPACE 13 | rules: 14 | - apiGroups: 15 | - "" 16 | resources: 17 | - pods 18 | - pods/log 19 | - secrets 20 | - deployments 21 | - configmaps 22 | verbs: 23 | - get 24 | - list 25 | - watch 26 | - create 27 | - update 28 | - delete 29 | - patch 30 | - apiGroups: 31 | - "argoproj.io" 32 | resources: 33 | - analysisruns 34 | - analysistemplates 35 | - clusteranalysistemplates 36 | - experiments 37 | - rollouts 38 | verbs: 39 | - get 40 | - list 41 | - watch 42 | - create 43 | - update 44 | - delete 45 | - patch -------------------------------------------------------------------------------- /tmc/roles/databases-user.yaml: -------------------------------------------------------------------------------- 1 | type: 2 | kind: Role 3 | package: vmware.tanzu.manage.v1alpha1.iam.role 4 | version: v1alpha1 5 | fullName: 6 | name: databases-user 7 | meta: 8 | description: can create an instance of any supported operator 9 | spec: 10 | isDeprecated: false 11 | resources: 12 | - NAMESPACE 13 | rules: 14 | - apiGroups: # postgres 15 | - sql.tanzu.vmware.com 16 | resources: 17 | - postgres 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | - create 23 | - update 24 | - patch 25 | - delete 26 | - apiGroups: # gemfire 27 | - gemfire.tanzu.vmware.com 28 | resources: 29 | - gemfireclusters 30 | verbs: 31 | - get 32 | - list 33 | - watch 34 | - create 35 | - update 36 | - patch 37 | - delete 38 | - apiGroups: # pod logs 39 | - "" 40 | resources: 41 | - pods 42 | - pods/log 43 | verbs: 44 | - get 45 | - list 46 | - watch 47 | - apiGroups: 48 | - "" 49 | resources: 50 | - configmaps 51 | verbs: 52 | - get 53 | - list 54 | - create 55 | - update 56 | - delete 57 | - patch 58 | - watch -------------------------------------------------------------------------------- /tmc/roles/tbs-user.yaml: -------------------------------------------------------------------------------- 1 | type: 2 | kind: Role 3 | package: vmware.tanzu.manage.v1alpha1.iam.role 4 | version: v1alpha1 5 | fullName: 6 | name: tbs-user 7 | meta: 8 | description: tbs user 9 | spec: 10 | isDeprecated: false 11 | resources: 12 | - NAMESPACE 13 | rules: 14 | - apiGroups: 15 | - kpack.io 16 | resources: 17 | - images 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | - create 23 | - update 24 | - patch 25 | - delete 26 | - apiGroups: 27 | - "" 28 | resources: 29 | - pods 30 | - pods/log 31 | verbs: 32 | - get 33 | - list 34 | - watch 35 | - apiGroups: 36 | - "" 37 | resources: 38 | - secrets 39 | verbs: 40 | - create 41 | - update 42 | - patch 43 | - delete 44 | - get 45 | - apiGroups: 46 | - kpack.io 47 | resources: 48 | - builds 49 | verbs: 50 | - get 51 | - list 52 | - watch 53 | - update 54 | - patch 55 | - apiGroups: 56 | - "" 57 | resources: 58 | - serviceaccounts 59 | verbs: 60 | - get 61 | - update 62 | - patch 63 | - create 64 | - apiGroups: 65 | - "" 66 | resources: 67 | - configmaps 68 | verbs: 69 | - get 70 | - list 71 | - create 72 | - update 73 | - delete 74 | - patch 75 | - watch 
-------------------------------------------------------------------------------- /tmc/tmc-attach-cluster.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | CLUSTER_NAME=$1 6 | 7 | tmc cluster attach \ 8 | --cluster-group $TMC_CLUSTER_GROUP_NAME \ 9 | --name $TMC_CLUSTER_GROUP_NAME-$CLUSTER_NAME 10 | 11 | #--http-proxy-url "http://192.168.1.52:8888" 12 | 13 | kapp deploy -a tmc -f k8s-attach-manifest.yaml 14 | -------------------------------------------------------------------------------- /tobs/README.md: -------------------------------------------------------------------------------- 1 | 1. Use the `collector` folder to install the Wavefront Collector with custom HTTP proxy settings 2 | 2. Use the `prometheus` folder to install Prometheus and the Wavefront Prometheus storage adapter 3 | 4 | The `collector` folder is configured OOTB to gather K8s metrics from the hosts, while the `prometheus` folder is only setup for Pods, but you can more easily query those metrics before they reach Wavefront using the `promtheus` folder -------------------------------------------------------------------------------- /tobs/collector/cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: collector-config 5 | namespace: tanzu-observability 6 | data: 7 | collector.yaml: | 8 | clusterName: cluster1 9 | enableEvents: false 10 | enableDiscovery: true 11 | flushInterval: 30s 12 | 13 | sinks: 14 | - proxyAddress: wavefront-proxy.wavefront-collector.svc.cluster.local:2878 15 | testMode: false 16 | filters: 17 | # Filter out generated labels 18 | tagExclude: 19 | - 'label?controller?revision*' 20 | - 'label?pod?template*' 21 | - 'annotation_kubectl_kubernetes_io_last_applied_configuration' 22 | 23 | sources: 24 | kubernetes_source: 25 | url: 'https://kubernetes.default.svc' 26 | kubeletPort: 10250 27 | kubeletHttps: true 28 | useServiceAccount: true 29 | insecure: true 30 | prefix: 'kubernetes.' 31 | 32 | filters: 33 | metricDenyList: 34 | - 'kubernetes.sys_container.*' 35 | 36 | internal_stats_source: 37 | prefix: 'kubernetes.' 38 | 39 | kubernetes_state_source: 40 | prefix: 'kubernetes.' 41 | 42 | telegraf_sources: 43 | # enable all telegraf plugins 44 | - plugins: [] 45 | 46 | # discovery rules for auto-discovery of pods and services 47 | discovery: 48 | enable_runtime_plugins: true 49 | 50 | plugins: 51 | 52 | ########################################################################## 53 | # Kubernetes component auto discovery 54 | ########################################################################## 55 | 56 | # auto-discover kube DNS 57 | - name: kube-dns-discovery 58 | type: prometheus 59 | selectors: 60 | images: 61 | - '*kube-dns/sidecar*' 62 | labels: 63 | k8s-app: 64 | - kube-dns 65 | port: 10054 66 | path: /metrics 67 | scheme: http 68 | prefix: kube.dns. 69 | filters: 70 | metricAllowList: 71 | - 'kube.dns.http.request.duration.microseconds' 72 | - 'kube.dns.http.request.size.bytes' 73 | - 'kube.dns.http.requests.total.counter' 74 | - 'kube.dns.http.response.size.bytes' 75 | - 'kube.dns.kubedns.dnsmasq.*' 76 | - 'kube.dns.process.*' 77 | 78 | # auto-discover coredns 79 | - name: coredns-discovery 80 | type: prometheus 81 | selectors: 82 | images: 83 | - '*coredns:*' 84 | labels: 85 | k8s-app: 86 | - kube-dns 87 | port: 9153 88 | path: /metrics 89 | scheme: http 90 | prefix: kube.coredns. 
91 | filters: 92 | metricAllowList: 93 | - 'kube.coredns.coredns.cache.*' 94 | - 'kube.coredns.coredns.dns.request.count.total.counter' 95 | - 'kube.coredns.coredns.dns.request.duration.seconds' 96 | - 'kube.coredns.coredns.dns.request.size.bytes' 97 | - 'kube.coredns.coredns.dns.request.type.count.total.counter' 98 | - 'kube.coredns.coredns.dns.response.rcode.count.total.counter' 99 | - 'kube.coredns.coredns.dns.response.size.bytes' 100 | - 'kube.coredns.process.*' 101 | -------------------------------------------------------------------------------- /tobs/collector/ds.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: wavefront-collector 5 | namespace: tanzu-observability 6 | labels: 7 | k8s-app: wavefront-collector 8 | name: wavefront-collector 9 | spec: 10 | selector: 11 | matchLabels: 12 | k8s-app: wavefront-collector 13 | template: 14 | metadata: 15 | labels: 16 | k8s-app: wavefront-collector 17 | spec: 18 | tolerations: 19 | - effect: NoSchedule 20 | key: node.alpha.kubernetes.io/role 21 | operator: Exists 22 | - effect: NoSchedule 23 | key: node-role.kubernetes.io/master 24 | operator: Exists 25 | 26 | serviceAccountName: wavefront-collector 27 | 28 | containers: 29 | - name: wavefront-collector 30 | image: wavefronthq/wavefront-kubernetes-collector:1.2.6 31 | imagePullPolicy: IfNotPresent 32 | command: 33 | - /wavefront-collector 34 | - --daemon=true 35 | - --config-file=/etc/collector/collector.yaml 36 | volumeMounts: 37 | - name: procfs 38 | mountPath: /host/proc 39 | readOnly: true 40 | - mountPath: /etc/collector/ 41 | name: collector-config 42 | readOnly: true 43 | env: 44 | - name: HOST_PROC 45 | value: /host/proc 46 | - name: POD_NODE_NAME 47 | valueFrom: 48 | fieldRef: 49 | apiVersion: v1 50 | fieldPath: spec.nodeName 51 | - name: POD_NAMESPACE_NAME 52 | valueFrom: 53 | fieldRef: 54 | apiVersion: v1 55 | fieldPath: metadata.namespace 56 | volumes: 57 | - name: procfs 58 | hostPath: 59 | path: /proc 60 | - name: collector-config 61 | configMap: 62 | name: collector-config 63 | -------------------------------------------------------------------------------- /tobs/collector/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | annotations: 5 | rbac.authorization.kubernetes.io/autoupdate: "true" 6 | creationTimestamp: null 7 | labels: 8 | kubernetes.io/bootstrapping: rbac-defaults 9 | name: wavefront-collector 10 | rules: 11 | - apiGroups: 12 | - "" 13 | resources: 14 | - events 15 | - namespaces 16 | - nodes 17 | - nodes/stats 18 | - pods 19 | - replicationcontrollers 20 | - services 21 | verbs: 22 | - get 23 | - list 24 | - watch 25 | - apiGroups: 26 | - "" 27 | resources: 28 | - configmaps 29 | verbs: 30 | - get 31 | - update 32 | - create 33 | - list 34 | - watch 35 | 36 | # required for kubernetes_state_source 37 | - apiGroups: 38 | - apps 39 | resources: 40 | - daemonsets 41 | - deployments 42 | - statefulsets 43 | - replicasets 44 | verbs: 45 | - get 46 | - list 47 | - watch 48 | - apiGroups: 49 | - batch 50 | resources: 51 | - jobs 52 | - cronjobs 53 | verbs: 54 | - get 55 | - list 56 | - watch 57 | - apiGroups: 58 | - autoscaling 59 | resources: 60 | - horizontalpodautoscalers 61 | verbs: 62 | - get 63 | - list 64 | - watch 65 | 66 | - nonResourceURLs: ["/metrics"] 67 | verbs: 68 | - get 69 | 
-------------------------------------------------------------------------------- /tobs/collector/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: wavefront-collector 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: wavefront-collector 9 | subjects: 10 | - kind: ServiceAccount 11 | name: wavefront-collector 12 | namespace: tanzu-observability 13 | -------------------------------------------------------------------------------- /tobs/collector/sa.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: wavefront-collector 5 | namespace: tanzu-observability 6 | -------------------------------------------------------------------------------- /tobs/install.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | kapp deploy -a tanzu-observability \ 6 | -f namespace.yml \ 7 | -f <(ytt --data-values-env=YTT_WAVEFRONT \ 8 | -f proxy) \ 9 | -f <(ytt --data-values-env=YTT_HOMELAB \ 10 | -f prometheus) -------------------------------------------------------------------------------- /tobs/namespace.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: tanzu-observability -------------------------------------------------------------------------------- /tobs/prometheus/adapter.yaml: -------------------------------------------------------------------------------- 1 | kind: Deployment 2 | apiVersion: apps/v1 3 | metadata: 4 | name: prometheus-storage-adapter 5 | namespace: tanzu-observability 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: prometheus-storage-adapter 11 | template: 12 | metadata: 13 | labels: 14 | app: prometheus-storage-adapter 15 | spec: 16 | containers: 17 | - name: prometheus-storage-adapter 18 | image: wavefronthq/prometheus-storage-adapter:latest 19 | command: 20 | - /bin/adapter 21 | - -listen=1234 22 | - -proxy=wavefront-proxy 23 | - -proxy-port=2878 24 | - -prefix=tanzu-gitops 25 | --- 26 | apiVersion: v1 27 | kind: Service 28 | metadata: 29 | name: storage-adapter-service 30 | namespace: tanzu-observability 31 | spec: 32 | selector: 33 | app: prometheus-storage-adapter 34 | ports: 35 | - name: adapter-port 36 | protocol: TCP 37 | port: 80 38 | targetPort: 1234 39 | -------------------------------------------------------------------------------- /tobs/prometheus/clusterrole.yml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1beta1 2 | kind: ClusterRole 3 | metadata: 4 | name: prometheus 5 | rules: 6 | - apiGroups: [""] 7 | resources: 8 | - pods 9 | verbs: ["get", "list", "watch"] -------------------------------------------------------------------------------- /tobs/prometheus/clusterrolebinding.yml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1beta1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: prometheus 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: prometheus 9 | subjects: 10 | - kind: ServiceAccount 11 | name: prometheus 12 | namespace: tanzu-observability 
-------------------------------------------------------------------------------- /tobs/prometheus/config.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: prometheus 5 | namespace: tanzu-observability 6 | data: 7 | prometheus.yml: | 8 | global: 9 | scrape_interval: 15s 10 | remote_write: 11 | - url: "http://storage-adapter-service/receive" 12 | scrape_configs: 13 | - job_name: 'kubernetes-pods' 14 | 15 | kubernetes_sd_configs: 16 | - role: pod 17 | 18 | relabel_configs: 19 | - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] 20 | action: keep 21 | regex: true 22 | - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] 23 | action: replace 24 | target_label: __metrics_path__ 25 | regex: (.+) 26 | - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] 27 | action: replace 28 | regex: ([^:]+)(?::\d+)?;(\d+) 29 | replacement: $1:$2 30 | target_label: __address__ 31 | - action: labelmap 32 | regex: __meta_kubernetes_pod_label_(.+) 33 | - source_labels: [__meta_kubernetes_namespace] 34 | action: replace 35 | target_label: kubernetes_namespace 36 | - source_labels: [__meta_kubernetes_pod_name] 37 | action: replace 38 | target_label: kubernetes_pod_name 39 | 40 | 41 | -------------------------------------------------------------------------------- /tobs/prometheus/deployment.yml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: prometheus 5 | namespace: tanzu-observability 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: prometheus 11 | template: 12 | metadata: 13 | labels: 14 | app: prometheus 15 | spec: 16 | serviceAccountName: prometheus 17 | volumes: 18 | - name: prometheus-volume 19 | configMap: 20 | name: prometheus 21 | containers: 22 | - name: prometheus 23 | image: index.docker.io/prom/prometheus:v2.24.1 24 | command: ['prometheus'] 25 | args: ["--config.file", "/tmp/prometheus.yml"] 26 | volumeMounts: 27 | - name: prometheus-volume 28 | mountPath: /tmp/prometheus.yml 29 | subPath: prometheus.yml 30 | ports: 31 | - containerPort: 9090 32 | --- 33 | apiVersion: v1 34 | kind: Service 35 | metadata: 36 | name: prometheus 37 | namespace: tanzu-observability 38 | spec: 39 | type: ClusterIP 40 | selector: 41 | app: prometheus 42 | ports: 43 | - protocol: TCP 44 | port: 9090 45 | -------------------------------------------------------------------------------- /tobs/prometheus/sa.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: prometheus 5 | namespace: tanzu-observability -------------------------------------------------------------------------------- /tobs/prometheus/values.yml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | harbor_domain: "" 4 | apps_domain: "" 5 | primary_domain: "" -------------------------------------------------------------------------------- /tobs/prometheus/virtualservice.yml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | apiVersion: networking.istio.io/v1alpha3 4 | kind: VirtualService 5 | metadata: 6 | name: prometheus-virtual-service 7 | namespace: tanzu-observability 8 | spec: 9 | hosts: 10 | - #@ "prometheus." 
+ data.values.apps_domain 11 | gateways: 12 | - app-gateway.istio-system.svc.cluster.local 13 | http: 14 | - match: 15 | - uri: 16 | prefix: / 17 | route: 18 | - destination: 19 | host: prometheus 20 | port: 21 | number: 9090 -------------------------------------------------------------------------------- /tobs/proxy/proxy.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | labels: 7 | app: wavefront-proxy 8 | name: wavefront-proxy 9 | name: wavefront-proxy 10 | namespace: tanzu-observability 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | app: wavefront-proxy 16 | template: 17 | metadata: 18 | labels: 19 | app: wavefront-proxy 20 | spec: 21 | containers: 22 | - name: wavefront-proxy 23 | image: wavefronthq/proxy:9.1 24 | imagePullPolicy: IfNotPresent 25 | env: 26 | - name: WAVEFRONT_URL 27 | value: #@ data.values.wavefront_url 28 | - name: WAVEFRONT_TOKEN 29 | value: #@ data.values.wavefront_token 30 | - name: WAVEFRONT_PROXY_ARGS 31 | value: "--proxyHost 192.168.1.52 --proxyPort 8888" 32 | ports: 33 | - containerPort: 2878 34 | protocol: TCP 35 | securityContext: 36 | privileged: false 37 | --- 38 | apiVersion: v1 39 | kind: Service 40 | metadata: 41 | name: wavefront-proxy 42 | labels: 43 | app: wavefront-proxy 44 | namespace: tanzu-observability 45 | spec: 46 | ports: 47 | - name: wavefront 48 | port: 2878 49 | protocol: TCP 50 | selector: 51 | app: wavefront-proxy 52 | -------------------------------------------------------------------------------- /tobs/proxy/values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | wavefront_url: "" 4 | wavefront_token: "" -------------------------------------------------------------------------------- /tsm/certificate.yml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1alpha2 2 | kind: Certificate 3 | metadata: 4 | name: wildcard-tls 5 | namespace: istio-system 6 | spec: 7 | dnsNames: 8 | - "*.apps.lab.home" 9 | - "*.databases.lab.home" 10 | issuerRef: 11 | kind: ClusterIssuer 12 | name: mkcert 13 | secretName: wildcard-tls -------------------------------------------------------------------------------- /tsm/gateway.yml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | apiVersion: networking.istio.io/v1alpha3 4 | kind: Gateway 5 | metadata: 6 | name: app-gateway 7 | namespace: istio-system 8 | spec: 9 | selector: 10 | istio: ingressgateway 11 | servers: 12 | - port: 13 | number: 443 14 | name: https 15 | protocol: HTTPS 16 | tls: 17 | mode: SIMPLE 18 | credentialName: wildcard-tls 19 | hosts: 20 | - "*.apps.lab.home" 21 | - "*.databases.lab.home" -------------------------------------------------------------------------------- /tsm/install.sh: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | kapp deploy -a istio-tls -f certificate.yml -f gateway.yml -------------------------------------------------------------------------------- /velero/README.md: -------------------------------------------------------------------------------- 1 | # Velero 2 | 3 | `install.sh` 4 | 5 | Follow the commented out instructions to fix 6 | 7 | ### MinIO 8 | MinIO is installed on K8s at https://minio.lab.home 9 | 10 | ### Update `mc` client 11 | `mc update` 12 | 13 | ### Configure MinIO server 14 | `mc alias set tanzu-minio https://minio.lab.home accesskey secretkey` 15 | 16 | ### List buckets 17 | `mc ls tanzu-minio` 18 | 19 | ### Make bucket 20 | `mc mb tanzu-minio ` -------------------------------------------------------------------------------- /velero/create-backup.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | echo 'BACKUP_NUMBER=$1' 6 | 7 | BACKUP_NUMBER=$1 8 | 9 | velero backup create spring-petclinic-$BACKUP_NUMBER \ 10 | --include-namespaces=spring-petclinic -------------------------------------------------------------------------------- /velero/create-restore.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | echo 'BACKUP_NUMBER=$1' 6 | echo 'RESTORE_NUMBER=$2' 7 | 8 | BACKUP_NUMBER=$1 9 | RESTORE_NUMBER=$2 10 | 11 | 12 | 13 | velero restore create spring-petclinic-restore-$RESTORE_NUMBER \ 14 | --from-backup spring-petclinic-$BACKUP_NUMBER -------------------------------------------------------------------------------- /velero/credentials-velero: -------------------------------------------------------------------------------- 1 | [default] 2 | aws_access_key_id=accesskey 3 | aws_secret_access_key=secretkey -------------------------------------------------------------------------------- /velero/demo.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | source ../demo-magic.sh 4 | 5 | BACKUP_NUMBER=$1 6 | 7 | pe 'mc alias list' 8 | 9 | 10 | pe 'mc tree tanzu-minio' 11 | 12 | p 'velero install \ 13 | --provider aws \ 14 | --plugins velero/velero-plugin-for-aws:v1.0.0 \ 15 | --bucket velero \ 16 | --secret-file ./credentials-velero \ 17 | --use-volume-snapshots=false \ 18 | --backup-location-config region=minio,s3ForcePathStyle="true",s3Url=https://minio.lab.home,insecureSkipTLSVerify="true"' 19 | 20 | pe "velero backup create spring-petclinic-$BACKUP_NUMBER \ 21 | --include-namespaces=spring-petclinic \ 22 | --snapshot-volumes \ 23 | --volume-snapshot-locations vsl-vsphere" 24 | 25 | pe "watch velero backup describe spring-petclinic-$BACKUP_NUMBER" 26 | 27 | pe 'mc tree tanzu-minio' 28 | 29 | pe "mc ls tanzu-minio/velero/backups/spring-petclinic-$BACKUP_NUMBER" -------------------------------------------------------------------------------- /velero/install-csi.sh: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | 6 | kapp deploy -a velero -f <(velero install \ 7 | --provider aws \ 8 | --plugins velero/velero-plugin-for-aws:v1.1.0,velero/velero-plugin-for-csi:v0.1.0 \ 9 | --bucket velero \ 10 | --secret-file ./credentials-velero \ 11 | --features=EnableCSI \ 12 | --use-volume-snapshots=true \ 13 | --backup-location-config region=minio,s3ForcePathStyle="true",s3Url=https://minio.lab.home \ 14 | --cacert "$(mkcert -CAROOT)/rootCA.pem" \ 15 | --dry-run \ 16 | -o yaml) 17 | 18 | #velero client config set features=EnableCSI -------------------------------------------------------------------------------- /velero/install-restic.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | 6 | kapp deploy -a velero -f <(velero install \ 7 | --provider aws \ 8 | --plugins velero/velero-plugin-for-aws:v1.1.0 \ 9 | --bucket velero \ 10 | --secret-file ./credentials-velero \ 11 | --use-volume-snapshots=false \ 12 | --use-restic \ 13 | --backup-location-config region=minio,s3ForcePathStyle="true",s3Url=https://minio.lab.home \ 14 | --cacert "$(mkcert -CAROOT)/rootCA.pem" \ 15 | --dry-run \ 16 | -o yaml) 17 | 18 | 19 | kubectl patch daemonset restic \ 20 | --namespace velero \ 21 | --type='json' \ 22 | --patch='[{"op": "replace", "path": "/spec/template/spec/volumes/0/hostPath/path", "value":"/var/vcap/data/kubelet/pods"}]' -------------------------------------------------------------------------------- /vsphere/lab-off.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | # there are 3 stages of autostart 6 | # no autostart 7 | # unordered autostart 8 | # ordered autostart 9 | # the default for host.autostart.add is unordered which is fine 10 | 11 | # Note: turn off the hosts at the ESXi level 12 | # If you use vCenter you might turn off the host that is running vCenter 13 | 14 | 15 | function configure_autostart() { 16 | # I would use GOVC_URL but it doesnt work in the xargs statement 17 | ESXI_HOST=$1 18 | # using runtime.powerState to ignore stemcell VMs 19 | govc find -type m -u="$ESXI_HOST" . -runtime.powerState poweredOn | xargs -I {} govc host.autostart.add -u="$ESXI_HOST" "{}" 20 | govc host.autostart.info -u="$ESXI_HOST" 21 | } 22 | 23 | function shutdown_host() { 24 | ESXI_HOST=$1 25 | HOST_NAME=$2 26 | govc host.shutdown -u="$ESXI_HOST" -f=true "$HOST_NAME" 27 | } 28 | 29 | configure_autostart "https://top.rack.lab.home" 30 | configure_autostart "https://middle.rack.lab.home" 31 | configure_autostart "https://bottom.rack.lab.home" 32 | 33 | shutdown_host "https://top.rack.lab.home" "top" 34 | shutdown_host "https://middle.rack.lab.home" "middle" 35 | shutdown_host "https://bottom.rack.lab.home" "bottom" 36 | 37 | 38 | 39 | 40 | --------------------------------------------------------------------------------