├── .gitignore ├── assets ├── vision.png └── deploy-tap.png ├── carvel-package ├── manifests │ ├── namespace.yml │ ├── package-metadata.yml │ ├── package-install.yml │ ├── rbac.yml │ ├── package-0.3.0.yml │ ├── secret.yml │ ├── cm.yml │ ├── package-0.1.0.yml │ └── package-0.2.0.yml ├── dashboard-manifest │ ├── config │ │ ├── 00_dashboard-namespace.yaml │ │ ├── 08_dashboard-cluster-issuer.yaml │ │ ├── 10_dashboard-issuer.yaml │ │ ├── 04_dashboard-configmap.yaml │ │ ├── 01_dashboard-serviceaccount.yaml │ │ ├── 02_dashboard-service-scraper.yaml │ │ ├── 11_dashboard-admin-role.yaml │ │ ├── 02_dashboard-service.yaml │ │ ├── 09_dashboard-certificate.yaml │ │ ├── 03_dashboard-secret.yaml │ │ ├── 07_dashboard-ingress.yaml │ │ ├── 06_dashboard-deployment-scraper.yaml │ │ ├── 06_dashboard-deployment.yaml │ │ └── 05_dashboard-rbac.yaml │ └── values.yml ├── pkg-manifests │ ├── package-install.yml │ ├── rbac.yml │ ├── package-template.yml │ └── package-metadata.yml ├── gen_pkg_img.sh └── README.md ├── scripts ├── tools │ ├── rm-finalizer-ns.sh │ ├── delete_ns_resources.sh │ ├── inject_ca.sh │ └── gen-selfsigned-cert.sh ├── db │ ├── postgres-instance.yml │ ├── uninstall_postgresql.sh │ ├── install_postgresql.sh │ └── tds-postgres.sh └── tap.sh ├── supplychain ├── quarkus-sc │ ├── helpers.lib.yml │ ├── k8s │ │ ├── clusterstore.yml │ │ ├── clusterstack.yml │ │ └── clusterbuilder.yml │ ├── values.yaml │ ├── workload.yaml │ ├── templates │ │ ├── git-source-template.yaml │ │ ├── kpack-image-template.yaml │ │ └── app-deploy-template.yaml │ └── supply-chain.yaml └── tap-sc │ ├── basic-image-to-url.yml │ └── source-to-url.yml ├── runtimes ├── buildpacks │ ├── clusterstore.yml │ ├── clusterstack.yml │ └── clusterbuilder.yml ├── build │ └── kpack-image.yml ├── deploy │ └── quarkus-kapp.yml └── README.md ├── tanzu-spring-boot-demo ├── config │ ├── catalog.yaml │ └── workload.yaml └── Tiltfile ├── demo-scenario.md ├── export-secrets.md ├── demo.md ├── packages.md └── README.md /.gitignore: 
-------------------------------------------------------------------------------- 1 | .idea 2 | 3 | _temp 4 | /scripts/test.sh 5 | -------------------------------------------------------------------------------- /assets/vision.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/halkyonio/tap/HEAD/assets/vision.png -------------------------------------------------------------------------------- /assets/deploy-tap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/halkyonio/tap/HEAD/assets/deploy-tap.png -------------------------------------------------------------------------------- /carvel-package/manifests/namespace.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: pkg-demo -------------------------------------------------------------------------------- /scripts/tools/rm-finalizer-ns.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | NS=$1 3 | kubectl get ns $NS -o json | jq '.spec.finalizers = []' | kubectl replace --raw "/api/v1/namespaces/$NS/finalize" -f - -------------------------------------------------------------------------------- /supplychain/quarkus-sc/helpers.lib.yml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | #@ def img(name): 4 | #@ return "/".join([ 5 | #@ data.values.image_prefix, 6 | #@ name 7 | #@ ]) 8 | #@ end -------------------------------------------------------------------------------- /carvel-package/dashboard-manifest/config/00_dashboard-namespace.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | --- 3 | apiVersion: v1 4 | kind: Namespace 5 | metadata: 6 | name: #@ data.values.namespace 7 | 
-------------------------------------------------------------------------------- /carvel-package/dashboard-manifest/config/08_dashboard-cluster-issuer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: selfsigned-issuer 5 | spec: 6 | selfSigned: {} -------------------------------------------------------------------------------- /runtimes/buildpacks/clusterstore.yml: -------------------------------------------------------------------------------- 1 | apiVersion: kpack.io/v1alpha2 2 | kind: ClusterStore 3 | metadata: 4 | name: runtime 5 | spec: 6 | sources: 7 | - image: ghcr.io/halkyonio/quarkus-builder:jvm -------------------------------------------------------------------------------- /carvel-package/dashboard-manifest/config/10_dashboard-issuer.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | --- 3 | apiVersion: cert-manager.io/v1 4 | kind: Issuer 5 | metadata: 6 | name: my-ca-issuer 7 | namespace: #@ data.values.namespace 8 | spec: 9 | ca: 10 | secretName: k8s-ui-secret -------------------------------------------------------------------------------- /carvel-package/dashboard-manifest/config/04_dashboard-configmap.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | --- 3 | kind: ConfigMap 4 | apiVersion: v1 5 | metadata: 6 | labels: 7 | k8s-app: kubernetes-dashboard 8 | name: kubernetes-dashboard-settings 9 | namespace: #@ data.values.namespace 10 | -------------------------------------------------------------------------------- /supplychain/quarkus-sc/k8s/clusterstore.yml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | #@ load("helpers.lib.yml", "img") 3 | 4 | --- 5 | apiVersion: kpack.io/v1alpha2 6 | kind: ClusterStore 7 | 
metadata: 8 | name: runtime 9 | spec: 10 | sources: 11 | - image: #@ img("buildpacks-quarkus-builder:jvm") -------------------------------------------------------------------------------- /carvel-package/dashboard-manifest/config/01_dashboard-serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | --- 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | labels: 7 | k8s-app: kubernetes-dashboard 8 | name: kubernetes-dashboard 9 | namespace: #@ data.values.namespace 10 | -------------------------------------------------------------------------------- /runtimes/buildpacks/clusterstack.yml: -------------------------------------------------------------------------------- 1 | apiVersion: kpack.io/v1alpha2 2 | kind: ClusterStack 3 | metadata: 4 | name: runtime 5 | spec: 6 | id: "io.quarkus.buildpacks.stack.jvm" 7 | buildImage: 8 | image: "ghcr.io/halkyonio/quarkus-stack:build" 9 | runImage: 10 | image: "ghcr.io/halkyonio/quarkus-stack:run" -------------------------------------------------------------------------------- /carvel-package/manifests/package-metadata.yml: -------------------------------------------------------------------------------- 1 | apiVersion: data.packaging.carvel.dev/v1alpha1 2 | kind: PackageMetadata 3 | metadata: 4 | name: kubernetes-dashboard.halkyonio.io 5 | namespace: pkg-demo 6 | spec: 7 | displayName: "Kubernetes dashboard" 8 | longDescription: "Kubernetes dashboard" 9 | shortDescription: "Kubernetes dashboard" 10 | categories: 11 | - demo -------------------------------------------------------------------------------- /supplychain/quarkus-sc/k8s/clusterstack.yml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | #@ load("helpers.lib.yml", "img") 3 | 4 | --- 5 | apiVersion: kpack.io/v1alpha2 6 | kind: ClusterStack 7 | metadata: 8 | name: runtime 9 | spec: 10 | id: 
"io.quarkus.buildpacks.stack.jvm" 11 | buildImage: 12 | image: #@ img("buildpacks-quarkus-build:jvm") 13 | runImage: 14 | image: #@ img("buildpacks-quarkus-run:jvm") -------------------------------------------------------------------------------- /carvel-package/dashboard-manifest/config/02_dashboard-service-scraper.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | --- 3 | kind: Service 4 | apiVersion: v1 5 | metadata: 6 | labels: 7 | k8s-app: dashboard-metrics-scraper 8 | name: dashboard-metrics-scraper 9 | namespace: #@ data.values.namespace 10 | spec: 11 | ports: 12 | - port: 8000 13 | targetPort: 8000 14 | selector: 15 | k8s-app: dashboard-metrics-scraper -------------------------------------------------------------------------------- /carvel-package/pkg-manifests/package-install.yml: -------------------------------------------------------------------------------- 1 | apiVersion: packaging.carvel.dev/v1alpha1 2 | kind: PackageInstall 3 | metadata: 4 | name: kubernetes-dashboard 5 | namespace: pkg-demo 6 | spec: 7 | serviceAccountName: carvel-package 8 | packageRef: 9 | refName: kubernetes-dashboard.halkyonio.io 10 | versionSelection: 11 | constraints: 0.1.0 12 | values: 13 | - secretRef: 14 | name: k8s-ui-values -------------------------------------------------------------------------------- /runtimes/build/kpack-image.yml: -------------------------------------------------------------------------------- 1 | apiVersion: kpack.io/v1alpha2 2 | kind: Image 3 | metadata: 4 | name: quarkus-petclinic-image 5 | namespace: tap-demo 6 | spec: 7 | tag: ghcr.io/halkyonio/quarkus-tap-petclinic 8 | serviceAccount: default 9 | builder: 10 | kind: ClusterBuilder 11 | name: runtime 12 | source: 13 | git: 14 | url: https://github.com/halkyonio/quarkus-tap-petclinic 15 | revision: main -------------------------------------------------------------------------------- /supplychain/quarkus-sc/values.yaml: 
-------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | service_account_name: default 4 | namespace: tap-demo 5 | image_prefix: ghcr.io/halkyonio 6 | runtime: 7 | deliverable_type: quarkus 8 | kubernetes_io_partof: quarkus-java-web-app 9 | buildpack: 10 | kpack_cluster_builder: runtime 11 | workload: 12 | name: quarkus-app 13 | github: 14 | url: https://github.com/halkyonio/quarkus-tap-petclinic.git 15 | ref: main 16 | -------------------------------------------------------------------------------- /carvel-package/dashboard-manifest/config/11_dashboard-admin-role.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | --- 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRoleBinding 5 | metadata: 6 | name: dashboard-admin 7 | roleRef: 8 | apiGroup: rbac.authorization.k8s.io 9 | kind: ClusterRole 10 | name: cluster-admin 11 | subjects: 12 | - kind: ServiceAccount 13 | name: kubernetes-dashboard 14 | namespace: #@ data.values.namespace -------------------------------------------------------------------------------- /carvel-package/dashboard-manifest/config/02_dashboard-service.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | --- 3 | kind: Service 4 | apiVersion: v1 5 | metadata: 6 | labels: 7 | k8s-app: kubernetes-dashboard 8 | annotations: 9 | projectcontour.io/upstream-protocol.tls: "443" 10 | name: kubernetes-dashboard 11 | namespace: #@ data.values.namespace 12 | spec: 13 | ports: 14 | - port: 443 15 | targetPort: 8443 16 | selector: 17 | k8s-app: kubernetes-dashboard 18 | -------------------------------------------------------------------------------- /runtimes/buildpacks/clusterbuilder.yml: -------------------------------------------------------------------------------- 1 | apiVersion: kpack.io/v1alpha2 2 | kind: ClusterBuilder 3 | metadata: 4 | name: 
runtime 5 | spec: 6 | order: 7 | - group: 8 | - id: "io.quarkus.buildpacks.buildpack" 9 | serviceAccountRef: 10 | name: kp-default-repository-serviceaccount 11 | namespace: kpack 12 | stack: 13 | name: runtime 14 | kind: ClusterStack 15 | store: 16 | name: runtime 17 | kind: ClusterStore 18 | tag: 'ghcr.io/halkyonio/quarkus-builder:jvm' -------------------------------------------------------------------------------- /tanzu-spring-boot-demo/config/catalog.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: backstage.io/v1alpha1 3 | kind: Component 4 | metadata: 5 | name: spring-tap-petclinic 6 | description: Spring Tap Petclinic App 7 | tags: 8 | - tanzu 9 | - java 10 | - spring-boot 11 | - web 12 | annotations: 13 | 'backstage.io/kubernetes-label-selector': 'app.kubernetes.io/part-of=spring-tap-petclinic' 14 | 'backstage.io/techdocs-ref': dir:. 15 | spec: 16 | type: service 17 | lifecycle: experimental 18 | owner: default-team 19 | system: spring-tap-petclinic -------------------------------------------------------------------------------- /carvel-package/manifests/package-install.yml: -------------------------------------------------------------------------------- 1 | apiVersion: packaging.carvel.dev/v1alpha1 2 | kind: PackageInstall 3 | metadata: 4 | name: kubernetes-dashboard 5 | namespace: pkg-demo 6 | spec: 7 | serviceAccountName: carvel-package 8 | packageRef: 9 | refName: kubernetes-dashboard.halkyonio.io 10 | versionSelection: 11 | constraints: 0.2.0 12 | # Values to be included in package's templating step 13 | # (currently only included in the first templating step) (optional) 14 | #values: 15 | # - secretRef: 16 | # name: dashboard-values -------------------------------------------------------------------------------- /carvel-package/dashboard-manifest/config/09_dashboard-certificate.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", 
"data") 2 | 3 | #@yaml/text-templated-strings 4 | --- 5 | apiVersion: cert-manager.io/v1 6 | kind: Certificate 7 | metadata: 8 | name: my-selfsigned-ca 9 | namespace: #@ data.values.namespace 10 | spec: 11 | isCA: true 12 | commonName: k8s-ui.(@= data.values.vm_ip @).nip.io 13 | secretName: k8s-ui-secret 14 | privateKey: 15 | algorithm: ECDSA 16 | size: 256 17 | issuerRef: 18 | name: selfsigned-issuer 19 | kind: ClusterIssuer 20 | group: cert-manager.io -------------------------------------------------------------------------------- /supplychain/quarkus-sc/k8s/clusterbuilder.yml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | #@ load("helpers.lib.yml", "img") 3 | 4 | --- 5 | apiVersion: kpack.io/v1alpha2 6 | kind: ClusterBuilder 7 | metadata: 8 | name: runtime 9 | spec: 10 | order: 11 | - group: 12 | - id: "io.quarkus.buildpacks.buildpack" 13 | serviceAccountRef: 14 | name: kp-default-repository-serviceaccount 15 | namespace: kpack 16 | stack: 17 | name: runtime 18 | kind: ClusterStack 19 | store: 20 | name: runtime 21 | kind: ClusterStore 22 | tag: #@ img("buildpacks-quarkus-builder:jvm") -------------------------------------------------------------------------------- /supplychain/quarkus-sc/workload.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | --- 3 | apiVersion: carto.run/v1alpha1 4 | kind: Workload 5 | metadata: 6 | name: #@ data.values.workload.name 7 | namespace: #@ data.values.namespace 8 | labels: 9 | apps.tanzu.vmware.com/workload-type: #@ data.values.runtime.deliverable_type 10 | app.kubernetes.io/part-of: #@ data.values.runtime.kubernetes_io_partof 11 | spec: 12 | serviceAccountName: #@ data.values.service_account_name 13 | source: 14 | git: 15 | url: #@ data.values.github.url 16 | ref: 17 | branch: #@ data.values.github.ref 18 | 
-------------------------------------------------------------------------------- /supplychain/quarkus-sc/templates/git-source-template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: carto.run/v1alpha1 2 | kind: ClusterSourceTemplate 3 | metadata: 4 | name: source 5 | spec: 6 | urlPath: .status.artifact.url 7 | revisionPath: .status.artifact.revision 8 | 9 | template: 10 | apiVersion: source.toolkit.fluxcd.io/v1beta1 11 | kind: GitRepository 12 | metadata: 13 | name: $(workload.metadata.name)$ 14 | spec: 15 | interval: 1m0s 16 | url: $(workload.spec.source.git.url)$ 17 | ref: $(workload.spec.source.git.ref)$ 18 | ignore: | 19 | # include images dir 20 | !/src/main/resources/META-INF/resources/images 21 | -------------------------------------------------------------------------------- /carvel-package/manifests/rbac.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: carvel-package 6 | namespace: pkg-demo 7 | --- 8 | kind: ClusterRole 9 | apiVersion: rbac.authorization.k8s.io/v1 10 | metadata: 11 | name: carvel-package 12 | rules: 13 | - apiGroups: ["*"] 14 | resources: ["*"] 15 | verbs: ["*"] 16 | --- 17 | kind: ClusterRoleBinding 18 | apiVersion: rbac.authorization.k8s.io/v1 19 | metadata: 20 | name: carvel-package 21 | subjects: 22 | - kind: ServiceAccount 23 | name: carvel-package 24 | namespace: pkg-demo 25 | roleRef: 26 | apiGroup: rbac.authorization.k8s.io 27 | kind: ClusterRole 28 | name: carvel-package -------------------------------------------------------------------------------- /carvel-package/pkg-manifests/rbac.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: carvel-package 6 | namespace: pkg-demo 7 | imagePullSecrets: 8 | - name: ghcr-creds 9 | --- 10 | kind: ClusterRole 11 | 
apiVersion: rbac.authorization.k8s.io/v1 12 | metadata: 13 | name: carvel-package 14 | rules: 15 | - apiGroups: ["*"] 16 | resources: ["*"] 17 | verbs: ["*"] 18 | --- 19 | kind: ClusterRoleBinding 20 | apiVersion: rbac.authorization.k8s.io/v1 21 | metadata: 22 | name: carvel-package 23 | subjects: 24 | - kind: ServiceAccount 25 | name: carvel-package 26 | namespace: pkg-demo 27 | roleRef: 28 | apiGroup: rbac.authorization.k8s.io 29 | kind: ClusterRole 30 | name: carvel-package -------------------------------------------------------------------------------- /carvel-package/dashboard-manifest/config/03_dashboard-secret.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | --- 3 | apiVersion: v1 4 | kind: Secret 5 | metadata: 6 | labels: 7 | k8s-app: kubernetes-dashboard 8 | name: kubernetes-dashboard-certs 9 | namespace: #@ data.values.namespace 10 | type: Opaque 11 | 12 | --- 13 | 14 | apiVersion: v1 15 | kind: Secret 16 | metadata: 17 | labels: 18 | k8s-app: kubernetes-dashboard 19 | name: kubernetes-dashboard-csrf 20 | namespace: #@ data.values.namespace 21 | type: Opaque 22 | data: 23 | csrf: "" 24 | 25 | --- 26 | 27 | apiVersion: v1 28 | kind: Secret 29 | metadata: 30 | labels: 31 | k8s-app: kubernetes-dashboard 32 | name: kubernetes-dashboard-key-holder 33 | namespace: #@ data.values.namespace 34 | type: Opaque 35 | -------------------------------------------------------------------------------- /supplychain/quarkus-sc/templates/kpack-image-template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: carto.run/v1alpha1 2 | kind: ClusterImageTemplate 3 | metadata: 4 | name: image 5 | spec: 6 | params: 7 | - name: image_prefix 8 | default: some-default-prefix- 9 | 10 | imagePath: .status.latestImage 11 | 12 | template: 13 | apiVersion: kpack.io/v1alpha2 14 | kind: Image 15 | metadata: 16 | name: $(workload.metadata.name)$ 17 | spec: 18 | tag: 
$(params.image_prefix)$/$(workload.metadata.name)$ 19 | serviceAccountName: $(params.service_account)$ 20 | builder: 21 | kind: ClusterBuilder 22 | name: $(params.kpack_cluster_builder)$ 23 | source: 24 | blob: 25 | url: $(sources.source.url)$ 26 | build: 27 | env: $(workload.spec.build.env)$ 28 | -------------------------------------------------------------------------------- /carvel-package/dashboard-manifest/values.yml: -------------------------------------------------------------------------------- 1 | #@data/values-schema 2 | --- 3 | #@schema/desc "IP address of the VM." 4 | vm_ip: 0.0.0.0 5 | 6 | #@schema/desc "Namespace where the K8s UI should be installed" 7 | namespace: kubernetes-dashboard 8 | 9 | image: 10 | dashboard: 11 | #@schema/desc "Dashboard image repository" 12 | repository: kubernetesui/dashboard 13 | #@schema/desc "Dashboard version of the image to be installed" 14 | tag: v2.5.1 15 | #@schema/desc "Dashboard strategy to use to pull the image" 16 | pullPolicy: IfNotPresent 17 | scrapper: 18 | #@schema/desc "Scraper image repository" 19 | repository: kubernetesui/metrics-scraper 20 | #@schema/desc "Scraper version of the image to be installed" 21 | tag: v1.0.7 22 | #@schema/desc "Scraper strategy to use to pull the image" 23 | pullPolicy: IfNotPresent -------------------------------------------------------------------------------- /carvel-package/manifests/package-0.3.0.yml: -------------------------------------------------------------------------------- 1 | apiVersion: data.packaging.carvel.dev/v1alpha1 2 | kind: Package 3 | metadata: 4 | name: kubernetes-dashboard.halkyonio.io.0.3.0 5 | namespace: pkg-demo 6 | spec: 7 | version: 0.3.0 8 | refName: kubernetes-dashboard.halkyonio.io 9 | releaseNotes: | 10 | Initial release of the Kubernetes Helm dashboard 11 | valuesSchema: 12 | openAPIv3: #@ yaml.decode(data.values.openapi)["components"]["schemas"]["dataValues"] 13 | template: 14 | spec: 15 | deploy: 16 | - kapp: {} 17 | fetch: 18 | - 
imgpkgBundle: 19 | # TODO 20 | image: ghcr.io/halkyonio/TODO 21 | template: 22 | - ytt: 23 | paths: 24 | - config 25 | - values.yml 26 | - kbld: 27 | paths: 28 | - '-' 29 | - .imgpkg/images.yml -------------------------------------------------------------------------------- /carvel-package/dashboard-manifest/config/07_dashboard-ingress.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | #@yaml/text-templated-strings 4 | --- 5 | apiVersion: networking.k8s.io/v1 6 | kind: Ingress 7 | metadata: 8 | name: kubernetes-dashboard 9 | namespace: #@ data.values.namespace 10 | labels: 11 | k8s-app: kubernetes-dashboard 12 | annotations: 13 | nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" 14 | service.alpha.kubernetes.io/app-protocols: '{"https":"HTTPS"}' 15 | projectcontour.io/ingress.class: contour 16 | spec: 17 | rules: 18 | - host: k8s-ui.(@= data.values.vm_ip @).nip.io 19 | http: 20 | paths: 21 | - backend: 22 | service: 23 | name: kubernetes-dashboard 24 | port: 25 | number: 443 26 | path: / 27 | pathType: ImplementationSpecific 28 | tls: 29 | - hosts: 30 | - k8s-ui.(@= data.values.vm_ip @).nip.io 31 | secretName: k8s-ui-secret 32 | 33 | -------------------------------------------------------------------------------- /carvel-package/manifests/secret.yml: -------------------------------------------------------------------------------- 1 | # kubectl delete secret/dashboard-values 2 | # kubectl create secret generic dashboard-values --from-file=dashboard/values.yml 3 | # kubectl get secret dashboard-values -o yaml 4 | # 5 | apiVersion: v1 6 | data: 7 | values.yml: 
ZXh0cmFBcmdzOgogIC0gLS1hdXRvLWdlbmVyYXRlLWNlcnRpZmljYXRlcz1mYWxzZQogIC0gLS10bHMtY2VydC1maWxlPWV4dHJhL3Rscy5jcnQKICAtIC0tdGxzLWtleS1maWxlPWV4dHJhL3Rscy5rZXkKZXh0cmFWb2x1bWVzOgogIC0gbmFtZTogY2VydHMtc2VsZnNpZ25lZAogICAgc2VjcmV0OgogICAgICBkZWZhdWx0TW9kZTogNDIwCiAgICAgIHNlY3JldE5hbWU6IGs4cy11aS1zZWNyZXQKZXh0cmFWb2x1bWVNb3VudHM6CiAgLSBtb3VudFBhdGg6IC9jZXJ0cy9leHRyYQogICAgbmFtZTogY2VydHMtc2VsZnNpZ25lZAogICAgcmVhZE9ubHk6IHRydWUKCmluZ3Jlc3M6CiAgZW5hYmxlZDogdHJ1ZQogIGFubm90YXRpb25zOgogICAgcHJvamVjdGNvbnRvdXIuaW8vaW5ncmVzcy5jbGFzczogY29udG91cgogIGhvc3RzOgogICAgLSBrOHMtdWkuMTAuMC43Ny41MS5uaXAuaW8KICB0bHM6CiAgICAtIHNlY3JldE5hbWU6IGs4cy11aS1zZWNyZXQKICAgICAgaG9zdHM6CiAgICAgICAgLSBrOHMtdWkuMTAuMC43Ny41MS5uaXAuaW8Kc2VydmljZToKICBhbm5vdGF0aW9uczoKICAgIHByb2plY3Rjb250b3VyLmlvL3Vwc3RyZWFtLXByb3RvY29sLnRsczogIjQ0MyI= 8 | kind: Secret 9 | metadata: 10 | name: dashboard-values 11 | namespace: pkg-demo 12 | type: Opaque 13 | -------------------------------------------------------------------------------- /carvel-package/manifests/cm.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: dashboard 5 | namespace: pkg-demo 6 | data: 7 | config.yml: |- 8 | --- 9 | apiVersion: v1 10 | kind: Namespace 11 | metadata: 12 | name: kubernetes-dashboard 13 | --- 14 | apiVersion: cert-manager.io/v1 15 | kind: ClusterIssuer 16 | metadata: 17 | name: selfsigned-issuer 18 | spec: 19 | selfSigned: {} 20 | --- 21 | apiVersion: cert-manager.io/v1 22 | kind: Certificate 23 | metadata: 24 | name: my-selfsigned-ca 25 | namespace: kubernetes-dashboard 26 | spec: 27 | isCA: true 28 | commonName: k8s-ui.$VM_IP.nip.io 29 | secretName: k8s-ui-secret 30 | privateKey: 31 | algorithm: ECDSA 32 | size: 256 33 | issuerRef: 34 | name: selfsigned-issuer 35 | kind: ClusterIssuer 36 | group: cert-manager.io 37 | --- 38 | apiVersion: cert-manager.io/v1 39 | kind: Issuer 40 | metadata: 41 | name: my-ca-issuer 42 | namespace: 
kubernetes-dashboard 43 | spec: 44 | ca: 45 | secretName: k8s-ui-secret 46 | -------------------------------------------------------------------------------- /carvel-package/pkg-manifests/package-template.yml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") # for reading data values (generated via ytt's data-values-schema-inspect mode). 2 | #@ load("@ytt:yaml", "yaml") # for dynamically decoding the output of ytt's data-values-schema-inspect 3 | --- 4 | apiVersion: data.packaging.carvel.dev/v1alpha1 5 | kind: Package 6 | metadata: 7 | name: #@ "kubernetes-dashboard.halkyonio.io." + data.values.version 8 | namespace: pkg-demo 9 | spec: 10 | version: #@ data.values.version 11 | refName: kubernetes-dashboard.halkyonio.io 12 | releaseNotes: | 13 | Initial release of the Kubernetes Helm dashboard 14 | valuesSchema: 15 | openAPIv3: #@ yaml.decode(data.values.openapi)["components"]["schemas"]["dataValues"] 16 | template: 17 | spec: 18 | fetch: 19 | - imgpkgBundle: 20 | image: #@ "ghcr.io/halkyonio/packages/kubernetes-dashboard:" + data.values.version 21 | template: 22 | - ytt: 23 | paths: 24 | - "config/" 25 | - "config/values.yml" 26 | - kbld: 27 | paths: 28 | - '-' 29 | - .imgpkg/images.yml 30 | deploy: 31 | - kapp: {} -------------------------------------------------------------------------------- /tanzu-spring-boot-demo/Tiltfile: -------------------------------------------------------------------------------- 1 | SOURCE_IMAGE = os.getenv("SOURCE_IMAGE", default='kind-registry:5000/tap/spring-tap-petclinic-demo1') 2 | LOCAL_PATH = os.getenv("LOCAL_PATH", default='.') 3 | NAMESPACE = os.getenv("NAMESPACE", default='demo1') 4 | APP_NAME = "sprint-tap-petclinic" 5 | WORKLOAD_PATH = "config/workload.yaml" 6 | 7 | k8s_custom_deploy( 8 | APP_NAME, 9 | apply_cmd="tanzu apps workload apply -f " + WORKLOAD_PATH + " --live-update" + 10 | " --local-path " + LOCAL_PATH + 11 | " --source-image " + SOURCE_IMAGE + 12 | 
" --namespace " + NAMESPACE + 13 | " --yes >/dev/null" + 14 | " && kubectl get workload " + APP-NAME + " --namespace " + NAMESPACE + " -o yaml", 15 | delete_cmd="tanzu apps workload delete -f " + WORKLOAD_PATH + " --namespace " + NAMESPACE + " --yes" , 16 | deps=['pom.xml', './target/classes'], 17 | container_selector='workload', 18 | live_update=[ 19 | sync('./target/classes', '/workspace/BOOT-INF/classes') 20 | ] 21 | ) 22 | 23 | k8s_resource(APP-NAME, port_forwards=["8080:8080"], 24 | extra_pod_selectors=[{'carto.run/workload-name': APP-NAME, 'app.kubernetes.io/component': 'run'}]) 25 | 26 | allow_k8s_contexts('kubernetes-admin@kubernetes') -------------------------------------------------------------------------------- /carvel-package/manifests/package-0.1.0.yml: -------------------------------------------------------------------------------- 1 | apiVersion: data.packaging.carvel.dev/v1alpha1 2 | kind: Package 3 | metadata: 4 | name: kubernetes-dashboard.halkyonio.io.0.1.0 5 | namespace: pkg-demo 6 | spec: 7 | version: 0.1.0 8 | refName: kubernetes-dashboard.halkyonio.io 9 | releaseNotes: | 10 | Initial release of the Kubernetes Helm dashboard 11 | template: 12 | # See this doc page for more information: https://carvel.dev/kapp-controller/docs/v0.34.0/app-spec/ 13 | spec: 14 | fetch: 15 | - git: 16 | url: https://github.com/kubernetes/dashboard.git 17 | ref: origin/master 18 | subPath: aio/deploy/helm-chart/kubernetes-dashboard 19 | template: 20 | - helmTemplate: 21 | valuesFrom: 22 | - secretRef: 23 | name: dashboard-values 24 | - ytt: 25 | inline: 26 | paths: 27 | remove.yml: | 28 | #@ load("@ytt:overlay", "overlay") 29 | #@overlay/match by=overlay.subset({"kind":"Deployment"}),expects="1+" 30 | --- 31 | metadata: 32 | #@overlay/match by=overlay.subset(None),when=1 33 | #@overlay/remove 34 | annotations: 35 | deploy: 36 | - kapp: {} -------------------------------------------------------------------------------- /tanzu-spring-boot-demo/config/workload.yaml: 
-------------------------------------------------------------------------------- 1 | # https://docs.vmware.com/en/VMware-Tanzu-Application-Platform/1.3/tap/GUID-intellij-extension-getting-started.html 2 | apiVersion: carto.run/v1alpha1 3 | kind: Workload 4 | metadata: 5 | name: spring-tap-petclinic 6 | labels: 7 | apps.tanzu.vmware.com/workload-type: web 8 | app.kubernetes.io/part-of: spring-tap-petclinic 9 | spec: 10 | # tanzu apps workload update spring-tap-petclinic --annotation "autoscaling.knative.dev/scaleDownDelay=15m" 11 | # params: 12 | # - name: annotations 13 | # value: 14 | # autoscaling.knative.dev/scaleDownDelay: 15m 15 | # autoscaling.knative.dev/minScale: "1" 16 | 17 | # tanzu apps workload update spring-tap-petclinic --annotation "autoscaling.knative.dev/scaleDownDelay=15m" --env "SPRING_PROFILES_ACTIVE=postgresql 18 | # env: 19 | # - name: SPRING_PROFILES_ACTIVE 20 | # value: postgres 21 | 22 | # tanzu services instances list -o wide 23 | # tanzu apps workload update spring-tap-petclinic --service-ref "db=sql.tanzu.vmware.com/v1:Postgres:tap-demo:postgres-db" 24 | # 25 | # serviceClaims: 26 | # - name: db 27 | # ref: 28 | # apiVersion: sql.tanzu.vmware.com/v1 29 | # kind: Postgres 30 | # name: postgres-db 31 | source: 32 | git: 33 | url: https://github.com/halkyonio/spring-tap-petclinic.git 34 | ref: 35 | branch: main -------------------------------------------------------------------------------- /supplychain/quarkus-sc/supply-chain.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | --- 3 | apiVersion: carto.run/v1alpha1 4 | kind: ClusterSupplyChain 5 | metadata: 6 | name: quarkus-supply-chain 7 | spec: 8 | selector: 9 | apps.tanzu.vmware.com/workload-type: #@ data.values.runtime.deliverable_type 10 | params: 11 | - name: deliverable_type 12 | value: #@ data.values.runtime.deliverable_type 13 | - name: image_prefix 14 | value: #@ data.values.image_prefix 15 | - name: 
service_account 16 | value: #@ data.values.service_account_name 17 | - name: kpack_cluster_builder 18 | value: #@ data.values.buildpack.kpack_cluster_builder 19 | resources: 20 | # 21 | # source-provider fluxcd/GitRepository 22 | # <--[src]-- image-builder kpack/Image 23 | # <--[img]-- deployer kapp-ctrl/App 24 | # 25 | - name: source-provider 26 | templateRef: 27 | kind: ClusterSourceTemplate 28 | name: source 29 | 30 | - name: image-builder 31 | templateRef: 32 | kind: ClusterImageTemplate 33 | name: image 34 | sources: 35 | - resource: source-provider 36 | name: source 37 | 38 | - name: deployer 39 | templateRef: 40 | kind: ClusterTemplate 41 | name: app-deploy 42 | images: 43 | - resource: image-builder 44 | name: image 45 | -------------------------------------------------------------------------------- /carvel-package/manifests/package-0.2.0.yml: -------------------------------------------------------------------------------- 1 | apiVersion: data.packaging.carvel.dev/v1alpha1 2 | kind: Package 3 | metadata: 4 | name: kubernetes-dashboard.halkyonio.io.0.2.0 5 | namespace: pkg-demo 6 | spec: 7 | version: 0.2.0 8 | refName: kubernetes-dashboard.halkyonio.io 9 | releaseNotes: | 10 | Initial release of the Kubernetes Helm dashboard 11 | template: 12 | # See the App CRD doc page for more information about how to configure this section: https://carvel.dev/kapp-controller/docs/v0.34.0/app-spec/ 13 | spec: 14 | noopDelete: true 15 | fetch: 16 | - git: 17 | url: https://github.com/kubernetes/dashboard.git 18 | ref: origin/master 19 | subPath: aio/deploy/helm-chart/kubernetes-dashboard 20 | - inline: 21 | pathsFrom: 22 | - configMapRef: 23 | # Additional k8s manifest needed to create a Certificate for the k8s dashboard 24 | name: dashboard 25 | template: 26 | - helmTemplate: 27 | valuesFrom: 28 | - secretRef: 29 | name: dashboard-values 30 | - ytt: 31 | inline: 32 | paths: 33 | remove.yml: | 34 | #@ load("@ytt:overlay", "overlay") 35 | #@overlay/match 
by=overlay.subset({"kind":"Deployment"}),expects="1+" 36 | --- 37 | metadata: 38 | #@overlay/match by=overlay.subset(None),when=1 39 | #@overlay/remove 40 | annotations: 41 | deploy: 42 | - kapp: {} -------------------------------------------------------------------------------- /carvel-package/dashboard-manifest/config/06_dashboard-deployment-scraper.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | --- 3 | kind: Deployment 4 | apiVersion: apps/v1 5 | metadata: 6 | labels: 7 | k8s-app: dashboard-metrics-scraper 8 | name: dashboard-metrics-scraper 9 | namespace: #@ data.values.namespace 10 | spec: 11 | replicas: 1 12 | revisionHistoryLimit: 10 13 | selector: 14 | matchLabels: 15 | k8s-app: dashboard-metrics-scraper 16 | template: 17 | metadata: 18 | labels: 19 | k8s-app: dashboard-metrics-scraper 20 | spec: 21 | securityContext: 22 | seccompProfile: 23 | type: RuntimeDefault 24 | containers: 25 | - name: dashboard-metrics-scraper 26 | image: #@ data.values.image.scrapper.repository + ":" + data.values.image.scrapper.tag 27 | imagePullPolicy: #@ data.values.image.scrapper.pullPolicy 28 | ports: 29 | - containerPort: 8000 30 | protocol: TCP 31 | livenessProbe: 32 | httpGet: 33 | scheme: HTTP 34 | path: / 35 | port: 8000 36 | initialDelaySeconds: 30 37 | timeoutSeconds: 30 38 | volumeMounts: 39 | - mountPath: /tmp 40 | name: tmp-volume 41 | securityContext: 42 | allowPrivilegeEscalation: false 43 | readOnlyRootFilesystem: true 44 | runAsUser: 1001 45 | runAsGroup: 2001 46 | serviceAccountName: kubernetes-dashboard 47 | nodeSelector: 48 | "kubernetes.io/os": linux 49 | #! 
Comment the following tolerations if Dashboard must not be deployed on master 50 | tolerations: 51 | - key: node-role.kubernetes.io/master 52 | effect: NoSchedule 53 | volumes: 54 | - name: tmp-volume 55 | emptyDir: {} -------------------------------------------------------------------------------- /scripts/tools/delete_ns_resources.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Execute this command locally 4 | # 5 | # ./delete_ns_resources.sh 6 | # 7 | # or delete_ns_resources.sh 8 | # ssh -i @ -p "bash -s" -- < ./delete_ns_resources.sh 9 | # 10 | # Define the following env vars: 11 | # - REMOTE_HOME_DIR: home directory where files will be installed within the remote VM 12 | # - NAMESPACE: user's namespace to be deleted and containing Tanzu workloads, k8s resources, ... 13 | # 14 | 15 | # Defining some colors for output 16 | RED='\033[0;31m' 17 | NC='\033[0m' # No Color 18 | YELLOW='\033[0;33m' 19 | GREEN='\033[0;32m' 20 | BLUE='\033[0;34m' 21 | MAGENTA='\033[0;35m' 22 | CYAN='\033[0;36m' 23 | WHITE='\033[0;37m' 24 | 25 | repeat_char(){ 26 | COLOR=${1} 27 | for i in {1..50}; do echo -ne "${!COLOR}$2${NC}"; done 28 | } 29 | 30 | log_msg() { 31 | COLOR=${1} 32 | MSG="${@:2}" 33 | echo -e "\n${!COLOR}## ${MSG}${NC}" 34 | } 35 | 36 | log() { 37 | MSG="${@:2}" 38 | echo; repeat_char ${1} '#'; log_msg ${1} ${MSG}; repeat_char ${1} '#'; echo 39 | } 40 | 41 | KUBE_CFG_FILE=${1:-config} 42 | export KUBECONFIG=$HOME/.kube/${KUBE_CFG_FILE} 43 | 44 | REMOTE_HOME_DIR=${REMOTE_HOME_DIR:-$HOME} 45 | NAMESPACE=${NAMESPACE} 46 | 47 | log "GREEN" "Delete the workload(s) created under the namespace: $NAMESPACE" 48 | tanzu apps workload list -n $NAMESPACE | awk '(NR>1)' | while read name app status age; 49 | do 50 | if [[ $app != exit ]]; then 51 | echo "Deleting the $name workload under $NAMESPACE" 52 | tanzu -n $NAMESPACE apps workload delete $name --yes 53 | fi 54 | done 55 | 56 | log "GREEN" "Delete the 
other resources" 57 | kubectl delete "$(kubectl api-resources --namespaced=true --verbs=delete -o name | tr "\n" "," | sed -e 's/,$//')" --all -n $NAMESPACE 58 | 59 | log "GREEN" "Delete the namespace: $NAMESPACE" 60 | kubectl delete ns $NAMESPACE -------------------------------------------------------------------------------- /runtimes/deploy/quarkus-kapp.yml: -------------------------------------------------------------------------------- 1 | apiVersion: kappctrl.k14s.io/v1alpha1 2 | kind: App 3 | metadata: 4 | name: quarkus-petclinic 5 | namespace: tap-install 6 | spec: 7 | serviceAccountName: tap-service-account 8 | fetch: 9 | - inline: 10 | paths: 11 | manifest.yml: | 12 | --- 13 | apiVersion: kapp.k14s.io/v1alpha1 14 | kind: Config 15 | rebaseRules: 16 | - path: [metadata, annotations, serving.knative.dev/creator] 17 | type: copy 18 | sources: [new, existing] 19 | resourceMatchers: &matchers 20 | - apiVersionKindMatcher: {apiVersion: serving.knative.dev/v1, kind: Service} 21 | - path: [metadata, annotations, serving.knative.dev/lastModifier] 22 | type: copy 23 | sources: [new, existing] 24 | resourceMatchers: *matchers 25 | --- 26 | apiVersion: serving.knative.dev/v1 27 | kind: Service 28 | metadata: 29 | name: quarkus-petclinic 30 | spec: 31 | template: 32 | metadata: 33 | annotations: 34 | # See: https://knative.dev/docs/serving/autoscaling/scale-bounds/#configuring-scale-bounds 35 | autoscaling.knative.dev/scaleDownDelay: "15m" 36 | client.knative.dev/user-image: "" 37 | labels: 38 | tanzu.app.live.view: "true" 39 | tanzu.app.live.view.application.name: "quarkus-petclinic" 40 | # tanzu.app.live.view.application.flavours: "quarkus" 41 | spec: 42 | containers: 43 | - image: 95.217.159.244:32500/quarkus-petclinic@sha256:848008bac5d0dea730450ec5ac844e28fd92a0a59f9cad9e62ff7cf28da44b5f 44 | securityContext: 45 | runAsUser: 1000 46 | template: 47 | - ytt: {} 48 | deploy: 49 | - kapp: {} 
-------------------------------------------------------------------------------- /scripts/db/postgres-instance.yml: -------------------------------------------------------------------------------- 1 | apiVersion: sql.tanzu.vmware.com/v1 2 | kind: Postgres 3 | metadata: 4 | name: postgres-db 5 | spec: 6 | storageClassName: local-path 7 | storageSize: 800M 8 | cpu: "0.8" 9 | memory: 800Mi 10 | monitorStorageClassName: local-path 11 | monitorStorageSize: 1G 12 | resources: 13 | monitor: 14 | limits: 15 | cpu: 800m 16 | memory: 800Mi 17 | requests: 18 | cpu: 800m 19 | memory: 800Mi 20 | metrics: 21 | limits: 22 | cpu: 100m 23 | memory: 100Mi 24 | requests: 25 | cpu: 100m 26 | memory: 100Mi 27 | pgConfig: 28 | dbname: postgres-db 29 | username: pgadmin 30 | appUser: pgappuser 31 | postgresVersion: 32 | name: postgres-14 # View available versions with `kubectl get postgresversion` 33 | serviceType: ClusterIP 34 | monitorPodConfig: 35 | affinity: 36 | podAntiAffinity: 37 | preferredDuringSchedulingIgnoredDuringExecution: 38 | - podAffinityTerm: 39 | labelSelector: 40 | matchExpressions: 41 | - key: type 42 | operator: In 43 | values: 44 | - data 45 | - monitor 46 | - key: postgres-instance 47 | operator: In 48 | values: 49 | - postgres-db 50 | topologyKey: kubernetes.io/hostname 51 | weight: 100 52 | dataPodConfig: 53 | affinity: 54 | podAntiAffinity: 55 | preferredDuringSchedulingIgnoredDuringExecution: 56 | - podAffinityTerm: 57 | labelSelector: 58 | matchExpressions: 59 | - key: type 60 | operator: In 61 | values: 62 | - data 63 | - monitor 64 | - key: postgres-instance 65 | operator: In 66 | values: 67 | - postgres-db 68 | topologyKey: kubernetes.io/hostname 69 | weight: 100 70 | -------------------------------------------------------------------------------- /carvel-package/dashboard-manifest/config/06_dashboard-deployment.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | --- 3 | kind: 
Deployment 4 | apiVersion: apps/v1 5 | metadata: 6 | labels: 7 | k8s-app: kubernetes-dashboard 8 | name: kubernetes-dashboard 9 | namespace: #@ data.values.namespace 10 | spec: 11 | replicas: 1 12 | revisionHistoryLimit: 10 13 | selector: 14 | matchLabels: 15 | k8s-app: kubernetes-dashboard 16 | template: 17 | metadata: 18 | labels: 19 | k8s-app: kubernetes-dashboard 20 | spec: 21 | securityContext: 22 | seccompProfile: 23 | type: RuntimeDefault 24 | containers: 25 | - name: kubernetes-dashboard 26 | image: #@ data.values.image.dashboard.repository + ":" + data.values.image.dashboard.tag 27 | imagePullPolicy: #@ data.values.image.dashboard.pullPolicy 28 | ports: 29 | - containerPort: 8443 30 | protocol: TCP 31 | args: 32 | - --namespace=kubernetes-dashboard 33 | - --auto-generate-certificates=false 34 | - --tls-cert-file=extra/tls.crt 35 | - --tls-key-file=extra/tls.key 36 | volumeMounts: 37 | - mountPath: /certs/extra 38 | name: certs-selfsigned 39 | readOnly: true 40 | #! Create on-disk volume to store exec logs 41 | - mountPath: /tmp 42 | name: tmp-volume 43 | livenessProbe: 44 | httpGet: 45 | scheme: HTTPS 46 | path: / 47 | port: 8443 48 | initialDelaySeconds: 30 49 | timeoutSeconds: 30 50 | securityContext: 51 | allowPrivilegeEscalation: false 52 | readOnlyRootFilesystem: true 53 | runAsUser: 1001 54 | runAsGroup: 2001 55 | volumes: 56 | - name: certs-selfsigned 57 | secret: 58 | defaultMode: 420 59 | secretName: k8s-ui-secret 60 | - name: tmp-volume 61 | emptyDir: {} 62 | serviceAccountName: kubernetes-dashboard 63 | nodeSelector: 64 | "kubernetes.io/os": linux 65 | #! 
Comment the following tolerations if Dashboard must not be deployed on master 66 | tolerations: 67 | - key: node-role.kubernetes.io/master 68 | effect: NoSchedule 69 | -------------------------------------------------------------------------------- /carvel-package/dashboard-manifest/config/05_dashboard-rbac.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | --- 3 | kind: Role 4 | apiVersion: rbac.authorization.k8s.io/v1 5 | metadata: 6 | labels: 7 | k8s-app: kubernetes-dashboard 8 | name: kubernetes-dashboard 9 | namespace: #@ data.values.namespace 10 | rules: 11 | #! Allow Dashboard to get, update and delete Dashboard exclusive secrets. 12 | - apiGroups: [""] 13 | resources: ["secrets"] 14 | resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"] 15 | verbs: ["get", "update", "delete"] 16 | #! Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. 17 | - apiGroups: [""] 18 | resources: ["configmaps"] 19 | resourceNames: ["kubernetes-dashboard-settings"] 20 | verbs: ["get", "update"] 21 | #! Allow Dashboard to get metrics. 22 | - apiGroups: [""] 23 | resources: ["services"] 24 | resourceNames: ["heapster", "dashboard-metrics-scraper"] 25 | verbs: ["proxy"] 26 | - apiGroups: [""] 27 | resources: ["services/proxy"] 28 | resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"] 29 | verbs: ["get"] 30 | 31 | --- 32 | kind: ClusterRole 33 | apiVersion: rbac.authorization.k8s.io/v1 34 | metadata: 35 | labels: 36 | k8s-app: kubernetes-dashboard 37 | name: kubernetes-dashboard 38 | rules: 39 | #! 
Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: #@ data.values.namespace
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: #@ data.values.namespace

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: #@ data.values.namespace
--------------------------------------------------------------------------------
/supplychain/tap-sc/basic-image-to-url.yml:
--------------------------------------------------------------------------------
apiVersion: carto.run/v1alpha1
kind: ClusterSupplyChain
metadata:
  name: basic-image-to-url
spec:
  params:
    - default: main
      name: gitops_branch
    - default: supplychain
      name: gitops_user_name
    - default: supplychain
      name: gitops_user_email
    - default: supplychain@cluster.local
      name: gitops_commit_message
    - default: ""
      name: gitops_ssh_secret
  # FIX(review): "resources" was missing its trailing colon, which makes this
  # whole manifest invalid YAML — the resource list below was never parsed.
  resources:
  #
  # from(image-provider) --uses--> ClusterImageTemplate/image-provider-template
  # to(config-provider) --uses--> ClusterConfigTemplate/convention-template
  # to(app-config) --uses--> ClusterConfigTemplate/config-template
  # to(config-writer) --uses--> ClusterTemplate/config-writer-template
  #
  # To(deliverable) --uses--> ClusterTemplate/deliverable-template
# 26 | - name: deliverable 27 | params: 28 | - name: registry 29 | value: 30 | repository: tap 31 | server: registry.harbor.10.0.77.176.nip.io:32443 32 | templateRef: 33 | kind: ClusterTemplate 34 | name: deliverable-template 35 | 36 | - name: image-provider 37 | params: 38 | - name: serviceAccount 39 | value: default 40 | templateRef: 41 | kind: ClusterImageTemplate 42 | name: image-provider-template 43 | 44 | - images: 45 | - name: image 46 | resource: image-provider 47 | name: config-provider 48 | params: 49 | - name: serviceAccount 50 | value: default 51 | templateRef: 52 | kind: ClusterConfigTemplate 53 | name: convention-template 54 | 55 | - configs: 56 | - name: config 57 | resource: config-provider 58 | name: app-config 59 | templateRef: 60 | kind: ClusterConfigTemplate 61 | name: config-template 62 | 63 | - configs: 64 | - name: config 65 | resource: app-config 66 | name: config-writer 67 | params: 68 | - name: serviceAccount 69 | value: default 70 | - name: registry 71 | value: 72 | repository: tap 73 | server: registry.harbor.10.0.77.176.nip.io:32443 74 | templateRef: 75 | kind: ClusterTemplate 76 | name: config-writer-template 77 | selector: 78 | apps.tanzu.vmware.com/workload-type: web 79 | selectorMatchFields: 80 | - key: spec.image 81 | operator: Exists -------------------------------------------------------------------------------- /scripts/db/uninstall_postgresql.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Execute this command locally 4 | # 5 | # ./db/uninstall_postgresql.sh 6 | # 7 | # or remotely 8 | # ssh -i @ -p "bash -s" -- < ./db/uninstall_postgresql.sh 9 | # 10 | # Define the following env vars: 11 | # - NAMESPACE: Namespace where the postgresql instance should be deleted 12 | 13 | # Defining some colors for output 14 | RED='\033[0;31m' 15 | NC='\033[0m' # No Color 16 | YELLOW='\033[0;33m' 17 | GREEN='\033[0;32m' 18 | BLUE='\033[0;34m' 19 | MAGENTA='\033[0;35m' 20 | 
CYAN='\033[0;36m'
WHITE='\033[0;37m'

# Print a 50-character ruler of char $2 in the color whose variable name is $1.
repeat_char(){
  COLOR=${1}
  for i in {1..50}; do echo -ne "${!COLOR}$2${NC}"; done
}

# Print "## <message>" in the color whose variable name is $1.
log_msg() {
  COLOR=${1}
  MSG="${@:2}"
  echo -e "\n${!COLOR}## ${MSG}${NC}"
}

# Banner logger: ruler / message / ruler, all in the color named by $1.
log() {
  MSG="${@:2}"
  echo; repeat_char ${1} '#'; log_msg ${1} ${MSG}; repeat_char ${1} '#'; echo
}

KUBE_CFG_FILE=${KUBE_CFG_FILE:-config}
export KUBECONFIG=$HOME/.kube/${KUBE_CFG_FILE}
#NAMESPACE=${NAMESPACE:-tap-demo}

#log "YELLOW" "Deleting the regsecret secret"
#kubectl -n $NAMESPACE delete secret regsecret --ignore-not-found

#log "YELLOW" "Delete the postgresql instance"
#kubectl delete Postgres/postgres-db -n $NAMESPACE --ignore-not-found

log "YELLOW" "Uninstalling the Helm chart of postgresql"
helm uninstall tanzu-postgresql -n db
if [ $? -eq 0 ]; then
  echo "Helm chart removed"
else
  echo "Let's continue ..."
fi

# FIX(review): log-message typo "posgresql" -> "postgresql".
log "YELLOW" "Removing the installation folder of postgresql & pv100, pv101"
rm -rf $HOME/postgresql

log "YELLOW" "Removing RBAC"
kubectl delete ClusterRoleBinding/postgres-operator-cluster-role-binding
kubectl delete ClusterRole/podspecable-binding
kubectl delete ClusterRole/postgres-editor
kubectl delete ClusterRole/postgres-operator-cluster-role
kubectl delete ClusterRole/postgres-viewer
kubectl delete ClusterRole/postgresbackup-viewer-role
kubectl delete ClusterRole/postgresbackupschedule-editor-role
kubectl delete ClusterRole/postgresbackupschedule-viewer-role
kubectl delete ClusterRole/postgresbackuplocation-editor-role
kubectl delete ClusterRole/postgresbackuplocation-viewer-role
kubectl delete ClusterRole/postgresrestore-editor-role
kubectl delete ClusterRole/postgresrestore-viewer-role
kubectl delete ClusterRole/postgresversion-editor-role
kubectl delete 
ClusterRole/postgresversion-viewer-role 75 | 76 | log "YELLOW" "Some misc resources" 77 | kubectl delete PostgresVersion/postgres-11 78 | kubectl delete PostgresVersion/postgres-12 79 | kubectl delete PostgresVersion/postgres-13 80 | kubectl delete PostgresVersion/postgres-14 81 | 82 | kubectl delete MutatingWebhookConfiguration/postgres-operator-mutating-webhook-configuration 83 | kubectl delete ValidatingWebhookConfiguration/postgres-operator-validating-webhook-configuration 84 | 85 | kubectl delete ns db 86 | 87 | 88 | -------------------------------------------------------------------------------- /scripts/db/install_postgresql.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Execute this command locally 4 | # 5 | # ./install_postgresql.sh 6 | # 7 | # or remotely 8 | # ssh -i @ -p "bash -s" -- < ./install_postgresql.sh 9 | # 10 | # Define the following env vars: 11 | # - REGISTRY_SERVER: Tanzu image registry hostname 12 | # - REGISTRY_USERNAME: user to be used to be authenticated against the Tanzu image registry 13 | # - REGISTRY_PASSWORD: password to be used to be authenticated against the Tanzu image registry 14 | # - POSTGRESQL_VERSION: Version of the Postgresl Operator to be installed (e.g. 
1.5.0)

set -e

# Defining some colors for output
RED='\033[0;31m'
NC='\033[0m' # No Color
YELLOW='\033[0;33m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
MAGENTA='\033[0;35m'
CYAN='\033[0;36m'
WHITE='\033[0;37m'

# Print a 50-character ruler of char $2 in the color whose variable name is $1.
repeat_char(){
  COLOR=${1}
  for i in {1..50}; do echo -ne "${!COLOR}$2${NC}"; done
}

# Print "## <message>" in the color whose variable name is $1.
log_msg() {
  COLOR=${1}
  MSG="${@:2}"
  echo -e "\n${!COLOR}## ${MSG}${NC}"
}

# Banner logger: ruler / message / ruler, all in the color named by $1.
log() {
  MSG="${@:2}"
  echo; repeat_char ${1} '#'; log_msg ${1} ${MSG}; repeat_char ${1} '#'; echo
}

REGISTRY_SERVER=${REGISTRY_SERVER:-registry.tanzu.vmware.com}
REGISTRY_USERNAME=${REGISTRY_USERNAME}
REGISTRY_PASSWORD=${REGISTRY_PASSWORD}

POSTGRESQL_VERSION=${POSTGRESQL_VERSION:-1.5.0}
POSTGRES_API_GROUP=sql.tanzu.vmware.com
POSTGRES_API_VERSION=v1
POSTGRES_KIND=Postgres
POSTGRES_RESOURCE_NAME=postgres

KUBE_CFG_FILE=${KUBE_CFG_FILE:-config}
export KUBECONFIG=$HOME/.kube/${KUBE_CFG_FILE}

log "CYAN" "Helm login to $REGISTRY_SERVER"
export HELM_EXPERIMENTAL_OCI=1
# FIX(review): feed the password on stdin instead of the command line so it
# does not leak into `ps` output or the shell history.
echo "$REGISTRY_PASSWORD" | helm registry login $REGISTRY_SERVER \
  --username=$REGISTRY_USERNAME \
  --password-stdin

log "CYAN" "Docker login to $REGISTRY_SERVER"
# FIX(review): same reason — docker itself warns that `-p` is insecure.
echo "$REGISTRY_PASSWORD" | docker login $REGISTRY_SERVER \
  -u $REGISTRY_USERNAME \
  --password-stdin

PRODUCT_NAME="tanzu-sql-postgres"
log "CYAN" "Accept the EULA licence for the product $PRODUCT_NAME and version: $POSTGRESQL_VERSION"
pivnet accept-eula -p $PRODUCT_NAME -r ${POSTGRESQL_VERSION}

log "CYAN" "Pull the docker images: postgres-instance and postgres-operator"
docker pull registry.tanzu.vmware.com/tanzu-sql-postgres/postgres-instance:v${POSTGRESQL_VERSION}
docker pull registry.tanzu.vmware.com/tanzu-sql-postgres/postgres-operator:v${POSTGRESQL_VERSION}


if [[ -d "$HOME/postgresql" ]]; then
  echo 
"$HOME/postgresql already exists on the machine." 79 | else 80 | log "CYAN" "Pull the postgres-operator-chart from VMWare Tanzu registry" 81 | helm pull oci://$REGISTRY_SERVER/tanzu-sql-postgres/postgres-operator-chart --version v$POSTGRESQL_VERSION --untar --untardir $HOME/postgresql 82 | log "CYAN" "Install the tanzu postgresql operator within the namespace db using helm" 83 | kubectl create ns db 84 | helm install tanzu-postgresql $HOME/postgresql/postgres-operator -n db --wait 85 | fi -------------------------------------------------------------------------------- /supplychain/tap-sc/source-to-url.yml: -------------------------------------------------------------------------------- 1 | apiVersion: carto.run/v1alpha1 2 | kind: ClusterSupplyChain 3 | metadata: 4 | name: source-to-url 5 | spec: 6 | params: 7 | - default: main 8 | name: gitops_branch 9 | - default: supplychain 10 | name: gitops_user_name 11 | - default: supplychain 12 | name: gitops_user_email 13 | - default: supplychain@cluster.local 14 | name: gitops_commit_message 15 | - default: "" 16 | name: gitops_ssh_secret 17 | resources: 18 | # 19 | # from(source-provider) --uses--> ClusterSourceTemplate/source-template 20 | # to(image-builder) --uses--> ClusterConfigTemplate/convention-template 21 | # to(app-config) --uses--> ClusterConfigTemplate/config-template (This is where the k8s resources are created such as ServiceBinding, Knative service or Rsourceclaim (optional)) 22 | # to(config-writer) --uses--> ClusterTemplate/config-writer-template 23 | # 24 | # To(deliverable) --uses--> ClusterTemplate/deliverable-template 25 | # 26 | - name: source-provider 27 | params: 28 | - name: serviceAccount 29 | value: default 30 | - name: gitImplementation 31 | value: go-git 32 | templateRef: 33 | kind: ClusterSourceTemplate 34 | name: source-template 35 | 36 | - name: deliverable 37 | params: 38 | - name: registry 39 | value: 40 | repository: tap 41 | server: registry.harbor.10.0.77.176.nip.io:32443 42 | 
templateRef: 43 | kind: ClusterTemplate 44 | name: deliverable-template 45 | 46 | - name: image-builder 47 | params: 48 | - name: serviceAccount 49 | value: default 50 | - name: clusterBuilder 51 | value: default 52 | - name: registry 53 | value: 54 | repository: tap 55 | server: registry.harbor.10.0.77.176.nip.io:32443 56 | sources: 57 | - name: source 58 | resource: source-provider 59 | templateRef: 60 | kind: ClusterImageTemplate 61 | name: kpack-template 62 | 63 | - images: 64 | - name: image 65 | resource: image-builder 66 | name: config-provider 67 | params: 68 | - name: serviceAccount 69 | value: default 70 | templateRef: 71 | kind: ClusterConfigTemplate 72 | name: convention-template 73 | 74 | - configs: 75 | - name: config 76 | resource: config-provider 77 | name: app-config 78 | templateRef: 79 | kind: ClusterConfigTemplate 80 | name: config-template 81 | 82 | - configs: 83 | - name: config 84 | resource: app-config 85 | name: config-writer 86 | params: 87 | - name: serviceAccount 88 | value: default 89 | - name: registry 90 | value: 91 | repository: tap 92 | server: registry.harbor.10.0.77.176.nip.io:32443 93 | templateRef: 94 | kind: ClusterTemplate 95 | name: config-writer-template 96 | selector: 97 | apps.tanzu.vmware.com/workload-type: web -------------------------------------------------------------------------------- /carvel-package/gen_pkg_img.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Execute this command locally 4 | # 5 | # ./gen_pkg_img.sh 6 | # 7 | # Define the following env vars: 8 | # - XXXX: blabla 9 | 10 | set -e 11 | 12 | # Defining some colors for output 13 | RED='\033[0;31m' 14 | NC='\033[0m' # No Color 15 | YELLOW='\033[0;33m' 16 | GREEN='\033[0;32m' 17 | BLUE='\033[0;34m' 18 | MAGENTA='\033[0;35m' 19 | CYAN='\033[0;36m' 20 | WHITE='\033[0;37m' 21 | 22 | repeat_char(){ 23 | COLOR=${1} 24 | for i in {1..50}; do echo -ne "${!COLOR}$2${NC}"; done 25 | } 26 | 27 
| log_msg() { 28 | COLOR=${1} 29 | MSG="${@:2}" 30 | echo -e "\n${!COLOR}## ${MSG}${NC}" 31 | } 32 | 33 | log() { 34 | MSG="${@:2}" 35 | echo; repeat_char ${1} '#'; log_msg ${1} ${MSG}; repeat_char ${1} '#'; echo 36 | } 37 | 38 | IMG_REPO_HOST=${1:-ghcr.io/halkyonio} 39 | 40 | REPOSITORY_NAME="demo-repo" 41 | PKG_DIR_NAME="kubernetes-dashboard" 42 | PKG_FQN="kubernetes-dashboard.halkyonio.io" 43 | PKG_VERSION=0.1.0 44 | 45 | PROJECT_DIR=$(pwd) 46 | TEMP_DIR=$(pwd)/_temp 47 | 48 | rm -rf $TEMP_DIR 49 | mkdir -p $TEMP_DIR/packages/$PKG_DIR_NAME 50 | 51 | pushd $TEMP_DIR/packages/$PKG_DIR_NAME 52 | 53 | log_msg "CYAN" "Let’s create the Carvel package bundle folders (config, .imgpkg)" 54 | mkdir -p $PKG_VERSION/bundle/{config,.imgpkg} 55 | 56 | log_msg "CYAN" "Copy the k8s dashboard manifests and values files" 57 | cp -r $PROJECT_DIR/dashboard-manifest/config/*.yaml $PKG_VERSION/bundle/config 58 | cp $PROJECT_DIR/dashboard-manifest/values.yml $PKG_VERSION/bundle/config/values.yml 59 | 60 | log_msg "CYAN" "let’s use kbld to record which container images are used" 61 | kbld -f $PKG_VERSION/bundle/config/ --imgpkg-lock-output $PKG_VERSION/bundle/.imgpkg/images.yml 62 | 63 | log_msg "CYAN" "Create an image bundle using the content of package-contents" 64 | imgpkg push -b $IMG_REPO_HOST/packages/kubernetes-dashboard:$PKG_VERSION -f $PKG_VERSION/bundle/ 65 | 66 | log_msg "CYAN" "Export the OpenAPI Schema" 67 | ytt -f $PKG_VERSION/bundle/config/values.yml --data-values-schema-inspect -o openapi-v3 > schema-openapi.yml 68 | 69 | log_msg "CYAN" "Generate the Package CR and copy it within the $PKG_DIR_NAME/$PKG_VERSION directory" 70 | ytt -f $PROJECT_DIR/pkg-manifests/package-template.yml \ 71 | --data-value-file openapi=schema-openapi.yml \ 72 | -v version="$PKG_VERSION" \ 73 | > $PKG_VERSION/package.yml 74 | 75 | log_msg "CYAN" "Remove the file generated containing the OpenAPI schema values" 76 | rm schema-openapi.yml 77 | 78 | log_msg "CYAN" "Copy the PackageMetadata CR 
within the $PKG_DIR_NAME directory" 79 | cp $PROJECT_DIR/pkg-manifests/package-metadata.yml metadata.yml 80 | 81 | log_msg "CYAN" "Bundle the package and push it to the repository" 82 | mkdir -p $TEMP_DIR/repo/.imgpkg 83 | 84 | # Generate the ImagesLock file matching the image version and the SHA of the package's images 85 | kbld -f $TEMP_DIR/packages/ --imgpkg-lock-output $TEMP_DIR/repo/.imgpkg/images.yml 86 | # Copy the packages within the repo 87 | rsync -a --exclude='.imgpkg/' -r $TEMP_DIR/packages/ $TEMP_DIR/repo/packages 88 | # Create and push the repository image 89 | imgpkg push -b $IMG_REPO_HOST/packages/$REPOSITORY_NAME:$PKG_VERSION -f $TEMP_DIR/repo 90 | 91 | popd 92 | 93 | 94 | -------------------------------------------------------------------------------- /carvel-package/README.md: -------------------------------------------------------------------------------- 1 | # How to create a Carvel Package and to deploy Kubernetes dashboard 2 | 3 | - Setup first a kind cluster using the following [bash script](https://github.com/snowdrop/k8s-infra/blob/main/kind/kind-reg-ingress.sh) 4 | - Generate the images containing the content of the Kubernetes dashboard repository and packages using the following bash script: `./gen_pkg_img.sh` 5 | **REMARK**: Do not forget to change the container image host property of the script `IMG_REPO_HOST` to point to your preferred repository (docker.io, quay.io, ...) 
6 | 7 | - Install next the kapp controller and certificate manager (which is needed when the kubernetes dashboard is installed to populate a selfsigned certificate) 8 | ```bash 9 | kapp deploy -a kubectl -f https://github.com/vmware-tanzu/carvel-kapp-controller/releases/latest/download/release.yml -y 10 | kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.7.2/cert-manager.yaml 11 | ``` 12 | - Create the demo namespace and the secret containing the credentials to access the container registry 13 | ```bash 14 | kubectl create ns pkg-demo 15 | 16 | kubectl create secret docker-registry ghcr-creds \ 17 | -n pkg-demo \ 18 | --docker-server="ghcr.io" \ 19 | --docker-username=GHCR_USERNAME \ 20 | --docker-password=GHCR_PASSWORD 21 | ``` 22 | - Create the package values file containing the different parameters supported by the packages such as: `VM_IP` address, namespace, etc 23 | ```bash 24 | cat <<EOF > k8s-ui-values.yaml 25 | vm_ip: 10.0.77.51 26 | namespace: kubernetes-dashboard 27 | EOF 28 | ``` 29 | - Create the secret to be used by the `PackageInstall` 30 | ```bash 31 | kubectl -n pkg-demo delete secret k8s-ui-values 32 | kubectl -n pkg-demo create secret generic k8s-ui-values --from-file=values.yaml=k8s-ui-values.yaml 33 | ``` 34 | - Deploy/install the Kubernetes Dashboard using the Carvel `Package, PackageMetadata and PackageInstall` CR 35 | ```bash 36 | kapp deploy -a pkg-k8d-ui \ 37 | -f pkg-manifests/rbac.yml \ 38 | -f pkg-manifests/package-install.yml \ 39 | -f pkg-manifests/package-metadata.yml \ 40 | -f _temp/kubernetes-dashboard/0.1.0/package.yml \ 41 | -y 42 | 43 | kubectl describe packageinstall/kubernetes-dashboard -n pkg-demo 44 | ``` 45 | 46 | - Alternatively, you can use `tanzu client` as it will simplify your life.
Add the repo, check the available values and install it 47 | ```bash 48 | tanzu package repository add demo-repo --url ghcr.io/halkyonio/packages/demo-repo:0.1.0 49 | 50 | tanzu package repository list 51 | tanzu package available list -n default 52 | tanzu package available get kubernetes-dashboard.halkyonio.io/0.1.0 --values-schema 53 | 54 | tanzu package install k8s-ui -p kubernetes-dashboard.halkyonio.io -v 0.1.0 --values-file k8s-ui-values.yaml -n default 55 | tanzu package installed get k8s-ui 56 | - Retrieving installation details for k8s-ui... I0325 15:04:09.963841 13445 request.go:665] Waited for 1.035994952s due to client-side throttling, not priority and fairness, request: GET:https://10.0.77.51:6443/apis/sources.knative.dev/v1alpha1?timeout=32s 57 | - Retrieving installation details for k8s-ui... 58 | NAME: k8s-ui 59 | PACKAGE-NAME: kubernetes-dashboard.halkyonio.io 60 | PACKAGE-VERSION: 0.1.0 61 | STATUS: Reconcile succeeded 62 | CONDITIONS: [{ReconcileSucceeded True }] 63 | USEFUL-ERROR-MESSAGE: 64 | ``` 65 | - To delete it 66 | ```bash 67 | kapp delete -a pkg-k8d-ui -y 68 | or 69 | tanzu package installed delete k8s-ui -y 70 | tanzu package repository delete demo-repo -y 71 | ``` 72 | 73 | # Dummy project to test Carvel Package with Helm 74 | 75 | - Setup first a kind cluster using the following [bash script](https://github.com/snowdrop/k8s-infra/blob/main/kind/kind-reg-ingress.sh) 76 | - Install next the kapp controller and cert manager 77 | ```bash 78 | kapp deploy -a kubectl -f https://github.com/vmware-tanzu/carvel-kapp-controller/releases/latest/download/release.yml -y 79 | kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.7.2/cert-manager.yaml 80 | ``` 81 | - Create the configmap 82 | - Deploy/install the dummy package project 83 | ```bash 84 | kapp deploy -a pkg-helm -f manifests/ -y 85 | kubectl describe packageinstall/kubernetes-dashboard -n pkg-demo 86 | ``` 87 | - Due to some issues with the finalizers, it 
is then needed to edit the resources to remove them 88 | ```bash 89 | kubectl delete ClusterIssuer/selfsigned-issuer 90 | kapp delete -a pkg-helm -y 91 | ctrl-c 92 | kubectl edit -n pkg-demo PackageInstall/kubernetes-dashboard 93 | NS=pkg-demo 94 | kubectl get ns $NS -o json | jq '.spec.finalizers = []' | kubectl replace --raw "/api/v1/namespaces/$NS/finalize" -f - 95 | ``` 96 | -------------------------------------------------------------------------------- /scripts/tools/inject_ca.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Generate the Self signed certificate using openssl 4 | REG_SERVER=harbor.65.108.148.216.nip.io 5 | mkdir -p certs/${REG_SERVER} 6 | 7 | create_openssl_cfg() { 8 | CFG=$(cat < req.cnf 36 | 37 | echo "==== Create the self signed certificate certificate and client key files" 38 | openssl req -x509 \ 39 | -nodes \ 40 | -days 365 \ 41 | -newkey rsa:4096 \ 42 | -keyout certs/${REG_SERVER}/client.key \ 43 | -out certs/${REG_SERVER}/client.crt \ 44 | -config req.cnf \ 45 | -sha256 46 | 47 | # Kind configuration 48 | kindCfg=$(cat < /usr/local/share/ca-certificates/ca.crt && update-ca-certificates && systemctl restart containerd 108 | # EOF 109 | # 110 | # cat <<'EOF' | kubectl apply -f - 111 | # apiVersion: apps/v1 112 | # kind: DaemonSet 113 | # metadata: 114 | # namespace: kube-system 115 | # name: node-custom-setup 116 | # labels: 117 | # k8s-app: node-custom-setup 118 | # spec: 119 | # selector: 120 | # matchLabels: 121 | # k8s-app: node-custom-setup 122 | # template: 123 | # metadata: 124 | # labels: 125 | # k8s-app: node-custom-setup 126 | # spec: 127 | # hostPID: true 128 | # hostNetwork: true 129 | # initContainers: 130 | # - name: init-node 131 | # command: ["nsenter"] 132 | # args: ["--mount=/proc/1/ns/mnt", "--", "sh", "-c", "$(SETUP_SCRIPT)"] 133 | # image: debian 134 | # env: 135 | # - name: TRUSTED_CERT 136 | # valueFrom: 137 | # configMapKeyRef: 138 | # name: 
trusted-ca 139 | # key: ca.crt 140 | # - name: SETUP_SCRIPT 141 | # valueFrom: 142 | # configMapKeyRef: 143 | # name: setup-script 144 | # key: setup.sh 145 | # securityContext: 146 | # privileged: true 147 | # containers: 148 | # - name: wait 149 | # image: k8s.gcr.io/pause:3.1 150 | # EOF -------------------------------------------------------------------------------- /supplychain/quarkus-sc/templates/app-deploy-template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: carto.run/v1alpha1 2 | kind: ClusterTemplate 3 | metadata: 4 | name: app-deploy 5 | spec: 6 | ytt: | 7 | #@ load("@ytt:data", "data") 8 | #@ load("@ytt:yaml", "yaml") 9 | 10 | #@ def img(): 11 | #@ return "/".join([ 12 | #@ data.values.params.image_prefix, 13 | #@ data.values.workload.metadata.name 14 | #@ ]) 15 | #@ end 16 | 17 | #@ def merge_labels(fixed_values): 18 | #@ labels = {} 19 | #@ if hasattr(data.values.workload.metadata, "labels"): 20 | #@ labels.update(data.values.workload.metadata.labels) 21 | #@ end 22 | #@ labels.update(fixed_values) 23 | #@ return labels 24 | #@ end 25 | 26 | #@ def merge_annotations(fixed_values): 27 | #@ annotations = {} 28 | #@ if hasattr(data.values.workload.metadata, "annotations"): 29 | #@ annotations.update(data.values.workload.metadata.annotations) 30 | #@ end 31 | #@ annotations.update(fixed_values) 32 | #@ return annotations 33 | #@ end 34 | 35 | #@ def get_claims_extension(): 36 | #@ return None 37 | #@ end 38 | 39 | #@ def merge_claims_extension(claim, claims_extension): 40 | #@ if claims_extension == None: 41 | #@ return claim.ref 42 | #@ end 43 | #@ extension = claims_extension.get(claim.name) 44 | #@ if extension == None: 45 | #@ return claim.ref 46 | #@ end 47 | #@ extension.update(claim.ref) 48 | #@ return extension 49 | #@ end 50 | 51 | #@ def manifest(): 52 | --- 53 | apiVersion: kapp.k14s.io/v1alpha1 54 | kind: Config 55 | rebaseRules: 56 | - path: 57 | - metadata 58 | - annotations 59 | - 
serving.knative.dev/creator 60 | type: copy 61 | sources: [new, existing] 62 | resourceMatchers: &matchers 63 | - apiVersionKindMatcher: 64 | apiVersion: serving.knative.dev/v1 65 | kind: Service 66 | - path: 67 | - metadata 68 | - annotations 69 | - serving.knative.dev/lastModifier 70 | type: copy 71 | sources: [new, existing] 72 | resourceMatchers: *matchers 73 | 74 | --- 75 | apiVersion: serving.knative.dev/v1 76 | kind: Service 77 | metadata: 78 | name: #@ data.values.workload.metadata.name 79 | spec: 80 | template: 81 | metadata: 82 | annotations: 83 | autoscaling.knative.dev/minScale: "1" 84 | autoscaling.knative.dev/scaleDownDelay: "15m" 85 | labels: 86 | tanzu.app.live.view: "true" 87 | tanzu.app.live.view.application.name: #@ data.values.workload.metadata.name 88 | tanzu.app.live.view.application.flavours: #@ data.values.params.deliverable_type 89 | app.kubernetes.io/component: service 90 | app.tanzu.vmware.com/deliverable-type: #@ data.values.workload.metadata.name 91 | #@ if/end hasattr(data.values.workload.metadata, "labels") and hasattr(data.values.workload.metadata.labels, "app.kubernetes.io/part-of"): 92 | app.kubernetes.io/part-of: #@ data.values.workload.metadata.labels["app.kubernetes.io/part-of"] 93 | spec: 94 | serviceAccountName: #@ data.values.params.service_account 95 | containers: 96 | - name: workload 97 | image: #@ img() 98 | securityContext: 99 | runAsUser: 1000 100 | 101 | --- 102 | #@ claims_extension = get_claims_extension() 103 | #@ for s in data.values.workload.spec.serviceClaims: 104 | --- 105 | apiVersion: services.apps.tanzu.vmware.com/v1alpha1 106 | kind: ResourceClaim 107 | metadata: 108 | name: #@ data.values.workload.metadata.name + '-' + s.name 109 | annotations: #@ merge_annotations({}) 110 | labels: #@ merge_labels({ "app.kubernetes.io/component": "run", "carto.run/workload-name": data.values.workload.metadata.name }) 111 | spec: 112 | ref: #@ merge_claims_extension(s, claims_extension) 113 | --- 114 | apiVersion: 
servicebinding.io/v1alpha3 115 | kind: ServiceBinding 116 | metadata: 117 | name: #@ data.values.workload.metadata.name + '-' + s.name 118 | annotations: #@ merge_annotations({}) 119 | labels: #@ merge_labels({ "app.kubernetes.io/component": "run", "carto.run/workload-name": data.values.workload.metadata.name }) 120 | spec: 121 | name: #@ s.name 122 | service: 123 | apiVersion: services.apps.tanzu.vmware.com/v1alpha1 124 | kind: ResourceClaim 125 | name: #@ data.values.workload.metadata.name + '-' + s.name 126 | workload: 127 | apiVersion: serving.knative.dev/v1 128 | kind: Service 129 | name: #@ data.values.workload.metadata.name 130 | #@ end 131 | #@ end 132 | 133 | --- 134 | apiVersion: kappctrl.k14s.io/v1alpha1 135 | kind: App 136 | metadata: 137 | name: #@ data.values.workload.metadata.name 138 | spec: 139 | serviceAccountName: #@ data.values.params.service_account 140 | fetch: 141 | - inline: 142 | paths: 143 | manifest.yml: #@ yaml.encode(manifest()) 144 | template: 145 | - ytt: {} 146 | deploy: 147 | - kapp: {} -------------------------------------------------------------------------------- /scripts/tools/gen-selfsigned-cert.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # 4 | # Script generating a selfsigned cert and keys for the server "${REGISTRY_HOSTNAME}.$VM_IP.nip.io" 5 | # Copying the files to /etc/pki/ca-trust/source/anchors/ and /etc/docker/certs.d/ to trust them 6 | # 7 | # Execute this command locally 8 | # 9 | # ./tools/gen-selfsigned-cert.sh 10 | # VM_IP=10.0.76.43 ./tools/gen-selfsigned-cert.sh 11 | # 12 | # or remotely 13 | # ssh -i @ -p "bash -s" -- < ./tools/gen-selfsigned-cert.sh 14 | # 15 | # Define the following env vars: 16 | # - REMOTE_HOME_DIR: home directory where files will be installed within the remote VM 17 | # - VM_IP: IP address of the VM where the cluster is running 18 | # 19 | VM_IP=${VM_IP:=127.0.0.1} 20 | REMOTE_HOME_DIR=${REMOTE_HOME_DIR:-$HOME} 21 | 
CERT_DIR=$PWD/certs 22 | VM_IP_AND_DOMAIN_NAME="$VM_IP.nip.io" 23 | REGISTRY_HOSTNAME=${REGISTRY_HOSTNAME:-registry.harbor} 24 | 25 | # Defining some colors for output 26 | RED='\033[0;31m' 27 | NC='\033[0m' # No Color 28 | YELLOW='\033[0;33m' 29 | GREEN='\033[0;32m' 30 | BLUE='\033[0;34m' 31 | MAGENTA='\033[0;35m' 32 | CYAN='\033[0;36m' 33 | WHITE='\033[0;37m' 34 | 35 | repeat_char(){ 36 | COLOR=${1} 37 | for i in {1..50}; do echo -ne "${!COLOR}$2${NC}"; done 38 | } 39 | 40 | log_line() { 41 | COLOR=${1} 42 | MSG="${@:2}" 43 | echo -e "${!COLOR}## ${MSG}${NC}" 44 | } 45 | 46 | log_msg() { 47 | COLOR=${1} 48 | MSG="${@:2}" 49 | echo -e "\n${!COLOR}## ${MSG}${NC}" 50 | } 51 | 52 | log() { 53 | MSG="${@:2}" 54 | echo; repeat_char ${1} '#'; log_msg ${1} ${MSG}; repeat_char ${1} '#'; echo 55 | } 56 | 57 | repeat(){ 58 | local start=1 59 | local end=${1:-80} 60 | local str="${2:-=}" 61 | local range=$(seq $start $end) 62 | for i in $range ; do echo -n "${str}"; done 63 | } 64 | 65 | check_os() { 66 | platform='unknown' 67 | unamestr=$(uname) 68 | if [[ "$unamestr" == 'Linux' ]]; then 69 | platform='linux' 70 | elif [[ "$unamestr" == 'Darwin' ]]; then 71 | platform='darwin' 72 | fi 73 | } 74 | 75 | create_openssl_cfg() { 76 | CFG=$(cat < ${CERT_DIR}/req.cnf 119 | 120 | #log "CYAN" "Generate the CA certificate and private key." 121 | #openssl req -x509 \ 122 | # -nodes \ 123 | # -sha512 \ 124 | # -days 3650 \ 125 | # -newkey rsa:4096 \ 126 | # -subj "/C=CN/ST=Namur/L=Florennes/O=RedHat/OU=Snowdrop/CN=${VM_IP_AND_DOMAIN_NAME}" \ 127 | # -keyout ${CERT_DIR}/ca.key \ 128 | # -out ${CERT_DIR}/ca.crt 129 | 130 | log "CYAN" "Create the self signed certificate and server key." 
131 | openssl req -x509 \ 132 | -nodes \ 133 | -days 365 \ 134 | -newkey rsa:4096 \ 135 | -sha256 \ 136 | -keyout ${CERT_DIR}/${REGISTRY_HOSTNAME}.${VM_IP_AND_DOMAIN_NAME}/tls.key \ 137 | -out ${CERT_DIR}/${REGISTRY_HOSTNAME}.${VM_IP_AND_DOMAIN_NAME}/tls.crt \ 138 | -config ${CERT_DIR}/req.cnf 139 | 140 | if [[ $platform == 'linux' ]]; then 141 | log_line "CYAN" "Copy the tls.crt to /etc/pki/ca-trust/source/anchors/ and trust the certificate" 142 | sudo cp ${CERT_DIR}/${REGISTRY_HOSTNAME}.${VM_IP_AND_DOMAIN_NAME}/tls.crt /etc/pki/ca-trust/source/anchors/${REGISTRY_HOSTNAME}.${VM_IP_AND_DOMAIN_NAME}.crt 143 | sudo update-ca-trust 144 | 145 | log_line "CYAN" "Copy the tls.crt to /etc/docker/certs.d/${REGISTRY_HOSTNAME}.${VM_IP_AND_DOMAIN_NAME} and restart docker daemon" 146 | sudo mkdir -p /etc/docker/certs.d/${REGISTRY_HOSTNAME}.${VM_IP_AND_DOMAIN_NAME} 147 | sudo cp ${CERT_DIR}/${REGISTRY_HOSTNAME}.${VM_IP_AND_DOMAIN_NAME}/tls.crt /etc/docker/certs.d/${REGISTRY_HOSTNAME}.${VM_IP_AND_DOMAIN_NAME}/tls.crt 148 | sudo systemctl restart docker 149 | elif [[ $platform == 'darwin' ]]; then 150 | log_line "CYAN" "Copy the *.crt, *.key file to ~/.docker/certs.d/${REGISTRY_HOSTNAME}.${VM_IP_AND_DOMAIN_NAME}" 151 | mv ${CERT_DIR}/${REGISTRY_HOSTNAME}.${VM_IP_AND_DOMAIN_NAME}/tls.crt ${CERT_DIR}/${REGISTRY_HOSTNAME}.${VM_IP_AND_DOMAIN_NAME}/tls.cert 152 | cp ${CERT_DIR}/${REGISTRY_HOSTNAME}.${VM_IP_AND_DOMAIN_NAME}/*.{key,cert} $REMOTE_HOME_DIR/.docker/certs.d/${REGISTRY_HOSTNAME}.${VM_IP_AND_DOMAIN_NAME}/ 153 | fi 154 | 155 | -------------------------------------------------------------------------------- /demo-scenario.md: -------------------------------------------------------------------------------- 1 | # Introduction 2 | 3 | **NOTE**: This file contains commands that I'm personally using against a private internal VM 4 | 5 | # Setup K8S Config locally on the Developer machine to access VM 6 | ``` 7 | konfig import -p -s _temp/config.yml 8 | kubectx 
kubernetes-admin@kubernetes 9 | ``` 10 | # SSH to the VM 11 | ``` 12 | pass-team 13 | CLOUD=openstack && VM=k123-fedora35-01 && ssh-vm $CLOUD $VM 14 | ``` 15 | ## Open UI & Get Tokens 16 | ``` 17 | alias chrome="/Applications/Google\ Chrome.app/Contents/MacOS/Google\ Chrome --args --incognito" 18 | 19 | VM_IP=10.0.77.176 20 | chrome http://tap-gui.$VM_IP.nip.io/ 21 | chrome http://k8s-ui.$VM_IP.nip.io/ 22 | // Tilt 23 | chrome http://localhost:10350/ 24 | ``` 25 | # Install kubeapps and get the Kubeapps token (optional) 26 | ``` 27 | helm repo add bitnami https://charts.bitnami.com/bitnami 28 | helm uninstall kubeapps -n kubeapps 29 | cat < $HOME/tanzu/kubeapps-values.yml 30 | dashboard: 31 | image: 32 | repository: bitnami/kubeapps-dashboard 33 | kubeops: 34 | enabled: true 35 | image: 36 | repository: bitnami/kubeapps-kubeops 37 | kubeappsapis: 38 | image: 39 | repository: bitnami/kubeapps-apis 40 | enabledPlugins: 41 | - resources 42 | - kapp-controller-packages 43 | - helm-packages 44 | packaging: 45 | helm: 46 | enabled: true 47 | carvel: 48 | enabled: true 49 | featureFlags: 50 | operators: false 51 | EOF 52 | kubectl create ns kubeapps 53 | helm install kubeapps -n kubeapps bitnami/kubeapps -f $HOME/tanzu/kubeapps-values.yml 54 | cat < "thisisunsafe" 185 | 186 | #kubectl get secret/k8s-ui-secret -n kubernetes-dashboard -o jsonpath="{.data.ca\.crt}" | base64 -d > _temp/ca.crt 187 | #sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain _temp/ca.crt 188 | 189 | 190 | 191 | -------------------------------------------------------------------------------- /export-secrets.md: -------------------------------------------------------------------------------- 1 | Table of Contents 2 | ================= 3 | 4 | * [Using the ClusterPullSecret controller](#using-the-clusterpullsecret-controller) 5 | * [Instructions](#instructions) 6 | * [The Carvel Secretgen approach](#the-carvel-secretgen-approach) 7 | * [Instructions](#instructions-1) 8 | 
9 | # Using the ClusterPullSecret controller 10 | 11 | This solution relies on a Controller able to use an existing kubernetes Docker secret to create a new one within the target namespaces and to patch the SA with the property `imagePullSecret` 12 | using the ClusterPullSecret [controller](https://github.com/alexellis/registry-creds/) 13 | 14 | ## Instructions 15 | 16 | Create first a Kind k8s cluster with a secured registry and install the controller 17 | ```bash 18 | bash <( curl -s https://raw.githubusercontent.com/snowdrop/k8s-infra/main/kind/kind-tls-secured-reg.sh) 19 | kubectl apply -f https://raw.githubusercontent.com/alexellis/registry-creds/master/manifest.yaml 20 | ``` 21 | Pull and push a hello application image to the local private registry 22 | ```bash 23 | docker pull gcr.io/google-samples/hello-app:1.0 24 | docker tag gcr.io/google-samples/hello-app:1.0 localhost:5000/hello-app:1.0 25 | docker login localhost:5000 -u admin -p snowdrop 26 | docker push localhost:5000/hello-app:1.0 27 | ``` 28 | 29 | Create a docker-registry secret containing the credentials to access the local image registry 30 | ```bash 31 | export REG_SERVER=registry.local:5000 32 | export REG_USERNAME=admin 33 | export REG_PASSWORD=snowdrop 34 | 35 | kubectl delete secret local-reg-creds --namespace kube-system 36 | 37 | kubectl create secret docker-registry local-reg-creds \ 38 | --namespace kube-system \ 39 | --docker-server=$REG_SERVER \ 40 | --docker-username=$REG_USERNAME \ 41 | --docker-password=$REG_PASSWORD 42 | ``` 43 | Alternatively, you can also create a secret using an existing docker cfg file 44 | ```bash 45 | kubectl create secret generic local-reg-creds \ 46 | --namespace kube-system \ 47 | --from-file=.dockerconfigjson=$HOME/.docker/config.json \ 48 | --type=kubernetes.io/dockerconfigjson 49 | ``` 50 | Create the `ClusterPullSecret` 51 | ```bash 52 | kubectl create ns demo 53 | kubectl delete secret --all -n demo 54 | kubectl delete sa --all -n demo 55 | 56
| kubectl delete ClusterPullSecret/local-reg-creds 57 | cat <) 38 | 39 | [Optional Flags - Used by the Prepare Action] 40 | -n or --namespace: Namespace where the repository, package, operator will be deployed (Default: db) 41 | --tds-version: Version of the Tanzu Database Service repository to be installed (Default: 1.7.3) 42 | 43 | [Optional Flags - Used by the Instance Action] 44 | -n or --namespace: Namespace where the repository, package, operate will 45 | --tds-version: Version of the Tanzu Database Service repository to be installed (Default: 1.7.3) 46 | " 47 | 48 | #################################### 49 | ## Section to declare the functions 50 | #################################### 51 | exec_command() { 52 | if ! $@ ; then 53 | rc=$? 54 | fixme "Command '$@' failed" 55 | exit $rc 56 | fi 57 | } 58 | 59 | repeat_char(){ 60 | COLOR=${1} 61 | for i in {1..70}; do echo -ne "${!COLOR}$2${NC}"; done 62 | } 63 | 64 | msg() { 65 | COLOR=${1} 66 | MSG="${@:2}" 67 | echo -e "\n${!COLOR}## ${MSG}${NC}" 68 | } 69 | 70 | note() { 71 | echo -e "\n${BLUE}NOTE:${NC} $1" 72 | } 73 | 74 | warn() { 75 | echo -e "\n${YELLOW}WARN:${NC} $1" 76 | } 77 | 78 | fixme() { 79 | echo -e "\n${RED}FIXME:${NC} $1" 80 | } 81 | 82 | log() { 83 | MSG="${@:2}" 84 | echo; repeat_char ${1} '#'; msg ${1} ${MSG}; repeat_char ${1} '#'; echo 85 | } 86 | 87 | repeat(){ 88 | local start=1 89 | local end=${1:-80} 90 | local str="${2:-=}" 91 | local range=$(seq $start $end) 92 | for i in $range ; do echo -n "${str}"; done 93 | } 94 | 95 | machine_os() { 96 | unameOut="$(uname -s)" 97 | case "${unameOut}" in 98 | Linux*) machine_os=Linux;; 99 | Darwin*) machine_os=Mac;; 100 | *) machine_os="UNKNOWN:${unameOut}" 101 | esac 102 | } 103 | 104 | ############################################################################ 105 | ## Check if flags are passed and set the variables using the flogs passed 106 | ############################################################################ 107 | if [[ $# == 0 ]]; 
then 108 | fixme "No Flags were passed. Run with --help flag to get usage information" 109 | exit 1 110 | fi 111 | 112 | while test $# -gt 0; do 113 | case "$1" in 114 | -a | --action) 115 | shift 116 | action=$1 117 | shift;; 118 | --registry-url) 119 | shift 120 | registry_url=$1 121 | shift;; 122 | --registry-username) 123 | shift 124 | registry_username=$1 125 | shift;; 126 | --registry-password) 127 | shift 128 | registry_password=$1 129 | shift;; 130 | --registry-owner) 131 | shift 132 | registry_owner=$1 133 | shift;; 134 | --tds-version) 135 | shift 136 | tds_version=$1 137 | shift;; 138 | -n | -ns | --namespace) 139 | shift 140 | db_namespace=$1 141 | shift;; 142 | -h | --help) 143 | echo "$HELP_CONTENT" 144 | exit 1;; 145 | *) 146 | fixme "$1 is not a recognized flag!" 147 | exit 1 148 | ;; 149 | esac 150 | done 151 | 152 | ####################################################### 153 | ## Set default values when no optional flags are passed 154 | ####################################################### 155 | : ${tds_version:="1.7.3"} 156 | : ${tds_repository_name:="tanzu-data-services-repository"} 157 | : ${tds_namespace:="db"} 158 | 159 | ####################################################### 160 | ## Set local default values 161 | ####################################################### 162 | postgres_api_group="sql.tanzu.vmware.com" 163 | postgres_api_version="v1" 164 | postgres_kind="Postgres" 165 | postgres_resource_name="postgres" 166 | 167 | # Check machine os 168 | machine_os 169 | if [[ $machine_os != "Mac" && $machine_os != "Linux" ]]; then 170 | fixme "Only Mac and Linux are currently supported. your machine returned the type of $machine_os" 171 | exit 1 172 | fi 173 | 174 | # Validate that an action was passed 175 | if ! [[ $action ]]; then 176 | fixme "Please pass a valid action using the flag (e.g.
--action create)" 177 | exit 1 178 | fi 179 | 180 | # Actions to executed 181 | case $action in 182 | prepare) 183 | log "BLUE" "Preparing to install the TDS repository ${tds_version}, package and operator" 184 | # Validate if Mandatory Flags were supplied 185 | if ! [[ ${registry_username} && ${registry_password} && ${registry_url} && ${registry_owner} ]]; then 186 | fixme "Mandatory flags were note passed: --registry-url, --registry-owner, --registry-username, --registry-password" 187 | exit 1 188 | fi 189 | 190 | if ! command -v tanzu &> /dev/null 191 | then 192 | warn "Tanzu client is not installed" 193 | exit 1 194 | fi 195 | 196 | note "Creating the namespace: ${tds_namespace}" 197 | kubectl create ns ${tds_namespace} --dry-run=client -o yaml | kubectl apply -f - 198 | 199 | note "Populating the secret containing the registry credentials, create it and export it to all the namespaces" 200 | tanzu secret registry add registry-credentials \ 201 | --username ${registry_username} \ 202 | --password ${registry_password} \ 203 | --server ${registry_url} \ 204 | -n ${tds_namespace} \ 205 | --export-to-all-namespaces --yes 206 | 207 | note "Adding the tanzu-data-services-repository" 208 | tanzu package repository add ${tds_repository_name} \ 209 | --url ${registry_url}/${registry_owner}/tds-packages \ 210 | -n ${tds_namespace} 211 | 212 | note "Installing the Postgresql Operator from the package postgres-operator.sql.tanzu.vmware.com version: ${tds_version}" 213 | #tanzu package install postgres-operator \ 214 | # -p postgres-operator.sql.tanzu.vmware.com \ 215 | # -v ${tds_version} \ 216 | # -n ${tds_namespace} 217 | # #-f 218 | 219 | log "BLUE" "Done";; 220 | instance) 221 | log "BLUE" "Creating an instance" 222 | # Validate if Mandatory Flags were supplied 223 | if ! [[ ${db_namespace} ]]; then 224 | log "YELLOW" "Mandatory flags were note passed: --ns. 
use --help for usage information" 225 | exit 1 226 | fi 227 | msg "CYAN" "kubectl create ns ${db_namespace} --dry-run=client -o yaml | kubectl apply -f -" 228 | log "BLUE" "Instance of Postgres created under ${db_namespace}.";; 229 | delete) 230 | note "Deleting an instance" 231 | note "tanzu package repository delete tanzu-data-services-repository -n tap-install" 232 | note "Done.";; 233 | remove) 234 | note "Remove the Tanzu postgresql package" 235 | tanzu package installed delete postgres-operator -n ${tds_namespace} -y 236 | note "Remove now the Tanzu TDS repository" 237 | tanzu package repository delete ${tds_repository_name} -n ${tds_namespace} -y 238 | note "Done.";; 239 | *) 240 | fixme "Unknown action passed: $action. Please use --help." 241 | exit 1 242 | esac -------------------------------------------------------------------------------- /runtimes/README.md: -------------------------------------------------------------------------------- 1 | ## Steps executed to use the Quarkus builder image 2 | 3 | **WARNING**: Page deprecated as content is mainly used as a garage of instructions, tests, ... !!!! 4 | 5 | ```bash 6 | git clone https://github.com/quarkusio/quarkus-buildpacks.git && cd quarkus-buildpacks 7 | 8 | # Generate the buildpacks image (pack ...) 
9 | ./create-buildpacks.sh 10 | 11 | # Tag and push the images to a private docker registry 12 | export REGISTRY_URL="ghcr.io/halkyonio" 13 | docker tag codejive/buildpacks-quarkus-builder:jvm $REGISTRY_URL/quarkus-builder:jvm 14 | docker tag codejive/buildpacks-quarkus-run:jvm $REGISTRY_URL/quarkus-stack:run 15 | docker tag codejive/buildpacks-quarkus-build:jvm $REGISTRY_URL/quarkus-stack:build 16 | 17 | docker push $REGISTRY_URL/quarkus-builder:jvm 18 | docker push $REGISTRY_URL/quarkus-stack:run 19 | docker push $REGISTRY_URL/quarkus-stack:build 20 | 21 | # Create the kpack ClusterStore, ClusterBuilder and ClusterStack Custom resources 22 | pushd runtimes 23 | KUBECONFIG= 24 | kapp deploy -a quarkus-builder \ 25 | --kubeconfig $KUBECONFIG \ 26 | -f buildpacks/clusterstore.yml \ 27 | -f buildpacks/clusterstack.yml \ 28 | -f buildpacks/clusterbuilder.yml 29 | popd 30 | # To delete the kapp "quarkus-builder" installed 31 | kapp delete -a quarkus-builder -y 32 | ``` 33 | 34 | ## Update TAP 35 | 36 | Edit the `tap-values.yml` file to configure the `cluster_builder` field to use the `runtime` ClusterBuilder 37 | ```yaml 38 | ootb_supply_chain_basic: 39 | # cluster_builder: default 40 | cluster_builder: runtime 41 | ... 
42 | ``` 43 | Next, update the TAP package which will ultimately update the `ClusterSupplyChain/source-to-url` 44 | ```bash 45 | tanzu package installed update tap -p tap.tanzu.vmware.com -v 1.0.0 --values-file tap-values.yml -n tap-install 46 | ``` 47 | When done, we can create a new Quarkus workload 48 | ```bash 49 | KUBECONFIG_PATH= 50 | QUARKUS_DEMO_GIT="https://github.com/halkyonio/quarkus-tap-petclinic.git" 51 | DEMO_NAMESPACE=tap-demo 52 | tanzu apps workload create quarkus-java-web-app \ 53 | --kubeconfig $KUBECONFIG_PATH \ 54 | -n $DEMO_NAMESPACE \ 55 | --git-repo $QUARKUS_DEMO_GIT \ 56 | --git-branch main \ 57 | --type web \ 58 | --label app.kubernetes.io/part-of=quarkus-java-web-app \ 59 | --yes 60 | ``` 61 | 62 | ## Trick to allow a quarkus application to work with Application Live View 63 | 64 | Actually, this is already sort of possible via plugins that app live view allows to create. Essentially you’d need a new “app-flavour” for quarkus, 65 | The label on such app needs to `tanzu.app.live.view.application.flavours: quarkus`. 66 | You’d need to follow **[Extensibility](https://https://docs.vmware.com/en/Application-Live-View-for-VMware-Tanzu/0.1/docs/GUID-extensibility.html)** doc to create a UI plugin. 67 | 68 | ``` 69 | The backend endpoint would be: 70 | /instance/{id}/actuator/** 71 | (i.e. /instance/abc-id/actuator/app-memory) 72 | ``` 73 | 74 | Now if apps actuator path is configured with label: `tanzu.app.live.view.application.actuator.path: quarkus` 75 | instead of the default which is actuator on the app you’d be hitting endpoint `/quarkus/app-memory` the response json 76 | for which you should be able to handle in your UI plugin. 
77 | 78 | ## Deploy a service 79 | 80 | - Install the Service operator 81 | ```bash 82 | kapp -y deploy --app rmq-operator --file https://github.com/rabbitmq/cluster-operator/releases/download/v1.9.0/cluster-operator.yml 83 | ``` 84 | 85 | - Configure the RBAC clusterroles; reader and admin 86 | ```bash 87 | cat < `kappctrl.k14s.io/v1alpha1` !!! 219 | 220 | Use the `kp client` to create an image for the local project 221 | ```bash 222 | kp image create quarkus-petclinic-image \ 223 | --tag / \ 224 | --local-path /path/to/local/source/code \ 225 | --builder my-builder \ 226 | -n my-namespace 227 | ``` 228 | 229 | ```bash 230 | kapp delete -a quarkus-petclinic -y 231 | kapp deploy -a quarkus-petclinic -f ./deploy/quarkus-kapp.yml 232 | ``` 233 | ## Access the Quarkus Petclinic UI from your browser 234 | ```bash 235 | export ENVOY_NODE_PORT=$(kubectl get svc/envoy -n contour-external -o jsonpath='{.spec.ports[0].nodePort}') 236 | export VM_IP=95.217.159.244 237 | echo "Quarkus Petclinic demo: http://quarkus-petclinic.tap-install.$VM_IP.nip.io:$ENVOY_NODE_PORT" 238 | open -na "Google Chrome" --args --incognito http://petclinic.tap-install.$VM_IP.nip.io:$ENVOY_NODE_PORT 239 | ``` -------------------------------------------------------------------------------- /carvel-package/pkg-manifests/package-metadata.yml: -------------------------------------------------------------------------------- 1 | apiVersion: data.packaging.carvel.dev/v1alpha1 2 | kind: PackageMetadata 3 | metadata: 4 | name: kubernetes-dashboard.halkyonio.io 5 | namespace: pkg-demo 6 | spec: 7 | displayName: "Kubernetes dashboard" 8 | iconSVGBase64: <?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->

<svg
   xmlns:dc="http://purl.org/dc/elements/1.1/"
   xmlns:cc="http://creativecommons.org/ns#"
   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
   xmlns:svg="http://www.w3.org/2000/svg"
   xmlns="http://www.w3.org/2000/svg"
   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
   width="722.8457"
   height="701.96637"
   id="svg2"
   version="1.1"
   inkscape:version="0.48.4 r9939"
   sodipodi:docname="logo.svg"
   inkscape:export-filename="/home/thockin/src/kubernetes/new.png"
   inkscape:export-xdpi="460.95001"
   inkscape:export-ydpi="460.95001">
  <defs
     id="defs4" />
  <sodipodi:namedview
     id="base"
     pagecolor="#ffffff"
     bordercolor="#666666"
     borderopacity="1.0"
     inkscape:pageopacity="0.0"
     inkscape:pageshadow="2"
     inkscape:zoom="16.190509"
     inkscape:cx="277.56851"
     inkscape:cy="157.54494"
     inkscape:document-units="px"
     inkscape:current-layer="g3052"
     showgrid="false"
     inkscape:window-width="1519"
     inkscape:window-height="822"
     inkscape:window-x="51"
     inkscape:window-y="25"
     inkscape:window-maximized="0"
     inkscape:snap-global="false"
     fit-margin-top="10"
     fit-margin-left="10"
     fit-margin-right="10"
     fit-margin-bottom="10" />
  <metadata
     id="metadata7">
    <rdf:RDF>
      <cc:Work
         rdf:about="">
        <dc:format>image/svg+xml</dc:format>
        <dc:type
           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
        <dc:title />
      </cc:Work>
    </rdf:RDF>
  </metadata>
  <g
     inkscape:label="Layer 1"
     inkscape:groupmode="layer"
     id="layer1"
     transform="translate(-6.3260942,-174.7524)">
    <g
       id="g3052">
      <path
         style="fill:#326ce5;fill-opacity:1;stroke:#ffffff;stroke-width:0;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
         d="m 365.3125,184.8125 a 46.724621,46.342246 0 0 0 -17.90625,4.53125 l -244.34375,116.75 a 46.724621,46.342246 0 0 0 -25.28125,31.4375 L 17.5,599.78125 A 46.724621,46.342246 0 0 0 23.84375,635.3125 46.724621,46.342246 0 0 0 26.5,639 l 169.125,210.28125 a 46.724621,46.342246 0 0 0 36.53125,17.4375 L 503.375,866.65625 A 46.724621,46.342246 0 0 0 539.90625,849.25 L 708.96875,638.9375 A 46.724621,46.342246 0 0 0 718,599.71875 l -60.375,-262.25 a 46.724621,46.342246 0 0 0 -25.28125,-31.4375 l -244.375,-116.6875 A 46.724621,46.342246 0 0 0 365.3125,184.8125 z"
         id="path3055"
         inkscape:connector-curvature="0"
         inkscape:export-filename="new.png"
         inkscape:export-xdpi="250.55"
         inkscape:export-ydpi="250.55" />
      <path
         inkscape:connector-curvature="0"
         id="path3059"
         d="m 367.73366,274.05962 c -8.07696,8.2e-4 -14.62596,7.27591 -14.625,16.25 1e-5,0.13773 0.0282,0.26934 0.0312,0.40625 -0.0119,1.21936 -0.0708,2.68836 -0.0312,3.75 0.19262,5.176 1.3209,9.13749 2,13.90625 1.23028,10.20666 2.26117,18.66736 1.625,26.53125 -0.61869,2.9654 -2.80288,5.67741 -4.75,7.5625 l -0.34375,6.1875 c -8.77682,0.72717 -17.61235,2.05874 -26.4375,4.0625 -37.97461,8.62218 -70.67008,28.18307 -95.5625,54.59375 -1.61522,-1.10193 -4.44103,-3.12914 -5.2813,-3.75 -2.61117,0.35262 -5.25021,1.15829 -8.6875,-0.84375 -6.54491,-4.40563 -12.50587,-10.48693 -19.71875,-17.8125 -3.30498,-3.50419 -5.69832,-6.84101 -9.625,-10.21875 -0.89172,-0.76707 -2.25258,-1.80455 -3.25,-2.59375 -3.06988,-2.44757 -6.6907,-3.72402 -10.1875,-3.84375 -4.49589,-0.15394 -8.82394,1.60385 -11.65625,5.15625 -5.03521,6.31538 -3.42312,15.96805 3.59375,21.5625 0.0712,0.0567 0.14702,0.10078 0.21875,0.15625 0.96422,0.78162 2.14496,1.78313 3.03125,2.4375 4.16687,3.07655 7.9732,4.65145 12.125,7.09375 8.747,5.40181 15.99837,9.88086 21.75,15.28125 2.24602,2.39417 2.63858,6.61292 2.9375,8.4375 l 4.6875,4.1875 c -25.09342,37.76368 -36.70686,84.40946 -29.8437,131.9375 l -6.125,1.78125 c -1.6143,2.08461 -3.89541,5.36474 -6.2813,6.34375 -7.52513,2.37021 -15.99424,3.24059 -26.21875,4.3125 -4.80031,0.39915 -8.94218,0.16095 -14.03125,1.125 -1.12008,0.21218 -2.68072,0.61877 -3.90625,0.90625 -0.0426,0.009 -0.0824,0.0216 -0.125,0.0312 -0.0668,0.0155 -0.15456,0.0479 -0.21875,0.0625 -8.62014,2.08279 -14.15774,10.006 -12.375,17.8125 1.78316,7.80833 10.20314,12.55677 18.875,10.6875 0.0626,-0.0143 0.1535,-0.0167 0.21875,-0.0312 0.0979,-0.0224 0.18409,-0.0699 0.28125,-0.0937 1.20885,-0.26536 2.72377,-0.5606 3.78125,-0.84375 5.00334,-1.33963 8.62694,-3.30796 13.125,-5.03125 9.67694,-3.47077 17.69173,-6.37022 25.5,-7.5 3.26118,-0.25542 6.69711,2.01216 8.40625,2.96875 l 6.375,-1.09375 c 14.67018,45.48282 45.41416,82.24502 84.34375,105.3125 l -2.65625,6.375 c 0.95742,2.47542 2.01341,5.8247 1.30022,8.26932 
-2.83868,7.3612 -7.70097,15.13097 -13.23772,23.79318 -2.68085,4.00192 -5.42453,7.10761 -7.84375,11.6875 -0.5789,1.09589 -1.31618,2.77932 -1.875,3.9375 -3.75884,8.04236 -1.00164,17.3052 6.21875,20.78125 7.26575,3.49788 16.28447,-0.19134 20.1875,-8.25 0.006,-0.0114 0.0257,-0.0198 0.0312,-0.0312 0.004,-0.009 -0.004,-0.0225 0,-0.0312 0.55593,-1.14255 1.34353,-2.64437 1.8125,-3.71875 2.07213,-4.74702 2.76161,-8.81506 4.21875,-13.40625 3.86962,-9.72014 5.99567,-19.91903 11.32258,-26.27411 1.45868,-1.74023 3.83681,-2.4095 6.30242,-3.06964 l 3.3125,-6 c 33.93824,13.0268 71.92666,16.52246 109.875,7.90625 8.65697,-1.96557 17.01444,-4.50945 25.09375,-7.5625 0.93098,1.65133 2.66113,4.8257 3.125,5.625 2.50559,0.81518 5.24044,1.23614 7.46875,4.53125 3.98539,6.80898 6.7109,14.86416 10.03125,24.59375 1.45738,4.59111 2.17762,8.65933 4.25,13.40625 0.47234,1.08195 1.256,2.60486 1.8125,3.75 3.89482,8.08484 12.94212,11.78667 20.21875,8.28125 7.2195,-3.4779 9.97974,-12.7399 6.21875,-20.78125 -0.55889,-1.15814 -1.3273,-2.84164 -1.90625,-3.9375 -2.41946,-4.57976 -5.1627,-7.65448 -7.84375,-11.65625 -5.53721,-8.66192 -10.12968,-15.8577 -12.96875,-23.21875 -1.18711,-3.79657 0.20028,-6.15774 1.125,-8.625 -0.55378,-0.63477 -1.73881,-4.22009 -2.4375,-5.90625 40.4574,-23.88816 70.29856,-62.02129 84.3125,-106.0625 1.8924,0.29742 5.18154,0.87936 6.25,1.09375 2.19954,-1.4507 4.22194,-3.34352 8.1875,-3.03125 7.80832,1.12937 15.82288,4.02973 25.5,7.5 4.49815,1.72306 8.1216,3.72313 13.125,5.0625 1.05749,0.28309 2.57238,0.5472 3.78125,0.8125 0.0972,0.0238 0.1833,0.0714 0.28125,0.0937 0.0653,0.0146 0.15615,0.0169 0.21875,0.0312 8.67236,1.86695 17.09384,-2.87871 18.875,-10.6875 1.78074,-7.80696 -3.7543,-15.73201 -12.375,-17.8125 -1.25393,-0.28513 -3.03225,-0.76938 -4.25,-1 -5.08912,-0.96378 -9.23092,-0.7261 -14.03125,-1.125 -10.22456,-1.07138 -18.6935,-1.94269 -26.21875,-4.3125 -3.06826,-1.19028 -5.25103,-4.84124 -6.31255,-6.34375 l -5.90625,-1.71875 c 3.06226,-22.15442 2.23655,-45.21134 
-3.0625,-68.28125 -5.34839,-23.28471 -14.80037,-44.58084 -27.40625,-63.34375 1.51505,-1.37729 4.37619,-3.91091 5.1875,-4.65625 0.23716,-2.62417 0.0334,-5.37553 2.75,-8.28125 5.75134,-5.40069 13.00329,-9.87898 21.75,-15.28125 4.15167,-2.44252 7.98954,-4.01698 12.15625,-7.09375 0.94225,-0.69576 2.2289,-1.79759 3.21875,-2.59375 7.01538,-5.59633 8.63058,-15.24842 3.59375,-21.5625 -5.03683,-6.31408 -14.79712,-6.90883 -21.8125,-1.3125 -0.99856,0.79085 -2.35353,1.82252 -3.25,2.59375 -3.9265,3.37796 -6.35145,6.71439 -9.65625,10.21875 -7.21249,7.32595 -13.17407,13.43777 -19.71875,17.84375 -2.83601,1.65106 -6.98996,1.07978 -8.87505,0.96875 l -5.5625,3.96875 c -31.7188,-33.26057 -74.90466,-54.52546 -121.40605,-58.6563 -0.13006,-1.94872 -0.30045,-5.47117 -0.34375,-6.53125 -1.90371,-1.82165 -4.20342,-3.37686 -4.78125,-7.3125 -0.63617,-7.86389 0.42597,-16.32459 1.65625,-26.53125 0.6791,-4.76876 1.80738,-8.73025 2,-13.90625 0.0438,-1.17663 -0.0265,-2.88401 -0.0312,-4.15625 -9.6e-4,-8.97409 -6.54804,-16.25082 -14.625,-16.25 z m -18.3125,113.4375 -4.34375,76.71875 -0.3125,0.15625 c -0.29134,6.86335 -5.93996,12.34375 -12.875,12.34375 -2.84081,0 -5.46294,-0.91229 -7.59375,-2.46875 l -0.125,0.0625 -62.90625,-44.59375 c 19.33365,-19.01115 44.06291,-33.06039 72.5625,-39.53125 5.20599,-1.18203 10.40966,-2.0591 15.59375,-2.6875 z m 36.65625,0 c 33.27347,4.09232 64.04501,19.15882 87.625,42.25 l -62.5,44.3125 -0.21875,-0.0937 c -5.54745,4.05169 -13.36343,3.04639 -17.6875,-2.375 -1.77132,-2.22096 -2.70072,-4.83239 -2.8125,-7.46875 l -0.0625,-0.0312 z m -147.625,70.875 57.4375,51.375 -0.0625,0.3125 c 5.18437,4.50697 5.94888,12.32794 1.625,17.75 -1.7712,2.22105 -4.14208,3.71074 -6.6875,4.40625 l -0.0625,0.25 -73.625,21.25 c -3.74728,-34.26517 4.32855,-67.57364 21.375,-95.34375 z m 258.15625,0.0312 c 8.5341,13.83256 14.99655,29.28214 18.84375,46.03125 3.80106,16.54828 4.75499,33.06697 3.1875,49.03125 l -74,-21.3125 -0.0625,-0.3125 c -6.6265,-1.81104 -10.69893,-8.55162 -9.15625,-15.3125 
0.63203,-2.76962 2.10222,-5.11264 4.09375,-6.84375 l -0.0312,-0.15625 57.125,-51.125 z m -140.65625,55.3125 23.53125,0 14.625,18.28125 -5.25,22.8125 -21.125,10.15625 -21.1875,-10.1875 -5.25,-22.8125 z m 75.4375,62.5625 c 0.99997,-0.0505 1.99558,0.0396 2.96875,0.21875 l 0.125,-0.15625 76.15625,12.875 c -11.1455,31.3131 -32.47281,58.44018 -60.96875,76.59375 l -29.5625,-71.40625 0.0937,-0.125 c -2.71561,-6.30999 0.002,-13.70956 6.25,-16.71875 1.59965,-0.77041 3.27089,-1.19701 4.9375,-1.28125 z m -127.90625,0.3125 c 5.81174,0.0815 11.02462,4.11525 12.375,10.03125 0.63219,2.76958 0.3245,5.51375 -0.71875,7.9375 l 0.21875,0.28125 -29.25,70.6875 c -27.34716,-17.5486 -49.12927,-43.82403 -60.78125,-76.06245 l 75.5,-12.8125 0.125,0.15625 c 0.84451,-0.15541 1.701,-0.2304 2.53125,-0.21875 z m 63.78125,30.9688 c 2.02445,-0.0744 4.07865,0.34098 6.03125,1.28125 2.55951,1.23253 4.53673,3.17319 5.78125,5.5 l 0.28125,0 37.21875,67.25 c -4.83029,1.61923 -9.79609,3.00308 -14.875,4.15625 -28.46453,6.4629 -56.83862,4.50467 -82.53125,-4.25 l 37.125,-67.125 0.0625,0 c 2.22767,-4.16441 6.45247,-6.64887 10.90625,-6.8125 z"
         style="font-size:medium;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-indent:0;text-align:start;text-decoration:none;line-height:normal;letter-spacing:normal;word-spacing:normal;text-transform:none;direction:ltr;block-progression:tb;writing-mode:lr-tb;text-anchor:start;baseline-shift:baseline;color:#000000;fill:#ffffff;fill-opacity:1;stroke:#ffffff;stroke-width:0.25;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate;font-family:Sans;-inkscape-font-specification:Sans"
         sodipodi:nodetypes="ccccccccsccccscssccsccccccccscccsccccccccccccccscccscsccsccccscscsccccccccscccscsccccsccccscscscccccccccccccccscccsccccccccccccscccccscccccccccccccccccccccccscccscccccccccscccscccc"
         inkscape:export-filename="./path3059.png"
         inkscape:export-xdpi="250.55"
         inkscape:export-ydpi="250.55" />
    </g>
  </g>
</svg>
 9 | longDescription: "Kubernetes dashboard" 10 | shortDescription: "Kubernetes dashboard" 11 | categories: 12 | - demo -------------------------------------------------------------------------------- /demo.md: -------------------------------------------------------------------------------- 1 | Table of Contents 2 | ================= 3 | 4 | * [Demo 1: Tanzu Java Web](#demo-1-tanzu-java-web) 5 | * [Demo 2: Spring Petclinic & TAP GUI](#demo-2-spring-petclinic--tap-gui) 6 | * [Demo 3: Spring Petclinic & Service claim of Postgresql](#demo-3-spring-petclinic--service-claim-of-postgresql) 7 | * [Demo 4: Quarkus App + DB](#demo-4-quarkus-app--db) 8 | 9 | ## Demo 1: Tanzu Java Web 10 | 11 | See Getting started [guide](https://docs.vmware.com/en/VMware-Tanzu-Application-Platform/1.5/tap/getting-started.html) of Tap 1.5 12 | 13 | ## Demo 2: Spring Petclinic & TAP GUI 14 | 15 | - Create on the TAP cluster, a `demo2` namespace: 16 | ```bash 17 | ./scripts/tap.sh populateUserNamespace demo2 18 | ``` 19 | - Create a `workload` using the following github project 20 | 21 | ```bash 22 | APP=spring-tap-petclinic 23 | tanzu apps workload apply $APP \ 24 | -n demo2 \ 25 | --annotation "autoscaling.knative.dev/scaleDownDelay=15m" \ 26 | --annotation "autoscaling.knative.dev/minScale=1" \ 27 | --git-repo https://github.com/halkyonio/$APP.git \ 28 | --git-branch main \ 29 | --type web \ 30 | --label app.kubernetes.io/part-of=$APP \ 31 | -y 32 | ``` 33 | 34 | - Tail to check the build process or status of the workload/component 35 | 36 | ```bash 37 | tanzu apps -n demo2 workload tail $APP --since 1m --timestamp 38 | tanzu apps -n demo2 workload get $APP 39 | 40 | [snowdrop@tap15 tap]$ tanzu apps workload get spring-tap-petclinic --namespace demo2 41 | 📡 Overview 42 | name: spring-tap-petclinic 43 | type: web 44 | namespace: demo2 45 | 46 | 💾 Source 47 | type: git 48 | url: https://github.com/halkyonio/spring-tap-petclinic.git 49 | branch: main 50 | 51 | 📦 Supply Chain 52 | name: 
source-to-url 53 | 54 | NAME READY HEALTHY UPDATED RESOURCE 55 | source-provider True True 4m33s gitrepositories.source.toolkit.fluxcd.io/spring-tap-petclinic 56 | image-provider True True 2m51s images.kpack.io/spring-tap-petclinic 57 | config-provider True True 2m44s podintents.conventions.carto.run/spring-tap-petclinic 58 | app-config True True 2m44s configmaps/spring-tap-petclinic 59 | service-bindings True True 2m44s configmaps/spring-tap-petclinic-with-claims 60 | api-descriptors True True 2m44s configmaps/spring-tap-petclinic-with-api-descriptors 61 | config-writer True True 2m35s runnables.carto.run/spring-tap-petclinic-config-writer 62 | 63 | 🚚 Delivery 64 | name: delivery-basic 65 | 66 | NAME READY HEALTHY UPDATED RESOURCE 67 | source-provider True True 2m30s imagerepositories.source.apps.tanzu.vmware.com/spring-tap-petclinic-delivery 68 | deployer True True 2m24s apps.kappctrl.k14s.io/spring-tap-petclinic 69 | 70 | 💬 Messages 71 | No messages found. 72 | 73 | 🛶 Pods 74 | NAME READY STATUS RESTARTS AGE 75 | spring-tap-petclinic-00001-deployment-65ffccfd47-dmqwc 2/2 Running 0 2m31s 76 | spring-tap-petclinic-build-1-build-pod 0/1 Completed 0 4m34s 77 | spring-tap-petclinic-config-writer-cfrpg-pod 0/1 Completed 0 2m45s 78 | 79 | 🚢 Knative Services 80 | NAME READY URL 81 | spring-tap-petclinic Ready http://spring-tap-petclinic.demo2.10.0.77.164.sslip.io 82 | ... 
83 | ``` 84 | - Open the URL of the service within your browser: http://spring-tap-petclinic.demo2..sslip.io/ 85 | - Next, register the catalog-onfo.yaml file of the project `https://github.com/halkyonio/$APP/blob/main/catalog-info.yaml` using the screen `http://tap-gui..sslip.io/catalog-import` 86 | - Look to the resource health, beans, etc information using the screen `http://tap-gui..sslip.io/catalog/default/component/spring-tap-petclinic/workloads/pod` 87 | - Cleanup 88 | 89 | ```bash 90 | tanzu apps workload -n demo2 delete $APP 91 | ``` 92 | 93 | ## Demo 3: Spring Petclinic & Service claim of Postgresql 94 | 95 | This example extends the previous and will demonstrate how to bind a Postgresql DB with the Spring application. 96 | 97 | - First, review if the Posgresql service is available using the `tanzu service class` command: 98 | ```bash 99 | tanzu service class get postgresql-unmanaged 100 | NAME: postgresql-unmanaged 101 | DESCRIPTION: PostgreSQL by Bitnami 102 | READY: true 103 | 104 | PARAMETERS: 105 | KEY DESCRIPTION TYPE DEFAULT REQUIRED 106 | storageGB The desired storage capacity of the database, in Gigabytes. 
integer 1 false 107 | ``` 108 | - Next, create a new namespace `demo3` 109 | ```bash 110 | ./scripts/tap.sh populateUserNamespace demo3 111 | ``` 112 | - Claim a service within the namespace `demo3` using the class `postgresql-unmanaged` 113 | ```bash 114 | tanzu service class-claim create postgresql-1 --class postgresql-unmanaged -n demo3 115 | ``` 116 | - Please run this command to see if the service has been created and secret 117 | ```bash 118 | tanzu services class-claims get postgresql-1 --namespace demo3 119 | ``` 120 | - Obtain the Service Claim reference by running the following command: 121 | 122 | ```bash 123 | tanzu service class-claim get postgresql-1 -n demo3 124 | Name: postgresql-1 125 | Namespace: demo3 126 | Claim Reference: services.apps.tanzu.vmware.com/v1alpha1:ClassClaim:postgresql-1 127 | Class Reference: 128 | Name: postgresql-unmanaged 129 | Parameters: None 130 | Status: 131 | Ready: True 132 | Claimed Resource: 133 | Name: b839bb44-8bdf-404f-8641-a8e422dfdb16 134 | Namespace: demo3 135 | Group: 136 | Version: v1 137 | Kind: Secret 138 | ``` 139 | >**Tip**: You can get the name of the claim using `kubectl get classClaim/postgresql-1 -n demo3 -ojson | jq -r .metadata.name` 140 | 141 | - Use the `Workload` of the [spring boot petclinic repo](https://github.com/halkyonio/spring-tap-petclinic.git) and configure the `service-ref` like also pass as env var the property to tell to Spring to use the `application-postgresql.properties` file 142 | 143 | ```bash 144 | APP=spring-tap-petclinic 145 | CLAIM_NAME=postgresql-1 146 | CLAIM_REF=$(kubectl get classClaim/$CLAIM_NAME -n demo3 -ojson | jq -r .metadata.name) 147 | tanzu apps workload create $APP \ 148 | -n demo3 \ 149 | --git-repo https://github.com/halkyonio/$APP.git \ 150 | --git-branch main \ 151 | --annotation "autoscaling.knative.dev/scaleDownDelay=15m" \ 152 | --annotation "autoscaling.knative.dev/minScale=1" \ 153 | --label app.kubernetes.io/part-of=$APP \ 154 | --type web \ 155 | --env 
"SPRING_PROFILES_ACTIVE=postgres" \ 156 | --service-ref "db=services.apps.tanzu.vmware.com/v1alpha1:ClassClaim:$CLAIM_REF" 157 | ``` 158 | 159 | - Check the status of the workload 160 | 161 | ```bash 162 | tanzu apps workload get -n demo3 spring-tap-petclinic 163 | ... 164 | NAME STATUS RESTARTS AGE 165 | spring-tap-petclinic-build-10-build-pod Succeeded 0 5h18m 166 | spring-tap-petclinic-00015-deployment-75575545fd-k4b27 Running 0 4h50m 167 | ``` 168 | 169 | - Review the content of the Pod's manifest to see if the secret has been well mounted as a volume, that the ENV VAR `SERVICE_BINDING_ROOT` is there and if the following mount `/bindings/db` exists. 170 | 171 | ```bash 172 | kubectl get pod -l "app=spring-tap-petclinic-00002" -n demo3 -o yaml | grep -A 4 volume 173 | volumeMounts: 174 | - mountPath: /bindings/db 175 | name: binding-d9cb99c4e655c91104670a7cc22c8bff9585d79a 176 | readOnly: true 177 | - mountPath: /var/run/secrets/kubernetes.io/serviceaccount 178 | -- 179 | volumeMounts: 180 | - mountPath: /var/run/secrets/kubernetes.io/serviceaccount 181 | name: kube-api-access-f266x 182 | readOnly: true 183 | dnsPolicy: ClusterFirst 184 | -- 185 | volumes: 186 | - name: binding-d9cb99c4e655c91104670a7cc22c8bff9585d79a 187 | projected: 188 | defaultMode: 420 189 | sources: 190 | ``` 191 | - Next, extract from the `Deliverable` resource the source image, and grab the OCI bundle pushed within the local registry using the `imgpkg` tool to see which YAML resources have been generated ;-) 192 | ```bash 193 | IMG_SHA=$(kubectl get deliverable/spring-tap-petclinic -n demo3 -o jsonpath='{.spec.source.image}') 194 | imgpkg pull --registry-verify-certs=false \ 195 | -b $IMG_SHA \ 196 | -o $(pwd)/sb 197 | ``` 198 | - Cleanup 199 | 200 | ```bash 201 | tanzu apps workload -n demo3 delete $APP 202 | ``` 203 | 204 | ## Demo 4: Quarkus App + DB 205 | 206 | TODO: To be reviewed !! 
207 | 208 | This example illustrates how to use the quarkus runtime and a Database service on a platform running TAP. As the current platform is not able to build by default 209 | the fat-jar used by Quarkus, it has been needed to create a new supply chain able to perform such a build. The scenario that we will follow part of this demo will 210 | do: 211 | 212 | - Git clone a github [quarkus application](https://github.com/halkyonio/quarkus-tap-petclinic) using Fluxcd 213 | - Build an image using the Quarkus [buildpacks](https://github.com/quarkusio/quarkus-buildpacks) and kpack 214 | - Deploy the application as knative serving 215 | - Bind the Service using the Service Binding Operator and Service Toolkit 216 | 217 | In order to use the Quarkus Buildpacks builder image, it is needed that first we tag the `codejive/***` images to the registry where we have access (docker.io, gcr.io, quay.io, ...) 218 | 219 | ```bash 220 | export REGISTRY_URL="ghcr.io/halkyonio" 221 | docker pull codejive/buildpacks-quarkus-builder:jvm 222 | docker pull codejive/buildpacks-quarkus-run:jvm 223 | docker pull codejive/buildpacks-quarkus-build:jvm 224 | 225 | docker tag codejive/buildpacks-quarkus-builder:jvm $REGISTRY_URL/buildpacks-quarkus-builder:jvm 226 | docker tag codejive/buildpacks-quarkus-run:jvm $REGISTRY_URL/buildpacks-quarkus-run:jvm 227 | docker tag codejive/buildpacks-quarkus-build:jvm $REGISTRY_URL/buildpacks-quarkus-build:jvm 228 | 229 | docker push $REGISTRY_URL/buildpacks-quarkus-builder:jvm 230 | docker push $REGISTRY_URL/buildpacks-quarkus-run:jvm 231 | docker push $REGISTRY_URL/buildpacks-quarkus-build:jvm 232 | ``` 233 | 234 | If you plan to use a local private registry it is then needed to patch the Knative configmap `config-deployment` 235 | to add the following parameter `registriesSkippingTagResolving` and to rollout 236 | 237 | ```bash 238 | kubectl patch pkgi tap -n tap-install -p '{"spec":{"paused":true}}' --type=merge 239 | kubectl patch pkgi cnrs -n 
tap-install -p '{"spec":{"paused":true}}' --type=merge 240 | kubectl edit cm/config-deployment -n knative-serving 241 | ... 242 | registriesSkippingTagResolving: registry.harbor.10.0.77.176.nip.io:32443 243 | kubectl rollout status deployment -n controller -n knative-serving 244 | ``` 245 | 246 | 247 | When done, we can install the Quarkus supply chain and templates files as an application using kapp 248 | 249 | ```bash 250 | ./scripts/tap.sh populateUserNamespace demo4 251 | pushd supplychain/quarkus-sc 252 | kapp deploy --yes -a quarkus-supply-chain \ 253 | -n demo4 \ 254 | -f <(ytt --ignore-unknown-comments -f ./values.yaml -f helpers.lib.yml -f ./k8s -f ./templates -f supply-chain.yaml) 255 | ``` 256 | **Note**: If you use a local private registry, override the values of the values.yaml file using the ytt parameter `-v image_prefix=registry.harbor.10.0.77.176.nip.io:32443/quarkus` 257 | 258 | When done, deploy the `quarkus-app` workload using either `kapp` 259 | 260 | ```bash 261 | kapp deploy --yes -a quarkus-app -n demo4 \ 262 | -f <(ytt --ignore-unknown-comments -f workload.yaml -f ./values.yaml) 263 | popd 264 | ``` 265 | 266 | or create the workload using the `Tanzu client` 267 | 268 | ```bash 269 | tanzu apps workload create quarkus-app \ 270 | -n demo4 \ 271 | --git-repo https://github.com/halkyonio/quarkus-tap-petclinic.git \ 272 | --git-branch main \ 273 | --type quarkus \ 274 | --label app.kubernetes.io/part-of=quarkus-petclinic-app \ 275 | -y 276 | tanzu apps workload -n demo4 tail quarkus-app --since 10m --timestamp 277 | ``` 278 | 279 | Observe the build/deployment of the application 280 | 281 | ```bash 282 | tanzu apps workload get quarkus-app -n demo4 283 | # quarkus-app: Ready 284 | --- 285 | lastTransitionTime: "2022-02-09T15:58:01Z" 286 | message: "" 287 | reason: Ready 288 | status: "True" 289 | type: Ready 290 | 291 | Workload pods 292 | NAME STATE AGE 293 | quarkus-app-build-1-build-pod Succeeded 2m20s 294 | 295 | or using the kubectl tree 
plugin 296 | 297 | ## List the supply chain resources created to perform the build 298 | kubectl tree workload quarkus-app -n demo4 299 | NAMESPACE NAME READY REASON AGE 300 | demo4 Workload/quarkus-app True Ready 2m55s 301 | demo4 ├─App/quarkus-app - 102s 302 | demo4 ├─GitRepository/quarkus-app True GitOperationSucceed 2m49s 303 | demo4 └─Image/quarkus-app True 2m40s 304 | demo4 ├─Build/quarkus-app-build-1 - 2m40s 305 | demo4 │ └─Pod/quarkus-app-build-1-build-pod False PodCompleted 2m39s 306 | demo4 └─SourceResolver/quarkus-app-source True 2m40s 307 | ``` 308 | 309 | wait till the deployment is done and get then the URL fo the service 310 | ```bash 311 | kubectl get ksvc/quarkus-app -n demo4 312 | NAME URL LATESTCREATED LATESTREADY READY REASON 313 | quarkus-app http://quarkus-app.demo4..nip.io quarkus-app-00001 quarkus-app-00001 True 314 | ``` 315 | 316 | And now, do the job to bind the microservice to a postgresql DB ;-) 317 | 318 | Obtain a service reference by running: 319 | 320 | ```bash 321 | tanzu service instance list -owide -A 322 | NAMESPACE NAME KIND SERVICE TYPE AGE SERVICE REF 323 | tap-demo postgres-db Postgres postgresql 19m sql.tanzu.vmware.com/v1:Postgres:postgres-db 324 | ``` 325 | 326 | Finally, do the binding 327 | 328 | ```bash 329 | tanzu apps workload apply quarkus-app \ 330 | -n demo4 \ 331 | --git-repo https://github.com/halkyonio/quarkus-tap-petclinic.git \ 332 | --git-branch service-binding \ 333 | --type quarkus \ 334 | --label app.kubernetes.io/part-of=quarkus-petclinic-app \ 335 | --service-ref "db=sql.tanzu.vmware.com/v1:Postgres:postgres-db" 336 | ``` 337 | 338 | **Note**: If the service is running in another namespace, it is then needed to create a ResourceClaim to expose it to all the namespaces. 339 | Before to execute the following command, be sure that no other `ResourceClaim` exists on the platform !! 
340 | 341 | ```text 342 | cat <**Note**: The full list of the releases and their EOL support is available from the [releases page](https://network.pivotal.io/products/tanzu-application-platform/releases). 27 | 28 | By supporting the [Supply Chain choreograph](https://cartographer.sh/) pattern, TAP allows 29 | to decouple the path (build, deploy, scan, test, ...) to move a microservice to different kubernetes environments 30 | from the development lifecycle process followed by the developers. 31 | 32 | ![vision.png](assets/vision.png) 33 | 34 | ![deploy-tap](assets/deploy-tap.png) 35 | 36 | ## Components 37 | 38 | TAP rely on the following components which are installed as versioned packages from OCI bundles. 39 | 40 | >**Note**: You can get more information about the packages (version, description) from this [page](./packages.md): 41 | 42 | - `CNCF Buildpacks` builder images able to convert your source code into a secure, efficient, production ready container image 43 | - `CNCF Knative`serving and eventing, 44 | - `kpack` controller able to build images using `Buildpacks`, 45 | - `CNCF Contour` to route the traffic internally or externally using `Ingress` 46 | - `kapp` controller to install/uninstall k8s resources using templates (ytt, ...) 47 | - `Application Live & Application Accelerator` to guide the Architects/Developers to design/deploy/monitor applications on k8s. 48 | - `Tekton pipelines` and `FluxCD` to fetch the sources (git, ...) 49 | - `Convention` controller able to change the `Workloads` according to METADATA (framework, runtime, ...) 50 | - `Service Binding & Toolkit` able to manage locally the services, 51 | - `Cartographer` which allows `App Operators` to create pre-approved paths to production by integrating Kubernetes resources with the elements of toolchains (e.g. Jenkins, CI/CD,...). 52 | - `CNCF Crossplane` control plane which dynamically deploy service instances (e.g. AWS RDS) with Services Toolkit and the pre-installed Bitnami Services. 
53 | - `Bitnami service` Helm charts supported by TAP (MySQL, PostgreSQL, RabbitMQ and Redis) 54 | - `Application Configuration Service` component provides a Kubernetes-native experience to enable the runtime configuration of existing Spring applications (instead of using Spring Cloud config server) 55 | - `Spring Cloud Gateway` component able to route internal or external API requests to application services that expose APIs. 56 | 57 | ## Prerequisites 58 | 59 | The following [installation](https://docs.vmware.com/en/VMware-Tanzu-Application-Platform/1.5/tap/GUID-prerequisites.html) guide explains what the prerequisites are. 60 | 61 | TL;DR: You need to: 62 | 63 | - Have a [Tanzu account](https://account.run.pivotal.io/z/uaa/sign-up) on `https://network.tanzu.vmware.com/` to download the software or to access the registry `registry.tanzu.vmware.com`, 64 | - Accept the needed [EULA](https://docs.vmware.com/en/VMware-Tanzu-Application-Platform/1.5/tap/install-tanzu-cli.html#accept-the-end-user-license-agreements-0) 65 | - Have a kind cluster >= 1.24 installed with a private docker registry. Use this [script](https://github.com/snowdrop/k8s-infra/blob/main/kind/kind.sh) 66 | - Have a Linux VM machine with at least 8 CPUs, 8 GB of RAM and 100 GB of disk (if you plan to use a container registry locally) 67 | - Private container registry such as docker registry 68 | 69 | ## Instructions 70 | 71 | ### Introduction 72 | 73 | The instructions of the official [guide](https://docs.vmware.com/en/VMware-Tanzu-Application-Platform/1.5/tap/overview.html) have been followed to install the release [1.5.0](https://docs.vmware.com/en/VMware-Tanzu-Application-Platform/1.5/tap/GUID-release-notes.html). 74 | 75 | To simplify your life, we have designed a [bash script](scripts/tap.sh) which allows you to install the different bits in a VM: 76 | 77 | 1.
[Tanzu client](https://github.com/vmware-tanzu/tanzu-framework/blob/main/docs/cli/getting-started.md) and plugins (package, application, secret, etc) 78 | 2. [Cluster Essentials](https://network.tanzu.vmware.com/products/tanzu-cluster-essentials/) 79 | - [Carvel tools](https://carvel.dev/): ytt, imgpkg, kbld, kapp 80 | - [Kapp controller](https://carvel.dev/kapp-controller/), 81 | - [Secretgen controller](https://github.com/vmware-tanzu/carvel-secretgen-controller) 82 | 3. TAP Repository 83 | 84 | A repository is an image bundle containing different k8s manifests, templates, files able to install/configure the TAP packages. 85 | Such a repository is managed using the Tanzu command `tanzu package repository ...` 86 | 4. TAP Packages 87 | 88 | The packages are the building blocks or components part of the TAP platform. Each of them will install a specific feature such as Knative, cartographer, contour, cnrs, ... 89 | They are managed using the following command `tanzu package available | tanzu package installed ...` 90 | 91 | >**NOTE**: Some additional kubernetes tools which are very helpful (e.g: k9s, helm, krew) can be installed using the command `./scripts/tap.sh kube-tools` 92 | 93 | ### How to install TAP 94 | 95 | To install TAP, create first a kind cluster and secured container registry using this script: 96 | ```bash 97 | curl -s -L "https://raw.githubusercontent.com/snowdrop/k8s-infra/main/kind/kind.sh" | \ 98 | bash -s install \ 99 | --secure-registry \ 100 | --skip-ingress-installation \ 101 | --registry-user admin \ 102 | --registry-password snowdrop \ 103 | --server-ip 104 | ``` 105 | >**Tip**: Use the `-h` of the kind.sh script to see the others options ! 106 | 107 | >**Warning**: If you deploy TAP on a remote VM, then it is mandatory to specify the option `--server-ip ` to expose the kubernetes API server to access it remotely ! 
108 | 109 | Next, execute the [tap.sh](scripts/tap.sh) bash script locally and configure the following parameters: 110 | 111 | - **LOCAL_REGISTRY**: Boolean used to tell if we will use a local registry. Default: false 112 | - **INSTALL_TANZU_CLI**: Boolean used to install the Tanzu tools: pivnet and Tanzu client. Default: true 113 | - **REGISTRY_SERVER**: registry DNS name (docker.io, ghcr.io, quay.io, registry.harbor..nip.io:) 114 | - **REGISTRY_OWNER**: docker user account, ghcr.io ORG owner, container project (e.g: tap - `registry.harbor..nip.io:/tap`) 115 | - **REGISTRY_USERNAME**: username to be used to log on to the registry 116 | - **REGISTRY_PASSWORD**: password to be used to log on to the registry 117 | - **REGISTRY_CA_PATH**: Path of the CA certificate used by your container registry (optional) 118 | - **TANZU_REG_SERVER**: Tanzu registry from which packages and images can be pulled (e.g: registry.tanzu.vmware.com) 119 | - **TANZU_REG_USERNAME**: username used to authenticate against the Tanzu registry 120 | - **TANZU_REG_PASSWORD**: password used to authenticate against the Tanzu registry 121 | 122 | >**Warning**: As the script will download different `products` from https://network.tanzu.vmware.com/ 123 | using [pivnet](https://github.com/pivotal-cf/pivnet-cli), then it is mandatory to configure the following parameter and to 124 | have a [Tanzu network account like an API account](https://tanzu.vmware.com/developer/guides/tanzu-network-gs/): 125 | 126 | - **TANZU_PIVNET_LEGACY_API_TOKEN**: Token used by pivnet CLI to login to the Tanzu products website 127 | 128 | Finally, define the home directory and IP address of the VM hosting TAP and the Kubernetes cluster: 129 | 130 | - **REMOTE_HOME_DIR**: home directory where files will be installed within the VM.
Default: $HOME 131 | - **VM_IP**: IP address of the VM where the cluster is running 132 | 133 | >**IMPORTANT**: We recommend to relocate the TAP repository [images](https://docs.vmware.com/en/VMware-Tanzu-Application-Platform/1.5/tap/GUID-install-air-gap.html#relocate-images-to-a-registry-0) 134 | to your registry from the Tanzu registry before to perform the installation to speed the process if you re-install it. 135 | 136 | In this case, set the `COPY_PACKAGES` parameter to `TRUE` the first time you will install TAP as the images will be copied using `imgpkg tool`. 137 | 138 | >**Tip**: Use the `-h` of the ./scripts/tap.sh script to see the others options we currently support ! 139 | 140 | Example of installation 141 | ```bash 142 | VM_IP= 143 | LOCAL_REGISTRY="true" 144 | REGISTRY_SERVER= 145 | REGISTRY_OWNER= 146 | REGISTRY_USERNAME= 147 | REGISTRY_PASSWORD= 148 | REGISTRY_CA_PATH= 149 | TANZU_REG_SERVER= 150 | TANZU_REG_USERNAME= 151 | TANZU_REG_PASSWORD= 152 | TANZU_PIVNET_LEGACY_API_TOKEN= 153 | COPY_PACKAGES="false" 154 | INSTALL_TANZU_CLI="true" 155 | ./scripts/tap.sh 156 | 157 | or 158 | 159 | ssh -i ~/.ssh/id_server_private_key snowdrop@10.0.77.176 -p 22 \ 160 | REMOTE_HOME_DIR="/home/snowdrop" \ 161 | VM_IP="10.0.77.176" \ 162 | LOCAL_REGISTRY="true" \ 163 | REGISTRY_SERVER="10.0.77.176.nip.io:5000" \ 164 | REGISTRY_OWNER="tap" \ 165 | REGISTRY_USERNAME="admin" \ 166 | REGISTRY_PASSWORD="snowdrop" \ 167 | REGISTRY_CA_PATH="/home/snowdrop/.registry/certs/kind-registry/client.crt" \ 168 | TANZU_REG_SERVER="registry.tanzu.vmware.com" \ 169 | TANZU_REG_USERNAME="" \ 170 | TANZU_REG_PASSWORD="**Note**: See our demo page [here](demo.md) which covers more examples. 238 | 239 | ## Additional information 240 | 241 | ### Using a private registry 242 | 243 | As mentioned within the previous section, when we plan to use a private local registry such as Harbor, docker registry, etc some additional steps are required such as: 244 | 245 | 1. 
Get the CA certificate file from the registry and set the parameter `REGISTRY_CA_PATH` for the bash script 246 | 247 | 2. Get the TAP packages and push them to the private registry 248 | 249 | ```bash 250 | imgpkg copy -b registry.tanzu.vmware.com/tanzu-application-platform/tap-packages:1.5.0 --to-tar packages.tar 251 | imgpkg copy --tar packages.tar --to-repo /tap/tap-packages 252 | ``` 253 | 254 | 3. Define the TAP `shared` key within the `tap-values.yaml` file to pass the `ca_cert_data` (see [doc](https://docs.vmware.com/en/VMware-Tanzu-Application-Platform/1.5/tap/GUID-view-package-config.html)) 255 | ```bash 256 | shared: 257 | ca_cert_data: | 258 | -----BEGIN CERTIFICATE----- 259 | MIIDFDCCAfygAwIBAgIRAJqAGNrteyM97HLF2i1OhpQwDQYJKoZIhvcNAQELBQAw 260 | FDESMBAGA1UEAxMJaGFyYm9yLWNhMB4XDTIyMDYwMzEwMDc1M1oXDTIzMDYwMzEw 261 | ... 262 | H1H7yyFbxeaRK33ctKxXq2FzEYePYQ0BdTw36O8/R5CXwTMYvbG+kRMmNlRNHhD7 263 | 82elfYZx4DxrWcap2uqrvrR8A8jnV5oa/sBoqcY6U1rIXG2mkVXvuvihOjIm8wHy 264 | 8dHt3pESuqbOo2aDt9uP77sBIjho0JBT 265 | -----END CERTIFICATE----- 266 | ... 267 | ``` 268 | 269 | >**NOTE**: The steps 2 and 3 are managed by the `install.sh` script ! 270 | 271 | >**Tip**: You can set up a docker registry using our [kind secured script](https://raw.githubusercontent.com/snowdrop/k8s-infra/main/kind/kind.sh) :-) 272 | 273 | ### Tanzu Client 274 | 275 | FYI: The `install.sh` bash script installs the Tanzu client as described hereafter like the carvel tools: imgpkg, kapp, kbld, pivnet ! 276 | 277 | The Tanzu [client](https://network.tanzu.vmware.com/products/tanzu-application-platform/#/releases/1095326) can be installed locally on a machine 278 | having access to the k8s cluster running TAP using the pivnet tool. 
279 | 280 | According to the TAP release that you would like to install, select the appropriate `product-file-id` and `release-version` from the 281 | download page of the Tanzu Application Platform product/release - https://network.tanzu.vmware.com/products/tanzu-application-platform. 282 | 283 | Next, install the tool using by example the following instructions on a Mac machine. 284 | 285 | **Note**: The instructions are equivalent on Linux except the TAR file to be downloaded ! 286 | 287 | ```bash 288 | pivnet download-product-files --product-slug='tanzu-application-platform' --release-version='1.5.0' --product-file-id=1212837 289 | tar -vxf tanzu-framework-darwin-amd64.tar 290 | install cli/core/v0.11.4/tanzu-core-darwin_amd64 /usr/local/bin/tanzu 291 | export TANZU_CLI_NO_INIT=true 292 | tanzu plugin install --local cli all 293 | tanzu plugin list 294 | ``` 295 | 296 | ### Review what it has been installed 297 | 298 | - Check the status of the TAP packages installed and if all the packages are well deployed 299 | 300 | ```bash 301 | tanzu package installed list -n tap-install 302 | / Retrieving installed packages... 
303 | NAME PACKAGE-NAME PACKAGE-VERSION STATUS 304 | accelerator accelerator.apps.tanzu.vmware.com 1.0.0 Reconcile succeeded 305 | appliveview run.appliveview.tanzu.vmware.com 1.0.1 Reconcile succeeded 306 | appliveview-conventions build.appliveview.tanzu.vmware.com 1.0.1 Reconcile succeeded 307 | buildservice buildservice.tanzu.vmware.com 1.4.2 Reconcile succeeded 308 | cartographer cartographer.tanzu.vmware.com 0.1.0 Reconcile succeeded 309 | cert-manager cert-manager.tanzu.vmware.com 1.5.3+tap.1 Reconcile succeeded 310 | cnrs cnrs.tanzu.vmware.com 1.0.0 Reconcile succeeded 311 | contour contour.tanzu.vmware.com 1.08.2+tap.1 Reconcile succeeded 312 | conventions-controller controller.conventions.apps.tanzu.vmware.com 0.5.0 Reconcile succeeded 313 | developer-conventions developer-conventions.tanzu.vmware.com 0.5.0-build.1 Reconcile succeeded 314 | fluxcd-source-controller fluxcd.source.controller.tanzu.vmware.com 0.16.0 Reconcile succeeded 315 | ootb-delivery-basic ootb-delivery-basic.tanzu.vmware.com 0.5.1 Reconcile succeeded 316 | ootb-supply-chain-basic ootb-supply-chain-basic.tanzu.vmware.com 0.5.1 Reconcile succeeded 317 | ootb-templates ootb-templates.tanzu.vmware.com 0.5.1 Reconcile succeeded 318 | service-bindings service-bindings.labs.vmware.com 0.6.0 Reconcile succeeded 319 | services-toolkit services-toolkit.tanzu.vmware.com 0.5.0 Reconcile succeeded 320 | source-controller controller.source.apps.tanzu.vmware.com 0.2.0 Reconcile succeeded 321 | spring-boot-conventions spring-boot-conventions.tanzu.vmware.com 0.3.0 Reconcile succeeded 322 | tap tap.tanzu.vmware.com 1.0.0 Reconcile succeeded 323 | tap-gui tap-gui.tanzu.vmware.com 1.0.1 Reconcile succeeded 324 | tap-telemetry tap-telemetry.tanzu.vmware.com 0.1.2 Reconcile succeeded 325 | tekton-pipelines tekton.tanzu.vmware.com 0.30.0 Reconcile succeeded 326 | 327 | # or individually 328 | tanzu package installed get -n tap-install 329 | ``` 330 | 331 | ### Change TAP configuration 332 | 333 | - If some 
parameters should be changed, you can first check the list of the available values for a package: 334 | 335 | ```bash 336 | tanzu package available get ootb-supply-chain-basic.tanzu.vmware.com/0.5.1 -n tap-install --values-schema 337 | ``` 338 | 339 | - Next edit and change the `tap-values.yaml` file created 340 | - Update finally the TAP package using the following command: 341 | 342 | ```bash 343 | tanzu package installed update tap -p tap.tanzu.vmware.com -v 1.0.0 --values-file tap-values.yml -n tap-install 344 | ``` 345 | 346 | - To install a package individually, use the following [documentation](https://docs.vmware.com/en/Tanzu-Application-Platform/1.0/tap/GUID-install-components.html) page 347 | 348 | ## Clean 349 | 350 | To uninstall the TAP repository and the packages, execute this command `./scripts/tap.sh remove`. 351 | 352 | >**Tip**: If you want to clean everything (e.g demo namespaces), then create a new kind kubernetes cluster ;-) 353 | 354 | That's all ! 355 | 356 | -------------------------------------------------------------------------------- /scripts/tap.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Execute this command locally 4 | # 5 | # ./install 6 | # 7 | # or remotely 8 | # ssh -i @ -p "bash -s" -- < ./tap.sh 9 | # 10 | # Define the following env vars: 11 | # - REMOTE_HOME_DIR: home directory where files will be installed within the remote VM 12 | # - VM_IP: IP address of the VM where the cluster is running 13 | # - LOCAL_REGISTRY: Boolean used to tell if we will use a local registry 14 | # - REGISTRY_SERVER: image registry server (docker.io, gcr.io, localhost:5000) 15 | # - REGISTRY_OWNER: docker user, ghcr.io ORG owner 16 | # - REGISTRY_USERNAME: username to be used to log on the registry 17 | # - REGISTRY_PASSWORD: password to be used to log on the registry 18 | # - TANZU_PIVNET_LEGACY_API_TOKEN: Token used by pivnet to login 19 | # - TANZU_REG_SERVER: 
registry.tanzu.vmware.com 20 | # - TANZU_REG_USERNAME: user to be used to be authenticated against the Tanzu image registry 21 | # - TANZU_REG_PASSWORD: password to be used to be authenticated against the Tanzu image registry 22 | # - COPY_PACKAGES: Copy package image bundles from Tanzu to your favorite image registries 23 | # - REGISTRY_CA_PATH: Path of the CA certificate used by the local private container registry 24 | # - INSTALL_TANZU_CLI: Boolean used to install the Tanzu tools: pivnet and Tanzu client 25 | 26 | set -e 27 | 28 | # TO BE REVIEWED AS IT CONFLICTS WITH FUNCTIONS 29 | # KUBE_CFG_FILE=${1:-config} 30 | # export KUBECONFIG=$HOME/.kube/${KUBE_CFG_FILE} 31 | 32 | COPY_PACKAGES=${COPY_PACKAGES:-false} 33 | REMOTE_HOME_DIR=${REMOTE_HOME_DIR:-$HOME} 34 | DEST_DIR="/usr/local/bin" 35 | TANZU_TEMP_DIR="$REMOTE_HOME_DIR/tanzu" 36 | 37 | VM_IP=${VM_IP:-127.0.0.1} 38 | LOCAL_REGISTRY=${LOCAL_REGISTRY:-false} 39 | INSTALL_TANZU_CLI=${INSTALL_TANZU_CLI:-true} 40 | REGISTRY_SERVER=${REGISTRY_SERVER:-docker.io} 41 | REGISTRY_OWNER=${REGISTRY_OWNER} 42 | REGISTRY_USERNAME=${REGISTRY_USERNAME} 43 | REGISTRY_PASSWORD=${REGISTRY_PASSWORD} 44 | REGISTRY_CA_PATH=${REGISTRY_CA_PATH} 45 | 46 | # Token stored under your profile: https://network.tanzu.vmware.com/users/dashboard/edit-profile 47 | TANZU_PIVNET_LEGACY_API_TOKEN=${TANZU_PIVNET_LEGACY_API_TOKEN} 48 | TANZU_REG_SERVER=${TANZU_REG_SERVER} 49 | TANZU_REG_USERNAME=${TANZU_REG_USERNAME} 50 | TANZU_REG_PASSWORD=${TANZU_REG_PASSWORD} 51 | 52 | INGRESS_DOMAIN=$VM_IP.sslip.io 53 | 54 | NAMESPACE_DEMO="tap-demo" 55 | NAMESPACE_TAP="tap-install" 56 | 57 | # https://github.com/pivotal-cf/pivnet-cli/releases 58 | PIVNET_CLI_VERSION="3.0.1" 59 | 60 | TAP_VERSION="1.5.0" 61 | 62 | TANZU_CLI_VERSION="v0.28.1" 63 | TANZU_CLIENT_FILE_ID="1446073" 64 | TANZU_CLIENT_NAME="tanzu-framework-linux-amd64" 65 | 66 | TANZU_CLUSTER_ESSENTIALS_VERSION="1.5.0" 67 | TANZU_CLUSTER_ESSENTIALS_FILE_ID="1460876" 68 | 
TANZU_CLUSTER_ESSENTIALS_IMAGE_SHA="sha256:79abddbc3b49b44fc368fede0dab93c266ff7c1fe305e2d555ed52d00361b446" 69 | 70 | TAP_AUTH_FILE_ID="1309818" 71 | TAP_AUTH_NAME="tap-auth" 72 | TAP_AUTH_VERSION="1.1.0-beta.1" 73 | 74 | # Do not use the RAW URL but instead the Github HTTPS URL followed by blob/main 75 | TAP_GIT_CATALOG_REPO=https://github.com/halkyonio/tap-catalog-blank/blob/main 76 | 77 | # Kubernetes Dashboard 78 | K8S_GUI_VERSION=v2.7.0 79 | 80 | ################### 81 | # Global parameters 82 | ################### 83 | NC='\033[0m' # No Color 84 | COLOR_RESET="\033[0m" # Reset color 85 | BLACK="\033[0;30m" 86 | BLUE='\033[0;34m' 87 | BROWN="\033[0;33m" 88 | GREEN='\033[0;32m' 89 | GREY="\033[0;90m" 90 | CYAN='\033[0;36m' 91 | MAGENTA='\033[0;35m' 92 | RED='\033[0;31m' 93 | PURPLE="\033[0;35m" 94 | WHITE='\033[0;37m' 95 | YELLOW='\033[0;33m' 96 | 97 | newline=$'\n' 98 | 99 | ############### 100 | ## Functions ## 101 | ############### 102 | fmt() { 103 | COLOR="WHITE" 104 | MSG="${@:1}" 105 | echo -e "${!COLOR} ${MSG}${NC}" 106 | } 107 | 108 | generate_eyecatcher(){ 109 | COLOR=${1} 110 | for i in {1..50}; do echo -ne "${!COLOR}$2${NC}"; done 111 | } 112 | 113 | log_msg() { 114 | COLOR=${1} 115 | MSG="${@:2}" 116 | echo -e "\n${!COLOR}## ${MSG}${NC}" 117 | } 118 | 119 | log_line() { 120 | COLOR=${1} 121 | MSG="${@:2}" 122 | echo -e "${!COLOR}## ${MSG}${NC}" 123 | } 124 | 125 | log() { 126 | MSG="${@:2}" 127 | echo; generate_eyecatcher ${1} '#'; log_msg ${1} ${MSG}; generate_eyecatcher ${1} '#'; echo 128 | } 129 | 130 | check_os() { 131 | PLATFORM='unknown' 132 | unamestr=$(uname) 133 | if [[ "$unamestr" == 'Linux' ]]; then 134 | PLATFORM='linux' 135 | elif [[ "$unamestr" == 'Darwin' ]]; then 136 | PLATFORM='darwin' 137 | fi 138 | log "CYAN" "OS type: $PLATFORM" 139 | } 140 | 141 | check_distro() { 142 | DISTRO=$( cat /etc/*-release | tr [:upper:] [:lower:] | grep -Poi '(debian|ubuntu|red hat|centos|fedora)' | uniq ) 143 | if [ -z $DISTRO ]; then 144 | 
DISTRO='unknown' 145 | fi 146 | log "CYAN" "Detected Linux distribution: $DISTRO" 147 | } 148 | 149 | check_arch() { 150 | ARCH="$(uname -m | sed -e 's/x86_64/amd64/' -e 's/\(arm\)\(64\)\?.*/\1\2/' -e 's/aarch64$/arm64/')" 151 | if [ -z $ARCH ]; then 152 | ARCH='unknown' 153 | fi 154 | log "CYAN" "Detected Arch: $ARCH" 155 | } 156 | 157 | generate_ca_cert_data_yaml() { 158 | if [ -n "$REGISTRY_CA_PATH" ]; then 159 | caCertFormated=$(awk '{printf " %s\n", $0}' < ${REGISTRY_CA_PATH}) 160 | echo "$caCertFormated" 161 | fi 162 | } 163 | 164 | patch_kapp_configmap() { 165 | if [ -n "$REGISTRY_CA_PATH" ]; then 166 | caCertFormated=$(awk '{printf " %s\n", $0}' < ${REGISTRY_CA_PATH}) 167 | configMap=$(cat < /dev/null; then 278 | log "CYAN" "Executing installation Part I of the TAP guide" 279 | log "CYAN" "Installing pivnet tool ..." 280 | wget -q -c https://github.com/pivotal-cf/pivnet-cli/releases/download/v$PIVNET_CLI_VERSION/pivnet-linux-amd64-$PIVNET_CLI_VERSION 281 | chmod +x pivnet-linux-amd64-$PIVNET_CLI_VERSION && mv pivnet-linux-amd64-$PIVNET_CLI_VERSION pivnet && sudo cp pivnet ${DEST_DIR} 282 | pivnet version 283 | fi 284 | 285 | log "CYAN" "Pivnet log in to Tanzu " 286 | pivnet login --api-token=${TANZU_PIVNET_LEGACY_API_TOKEN} 287 | 288 | pushd ${TANZU_TEMP_DIR} 289 | 290 | log "CYAN" "Install the Tanzu client & plug-ins for version: $TANZU_CLI_VERSION.1" 291 | log "CYAN" "Download the Tanzu client and extract it" 292 | pivnet download-product-files --product-slug='tanzu-application-platform' --release-version=${TAP_VERSION} --product-file-id=$TANZU_CLIENT_FILE_ID 293 | tar -vxf $TANZU_CLIENT_NAME-$TANZU_CLI_VERSION.1.tar 294 | 295 | log "CYAN" "Set env var TANZU_CLI_NO_INIT to true to assure the local downloaded versions of the CLI core and plug-ins are installed" 296 | export TANZU_CLI_NO_INIT=true 297 | mkdir -p $HOME/.tanzu 298 | sudo install cli/core/$TANZU_CLI_VERSION/tanzu-core-linux_amd64 ${DEST_DIR}/tanzu 299 | tanzu version 300 | 301 | log "CYAN" 
"Enable tanzu completion for bash" 302 | printf "\n# Tanzu shell completion\nsource '$HOME/.tanzu/completion.bash.inc'\n" >> $HOME/.bash_profile 303 | tanzu completion bash > $HOME/.tanzu/completion.bash.inc 304 | 305 | log "CYAN" "Clean install Tanzu CLI plug-ins now" 306 | export TANZU_CLI_NO_INIT=true 307 | tanzu plugin install --local cli all 308 | tanzu plugin list 309 | 310 | log "CYAN" "Install the RBAC/AUTH plugin" 311 | pivnet download-product-files --product-slug=$TAP_AUTH_NAME --release-version=$TAP_AUTH_VERSION --product-file-id=$TAP_AUTH_FILE_ID 312 | tar -vxf tanzu-auth-plugin_$TAP_AUTH_VERSION.tar.gz 313 | tanzu plugin install rbac --local linux-amd64 314 | popd 315 | } 316 | 317 | clusterEssentials() { 318 | pushd ${TANZU_TEMP_DIR} 319 | 320 | # Download Cluster Essentials for VMware Tanzu 321 | log "CYAN" "Set the Cluster Essentials product ID for version $TANZU_CLUSTER_ESSENTIALS_VERSION" 322 | log "CYAN" "Download the tanzu-cluster-essentials ... " 323 | pivnet download-product-files --product-slug='tanzu-cluster-essentials' --release-version=$TANZU_CLUSTER_ESSENTIALS_VERSION --product-file-id=$TANZU_CLUSTER_ESSENTIALS_FILE_ID 324 | mkdir -p tanzu-cluster-essentials && tar -xvf tanzu-cluster-essentials-linux-amd64-$TANZU_CLUSTER_ESSENTIALS_VERSION.tgz -C ./tanzu-cluster-essentials 325 | 326 | log "CYAN" "Creates a secret containing the local CA certificate for the kapp controller named: kapp-controller-config" 327 | if [[ "$LOCAL_REGISTRY" == "true" ]]; then 328 | kubectl create namespace kapp-controller --dry-run=client -o yaml | kubectl apply -f - 329 | kubectl delete secret kapp-controller-config --namespace kapp-controller --ignore-not-found=true 330 | kubectl create secret generic kapp-controller-config \ 331 | --namespace kapp-controller \ 332 | --from-file caCerts=$REGISTRY_CA_PATH 333 | fi 334 | 335 | log "CYAN" "Install Cluster essentials (kapp, kbld, ytt, imgpkg)" 336 | log "CYAN" "Configure and run install.sh, which installs 
kapp-controller and secretgen-controller on your cluster" 337 | export INSTALL_BUNDLE=registry.tanzu.vmware.com/tanzu-cluster-essentials/cluster-essentials-bundle@$TANZU_CLUSTER_ESSENTIALS_IMAGE_SHA 338 | export INSTALL_REGISTRY_HOSTNAME=$TANZU_REG_SERVER 339 | export INSTALL_REGISTRY_USERNAME=$TANZU_REG_USERNAME 340 | export INSTALL_REGISTRY_PASSWORD=$TANZU_REG_PASSWORD 341 | cd ./tanzu-cluster-essentials 342 | export KUBECONFIG=${REMOTE_HOME_DIR}/.kube/config 343 | ./install.sh -y 344 | 345 | log "CYAN" "Install the carvel tools: kapp, ytt, imgpkg & kbld onto your $PATH:" 346 | sudo cp ytt ${DEST_DIR} 347 | sudo cp kapp ${DEST_DIR} 348 | sudo cp imgpkg ${DEST_DIR} 349 | sudo cp kbld ${DEST_DIR} 350 | 351 | popd 352 | 353 | log "CYAN" "Wait till the pod of kapp-controller and secretgen-controller are running" 354 | kubectl rollout status deployment/kapp-controller -n kapp-controller 355 | kubectl rollout status deployment/secretgen-controller -n secretgen-controller 356 | 357 | # log "CYAN" "Create the variable containing the patch data for caCerts if there is a CA cert" 358 | # patch_kapp_configmap 359 | # 360 | # log "CYAN" "Patch the kapp_controller configmap and rollout" 361 | # kubectl patch -n kapp-controller cm/kapp-controller-config --type merge --patch "$configMap" 362 | # kubectl rollout restart deployment/kapp-controller -n kapp-controller 363 | } 364 | 365 | relocateImages() { 366 | log "CYAN" "Login to the Tanzu and target registries where we will copy the packages" 367 | docker login ${REGISTRY_SERVER} -u ${REGISTRY_USERNAME} -p ${REGISTRY_PASSWORD} 368 | docker login ${TANZU_REG_SERVER} -u ${TANZU_REG_USERNAME} -p ${TANZU_REG_PASSWORD} 369 | 370 | log "CYAN" "Relocate the repository image bundle from Tanzu to ${REGISTRY_SERVER}/${REGISTRY_OWNER}" 371 | echo " imgpkg copy --concurrency 1 --registry-ca-cert-path ${REGISTRY_CA_PATH} -b ${TANZU_REG_SERVER}/tanzu-application-platform/tap-packages:${TAP_VERSION} --to-repo 
${REGISTRY_SERVER}/${REGISTRY_OWNER}/tap-packages" 372 | imgpkg copy \ 373 | --concurrency 1 \ 374 | --registry-ca-cert-path ${REGISTRY_CA_PATH} \ 375 | -b ${TANZU_REG_SERVER}/tanzu-application-platform/tap-packages:${TAP_VERSION} \ 376 | --to-repo ${REGISTRY_SERVER}/${REGISTRY_OWNER}/tap-packages 377 | } 378 | 379 | setupTapNamespaces() { 380 | log "CYAN" "Create a namespace called ${NAMESPACE_TAP} for deploying the packages" 381 | kubectl create ns ${NAMESPACE_TAP} --dry-run=client -o yaml | kubectl apply -f - 382 | 383 | log "CYAN" "Creating for grype the namespace : ${NAMESPACE_DEMO}" 384 | kubectl create ns ${NAMESPACE_DEMO} --dry-run=client -o yaml | kubectl apply -f - 385 | } 386 | 387 | createRegistryCreds() { 388 | log "CYAN" "Create a secret hosting the credentials to access the container registry: ${REGISTRY_SERVER}" 389 | tanzu secret registry add registry-credentials \ 390 | --username ${REGISTRY_USERNAME} \ 391 | --password ${REGISTRY_PASSWORD} \ 392 | --server ${REGISTRY_SERVER} \ 393 | --namespace ${NAMESPACE_TAP} \ 394 | --export-to-all-namespaces \ 395 | --yes 396 | 397 | log "CYAN" "Create a secret hosting the credentials to access the container registry: ${REGISTRY_SERVER} for the build-services." 398 | log "YELLOW" "To fix issue: https://github.com/halkyonio/tap/issues/33" 399 | tanzu secret registry add kp-default-repository-creds \ 400 | --username ${REGISTRY_USERNAME} \ 401 | --password ${REGISTRY_PASSWORD} \ 402 | --server ${REGISTRY_SERVER} \ 403 | --namespace ${NAMESPACE_TAP} 404 | } 405 | 406 | addTapRepository() { 407 | log "CYAN" "Deploy the TAP package repository" 408 | tanzu package repository add tanzu-tap-repository \ 409 | --url ${REGISTRY_SERVER}/${REGISTRY_OWNER}/tap-packages:${TAP_VERSION} \ 410 | -n ${NAMESPACE_TAP} 411 | } 412 | 413 | createConfigFile() { 414 | log "CYAN" "Create first the tap-values.yaml file to configure the TAP profile ..." 
415 | # See: https://docs.vmware.com/en/VMware-Tanzu-Application-Platform/1.5/tap/install.html#full-profile-3 416 | cat > ${TANZU_TEMP_DIR}/tap-values.yml <> ${TANZU_TEMP_DIR}/tap-values.yml 432 | cat << EOF >> ${TANZU_TEMP_DIR}/tap-values.yml 433 | ceip_policy_disclosed: true # Installation fails if this is set to 'false' 434 | 435 | #The above keys are minimum numbers of entries needed in tap-values.yaml to get a functioning TAP Full profile installation. 436 | #Below are the keys which may have default values set, but can be overridden. 437 | 438 | profile: full # Can take iterate, build, run, view. 439 | 440 | supply_chain: basic # Can take testing, testing_scanning. 441 | 442 | ootb_supply_chain_basic: # Based on supply_chain set above, can be changed to ootb_supply_chain_testing, ootb_supply_chain_testing_scanning. 443 | service_account: default 444 | registry: 445 | server: # Takes the value from the shared section by default. Can be overridden 446 | repository: # Takes the value from the shared section by default. Can be overridden 447 | gitops: 448 | ssh_secret: "" # Takes "" as value by default; but can be overridden 449 | 450 | crossplane: 451 | registryCaBundleConfig: 452 | name: ca-bundle-config # ConfigMap name 453 | key: ca-bundle # ConfigMap key pointing to the CA certificate 454 | 455 | #cnrs: 456 | # domain_name: "$VM_IP.sslip.io" 457 | # provider: local 458 | 459 | contour: 460 | envoy: 461 | service: 462 | type: ClusterIP 463 | hostPorts: 464 | enable: true 465 | 466 | buildservice: 467 | # Dockerhub has the form kp_default_repository: "my-dockerhub-user/build-service" or kp_default_repository: "index.docker.io/my-user/build-service" 468 | # Takes the value from the shared section by default, but can be overridden by setting a different value. 
469 | kp_default_repository: "${REGISTRY_SERVER}/${REGISTRY_OWNER}/build-service" 470 | kp_default_repository_secret: 471 | name: kp-default-repository-creds 472 | namespace: ${NAMESPACE_TAP} 473 | exclude_dependencies: false # Needed when using profile = full 474 | 475 | tap_gui: 476 | service_type: ClusterIP 477 | metadataStoreAutoconfiguration: true # Create a service account, the Kubernetes control plane token and the requisite app_config block to enable communications between Tanzu Application Platform GUI and SCST - Store. 478 | ingressEnabled: "true" 479 | ingressDomain: "$INGRESS_DOMAIN" 480 | app_config: 481 | app: 482 | baseUrl: http://tap-gui.$INGRESS_DOMAIN 483 | catalog: 484 | locations: 485 | - type: url 486 | target: $TAP_GIT_CATALOG_REPO/catalog-info.yaml 487 | backend: 488 | baseUrl: http://tap-gui.$INGRESS_DOMAIN 489 | cors: 490 | origin: http://tap-gui.$INGRESS_DOMAIN 491 | 492 | metadata_store: 493 | ns_for_export_app_cert: "*" 494 | app_service_type: ClusterIP # Defaults to LoadBalancer. If shared.ingress_domain is set earlier, this must be set to ClusterIP. 495 | 496 | scanning: 497 | metadataStore: 498 | url: "" # Configuration is moved, so set this string to empty. 499 | 500 | grype: 501 | namespace: ${NAMESPACE_DEMO} 502 | targetImagePullSecret: registry-credentials 503 | # In a single cluster, the connection between the scanning pod and the metadata store happens inside the cluster and does not pass through ingress. This is automatically configured, you do not need to provide an ingress connection to the store. 504 | 505 | policy: 506 | tuf_enabled: false # By default, TUF initialization and keyless verification are deactivated. 
EOF
  cat ${TANZU_TEMP_DIR}/tap-values.yml
}


# Install the TAP meta-package into ${NAMESPACE_TAP} using the generated
# values file, then poll until kapp-controller reports "Reconcile succeeded".
# NOTE(review): the poll loop never times out — a permanently failed reconcile
# loops forever; consider a max-retry guard.
installTapPackages() {
  log "WARN" "Due to the following X.505 Cert issue: https://github.com/halkyonio/tap/issues/34, it is needed to create another Crossplane ConfigMap to load the registry CA certificate"
  # Idempotent namespace creation: render with --dry-run and pipe through apply.
  kubectl create ns crossplane-system --dry-run=client -o yaml | kubectl apply -f -
  kubectl -n crossplane-system create cm ca-bundle-config \
    --from-file=ca-bundle=${REGISTRY_CA_PATH}

  log "CYAN" "Installing the TAP packages ..."
  # "|| true": the install command may time out while packages still reconcile;
  # completion is verified by the status loop below instead.
  tanzu package install tap -p tap.tanzu.vmware.com \
    --wait-check-interval 10s \
    -v ${TAP_VERSION} \
    --values-file ${TANZU_TEMP_DIR}/tap-values.yml \
    -n ${NAMESPACE_TAP} || true

  log "CYAN" "Wait till TAP installation is over"
  resp=$(tanzu package installed get tap -n ${NAMESPACE_TAP} -o json | jq -r .[].status)
  while [[ "$resp" != "Reconcile succeeded" ]]; do
    echo "TAP installation status: $resp";
    sleep 10s;
    resp=$(tanzu package installed get tap -n ${NAMESPACE_TAP} -o json | jq -r .[].status);
  done
}

# Prepare a developer namespace for TAP workloads.
# $1 (optional): namespace name; defaults to "demo" when no argument is given
# ("[ -v 1 ]" tests whether positional parameter 1 is set).
populateUserNamespace() {
  if [ -v 1 ]; then
    NAMESPACE_DEMO=$1
  else
    NAMESPACE_DEMO=demo
  fi

  cat <
# NOTE(review): the source listing is truncated at this point — original dump
# lines ~542-619 are missing (the rest of this heredoc / function and the
# opening of the kube-tools function, up to its helm "command -v" check).
# The fragment below resumes mid-statement; this span is not runnable as-is
# and must be restored from the original scripts/tap.sh.
/dev/null; then
    log "CYAN" "Installing Helm"
    curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
    chmod 700 get_helm.sh
    ./get_helm.sh
  fi

  log "CYAN" "Checking if kubectl is installed..."
  if ! command -v kubectl &> /dev/null; then
    set -x
    curl -sLO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/${PLATFORM}/${ARCH}/kubectl"
    chmod +x ./kubectl; sudo mv ./kubectl ${DEST_DIR}/kubectl
    set +x
  fi

  log "CYAN" "Checking if kind exists..."
  if ! command -v kind &> /dev/null; then
    set -x
    curl -sLo ./kind https://kind.sigs.k8s.io/dl/${KIND_VERSION}/kind-${PLATFORM}-${ARCH}
    chmod +x ./kind; sudo mv ./kind ${DEST_DIR}/kind
    set +x
  fi

  log "CYAN" "Checking if k9s exists..."
  if ! command -v k9s &> /dev/null; then
    # NOTE(review): yum makes this branch RHEL/Fedora-specific, and the
    # x86_64 archive name ignores ${ARCH} — verify on arm64 hosts.
    sudo yum install jq -y
    wget -q https://github.com/derailed/k9s/releases/download/${K9S_VERSION}/k9s_Linux_x86_64.tar.gz && tar -vxf k9s_Linux_x86_64.tar.gz
    sudo cp k9s ${DEST_DIR}
  fi

  log "CYAN" "Checking if kubectl krew exists..."
  if ! command -v ${KREW_ROOT:-$HOME/.krew}/bin/kubectl-krew &> /dev/null; then
    log "CYAN" "Install kubectl krew tool - https://krew.sigs.k8s.io/docs/user-guide/setup/install/"
    # Subshell keeps the cd-into-tempdir and set -x local to the krew install.
    (
      set -x; cd "$(mktemp -d)" &&
      OS="$(uname | tr '[:upper:]' '[:lower:]')" &&
      ARCH="$(uname -m | sed -e 's/x86_64/amd64/' -e 's/\(arm\)\(64\)\?.*/\1\2/' -e 's/aarch64$/arm64/')" &&
      KREW="krew-${OS}_${ARCH}" &&
      curl -fsSLO "https://github.com/kubernetes-sigs/krew/releases/latest/download/${KREW}.tar.gz" &&
      tar zxvf "${KREW}.tar.gz" &&
      ./"${KREW}" install krew
    )

    log "CYAN" "Install kubectl-tree - https://github.com/ahmetb/kubectl-tree"
    ${KREW_ROOT:-$HOME/.krew}/bin/kubectl-krew install tree

    log "CYAN" "Install kubectl-ctx, kubectl-ns - https://github.com/ahmetb/kubectx"
    ${KREW_ROOT:-$HOME/.krew}/bin/kubectl-krew install ctx
    ${KREW_ROOT:-$HOME/.krew}/bin/kubectl-krew install ns

    log "CYAN" "Install kubectl-konfig - https://github.com/corneliusweig/konfig"
    ${KREW_ROOT:-$HOME/.krew}/bin/kubectl-krew install konfig

    BASHRC_D_DIR="$HOME/.bashrc.d"
    if [ ! -d ${BASHRC_D_DIR} ]; then
      mkdir -p ${BASHRC_D_DIR}
    fi

    log "CYAN" "Export krew PATH to ${BASHRC_D_DIR}/krew.path"
    echo "PATH=\"${KREW_ROOT:-$HOME/.krew}/bin:$PATH\"" > ${BASHRC_D_DIR}/krew.path

    log "CYAN" "Create kubectl & plugins aliases to ${BASHRC_D_DIR}/aliases"
    # NOTE(review): the "<<EOF >" here-document operator was garbled to "<" in
    # the dump; reconstructed from the intact EOF terminator below — confirm
    # against the original script.
    cat <<EOF > ${BASHRC_D_DIR}/aliases
# kubectl shortcut -> kc
alias kc='kubectl'
# kubectl shortcut -> k
alias k='kubectl'

# kubectl krew
alias krew='kubectl krew'

# kubectl tree
alias ktree='kubectl tree'

# kubectl ns
alias kns='kubectl ns'

# kubectl ctx
alias kctx='kubectl ctx'

# kubectl konfig
alias konfig='kubectl konfig'
EOF

    log "CYAN" "$(cat ${BASHRC_D_DIR}/aliases)"
    log "WARN" "Source now the .bashrc file: \". $HOME/.bashrc\" in your terminal"

  fi
}

# TODO: To be reviewed
# log "CYAN" "Relocating the build images whn using full profile, installing the repository and packages"
# TBS_FULL_VERSION=""
# imgpkg copy -b ${TANZU_REG_SERVER}/tanzu-application-platform/full-tbs-deps-package-repo:${TBS_FULL_VERSION} \
#   --to-repo ${REGISTRY_SERVER}/${REGISTRY_OWNER}/tbs-full-deps
#
# tanzu package repository add tbs-full-deps-repository \
#   --url ${REGISTRY_SERVER}/${REGISTRY_OWNER}/tbs-full-deps:${TBS_FULL_VERSION} \
#   --namespace ${NAMESPACE_TAP}
#
# tanzu package install full-tbs-deps -p full-tbs-deps.tanzu.vmware.com -v ${TBS_FULL_VERSION} -n ${NAMESPACE_TAP}

# Command dispatcher: the first CLI argument selects which step(s) to run.
# "install" chains the full flow; most other verbs run a single step.
# populateUserNamespace uses "$@" so its optional namespace argument ($2)
# is forwarded to the function as $1.
case $1 in
  -h) usage; exit;;
  kube-tools) kubeTools; exit;;
  install)
    init
    if [[ "$INSTALL_TANZU_CLI" == "true" ]]; then
      tanzuCli
    fi
    clusterEssentials
    if [[ "$COPY_PACKAGES" == "true" ]]; then
      relocateImages
    fi
    setupTapNamespaces
    createRegistryCreds
    addTapRepository
    createConfigFile
    installTapPackages
    listTapPackages
    exit
    ;;
  remove) remove; exit;;
  tanzuCli)
    init
    tanzuCli
    exit
    ;;
  clusterEssentials)
    init
    clusterEssentials
    exit
    ;;
  createConfigFile)
    init
    createConfigFile
    exit;;
  relocateImages) relocateImages; exit;;
  createRegistryCreds) createRegistryCreds; exit;;
  addTapRepository) addTapRepository; exit;;
  installTapPackages) installTapPackages; exit;;
  listTapPackages) listTapPackages; exit;;
  deployKubernetesDashboard) deployKubernetesDashboard; exit;;
  populateUserNamespace) "$@"; exit;;
  *) usage; exit;;
esac