├── .gitattributes ├── .gitignore ├── .sops.yaml ├── README.md ├── gitops.drawio.png ├── management-cluster ├── README.md ├── audit-config.yaml ├── calico.yaml ├── capa │ └── manifests │ │ ├── infrastructure-components.yaml │ │ ├── kapp-config.yaml │ │ ├── kbld-config.yaml │ │ └── overlays │ │ └── capa-overlay.yaml ├── capi │ └── manifests │ │ ├── bootstrap-kubeadm │ │ └── v0.3.10 │ │ │ └── bootstrap-components.yaml │ │ ├── cluster-api │ │ └── v0.3.10 │ │ │ └── core-components.yaml │ │ ├── control-plane-kubeadm │ │ └── v0.3.10 │ │ │ └── control-plane-components.yaml │ │ ├── kapp-config.yaml │ │ ├── kbld-config.yaml │ │ └── overlays │ │ └── capi-overlay.yaml ├── create-cluster.sh ├── deploy │ ├── kapp-controller │ │ └── manifests │ │ │ ├── cluster-values.yaml │ │ │ ├── kapp-controller-additional.yaml │ │ │ ├── kapp-controller-clusteradmin.yaml │ │ │ └── release.yaml │ ├── sealed-secrets │ │ └── manifests │ │ │ ├── sealed-secrets-additional.yaml │ │ │ └── sealed-secrets-v0.12.2.yaml │ ├── workload-clusters │ │ └── manifests │ │ │ ├── workload-values.yaml │ │ │ └── workload.yaml │ └── workload-secrets │ │ ├── examples │ │ ├── cluster-config-secrets.yaml │ │ └── workload-secrets.yaml │ │ └── manifests │ │ └── .gitignore ├── management-secrets.pem ├── populate-management.sh ├── postcreate-assist │ ├── README.md │ ├── _ytt_lib │ │ └── kapp-controller │ │ │ ├── deploy-app.yaml │ │ │ ├── initial-values-example.yaml │ │ │ └── release.yaml │ └── postcreate-overlay.yaml ├── secured-updated.yaml ├── single-node │ ├── README.md │ └── master-toleration-overlay.yaml ├── template-assist │ ├── README.md │ ├── template-assist-values.yaml │ └── template-overlay.yaml ├── workload-cluster-secured-v1alpha3.yaml ├── workload-cluster-secured-v1apha2.yaml ├── workload-vsphere.yaml └── workload │ └── manifests │ ├── .gitignore │ ├── .gitkeep │ ├── 00-cluster-values.yaml │ ├── README.md │ ├── clusters │ ├── .gitkeep │ └── watersafety.yaml │ ├── kapp-config.yaml │ ├── overlays │ ├── cluster-auth-overlay.yaml │ ├── cluster-controlPlaneLoadBalancer-overlay.yaml │ ├── cluster-encryption-overlay.yaml │ ├── cluster-host-overlay.yaml │ ├── cluster-ntp-overlay.yaml │ ├── cluster-upgrade-overlay.yaml │ └── cluster-vpc-overlay.yaml │ └── secret.yaml ├── registry └── registry │ └── manifests │ ├── .gitignore │ ├── registry-values.yaml │ └── registry.yaml ├── supervisor-cluster └── local.yaml ├── workload-vsphere ├── calico │ └── calico.yaml ├── kube-vip │ ├── controller.yaml │ ├── kube-vip.yaml │ └── plndr-config.yaml └── storage-class │ └── ssd01.yaml └── workload ├── .gitignore ├── README.md ├── apps └── manifests │ └── namespace.yaml ├── build-service ├── .gitignore ├── install.sh └── service-account.yaml ├── calico └── manifests │ ├── calico.yaml │ ├── calico │ ├── calico.lib.yaml │ └── calico_overlay.lib.yaml │ └── values.yaml ├── cert-manager ├── helm │ ├── cert-manager-values.yaml │ ├── generate-manifests.sh │ └── overlay-helmtemplate.yaml └── manifests │ ├── cert-manager-additional.yaml │ ├── cert-manager.yaml │ ├── kapp-config.yaml │ └── kbld-config.yaml ├── contour ├── manifests │ ├── contour-certs.yaml.disable │ ├── contour-overlay.yaml │ ├── contour-quickstart.yaml │ ├── contour-values.yaml │ ├── kapp-config.yaml │ └── tls-certificate-delegation.yaml └── upgrade.sh ├── deploy ├── 0-sealed-secrets.yaml ├── 0-storageclass.yaml ├── apps-permissions.yaml ├── cert-manager.yaml ├── config │ └── all-my-secrets.sops.yaml ├── contour.yaml ├── dex.yaml ├── external-dns.yaml ├── gangway.yaml ├── gitlab-runner.yaml ├── 
kiam.yaml ├── knative.yaml.disabled ├── kuard.yaml ├── letsencrypt.yaml ├── metrics-server.yaml ├── monitoring.yaml.disabled └── values99.yaml ├── dex ├── helm │ ├── dex-values.yaml │ ├── generate-manifests.sh │ └── overlay-helmtemplate.yaml └── manifests │ ├── dex-additional.yaml │ ├── dex-config.yaml │ ├── dex-values.yaml │ ├── dex.yaml │ └── kapp-config.yaml ├── external-dns ├── helm │ ├── external-dns-values.yaml │ ├── generate-manifests.sh │ └── overlay-helmtemplate.yaml └── manifests │ ├── external-dns-config.yaml │ ├── external-dns-overlay.yaml │ ├── external-dns-values.yaml │ ├── external-dns.yaml │ └── kapp-config.yaml ├── gangway └── manifests │ ├── gangway-values.yaml │ └── gangway.yaml ├── gitlab-runner ├── helm │ ├── generate-manifests.sh │ ├── gitlab-runner-values.yaml │ └── overlay-helmtemplate.yaml └── manifests │ ├── gitlab-runner-additional.yaml │ ├── gitlab-runner-values.yaml │ └── gitlab-runner.yaml ├── jupyterlab-auth ├── Dockerfile └── manifests │ ├── 00-values.yaml │ ├── kbld-config.yaml │ └── notebook-sts.yaml ├── kiam ├── helm │ ├── generate-manifests.sh │ ├── kiam-values.yaml │ └── overlay-helmtemplate.yaml └── manifests │ ├── kiam-certs.yaml │ ├── kiam-config.yaml │ ├── kiam-values.yaml │ └── kiam.yaml ├── knative └── manifests │ ├── overlay-knative.yaml │ ├── serving-cert-manager.yaml │ ├── serving-core.yaml │ ├── serving-net-contour.yaml │ ├── serving-nscert.yaml │ └── serving-values.yaml ├── kuard ├── helm │ ├── generate-manifests.sh │ ├── oauth2-proxy-values.yaml │ └── overlay-helmtemplate.yaml └── manifests │ ├── kuard-additional.yaml │ ├── kuard-proxy.yaml │ ├── kuard-values.yaml │ └── kuard.yaml ├── kube-bench ├── job-master.yaml └── job-node.yaml ├── letsencrypt └── manifests │ ├── letsencrypt-issuer.yaml │ └── letsencrypt-values.yaml ├── metrics-server ├── helm │ ├── generate-manifests.sh │ ├── metrics-server-values.yaml │ └── overlay-helmtemplate.yaml └── manifests │ └── metrics-server.yaml ├── monitoring ├── helm │ ├── generate-manifests.sh │ ├── grafana-overlay-helmtemplate.yaml │ ├── grafana-values.yaml │ ├── monitoring-values.yaml │ └── overlay-helmtemplate.yaml ├── jsonnet │ ├── .gitignore │ ├── build.sh │ ├── crd-overlay.yaml │ ├── jsonnetfile.json │ ├── jsonnetfile.lock.json │ └── prometheus-operator.jsonnet └── manifests │ ├── grafana-config.yaml │ ├── grafana-httpproxy.yaml │ ├── grafana-values.yaml │ ├── kapp-config.yaml │ └── out │ ├── alertmanager-alertmanager.yaml │ ├── alertmanager-secret.yaml │ ├── alertmanager-service.yaml │ ├── alertmanager-serviceAccount.yaml │ ├── alertmanager-serviceMonitor.yaml │ ├── grafana-dashboardDatasources.yaml │ ├── grafana-dashboardDefinitions.yaml │ ├── grafana-dashboardSources.yaml │ ├── grafana-deployment.yaml │ ├── grafana-service.yaml │ ├── grafana-serviceAccount.yaml │ ├── grafana-serviceMonitor.yaml │ ├── kube-state-metrics-clusterRole.yaml │ ├── kube-state-metrics-clusterRoleBinding.yaml │ ├── kube-state-metrics-deployment.yaml │ ├── kube-state-metrics-service.yaml │ ├── kube-state-metrics-serviceAccount.yaml │ ├── kube-state-metrics-serviceMonitor.yaml │ ├── node-exporter-clusterRole.yaml │ ├── node-exporter-clusterRoleBinding.yaml │ ├── node-exporter-daemonset.yaml │ ├── node-exporter-service.yaml │ ├── node-exporter-serviceAccount.yaml │ ├── node-exporter-serviceMonitor.yaml │ ├── prometheus-adapter-apiService.yaml │ ├── prometheus-adapter-clusterRole.yaml │ ├── prometheus-adapter-clusterRoleAggregatedMetricsReader.yaml │ ├── prometheus-adapter-clusterRoleBinding.yaml │ ├── 
prometheus-adapter-clusterRoleBindingDelegator.yaml │ ├── prometheus-adapter-clusterRoleServerResources.yaml │ ├── prometheus-adapter-configMap.yaml │ ├── prometheus-adapter-deployment.yaml │ ├── prometheus-adapter-roleBindingAuthReader.yaml │ ├── prometheus-adapter-service.yaml │ ├── prometheus-adapter-serviceAccount.yaml │ ├── prometheus-adapter-serviceMonitor.yaml │ ├── prometheus-clusterRole.yaml │ ├── prometheus-clusterRoleBinding.yaml │ ├── prometheus-kubeControllerManagerPrometheusDiscoveryService.yaml │ ├── prometheus-kubeSchedulerPrometheusDiscoveryService.yaml │ ├── prometheus-operator-serviceMonitor.yaml │ ├── prometheus-prometheus.yaml │ ├── prometheus-roleBindingConfig.yaml │ ├── prometheus-roleBindingSpecificNamespaces.yaml │ ├── prometheus-roleConfig.yaml │ ├── prometheus-roleSpecificNamespaces.yaml │ ├── prometheus-rules.yaml │ ├── prometheus-service.yaml │ ├── prometheus-serviceAccount.yaml │ ├── prometheus-serviceMonitor.yaml │ ├── prometheus-serviceMonitorApiserver.yaml │ ├── prometheus-serviceMonitorCoreDNS.yaml │ ├── prometheus-serviceMonitorKubeControllerManager.yaml │ ├── prometheus-serviceMonitorKubeScheduler.yaml │ ├── prometheus-serviceMonitorKubelet.yaml │ └── setup │ ├── 0namespace-namespace.yaml │ ├── prometheus-operator-0alertmanagerCustomResourceDefinition.yaml │ ├── prometheus-operator-0podmonitorCustomResourceDefinition.yaml │ ├── prometheus-operator-0probeCustomResourceDefinition.yaml │ ├── prometheus-operator-0prometheusCustomResourceDefinition.yaml │ ├── prometheus-operator-0prometheusruleCustomResourceDefinition.yaml │ ├── prometheus-operator-0servicemonitorCustomResourceDefinition.yaml │ ├── prometheus-operator-0thanosrulerCustomResourceDefinition.yaml │ ├── prometheus-operator-clusterRole.yaml │ ├── prometheus-operator-clusterRoleBinding.yaml │ ├── prometheus-operator-deployment.yaml │ ├── prometheus-operator-service.yaml │ └── prometheus-operator-serviceAccount.yaml ├── oauth2-proxy └── helm │ ├── generate-manifests.sh │ ├── oauth2-proxy-values.yaml │ └── overlay-helmtemplate.yaml ├── opa-external-auth ├── opa-external-auth.yaml └── policy.rego.lib.txt ├── package-images.sh ├── postgresql-ha ├── helm │ ├── generate-manifests.sh │ └── postgresql-ha-values.yaml └── manifests │ ├── 00-values.yaml │ ├── kapp-config.yaml │ ├── kbld-config.yaml │ ├── postgresql-ha-additional.yaml │ ├── postgresql-ha-overlay.yaml │ └── postgresql.yaml ├── sealed-secrets └── manifests │ └── sealed-secrets-v0.12.5.yaml ├── secrets ├── seal.sh └── sonarqube-config.yaml ├── sonarqube ├── helm │ ├── generate-manifests.sh │ ├── oauth2-proxy-values.yaml │ ├── overlay-helmtemplate.yaml │ ├── postgresql-values.yaml │ └── sonarqube-values.yaml └── manifests │ ├── kapp-config.yaml │ ├── sonarqube-additional.yaml │ ├── sonarqube-config.sealed.yaml │ ├── sonarqube-db.yaml │ ├── sonarqube-proxy.yaml │ └── sonarqube.yaml ├── storageclass └── manifests │ └── storageclass-aws-ebs.yaml ├── tag-job └── manifests │ ├── tag-job-values.yaml │ └── tag-job.yaml ├── update-charts.sh └── workload-secrets.pem /.gitattributes: -------------------------------------------------------------------------------- 1 | *.sops.yaml diff=sopsdiffer 2 | .sops.yaml diff=yaml 3 | *.sops.key diff=sopsdiffer -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | master.key -------------------------------------------------------------------------------- /.sops.yaml: 
--------------------------------------------------------------------------------
# creation rules are evaluated sequentially, the first match wins

# WARNING: DO NOT PUT DECRYPTION RULES IN HERE.
# KAPP-CONTROLLER WILL IGNORE THIS FILE, SO IT IS IMPORTANT
# TO ONLY PUT ENCRYPTION RULES HERE.
creation_rules:
  - pgp: '041305B172A31BAE0459112C726238C01C0E1A2C'
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Tanzu Kubernetes Grid GitOps Flow Sample

![Overall workflow diagram](gitops.drawio.png?raw=true "TKG GitOps Flow")

**Last Run Against TKG 1.1.3**

This is a sample showing the power of Cluster API and Tanzu Kubernetes Grid in a GitOps workflow.

You start with the Management Cluster: do a typical install of TKG following the directions [here](https://docs.vmware.com/en/VMware-Tanzu-Kubernetes-Grid/1.1/vmware-tanzu-kubernetes-grid-11/GUID-index.html).

1. Fork this repository and rename references in the code pointing to https://github.com/voor/cluster-api-gitops to your fork. Push those changes.
1. **(Optional, for deploying workloads into a new cluster upon creation)** Go into `management-cluster/deploy/workload-secrets` and change the example file accordingly.
1. This assumes you are using [CloudFlare](https://www.cloudflare.com/) to manage the domain specified in the domain secret; if you are not, you'll need to change the [`letsencrypt`](workload/letsencrypt/manifests/letsencrypt-issuer.yaml#L19-L21) and [`external-dns`](workload/external-dns/manifests/external-dns.yaml#L67-L78) programs in the workload folder accordingly.
1. It also assumes you have a GitHub OAuth 2.0 application set up; otherwise you'll need to change [`dex-config.yaml`](workload/dex/manifests/dex-config.yaml#L35-L47) to point elsewhere.
1. Populate the management cluster: go into the `management-cluster` folder and run:
   ```
   ./populate-management.sh
   ```
1. Create your first workload cluster:
   ```
   ./create-cluster.sh hello-world -c 1 -p dev -w 1
   ```
1. Commit the `${CLUSTER_NAME}.yaml` file that was added into `workload/manifests` into git and push it. **(If you skipped the optional step you will need to remove the `${CLUSTER_NAME}-apps` App CR.)**
1. Wait a few minutes.
1. **(Optional, for deploying workloads into a new cluster upon creation)** Visit `https://kuard.apps.${DOMAIN}`, where `${DOMAIN}` is the domain you specified in the `workload-secrets` yaml.
1. If you didn't do the optional step, use `tkg get credentials` to get your new cluster's credentials.

## Scaling Clusters

### Workers
Go into the yaml for the workload cluster and change the number of `MachineDeployment` replicas.

### Masters
Go into the yaml for the workload cluster and change the number of `KubeadmControlPlane` replicas.

## Upgrading Clusters

Modify `00-cluster-values.yaml` to roll the machines to a new AMI. Modify the Kubernetes versions to roll the system containers.

## Changing Workloads

Everything for workload clusters is found in the `workload` folder of this repository. This sample takes a few commonly used applications that are variously Helm-, Jsonnet-, and pure-manifest-based.

### Helm

Helm applications are committed directly as manifests to remove any surprises and make this more air-gapped friendly. [kapp-controller](https://github.com/k14s/kapp-controller) supports Helm charts directly, so see its documentation for how to modify this to pull directly from a Helm repository.
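As a rough illustration of that kapp-controller feature (the chart name, version, and repository URL below are placeholders, and the exact schema should be checked against the kapp-controller docs), an App CR that pulls a chart straight from a Helm repository looks something like:

```yaml
---
apiVersion: kappctrl.k14s.io/v1alpha1
kind: App
metadata:
  name: example-helm-app        # hypothetical name
  namespace: kapp-controller
spec:
  serviceAccountName: kapp-controller-sa
  fetch:
  - helmChart:
      name: cert-manager                  # hypothetical chart
      version: "0.15.1"                   # pin a version for reproducible deploys
      repository:
        url: https://charts.jetstack.io   # hypothetical repository URL
  template:
  - helmTemplate: {}                      # renders the fetched chart via `helm template`
  deploy:
  - kapp: {}
```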
### Pure Manifests

Everything is run through `ytt`, so even if you are checking out manifests directly from a release (and if you are, you can always just fetch over URL), you can apply overlays or other modifications without committing them directly to the release files.
--------------------------------------------------------------------------------
/gitops.drawio.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/voor/cluster-api-gitops/e30df7fec01199f26a210ffdd422ae597314b63d/gitops.drawio.png
--------------------------------------------------------------------------------
/management-cluster/README.md:
--------------------------------------------------------------------------------
# Management Cluster

After you've applied this `management-cluster.yaml` in your temporary bootstrap cluster, you'll need to initialize it with Cluster API components again:

```
kubectl get secret central-kubeconfig -o jsonpath={.data.value} | base64 -d > ~/.kube/central-kubeconfig.config
# Remove the taint from the master (change to correct node name)
KUBECONFIG=~/.kube/central-kubeconfig.config kubectl taint nodes node-role.kubernetes.io/master:NoSchedule-

# kapp in calico
kapp --kubeconfig ~/.kube/central-kubeconfig.config deploy -a calico -f calico.yaml -c

# Deploy AWS provider
KUBECONFIG=~/.kube/central-kubeconfig.config clusterctl init --core cluster-api:v0.3.0 --bootstrap kubeadm:v0.3.0 --control-plane kubeadm:v0.3.0 --infrastructure aws:v0.5.0

# Note that you are pointing to your bootstrap cluster here
clusterctl move --to-kubeconfig=/home/${USER}/.kube/central-kubeconfig.config

# kapp in workload
kapp --kubeconfig ~/.kube/central-kubeconfig.config deploy -a workload -f workload-cluster.yaml
```
--------------------------------------------------------------------------------
/management-cluster/capa/manifests/kapp-config.yaml:
--------------------------------------------------------------------------------
apiVersion: kapp.k14s.io/v1alpha1
kind: Config

rebaseRules:

# clusterctl label
- paths:
  - [metadata, labels, clusterctl.cluster.x-k8s.io]
  - [metadata, labels, clusterctl.cluster.x-k8s.io/lifecycle]
  type: copy
  sources: [new, existing]
  resourceMatchers:
  - allMatcher: {}

# ignore bootstrap credentials, they are dynamically updated
- path: [data, credentials]
  type: copy
  sources: [existing]
  resourceMatchers:
  - kindNamespaceNameMatcher: {kind: Secret, namespace: capa-system, name: capa-manager-bootstrap-credentials}
--------------------------------------------------------------------------------
/management-cluster/capa/manifests/kbld-config.yaml:
--------------------------------------------------------------------------------
#@ load("@ytt:overlay", "overlay")
#@ load("@ytt:data", "data")

apiVersion: kbld.k14s.io/v1alpha1
kind: Config
minimumRequiredVersion: 0.25.0
overrides:
- image: registry.tkg.vmware.run/cluster-api-aws/cluster-api-aws-controller:v0.6.2_vmware.1
  newImage: gcr.io/kubernetes-development-244305/cluster-api/cluster-api-aws-controller:v0.6.2_vmware.1
  preresolved: true
- image:
registry.tkg.vmware.run/cluster-api/kube-rbac-proxy:v0.4.1_vmware.2 12 | newImage: gcr.io/kubernetes-development-244305/cluster-api/kube-rbac-proxy:v0.4.1_vmware.2 13 | preresolved: true -------------------------------------------------------------------------------- /management-cluster/capa/manifests/overlays/capa-overlay.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | #@ load("@ytt:data", "data") 3 | 4 | #@overlay/match by=overlay.subset({"kind":"CustomResourceDefinition"}),expects="0+" 5 | --- 6 | spec: 7 | #@overlay/match when=1 8 | conversion: 9 | #@overlay/match when=1 10 | webhook: 11 | #@overlay/match when=1 12 | clientConfig: 13 | #@overlay/remove 14 | caBundle: 15 | 16 | #@overlay/remove 17 | status: 18 | 19 | #@overlay/match by=overlay.subset({"kind":"ValidatingWebhookConfiguration"}),expects="0+" 20 | --- 21 | metadata: 22 | #@overlay/remove 23 | creationTimestamp: 24 | #@overlay/match missing_ok=True 25 | webhooks: 26 | #@overlay/match by=overlay.all,expects="1+" 27 | - clientConfig: 28 | #@overlay/remove 29 | caBundle: 30 | 31 | #@overlay/match by=overlay.subset({"kind":"MutatingWebhookConfiguration"}),expects="0+" 32 | --- 33 | metadata: 34 | #@overlay/remove 35 | creationTimestamp: 36 | #@overlay/match missing_ok=True 37 | webhooks: 38 | #@overlay/match by=overlay.all,expects="1+" 39 | - clientConfig: 40 | #@overlay/remove 41 | caBundle: 42 | 43 | 44 | #@overlay/match by=overlay.subset({"kind":"Deployment"}),expects="0+" 45 | --- 46 | metadata: 47 | #@overlay/match missing_ok=True 48 | annotations: 49 | kapp.k14s.io/update-strategy: "fallback-on-replace" 50 | spec: 51 | template: 52 | spec: 53 | #@overlay/match when=1 54 | tolerations: 55 | #@overlay/match by="key" 56 | #@overlay/remove 57 | - 58 | effect: NoSchedule 59 | key: node-role.kubernetes.io/master 60 | 61 | #@ def capa_controller(): 62 | kind: Deployment 63 | metadata: 64 | name: capa-controller-manager 65 | namespace: capa-system 66 | #@ end 67 | 68 | #@ def capa_controller_webhook(): 69 | kind: Deployment 70 | metadata: 71 | name: capa-controller-manager 72 | namespace: capi-webhook-system 73 | #@ end 74 | 75 | #@ capi_controller_manager = overlay.subset(capa_controller()) 76 | #@ capi_controller_webhook = overlay.subset(capa_controller_webhook()) 77 | 78 | #@overlay/match by=overlay.or_op(capi_controller_manager, capi_controller_webhook),expects=2 79 | --- 80 | spec: 81 | template: 82 | spec: 83 | containers: 84 | #@overlay/match by="name" 85 | - name: manager 86 | args: 87 | #@overlay/match by=lambda _,a,b: "--feature-gates=" in a, expects="0+" 88 | - --feature-gates=EKS=false,MachinePool=false -------------------------------------------------------------------------------- /management-cluster/capi/manifests/kapp-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kapp.k14s.io/v1alpha1 2 | kind: Config 3 | 4 | rebaseRules: 5 | 6 | # clusterctl label 7 | - path: [metadata, labels, clusterctl.cluster.x-k8s.io] 8 | type: copy 9 | sources: [new, existing] 10 | resourceMatchers: &group 11 | - allMatcher: {} 12 | 13 | - path: [metadata, labels, clusterctl.cluster.x-k8s.io/lifecycle] 14 | type: copy 15 | sources: [new, existing] 16 | resourceMatchers: *group 17 | 18 | # ignore cluster role with aggregationRule 19 | - path: [rules] 20 | type: copy 21 | sources: [existing, new] 22 | resourceMatchers: 23 | - notMatcher: 24 | matcher: 25 | emptyFieldMatcher: 26 | path: 
[aggregationRule] -------------------------------------------------------------------------------- /management-cluster/capi/manifests/kbld-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kbld.k14s.io/v1alpha1 2 | kind: Config 3 | minimumRequiredVersion: 0.25.0 4 | overrides: 5 | - image: registry.tkg.vmware.run/cluster-api/cluster-api-controller:v0.3.10_vmware.1 6 | newImage: registry.tkg.vmware.run/cluster-api/cluster-api-controller@sha256:b54e71f46eedf8ce14f82edb95552f9e5b9165ce925adfd926421a1059441906 7 | preresolved: true 8 | - image: registry.tkg.vmware.run/cluster-api/kube-rbac-proxy:v0.4.1_vmware.2 9 | newImage: registry.tkg.vmware.run/cluster-api/kube-rbac-proxy@sha256:6b83d791388546ce66372f621435d55bea57d892675c3180d85a4d9dc7bb818f 10 | preresolved: true 11 | - image: registry.tkg.vmware.run/cluster-api/kubeadm-bootstrap-controller:v0.3.10_vmware.1 12 | newImage: registry.tkg.vmware.run/cluster-api/kubeadm-bootstrap-controller@sha256:c640191c7604ef15f82a45bf0d91844cb98a0b6e4ddce960e8d7a5156f80904c 13 | preresolved: true 14 | - image: registry.tkg.vmware.run/cluster-api/kubeadm-control-plane-controller:v0.3.10_vmware.1 15 | newImage: registry.tkg.vmware.run/cluster-api/kubeadm-control-plane-controller@sha256:c232b7632f59a991e2c9d36e4ca4c59c330338bcceba9424abda296acc8ddd55 16 | preresolved: true 17 | -------------------------------------------------------------------------------- /management-cluster/capi/manifests/overlays/capi-overlay.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | #@ load("@ytt:data", "data") 3 | 4 | #@overlay/match by=overlay.subset({"kind":"CustomResourceDefinition"}),expects="0+" 5 | --- 6 | spec: 7 | #@overlay/match when=1 8 | conversion: 9 | #@overlay/match when=1 10 | webhook: 11 | #@overlay/match when=1 12 | clientConfig: 13 | #@overlay/remove 14 | caBundle: 15 | 16 | #@overlay/remove 17 | status: 18 | 19 | #@overlay/match by=overlay.subset({"kind":"ValidatingWebhookConfiguration"}),expects="0+" 20 | --- 21 | #@overlay/match missing_ok=True 22 | webhooks: 23 | #@overlay/match by=overlay.all,expects="1+" 24 | - clientConfig: 25 | #@overlay/remove 26 | caBundle: 27 | 28 | #@overlay/match by=overlay.subset({"kind":"MutatingWebhookConfiguration"}),expects="0+" 29 | --- 30 | metadata: 31 | #@overlay/remove 32 | creationTimestamp: 33 | #@overlay/match missing_ok=True 34 | webhooks: 35 | #@overlay/match by=overlay.all,expects="1+" 36 | - clientConfig: 37 | #@overlay/remove 38 | caBundle: 39 | 40 | #@ def capi_kubeadm_bootstrap(): 41 | kind: Deployment 42 | metadata: 43 | name: capi-kubeadm-bootstrap-controller-manager 44 | namespace: capi-kubeadm-bootstrap-system 45 | #@ end 46 | 47 | #@ def capi_kubeadm_bootstrap_webhook(): 48 | kind: Deployment 49 | metadata: 50 | name: capi-kubeadm-bootstrap-controller-manager 51 | namespace: capi-webhook-system 52 | #@ end 53 | 54 | #@ kubeadm_bootstrap = overlay.subset(capi_kubeadm_bootstrap()) 55 | #@ kubeadm_bootstrap_webhook = overlay.subset(capi_kubeadm_bootstrap_webhook()) 56 | 57 | #@overlay/match by=overlay.or_op(kubeadm_bootstrap, kubeadm_bootstrap_webhook),expects=2 58 | --- 59 | spec: 60 | template: 61 | spec: 62 | containers: 63 | #@overlay/match by="name" 64 | - name: manager 65 | args: 66 | #@overlay/match by=lambda _,a,b: "--feature-gates=" in a, expects="0+" 67 | - --feature-gates=MachinePool=false 68 | 69 | #@ def capi_controller(): 70 | kind: Deployment 71 | metadata: 
  name: capi-controller-manager
  namespace: capi-system
#@ end

#@ def capi_controller_webhook():
kind: Deployment
metadata:
  name: capi-controller-manager
  namespace: capi-webhook-system
#@ end

#@ capi_controller_manager = overlay.subset(capi_controller())
#@ capi_controller_webhook = overlay.subset(capi_controller_webhook())

#@overlay/match by=overlay.or_op(capi_controller_manager, capi_controller_webhook),expects=2
---
spec:
  template:
    spec:
      containers:
      #@overlay/match by="name"
      - name: manager
        args:
        #@overlay/match by=lambda _,a,b: "--feature-gates=" in a, expects="0+"
        - --feature-gates=ClusterResourceSet=true,MachinePool=false

#@overlay/match by=overlay.subset({"kind":"Deployment"}),expects="0+"
---
metadata:
  #@overlay/match missing_ok=True
  annotations:
    kapp.k14s.io/update-strategy: "fallback-on-replace"
spec:
  template:
    spec:
      #@overlay/match when=1
      tolerations:
      #@overlay/match by="key"
      #@overlay/remove
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
--------------------------------------------------------------------------------
/management-cluster/create-cluster.sh:
--------------------------------------------------------------------------------
#!/bin/sh
USAGE="Usage: $0 CLUSTER_NAME (additional tkg create cluster parameters)"

# POSIX test(1) has no ==; use -eq for the numeric comparison
if [ "$#" -eq 0 ]; then
    echo "$USAGE"
    exit 1
fi

set -eux

CLUSTER_NAME=$1

shift

export AWS_NODE_AZ=unused
export AWS_PRIVATE_NODE_CIDR=unused
export AWS_PUBLIC_NODE_CIDR=unused
export AWS_REGION=us-gov-west-1
export AWS_SSH_KEY_NAME=overridden
export AWS_AMI_ID=ignored
export AWS_VPC_ID=overridden
export CONTROL_PLANE_MACHINE_TYPE=m5.large
export NODE_MACHINE_TYPE=m5.large

tkg create cluster -i aws:v0.5.4 $CLUSTER_NAME "$@" --dry-run 2>/dev/null | ytt --ignore-unknown-comments --data-value cluster_name=${CLUSTER_NAME} -f template-assist -f- > workload/manifests/clusters/${CLUSTER_NAME}.yaml
--------------------------------------------------------------------------------
/management-cluster/deploy/kapp-controller/manifests/cluster-values.yaml:
--------------------------------------------------------------------------------
#@data/values
---
clusteradminrole: false
--------------------------------------------------------------------------------
/management-cluster/deploy/kapp-controller/manifests/kapp-controller-additional.yaml:
--------------------------------------------------------------------------------
#@ load("@ytt:overlay", "overlay")

#@overlay/match by=overlay.subset({"kind":"Deployment"})
---
spec:
  template:
    spec:
      #@overlay/match missing_ok=True
      tolerations:
      #@overlay/match by="key", missing_ok=True
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
--------------------------------------------------------------------------------
/management-cluster/deploy/kapp-controller/manifests/kapp-controller-clusteradmin.yaml:
--------------------------------------------------------------------------------
#@ load("@ytt:data", "data")

#@ if data.values.clusteradminrole:
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kapp-controller-clusteradmin
  namespace: kapp-controller
---
apiVersion:
rbac.authorization.k8s.io/v1 12 | kind: ClusterRoleBinding 13 | metadata: 14 | name: kapp-controller-clusteradmin-cluster-role-binding 15 | roleRef: 16 | apiGroup: rbac.authorization.k8s.io 17 | kind: ClusterRole 18 | name: cluster-admin 19 | subjects: 20 | - kind: ServiceAccount 21 | name: kapp-controller-clusteradmin 22 | namespace: kapp-controller 23 | #@ end -------------------------------------------------------------------------------- /management-cluster/deploy/kapp-controller/manifests/release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1beta1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: apps.kappctrl.k14s.io 6 | spec: 7 | additionalPrinterColumns: 8 | - JSONPath: .status.friendlyDescription 9 | description: Friendly description 10 | name: Description 11 | type: string 12 | - JSONPath: .status.deploy.startedAt 13 | description: Last time app started being deployed. Does not mean anything was 14 | changed. 15 | name: Since-Deploy 16 | type: date 17 | - JSONPath: .metadata.creationTimestamp 18 | description: |- 19 | CreationTimestamp is a timestamp representing the server time when this object was created. 20 | It is not guaranteed to be set in happens-before order across separate operations. 21 | Clients may not set this value. It is represented in RFC3339 form and is in UTC. 22 | Populated by the system. Read-only. Null for lists. 23 | More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata 24 | name: Age 25 | type: date 26 | group: kappctrl.k14s.io 27 | names: 28 | kind: App 29 | listKind: AppList 30 | plural: apps 31 | singular: app 32 | scope: Namespaced 33 | subresources: 34 | status: {} 35 | versions: 36 | - name: v1alpha1 37 | served: true 38 | storage: true 39 | --- 40 | apiVersion: apps/v1 41 | kind: Deployment 42 | metadata: 43 | annotations: 44 | kbld.k14s.io/images: | 45 | - Metas: 46 | - Path: /Users/dk/workspace/k14s-go/src/github.com/k14s/kapp-controller 47 | Type: local 48 | - Dirty: false 49 | RemoteURL: git@github.com:k14s/kapp-controller 50 | SHA: e66d0d12829c4fadb79b91b00a565c2357517f64 51 | Type: git 52 | URL: index.docker.io/k14s/kapp-controller@sha256:038107419ba4e3cb73087fb00bf1d3e1be83dabdcc55939b5d117f18b1dac66c 53 | name: kapp-controller 54 | namespace: kapp-controller 55 | spec: 56 | replicas: 1 57 | revisionHistoryLimit: 0 58 | selector: 59 | matchLabels: 60 | app: kapp-controller 61 | template: 62 | metadata: 63 | labels: 64 | app: kapp-controller 65 | spec: 66 | containers: 67 | - args: [] 68 | env: 69 | - name: KAPPCTRL_MEM_TMP_DIR 70 | value: /etc/kappctrl-mem-tmp 71 | image: index.docker.io/k14s/kapp-controller@sha256:038107419ba4e3cb73087fb00bf1d3e1be83dabdcc55939b5d117f18b1dac66c 72 | name: kapp-controller 73 | resources: 74 | requests: 75 | cpu: 120m 76 | memory: 100Mi 77 | securityContext: 78 | runAsGroup: 2000 79 | runAsUser: 1000 80 | volumeMounts: 81 | - mountPath: /etc/kappctrl-mem-tmp 82 | name: template-fs 83 | securityContext: 84 | fsGroup: 3000 85 | serviceAccount: kapp-controller-sa 86 | volumes: 87 | - emptyDir: 88 | medium: Memory 89 | name: template-fs 90 | --- 91 | apiVersion: v1 92 | kind: Namespace 93 | metadata: 94 | name: kapp-controller 95 | --- 96 | apiVersion: v1 97 | kind: ServiceAccount 98 | metadata: 99 | name: kapp-controller-sa 100 | namespace: kapp-controller 101 | --- 102 | apiVersion: rbac.authorization.k8s.io/v1 103 | kind: ClusterRole 104 | metadata: 105 | name: 
kapp-controller-cluster-role 106 | rules: 107 | - apiGroups: 108 | - "" 109 | resources: 110 | - serviceaccounts 111 | - secrets 112 | - configmaps 113 | verbs: 114 | - get 115 | - apiGroups: 116 | - kappctrl.k14s.io 117 | resources: 118 | - apps 119 | - apps/status 120 | verbs: 121 | - '*' 122 | --- 123 | apiVersion: rbac.authorization.k8s.io/v1 124 | kind: ClusterRoleBinding 125 | metadata: 126 | name: kapp-controller-cluster-role-binding 127 | roleRef: 128 | apiGroup: rbac.authorization.k8s.io 129 | kind: ClusterRole 130 | name: kapp-controller-cluster-role 131 | subjects: 132 | - kind: ServiceAccount 133 | name: kapp-controller-sa 134 | namespace: kapp-controller -------------------------------------------------------------------------------- /management-cluster/deploy/sealed-secrets/manifests/sealed-secrets-additional.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | 3 | 4 | #@overlay/match by=overlay.subset({"kind":"Deployment"}) 5 | --- 6 | spec: 7 | template: 8 | spec: 9 | #@overlay/match missing_ok=True 10 | tolerations: 11 | #@overlay/match by="key", missing_ok=True 12 | - 13 | effect: NoSchedule 14 | key: node-role.kubernetes.io/master -------------------------------------------------------------------------------- /management-cluster/deploy/workload-clusters/manifests/workload-values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | 4 | git: 5 | url: https://github.com/voor/cluster-api-gitops 6 | branch: origin/default 7 | secretRef: management-git-secret 8 | 9 | workload_secretRef: 10 | #! This file contains secrets for configuring workload clusters. 11 | - workload-config-secrets 12 | - workload-git-secrets 13 | -------------------------------------------------------------------------------- /management-cluster/deploy/workload-clusters/manifests/workload.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: v1 5 | kind: Namespace 6 | metadata: 7 | name: workload-clusters 8 | --- 9 | apiVersion: v1 10 | kind: ServiceAccount 11 | metadata: 12 | name: workload-sa 13 | namespace: workload-clusters 14 | --- 15 | apiVersion: kappctrl.k14s.io/v1alpha1 16 | kind: App 17 | metadata: 18 | name: workload-clusters 19 | namespace: workload-clusters 20 | annotations: 21 | kapp.k14s.io/disable-wait: "true" 22 | spec: 23 | serviceAccountName: workload-sa 24 | fetch: 25 | - git: 26 | url: #@ data.values.git.url 27 | ref: #@ data.values.git.branch 28 | subPath: management-cluster/workload/manifests 29 | secretRef: 30 | name: #@ data.values.git.secretRef 31 | template: 32 | - ytt: 33 | ignoreUnknownComments: true 34 | inline: 35 | pathsFrom: 36 | #@ for secretRef in data.values.workload_secretRef: 37 | - secretRef: 38 | name: #@ secretRef 39 | #@ end 40 | deploy: 41 | - kapp: {} 42 | --- 43 | apiVersion: rbac.authorization.k8s.io/v1beta1 44 | kind: RoleBinding 45 | metadata: 46 | name: workload-role-binding 47 | namespace: workload-clusters 48 | roleRef: 49 | apiGroup: rbac.authorization.k8s.io 50 | kind: Role 51 | name: workload-role 52 | subjects: 53 | - kind: ServiceAccount 54 | name: workload-sa 55 | namespace: workload-clusters 56 | --- 57 | apiVersion: rbac.authorization.k8s.io/v1beta1 58 | kind: Role 59 | metadata: 60 | name: workload-role 61 | namespace: workload-clusters 62 | rules: 63 | - apiGroups: 64 | - kappctrl.k14s.io 65 | - cluster.x-k8s.io 
66 | - infrastructure.cluster.x-k8s.io 67 | - controlplane.cluster.x-k8s.io 68 | - bootstrap.cluster.x-k8s.io 69 | resources: 70 | - '*' 71 | verbs: 72 | - '*' 73 | - apiGroups: 74 | - "" 75 | resources: 76 | - secrets 77 | - configmaps 78 | verbs: 79 | - '*' 80 | - apiGroups: 81 | - "" 82 | resources: 83 | - pods 84 | verbs: 85 | - get 86 | - list 87 | - watch -------------------------------------------------------------------------------- /management-cluster/deploy/workload-secrets/examples/cluster-config-secrets.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: workload-config-secrets 6 | namespace: workload-clusters 7 | stringData: 8 | zzauthsecrets.yaml: | 9 | #@data/values 10 | --- 11 | oidc: 12 | client_id: REMOVED 13 | issuer_url: https://planetvoor.auth0.com 14 | etcd: 15 | aescbc: ENCRYPTIONKEY 16 | --- 17 | apiVersion: v1 18 | kind: Secret 19 | metadata: 20 | name: workload-git-secrets 21 | namespace: workload-clusters 22 | stringData: 23 | git-secrets.yaml: | 24 | #@data/values 25 | --- 26 | domain: tanzu.world 27 | --- 28 | apiVersion: v1 29 | kind: Secret 30 | metadata: 31 | name: management-git-secret 32 | namespace: workload-clusters 33 | stringData: 34 | username: voor-cibot 35 | password: REMOVED 36 | -------------------------------------------------------------------------------- /management-cluster/deploy/workload-secrets/manifests/.gitignore: -------------------------------------------------------------------------------- 1 | *.yaml 2 | -------------------------------------------------------------------------------- /management-cluster/management-secrets.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIErjCCApagAwIBAgIRAJPa9I83mBJ8kotl8vpGP40wDQYJKoZIhvcNAQELBQAw 3 | ADAeFw0yMDAzMDcwMTE1MTFaFw0zMDAzMDUwMTE1MTFaMAAwggIiMA0GCSqGSIb3 4 | DQEBAQUAA4ICDwAwggIKAoICAQCxdjX3dc8eolCfa1i+QKBxpXJnF5BpKWTZaXN7 5 | bdai3/q+QFheRMivZ4QD+Wi2iKyf8UNiiQYsdkapoWli+tp4XWyjjTeJ2DAQTzRW 6 | eViwMu1qDfdJKJN9zj4VOg8Sie4FNCmgLjUrM1QCO9JtYxYeWl22/ZujLZLTkkcP 7 | I4c8HRfFfnWJ+NlDhoHMd1SeDmf16Qho7+Gufnh09/DsOGvWeNclpLmC7cX9H6AK 8 | vaWOREBlqc90KqNhwTML/DsUT4qJHj9we2gmsjC76aDPl46//AKJ9YxvxoKzPMFK 9 | 9Zyzd1aoTVkBUsTYbRsAsnBn3G1VHaiXCMPY3535NEwcOqrc3fZ+WJrH5R5WKYFE 10 | C7NuzkJDW/hDQjdOBw42RWpGZkT04KNrnLkz5cE3xHZMpqOfFeegWQROauyzOYzV 11 | kFVkAWsXv6QNJbCgTdp6NBUG0imZw3ZlYa61GkqPFw87e8bzk1at5OApwt7W35Mt 12 | DbAvMrJa7FGPCQEkYhPFiZLERPJ5f6Q2zkKZS3xvhIpaMRK/AgrCitOe5aDd5n30 13 | /VrKspF83bc2zyEn3bF6w08YwFlFVe+aA8e4rlpcEqH0VWD793bm5aZ5mcsUN2W5 14 | EOqONz7TVLc/1JXu8C+liaQ/XRcZud/foMirM8q3dmB0WuNZDd5LN423Jw2THVSa 15 | 234wxQIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAAEwDwYDVR0TAQH/BAUwAwEB/zAN 16 | BgkqhkiG9w0BAQsFAAOCAgEAOCxgfm/i1bXIz9j4tEM21X32L15VPDApLdUjnuds 17 | 6td9o2Evg4AF3PguTgRBBfzKsG4E7hwWHESRBjnhz7I6aF5hOj8qhs5lXOtind1Z 18 | vKIx9Uia61qIGdXaP8T4ubShdhwi43g2pDjnOhYc9dLkR6LM6tXii4qPeFbMmGzz 19 | vQWvXJsAliO4L3dj3jRVoH8N8Em6Mmj7JYApwiy8eQzVx4SiQ61ai04ypkhD5TZ1 20 | BQZcMgfN/LcanN3faOUXlGGMjXRqNk6eMQff21jJ9GOgRzcNZkCcis0uhf8L0QvX 21 | Im3hww308wfbNtRsPGoyAbPHqtAtJt0103djY6OmIEV99R96zDsAGkpGwLsWsSXy 22 | 9UU9dA2sfVYHpKNQjdVFWNqRo6Fph4GN1b0jFya4Z4BETGnbjE0XAqRgggBOrVl8 23 | 2tcU/XJGxH7ajySksL0vRZQS9m/TsVAHVOFRiKJ+jV10k7sdNEEEnG6ZCCSvZ96v 24 | HyCV4a5CEd68B9KjbpB0gJkrNancyXnsPBTdzUR0mvIir5UEjjDoZ+YNV58CCpxP 25 | pf9ByZxCrJ8Sx6pEyhJKtjFwQSoT23q326bGcFOaDYsD9QkFmT9moOLbtDqRhBKb 26 | 
ylTrsOeXZByN0/hE+vyY4glP90Ilt1bLI4RcrQPik385dmy5eKXt4AuRbRBPYLPP 27 | lis= 28 | -----END CERTIFICATE----- 29 | -------------------------------------------------------------------------------- /management-cluster/populate-management.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -eux 4 | 5 | echo "Populates the management cluster with necessary apps." 6 | 7 | # If we have an existing secret apply it first. 8 | if [ -f "master.key" ]; then 9 | kubectl apply -f master.key 10 | fi 11 | 12 | for app in sealed-secrets kapp-controller workload-clusters workload-secrets; do 13 | ytt --ignore-unknown-comments -f deploy/${app}/manifests | kapp -y deploy -n kube-system -a ${app} -f - 14 | done -------------------------------------------------------------------------------- /management-cluster/postcreate-assist/README.md: -------------------------------------------------------------------------------- 1 | # Deprecated Folder 2 | 3 | This was an old way to modify the postcreate entry directly instead of creating a separate one, it made more sense to have different App CRs instead of overloading the CNI one. -------------------------------------------------------------------------------- /management-cluster/postcreate-assist/_ytt_lib/kapp-controller/deploy-app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kappctrl.k14s.io/v1alpha1 3 | kind: App 4 | metadata: 5 | name: deploy 6 | namespace: kapp-controller 7 | spec: 8 | fetch: 9 | - git: 10 | url: https://github.com/voor/cluster-api-gitops 11 | ref: origin/default 12 | subPath: workload/deploy 13 | secretRef: 14 | name: #@ data.values.git.secretRef 15 | template: 16 | - ytt: 17 | ignoreUnknownComments: true 18 | deploy: 19 | - kapp: 20 | deploy: 21 | rawOptions: 22 | - "--wait=false" 23 | -------------------------------------------------------------------------------- /management-cluster/postcreate-assist/_ytt_lib/kapp-controller/initial-values-example.yaml: -------------------------------------------------------------------------------- 1 | # --- 2 | # apiVersion: v1 3 | # kind: Secret 4 | # metadata: 5 | # name: knative-values 6 | # namespace: kapp-controller 7 | # stringData: 8 | # serving-values.yaml: | 9 | # #@data/values 10 | # --- 11 | # domain: tanzu.world 12 | # --- 13 | # apiVersion: v1 14 | # kind: Secret 15 | # metadata: 16 | # name: dex-values 17 | # namespace: kapp-controller 18 | # stringData: 19 | # dex-values.yaml: | 20 | # #@data/values 21 | # --- 22 | # github_client_id: CHANGEME 23 | # github_client_secret: CHANGEME 24 | # grafana_client_id: grafana-client 25 | # grafana_client_secret: CHANGEME 26 | # sonarqube_client_id: sonarqube-client 27 | # sonarqube_client_secret: CHANGEME 28 | # --- 29 | # apiVersion: v1 30 | # kind: Secret 31 | # metadata: 32 | # name: certmanager-values 33 | # namespace: kapp-controller 34 | # stringData: 35 | # cert-manager-values.yaml: | 36 | # #@data/values 37 | # --- 38 | # acme_email: rcanvo@gmail.com 39 | # acme_url: "https://acme-v02.api.letsencrypt.org/directory" 40 | # cloudflare_token: CHANGEME 41 | 42 | # acme_url: "https://acme-v02.api.letsencrypt.org/directory" 43 | # acme_url: "https://acme-staging-v02.api.letsencrypt.org/directory" -------------------------------------------------------------------------------- /management-cluster/postcreate-assist/_ytt_lib/kapp-controller/release.yaml: -------------------------------------------------------------------------------- 1 
| --- 2 | apiVersion: apiextensions.k8s.io/v1beta1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: apps.kappctrl.k14s.io 6 | spec: 7 | additionalPrinterColumns: 8 | - JSONPath: .status.friendlyDescription 9 | description: Friendly description 10 | name: Description 11 | type: string 12 | - JSONPath: .metadata.creationTimestamp 13 | description: |- 14 | CreationTimestamp is a timestamp representing the server time when this object was created. 15 | It is not guaranteed to be set in happens-before order across separate operations. 16 | Clients may not set this value. It is represented in RFC3339 form and is in UTC. 17 | Populated by the system. Read-only. Null for lists. 18 | More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata 19 | name: Age 20 | type: date 21 | group: kappctrl.k14s.io 22 | names: 23 | kind: App 24 | listKind: AppList 25 | plural: apps 26 | singular: app 27 | scope: Namespaced 28 | subresources: 29 | status: {} 30 | versions: 31 | - name: v1alpha1 32 | served: true 33 | storage: true 34 | --- 35 | apiVersion: apps/v1 36 | kind: Deployment 37 | metadata: 38 | name: kapp-controller 39 | namespace: kapp-controller 40 | spec: 41 | replicas: 1 42 | revisionHistoryLimit: 0 43 | selector: 44 | matchLabels: 45 | app: kapp-controller 46 | template: 47 | metadata: 48 | labels: 49 | app: kapp-controller 50 | spec: 51 | securityContext: 52 | fsGroup: 3000 53 | runAsGroup: 2000 54 | runAsUser: 1000 55 | containers: 56 | - env: 57 | - name: KAPPCTRL_MEM_TMP_DIR 58 | value: /etc/kappctrl-mem-tmp 59 | image: index.docker.io/k14s/kapp-controller@sha256:6a6151bb0713a89cedb091531a2a8a2c98918acf6581769b2ee970c1ce615d38 60 | name: kapp-controller 61 | resources: 62 | requests: 63 | cpu: 120m 64 | memory: 100Mi 65 | volumeMounts: 66 | - mountPath: /etc/kappctrl-mem-tmp 67 | name: template-fs 68 | serviceAccount: kapp-controller-sa 69 | volumes: 70 | - emptyDir: 71 | medium: Memory 72 | name: template-fs 73 | --- 74 | apiVersion: v1 75 | kind: Namespace 76 | metadata: 77 | name: kapp-controller 78 | --- 79 | apiVersion: v1 80 | kind: ServiceAccount 81 | metadata: 82 | name: kapp-controller-sa 83 | namespace: kapp-controller 84 | --- 85 | apiVersion: rbac.authorization.k8s.io/v1 86 | kind: ClusterRole 87 | metadata: 88 | name: kapp-controller-cluster-role 89 | rules: 90 | - apiGroups: 91 | - '*' 92 | resources: 93 | - '*' 94 | verbs: 95 | - '*' 96 | --- 97 | apiVersion: rbac.authorization.k8s.io/v1 98 | kind: ClusterRoleBinding 99 | metadata: 100 | name: kapp-controller-cluster-role-binding 101 | roleRef: 102 | apiGroup: rbac.authorization.k8s.io 103 | kind: ClusterRole 104 | name: kapp-controller-cluster-role 105 | subjects: 106 | - kind: ServiceAccount 107 | name: kapp-controller-sa 108 | namespace: kapp-controller 109 | -------------------------------------------------------------------------------- /management-cluster/postcreate-assist/postcreate-overlay.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | #@ load("@ytt:library", "library") 3 | #@ load("@ytt:yaml", "yaml") 4 | 5 | #@overlay/match by=overlay.not_op(overlay.subset({"kind": "Secret"})),expects="0+" 6 | --- 7 | #@overlay/remove 8 | 9 | --- 10 | 11 | #@overlay/match by=overlay.subset({"kind": "Secret"}) 12 | --- 13 | stringData: 14 | #@overlay/replace via=lambda old,new: old+new 15 | calicoYaml: #@ yaml.encode(library.get("kapp-controller").eval()) 
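#! Net effect of this (deprecated) overlay: every non-Secret document is dropped, and the
#! rendered kapp-controller release from _ytt_lib is appended (via yaml.encode) to the
#! Secret's existing calicoYaml entry, so kapp-controller rode along with the CNI
#! postcreate step instead of getting its own App CR.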
--------------------------------------------------------------------------------
/management-cluster/single-node/README.md:
--------------------------------------------------------------------------------
# Single Node YTT Overlay

Have a single k8s node, and need your deployments to get scheduled on the master? Don't want to remove the taint so you have better control over what happens? Worry no more: this simple `ytt` overlay adds the toleration to every deployment.

```shell
kubectl get deployment -n cert-manager -o yaml | ytt -f single-node -f - | kubectl apply -f -
```
--------------------------------------------------------------------------------
/management-cluster/single-node/master-toleration-overlay.yaml:
--------------------------------------------------------------------------------
#@ load("@ytt:overlay", "overlay")

#! kubectl get deployment -n cert-manager -o yaml | ytt -f single-node -f - | kubectl apply -f -

#@overlay/match by=overlay.subset({"kind":"List"})
---
items:
#@overlay/match by=overlay.subset({"kind":"Deployment"}),expects="1+"
- spec:
    template:
      spec:
        #@overlay/match missing_ok=True
        tolerations:
        #@overlay/match by="key", missing_ok=True
        - effect: NoSchedule
          key: node-role.kubernetes.io/master
--------------------------------------------------------------------------------
/management-cluster/template-assist/README.md:
--------------------------------------------------------------------------------
# Kapp Controller Deployment

This folder is not deployed as part of the regular deployment; instead, it is used from the management cluster to generate the initial manifests for a workload cluster.

```
tkg create cluster snake --plan=dev --dry-run | ytt -f template-assist -f- > workload/manifests/clusters/snake.yaml
```
--------------------------------------------------------------------------------
/management-cluster/template-assist/template-assist-values.yaml:
--------------------------------------------------------------------------------
#@data/values
---
cluster_name: ""

git:
  url: https://github.com/voor/cluster-api-gitops
  branch: origin/default
  secretRef: management-git-secret
--------------------------------------------------------------------------------
/management-cluster/workload/manifests/.gitignore:
--------------------------------------------------------------------------------
zzauthsecrets.yaml
--------------------------------------------------------------------------------
/management-cluster/workload/manifests/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/voor/cluster-api-gitops/e30df7fec01199f26a210ffdd422ae597314b63d/management-cluster/workload/manifests/.gitkeep
--------------------------------------------------------------------------------
/management-cluster/workload/manifests/00-cluster-values.yaml:
--------------------------------------------------------------------------------
#@data/values
---

domain: ""

team: demo

kubeadmConfigSpec:
  apiVersion: kubeadm.k8s.io/v1beta2
  kind: ClusterConfiguration
  imageRepository: registry.tkg.vmware.run
  kubernetesVersion: v1.19.1+vmware.3
  etcd:
    local:
      dataDir: /var/lib/etcd
      imageRepository: registry.tkg.vmware.run
      imageTag: v3.4.13_vmware.2
  dns:
    type: CoreDNS
    imageRepository:
registry.tkg.vmware.run 21 | imageTag: v1.7.0_vmware.3 22 | ntp: 23 | servers: 24 | - 169.254.169.123 25 | 26 | aws: 27 | region: us-gov-east-1 28 | sshKeyName: default 29 | vpc: 30 | name: workload-clusters 31 | id: "vpc-077617e1cf2cfb9ac" 32 | subnets: 33 | - subnet-0b9f0a52213fb7d93 34 | - subnet-01f5b8ce2d88cfcce 35 | ami: 36 | id: "ami-0b67bd0ee39c2cba5" 37 | controlPlaneLoadBalancer: 38 | scheme: internet-facing 39 | 40 | oidc: 41 | client_id: "" 42 | issuer_url: "" 43 | username_claim: email 44 | groups_claim: groups 45 | 46 | etcd: 47 | aescbc: "" 48 | -------------------------------------------------------------------------------- /management-cluster/workload/manifests/README.md: -------------------------------------------------------------------------------- 1 | # Cluster Manifests 2 | 3 | Any clusters added into this folder are immediately created and reconciled from the management cluster. -------------------------------------------------------------------------------- /management-cluster/workload/manifests/clusters/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/voor/cluster-api-gitops/e30df7fec01199f26a210ffdd422ae597314b63d/management-cluster/workload/manifests/clusters/.gitkeep -------------------------------------------------------------------------------- /management-cluster/workload/manifests/overlays/cluster-auth-overlay.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | #@ load("@ytt:data", "data") 3 | 4 | #@overlay/match by=overlay.subset({"kind":"KubeadmControlPlane"}),expects="0+" 5 | --- 6 | spec: 7 | kubeadmConfigSpec: 8 | clusterConfiguration: 9 | apiServer: 10 | #@overlay/match missing_ok=True 11 | extraArgs: 12 | #@overlay/match missing_ok=True 13 | oidc-client-id: #@ data.values.oidc.client_id 14 | #@overlay/match missing_ok=True 15 | oidc-issuer-url: #@ data.values.oidc.issuer_url 16 | #@overlay/match missing_ok=True 17 | oidc-username-claim: #@ data.values.oidc.username_claim 18 | #@overlay/match missing_ok=True 19 | oidc-groups-claim: #@ data.values.oidc.groups_claim -------------------------------------------------------------------------------- /management-cluster/workload/manifests/overlays/cluster-controlPlaneLoadBalancer-overlay.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | #@ load("@ytt:data", "data") 3 | 4 | #@overlay/match by=overlay.subset({"kind":"AWSCluster"}),expects="0+" 5 | --- 6 | spec: 7 | #@overlay/match missing_ok=True 8 | controlPlaneLoadBalancer: 9 | #@overlay/match missing_ok=True 10 | scheme: #@ data.values.aws.controlPlaneLoadBalancer.scheme 11 | -------------------------------------------------------------------------------- /management-cluster/workload/manifests/overlays/cluster-host-overlay.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | #@ load("@ytt:yaml", "yaml") 3 | 4 | #@ def host_config(): 5 | - hostnamectl set-hostname {{ v1.local_hostname }}.{{ v1.region }}.compute.internal 6 | #@ end 7 | 8 | #@overlay/match by=overlay.subset({"kind":"KubeadmControlPlane"}),expects="0+" 9 | --- 10 | spec: 11 | kubeadmConfigSpec: 12 | initConfiguration: 13 | nodeRegistration: 14 | #@overlay/match missing_ok=True 15 | name: '{{ v1.local_hostname }}.{{ v1.region }}.compute.internal' 16 | clusterConfiguration: 17 | 
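#! With Calico providing pod networking in this setup, the AWS cloud provider does not
#! need to program VPC route tables, hence configure-cloud-routes is pinned to "false".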
controllerManager: 18 | extraArgs: 19 | #@overlay/match missing_ok=True 20 | configure-cloud-routes: "false" 21 | joinConfiguration: 22 | nodeRegistration: 23 | #@overlay/match missing_ok=True 24 | name: '{{ v1.local_hostname }}.{{ v1.region }}.compute.internal' 25 | #@overlay/match missing_ok=True 26 | preKubeadmCommands: #@ host_config() 27 | 28 | #@overlay/match by=overlay.subset({"kind":"KubeadmConfigTemplate"}),expects="0+" 29 | --- 30 | spec: 31 | template: 32 | spec: 33 | joinConfiguration: 34 | nodeRegistration: 35 | #@overlay/match missing_ok=True 36 | name: '{{ v1.local_hostname }}.{{ v1.region }}.compute.internal' 37 | #@overlay/match missing_ok=True 38 | preKubeadmCommands: #@ host_config() -------------------------------------------------------------------------------- /management-cluster/workload/manifests/overlays/cluster-ntp-overlay.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | #@ load("@ytt:yaml", "yaml") 3 | #@ load("@ytt:data", "data") 4 | 5 | #@ def ntp_config(): 6 | enabled: true 7 | servers: #@ data.values.kubeadmConfigSpec.ntp.servers 8 | #@ end 9 | 10 | #@ def is_kubeadmcontrolplane(): 11 | kind: KubeadmControlPlane 12 | #@ end 13 | 14 | #@ def has_ntp_label(): 15 | metadata: 16 | labels: 17 | ntp: disabled 18 | #@ end 19 | 20 | #@ overlay_kubeadmcontrolplane = overlay.subset(is_kubeadmcontrolplane()) 21 | #@ not_has_ntp_label = overlay.not_op(overlay.subset(has_ntp_label())) 22 | #@overlay/match by=overlay.and_op(overlay_kubeadmcontrolplane, not_has_ntp_label),expects="0+" 23 | --- 24 | spec: 25 | kubeadmConfigSpec: 26 | #@overlay/match missing_ok=True 27 | ntp: #@ ntp_config() 28 | 29 | #@overlay/match by=overlay.subset({"kind":"KubeadmConfigTemplate"}),expects="0+" 30 | --- 31 | spec: 32 | template: 33 | spec: 34 | #@overlay/match missing_ok=True 35 | ntp: #@ ntp_config() 36 | -------------------------------------------------------------------------------- /management-cluster/workload/manifests/overlays/cluster-upgrade-overlay.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | #@ load("@ytt:data", "data") 3 | 4 | #@overlay/match by=overlay.subset({"kind":"AWSMachineTemplate"}),expects="0+" 5 | --- 6 | metadata: 7 | #@overlay/match missing_ok=True 8 | annotations: 9 | kapp.k14s.io/delete-strategy: "orphan" 10 | kapp.k14s.io/versioned: "" 11 | kapp.k14s.io/num-versions: "10" 12 | spec: 13 | template: 14 | spec: 15 | #@overlay/match missing_ok=True 16 | ami: 17 | #@overlay/match missing_ok=True 18 | id: #@ data.values.aws.ami.id 19 | 20 | #@overlay/match by=overlay.subset({"kind":"AWSCluster"}),expects="0+" 21 | --- 22 | metadata: 23 | #@overlay/match missing_ok=True 24 | annotations: 25 | #@overlay/match missing_ok=True 26 | kapp.k14s.io/delete-strategy: "orphan" 27 | 28 | #@overlay/match by=overlay.subset({"kind":"KubeadmControlPlane"}),expects="0+" 29 | --- 30 | metadata: 31 | #@overlay/match missing_ok=True 32 | annotations: 33 | kapp.k14s.io/delete-strategy: "orphan" 34 | spec: 35 | kubeadmConfigSpec: 36 | clusterConfiguration: 37 | dns: 38 | imageRepository: #@ data.values.kubeadmConfigSpec.dns.imageRepository 39 | imageTag: #@ data.values.kubeadmConfigSpec.dns.imageTag 40 | type: #@ data.values.kubeadmConfigSpec.dns.type 41 | etcd: 42 | local: 43 | imageRepository: #@ data.values.kubeadmConfigSpec.etcd.local.imageRepository 44 | imageTag: #@ data.values.kubeadmConfigSpec.etcd.local.imageTag 45 | 
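#! This is the upgrade lever described in the top-level README: bumping
#! kubernetesVersion in 00-cluster-values.yaml rolls the control plane (and, via the
#! MachineDeployment match below, the workers) to the new Kubernetes release.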
imageRepository: #@ data.values.kubeadmConfigSpec.imageRepository 46 | #@overlay/match missing_ok=True 47 | version: #@ data.values.kubeadmConfigSpec.kubernetesVersion 48 | 49 | #@overlay/match by=overlay.subset({"kind":"MachineDeployment"}),expects="0+" 50 | --- 51 | spec: 52 | template: 53 | spec: 54 | #@overlay/match missing_ok=True 55 | version: #@ data.values.kubeadmConfigSpec.kubernetesVersion 56 | 57 | #@overlay/match by=overlay.subset({"kind":"KubeadmConfigTemplate"}),expects="0+" 58 | --- 59 | metadata: 60 | #@overlay/match missing_ok=True 61 | annotations: 62 | kapp.k14s.io/delete-strategy: "orphan" 63 | kapp.k14s.io/versioned: "" 64 | kapp.k14s.io/num-versions: "10" 65 | -------------------------------------------------------------------------------- /management-cluster/workload/manifests/overlays/cluster-vpc-overlay.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | #@ load("@ytt:data", "data") 3 | 4 | #@overlay/match by=overlay.not_op(overlay.subset({"apiVersion": "kapp.k14s.io/v1alpha1", "kind":"Config"})),expects="0+" 5 | --- 6 | #@overlay/match missing_ok=True 7 | metadata: 8 | #@overlay/match missing_ok=True 9 | namespace: #@ data.values.aws.vpc.name 10 | 11 | #@overlay/match by=overlay.subset({"kind":"Cluster"}),expects="0+" 12 | --- 13 | spec: 14 | controlPlaneRef: 15 | #@overlay/match missing_ok=True 16 | namespace: #@ data.values.aws.vpc.name 17 | infrastructureRef: 18 | #@overlay/match missing_ok=True 19 | namespace: #@ data.values.aws.vpc.name 20 | 21 | #@overlay/match by=overlay.subset({"kind":"AWSCluster"}),expects="0+" 22 | --- 23 | spec: 24 | #@overlay/match missing_ok=True 25 | region: #@ data.values.aws.region 26 | #@overlay/match missing_ok=True 27 | sshKeyName: #@ data.values.aws.sshKeyName 28 | #@overlay/match missing_ok=True 29 | networkSpec: 30 | #@overlay/match missing_ok=True 31 | subnets: 32 | #@ for subnet in data.values.aws.subnets: 33 | #@overlay/match by="id", missing_ok=True 34 | #@overlay/match-child-defaults missing_ok=True 35 | - id: #@ subnet 36 | #@ end 37 | vpc: 38 | #@overlay/match missing_ok=True 39 | #@overlay/remove 40 | cidrBlock: 41 | #@overlay/match missing_ok=True 42 | id: #@ data.values.aws.vpc.id 43 | #@overlay/match missing_ok=True 44 | bastion: 45 | #@overlay/match missing_ok=True 46 | enabled: false 47 | #@overlay/match missing_ok=True 48 | additionalTags: 49 | #@overlay/match missing_ok=True 50 | Environment: #@ data.values.aws.vpc.name 51 | #@overlay/match missing_ok=True 52 | Responsibility: #@ data.values.team 53 | 54 | #@overlay/match by=overlay.subset({"kind":"AWSMachineTemplate"}),expects="0+" 55 | --- 56 | spec: 57 | template: 58 | spec: 59 | #@overlay/match missing_ok=True 60 | sshKeyName: #@ data.values.aws.sshKeyName 61 | 62 | 63 | #@overlay/match by=overlay.subset({"kind":"KubeadmControlPlane"}),expects="0+" 64 | --- 65 | spec: 66 | kubeadmConfigSpec: 67 | clusterConfiguration: 68 | controllerManager: 69 | #@overlay/match missing_ok=True 70 | extraArgs: 71 | #@overlay/match missing_ok=True 72 | configure-cloud-routes: "false" 73 | -------------------------------------------------------------------------------- /management-cluster/workload/manifests/secret.yaml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | apiVersion: v1 4 | kind: Secret 5 | metadata: 6 | name: i-am-a-silly-secret 7 | namespace: workload-clusters 8 | type: Opaque 9 | stringData: 10 | README: | 11 | This file exists because 
kapp-controller will typically fail when an App renders no resources. 12 | So this single resource exists to make sure kapp-controller always has something to work with. -------------------------------------------------------------------------------- /registry/registry/manifests/.gitignore: -------------------------------------------------------------------------------- 1 | out/ 2 | -------------------------------------------------------------------------------- /registry/registry/manifests/registry-values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | aws: 4 | region: "us-east-1" 5 | registry: 6 | image: "docker.io/bitnami/harbor-registry:2-debian-10" 7 | namespace: registry 8 | name: registry 9 | replicas: 1 10 | s3: 11 | accesskey: "" 12 | secretkey: "" 13 | bucket: "registry-05d29c2606" 14 | prefix: "images" 15 | secrets: 16 | crt: | 17 | -----BEGIN CERTIFICATE----- 18 | -----END CERTIFICATE----- 19 | key: | 20 | -----BEGIN RSA PRIVATE KEY----- 21 | -----END RSA PRIVATE KEY----- -------------------------------------------------------------------------------- /supervisor-cluster/local.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: run.tanzu.vmware.com/v1alpha1 #TKG API endpoint 2 | kind: TanzuKubernetesCluster #required parameter 3 | metadata: 4 | name: local #cluster name, user defined 5 | namespace: main #supervisor namespace 6 | spec: 7 | distribution: 8 | version: v1.16 #resolved kubernetes version 9 | settings: 10 | network: 11 | pods: 12 | cidrBlocks: 13 | - "100.96.0.0/11" # CIDR block used by Calico 14 | storage: 15 | defaultClass: "tkc-storage-policy" 16 | topology: 17 | controlPlane: 18 | count: 1 #number of control plane nodes 19 | class: best-effort-small #vmclass for control plane nodes 20 | storageClass: tkc-storage-policy #storageclass for control plane 21 | workers: 22 | count: 2 #number of worker nodes 23 | class: best-effort-medium #vmclass for worker nodes 24 | storageClass: tkc-storage-policy #storageclass for worker nodes -------------------------------------------------------------------------------- /workload-vsphere/kube-vip/controller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: starboard 5 | namespace: kube-system 6 | --- 7 | kind: Role 8 | apiVersion: rbac.authorization.k8s.io/v1 9 | metadata: 10 | name: starboard-role 11 | namespace: kube-system 12 | rules: 13 | - apiGroups: [""] 14 | resources: ["configmaps"] 15 | verbs: ["watch","get"] 16 | --- 17 | kind: RoleBinding 18 | apiVersion: rbac.authorization.k8s.io/v1 19 | metadata: 20 | name: starboard-role-bind 21 | namespace: kube-system 22 | subjects: 23 | - kind: ServiceAccount 24 | name: starboard 25 | namespace: kube-system 26 | roleRef: 27 | kind: Role 28 | name: starboard-role 29 | apiGroup: rbac.authorization.k8s.io 30 | --- 31 | apiVersion: apps/v1 32 | kind: DaemonSet 33 | metadata: 34 | name: starboard-ds 35 | namespace: kube-system 36 | labels: {} 37 | spec: 38 | selector: 39 | matchLabels: 40 | name: starboard-ds 41 | template: 42 | metadata: 43 | labels: 44 | name: starboard-ds 45 | spec: 46 | hostNetwork: true 47 | serviceAccountName: starboard 48 | containers: 49 | - name: starboard-ds 50 | image: plndr/starboard:0.1.1 51 | imagePullPolicy: Always 52 | securityContext: 53 | capabilities: 54 | add: 55 | - NET_ADMIN 56 | - NET_RAW 57 | - SYS_ADMIN 58 | --- 59 | apiVersion: v1 60 | kind: ServiceAccount 61 | metadata: 62 | 
name: plunder-cloud-controller 63 | namespace: kube-system 64 | --- 65 | apiVersion: rbac.authorization.k8s.io/v1 66 | kind: ClusterRole 67 | metadata: 68 | annotations: 69 | rbac.authorization.kubernetes.io/autoupdate: "true" 70 | name: system:plunder-cloud-controller-role 71 | rules: 72 | - apiGroups: [""] 73 | resources: ["configmaps", "endpoints","events","services/status"] 74 | verbs: ["*"] 75 | - apiGroups: [""] 76 | resources: ["nodes", "services"] 77 | verbs: ["list","get","watch"] 78 | --- 79 | kind: ClusterRoleBinding 80 | apiVersion: rbac.authorization.k8s.io/v1 81 | metadata: 82 | name: system:plunder-cloud-controller-binding 83 | roleRef: 84 | apiGroup: rbac.authorization.k8s.io 85 | kind: ClusterRole 86 | name: system:plunder-cloud-controller-role 87 | subjects: 88 | - kind: ServiceAccount 89 | name: plunder-cloud-controller 90 | namespace: kube-system 91 | --- 92 | apiVersion: v1 93 | kind: Pod 94 | metadata: 95 | creationTimestamp: null 96 | name: plndr-cloud-provider 97 | namespace: kube-system 98 | spec: 99 | containers: 100 | - command: 101 | - /plndr-cloud-provider 102 | image: plndr/plndr-cloud-provider:0.1.2 103 | name: plndr-cloud-provider 104 | imagePullPolicy: Always 105 | resources: {} 106 | serviceAccountName: plunder-cloud-controller -------------------------------------------------------------------------------- /workload-vsphere/kube-vip/kube-vip.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: vip 5 | namespace: kube-vip-cluster 6 | --- 7 | kind: Role 8 | apiVersion: rbac.authorization.k8s.io/v1 9 | metadata: 10 | name: vip-role 11 | namespace: kube-vip-cluster 12 | rules: 13 | - apiGroups: ["coordination.k8s.io"] 14 | resources: ["leases"] 15 | verbs: ["get", "create", "update", "list"] 16 | - apiGroups: [""] 17 | resources: ["configmaps", "endpoints"] 18 | verbs: ["watch", "get"] 19 | --- 20 | kind: RoleBinding 21 | apiVersion: rbac.authorization.k8s.io/v1 22 | metadata: 23 | name: vip-role-bind 24 | namespace: kube-vip-cluster 25 | subjects: 26 | - kind: ServiceAccount 27 | name: vip 28 | namespace: kube-vip-cluster 29 | apiGroup: "" 30 | roleRef: 31 | kind: Role 32 | name: vip-role 33 | # (roleRef takes no namespace; the Role is resolved in this RoleBinding's namespace) 34 | apiGroup: rbac.authorization.k8s.io 35 | --- 36 | apiVersion: apps/v1 37 | kind: Deployment 38 | metadata: 39 | creationTimestamp: null 40 | labels: 41 | app: kube-vip-cluster 42 | name: kube-vip-cluster 43 | namespace: kube-vip-cluster 44 | spec: 45 | replicas: 3 46 | selector: 47 | matchLabels: 48 | app: kube-vip-cluster 49 | strategy: {} 50 | template: 51 | metadata: 52 | creationTimestamp: null 53 | labels: 54 | app: kube-vip-cluster 55 | spec: 56 | affinity: 57 | podAntiAffinity: 58 | requiredDuringSchedulingIgnoredDuringExecution: 59 | - labelSelector: 60 | matchExpressions: 61 | - key: "app" 62 | operator: In 63 | values: 64 | - kube-vip-cluster 65 | topologyKey: "kubernetes.io/hostname" 66 | containers: 67 | - image: plndr/kube-vip:0.1.3 68 | imagePullPolicy: Always 69 | name: kube-vip 70 | command: 71 | - /kube-vip 72 | - service 73 | env: 74 | - name: vip_interface 75 | value: "ens192" 76 | - name: vip_configmap 77 | value: "plndr" 78 | - name: vip_arp 79 | value: "true" 80 | - name: vip_loglevel 81 | value: "5" 82 | resources: {} 83 | securityContext: 84 | capabilities: 85 | add: 86 | - NET_ADMIN 87 | hostNetwork: true 88 | serviceAccountName: vip 89 | status: {} 
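90 | 91 | # The vip_* env vars above configure how the elected leader advertises an allocated VIP 92 | # (ARP on ens192, per vip_arp and vip_interface); the address pools themselves appear to come 93 | # from the per-namespace cidr-* keys of the "plndr" ConfigMap defined in plndr-config.yaml. 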
-------------------------------------------------------------------------------- /workload-vsphere/kube-vip/plndr-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | cidr-default: 192.168.10.0/24 4 | kind: ConfigMap 5 | metadata: 6 | name: plndr 7 | namespace: kube-system 8 | -------------------------------------------------------------------------------- /workload-vsphere/storage-class/ssd01.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: generic 5 | annotations: 6 | storageclass.kubernetes.io/is-default-class: "true" 7 | provisioner: csi.vsphere.vmware.com 8 | parameters: 9 | storagepolicyname: "k8s-storage-policy" -------------------------------------------------------------------------------- /workload/.gitignore: -------------------------------------------------------------------------------- 1 | master.key 2 | configuration-values/ 3 | tanzu-application-service/ 4 | images/ 5 | 6 | -------------------------------------------------------------------------------- /workload/README.md: -------------------------------------------------------------------------------- 1 | # Workload Cluster 2 | 3 | Once you have your cluster up, go ahead and grab the KUBECONFIG and add it to your primary config: 4 | 5 | ``` 6 | kubectl get secrets workload-kubeconfig -o jsonpath={.data.value} | base64 -d > ~/.kube/config.workload \ 7 | && KUBECONFIG=~/.kube/config.workload:~/.kube/config kubectl config view --flatten > ~/.kube/config.new \ 8 | && mv ~/.kube/config.new ~/.kube/config 9 | ``` 10 | Or with TKG: 11 | ``` 12 | tkg get credentials $CLUSTER_NAME 13 | ``` 14 | 15 | Now add some stuff! 16 | 17 | ## Prerequisites 18 | 19 | This installation assumes you have a few things set up out of band: 20 | 21 | * This guide assumes AWS GovCloud, but should work just as easily in AWS Commercial; GovCloud is used here to show cross-AWS-region compliance (something often neglected). 22 | * You own a domain name. In this example we're using `tanzu.world`, but it can be anything you want (seriously, domains cost about $3 or less nowadays). 23 | * You are using [Cloudflare](https://www.cloudflare.com/) to manage the DNS. Even if you use someone else to register the domain name, you can change the nameservers to point to Cloudflare for free. Cloudflare has the easiest-to-consume API, even when compared to AWS Route 53 (also, AWS Route 53 isn't available in GovCloud regions for public domains). 24 | * You have an API **TOKEN** that has Zone.Zone `Read` and Zone.DNS `Edit` privileges; due to a bug in how Let's Encrypt works, you'll also need Account.Account Settings `Read`. 25 | 26 | ## Initial Installs 27 | 28 | These are generic installs that don't require specific configuration before they're installed. 29 | 30 | ``` 31 | # Calico is the CNI for the workload cluster 32 | # Skip this if using TKG, as the CNI is already done for you 33 | kapp -y deploy -a calico -f calico/manifests 34 | # If you are recovering from another cluster, apply your master.key here: 35 | # kubectl apply -f master.key 36 | # Sealed Secrets is how you securely add secrets into the cluster that can still be stored in a git repository 37 | kapp -y deploy -a sealed-secrets -f sealed-secrets/manifests -c 38 | ``` 39 | 40 | ## Adding Secrets 41 | 42 | Next we need to "seal" the secrets so they're safe to store in git. These hold configuration specific to our clusters. 43 | 44 | ``` 45 | # This is a PUBLIC cert so it's safe to commit! We grab the public cert so we don't need connectivity to the cluster for generating secrets. 46 | kubeseal --fetch-cert > workload-secrets.pem 47 | # Each of the files in the secrets/ folder needs to be modified to contain your configuration secrets. 48 | ./secrets/seal.sh # By default will take everything in the secrets folder and seal it. 49 | ```
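50 | 51 | For reference, a minimal sketch of what a `seal.sh` along these lines might look like (an assumption about its contents, based on plain Secrets living in `secrets/*.yaml` and the cert fetched above): 52 | 53 | ``` 54 | #!/usr/bin/env sh 55 | set -eu 56 | # Seal every plain Secret so only the controller inside the cluster can decrypt it. 57 | for f in secrets/*.yaml; do 58 |   kubeseal --cert workload-secrets.pem --format yaml < "$f" > "${f%.yaml}.sealed.yaml" 59 | done 60 | ```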
61 | 62 | ## Install Everything Else 63 | 64 | ``` 65 | 66 | ``` 67 | 68 | ### Backup Secrets 69 | 70 | ``` 71 | # DO NOT COMMIT: THIS IS A PRIVATE KEY 72 | kubectl get secret -n kube-system -l sealedsecrets.bitnami.com/sealed-secrets-key -o yaml >master.key 73 | ``` 74 | 75 | 76 | ## Post-Secrets Installs 77 | 78 | Now that the secrets are properly populated, we can install the tooling that depends on them: 79 | 80 | ``` 81 | kapp -y deploy -a external-dns -f external-dns 82 | ``` 83 | 84 | ## Adding Unique Config 85 | 86 | These files carry your cluster's variables; edit the values in them accordingly. 87 | 88 | * `workload/knative/overlays/dev/domain.yaml` 89 | * `workload/unique/letsencrypt-issuer.yaml` 90 | 91 | Install everything after modifying those files: 92 | 93 | ``` 94 | kapp -y deploy -a letsencrypt-issuer -f unique/letsencrypt-issuer.yaml 95 | kustomize build knative/overlays/dev | kapp -y deploy -a knative-serving -f - 96 | 97 | ``` 98 | 99 | ## Install Apps 100 | 101 | Now just go ahead and build your apps. 102 | 103 | ``` 104 | kapp deploy -a sample-app -f apps/sample.yaml 105 | ``` -------------------------------------------------------------------------------- /workload/apps/manifests/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: apps 6 | -------------------------------------------------------------------------------- /workload/build-service/.gitignore: -------------------------------------------------------------------------------- 1 | artifacts/ 2 | *.tgz 3 | bundle.json 4 | ca.crt 5 | credentials.yaml 6 | kubeconfig.yml 7 | relocated.json 8 | gcr.io.creds.json -------------------------------------------------------------------------------- /workload/build-service/install.sh: -------------------------------------------------------------------------------- 1 | duffle install tanzu-build-service \ 2 | -c credentials.yaml \ 3 | --set kubernetes_env=another \ 4 | --set docker_registry=gcr.io \ 5 | --set docker_repository=gcr.io/pa-rvanvoorhees/build-service \ 6 | --set registry_username=_json_key \ 7 | --set registry_password="$(cat gcr.io.creds.json)" \ 8 | --set custom_builder_image="gcr.io/pa-rvanvoorhees/build-service/default-builder" \ 9 | -f build-service-0.1.0.tgz \ 10 | -m relocated.json -------------------------------------------------------------------------------- /workload/build-service/service-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: tanzu-build-service 5 | namespace: default 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1 8 | kind: ClusterRoleBinding 9 | metadata: 10 | name: tanzu-build-service 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: ClusterRole 14 | name: cluster-admin 15 | subjects: 16 | - kind: ServiceAccount 17 | name: tanzu-build-service 18 | namespace: default 
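19 | 20 | # Note: the duffle-based installer (install.sh above) creates cluster-scoped resources such as 21 | # CRDs, which is presumably why this ServiceAccount is bound to cluster-admin rather than a narrower role. 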
-------------------------------------------------------------------------------- /workload/calico/manifests/calico.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:library", "library") 2 | #@ load("@ytt:data", "data") 3 | #@ load("@ytt:template", "template") 4 | #@ load("@ytt:overlay", "overlay") 5 | 6 | #@ load("calico/calico.lib.yaml", "calicoyaml") 7 | #@ load("calico/calico_overlay.lib.yaml", "calicooverlay") 8 | 9 | --- #@ template.replace(overlay.apply(calicoyaml(), calicooverlay())) -------------------------------------------------------------------------------- /workload/calico/manifests/calico/calico_overlay.lib.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | #@ load("@ytt:data", "data") 3 | #! @ load("/lib/helpers.star", "get_default_bom_data", "tkg_image_repo") 4 | #@ load("@ytt:yaml", "yaml") 5 | 6 | #@ def tkg_image_repo(): 7 | #@ return data.values.TKG_CUSTOM_IMAGE_REPOSITORY 8 | #@ end 9 | 10 | #@ def startswith(index, left, right): 11 | #@ return left.startswith(right) 12 | #@ end 13 | 14 | #@ bomData = data.values.bomData 15 | 16 | #@ def calicooverlay(): 17 | 18 | #@overlay/match by=overlay.subset({"kind":"DaemonSet"}) 19 | --- 20 | kind: DaemonSet 21 | spec: 22 | template: 23 | spec: 24 | initContainers: 25 | #@overlay/match by=overlay.subset({"name":"upgrade-ipam"}) 26 | - name: upgrade-ipam 27 | image: #@ "{}/{}:{}".format(tkg_image_repo(), bomData.images.calicoCniImage.imagePath, bomData.images.calicoCniImage.tag) 28 | #@overlay/match by=overlay.subset({"name":"install-cni"}) 29 | - name: install-cni 30 | image: #@ "{}/{}:{}".format(tkg_image_repo(), bomData.images.calicoCniImage.imagePath, bomData.images.calicoCniImage.tag) 31 | #@overlay/match by=overlay.subset({"name":"flexvol-driver"}) 32 | - name: flexvol-driver 33 | image: #@ "{}/{}:{}".format(tkg_image_repo(), bomData.images.calicoPodDaemonImage.imagePath, bomData.images.calicoPodDaemonImage.tag) 34 | containers: 35 | #@overlay/match by=overlay.subset({"name":"calico-node"}) 36 | - name: calico-node 37 | image: #@ "{}/{}:{}".format(tkg_image_repo(), bomData.images.calicoNodeImage.imagePath, bomData.images.calicoNodeImage.tag) 38 | env: 39 | #@overlay/match by=overlay.subset({"name":"CALICO_IPV4POOL_CIDR"}) 40 | - value: #@ data.values.CLUSTER_CIDR 41 | #@ if data.values.PROVIDER_TYPE == "azure": 42 | #@overlay/match by=overlay.subset({"name":"CALICO_IPV4POOL_IPIP"}) 43 | #@overlay/merge 44 | - name: CALICO_IPV4POOL_VXLAN 45 | value: Always 46 | #@overlay/match by=overlay.subset({"name":"FELIX_IPINIPMTU"}) 47 | #@overlay/remove 48 | - name: FELIX_IPINIPMTU 49 | livenessProbe: 50 | exec: 51 | command: 52 | #@overlay/remove 53 | #@overlay/match by=startswith 54 | - -bird-live 55 | readinessProbe: 56 | exec: 57 | command: 58 | #@overlay/remove 59 | #@overlay/match by=startswith 60 | - -bird-ready 61 | #@ end 62 | 63 | #@overlay/match by=overlay.subset({"kind":"Deployment"}) 64 | --- 65 | kind: Deployment 66 | spec: 67 | template: 68 | spec: 69 | containers: 70 | #@overlay/match by=overlay.subset({"name":"calico-kube-controllers"}) 71 | - image: #@ "{}/{}:{}".format(tkg_image_repo(), bomData.images.calicoKubecontrollerImage.imagePath, bomData.images.calicoKubecontrollerImage.tag) 72 | 73 | #@ if data.values.PROVIDER_TYPE == "azure": 74 | #@overlay/match by=overlay.subset({"kind":"ConfigMap"}) 75 | --- 76 | kind: ConfigMap 77 | data: 78 | calico_backend: "vxlan" 79 | #@ end 80 | 81 
| #@ end 82 | -------------------------------------------------------------------------------- /workload/calico/manifests/values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | 4 | TKG_CUSTOM_IMAGE_REPOSITORY: registry.tkg.vmware.run 5 | 6 | CLUSTER_CIDR: 100.96.0.0/11 7 | 8 | PROVIDER_TYPE: aws 9 | 10 | bomData: 11 | images: 12 | calicoCniImage: 13 | imagePath: calico-all/cni-plugin 14 | tag: v3.11.3_vmware.1 15 | calicoKubecontrollerImage: 16 | imagePath: calico-all/kube-controllers 17 | tag: v3.11.3_vmware.1 18 | calicoNodeImage: 19 | imagePath: calico-all/node 20 | tag: v3.11.3_vmware.1 21 | calicoPodDaemonImage: 22 | imagePath: calico-all/pod2daemon 23 | tag: v3.11.3_vmware.1 -------------------------------------------------------------------------------- /workload/cert-manager/helm/cert-manager-values.yaml: -------------------------------------------------------------------------------- 1 | fullnameOverride: cert-manager 2 | 3 | installCRDs: true 4 | 5 | resources: 6 | requests: 7 | cpu: 10m 8 | memory: 32Mi 9 | 10 | ingressShim: 11 | resources: 12 | requests: 13 | cpu: 10m 14 | memory: 32Mi 15 | 16 | webhook: 17 | deploymentAnnotations: 18 | kapp.k14s.io/change-group: "change-groups.cert-manager.io/webhook" 19 | kapp.k14s.io/change-rule: "upsert before upserting change-groups.kapp.k14s.io/crds" -------------------------------------------------------------------------------- /workload/cert-manager/helm/generate-manifests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -eux 3 | 4 | helm repo add jetstack https://charts.jetstack.io 5 | helm repo update 6 | 7 | helm template cert-manager jetstack/cert-manager --version v0.16.1 -f cert-manager/helm/cert-manager-values.yaml --namespace cert-manager --include-crds > cert-manager/manifests/cert-manager.yaml -------------------------------------------------------------------------------- /workload/cert-manager/helm/overlay-helmtemplate.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | 3 | -------------------------------------------------------------------------------- /workload/cert-manager/manifests/cert-manager-additional.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | 3 | --- 4 | apiVersion: v1 5 | kind: Namespace 6 | metadata: 7 | name: cert-manager 8 | annotations: 9 | kapp.k14s.io/change-group: 'cert-manager.io/cert-manager-namespace' 10 | kapp.k14s.io/change-rule.1: "delete after deleting cert-manager.io/crds" 11 | kapp.k14s.io/disable-default-change-group-and-rules: 'true' 12 | 13 | 14 | #@overlay/match by=overlay.subset({"metadata":{"labels": {"app.kubernetes.io/name": "webhook"}}}),expects="1+" 15 | --- 16 | metadata: 17 | #@overlay/match missing_ok=True 18 | annotations: 19 | #@overlay/match missing_ok=True 20 | kapp.k14s.io/change-group: 'cert-manager.io/cert-manager-webhook' 21 | #@overlay/match missing_ok=True 22 | kapp.k14s.io/disable-default-change-group-and-rules: 'true' 23 | #@overlay/match missing_ok=True 24 | kapp.k14s.io/change-rule.1: "delete after deleting change-groups.kapp.k14s.io/crds" 25 | #@overlay/match missing_ok=True 26 | kapp.k14s.io/change-rule.2: "upsert after upserting change-groups.kapp.k14s.io/namespaces" 27 | 28 | #@overlay/match by=overlay.subset({"kind":"CustomResourceDefinition"}),expects="1+" 29 | --- 30 | 
metadata: 31 | #@overlay/match missing_ok=True 32 | annotations: 33 | #@overlay/match missing_ok=True 34 | kapp.k14s.io/change-group: 'cert-manager.io/crds' 35 | #@overlay/match missing_ok=True 36 | kapp.k14s.io/disable-default-change-group-and-rules: 'true' 37 | #@overlay/match missing_ok=True 38 | kapp.k14s.io/change-rule.1: "delete before deleting change-groups.kapp.k14s.io/namespaces" 39 | #@overlay/match missing_ok=True 40 | kapp.k14s.io/change-rule.2: "upsert after upserting change-groups.kapp.k14s.io/namespaces" -------------------------------------------------------------------------------- /workload/cert-manager/manifests/kapp-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kapp.k14s.io/v1alpha1 2 | kind: Config 3 | 4 | rebaseRules: 5 | 6 | # crd conversions 7 | - path: [spec, conversion, conversionReviewVersions] 8 | type: copy 9 | sources: [new, existing] 10 | resourceMatchers: 11 | - apiVersionKindMatcher: {apiVersion: apiextensions.k8s.io/v1beta1, kind: CustomResourceDefinition} 12 | 13 | - path: [spec, conversion, webhookClientConfig, service, port] 14 | type: copy 15 | sources: [new, existing] 16 | resourceMatchers: 17 | - apiVersionKindMatcher: {apiVersion: apiextensions.k8s.io/v1beta1, kind: CustomResourceDefinition} 18 | 19 | - path: [spec, conversion, webhookClientConfig, caBundle] 20 | type: copy 21 | sources: [new, existing] 22 | resourceMatchers: 23 | - apiVersionKindMatcher: {apiVersion: apiextensions.k8s.io/v1beta1, kind: CustomResourceDefinition} 24 | 25 | - path: [spec, version] 26 | type: copy 27 | sources: [new, existing] 28 | resourceMatchers: 29 | - apiVersionKindMatcher: {apiVersion: apiextensions.k8s.io/v1beta1, kind: CustomResourceDefinition} 30 | -------------------------------------------------------------------------------- /workload/cert-manager/manifests/kbld-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kbld.k14s.io/v1alpha1 2 | kind: Config 3 | minimumRequiredVersion: 0.24.0 4 | overrides: 5 | - image: quay.io/jetstack/cert-manager-cainjector:v0.16.1 6 | newImage: registry.tkg.vmware.run/cert-manager/cert-manager-cainjector:v0.16.1_vmware.1 7 | preresolved: true 8 | - image: quay.io/jetstack/cert-manager-controller:v0.16.1 9 | newImage: registry.tkg.vmware.run/cert-manager/cert-manager-controller:v0.16.1_vmware.1 10 | preresolved: true 11 | - image: quay.io/jetstack/cert-manager-webhook:v0.16.1 12 | newImage: registry.tkg.vmware.run/cert-manager/cert-manager-webhook:v0.16.1_vmware.1 13 | preresolved: true 14 | -------------------------------------------------------------------------------- /workload/contour/manifests/contour-certs.yaml.disable: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1alpha2 2 | kind: Issuer 3 | metadata: 4 | name: contour-selfsigned-issuer 5 | namespace: projectcontour 6 | spec: 7 | selfSigned: {} 8 | 9 | --- 10 | apiVersion: cert-manager.io/v1alpha2 11 | kind: Certificate 12 | metadata: 13 | name: contour-serving-cert 14 | namespace: projectcontour 15 | spec: 16 | secretName: contour-ca-tls 17 | commonName: "contour-ca" 18 | isCA: true 19 | issuerRef: 20 | name: contour-selfsigned-issuer 21 | usages: 22 | - "any" 23 | 24 | --- 25 | apiVersion: cert-manager.io/v1alpha2 26 | kind: Issuer 27 | metadata: 28 | name: contour-ca-issuer 29 | namespace: projectcontour 30 | spec: 31 | ca: 32 | secretName: contour-ca-tls 33 | 34 | --- 35 | 
apiVersion: cert-manager.io/v1alpha2 36 | kind: Certificate 37 | metadata: 38 | name: contourcert 39 | namespace: projectcontour 40 | spec: 41 | secretName: contourcert 42 | commonName: contour 43 | issuerRef: 44 | name: contour-ca-issuer 45 | usages: 46 | - "any" 47 | dnsNames: 48 | - "contour" 49 | - contour.projectcontour 50 | - contour.projectcontour.svc 51 | - contour.projectcontour.svc.cluster.local 52 | 53 | --- 54 | apiVersion: cert-manager.io/v1alpha2 55 | kind: Certificate 56 | metadata: 57 | name: envoycert 58 | namespace: projectcontour 59 | spec: 60 | secretName: envoycert 61 | commonName: envoy 62 | issuerRef: 63 | name: contour-ca-issuer 64 | usages: 65 | - "any" 66 | dnsNames: 67 | - "envoy" 68 | - envoy.projectcontour 69 | - envoy.projectcontour.svc 70 | - envoy.projectcontour.svc.cluster.local -------------------------------------------------------------------------------- /workload/contour/manifests/contour-overlay.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | #@ load("@ytt:data", "data") 3 | 4 | #@overlay/match by=overlay.subset({"kind":"Service", "metadata":{"name":"envoy"}}) 5 | --- 6 | metadata: 7 | #@overlay/match missing_ok=True 8 | annotations: 9 | #@overlay/match missing_ok=True 10 | service.beta.kubernetes.io/aws-load-balancer-type: nlb 11 | 12 | #@overlay/match by=overlay.subset({"kind":"CustomResourceDefinition"}),expects="0+" 13 | --- 14 | #@overlay/remove 15 | status: 16 | 17 | #! #@overlay/match by=overlay.subset({"kind":"Job"}),expects="0+" 18 | #! --- 19 | #! #@overlay/remove 20 | 21 | #@overlay/match by=overlay.subset({"kind":"ConfigMap", "metadata":{"name":"contour"}}) 22 | --- 23 | metadata: 24 | #@overlay/match missing_ok=True 25 | annotations: 26 | kapp.k14s.io/versioned: "" 27 | kapp.k14s.io/num-versions: "4" 28 | -------------------------------------------------------------------------------- /workload/contour/manifests/contour-values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | domain: "" -------------------------------------------------------------------------------- /workload/contour/manifests/kapp-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kapp.k14s.io/v1alpha1 3 | kind: Config 4 | 5 | rebaseRules: 6 | - path: [spec, healthCheckNodePort] 7 | type: copy 8 | sources: [new, existing] 9 | resourceMatchers: 10 | - apiVersionKindMatcher: 11 | apiVersion: v1 12 | kind: Service 13 | -------------------------------------------------------------------------------- /workload/contour/manifests/tls-certificate-delegation.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | #@ load("@ytt:data", "data") 3 | 4 | --- 5 | apiVersion: projectcontour.io/v1 6 | kind: TLSCertificateDelegation 7 | metadata: 8 | name: wildcards 9 | namespace: apps 10 | spec: 11 | delegations: 12 | - secretName: #@ "apps.{}".format(data.values.domain) 13 | targetNamespaces: 14 | - "*" 15 | --- 16 | apiVersion: projectcontour.io/v1 17 | kind: TLSCertificateDelegation 18 | metadata: 19 | name: wildcards 20 | namespace: default 21 | spec: 22 | delegations: 23 | - secretName: #@ "default.{}".format(data.values.domain) 24 | targetNamespaces: 25 | - "*" 26 | -------------------------------------------------------------------------------- /workload/contour/upgrade.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | curl -SsL https://projectcontour.io/quickstart/contour.yaml -o contour/manifests/contour-quickstart.yaml -------------------------------------------------------------------------------- /workload/deploy/0-sealed-secrets.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: kappctrl.k14s.io/v1alpha1 5 | kind: App 6 | metadata: 7 | name: sealed-secrets 8 | namespace: kapp-controller 9 | spec: 10 | serviceAccountName: kapp-controller-clusteradmin 11 | fetch: 12 | - git: 13 | url: #@ data.values.git_url 14 | ref: #@ data.values.git_branch 15 | subPath: workload/sealed-secrets/manifests 16 | secretRef: 17 | name: #@ data.values.git.secretRef 18 | template: 19 | - ytt: 20 | ignoreUnknownComments: true 21 | deploy: 22 | - kapp: {} -------------------------------------------------------------------------------- /workload/deploy/0-storageclass.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: kappctrl.k14s.io/v1alpha1 5 | kind: App 6 | metadata: 7 | name: storageclass 8 | namespace: kapp-controller 9 | spec: 10 | serviceAccountName: kapp-controller-clusteradmin 11 | fetch: 12 | - git: 13 | url: #@ data.values.git_url 14 | ref: #@ data.values.git_branch 15 | subPath: workload/storageclass/manifests 16 | secretRef: 17 | name: #@ data.values.git.secretRef 18 | template: 19 | - ytt: 20 | ignoreUnknownComments: true 21 | deploy: 22 | - kapp: {} -------------------------------------------------------------------------------- /workload/deploy/apps-permissions.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: kappctrl.k14s.io/v1alpha1 5 | kind: App 6 | metadata: 7 | name: apps 8 | namespace: kapp-controller 9 | annotations: 10 | kapp.k14s.io/change-group: "gitops.tanzu.vmware.com/app-permissions" 11 | spec: 12 | serviceAccountName: kapp-controller-clusteradmin 13 | fetch: 14 | - git: 15 | url: #@ data.values.git_url 16 | ref: #@ data.values.git_branch 17 | subPath: workload/apps/manifests 18 | secretRef: 19 | name: #@ data.values.git.secretRef 20 | template: 21 | - ytt: 22 | ignoreUnknownComments: true 23 | deploy: 24 | - kapp: {} -------------------------------------------------------------------------------- /workload/deploy/cert-manager.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: kappctrl.k14s.io/v1alpha1 5 | kind: App 6 | metadata: 7 | name: cert-manager 8 | namespace: kapp-controller 9 | annotations: 10 | kapp.k14s.io/change-group: "gitops.tanzu.vmware.com/cert-manager" 11 | spec: 12 | serviceAccountName: kapp-controller-clusteradmin 13 | fetch: 14 | - git: 15 | url: #@ data.values.git_url 16 | ref: #@ data.values.git_branch 17 | subPath: workload/cert-manager/manifests 18 | secretRef: 19 | name: #@ data.values.git.secretRef 20 | template: 21 | - ytt: 22 | ignoreUnknownComments: true 23 | - kbld: {} 24 | deploy: 25 | - kapp: 26 | delete: 27 | rawOptions: 28 | - "--apply-ignored=true" -------------------------------------------------------------------------------- /workload/deploy/contour.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | 
--- 4 | apiVersion: kappctrl.k14s.io/v1alpha1 5 | kind: App 6 | metadata: 7 | name: contour 8 | namespace: kapp-controller 9 | spec: 10 | serviceAccountName: kapp-controller-clusteradmin 11 | fetch: 12 | - git: 13 | url: #@ data.values.git_url 14 | ref: #@ data.values.git_branch 15 | subPath: workload/contour/manifests 16 | secretRef: 17 | name: #@ data.values.git.secretRef 18 | template: 19 | - ytt: 20 | ignoreUnknownComments: true 21 | inline: 22 | pathsFrom: 23 | #@ for secretRef in data.values.contour_secretRef: 24 | - secretRef: 25 | name: #@ secretRef 26 | #@ end 27 | deploy: 28 | - kapp: {} -------------------------------------------------------------------------------- /workload/deploy/dex.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: kappctrl.k14s.io/v1alpha1 5 | kind: App 6 | metadata: 7 | name: dex 8 | namespace: kapp-controller 9 | spec: 10 | serviceAccountName: kapp-controller-clusteradmin 11 | fetch: 12 | - git: 13 | url: #@ data.values.git_url 14 | ref: #@ data.values.git_branch 15 | subPath: workload/dex/manifests 16 | secretRef: 17 | name: #@ data.values.git.secretRef 18 | template: 19 | - ytt: 20 | ignoreUnknownComments: true 21 | inline: 22 | pathsFrom: 23 | #@ for secretRef in data.values.dex_secretRef: 24 | - secretRef: 25 | name: #@ secretRef 26 | #@ end 27 | deploy: 28 | - kapp: {} -------------------------------------------------------------------------------- /workload/deploy/external-dns.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: kappctrl.k14s.io/v1alpha1 5 | kind: App 6 | metadata: 7 | name: external-dns 8 | namespace: kapp-controller 9 | spec: 10 | serviceAccountName: kapp-controller-clusteradmin 11 | fetch: 12 | - git: 13 | url: #@ data.values.git_url 14 | ref: #@ data.values.git_branch 15 | subPath: workload/external-dns/manifests 16 | secretRef: 17 | name: #@ data.values.git.secretRef 18 | template: 19 | - ytt: 20 | ignoreUnknownComments: true 21 | inline: 22 | pathsFrom: 23 | #@ for secretRef in data.values.externaldns_secretRef: 24 | - secretRef: 25 | name: #@ secretRef 26 | #@ end 27 | deploy: 28 | - kapp: {} -------------------------------------------------------------------------------- /workload/deploy/gangway.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: kappctrl.k14s.io/v1alpha1 5 | kind: App 6 | metadata: 7 | name: gangway 8 | namespace: kapp-controller 9 | spec: 10 | serviceAccountName: kapp-controller-clusteradmin 11 | fetch: 12 | - git: 13 | url: #@ data.values.git_url 14 | ref: #@ data.values.git_branch 15 | subPath: workload/gangway/manifests 16 | secretRef: 17 | name: #@ data.values.git.secretRef 18 | template: 19 | - ytt: 20 | ignoreUnknownComments: true 21 | inline: 22 | pathsFrom: 23 | #@ for secretRef in data.values.gangway_secretRef: 24 | - secretRef: 25 | name: #@ secretRef 26 | #@ end 27 | deploy: 28 | - kapp: {} -------------------------------------------------------------------------------- /workload/deploy/gitlab-runner.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: kappctrl.k14s.io/v1alpha1 5 | kind: App 6 | metadata: 7 | name: gitlab-runner 8 | namespace: kapp-controller 9 | spec: 10 | serviceAccountName: 
kapp-controller-clusteradmin 11 | fetch: 12 | - git: 13 | url: #@ data.values.git_url 14 | ref: #@ data.values.git_branch 15 | subPath: workload/gitlab-runner/manifests 16 | secretRef: 17 | name: #@ data.values.git.secretRef 18 | template: 19 | - ytt: 20 | ignoreUnknownComments: true 21 | inline: 22 | pathsFrom: 23 | #@ for secretRef in data.values.gitlab_runner_secretRef: 24 | - secretRef: 25 | name: #@ secretRef 26 | #@ end 27 | deploy: 28 | - kapp: {} -------------------------------------------------------------------------------- /workload/deploy/kiam.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: kappctrl.k14s.io/v1alpha1 5 | kind: App 6 | metadata: 7 | name: kiam 8 | namespace: kapp-controller 9 | spec: 10 | serviceAccountName: kapp-controller-clusteradmin 11 | fetch: 12 | - git: 13 | url: #@ data.values.git_url 14 | ref: #@ data.values.git_branch 15 | subPath: workload/kiam/manifests 16 | secretRef: 17 | name: #@ data.values.git.secretRef 18 | template: 19 | - ytt: 20 | ignoreUnknownComments: true 21 | inline: 22 | pathsFrom: 23 | #@ for secretRef in data.values.kiam_secretRef: 24 | - secretRef: 25 | name: #@ secretRef 26 | #@ end 27 | deploy: 28 | - kapp: {} -------------------------------------------------------------------------------- /workload/deploy/knative.yaml.disabled: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: kappctrl.k14s.io/v1alpha1 5 | kind: App 6 | metadata: 7 | name: knative 8 | namespace: kapp-controller 9 | spec: 10 | fetch: 11 | - git: 12 | url: #@ data.values.git_url 13 | ref: #@ data.values.git_branch 14 | subPath: workload/knative/manifests 15 | secretRef: 16 | name: #@ data.values.git.secretRef 17 | template: 18 | - ytt: 19 | ignoreUnknownComments: true 20 | inline: 21 | pathsFrom: 22 | #@ for secretRef in data.values.knative_secretRef: 23 | - secretRef: 24 | name: #@ secretRef 25 | #@ end 26 | deploy: 27 | - kapp: {} -------------------------------------------------------------------------------- /workload/deploy/kuard.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: kappctrl.k14s.io/v1alpha1 5 | kind: App 6 | metadata: 7 | name: kuard 8 | namespace: kapp-controller 9 | annotations: 10 | kapp.k14s.io/change-rule.1: "delete before deleting gitops.tanzu.vmware.com/app-permissions" 11 | kapp.k14s.io/change-rule.2: "upsert after upserting gitops.tanzu.vmware.com/app-permissions" 12 | spec: 13 | serviceAccountName: kapp-controller-clusteradmin 14 | fetch: 15 | - git: 16 | url: #@ data.values.git_url 17 | ref: #@ data.values.git_branch 18 | subPath: workload/kuard/manifests 19 | secretRef: 20 | name: #@ data.values.git.secretRef 21 | template: 22 | - ytt: 23 | ignoreUnknownComments: true 24 | inline: 25 | pathsFrom: 26 | #@ for secretRef in data.values.kuard_secretRef: 27 | - secretRef: 28 | name: #@ secretRef 29 | #@ end 30 | deploy: 31 | - kapp: {} -------------------------------------------------------------------------------- /workload/deploy/letsencrypt.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: kappctrl.k14s.io/v1alpha1 5 | kind: App 6 | metadata: 7 | name: letsencrypt 8 | namespace: kapp-controller 9 | annotations: 10 | kapp.k14s.io/change-rule.1: "delete before 
deleting gitops.tanzu.vmware.com/cert-manager" 11 | kapp.k14s.io/change-rule.2: "upsert after upserting gitops.tanzu.vmware.com/cert-manager" 12 | spec: 13 | serviceAccountName: kapp-controller-clusteradmin 14 | fetch: 15 | - git: 16 | url: #@ data.values.git_url 17 | ref: #@ data.values.git_branch 18 | subPath: workload/letsencrypt/manifests 19 | secretRef: 20 | name: #@ data.values.git.secretRef 21 | template: 22 | - ytt: 23 | ignoreUnknownComments: true 24 | inline: 25 | pathsFrom: 26 | #@ for secretRef in data.values.letsencrypt_secretRef: 27 | - secretRef: 28 | name: #@ secretRef 29 | #@ end 30 | deploy: 31 | - kapp: {} -------------------------------------------------------------------------------- /workload/deploy/metrics-server.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: kappctrl.k14s.io/v1alpha1 5 | kind: App 6 | metadata: 7 | name: metrics-server 8 | namespace: kapp-controller 9 | spec: 10 | serviceAccountName: kapp-controller-clusteradmin 11 | fetch: 12 | - git: 13 | url: #@ data.values.git_url 14 | ref: #@ data.values.git_branch 15 | subPath: workload/metrics-server/manifests 16 | secretRef: 17 | name: #@ data.values.git.secretRef 18 | template: 19 | - ytt: 20 | ignoreUnknownComments: true 21 | deploy: 22 | - kapp: {} -------------------------------------------------------------------------------- /workload/deploy/monitoring.yaml.disabled: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: kappctrl.k14s.io/v1alpha1 5 | kind: App 6 | metadata: 7 | name: monitoring 8 | namespace: kapp-controller 9 | spec: 10 | serviceAccountName: kapp-controller-clusteradmin 11 | fetch: 12 | - git: 13 | url: #@ data.values.git_url 14 | ref: #@ data.values.git_branch 15 | subPath: workload/monitoring/manifests 16 | secretRef: 17 | name: #@ data.values.git.secretRef 18 | template: 19 | - ytt: 20 | ignoreUnknownComments: true 21 | inline: 22 | pathsFrom: 23 | #@ for secretRef in data.values.monitoring_secretRef: 24 | - secretRef: 25 | name: #@ secretRef 26 | #@ end 27 | deploy: 28 | - kapp: {} -------------------------------------------------------------------------------- /workload/deploy/values99.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | git_url: https://github.com/voor/cluster-api-gitops 4 | git_branch: origin/default 5 | git: 6 | secretRef: workload-git-secret 7 | 8 | contour_secretRef: 9 | - domain-values 10 | 11 | kuard_secretRef: 12 | - domain-values 13 | - kuard-client-values 14 | 15 | externaldns_secretRef: 16 | - certmanager-values 17 | - aws-values 18 | - external-dns-values 19 | 20 | dex_secretRef: 21 | - domain-values 22 | - grafana-client-values 23 | - github-client-values 24 | - dex-saml-values 25 | - sonarqube-client-values 26 | - kuard-client-values 27 | 28 | monitoring_secretRef: 29 | - domain-values 30 | - grafana-client-values 31 | 32 | letsencrypt_secretRef: 33 | - domain-values 34 | - certmanager-values 35 | 36 | knative_secretRef: 37 | - domain-values 38 | 39 | gangway_secretRef: 40 | - domain-values 41 | - gangway-values 42 | 43 | kiam_secretRef: 44 | - kiam-values 45 | - aws-values 46 | 47 | gitlab_runner_secretRef: 48 | - gitlab-runner-values -------------------------------------------------------------------------------- /workload/dex/helm/dex-values.yaml: 
-------------------------------------------------------------------------------- 1 | image: vmware-docker-tkg.bintray.io/dex 2 | imageTag: "v2.22.0_vmware.1" 3 | 4 | telemetry: true 5 | 6 | grpc: false 7 | 8 | certs: 9 | web: 10 | create: false 11 | 12 | livenessProbe: 13 | initialDelaySeconds: 2 14 | failureThreshold: 3 15 | timeoutSeconds: 2 16 | 17 | readinessProbe: 18 | initialDelaySeconds: 8 19 | failureThreshold: 3 20 | timeoutSeconds: 1 21 | 22 | config: 23 | issuer: http://dex.example.com:8080 24 | 25 | connectors: [] 26 | -------------------------------------------------------------------------------- /workload/dex/helm/generate-manifests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -eux 3 | 4 | helm repo add stable https://kubernetes-charts.storage.googleapis.com/ 5 | helm repo update 6 | 7 | helm template dex stable/dex -f dex/helm/dex-values.yaml --namespace sso | ytt --ignore-unknown-comments -f- -f dex/helm/overlay-helmtemplate.yaml > dex/manifests/dex.yaml -------------------------------------------------------------------------------- /workload/dex/helm/overlay-helmtemplate.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | 3 | #@overlay/match by=overlay.all,expects="0+" 4 | --- 5 | metadata: 6 | #@overlay/match missing_ok=True 7 | namespace: sso 8 | 9 | #@overlay/match by=overlay.subset({"kind": "Secret", "metadata":{"name":"dex", "labels": { "app.kubernetes.io/managed-by": "Helm" }}}) 10 | --- 11 | #@overlay/remove 12 | 13 | --- 14 | 15 | #@overlay/match by=overlay.subset({"kind": "ClusterRoleBinding"}) 16 | --- 17 | subjects: 18 | #@overlay/match by=overlay.all,expects="1+" 19 | - 20 | namespace: sso 21 | 22 | #@overlay/match by=overlay.subset({"kind": "RoleBinding"}) 23 | --- 24 | subjects: 25 | #@overlay/match by=overlay.all,expects="1+" 26 | - 27 | namespace: sso 28 | -------------------------------------------------------------------------------- /workload/dex/manifests/dex-additional.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: v1 5 | kind: Namespace 6 | metadata: 7 | name: sso 8 | --- 9 | apiVersion: projectcontour.io/v1 10 | kind: HTTPProxy 11 | metadata: 12 | name: dex 13 | namespace: sso 14 | spec: 15 | virtualhost: 16 | fqdn: #@ "login.sso.{}".format(data.values.domain) 17 | tls: 18 | secretName: #@ "sso/sso.{}".format(data.values.domain) 19 | routes: 20 | - services: 21 | - name: dex 22 | port: 32000 23 | -------------------------------------------------------------------------------- /workload/dex/manifests/dex-config.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: v1 5 | kind: Secret 6 | metadata: 7 | labels: 8 | app.kubernetes.io/name: dex 9 | helm.sh/chart: dex-2.10.0 10 | app.kubernetes.io/instance: dex 11 | app.kubernetes.io/version: 2.23.0 12 | app.kubernetes.io/managed-by: Helm 13 | annotations: 14 | kapp.k14s.io/versioned: "" 15 | kapp.k14s.io/num-versions: "4" 16 | name: dex 17 | namespace: sso 18 | stringData: 19 | #@yaml/text-templated-strings 20 | config.yaml: | 21 | issuer: https://login.sso.(@= data.values.domain @) 22 | frontend: 23 | theme: tkg 24 | web: 25 | http: 0.0.0.0:5556 26 | expiry: 27 | signingKeys: "10m" 28 | idTokens: "5m" 29 | logger: 30 | level: "debug" 31 | format: "json" 32 | oauth2: 
33 | skipApprovalScreen: true 34 | storage: 35 | type: kubernetes 36 | config: 37 | inCluster: true 38 | connectors: 39 | - type: github 40 | id: github 41 | name: GitHub 42 | config: 43 | clientID: (@= data.values.github_client_id @) 44 | clientSecret: (@= data.values.github_client_secret @) 45 | redirectURI: https://login.sso.(@= data.values.domain @)/callback 46 | orgs: 47 | - name: (@= data.values.github_org @) 48 | - name: (@= data.values.github_org @)-Teams 49 | teams: 50 | - (@= data.values.github_team @) 51 | - type: saml 52 | id: saml 53 | name: SAML 54 | config: 55 | ssoURL: (@= data.values.saml_url @) 56 | caData: (@= data.values.saml_caData @) 57 | usernameAttr: name 58 | emailAttr: email 59 | groupsAttr: groups 60 | redirectURI: https://login.sso.(@= data.values.domain @)/callback 61 | staticClients: 62 | - id: (@= data.values.sonarqube_client_id @) 63 | secret: (@= data.values.sonarqube_client_secret @) 64 | name: 'SonarQube' 65 | redirectURIs: 66 | - 'https://sonar.apps.(@= data.values.domain @)/oauth2/callback' 67 | - id: (@= data.values.kuard_client_id @) 68 | secret: (@= data.values.kuard_client_secret @) 69 | name: 'Kuard' 70 | redirectURIs: 71 | - 'https://kuard.apps.(@= data.values.domain @)/oauth2/callback' 72 | - id: (@= data.values.grafana_client_id @) 73 | secret: (@= data.values.grafana_client_secret @) 74 | name: 'Grafana' 75 | redirectURIs: 76 | - 'https://grafana.monitoring.(@= data.values.domain @)/login/generic_oauth' -------------------------------------------------------------------------------- /workload/dex/manifests/dex-values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | 4 | #! This is overridden by kapp-controller. 5 | domain: "" 6 | 7 | github_org: Tanzu-Solutions-Engineering 8 | github_team: pa-federal 9 | 10 | github_client_id: "" 11 | github_client_secret: "" 12 | 13 | grafana_client_id: "" 14 | grafana_client_secret: "" 15 | 16 | sonarqube_client_id: "" 17 | sonarqube_client_secret: "" 18 | 19 | kuard_cookie_secret: "" 20 | kuard_client_id: "" 21 | kuard_client_secret: "" 22 | 23 | saml_url: "" 24 | saml_caData: "" -------------------------------------------------------------------------------- /workload/dex/manifests/kapp-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kapp.k14s.io/v1alpha1 2 | kind: Config 3 | ownershipLabelRules: 4 | - path: [spec, template, metadata, labels] 5 | resourceMatchers: 6 | - apiVersionKindMatcher: 7 | { apiVersion: bitnami.com/v1alpha1, kind: SealedSecret } 8 | -------------------------------------------------------------------------------- /workload/external-dns/helm/external-dns-values.yaml: -------------------------------------------------------------------------------- 1 | # image: 2 | # registry: docker.io 3 | # repository: bitnami/external-dns 4 | # tag: 0.7.3-debian-10-r0 5 | 6 | # provider: aws 7 | provider: cloudflare 8 | 9 | sources: 10 | - crd 11 | - service 12 | - contour-httpproxy 13 | 14 | crd: 15 | create: true 16 | 17 | aws: 18 | region: I-am-replaced-by-an-overlay-by-ytt 19 | 20 | cloudflare: 21 | secretName: cloudflare-api 22 | email: I-am-replaced-by-an-overlay-by-ytt 23 | proxied: false 24 | 25 | txtPrefix: txt 26 | 27 | rbac: 28 | pspEnabled: true 29 | 30 | metrics: 31 | enabled: true 32 | ## Metrics exporter pod Annotation and Labels 33 | ## 34 | # podAnnotations: 35 | # prometheus.io/scrape: "true" 36 | # prometheus.io/port: "7979" 37 | 38 | ## Prometheus 
Operator ServiceMonitor configuration 39 | ## 40 | serviceMonitor: 41 | enabled: false 42 | ## Namespace in which Prometheus is running 43 | ## 44 | namespace: monitoring 45 | 46 | podAnnotations: 47 | iam.amazonaws.com/role: -------------------------------------------------------------------------------- /workload/external-dns/helm/generate-manifests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -eux 3 | 4 | helm repo add bitnami https://charts.bitnami.com/bitnami 5 | helm repo update 6 | 7 | helm template external-dns bitnami/external-dns -f external-dns/helm/external-dns-values.yaml --namespace external-dns --include-crds | ytt --ignore-unknown-comments -f- -f external-dns/helm/overlay-helmtemplate.yaml > external-dns/manifests/external-dns.yaml -------------------------------------------------------------------------------- /workload/external-dns/helm/overlay-helmtemplate.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | 3 | #@overlay/match by=overlay.not_op(overlay.subset({"kind": "ServiceMonitor"})),expects="0+" 4 | --- 5 | metadata: 6 | #@overlay/match missing_ok=True 7 | namespace: external-dns -------------------------------------------------------------------------------- /workload/external-dns/manifests/external-dns-config.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: v1 5 | kind: Namespace 6 | metadata: 7 | name: external-dns 8 | annotations: 9 | iam.amazonaws.com/permitted: ".*" 10 | --- 11 | apiVersion: v1 12 | kind: Secret 13 | metadata: 14 | name: cloudflare-api 15 | namespace: external-dns 16 | type: Opaque 17 | stringData: 18 | cloudflare_api_token: #@ data.values.cloudflare_token -------------------------------------------------------------------------------- /workload/external-dns/manifests/external-dns-overlay.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | #@ load("@ytt:data", "data") 3 | 4 | #@overlay/match by=overlay.subset({"kind": "Deployment", "metadata":{"name":"external-dns"}}) 5 | --- 6 | spec: 7 | template: 8 | metadata: 9 | #@overlay/match missing_ok=True 10 | annotations: 11 | #@overlay/match missing_ok=True 12 | iam.amazonaws.com/role: #@ data.values.pod_role 13 | spec: 14 | containers: 15 | #@overlay/match by="name" 16 | - name: external-dns 17 | args: 18 | #@ if data.values.assumed_role != "": 19 | #@overlay/match by=lambda _,a,b: "--aws-assume-role=" in a, expects="0+" 20 | - #@ "--aws-assume-role={}".format(data.values.assumed_role) 21 | #@ end 22 | #@overlay/append 23 | - --aws-prefer-cname 24 | env: 25 | #@overlay/match by="name",when=1 26 | - name: CF_API_EMAIL 27 | value: #@ data.values.acme_email 28 | #@overlay/match by="name",when=1 29 | - name: AWS_DEFAULT_REGION 30 | value: #@ data.values.aws_region -------------------------------------------------------------------------------- /workload/external-dns/manifests/external-dns-values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | 4 | #! For Cloudflare 5 | acme_email: "" 6 | acme_url: "" 7 | cloudflare_token: "" 8 | 9 | #! 
For AWS 10 | iam_role: "" 11 | assumed_role: "" 12 | aws_region: "" 13 | pod_role: "" -------------------------------------------------------------------------------- /workload/external-dns/manifests/kapp-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kapp.k14s.io/v1alpha1 2 | kind: Config 3 | ownershipLabelRules: 4 | - path: [spec, template, metadata, labels] 5 | resourceMatchers: 6 | - apiVersionKindMatcher: 7 | { apiVersion: bitnami.com/v1alpha1, kind: SealedSecret } 8 | -------------------------------------------------------------------------------- /workload/gangway/manifests/gangway-values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | domain: "" 4 | 5 | auth: 6 | clientID: "" 7 | clientSecret: "" 8 | sesssionKey: "" #! (sic) any data.values reference in the gangway template must use this exact spelling 9 | authorizeURL: "" 10 | tokenURL: "" 11 | audience: "" 12 | redirectURL: "" 13 | usernameClaim: sub 14 | emailClaim: email 15 | 16 | management: 17 | cluster_name: "" 18 | apiServerURL: "" 19 | httpPath: "" -------------------------------------------------------------------------------- /workload/gitlab-runner/helm/generate-manifests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -eux 3 | 4 | helm repo add gitlab https://charts.gitlab.io 5 | helm repo update 6 | 7 | helm template gitlab-runner gitlab/gitlab-runner -f gitlab-runner/helm/gitlab-runner-values.yaml --namespace gitlab-runner | ytt --ignore-unknown-comments -f- -f gitlab-runner/helm/overlay-helmtemplate.yaml > gitlab-runner/manifests/gitlab-runner.yaml
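8 | 9 | # These rendered manifests are what kapp-controller applies via workload/deploy/gitlab-runner.yaml; 10 | # for a one-off manual install, something like `kapp -y deploy -a gitlab-runner -f gitlab-runner/manifests` should work too.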
-------------------------------------------------------------------------------- /workload/gitlab-runner/helm/gitlab-runner-values.yaml: -------------------------------------------------------------------------------- 1 | fullnameOverride: gitlab-runner 2 | 3 | image: gitlab/gitlab-runner:ubuntu-v13.5.0 4 | 5 | ## The GitLab Server URL (with protocol) that you want to register the runner against 6 | ## ref: https://docs.gitlab.com/runner/commands/README.html#gitlab-runner-register 7 | ## 8 | gitlabUrl: https://gitlab.com/ 9 | 10 | ## Set the certsSecretName in order to pass custom certificates for GitLab Runner to use 11 | ## Provide resource name for a Kubernetes Secret Object in the same namespace, 12 | ## this is used to populate the /etc/gitlab-runner/certs directory 13 | ## ref: https://docs.gitlab.com/runner/configuration/tls-self-signed.html#supported-options-for-self-signed-certificates 14 | ## 15 | #certsSecretName: 16 | 17 | ## Configure the maximum number of concurrent jobs 18 | ## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-global-section 19 | ## 20 | concurrent: 5 21 | 22 | ## Defines in seconds how often to check GitLab for a new builds 23 | ## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-global-section 24 | ## 25 | checkInterval: 30 26 | 27 | ## For RBAC support: 28 | rbac: 29 | create: true 30 | 31 | ## Run the gitlab-bastion container with the ability to deploy/manage containers of jobs 32 | ## cluster-wide or only within namespace 33 | clusterWideAccess: false 34 | 35 | ## If RBAC is disabled in this Helm chart, use the following Kubernetes Service Account name. 36 | ## 37 | # serviceAccountName: default 38 | 39 | ## Configuration for the Pods that the runner launches for each new job 40 | ## 41 | runners: 42 | ## Default container image to use for builds when none is specified 43 | ## 44 | image: ubuntu:18.04 45 | 46 | ## Run all containers with the privileged flag enabled 47 | ## This will allow the docker:stable-dind image to run if you need to run Docker 48 | ## commands. Please read the docs before turning this on: 49 | ## ref: https://docs.gitlab.com/runner/executors/kubernetes.html#using-docker-dind 50 | ## 51 | privileged: false 52 | 53 | ## Namespace to run Kubernetes jobs in (defaults to 'default') 54 | ## 55 | # namespace: 56 | 57 | ## Build Container specific configuration 58 | ## 59 | builds: 60 | # cpuLimit: 200m 61 | # memoryLimit: 256Mi 62 | cpuRequests: 100m 63 | memoryRequests: 128Mi 64 | 65 | ## Service Container specific configuration 66 | ## 67 | services: 68 | # cpuLimit: 200m 69 | # memoryLimit: 256Mi 70 | cpuRequests: 100m 71 | memoryRequests: 128Mi 72 | 73 | ## Helper Container specific configuration 74 | ## 75 | helpers: 76 | # cpuLimit: 200m 77 | # memoryLimit: 256Mi 78 | cpuRequests: 100m 79 | memoryRequests: 128Mi 80 | 81 | securityContext: 82 | runAsUser: 999 83 | fsGroup: 999 -------------------------------------------------------------------------------- /workload/gitlab-runner/helm/overlay-helmtemplate.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | 3 | #@overlay/match by=overlay.all,expects="0+" 4 | --- 5 | metadata: 6 | #@overlay/match missing_ok=True 7 | namespace: gitlab-runner 8 | -------------------------------------------------------------------------------- /workload/gitlab-runner/manifests/gitlab-runner-additional.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | #@ load("@ytt:overlay", "overlay") 3 | 4 | --- 5 | apiVersion: v1 6 | kind: Namespace 7 | metadata: 8 | name: gitlab-runner 9 | annotations: 10 | iam.amazonaws.com/permitted: ".*" 11 | --- 12 | apiVersion: v1 13 | kind: Secret 14 | metadata: 15 | name: gitlab-runner 16 | namespace: gitlab-runner 17 | type: Opaque 18 | stringData: 19 | runner-registration-token: #@ data.values.registration_token 20 | runner-token: "" 21 | 22 | #@overlay/match by=overlay.subset({"kind": "Deployment", "metadata":{"name":"gitlab-runner"}}) 23 | --- 24 | spec: 25 | template: 26 | #@ if data.values.pod_role: 27 | metadata: 28 | #@overlay/match missing_ok=True 29 | annotations: 30 | #@overlay/match missing_ok=True 31 | iam.amazonaws.com/role: #@ data.values.pod_role 32 | #@ end 33 | spec: 34 | initContainers: 35 | #@overlay/match by="name" 36 | - name: configure 37 | env: 38 | #@overlay/match by="name" 39 | - name: CI_SERVER_URL 40 | value: #@ data.values.CI_SERVER_URL 41 | containers: 42 | #@overlay/match by="name" 43 | - name: gitlab-runner 44 | env: 45 | #@overlay/match by="name" 46 | - name: CI_SERVER_URL 47 | value: #@ data.values.CI_SERVER_URL -------------------------------------------------------------------------------- /workload/gitlab-runner/manifests/gitlab-runner-values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | 4 | CI_SERVER_URL: 5 | pod_role: 6 | registration_token: -------------------------------------------------------------------------------- /workload/jupyterlab-auth/Dockerfile: 
-------------------------------------------------------------------------------- 1 | FROM gcr.io/pa-rvanvoorhees/jupyter/datascience-notebook:notebook-6.2.0 2 | 3 | RUN echo "c.ServerApp.password = ''" >> ~/.jupyter/jupyter_notebook_config.py \ 4 | && echo "c.ServerApp.token = ''" >> ~/.jupyter/jupyter_notebook_config.py -------------------------------------------------------------------------------- /workload/jupyterlab-auth/manifests/00-values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | 4 | groups: 5 | fqdn: 6 | 7 | certSecretName: 8 | 9 | proxy: 10 | env: 11 | OAUTH2_PROXY_CLIENT_ID: 12 | OAUTH2_PROXY_CLIENT_SECRET: 13 | OAUTH2_PROXY_COOKIE_SECRET: -------------------------------------------------------------------------------- /workload/jupyterlab-auth/manifests/kbld-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kbld.k14s.io/v1alpha1 3 | kind: Config 4 | minimumRequiredVersion: 0.24.0 5 | overrides: 6 | - image: datascience-notebook 7 | newImage: gcr.io/pa-rvanvoorhees/datascience-notebook:notebook-6.2.0-notoken 8 | preresolved: true 9 | - image: oauth2-proxy 10 | newImage: gcr.io/pa-rvanvoorhees/bitnami/oauth2-proxy:7 11 | preresolved: true -------------------------------------------------------------------------------- /workload/kiam/helm/generate-manifests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -eux 3 | 4 | helm repo add uswitch https://uswitch.github.io/kiam-helm-charts/charts/ 5 | helm repo update 6 | 7 | helm template kiam uswitch/kiam -f kiam/helm/kiam-values.yaml --namespace kube-system | ytt --ignore-unknown-comments -f- -f kiam/helm/overlay-helmtemplate.yaml > kiam/manifests/kiam.yaml -------------------------------------------------------------------------------- /workload/kiam/helm/kiam-values.yaml: -------------------------------------------------------------------------------- 1 | psp: 2 | # Specifies whether PodSecurityPolicies should be created 3 | create: true 4 | 5 | agent: 6 | tlsSecret: kiam-agent-tls 7 | tlsCerts: 8 | certFileName: tls.crt 9 | keyFileName: tls.key 10 | caFileName: ca.crt 11 | 12 | prometheus: 13 | servicemonitor: 14 | enabled: false 15 | labels: {} 16 | 17 | host: 18 | iptables: true 19 | 20 | extraEnv: 21 | - name: AWS_REGION 22 | valueFrom: 23 | secretKeyRef: 24 | name: kiam-config 25 | key: region 26 | - name: DEFAULT_AWS_REGION 27 | valueFrom: 28 | secretKeyRef: 29 | name: kiam-config 30 | key: region 31 | 32 | server: 33 | deployment: 34 | replicas: 1 # Change this to 3 for production 35 | enabled: true 36 | 37 | sslCertHostPath: /etc/ssl/certs 38 | 39 | extraArgs: 40 | region: $(AWS_REGION) 41 | session: tanzu 42 | 43 | extraEnv: 44 | - name: AWS_REGION 45 | valueFrom: 46 | secretKeyRef: 47 | name: kiam-config 48 | key: region 49 | - name: DEFAULT_AWS_REGION 50 | valueFrom: 51 | secretKeyRef: 52 | name: kiam-config 53 | key: region 54 | - name: ASSUME_ROLE_ARN 55 | valueFrom: 56 | secretKeyRef: 57 | name: kiam-config 58 | key: roleArn 59 | 60 | tlsSecret: kiam-server-tls 61 | tlsCerts: 62 | certFileName: tls.crt 63 | keyFileName: tls.key 64 | caFileName: ca.crt 65 | 66 | roleBaseArn: null 67 | 68 | assumeRoleArn: $(ASSUME_ROLE_ARN) 69 | 70 | prometheus: 71 | servicemonitor: 72 | enabled: false 73 | labels: {} 74 | 75 | nodeSelector: 76 | node-role.kubernetes.io/master: "" 77 | 78 | tolerations: 79 | - effect: NoSchedule 80 | 
key: node-role.kubernetes.io/master 81 | # these tolerations are added to DaemonSets automatically; since the kiam server runs as a Deployment we add them manually: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/#taints-and-tolerations 82 | - key: "node-role.kubernetes.io/not-ready" 83 | effect: "NoSchedule" 84 | operator: "Exists" 85 | - key: "node-role.kubernetes.io/unreachable" 86 | effect: "NoSchedule" 87 | operator: "Exists" 88 | - key: "node-role.kubernetes.io/disk-pressure" 89 | effect: "NoSchedule" 90 | operator: "Exists" 91 | - key: "node-role.kubernetes.io/memory-pressure" 92 | effect: "NoSchedule" 93 | operator: "Exists" 94 | - key: "node-role.kubernetes.io/unschedulable" 95 | effect: "NoSchedule" 96 | operator: "Exists" 97 | - key: "node-role.kubernetes.io/network-unavailable" 98 | effect: "NoSchedule" 99 | operator: "Exists" -------------------------------------------------------------------------------- /workload/kiam/helm/overlay-helmtemplate.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | 3 | #@overlay/match by=overlay.all,expects="0+" 4 | --- 5 | metadata: 6 | #@overlay/match missing_ok=True 7 | namespace: kube-system 8 | -------------------------------------------------------------------------------- /workload/kiam/manifests/kiam-certs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1alpha2 2 | kind: Issuer 3 | metadata: 4 | name: kiam-selfsigned-issuer 5 | namespace: kube-system 6 | spec: 7 | selfSigned: {} 8 | 9 | --- 10 | apiVersion: cert-manager.io/v1alpha2 11 | kind: Certificate 12 | metadata: 13 | name: kiam-serving-cert 14 | namespace: kube-system 15 | spec: 16 | secretName: kiam-ca-tls 17 | commonName: "kiam-ca" 18 | isCA: true 19 | issuerRef: 20 | name: kiam-selfsigned-issuer 21 | usages: 22 | - "any" 23 | 24 | --- 25 | apiVersion: cert-manager.io/v1alpha2 26 | kind: Issuer 27 | metadata: 28 | name: kiam-ca-issuer 29 | namespace: kube-system 30 | spec: 31 | ca: 32 | secretName: kiam-ca-tls 33 | 34 | --- 35 | apiVersion: cert-manager.io/v1alpha2 36 | kind: Certificate 37 | metadata: 38 | name: kiam-agent 39 | namespace: kube-system 40 | spec: 41 | secretName: kiam-agent-tls 42 | commonName: agent 43 | issuerRef: 44 | name: kiam-ca-issuer 45 | usages: 46 | - "any" 47 | 48 | --- 49 | apiVersion: cert-manager.io/v1alpha2 50 | kind: Certificate 51 | metadata: 52 | name: kiam-server 53 | namespace: kube-system 54 | spec: 55 | secretName: kiam-server-tls 56 | issuerRef: 57 | name: kiam-ca-issuer 58 | usages: 59 | - "any" 60 | dnsNames: 61 | - "localhost" 62 | - "kiam-server" 63 | - kiam-server.kube-system.svc 64 | - kiam-server.kube-system.svc.cluster.local 65 | ipAddresses: 66 | - "127.0.0.1" -------------------------------------------------------------------------------- /workload/kiam/manifests/kiam-config.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: v1 5 | kind: Secret 6 | metadata: 7 | name: kiam-config 8 | namespace: kube-system 9 | type: Opaque 10 | stringData: 11 | region: #@ data.values.aws_region 12 | roleArn: #@ data.values.kiam_role_arn -------------------------------------------------------------------------------- /workload/kiam/manifests/kiam-values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | 4 | aws_region: "" 5 | kiam_role_arn: ""
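With the kiam server and agent deployed, workloads obtain AWS credentials through annotations rather than node roles. A minimal sketch under hypothetical names (the same annotation pair is used for real in gitlab-runner-additional.yaml above): the namespace whitelists assumable roles via iam.amazonaws.com/permitted, and each pod requests one via iam.amazonaws.com/role.

apiVersion: v1
kind: Namespace
metadata:
  name: example-app                    # hypothetical namespace
  annotations:
    iam.amazonaws.com/permitted: ".*"  # regex of role names pods in this namespace may assume
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-app
  namespace: example-app
spec:
  selector:
    matchLabels: {app: example-app}
  template:
    metadata:
      labels: {app: example-app}
      annotations:
        iam.amazonaws.com/role: example-app-role  # hypothetical role; kiam intercepts metadata-API calls from these pods and assumes it
    spec:
      containers:
      - name: app
        image: example/app:latest      # hypothetical image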
-------------------------------------------------------------------------------- /workload/knative/manifests/overlay-knative.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | #@ load("@ytt:data", "data") 3 | 4 | --- 5 | #@overlay/match by=overlay.subset({"metadata":{"name":"config-certmanager"}}) 6 | --- 7 | data: 8 | #@overlay/remove missing_ok=True 9 | _example: 10 | #@overlay/match missing_ok=True 11 | issuerRef: | 12 | kind: ClusterIssuer 13 | name: letsencrypt-issuer 14 | 15 | #@overlay/match by=overlay.subset({"metadata":{"name":"config-network"}}) 16 | --- 17 | data: 18 | #@overlay/remove missing_ok=True 19 | _example: 20 | #@overlay/match missing_ok=True 21 | autoTLS: "Enabled" 22 | #@overlay/match missing_ok=True 23 | ingress.class: contour.ingress.networking.knative.dev 24 | #@overlay/match missing_ok=True 25 | httpProtocol: "Redirected" 26 | 27 | #@overlay/match by=overlay.subset({"metadata":{"name":"config-contour"}}) 28 | --- 29 | data: 30 | #@overlay/remove missing_ok=True 31 | _example: 32 | #@overlay/match missing_ok=True 33 | visibility: | 34 | ExternalIP: 35 | class: contour 36 | service: contour/envoy 37 | ClusterLocal: 38 | class: contour 39 | service: contour/envoy 40 | 41 | #@overlay/match by=overlay.subset({"metadata":{"name":"config-domain"}}) 42 | --- 43 | data: 44 | #@overlay/remove missing_ok=True 45 | _example: 46 | #@overlay/match missing_ok=True 47 | #@yaml/text-templated-strings 48 | (@= data.values.domain @): "" -------------------------------------------------------------------------------- /workload/knative/manifests/serving-nscert.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2019 The Knative Authors 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | apiVersion: apps/v1 16 | kind: Deployment 17 | metadata: 18 | name: networking-ns-cert 19 | namespace: knative-serving 20 | labels: 21 | serving.knative.dev/release: "v0.13.1" 22 | networking.knative.dev/wildcard-certificate-provider: nscert 23 | spec: 24 | selector: 25 | matchLabels: 26 | app: networking-ns-cert 27 | template: 28 | metadata: 29 | annotations: 30 | cluster-autoscaler.kubernetes.io/safe-to-evict: "true" 31 | labels: 32 | app: networking-ns-cert 33 | serving.knative.dev/release: "v0.13.1" 34 | spec: 35 | serviceAccountName: controller 36 | containers: 37 | - name: networking-nscert 38 | # This is the Go import path for the binary that is containerized 39 | # and substituted here. 
40 | image: gcr.io/knative-releases/knative.dev/serving/cmd/networking/nscert@sha256:981ea424b6c73260a3e932b89ef65fe6cbeff4c1d19749f4b946743e6df8266a 41 | resources: 42 | requests: 43 | cpu: 30m 44 | memory: 40Mi 45 | limits: 46 | cpu: 300m 47 | memory: 400Mi 48 | env: 49 | - name: SYSTEM_NAMESPACE 50 | valueFrom: 51 | fieldRef: 52 | fieldPath: metadata.namespace 53 | - name: CONFIG_LOGGING_NAME 54 | value: config-logging 55 | - name: CONFIG_OBSERVABILITY_NAME 56 | value: config-observability 57 | - # TODO(https://github.com/knative/pkg/pull/953): Remove stackdriver specific config 58 | name: METRICS_DOMAIN 59 | value: knative.dev/serving 60 | securityContext: 61 | allowPrivilegeEscalation: false 62 | ports: 63 | - name: metrics 64 | containerPort: 9090 65 | - name: profiling 66 | containerPort: 8008 67 | --- 68 | apiVersion: v1 69 | kind: Service 70 | metadata: 71 | labels: 72 | app: networking-ns-cert 73 | serving.knative.dev/release: "v0.13.1" 74 | networking.knative.dev/wildcard-certificate-provider: nscert 75 | name: networking-ns-cert 76 | namespace: knative-serving 77 | spec: 78 | ports: 79 | - # Define metrics and profiling for them to be accessible within service meshes. 80 | name: http-metrics 81 | port: 9090 82 | targetPort: 9090 83 | - name: http-profiling 84 | port: 8008 85 | targetPort: 8008 86 | selector: 87 | app: networking-ns-cert 88 | 89 | --- -------------------------------------------------------------------------------- /workload/knative/manifests/serving-values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | 4 | #! This is overridden by kapp controller. 5 | domain: "" -------------------------------------------------------------------------------- /workload/kuard/helm/generate-manifests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -eux 3 | 4 | helm repo add stable https://kubernetes-charts.storage.googleapis.com/ 5 | helm repo update 6 | 7 | helm template kuard-proxy stable/oauth2-proxy -f kuard/helm/oauth2-proxy-values.yaml --namespace kuard | ytt --ignore-unknown-comments -f- -f kuard/helm/overlay-helmtemplate.yaml > kuard/manifests/kuard-proxy.yaml -------------------------------------------------------------------------------- /workload/kuard/helm/oauth2-proxy-values.yaml: -------------------------------------------------------------------------------- 1 | config: 2 | existingSecret: kuard-proxy-oauth2-proxy 3 | existingConfig: kuard-proxy-oauth2-proxy 4 | 5 | securityContext: 6 | enabled: true 7 | -------------------------------------------------------------------------------- /workload/kuard/helm/overlay-helmtemplate.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | 3 | #@overlay/match by=overlay.all,expects="0+" 4 | --- 5 | metadata: 6 | #@overlay/match missing_ok=True 7 | namespace: apps 8 | 9 | -------------------------------------------------------------------------------- /workload/kuard/manifests/kuard-additional.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: projectcontour.io/v1 5 | kind: HTTPProxy 6 | metadata: 7 | name: kuard-kuard 8 | namespace: apps 9 | spec: 10 | virtualhost: 11 | fqdn: #@ "kuard.apps.{}".format(data.values.domain) 12 | tls: 13 | secretName: #@ "apps/apps.{}".format(data.values.domain) 14 | routes: 15 | - services: 16
| - name: kuard-proxy-oauth2-proxy 17 | port: 80 18 | --- 19 | apiVersion: v1 20 | kind: ConfigMap 21 | metadata: 22 | labels: 23 | app: oauth2-proxy 24 | chart: oauth2-proxy-3.2.2 25 | heritage: Helm 26 | release: kuard-proxy 27 | name: kuard-proxy-oauth2-proxy 28 | namespace: apps 29 | data: 30 | #@yaml/text-templated-strings 31 | oauth2_proxy.cfg: |- 32 | provider = "oidc" 33 | provider_display_name = "Dex" 34 | skip_provider_button = true 35 | redirect_url = "oauth2/callback" 36 | upstreams = [ "http://kuard:8080" ] 37 | oidc_issuer_url = "(@= "https://login.sso.{}".format(data.values.domain) @)" 38 | pass_basic_auth = true 39 | pass_user_headers = true 40 | pass_host_header = true 41 | email_domains = [ 42 | "*" 43 | ] 44 | --- 45 | apiVersion: v1 46 | kind: Secret 47 | metadata: 48 | labels: 49 | app: oauth2-proxy 50 | chart: oauth2-proxy-3.2.2 51 | heritage: Helm 52 | release: kuard-proxy 53 | name: kuard-proxy-oauth2-proxy 54 | namespace: apps 55 | type: Opaque 56 | stringData: 57 | cookie-secret: #@ data.values.kuard_cookie_secret 58 | client-id: #@ data.values.kuard_client_id 59 | client-secret: #@ data.values.kuard_client_secret -------------------------------------------------------------------------------- /workload/kuard/manifests/kuard-proxy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app: oauth2-proxy 6 | chart: oauth2-proxy-3.2.2 7 | release: kuard-proxy 8 | heritage: Helm 9 | name: kuard-proxy-oauth2-proxy 10 | namespace: apps 11 | --- 12 | apiVersion: v1 13 | kind: Service 14 | metadata: 15 | labels: 16 | app: oauth2-proxy 17 | chart: oauth2-proxy-3.2.2 18 | release: kuard-proxy 19 | heritage: Helm 20 | name: kuard-proxy-oauth2-proxy 21 | namespace: apps 22 | spec: 23 | type: ClusterIP 24 | ports: 25 | - port: 80 26 | targetPort: http 27 | protocol: TCP 28 | name: http 29 | selector: 30 | app: oauth2-proxy 31 | release: kuard-proxy 32 | --- 33 | apiVersion: apps/v1 34 | kind: Deployment 35 | metadata: 36 | labels: 37 | app: oauth2-proxy 38 | chart: oauth2-proxy-3.2.2 39 | heritage: Helm 40 | release: kuard-proxy 41 | name: kuard-proxy-oauth2-proxy 42 | namespace: apps 43 | spec: 44 | replicas: 1 45 | selector: 46 | matchLabels: 47 | app: oauth2-proxy 48 | release: kuard-proxy 49 | template: 50 | metadata: 51 | annotations: 52 | checksum/config: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b 53 | checksum/config-emails: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b 54 | checksum/secret: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 55 | checksum/google-secret: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 56 | labels: 57 | app: oauth2-proxy 58 | release: kuard-proxy 59 | spec: 60 | serviceAccountName: kuard-proxy-oauth2-proxy 61 | containers: 62 | - name: oauth2-proxy 63 | image: quay.io/pusher/oauth2_proxy:v5.1.0 64 | imagePullPolicy: IfNotPresent 65 | args: 66 | - --http-address=0.0.0.0:4180 67 | - --config=/etc/oauth2_proxy/oauth2_proxy.cfg 68 | env: 69 | - name: OAUTH2_PROXY_CLIENT_ID 70 | valueFrom: 71 | secretKeyRef: 72 | name: kuard-proxy-oauth2-proxy 73 | key: client-id 74 | - name: OAUTH2_PROXY_CLIENT_SECRET 75 | valueFrom: 76 | secretKeyRef: 77 | name: kuard-proxy-oauth2-proxy 78 | key: client-secret 79 | - name: OAUTH2_PROXY_COOKIE_SECRET 80 | valueFrom: 81 | secretKeyRef: 82 | name: kuard-proxy-oauth2-proxy 83 | key: cookie-secret 84 | ports: 85 | - containerPort: 4180 86 
| name: http 87 | protocol: TCP 88 | livenessProbe: 89 | httpGet: 90 | path: /ping 91 | port: http 92 | scheme: HTTP 93 | initialDelaySeconds: 0 94 | timeoutSeconds: 1 95 | readinessProbe: 96 | httpGet: 97 | path: /ping 98 | port: http 99 | scheme: HTTP 100 | initialDelaySeconds: 0 101 | timeoutSeconds: 1 102 | successThreshold: 1 103 | periodSeconds: 10 104 | resources: {} 105 | volumeMounts: 106 | - mountPath: /etc/oauth2_proxy 107 | name: configmain 108 | securityContext: 109 | runAsNonRoot: true 110 | volumes: 111 | - configMap: 112 | defaultMode: 420 113 | name: kuard-proxy-oauth2-proxy 114 | name: configmain 115 | tolerations: [] 116 | -------------------------------------------------------------------------------- /workload/kuard/manifests/kuard-values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | 4 | #! This is overridden by kapp controller. 5 | domain: "" 6 | 7 | #! Proxy configuration. 8 | kuard_cookie_secret: "" 9 | kuard_client_id: "" 10 | kuard_client_secret: "" -------------------------------------------------------------------------------- /workload/kuard/manifests/kuard.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: apps/v1 5 | kind: Deployment 6 | metadata: 7 | labels: 8 | app: kuard 9 | name: kuard 10 | namespace: apps 11 | spec: 12 | replicas: 3 13 | selector: 14 | matchLabels: 15 | app: kuard 16 | template: 17 | metadata: 18 | labels: 19 | app: kuard 20 | spec: 21 | containers: 22 | - image: gcr.io/kuar-demo/kuard-amd64:1 23 | name: kuard 24 | resources: 25 | limits: 26 | cpu: 200m 27 | memory: 200Mi 28 | requests: 29 | cpu: 100m 30 | memory: 100Mi 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | labels: 36 | app: kuard 37 | name: kuard 38 | namespace: apps 39 | spec: 40 | ports: 41 | - port: 8080 42 | protocol: TCP 43 | targetPort: 8080 44 | selector: 45 | app: kuard 46 | sessionAffinity: None 47 | type: ClusterIP 48 | --- -------------------------------------------------------------------------------- /workload/kube-bench/job-master.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | name: kube-bench-master 6 | spec: 7 | template: 8 | spec: 9 | hostPID: true 10 | nodeSelector: 11 | node-role.kubernetes.io/master: "" 12 | tolerations: 13 | - key: node-role.kubernetes.io/master 14 | operator: Exists 15 | effect: NoSchedule 16 | containers: 17 | - name: kube-bench 18 | image: aquasec/kube-bench:latest 19 | command: ["kube-bench", "master"] 20 | volumeMounts: 21 | - name: var-lib-etcd 22 | mountPath: /var/lib/etcd 23 | readOnly: true 24 | - name: etc-kubernetes 25 | mountPath: /etc/kubernetes 26 | readOnly: true 27 | # /usr/local/mount-from-host/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version. 28 | # You can omit this mount if you specify --version as part of the command.
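# For example, pinning the benchmark to a version instead of auto-detecting it (version number is illustrative):
#   command: ["kube-bench", "master", "--version", "1.18"]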
29 | - name: usr-bin 30 | mountPath: /usr/local/mount-from-host/bin 31 | readOnly: true 32 | restartPolicy: Never 33 | volumes: 34 | - name: var-lib-etcd 35 | hostPath: 36 | path: "/var/lib/etcd" 37 | - name: etc-kubernetes 38 | hostPath: 39 | path: "/etc/kubernetes" 40 | - name: usr-bin 41 | hostPath: 42 | path: "/usr/bin" 43 | -------------------------------------------------------------------------------- /workload/kube-bench/job-node.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | name: kube-bench-node 6 | spec: 7 | template: 8 | spec: 9 | hostPID: true 10 | containers: 11 | - name: kube-bench 12 | image: aquasec/kube-bench:latest 13 | command: ["kube-bench", "node"] 14 | volumeMounts: 15 | - name: var-lib-kubelet 16 | mountPath: /var/lib/kubelet 17 | readOnly: true 18 | - name: etc-systemd 19 | mountPath: /etc/systemd 20 | readOnly: true 21 | - name: etc-kubernetes 22 | mountPath: /etc/kubernetes 23 | readOnly: true 24 | # /usr/local/mount-from-host/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version. 25 | # You can omit this mount if you specify --version as part of the command. 26 | - name: usr-bin 27 | mountPath: /usr/local/mount-from-host/bin 28 | readOnly: true 29 | restartPolicy: Never 30 | volumes: 31 | - name: var-lib-kubelet 32 | hostPath: 33 | path: "/var/lib/kubelet" 34 | - name: etc-systemd 35 | hostPath: 36 | path: "/etc/systemd" 37 | - name: etc-kubernetes 38 | hostPath: 39 | path: "/etc/kubernetes" 40 | - name: usr-bin 41 | hostPath: 42 | path: "/usr/bin" 43 | -------------------------------------------------------------------------------- /workload/letsencrypt/manifests/letsencrypt-issuer.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: cert-manager.io/v1alpha2 5 | kind: ClusterIssuer 6 | metadata: 7 | name: letsencrypt-issuer 8 | namespace: cert-manager 9 | spec: 10 | acme: 11 | email: #@ data.values.acme_email 12 | server: #@ data.values.acme_url 13 | privateKeySecretRef: 14 | name: letsencrypt-issuer-account-key 15 | solvers: 16 | - dns01: 17 | cloudflare: 18 | email: #@ data.values.acme_email 19 | apiTokenSecretRef: 20 | name: cloudflare-api-token-secret 21 | key: api-token 22 | --- 23 | apiVersion: cert-manager.io/v1alpha2 24 | kind: Certificate 25 | metadata: 26 | name: #@ "default.{}".format(data.values.domain) 27 | namespace: default 28 | spec: 29 | secretName: #@ "default.{}".format(data.values.domain) 30 | issuerRef: 31 | name: letsencrypt-issuer 32 | kind: ClusterIssuer 33 | dnsNames: 34 | - #@ "*.default.{}".format(data.values.domain) 35 | --- 36 | apiVersion: cert-manager.io/v1alpha2 37 | kind: Certificate 38 | metadata: 39 | name: #@ "apps.{}".format(data.values.domain) 40 | namespace: apps 41 | spec: 42 | secretName: #@ "apps.{}".format(data.values.domain) 43 | issuerRef: 44 | name: letsencrypt-issuer 45 | kind: ClusterIssuer 46 | dnsNames: 47 | - #@ "*.apps.{}".format(data.values.domain) 48 | --- 49 | apiVersion: cert-manager.io/v1alpha2 50 | kind: Certificate 51 | metadata: 52 | name: #@ "management.{}".format(data.values.domain) 53 | namespace: management 54 | spec: 55 | secretName: #@ "management.{}".format(data.values.domain) 56 | issuerRef: 57 | name: letsencrypt-issuer 58 | kind: ClusterIssuer 59 | dnsNames: 60 | - #@ "*.management.{}".format(data.values.domain) 61 | --- 62 | apiVersion: cert-manager.io/v1alpha2 63 
| kind: Certificate 64 | metadata: 65 | name: #@ "sso.{}".format(data.values.domain) 66 | namespace: sso 67 | spec: 68 | secretName: #@ "sso.{}".format(data.values.domain) 69 | issuerRef: 70 | name: letsencrypt-issuer 71 | kind: ClusterIssuer 72 | dnsNames: 73 | - #@ "*.sso.{}".format(data.values.domain) 74 | 75 | #@ if data.values.monitoring_enabled: 76 | --- 77 | apiVersion: cert-manager.io/v1alpha2 78 | kind: Certificate 79 | metadata: 80 | name: #@ "monitoring.{}".format(data.values.domain) 81 | namespace: monitoring 82 | spec: 83 | secretName: #@ "monitoring.{}".format(data.values.domain) 84 | issuerRef: 85 | name: letsencrypt-issuer 86 | kind: ClusterIssuer 87 | dnsNames: 88 | - #@ "*.monitoring.{}".format(data.values.domain) 89 | #@ end 90 | 91 | --- 92 | apiVersion: v1 93 | kind: Secret 94 | metadata: 95 | name: cloudflare-api-token-secret 96 | namespace: cert-manager 97 | type: Opaque 98 | stringData: 99 | api-token: #@ data.values.cloudflare_token -------------------------------------------------------------------------------- /workload/letsencrypt/manifests/letsencrypt-values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | acme_email: "" 4 | acme_url: "" 5 | 6 | domain: "" 7 | 8 | cloudflare_token: "" 9 | 10 | monitoring_enabled: false -------------------------------------------------------------------------------- /workload/metrics-server/helm/generate-manifests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -eux 3 | 4 | helm repo add bitnami https://charts.bitnami.com/bitnami 5 | helm repo update 6 | 7 | helm template metrics-server bitnami/metrics-server -f metrics-server/helm/metrics-server-values.yaml --namespace kube-system | ytt --ignore-unknown-comments -f- -f metrics-server/helm/overlay-helmtemplate.yaml > metrics-server/manifests/metrics-server.yaml -------------------------------------------------------------------------------- /workload/metrics-server/helm/metrics-server-values.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/voor/cluster-api-gitops/e30df7fec01199f26a210ffdd422ae597314b63d/workload/metrics-server/helm/metrics-server-values.yaml -------------------------------------------------------------------------------- /workload/metrics-server/helm/overlay-helmtemplate.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | 3 | #@overlay/match by=overlay.all,expects="0+" 4 | --- 5 | metadata: 6 | #@overlay/match missing_ok=True 7 | namespace: kube-system -------------------------------------------------------------------------------- /workload/monitoring/helm/generate-manifests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -eux 3 | 4 | helm repo add bitnami https://charts.bitnami.com/bitnami 5 | helm repo update 6 | 7 | helm template monitoring bitnami/kube-prometheus -f monitoring/helm/monitoring-values.yaml --include-crds | ytt --ignore-unknown-comments -f- -f monitoring/helm/overlay-helmtemplate.yaml > monitoring/manifests/prometheus-operator.yaml 8 | helm template grafana bitnami/grafana -f monitoring/helm/grafana-values.yaml --include-crds | ytt --ignore-unknown-comments -f- -f monitoring/helm/grafana-overlay-helmtemplate.yaml > monitoring/manifests/grafana.yaml 
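The relative paths in these generate scripts (monitoring/helm/..., monitoring/manifests/...) suggest they are meant to be run from the workload directory; a usage sketch, assuming helm 3 and ytt are on the PATH:

cd workload
sh monitoring/helm/generate-manifests.sh  # writes monitoring/manifests/prometheus-operator.yaml and grafana.yaml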
-------------------------------------------------------------------------------- /workload/monitoring/helm/grafana-overlay-helmtemplate.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | 3 | -------------------------------------------------------------------------------- /workload/monitoring/helm/grafana-values.yaml: -------------------------------------------------------------------------------- 1 | image: 2 | registry: docker.io 3 | repository: bitnami/grafana 4 | tag: 7.1.3-debian-10-r0 -------------------------------------------------------------------------------- /workload/monitoring/helm/monitoring-values.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | imageRegistry: docker.io 3 | 4 | operator: 5 | image: 6 | repository: bitnami/prometheus-operator 7 | tag: 0.41.0-debian-10-r5 8 | 9 | configmapReload: 10 | image: 11 | repository: bitnami/configmap-reload 12 | tag: 0.4.0-debian-10-r18 13 | 14 | prometheus: 15 | image: 16 | repository: bitnami/prometheus 17 | tag: 2.20.1-debian-10-r0 18 | 19 | thanos: 20 | ## Create a Thanos Sidecar container 21 | ## 22 | create: false 23 | ## Bitnami Thanos image 24 | ## ref: https://hub.docker.com/r/bitnami/thanos/tags/ 25 | ## 26 | image: 27 | repository: bitnami/thanos 28 | tag: 0.14.0-scratch-r3 29 | 30 | alertmanager: 31 | image: 32 | repository: bitnami/alertmanager 33 | tag: 0.21.0-debian-10-r41 34 | 35 | node-exporter: 36 | image: 37 | repository: bitnami/node-exporter 38 | tag: 1.0.1-debian-10-r40 39 | 40 | serviceMonitor: 41 | enabled: false 42 | 43 | kube-state-metrics: 44 | image: 45 | repository: bitnami/kube-state-metrics 46 | tag: 1.9.7-debian-10-r51 47 | 48 | serviceMonitor: 49 | enabled: false -------------------------------------------------------------------------------- /workload/monitoring/helm/overlay-helmtemplate.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | 3 | #@overlay/match by=overlay.subset({"kind": "Deployment", "metadata": { "name": "monitoring-prometheus-oper-operator" }}) 4 | --- 5 | spec: 6 | template: 7 | spec: 8 | containers: 9 | #@overlay/match by="name" 10 | - name: prometheus-operator 11 | args: 12 | #@overlay/match by=lambda _,a,b: "--config-reloader-image=" in a, expects="0+" 13 | - --config-reloader-image=$(CONFIG_RELOADER_IMAGE) 14 | #@overlay/match by=lambda _,a,b: "--prometheus-config-reloader=" in a, expects="0+" 15 | - --prometheus-config-reloader=$(PROMETHEUS_CONFIG_RELOADER) 16 | #@overlay/match missing_ok=True 17 | env: 18 | #@overlay/match by="name",missing_ok=True 19 | - name: CONFIG_RELOADER_IMAGE 20 | valueFrom: 21 | configMapKeyRef: 22 | name: prometheus-operator-images 23 | key: config-reloader-image 24 | #@overlay/match by="name",missing_ok=True 25 | - name: PROMETHEUS_CONFIG_RELOADER 26 | valueFrom: 27 | configMapKeyRef: 28 | name: prometheus-operator-images 29 | key: prometheus-config-reloader 30 | 31 | --- 32 | apiVersion: v1 33 | kind: ConfigMap 34 | metadata: 35 | name: prometheus-operator-images 36 | namespace: monitoring 37 | labels: 38 | app.kubernetes.io/name: prometheus-operator 39 | app.kubernetes.io/instance: monitoring 40 | app.kubernetes.io/component: operator 41 | data: 42 | config-reloader-image: docker.io/bitnami/configmap-reload:0.4.0-debian-10-r18 43 | prometheus-config-reloader: docker.io/bitnami/prometheus-operator:0.41.0-debian-10-r5 44 | --- 45 | apiVersion: 
kbld.k14s.io/v1alpha1 46 | kind: Config 47 | minimumRequiredVersion: 0.24.0 48 | searchRules: 49 | - keyMatcher: 50 | path: [data, config-reloader-image] 51 | - keyMatcher: 52 | path: [data, prometheus-config-reloader] -------------------------------------------------------------------------------- /workload/monitoring/jsonnet/.gitignore: -------------------------------------------------------------------------------- 1 | vendor/ -------------------------------------------------------------------------------- /workload/monitoring/jsonnet/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This script uses arg $1 (name of *.jsonnet file to use) to generate the manifests/*.yaml files. 4 | 5 | set -e 6 | set -x 7 | # only exit with zero if all commands of the pipeline exit successfully 8 | set -o pipefail 9 | 10 | rm -rf monitoring/manifests/out 11 | mkdir -p monitoring/manifests/out/setup 12 | # Pipe the jsonnet output through ytt so that we generate yaml (with the CRD overlay applied), not json 13 | jsonnet -J monitoring/jsonnet/vendor -m monitoring/manifests/out "${1-monitoring/jsonnet/prometheus-operator.jsonnet}" | xargs -I{} sh -c 'cat {} | ytt -f monitoring/jsonnet/crd-overlay.yaml -f foo.yaml=- > {}.yaml' -- {} 14 | 15 | # Make sure to remove json files 16 | find monitoring/manifests/out -type f ! -name '*.yaml' -delete 17 | rm -rf monitoring/manifests/out/grafana-config.yaml -------------------------------------------------------------------------------- /workload/monitoring/jsonnet/crd-overlay.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | 3 | #@overlay/match by=overlay.subset({"kind":"CustomResourceDefinition"}),expects="0+" 4 | --- 5 | #@overlay/remove 6 | status: -------------------------------------------------------------------------------- /workload/monitoring/jsonnet/jsonnetfile.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 | "dependencies": [ 4 | { 5 | "source": { 6 | "git": { 7 | "remote": "https://github.com/coreos/kube-prometheus", 8 | "subdir": "jsonnet/kube-prometheus" 9 | } 10 | }, 11 | "version": "master" 12 | } 13 | ], 14 | "legacyImports": true 15 | } 16 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/grafana-config.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: v1 5 | kind: Secret 6 | metadata: 7 | name: grafana-config 8 | namespace: monitoring 9 | type: Opaque 10 | stringData: 11 | #@yaml/text-templated-strings 12 | grafana.ini: | 13 | [auth] 14 | oauth_auto_login = true 15 | [auth.generic_oauth] 16 | api_url = https://login.sso.(@= data.values.domain @)/userinfo 17 | auth_url = https://login.sso.(@= data.values.domain @)/auth 18 | client_id = (@= data.values.grafana_client_id @) 19 | client_secret = (@= data.values.grafana_client_secret @) 20 | enabled = true 21 | scopes = openid email 22 | token_url = https://login.sso.(@= data.values.domain @)/token 23 | [dataproxy] 24 | timeout = 90 25 | [grafana_net] 26 | url = https://grafana.monitoring.(@= data.values.domain @) 27 | [server] 28 | enable_gzip = true 29 | root_url = https://grafana.monitoring.(@= data.values.domain @) 30 | [users] 31 | allow_sign_up = true 32 | auto_assign_org = true 33 | auto_assign_org_role = Admin 34 |
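grafana-config.yaml renders against the data values declared in grafana-values.yaml (below); in the cluster those values presumably arrive via kapp-controller, as the #! comments in the other values files note. A minimal sketch of rendering it standalone with ytt, using an illustrative domain and credentials:

ytt -f monitoring/manifests/grafana-config.yaml \
    -f monitoring/manifests/grafana-values.yaml \
    --data-value domain=example.com \
    --data-value grafana_client_id=grafana \
    --data-value grafana_client_secret=changeme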
-------------------------------------------------------------------------------- /workload/monitoring/manifests/grafana-httpproxy.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: projectcontour.io/v1 3 | kind: HTTPProxy 4 | metadata: 5 | name: monitoring-grafana 6 | namespace: monitoring 7 | spec: 8 | virtualhost: 9 | fqdn: grafana.monitoring.tanzu.world 10 | tls: 11 | secretName: monitoring/monitoring.tanzu.world 12 | routes: 13 | - services: 14 | - name: grafana 15 | port: 3000 16 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/grafana-values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | grafana_client_id: "" 4 | grafana_client_secret: "" 5 | 6 | domain: "" 7 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/kapp-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kapp.k14s.io/v1alpha1 2 | kind: Config 3 | ownershipLabelRules: 4 | - path: [spec, template, metadata, labels] 5 | resourceMatchers: 6 | - apiVersionKindMatcher: 7 | { apiVersion: bitnami.com/v1alpha1, kind: SealedSecret } 8 | 9 | rebaseRules: 10 | - path: [metadata, annotations, pv.kubernetes.io/bind-completed] 11 | type: copy 12 | sources: [new, existing] 13 | resourceMatchers: &pvcs 14 | - apiVersionKindMatcher: 15 | apiVersion: v1 16 | kind: PersistentVolumeClaim 17 | 18 | - path: [spec, volumeMode] 19 | type: copy 20 | sources: [new, existing] 21 | resourceMatchers: *pvcs 22 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/alertmanager-alertmanager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: Alertmanager 3 | metadata: 4 | labels: 5 | alertmanager: main 6 | name: main 7 | namespace: monitoring 8 | spec: 9 | image: quay.io/prometheus/alertmanager:v0.21.0 10 | nodeSelector: 11 | kubernetes.io/os: linux 12 | replicas: 3 13 | securityContext: 14 | fsGroup: 2000 15 | runAsNonRoot: true 16 | runAsUser: 1000 17 | serviceAccountName: alertmanager-main 18 | version: v0.21.0 19 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/alertmanager-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: {} 3 | kind: Secret 4 | metadata: 5 | name: alertmanager-main 6 | namespace: monitoring 7 | stringData: 8 | alertmanager.yaml: |- 9 | "global": 10 | "resolve_timeout": "5m" 11 | "inhibit_rules": 12 | - "equal": 13 | - "namespace" 14 | - "alertname" 15 | "source_match": 16 | "severity": "critical" 17 | "target_match_re": 18 | "severity": "warning|info" 19 | - "equal": 20 | - "namespace" 21 | - "alertname" 22 | "source_match": 23 | "severity": "warning" 24 | "target_match_re": 25 | "severity": "info" 26 | "receivers": 27 | - "name": "Default" 28 | - "name": "Watchdog" 29 | - "name": "Critical" 30 | "route": 31 | "group_by": 32 | - "namespace" 33 | "group_interval": "5m" 34 | "group_wait": "30s" 35 | "receiver": "Default" 36 | "repeat_interval": "12h" 37 | "routes": 38 | - "match": 39 | "alertname": "Watchdog" 40 | "receiver": "Watchdog" 41 | - "match": 42 | "severity": "critical" 43 | "receiver": "Critical" 44 | type: Opaque 45 | 
-------------------------------------------------------------------------------- /workload/monitoring/manifests/out/alertmanager-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | alertmanager: main 6 | name: alertmanager-main 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: web 11 | port: 9093 12 | targetPort: web 13 | selector: 14 | alertmanager: main 15 | app: alertmanager 16 | sessionAffinity: ClientIP 17 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/alertmanager-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: alertmanager-main 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/alertmanager-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: alertmanager 6 | name: alertmanager 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: web 12 | selector: 13 | matchLabels: 14 | alertmanager: main 15 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/grafana-dashboardDatasources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | datasources.yaml: ewogICAgImFwaVZlcnNpb24iOiAxLAogICAgImRhdGFzb3VyY2VzIjogWwogICAgICAgIHsKICAgICAgICAgICAgImFjY2VzcyI6ICJwcm94eSIsCiAgICAgICAgICAgICJlZGl0YWJsZSI6IGZhbHNlLAogICAgICAgICAgICAibmFtZSI6ICJwcm9tZXRoZXVzIiwKICAgICAgICAgICAgIm9yZ0lkIjogMSwKICAgICAgICAgICAgInR5cGUiOiAicHJvbWV0aGV1cyIsCiAgICAgICAgICAgICJ1cmwiOiAiaHR0cDovL3Byb21ldGhldXMtazhzLm1vbml0b3Jpbmcuc3ZjOjkwOTAiLAogICAgICAgICAgICAidmVyc2lvbiI6IDEKICAgICAgICB9CiAgICBdCn0= 4 | kind: Secret 5 | metadata: 6 | name: grafana-datasources 7 | namespace: monitoring 8 | type: Opaque 9 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/grafana-dashboardSources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | dashboards.yaml: |- 4 | { 5 | "apiVersion": 1, 6 | "providers": [ 7 | { 8 | "folder": "Default", 9 | "name": "0", 10 | "options": { 11 | "path": "/grafana-dashboard-definitions/0" 12 | }, 13 | "orgId": 1, 14 | "type": "file" 15 | } 16 | ] 17 | } 18 | kind: ConfigMap 19 | metadata: 20 | name: grafana-dashboards 21 | namespace: monitoring 22 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/grafana-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: grafana 6 | name: grafana 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: http 11 | port: 3000 12 | targetPort: http 13 | selector: 14 | app: grafana 15 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/grafana-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | 
metadata: 4 | name: grafana 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/grafana-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: grafana 5 | namespace: monitoring 6 | spec: 7 | endpoints: 8 | - interval: 15s 9 | port: http 10 | selector: 11 | matchLabels: 12 | app: grafana 13 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/kube-state-metrics-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: 1.9.7 7 | name: kube-state-metrics 8 | rules: 9 | - apiGroups: 10 | - "" 11 | resources: 12 | - configmaps 13 | - secrets 14 | - nodes 15 | - pods 16 | - services 17 | - resourcequotas 18 | - replicationcontrollers 19 | - limitranges 20 | - persistentvolumeclaims 21 | - persistentvolumes 22 | - namespaces 23 | - endpoints 24 | verbs: 25 | - list 26 | - watch 27 | - apiGroups: 28 | - extensions 29 | resources: 30 | - daemonsets 31 | - deployments 32 | - replicasets 33 | - ingresses 34 | verbs: 35 | - list 36 | - watch 37 | - apiGroups: 38 | - apps 39 | resources: 40 | - statefulsets 41 | - daemonsets 42 | - deployments 43 | - replicasets 44 | verbs: 45 | - list 46 | - watch 47 | - apiGroups: 48 | - batch 49 | resources: 50 | - cronjobs 51 | - jobs 52 | verbs: 53 | - list 54 | - watch 55 | - apiGroups: 56 | - autoscaling 57 | resources: 58 | - horizontalpodautoscalers 59 | verbs: 60 | - list 61 | - watch 62 | - apiGroups: 63 | - authentication.k8s.io 64 | resources: 65 | - tokenreviews 66 | verbs: 67 | - create 68 | - apiGroups: 69 | - authorization.k8s.io 70 | resources: 71 | - subjectaccessreviews 72 | verbs: 73 | - create 74 | - apiGroups: 75 | - policy 76 | resources: 77 | - poddisruptionbudgets 78 | verbs: 79 | - list 80 | - watch 81 | - apiGroups: 82 | - certificates.k8s.io 83 | resources: 84 | - certificatesigningrequests 85 | verbs: 86 | - list 87 | - watch 88 | - apiGroups: 89 | - storage.k8s.io 90 | resources: 91 | - storageclasses 92 | - volumeattachments 93 | verbs: 94 | - list 95 | - watch 96 | - apiGroups: 97 | - admissionregistration.k8s.io 98 | resources: 99 | - mutatingwebhookconfigurations 100 | - validatingwebhookconfigurations 101 | verbs: 102 | - list 103 | - watch 104 | - apiGroups: 105 | - networking.k8s.io 106 | resources: 107 | - networkpolicies 108 | verbs: 109 | - list 110 | - watch 111 | - apiGroups: 112 | - coordination.k8s.io 113 | resources: 114 | - leases 115 | verbs: 116 | - list 117 | - watch 118 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/kube-state-metrics-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: 1.9.7 7 | name: kube-state-metrics 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: ClusterRole 11 | name: kube-state-metrics 12 | subjects: 13 | - kind: ServiceAccount 14 | name: kube-state-metrics 15 | namespace: monitoring 
16 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/kube-state-metrics-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: 1.9.7 7 | name: kube-state-metrics 8 | namespace: monitoring 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app.kubernetes.io/name: kube-state-metrics 14 | template: 15 | metadata: 16 | labels: 17 | app.kubernetes.io/name: kube-state-metrics 18 | app.kubernetes.io/version: 1.9.7 19 | spec: 20 | containers: 21 | - args: 22 | - --host=127.0.0.1 23 | - --port=8081 24 | - --telemetry-host=127.0.0.1 25 | - --telemetry-port=8082 26 | image: quay.io/coreos/kube-state-metrics:v1.9.7 27 | name: kube-state-metrics 28 | securityContext: 29 | runAsUser: 65534 30 | - args: 31 | - --logtostderr 32 | - --secure-listen-address=:8443 33 | - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 34 | - --upstream=http://127.0.0.1:8081/ 35 | image: quay.io/coreos/kube-rbac-proxy:v0.4.1 36 | name: kube-rbac-proxy-main 37 | ports: 38 | - containerPort: 8443 39 | name: https-main 40 | securityContext: 41 | runAsUser: 65534 42 | - args: 43 | - --logtostderr 44 | - --secure-listen-address=:9443 45 | - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 46 | - --upstream=http://127.0.0.1:8082/ 47 | image: quay.io/coreos/kube-rbac-proxy:v0.4.1 48 | name: kube-rbac-proxy-self 49 | ports: 50 | - containerPort: 9443 51 | name: https-self 52 | securityContext: 53 | runAsUser: 65534 54 | nodeSelector: 55 | kubernetes.io/os: linux 56 | serviceAccountName: kube-state-metrics 57 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/kube-state-metrics-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: 1.9.7 7 | name: kube-state-metrics 8 | namespace: monitoring 9 | spec: 10 | clusterIP: None 11 | ports: 12 | - name: https-main 13 | port: 8443 14 | targetPort: https-main 15 | - name: https-self 16 | port: 9443 17 | targetPort: https-self 18 | selector: 19 | app.kubernetes.io/name: kube-state-metrics 20 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/kube-state-metrics-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: 1.9.7 7 | name: kube-state-metrics 8 | namespace: monitoring 9 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/kube-state-metrics-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 
monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: 1.9.7 7 | name: kube-state-metrics 8 | namespace: monitoring 9 | spec: 10 | endpoints: 11 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 12 | honorLabels: true 13 | interval: 30s 14 | port: https-main 15 | relabelings: 16 | - action: labeldrop 17 | regex: (pod|service|endpoint|namespace) 18 | scheme: https 19 | scrapeTimeout: 30s 20 | tlsConfig: 21 | insecureSkipVerify: true 22 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 23 | interval: 30s 24 | port: https-self 25 | scheme: https 26 | tlsConfig: 27 | insecureSkipVerify: true 28 | jobLabel: app.kubernetes.io/name 29 | selector: 30 | matchLabels: 31 | app.kubernetes.io/name: kube-state-metrics 32 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/node-exporter-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: node-exporter 5 | rules: 6 | - apiGroups: 7 | - authentication.k8s.io 8 | resources: 9 | - tokenreviews 10 | verbs: 11 | - create 12 | - apiGroups: 13 | - authorization.k8s.io 14 | resources: 15 | - subjectaccessreviews 16 | verbs: 17 | - create 18 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/node-exporter-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: node-exporter 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: node-exporter 9 | subjects: 10 | - kind: ServiceAccount 11 | name: node-exporter 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/node-exporter-daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: node-exporter 6 | app.kubernetes.io/version: v0.18.1 7 | name: node-exporter 8 | namespace: monitoring 9 | spec: 10 | selector: 11 | matchLabels: 12 | app.kubernetes.io/name: node-exporter 13 | template: 14 | metadata: 15 | labels: 16 | app.kubernetes.io/name: node-exporter 17 | app.kubernetes.io/version: v0.18.1 18 | spec: 19 | containers: 20 | - args: 21 | - --web.listen-address=127.0.0.1:9100 22 | - --path.procfs=/host/proc 23 | - --path.sysfs=/host/sys 24 | - --path.rootfs=/host/root 25 | - --no-collector.wifi 26 | - --no-collector.hwmon 27 | - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) 28 | image: quay.io/prometheus/node-exporter:v0.18.1 29 | name: node-exporter 30 | resources: 31 | limits: 32 | cpu: 250m 33 | memory: 180Mi 34 | requests: 35 | cpu: 102m 36 | memory: 180Mi 37 | volumeMounts: 38 | - mountPath: /host/proc 39 | name: proc 40 | readOnly: false 41 | - mountPath: /host/sys 42 | name: sys 43 | readOnly: false 44 | - mountPath: /host/root 45 | mountPropagation: HostToContainer 46 | name: root 47 | readOnly: true 48 | - args: 49 | - --logtostderr 50 | - --secure-listen-address=[$(IP)]:9100 51 | - 
--tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 52 | - --upstream=http://127.0.0.1:9100/ 53 | env: 54 | - name: IP 55 | valueFrom: 56 | fieldRef: 57 | fieldPath: status.podIP 58 | image: quay.io/coreos/kube-rbac-proxy:v0.4.1 59 | name: kube-rbac-proxy 60 | ports: 61 | - containerPort: 9100 62 | hostPort: 9100 63 | name: https 64 | resources: 65 | limits: 66 | cpu: 20m 67 | memory: 40Mi 68 | requests: 69 | cpu: 10m 70 | memory: 20Mi 71 | hostNetwork: true 72 | hostPID: true 73 | nodeSelector: 74 | kubernetes.io/os: linux 75 | securityContext: 76 | runAsNonRoot: true 77 | runAsUser: 65534 78 | serviceAccountName: node-exporter 79 | tolerations: 80 | - operator: Exists 81 | volumes: 82 | - hostPath: 83 | path: /proc 84 | name: proc 85 | - hostPath: 86 | path: /sys 87 | name: sys 88 | - hostPath: 89 | path: / 90 | name: root 91 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/node-exporter-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: node-exporter 6 | app.kubernetes.io/version: v0.18.1 7 | name: node-exporter 8 | namespace: monitoring 9 | spec: 10 | clusterIP: None 11 | ports: 12 | - name: https 13 | port: 9100 14 | targetPort: https 15 | selector: 16 | app.kubernetes.io/name: node-exporter 17 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/node-exporter-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: node-exporter 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/node-exporter-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: node-exporter 6 | app.kubernetes.io/version: v0.18.1 7 | name: node-exporter 8 | namespace: monitoring 9 | spec: 10 | endpoints: 11 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 12 | interval: 15s 13 | port: https 14 | relabelings: 15 | - action: replace 16 | regex: (.*) 17 | replacement: $1 18 | sourceLabels: 19 | - __meta_kubernetes_pod_node_name 20 | targetLabel: instance 21 | scheme: https 22 | tlsConfig: 23 | insecureSkipVerify: true 24 | jobLabel: app.kubernetes.io/name 25 | selector: 26 | matchLabels: 27 | app.kubernetes.io/name: node-exporter 28 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/prometheus-adapter-apiService.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiregistration.k8s.io/v1 2 | kind: APIService 3 | metadata: 4 | name: v1beta1.metrics.k8s.io 5 | spec: 6 | group: metrics.k8s.io 7 | groupPriorityMinimum: 100 8 | insecureSkipTLSVerify: true 9 | service: 10 | name: prometheus-adapter 11 | namespace: monitoring 12 | version: v1beta1 13 | versionPriority: 100 14 | -------------------------------------------------------------------------------- 
/workload/monitoring/manifests/out/prometheus-adapter-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: prometheus-adapter 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - nodes 10 | - namespaces 11 | - pods 12 | - services 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/prometheus-adapter-clusterRoleAggregatedMetricsReader.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | rbac.authorization.k8s.io/aggregate-to-admin: "true" 6 | rbac.authorization.k8s.io/aggregate-to-edit: "true" 7 | rbac.authorization.k8s.io/aggregate-to-view: "true" 8 | name: system:aggregated-metrics-reader 9 | rules: 10 | - apiGroups: 11 | - metrics.k8s.io 12 | resources: 13 | - pods 14 | - nodes 15 | verbs: 16 | - get 17 | - list 18 | - watch 19 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/prometheus-adapter-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: prometheus-adapter 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: prometheus-adapter 9 | subjects: 10 | - kind: ServiceAccount 11 | name: prometheus-adapter 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/prometheus-adapter-clusterRoleBindingDelegator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: resource-metrics:system:auth-delegator 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: system:auth-delegator 9 | subjects: 10 | - kind: ServiceAccount 11 | name: prometheus-adapter 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/prometheus-adapter-clusterRoleServerResources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: resource-metrics-server-resources 5 | rules: 6 | - apiGroups: 7 | - metrics.k8s.io 8 | resources: 9 | - '*' 10 | verbs: 11 | - '*' 12 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/prometheus-adapter-configMap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | config.yaml: |- 4 | "resourceRules": 5 | "cpu": 6 | "containerLabel": "container" 7 | "containerQuery": "sum(irate(container_cpu_usage_seconds_total{<<.LabelMatchers>>,container!=\"POD\",container!=\"\",pod!=\"\"}[5m])) by (<<.GroupBy>>)" 8 | "nodeQuery": "sum(1 - irate(node_cpu_seconds_total{mode=\"idle\"}[5m]) * on(namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{<<.LabelMatchers>>}) by (<<.GroupBy>>)" 9 | "resources": 10 | "overrides": 11 | "namespace": 12 | "resource": "namespace" 13 | 
"node": 14 | "resource": "node" 15 | "pod": 16 | "resource": "pod" 17 | "memory": 18 | "containerLabel": "container" 19 | "containerQuery": "sum(container_memory_working_set_bytes{<<.LabelMatchers>>,container!=\"POD\",container!=\"\",pod!=\"\"}) by (<<.GroupBy>>)" 20 | "nodeQuery": "sum(node_memory_MemTotal_bytes{job=\"node-exporter\",<<.LabelMatchers>>} - node_memory_MemAvailable_bytes{job=\"node-exporter\",<<.LabelMatchers>>}) by (<<.GroupBy>>)" 21 | "resources": 22 | "overrides": 23 | "instance": 24 | "resource": "node" 25 | "namespace": 26 | "resource": "namespace" 27 | "pod": 28 | "resource": "pod" 29 | "window": "5m" 30 | kind: ConfigMap 31 | metadata: 32 | name: adapter-config 33 | namespace: monitoring 34 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/prometheus-adapter-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: prometheus-adapter 5 | namespace: monitoring 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | name: prometheus-adapter 11 | strategy: 12 | rollingUpdate: 13 | maxSurge: 1 14 | maxUnavailable: 0 15 | template: 16 | metadata: 17 | labels: 18 | name: prometheus-adapter 19 | spec: 20 | containers: 21 | - args: 22 | - --cert-dir=/var/run/serving-cert 23 | - --config=/etc/adapter/config.yaml 24 | - --logtostderr=true 25 | - --metrics-relist-interval=1m 26 | - --prometheus-url=http://prometheus-k8s.monitoring.svc.cluster.local:9090/ 27 | - --secure-port=6443 28 | image: directxman12/k8s-prometheus-adapter:v0.7.0 29 | name: prometheus-adapter 30 | ports: 31 | - containerPort: 6443 32 | volumeMounts: 33 | - mountPath: /tmp 34 | name: tmpfs 35 | readOnly: false 36 | - mountPath: /var/run/serving-cert 37 | name: volume-serving-cert 38 | readOnly: false 39 | - mountPath: /etc/adapter 40 | name: config 41 | readOnly: false 42 | nodeSelector: 43 | kubernetes.io/os: linux 44 | serviceAccountName: prometheus-adapter 45 | volumes: 46 | - emptyDir: {} 47 | name: tmpfs 48 | - emptyDir: {} 49 | name: volume-serving-cert 50 | - configMap: 51 | name: adapter-config 52 | name: config 53 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/prometheus-adapter-roleBindingAuthReader.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: resource-metrics-auth-reader 5 | namespace: kube-system 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: extension-apiserver-authentication-reader 10 | subjects: 11 | - kind: ServiceAccount 12 | name: prometheus-adapter 13 | namespace: monitoring 14 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/prometheus-adapter-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | name: prometheus-adapter 6 | name: prometheus-adapter 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: https 11 | port: 443 12 | targetPort: 6443 13 | selector: 14 | name: prometheus-adapter 15 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/prometheus-adapter-serviceAccount.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: prometheus-adapter 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/prometheus-adapter-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | name: prometheus-adapter 6 | name: prometheus-adapter 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | interval: 30s 12 | port: https 13 | scheme: https 14 | tlsConfig: 15 | insecureSkipVerify: true 16 | selector: 17 | matchLabels: 18 | name: prometheus-adapter 19 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/prometheus-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: prometheus-k8s 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - nodes/metrics 10 | verbs: 11 | - get 12 | - nonResourceURLs: 13 | - /metrics 14 | verbs: 15 | - get 16 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/prometheus-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: prometheus-k8s 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: prometheus-k8s 9 | subjects: 10 | - kind: ServiceAccount 11 | name: prometheus-k8s 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/prometheus-kubeControllerManagerPrometheusDiscoveryService.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | k8s-app: kube-controller-manager 6 | name: kube-controller-manager-prometheus-discovery 7 | namespace: kube-system 8 | spec: 9 | clusterIP: None 10 | ports: 11 | - name: https-metrics 12 | port: 10257 13 | targetPort: 10257 14 | selector: 15 | component: kube-controller-manager 16 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/prometheus-kubeSchedulerPrometheusDiscoveryService.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | k8s-app: kube-scheduler 6 | name: kube-scheduler-prometheus-discovery 7 | namespace: kube-system 8 | spec: 9 | clusterIP: None 10 | ports: 11 | - name: https-metrics 12 | port: 10259 13 | targetPort: 10259 14 | selector: 15 | component: kube-scheduler 16 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/prometheus-operator-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | 
app.kubernetes.io/version: v0.41.1 8 | name: prometheus-operator 9 | namespace: monitoring 10 | spec: 11 | endpoints: 12 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 13 | honorLabels: true 14 | port: https 15 | scheme: https 16 | tlsConfig: 17 | insecureSkipVerify: true 18 | selector: 19 | matchLabels: 20 | app.kubernetes.io/component: controller 21 | app.kubernetes.io/name: prometheus-operator 22 | app.kubernetes.io/version: v0.41.1 23 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/prometheus-prometheus.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: Prometheus 3 | metadata: 4 | labels: 5 | prometheus: k8s 6 | name: k8s 7 | namespace: monitoring 8 | spec: 9 | alerting: 10 | alertmanagers: 11 | - name: alertmanager-main 12 | namespace: monitoring 13 | port: web 14 | image: quay.io/prometheus/prometheus:v2.20.0 15 | nodeSelector: 16 | kubernetes.io/os: linux 17 | podMonitorNamespaceSelector: {} 18 | podMonitorSelector: {} 19 | replicas: 2 20 | resources: 21 | requests: 22 | memory: 400Mi 23 | retention: 30d 24 | ruleSelector: 25 | matchLabels: 26 | prometheus: k8s 27 | role: alert-rules 28 | securityContext: 29 | fsGroup: 2000 30 | runAsNonRoot: true 31 | runAsUser: 1000 32 | serviceAccountName: prometheus-k8s 33 | serviceMonitorNamespaceSelector: {} 34 | serviceMonitorSelector: {} 35 | storage: 36 | volumeClaimTemplate: 37 | apiVersion: v1 38 | kind: PersistentVolumeClaim 39 | spec: 40 | accessModes: 41 | - ReadWriteOnce 42 | resources: 43 | requests: 44 | storage: 100Gi 45 | storageClassName: generic 46 | version: v2.20.0 47 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/prometheus-roleBindingConfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: prometheus-k8s-config 5 | namespace: monitoring 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: prometheus-k8s-config 10 | subjects: 11 | - kind: ServiceAccount 12 | name: prometheus-k8s 13 | namespace: monitoring 14 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/prometheus-roleBindingSpecificNamespaces.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | items: 3 | - apiVersion: rbac.authorization.k8s.io/v1 4 | kind: RoleBinding 5 | metadata: 6 | name: prometheus-k8s 7 | namespace: default 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: Role 11 | name: prometheus-k8s 12 | subjects: 13 | - kind: ServiceAccount 14 | name: prometheus-k8s 15 | namespace: monitoring 16 | - apiVersion: rbac.authorization.k8s.io/v1 17 | kind: RoleBinding 18 | metadata: 19 | name: prometheus-k8s 20 | namespace: kube-system 21 | roleRef: 22 | apiGroup: rbac.authorization.k8s.io 23 | kind: Role 24 | name: prometheus-k8s 25 | subjects: 26 | - kind: ServiceAccount 27 | name: prometheus-k8s 28 | namespace: monitoring 29 | - apiVersion: rbac.authorization.k8s.io/v1 30 | kind: RoleBinding 31 | metadata: 32 | name: prometheus-k8s 33 | namespace: monitoring 34 | roleRef: 35 | apiGroup: rbac.authorization.k8s.io 36 | kind: Role 37 | name: prometheus-k8s 38 | subjects: 39 | - kind: ServiceAccount 40 | 
name: prometheus-k8s 41 | namespace: monitoring 42 | - apiVersion: rbac.authorization.k8s.io/v1 43 | kind: RoleBinding 44 | metadata: 45 | name: prometheus-k8s 46 | namespace: projectcontour 47 | roleRef: 48 | apiGroup: rbac.authorization.k8s.io 49 | kind: Role 50 | name: prometheus-k8s 51 | subjects: 52 | - kind: ServiceAccount 53 | name: prometheus-k8s 54 | namespace: monitoring 55 | - apiVersion: rbac.authorization.k8s.io/v1 56 | kind: RoleBinding 57 | metadata: 58 | name: prometheus-k8s 59 | namespace: cert-manager 60 | roleRef: 61 | apiGroup: rbac.authorization.k8s.io 62 | kind: Role 63 | name: prometheus-k8s 64 | subjects: 65 | - kind: ServiceAccount 66 | name: prometheus-k8s 67 | namespace: monitoring 68 | kind: RoleBindingList 69 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/prometheus-roleConfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: prometheus-k8s-config 5 | namespace: monitoring 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | verbs: 12 | - get 13 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/prometheus-roleSpecificNamespaces.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | items: 3 | - apiVersion: rbac.authorization.k8s.io/v1 4 | kind: Role 5 | metadata: 6 | name: prometheus-k8s 7 | namespace: default 8 | rules: 9 | - apiGroups: 10 | - "" 11 | resources: 12 | - services 13 | - endpoints 14 | - pods 15 | verbs: 16 | - get 17 | - list 18 | - watch 19 | - apiVersion: rbac.authorization.k8s.io/v1 20 | kind: Role 21 | metadata: 22 | name: prometheus-k8s 23 | namespace: kube-system 24 | rules: 25 | - apiGroups: 26 | - "" 27 | resources: 28 | - services 29 | - endpoints 30 | - pods 31 | verbs: 32 | - get 33 | - list 34 | - watch 35 | - apiVersion: rbac.authorization.k8s.io/v1 36 | kind: Role 37 | metadata: 38 | name: prometheus-k8s 39 | namespace: monitoring 40 | rules: 41 | - apiGroups: 42 | - "" 43 | resources: 44 | - services 45 | - endpoints 46 | - pods 47 | verbs: 48 | - get 49 | - list 50 | - watch 51 | - apiVersion: rbac.authorization.k8s.io/v1 52 | kind: Role 53 | metadata: 54 | name: prometheus-k8s 55 | namespace: projectcontour 56 | rules: 57 | - apiGroups: 58 | - "" 59 | resources: 60 | - services 61 | - endpoints 62 | - pods 63 | verbs: 64 | - get 65 | - list 66 | - watch 67 | - apiVersion: rbac.authorization.k8s.io/v1 68 | kind: Role 69 | metadata: 70 | name: prometheus-k8s 71 | namespace: cert-manager 72 | rules: 73 | - apiGroups: 74 | - "" 75 | resources: 76 | - services 77 | - endpoints 78 | - pods 79 | verbs: 80 | - get 81 | - list 82 | - watch 83 | kind: RoleList 84 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/prometheus-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | prometheus: k8s 6 | name: prometheus-k8s 7 | namespace: monitoring 8 | spec: 9 | ports: 10 | - name: web 11 | port: 9090 12 | targetPort: web 13 | selector: 14 | app: prometheus 15 | prometheus: k8s 16 | sessionAffinity: ClientIP 17 | -------------------------------------------------------------------------------- 
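The prometheus-k8s Service above fronts the two Prometheus replicas declared by the Prometheus resource earlier in this directory; sessionAffinity: ClientIP pins a client to one replica so dashboards don't flap between the pair. Nothing in this repo exposes the Service externally, so reaching the UI is typically a port-forward (generic kubectl usage, not part of these manifests):

    kubectl -n monitoring port-forward svc/prometheus-k8s 9090:9090
    # then browse http://localhost:9090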
/workload/monitoring/manifests/out/prometheus-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: prometheus-k8s 5 | namespace: monitoring 6 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/prometheus-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: prometheus 6 | name: prometheus 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: web 12 | selector: 13 | matchLabels: 14 | prometheus: k8s 15 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/prometheus-serviceMonitorCoreDNS.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: coredns 6 | name: coredns 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | interval: 15s 12 | port: metrics 13 | jobLabel: k8s-app 14 | namespaceSelector: 15 | matchNames: 16 | - kube-system 17 | selector: 18 | matchLabels: 19 | k8s-app: kube-dns 20 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/prometheus-serviceMonitorKubeScheduler.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | k8s-app: kube-scheduler 6 | name: kube-scheduler 7 | namespace: monitoring 8 | spec: 9 | endpoints: 10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 11 | interval: 30s 12 | port: https-metrics 13 | scheme: https 14 | tlsConfig: 15 | insecureSkipVerify: true 16 | jobLabel: k8s-app 17 | namespaceSelector: 18 | matchNames: 19 | - kube-system 20 | selector: 21 | matchLabels: 22 | k8s-app: kube-scheduler 23 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/setup/0namespace-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: monitoring 5 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/setup/prometheus-operator-0prometheusruleCustomResourceDefinition.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | annotations: 5 | controller-gen.kubebuilder.io/version: v0.2.4 6 | creationTimestamp: null 7 | name: prometheusrules.monitoring.coreos.com 8 | spec: 9 | group: monitoring.coreos.com 10 | names: 11 | kind: PrometheusRule 12 | listKind: PrometheusRuleList 13 | plural: prometheusrules 14 | singular: prometheusrule 15 | scope: Namespaced 16 | versions: 17 | - name: v1 18 | schema: 19 | openAPIV3Schema: 20 | description: PrometheusRule defines alerting rules for a Prometheus instance 21 | properties: 22 | apiVersion: 23 | description: 'APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 24 | type: string 25 | kind: 26 | description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 27 | type: string 28 | metadata: 29 | type: object 30 | spec: 31 | description: Specification of desired alerting rule definitions for Prometheus. 32 | properties: 33 | groups: 34 | description: Content of Prometheus rule file 35 | items: 36 | description: 'RuleGroup is a list of sequentially evaluated recording and alerting rules. Note: PartialResponseStrategy is only used by ThanosRuler and will be ignored by Prometheus instances. Valid values for this field are ''warn'' or ''abort''. More info: https://github.com/thanos-io/thanos/blob/master/docs/components/rule.md#partial-response' 37 | properties: 38 | interval: 39 | type: string 40 | name: 41 | type: string 42 | partial_response_strategy: 43 | type: string 44 | rules: 45 | items: 46 | description: Rule describes an alerting or recording rule. 47 | properties: 48 | alert: 49 | type: string 50 | annotations: 51 | additionalProperties: 52 | type: string 53 | type: object 54 | expr: 55 | anyOf: 56 | - type: integer 57 | - type: string 58 | x-kubernetes-int-or-string: true 59 | for: 60 | type: string 61 | labels: 62 | additionalProperties: 63 | type: string 64 | type: object 65 | record: 66 | type: string 67 | required: 68 | - expr 69 | type: object 70 | type: array 71 | required: 72 | - name 73 | - rules 74 | type: object 75 | type: array 76 | type: object 77 | required: 78 | - spec 79 | type: object 80 | served: true 81 | storage: true 82 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/setup/prometheus-operator-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.41.1 8 | name: prometheus-operator 9 | rules: 10 | - apiGroups: 11 | - monitoring.coreos.com 12 | resources: 13 | - alertmanagers 14 | - alertmanagers/finalizers 15 | - prometheuses 16 | - prometheuses/finalizers 17 | - thanosrulers 18 | - thanosrulers/finalizers 19 | - servicemonitors 20 | - podmonitors 21 | - probes 22 | - prometheusrules 23 | verbs: 24 | - '*' 25 | - apiGroups: 26 | - apps 27 | resources: 28 | - statefulsets 29 | verbs: 30 | - '*' 31 | - apiGroups: 32 | - "" 33 | resources: 34 | - configmaps 35 | - secrets 36 | verbs: 37 | - '*' 38 | - apiGroups: 39 | - "" 40 | resources: 41 | - pods 42 | verbs: 43 | - list 44 | - delete 45 | - apiGroups: 46 | - "" 47 | resources: 48 | - services 49 | - services/finalizers 50 | - endpoints 51 | verbs: 52 | - get 53 | - create 54 | - update 55 | - delete 56 | - apiGroups: 57 | - "" 58 | resources: 59 | - nodes 60 | verbs: 61 | - list 62 | - watch 63 | - apiGroups: 64 | - "" 65 | resources: 66 | - namespaces 67 | verbs: 68 | - get 69 | - list 70 | - watch 71 | - apiGroups: 72 | - authentication.k8s.io 73 | resources: 74 | - tokenreviews 75 | verbs: 76 | - 
create 77 | - apiGroups: 78 | - authorization.k8s.io 79 | resources: 80 | - subjectaccessreviews 81 | verbs: 82 | - create 83 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/setup/prometheus-operator-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.41.1 8 | name: prometheus-operator 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: ClusterRole 12 | name: prometheus-operator 13 | subjects: 14 | - kind: ServiceAccount 15 | name: prometheus-operator 16 | namespace: monitoring 17 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/setup/prometheus-operator-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.41.1 8 | name: prometheus-operator 9 | namespace: monitoring 10 | spec: 11 | replicas: 1 12 | selector: 13 | matchLabels: 14 | app.kubernetes.io/component: controller 15 | app.kubernetes.io/name: prometheus-operator 16 | template: 17 | metadata: 18 | labels: 19 | app.kubernetes.io/component: controller 20 | app.kubernetes.io/name: prometheus-operator 21 | app.kubernetes.io/version: v0.41.1 22 | spec: 23 | containers: 24 | - args: 25 | - --kubelet-service=kube-system/kubelet 26 | - --logtostderr=true 27 | - --config-reloader-image=jimmidyson/configmap-reload:v0.4.0 28 | - --prometheus-config-reloader=quay.io/coreos/prometheus-config-reloader:v0.41.1 29 | image: quay.io/coreos/prometheus-operator:v0.41.1 30 | name: prometheus-operator 31 | ports: 32 | - containerPort: 8080 33 | name: http 34 | resources: 35 | limits: 36 | cpu: 200m 37 | memory: 200Mi 38 | requests: 39 | cpu: 100m 40 | memory: 100Mi 41 | securityContext: 42 | allowPrivilegeEscalation: false 43 | - args: 44 | - --logtostderr 45 | - --secure-listen-address=:8443 46 | - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 47 | - --upstream=http://127.0.0.1:8080/ 48 | image: quay.io/coreos/kube-rbac-proxy:v0.4.1 49 | name: kube-rbac-proxy 50 | ports: 51 | - containerPort: 8443 52 | name: https 53 | securityContext: 54 | runAsUser: 65534 55 | nodeSelector: 56 | beta.kubernetes.io/os: linux 57 | securityContext: 58 | runAsNonRoot: true 59 | runAsUser: 65534 60 | serviceAccountName: prometheus-operator 61 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/setup/prometheus-operator-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.41.1 8 | name: prometheus-operator 9 | namespace: monitoring 10 | spec: 11 | clusterIP: None 12 | ports: 13 | - name: https 14 | port: 8443 15 | 
targetPort: https 16 | selector: 17 | app.kubernetes.io/component: controller 18 | app.kubernetes.io/name: prometheus-operator 19 | -------------------------------------------------------------------------------- /workload/monitoring/manifests/out/setup/prometheus-operator-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/version: v0.41.1 8 | name: prometheus-operator 9 | namespace: monitoring 10 | -------------------------------------------------------------------------------- /workload/oauth2-proxy/helm/generate-manifests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -eux 3 | 4 | helm repo add stable https://kubernetes-charts.storage.googleapis.com/ 5 | helm repo update 6 | 7 | # NOTE: oauth2-proxy-values.yaml is assumed to sit in oauth2-proxy/helm/, mirroring the dex and sonarqube layouts. 8 | helm template oauth2-proxy stable/oauth2-proxy -f oauth2-proxy/helm/oauth2-proxy-values.yaml | ytt --ignore-unknown-comments -f- -f oauth2-proxy/helm/overlay-helmtemplate.yaml > oauth2-proxy/manifests/oauth2-proxy.yaml -------------------------------------------------------------------------------- /workload/oauth2-proxy/helm/overlay-helmtemplate.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | 3 | #@overlay/match by=overlay.all,expects="0+" 4 | --- 5 | metadata: 6 | #@overlay/match missing_ok=True 7 | namespace: sonar -------------------------------------------------------------------------------- /workload/opa-external-auth/opa-external-auth.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:base64", "base64") 2 | #@ load("@ytt:data", "data") 3 | #@ load("@ytt:yaml", "yaml") 4 | 5 | #! rego 6 | #@ load("policy.rego.lib.txt", "rego_auth_policy") 7 | 8 | --- 9 | apiVersion: v1 10 | data: 11 | policy.rego: #@ base64.encode(rego_auth_policy()) 12 | kind: Secret 13 | metadata: 14 | name: opa-policy 15 | namespace: apps 16 | type: Opaque 17 | --- 18 | apiVersion: apps/v1 19 | kind: Deployment 20 | metadata: 21 | name: opa 22 | namespace: apps 23 | labels: 24 | app: opa 25 | spec: 26 | replicas: 1 27 | selector: 28 | matchLabels: 29 | app: opa 30 | template: 31 | metadata: 32 | labels: 33 | app: opa 34 | name: opa 35 | spec: 36 | containers: 37 | - name: opa 38 | image: openpolicyagent/opa:0.26.0-envoy-6 39 | ports: 40 | - name: http 41 | containerPort: 8181 42 | - name: auth 43 | containerPort: 9191 44 | args: 45 | - "run" 46 | - "--addr=0.0.0.0:8181" 47 | - "--set=plugins.envoy_ext_authz_grpc.addr=:9191" 48 | - "--set=plugins.envoy_ext_authz_grpc.query=data.envoy.authz.allow" 49 | - "--h2c" 50 | - "--ignore=.*" #!
exclude hidden dirs created by Kubernetes 51 | - "--log-level" 52 | - "debug" 53 | - "--log-format" 54 | - "text" 55 | - "--server" 56 | - "/policies" 57 | volumeMounts: 58 | - readOnly: true 59 | mountPath: /policies 60 | name: proxy-config 61 | volumes: 62 | - name: proxy-config 63 | secret: 64 | secretName: opa-policy 65 | --- 66 | apiVersion: v1 67 | kind: Service 68 | metadata: 69 | name: opa-auth 70 | labels: 71 | app: opa 72 | namespace: apps 73 | spec: 74 | type: ClusterIP 75 | ports: 76 | - port: 9191 77 | targetPort: auth 78 | protocol: TCP 79 | name: auth 80 | selector: 81 | app: opa 82 | --- 83 | apiVersion: projectcontour.io/v1alpha1 84 | kind: ExtensionService 85 | metadata: 86 | name: opa 87 | namespace: apps 88 | spec: 89 | protocol: h2c 90 | services: 91 | - name: opa-auth 92 | port: 9191 93 | --- 94 | apiVersion: apps/v1 95 | kind: Deployment 96 | metadata: 97 | name: ingress-conformance-echo 98 | namespace: apps 99 | spec: 100 | replicas: 1 101 | selector: 102 | matchLabels: 103 | app.kubernetes.io/name: ingress-conformance-echo 104 | template: 105 | metadata: 106 | labels: 107 | app.kubernetes.io/name: ingress-conformance-echo 108 | spec: 109 | containers: 110 | - name: conformance-echo 111 | image: k8s.gcr.io/ingressconformance/echoserver:v0.0.1@sha256:9b34b17f391f87fb2155f01da2f2f90b7a4a5c1110ed84cb5379faa4f570dc52 112 | ports: 113 | - name: http-api 114 | containerPort: 3000 115 | readinessProbe: 116 | httpGet: 117 | path: /health 118 | port: 3000 119 | --- 120 | apiVersion: v1 121 | kind: Service 122 | metadata: 123 | name: ingress-conformance-echo 124 | namespace: apps 125 | spec: 126 | ports: 127 | - name: http 128 | port: 80 129 | targetPort: http-api 130 | selector: 131 | app.kubernetes.io/name: ingress-conformance-echo 132 | --- 133 | apiVersion: projectcontour.io/v1 134 | kind: HTTPProxy 135 | metadata: 136 | name: echo 137 | namespace: apps 138 | spec: 139 | virtualhost: 140 | fqdn: echo.apps.tanzuplatform.com 141 | tls: 142 | secretName: apps.tanzuplatform.com 143 | authorization: 144 | extensionRef: 145 | name: opa 146 | namespace: apps 147 | routes: 148 | - services: 149 | - name: ingress-conformance-echo 150 | port: 80 151 | -------------------------------------------------------------------------------- /workload/opa-external-auth/policy.rego.lib.txt: -------------------------------------------------------------------------------- 1 | # (@ def rego_auth_policy(): -@) 2 | package envoy.authz 3 | 4 | import input.attributes.request.http as http_request 5 | 6 | default allow = false 7 | 8 | token = {"valid": valid, "payload": payload} { 9 | [_, encoded] := split(http_request.headers.authorization, " ") 10 | [valid, _, payload] := io.jwt.decode_verify(encoded, {"secret": "secret"}) 11 | } 12 | 13 | allow { 14 | is_token_valid 15 | action_allowed 16 | } 17 | 18 | is_token_valid { 19 | token.valid 20 | now := time.now_ns() / 1000000000 21 | token.payload.nbf <= now 22 | now < token.payload.exp 23 | } 24 | 25 | action_allowed { 26 | http_request.method == "GET" 27 | token.payload.role == "guest" 28 | glob.match("/people*", [], http_request.path) 29 | } 30 | 31 | action_allowed { 32 | http_request.method == "GET" 33 | token.payload.role == "admin" 34 | glob.match("/people*", [], http_request.path) 35 | } 36 | 37 | action_allowed { 38 | http_request.method == "POST" 39 | token.payload.role == "admin" 40 | glob.match("/people", [], http_request.path) 41 | lower(input.parsed_body.firstname) != base64url.decode(token.payload.sub) 42 | } 43 | 44 | # (@- end @) 
-------------------------------------------------------------------------------- /workload/package-images.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | FOLDERS=$(find . -maxdepth 2 -type d -name "manifests" -print) 4 | 5 | set -eux 6 | 7 | echo "Packages all container images in preparation for transport." 8 | 9 | mkdir -p ./images 10 | 11 | for appFolder in $FOLDERS; do 12 | app=${appFolder%"/manifests"} 13 | ytt --ignore-unknown-comments -f $appFolder | kbld -f - --lock-output ./images/${app}.lock 14 | echo "$app" 15 | done 16 | 17 | for appFolder in $FOLDERS; do 18 | app=${appFolder%"/manifests"} 19 | kbld package -f ./images/${app}.lock -o ./images/${app}.tar 20 | echo "$app" 21 | done -------------------------------------------------------------------------------- /workload/postgresql-ha/helm/generate-manifests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -eux 3 | 4 | echo "#! DO NOT MODIFY THIS FILE DIRECTLY IT IS GENERATED FROM $0" > postgresql-ha/manifests/postgresql.yaml 5 | helm template postgresql-ha /home/voor/workspace/bitnami-charts/bitnami/postgresql-ha -f postgresql-ha/helm/postgresql-ha-values.yaml --namespace replaced --include-crds >> postgresql-ha/manifests/postgresql.yaml -------------------------------------------------------------------------------- /workload/postgresql-ha/helm/postgresql-ha-values.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | imageRegistry: myRegistryName 3 | 4 | ## PostgreSQL parameters 5 | ## 6 | postgresql: 7 | existingSecret: postgres-password-secret 8 | resources: 9 | limits: {} 10 | # cpu: 250m 11 | # memory: 256Mi 12 | requests: 13 | cpu: 250m 14 | memory: 256Mi 15 | ## Audit settings 16 | ## https://github.com/bitnami/bitnami-docker-postgresql#auditing 17 | ## 18 | audit: 19 | ## Log client hostnames 20 | ## 21 | logHostname: false 22 | ## Log connections to the server 23 | ## 24 | logConnections: false 25 | ## Log disconnections 26 | ## 27 | logDisconnections: false 28 | 29 | ## Pod disruption budget configuration 30 | ## 31 | pdb: 32 | ## Specifies whether a Pod disruption budget should be created 33 | ## 34 | create: true 35 | minAvailable: 1 36 | 37 | pgpool: 38 | existingSecret: pgpool-password-secret 39 | 40 | resources: 41 | limits: {} 42 | # cpu: 250m 43 | # memory: 256Mi 44 | requests: 45 | cpu: 250m 46 | memory: 256Mi 47 | 48 | ## Pod disruption budget configuration 49 | ## 50 | pdb: 51 | ## Specifies whether a Pod disruption budget should be created 52 | ## 53 | create: true 54 | minAvailable: 1 55 | 56 | replicaCount: 2 57 | 58 | ## PostgreSQL Prometheus exporter parameters 59 | ## 60 | metrics: 61 | enabled: true 62 | 63 | networkPolicy: 64 | enabled: true 65 | 66 | ## The Policy model to apply. When set to false, only pods with the correct 67 | ## client labels will have network access to the port PostgreSQL is listening 68 | ## on. When true, PostgreSQL will accept connections from any source 69 | ## (with the correct destination port).
70 | ## 71 | allowExternal: false 72 | 73 | persistence: 74 | enabled: true 75 | ## A manually managed Persistent Volume and Claim 76 | ## If defined, PVC must be created manually before volume will be bound 77 | ## The value is evaluated as a template 78 | ## 79 | # existingClaim: 80 | ## Persistent Volume Storage Class 81 | ## If defined, storageClassName: 82 | ## If set to "-", storageClassName: "", which disables dynamic provisioning 83 | ## If undefined (the default) or set to null, no storageClassName spec is 84 | ## set, choosing the default provisioner. 85 | ## 86 | storageClass: "generic" -------------------------------------------------------------------------------- /workload/postgresql-ha/manifests/00-values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | 4 | postgres: 5 | namespace: 6 | password: 7 | 8 | repmgr: 9 | password: 10 | 11 | pgpool: 12 | password: -------------------------------------------------------------------------------- /workload/postgresql-ha/manifests/kapp-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kapp.k14s.io/v1alpha1 2 | kind: Config 3 | ownershipLabelRules: 4 | - path: [spec, template, metadata, labels] 5 | resourceMatchers: 6 | - apiVersionKindMatcher: 7 | { apiVersion: bitnami.com/v1alpha1, kind: SealedSecret } 8 | -------------------------------------------------------------------------------- /workload/postgresql-ha/manifests/kbld-config.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | #@ load("@ytt:data", "data") 3 | 4 | apiVersion: kbld.k14s.io/v1alpha1 5 | kind: Config 6 | minimumRequiredVersion: 0.24.0 7 | overrides: 8 | - image: myRegistryName/bitnami/pgpool:4.1.4-debian-10-r48 9 | newImage: gcr.io/sys-2b0109it/demo/bitnami/pgpool:4 10 | preresolved: true 11 | - image: myRegistryName/bitnami/postgres-exporter:0.8.0-debian-10-r269 12 | newImage: gcr.io/sys-2b0109it/demo/bitnami/postgres-exporter:0 13 | preresolved: true 14 | - image: myRegistryName/bitnami/postgresql-repmgr:11.9.0-debian-10-r82 15 | newImage: gcr.io/sys-2b0109it/demo/bitnami/postgresql-repmgr:11 16 | preresolved: true 17 | 18 | --- 19 | -------------------------------------------------------------------------------- /workload/postgresql-ha/manifests/postgresql-ha-additional.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | #@ def labels(): 4 | app.kubernetes.io/name: postgresql-ha 5 | app.kubernetes.io/instance: postgresql-ha 6 | app.kubernetes.io/managed-by: kapp-controller 7 | app.kubernetes.io/component: pgpool 8 | #@ end 9 | 10 | --- 11 | apiVersion: v1 12 | kind: Namespace 13 | metadata: 14 | name: #@ data.values.postgres.namespace 15 | annotations: 16 | iam.amazonaws.com/permitted: ".*" 17 | --- 18 | #! Source: postgresql-ha/templates/postgresql/secrets.yaml 19 | apiVersion: v1 20 | kind: Secret 21 | metadata: 22 | name: postgres-password-secret 23 | labels: #@ labels() 24 | type: Opaque 25 | stringData: 26 | postgresql-password: #@ data.values.postgres.password 27 | repmgr-password: #@ data.values.repmgr.password 28 | --- 29 | #! 
Source: postgresql-ha/templates/pgpool/secrets.yaml 30 | apiVersion: v1 31 | kind: Secret 32 | metadata: 33 | name: pgpool-password-secret 34 | labels: #@ labels() 35 | type: Opaque 36 | stringData: 37 | admin-password: #@ data.values.pgpool.password 38 | 39 | #! FIXME Add in SSL support for assessments -- this should be SSL in POSTGRES NOT PGPOOL 40 | #! --- 41 | #! apiVersion: cert-manager.io/v1alpha2 42 | #! kind: Certificate 43 | #! metadata: 44 | #! labels: #@ labels() 45 | #! name: database-tls-cert 46 | #! spec: 47 | #! dnsNames: 48 | #! - postgresql-ha-pgpool 49 | #! - postgresql-ha-postgresql-headless 50 | #! - postgresql-ha-postgresql 51 | #! - capa-webhook-service.capi-webhook-system.svc 52 | #! - capa-webhook-service.capi-webhook-system.svc.cluster.local 53 | #! issuerRef: 54 | #! kind: Issuer 55 | #! name: database-tls-issuer 56 | #! secretName: database-tls-certificate 57 | #! --- 58 | #! apiVersion: cert-manager.io/v1alpha2 59 | #! kind: Issuer 60 | #! metadata: 61 | #! labels: #@ labels() 62 | #! name: database-tls-issuer 63 | #! spec: 64 | #! selfSigned: {} -------------------------------------------------------------------------------- /workload/postgresql-ha/manifests/postgresql-ha-overlay.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | #@ load("@ytt:data", "data") 3 | 4 | #@overlay/match by=overlay.not_op(overlay.or_op(overlay.subset({"kind": "ServiceMonitor"}),overlay.subset({"kind": "Namespace"}))),expects="0+" 5 | --- 6 | #@overlay/match missing_ok=True 7 | metadata: 8 | #@overlay/match missing_ok=True 9 | namespace: #@ data.values.postgres.namespace -------------------------------------------------------------------------------- /workload/secrets/seal.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eux 3 | 4 | secret_files="${1:-secrets/*.yaml}" 5 | 6 | for secret in $secret_files 7 | do 8 | nopath=${secret:8} # Strips off secrets/ folder name 9 | sealed=${nopath%.yaml} # Removes yaml at the end 10 | outputfolder=${sealed%.*} # Removes optional numbering 11 | output=${outputfolder%-config} # Remove config at the end. 
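# Worked example, using the one secret checked into this repo:
#   secrets/sonarqube-config.yaml -> nopath=sonarqube-config.yaml,
#   sealed=sonarqube-config, output=sonarqube, so the kubeseal call below
#   writes sonarqube/manifests/sonarqube-config.sealed.yaml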
12 | kubeseal --cert workload-secrets.pem -o yaml > ${output}/manifests/${sealed}.sealed.yaml < ${secret} 13 | done 14 | -------------------------------------------------------------------------------- /workload/secrets/sonarqube-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: sonarqube-passwords 6 | namespace: sonar 7 | type: Opaque 8 | stringData: 9 | postgresql-password: "EXAMPLE" 10 | cookie-secret: "EXAMPLE" 11 | client-id: "sonarqube-client" 12 | client-secret: "EXAMPLE" -------------------------------------------------------------------------------- /workload/sonarqube/helm/generate-manifests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -eux 3 | 4 | helm repo add bitnami https://charts.bitnami.com/bitnami 5 | helm repo add oteemocharts https://oteemo.github.io/charts 6 | helm repo add stable https://kubernetes-charts.storage.googleapis.com/ 7 | helm repo update 8 | 9 | helm template sonarqube-proxy stable/oauth2-proxy -f sonarqube/helm/oauth2-proxy-values.yaml --namespace sonar | ytt --ignore-unknown-comments -f- -f sonarqube/helm/overlay-helmtemplate.yaml > sonarqube/manifests/sonarqube-proxy.yaml 10 | helm template sonarqube-db bitnami/postgresql -f sonarqube/helm/postgresql-values.yaml --namespace sonar | ytt --ignore-unknown-comments -f- -f sonarqube/helm/overlay-helmtemplate.yaml > sonarqube/manifests/sonarqube-db.yaml 11 | helm template sonarqube oteemocharts/sonarqube -f sonarqube/helm/sonarqube-values.yaml --namespace sonar | ytt --ignore-unknown-comments -f- -f sonarqube/helm/overlay-helmtemplate.yaml > sonarqube/manifests/sonarqube.yaml -------------------------------------------------------------------------------- /workload/sonarqube/helm/oauth2-proxy-values.yaml: -------------------------------------------------------------------------------- 1 | config: 2 | existingSecret: sonarqube-passwords 3 | 4 | configFile: |- 5 | provider = "oidc" 6 | provider_display_name = "Dex" 7 | redirect_url = "https://sonar.apps.tanzu.world/oauth2/callback" 8 | upstreams = [ "http://sonarqube-sonarqube:9000" ] 9 | oidc_issuer_url = "https://login.sso.tanzu.world" 10 | pass_basic_auth = true 11 | pass_user_headers = true 12 | pass_host_header = true 13 | ssl_insecure_skip_verify = true 14 | email_domains = [ 15 | "*" 16 | ] 17 | 18 | image: 19 | repository: "quay.io/oauth2-proxy/oauth2-proxy" 20 | tag: "v5.1.0" 21 | pullPolicy: "Always" 22 | 23 | securityContext: 24 | enabled: true 25 | -------------------------------------------------------------------------------- /workload/sonarqube/helm/overlay-helmtemplate.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | 3 | #@overlay/match by=overlay.all,expects="0+" 4 | --- 5 | metadata: 6 | #@overlay/match missing_ok=True 7 | namespace: sonar 8 | 9 | -------------------------------------------------------------------------------- /workload/sonarqube/helm/postgresql-values.yaml: -------------------------------------------------------------------------------- 1 | 2 | ## Bitnami PostgreSQL image version 3 | ## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ 4 | ## 5 | image: 6 | registry: docker.io 7 | repository: bitnami/postgresql 8 | tag: 11.7.0-debian-10-r87 9 | 10 | ## PostgreSQL user (has superuser privileges if username is `postgres`) 11 | ## ref:
https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run 12 | postgresqlUsername: sonarUser 13 | 14 | ## Create a database 15 | ## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run 16 | ## 17 | postgresqlDatabase: sonarDB 18 | 19 | existingSecret: sonarqube-passwords 20 | 21 | ## PostgreSQL data Persistent Volume Storage Class 22 | ## If defined, storageClassName: 23 | ## If set to "-", storageClassName: "", which disables dynamic provisioning 24 | ## If undefined (the default) or set to null, no storageClassName spec is 25 | ## set, choosing the default provisioner. (gp2 on AWS, standard on 26 | ## GKE, AWS & OpenStack) 27 | ## 28 | persistence: 29 | storageClass: "generic" 30 | accessModes: 31 | - ReadWriteOnce 32 | size: 8Gi 33 | -------------------------------------------------------------------------------- /workload/sonarqube/manifests/kapp-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kapp.k14s.io/v1alpha1 2 | kind: Config 3 | ownershipLabelRules: 4 | - path: [spec, template, metadata, labels] 5 | resourceMatchers: 6 | - apiVersionKindMatcher: 7 | { apiVersion: bitnami.com/v1alpha1, kind: SealedSecret } 8 | 9 | rebaseRules: 10 | - path: [metadata, annotations, pv.kubernetes.io/bind-completed] 11 | type: copy 12 | sources: [new, existing] 13 | resourceMatchers: &pvcs 14 | - apiVersionKindMatcher: 15 | apiVersion: v1 16 | kind: PersistentVolumeClaim 17 | 18 | - path: [spec, volumeMode] 19 | type: copy 20 | sources: [new, existing] 21 | resourceMatchers: *pvcs 22 | -------------------------------------------------------------------------------- /workload/sonarqube/manifests/sonarqube-additional.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: sonar 6 | --- 7 | apiVersion: projectcontour.io/v1 8 | kind: HTTPProxy 9 | metadata: 10 | name: sonarqube-sonarqube 11 | namespace: sonar 12 | spec: 13 | virtualhost: 14 | fqdn: sonar.apps.tanzu.world 15 | tls: 16 | secretName: apps/apps.tanzu.world 17 | routes: 18 | - services: 19 | - name: sonarqube-proxy-oauth2-proxy 20 | port: 80 21 | -------------------------------------------------------------------------------- /workload/sonarqube/manifests/sonarqube-config.sealed.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: bitnami.com/v1alpha1 2 | kind: SealedSecret 3 | metadata: 4 | creationTimestamp: null 5 | name: sonarqube-passwords 6 | namespace: sonar 7 | spec: 8 | encryptedData: 9 | client-id: AgA9tEH3eobvC+eDbeGCTVSPUfHeTYFCvG7cTtbKHg43nN/wdvVf0pywVySw+1YjJv58U3t14Tf2UlY31tGWoUYqwA1npp+upi/WxwKRXwo4ZP4LHwEGm7Pr5wvcrjDA3val+bIJi0/WV5CcaTe+r13kQf5aH4k/i8CeGuZcPen6cebnwddOxYDt0fyGXcjEIB56ubajYOw8jviLrC7pYxDEpd4bxqOQ5jA8wJdIUlMjRGjnZEovd8xzDQ1dGv9LNB0Wm3+sjVR5t6tnPBAolgkZllICVOoY3mhd/YbWdzLvJTZgyMwRzuXOLQo0ot7a40ksKiXsrVmSn+MLc7xcsRHwRbBMwuJBq0iORmyNuzcR2ASn5i19ptjtlD26XgjjoyrluhlSwpVMz0SuIb8cSze/WVqSs2rJEJcrGgNbYPcNhiMU6har/U+FT3m3Alx5hcm/fNtS3cHeimkaK5PHBcFWBrKoFYKGReExvFd1rNXM3QwnbWuEn3quAicD7ixrKELpHyQb/WbmRJu50LH+hZPyGBouHdn0RGLVl5Y8cGdsz1E0IskbkPIdh8jImdZ4BRfykLPs7hZUuZySDLlCEnkeiyfTYn2RIJPMaLxBH9jDpi+xgI12dKZ+hMlcyj0E2ATEiEryNl+sPkrwC89td2v/RCmR2v3UE7nKqaL2BLudddkM5TuDTt1jtSzajG0gegPIoK6j+FrC5sSpd/80OqFE 10 | client-secret: 
AgCpqq9VxGNEokGdfDZ9s8oP6HJciC0YAHQELSSpyAh0AH7yX+B+WMtf1Gu9UhWjDsG2cHEDt53GaIF7YaNKzMJZewIgLPrI+sznhguUS+Q4O3m+4wdr2s2ebOv6g6tt7cdYKLV2BvlABG9SIFgjLNMq52OmP/L0yYP72Zd3fv2hmJdzBCIQ3FbRrGxc+NDt+pur6pHLQJcqp8WzXLg24jx3pOZlYQwKjD3QHstYGZP5DACjxSK3RSCQvRizv2E/0E1d9lME8FXvCRRsfTkSRQcSNktoC0TfmEDMoju2t4xXv5J32EiT/UNiJ2Or1shT+OQxtl44qWzZN+1z+3wone3s8f8udJEsnF6VsuRRnnfi8x8Gjim1g5vrLDBRL1PY12++LKGdChQOb4/qb3b4p490CmYfSWAGFCLvb8jGzuc15I2rXdR0yz0u0LL+L+PUm4J60+dAtw5ARxJaqerCUvlqutyRtHej5Qw6P5whntMOkn8IFRtlqnMc9gUKaNixbwofphgpDBheIrlQaTZMu+JYmF71TRX2qKfSw9djih6mwmQ64zlan0IEnb/KzB4rb2ReE0T0N66VjaF3c7BITT1VdOWpaHYK8ahoHZ3E8U/4g1goonH14NRU4Jn22xaTfk1DL+kNkt42EmkwDRyiZJUlW8Q7G7+ISytTiBkC9SpOqQjB284opx64AmQGFEBSN+r/AvaxVXvXJtDWbkiAd043iVRzvA== 11 | cookie-secret: AgBSMTjeEQS4ZtyeVEGSI8HL2dkoIbNI/93Cn2aq3QlSZDMS6bkZ99Pz7/CkYOK4Q0z8m5nVn78XLRrG+eitY5dokvoRBcaOCFpot8LVtedewjXcdhQlBzyqaMsRf99HBf4CDvyA2+wrEb5pTDWJGu9ggotJfrC9RNansBolH+CKRteVomkZniHog3pesOLNzIlzr964jr6Sl3ZWHAv8pDBNGc9MClEsohExl4j8x1tL6ZEzfyMyUBRLgCh1QXkZTN8OcxnMVKohT5g3uQz/t4Jqn1APJOWapuddtYUdPdw2vvCSjp64GLWBHEpTlrxU+S646/tBhSqgdFAxNYtANhkwSgr68W2/3gxLvwp21thSnQ3qMDAiq+6pe0U+I80/CWCTwY6Jd/Y/SoTzLyfisuAFfC3I3By1Pg2ppv3igsF6XGSAcCESJVgGmAPOShJaqILvH2P41oAA5DFH5ESgELJDOerSL5JGdTMB0c5ux1zHPilxUOMp4a05MgZ7jAVclbuT6II7+C9bR6fFnHnhNwQbhhOeRl8EfwywSECnQPZmfFfX7NfNFjpPbQftAeBi1sPNe/Ycv1XOycZQ/6uZChkLiC1Kw3gRC8849KlbrO6rnhYRo6gD/7QfrMMJLAjPJMPVJF4pq8QPmjrwlvLcaBP8f3UOnYwJObOS9GbSICog5kxjGy9JaiDyW4L+y/pxqYPrzRRhmZxouzCFN1Q3+2N5VxM6mA== 12 | postgresql-password: AgCJ09kJuLaiXHvPNWJqjdeU6K2sxMAQUHJ3YTmL2kR21RkDVDGddCqz7pMHcxQmuREBz6F2DhLYYx/qCzNozDHLCyP0l98q0DJT+07G5BiFfNqNRJi74NpO0I+j0kXEmXA15vte4n9/wjBY+ah8Y7emqSQaLyE8BTE/ZN8tVv5NSDX1m0VzFuFvkFkhcpgggPQix4Fm3U5bX3zOJquAvcm3OI+Zq4xdeuOY6IcTp3iG+85atJ5zq4ljiIGccMu2X+lXcDqZfxNhK5CfQgq1VNyi4l0MNR4w0HiusYcq81V3zaxQ/q38OmkQRkbGM6vkJAoj1pgnuaVFqLeDAlZfpGAzpZQkkqSpv5wRsYy4l6zbLaxyt6tsXRjN1LuJH/fAh7aWNGhYvhNj9gCS+IfNNJOE3I2dCczOHBzl0KKs7VmsUNwZcI0v4vsOxuxyI+5zsdyaLfmgnhgWazHZ6F05VJuQgXPl6+Hz7lC32Bgvd2Q47FEW9nXG5X9fae/8uzp8CsuY9H6AkBTPz9jPNKvZe7vG321pjzv2FG08HaL5josn9bJe8b9sdH2/te/Q2bS6qWXMLP0ioZCinhkIqfPGItSzFi3VS7MmXNo5ZrDY0SWAHrig05QeiQhI0dCZR6XGsMeCJHakLQ7bp6l5jkBGCOU5v8qsXDqm9aiIUPyRqOxI7m1PX1uZPlaDi2VnPtiA7/XNAtADkwNo5vyU2fpk5bByWGOCKQ== 13 | template: 14 | metadata: 15 | creationTimestamp: null 16 | name: sonarqube-passwords 17 | namespace: sonar 18 | type: Opaque 19 | 20 | -------------------------------------------------------------------------------- /workload/sonarqube/manifests/sonarqube-proxy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app: oauth2-proxy 6 | chart: oauth2-proxy-3.2.2 7 | release: sonarqube-proxy 8 | heritage: Helm 9 | name: sonarqube-proxy-oauth2-proxy 10 | namespace: sonar 11 | --- 12 | apiVersion: v1 13 | kind: ConfigMap 14 | metadata: 15 | labels: 16 | app: oauth2-proxy 17 | chart: oauth2-proxy-3.2.2 18 | heritage: Helm 19 | release: sonarqube-proxy 20 | name: sonarqube-proxy-oauth2-proxy 21 | namespace: sonar 22 | data: 23 | oauth2_proxy.cfg: "provider = \"oidc\"\nprovider_display_name = \"Dex\"\nredirect_url = \"https://sonar.apps.tanzu.world/oauth2/callback\"\nupstreams = [ \"http://sonarqube-sonarqube:9000\" ]\noidc_issuer_url = \"https://login.sso.tanzu.world\"\npass_basic_auth = true\npass_user_headers = true\npass_host_header = true \nssl_insecure_skip_verify = true\nemail_domains = [\n \"*\"\n]" 24 | --- 25 | apiVersion: v1 26 | kind: Service 27 | metadata: 28 | 
labels: 29 | app: oauth2-proxy 30 | chart: oauth2-proxy-3.2.2 31 | release: sonarqube-proxy 32 | heritage: Helm 33 | name: sonarqube-proxy-oauth2-proxy 34 | namespace: sonar 35 | spec: 36 | type: ClusterIP 37 | ports: 38 | - port: 80 39 | targetPort: http 40 | protocol: TCP 41 | name: http 42 | selector: 43 | app: oauth2-proxy 44 | release: sonarqube-proxy 45 | --- 46 | apiVersion: apps/v1 47 | kind: Deployment 48 | metadata: 49 | labels: 50 | app: oauth2-proxy 51 | chart: oauth2-proxy-3.2.2 52 | heritage: Helm 53 | release: sonarqube-proxy 54 | name: sonarqube-proxy-oauth2-proxy 55 | namespace: sonar 56 | spec: 57 | replicas: 1 58 | selector: 59 | matchLabels: 60 | app: oauth2-proxy 61 | release: sonarqube-proxy 62 | template: 63 | metadata: 64 | annotations: 65 | checksum/config: 17884b10fdab6b8a15222151f511c403b0c0aadc783970aabb7e935a1056ee03 66 | checksum/config-emails: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b 67 | checksum/secret: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 68 | checksum/google-secret: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 69 | labels: 70 | app: oauth2-proxy 71 | release: sonarqube-proxy 72 | spec: 73 | serviceAccountName: sonarqube-proxy-oauth2-proxy 74 | containers: 75 | - name: oauth2-proxy 76 | image: quay.io/oauth2-proxy/oauth2-proxy:v5.1.0 77 | imagePullPolicy: Always 78 | args: 79 | - --http-address=0.0.0.0:4180 80 | - --config=/etc/oauth2_proxy/oauth2_proxy.cfg 81 | env: 82 | - name: OAUTH2_PROXY_CLIENT_ID 83 | valueFrom: 84 | secretKeyRef: 85 | name: sonarqube-passwords 86 | key: client-id 87 | - name: OAUTH2_PROXY_CLIENT_SECRET 88 | valueFrom: 89 | secretKeyRef: 90 | name: sonarqube-passwords 91 | key: client-secret 92 | - name: OAUTH2_PROXY_COOKIE_SECRET 93 | valueFrom: 94 | secretKeyRef: 95 | name: sonarqube-passwords 96 | key: cookie-secret 97 | ports: 98 | - containerPort: 4180 99 | name: http 100 | protocol: TCP 101 | livenessProbe: 102 | httpGet: 103 | path: /ping 104 | port: http 105 | scheme: HTTP 106 | initialDelaySeconds: 0 107 | timeoutSeconds: 1 108 | readinessProbe: 109 | httpGet: 110 | path: /ping 111 | port: http 112 | scheme: HTTP 113 | initialDelaySeconds: 0 114 | timeoutSeconds: 1 115 | successThreshold: 1 116 | periodSeconds: 10 117 | resources: {} 118 | volumeMounts: 119 | - mountPath: /etc/oauth2_proxy 120 | name: configmain 121 | securityContext: 122 | runAsNonRoot: true 123 | volumes: 124 | - configMap: 125 | defaultMode: 420 126 | name: sonarqube-proxy-oauth2-proxy 127 | name: configmain 128 | tolerations: [] 129 | -------------------------------------------------------------------------------- /workload/storageclass/manifests/storageclass-aws-ebs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: generic 5 | annotations: 6 | kapp.k14s.io/update-strategy: 'always-replace' 7 | storageclass.kubernetes.io/is-default-class: 'true' 8 | provisioner: kubernetes.io/aws-ebs 9 | parameters: 10 | type: gp2 11 | fsType: ext4 12 | encrypted: "true" 13 | reclaimPolicy: Retain 14 | allowVolumeExpansion: true 15 | volumeBindingMode: WaitForFirstConsumer -------------------------------------------------------------------------------- /workload/tag-job/manifests/tag-job-values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | 4 | aws_region: us-east-1 5 | resources: 6 | - subnet-XXXX 7 | - subnet-XXX 
8 | tags: 9 | - key: kubernetes.io/cluster/watersafety 10 | value: shared 11 | - key: kubernetes.io/role/internal-elb 12 | value: 1 -------------------------------------------------------------------------------- /workload/tag-job/manifests/tag-job.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | apiVersion: batch/v1 4 | kind: Job 5 | metadata: 6 | name: create-tags 7 | annotations: 8 | kapp.k14s.io/versioned: "" 9 | kapp.k14s.io/num-versions: "4" 10 | spec: 11 | template: 12 | spec: 13 | containers: 14 | - image: amazon/aws-cli 15 | name: create-tags 16 | env: 17 | - name: AWS_DEFAULT_REGION 18 | value: #@ data.values.aws_region 19 | args: 20 | - ec2 21 | - create-tags 22 | - --resources 23 | #@ for resource in data.values.resources: 24 | - #@ resource 25 | #@ end 26 | - --tags 27 | #@ for item in data.values.tags: 28 | - #@ "Key={},Value={}".format(item.key, item.value) 29 | #@ end 30 | restartPolicy: Never 31 | -------------------------------------------------------------------------------- /workload/update-charts.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | set -eux 4 | 5 | echo "Update all helm charts." 6 | 7 | find . -maxdepth 3 -perm 0755 -type f -name "generate-manifests.sh" -exec '{}' ';' 8 | -------------------------------------------------------------------------------- /workload/workload-secrets.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIErjCCApagAwIBAgIRAJPa9I83mBJ8kotl8vpGP40wDQYJKoZIhvcNAQELBQAw 3 | ADAeFw0yMDAzMDcwMTE1MTFaFw0zMDAzMDUwMTE1MTFaMAAwggIiMA0GCSqGSIb3 4 | DQEBAQUAA4ICDwAwggIKAoICAQCxdjX3dc8eolCfa1i+QKBxpXJnF5BpKWTZaXN7 5 | bdai3/q+QFheRMivZ4QD+Wi2iKyf8UNiiQYsdkapoWli+tp4XWyjjTeJ2DAQTzRW 6 | eViwMu1qDfdJKJN9zj4VOg8Sie4FNCmgLjUrM1QCO9JtYxYeWl22/ZujLZLTkkcP 7 | I4c8HRfFfnWJ+NlDhoHMd1SeDmf16Qho7+Gufnh09/DsOGvWeNclpLmC7cX9H6AK 8 | vaWOREBlqc90KqNhwTML/DsUT4qJHj9we2gmsjC76aDPl46//AKJ9YxvxoKzPMFK 9 | 9Zyzd1aoTVkBUsTYbRsAsnBn3G1VHaiXCMPY3535NEwcOqrc3fZ+WJrH5R5WKYFE 10 | C7NuzkJDW/hDQjdOBw42RWpGZkT04KNrnLkz5cE3xHZMpqOfFeegWQROauyzOYzV 11 | kFVkAWsXv6QNJbCgTdp6NBUG0imZw3ZlYa61GkqPFw87e8bzk1at5OApwt7W35Mt 12 | DbAvMrJa7FGPCQEkYhPFiZLERPJ5f6Q2zkKZS3xvhIpaMRK/AgrCitOe5aDd5n30 13 | /VrKspF83bc2zyEn3bF6w08YwFlFVe+aA8e4rlpcEqH0VWD793bm5aZ5mcsUN2W5 14 | EOqONz7TVLc/1JXu8C+liaQ/XRcZud/foMirM8q3dmB0WuNZDd5LN423Jw2THVSa 15 | 234wxQIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAAEwDwYDVR0TAQH/BAUwAwEB/zAN 16 | BgkqhkiG9w0BAQsFAAOCAgEAOCxgfm/i1bXIz9j4tEM21X32L15VPDApLdUjnuds 17 | 6td9o2Evg4AF3PguTgRBBfzKsG4E7hwWHESRBjnhz7I6aF5hOj8qhs5lXOtind1Z 18 | vKIx9Uia61qIGdXaP8T4ubShdhwi43g2pDjnOhYc9dLkR6LM6tXii4qPeFbMmGzz 19 | vQWvXJsAliO4L3dj3jRVoH8N8Em6Mmj7JYApwiy8eQzVx4SiQ61ai04ypkhD5TZ1 20 | BQZcMgfN/LcanN3faOUXlGGMjXRqNk6eMQff21jJ9GOgRzcNZkCcis0uhf8L0QvX 21 | Im3hww308wfbNtRsPGoyAbPHqtAtJt0103djY6OmIEV99R96zDsAGkpGwLsWsSXy 22 | 9UU9dA2sfVYHpKNQjdVFWNqRo6Fph4GN1b0jFya4Z4BETGnbjE0XAqRgggBOrVl8 23 | 2tcU/XJGxH7ajySksL0vRZQS9m/TsVAHVOFRiKJ+jV10k7sdNEEEnG6ZCCSvZ96v 24 | HyCV4a5CEd68B9KjbpB0gJkrNancyXnsPBTdzUR0mvIir5UEjjDoZ+YNV58CCpxP 25 | pf9ByZxCrJ8Sx6pEyhJKtjFwQSoT23q326bGcFOaDYsD9QkFmT9moOLbtDqRhBKb 26 | ylTrsOeXZByN0/hE+vyY4glP90Ilt1bLI4RcrQPik385dmy5eKXt4AuRbRBPYLPP 27 | lis= 28 | -----END CERTIFICATE----- 29 | --------------------------------------------------------------------------------
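The certificate above is the public sealing key that secrets/seal.sh passes to kubeseal, so secrets can be sealed offline without access to the cluster's private key. For the one checked-in example secret, the script's loop reduces to (run from the workload/ directory):

    kubeseal --cert workload-secrets.pem -o yaml < secrets/sonarqube-config.yaml > sonarqube/manifests/sonarqube-config.sealed.yaml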