├── .assets └── cloud-native-ref.png ├── .cursor └── rules ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── enhancement.md ├── renovate.json └── workflows │ ├── ci.yaml │ ├── pr-agent.yaml │ ├── terramate-drift-detection.yaml │ └── terramate-preview.yaml ├── .gitignore ├── .pre-commit-config.yaml ├── .tflint.hcl ├── .tool-versions ├── LICENSE ├── README.md ├── clusters └── mycluster-0 │ ├── crds.yaml │ ├── flux.yaml │ ├── infrastructure.yaml │ ├── namespaces.yaml │ ├── observability.yaml │ ├── security.yaml │ └── tooling.yaml ├── crds └── base │ ├── crds-cert-manager.yaml │ ├── external-secrets │ └── source.yaml │ ├── helmrelease-prometheus-operator.yaml │ ├── kustomization-actions-runner-controller.yaml │ ├── kustomization-cloudnative-pg.yaml │ ├── kustomization-external-secrets.yaml │ ├── kustomization-gateway-api.yaml │ ├── kustomization-grafana-operator.yaml │ ├── kustomization-kyverno.yaml │ ├── kustomization-victoria-metrics-operator.yaml │ └── kustomization.yaml ├── flux ├── notifications │ ├── alert.yaml │ ├── externalsecret-flux-slack-app.yaml │ └── provider.yaml ├── observability │ ├── controllers-vmpodscrape.yaml │ ├── grafana-dashboards.yaml │ ├── grafana-folder.yaml │ ├── operator-vmservicescrape.yaml │ └── vmrule.yaml ├── operator │ └── helmrelease.yaml └── sources │ ├── gitrepo-actions-runner-controller.yaml │ ├── gitrepo-cloudnative-pg.yaml │ ├── gitrepo-external-secrets.yaml │ ├── gitrepo-gateway-api.yaml │ ├── gitrepo-grafana-operator.yaml │ ├── gitrepo-kyverno.yaml │ ├── gitrepo-victoria-metrics.yaml │ ├── helmrepo-actions-runner-controller.yaml │ ├── helmrepo-bitnami.yaml │ ├── helmrepo-cloudnative-pg.yaml │ ├── helmrepo-crossplane.yaml │ ├── helmrepo-eks.yaml │ ├── helmrepo-external-dns.yaml │ ├── helmrepo-external-secrets.yaml │ ├── helmrepo-grafana-oci.yaml │ ├── helmrepo-grafana.yaml │ ├── helmrepo-harbor.yaml │ ├── helmrepo-headlamp.yaml │ ├── helmrepo-jetstack.yaml │ ├── helmrepo-kyverno.yaml │ ├── helmrepo-loggen.yaml │ ├── helmrepo-prometheus-community.yaml │ ├── helmrepo-vector.yaml │ ├── helmrepo-victoria-metrics.yaml │ ├── helmrepo-zitadel.yaml │ └── ocirepo-flux-operator.yaml ├── infrastructure ├── base │ ├── aws-load-balancer-controller │ │ ├── helmrelease.yaml │ │ └── kustomization.yaml │ ├── cilium │ │ ├── grafana-dashboards.yaml │ │ ├── grafana-folder.yaml │ │ ├── httproute-hubble-ui.yaml │ │ ├── kustomization.yaml │ │ ├── vmrules.yaml │ │ └── vmservicescrapes.yaml │ ├── cloudnative-pg │ │ ├── grafana-dashboard.yaml │ │ ├── helmrelease.yaml │ │ ├── kustomization.yaml │ │ └── s3-bucket.yaml │ ├── crossplane │ │ ├── README.md │ │ ├── configuration │ │ │ ├── environmentconfig.yaml │ │ │ ├── epi-composition.yaml │ │ │ ├── epi-definition.yaml │ │ │ ├── examples │ │ │ │ ├── environmentconfig.yaml │ │ │ │ ├── epi.yaml │ │ │ │ └── sqlinstance.yaml │ │ │ ├── functions.yaml │ │ │ ├── kcl │ │ │ │ ├── cloudnativepg │ │ │ │ │ ├── kcl.mod │ │ │ │ │ ├── kcl.mod.lock │ │ │ │ │ ├── main.k │ │ │ │ │ └── settings-example.yaml │ │ │ │ └── eks-pod-identity │ │ │ │ │ ├── kcl.mod │ │ │ │ │ ├── kcl.mod.lock │ │ │ │ │ ├── main.k │ │ │ │ │ └── settings-example.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── providerconfig-aws.yaml │ │ │ ├── providerconfig-k8s.yaml │ │ │ ├── sql-instance-composition.yaml │ │ │ └── sql-instance-definition.yaml │ │ ├── controller │ │ │ ├── helmrelease.yaml │ │ │ └── kustomization.yaml │ │ └── providers │ │ │ ├── deploymentruntimeconfig-aws.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── provider-ec2.yaml │ │ │ ├── provider-eks.yaml │ │ │ ├── 
provider-iam.yaml │ │ │ ├── provider-kms.yaml │ │ │ ├── provider-kubernetes.yaml │ │ │ └── provider-s3.yaml │ ├── external-dns │ │ ├── helmrelease.yaml │ │ └── kustomization.yaml │ └── gapi │ │ ├── example-public-gateway.yaml │ │ ├── kustomization.yaml │ │ ├── platform-private-gateway-certificate.yaml │ │ └── platform-private-gateway.yaml └── mycluster-0 │ ├── crossplane │ ├── configuration │ │ └── kustomization.yaml │ ├── controller │ │ └── kustomization.yaml │ └── providers │ │ └── kustomization.yaml │ └── kustomization.yaml ├── namespaces └── base │ ├── crossplane-system.yaml │ ├── harbor.yaml │ ├── infrastructure.yaml │ ├── kustomization.yaml │ ├── observability.yaml │ ├── security.yaml │ └── tooling.yaml ├── observability ├── base │ ├── grafana-oncall │ │ ├── externalsecret-admin.yaml │ │ ├── externalsecret-rabbitmq.yaml │ │ ├── externalsecret-slackapp.yaml │ │ ├── externalsecret-valkey.yaml │ │ ├── helmrelease-oncall.yaml │ │ ├── helmrelease-rabbitmq.yaml │ │ ├── helmrelease-valkey.yaml │ │ ├── httproute-oncall.yaml │ │ ├── httproute-rabbitmq.yaml │ │ ├── kustomization.yaml │ │ └── sqlinstance.yaml │ ├── grafana-operator │ │ ├── dashboards │ │ │ ├── kubernetes-karpenter.yaml │ │ │ ├── kubernetes-node-exporter-full.yaml │ │ │ ├── kubernetes-views-global.yaml │ │ │ ├── kubernetes-views-namespaces.yaml │ │ │ ├── kubernetes-views-nodes.yaml │ │ │ ├── kubernetes-views-pods.yaml │ │ │ └── kustomization.yaml │ │ ├── folders │ │ │ ├── databases.yaml │ │ │ ├── kubernetes.yaml │ │ │ └── kustomization.yaml │ │ ├── grafana-victoriametrics.yaml │ │ ├── helmrelease.yaml │ │ └── kustomization.yaml │ ├── loggen │ │ ├── helmrelease.yaml │ │ └── kustomization.yaml │ ├── victoria-logs │ │ ├── grafana-dashboards.yaml │ │ ├── grafana-datasource.yaml │ │ ├── helmrelease-vlcluster.yaml │ │ ├── helmrelease-vlsingle.yaml │ │ ├── httproute-vlcluster.yaml │ │ ├── httproute-vlsingle.yaml │ │ └── kustomization.yaml │ └── victoria-metrics-k8s-stack │ │ ├── externalsecret-alertmanager-slack-app.yaml │ │ ├── externalsecret-grafana-envvars.yaml │ │ ├── helmrelease-vmcluster.yaml │ │ ├── helmrelease-vmsingle.yaml │ │ ├── httproute-grafana.yaml │ │ ├── httproute-vmagent.yaml │ │ ├── httproute-vmalertmanager.yaml │ │ ├── httproute-vmcluster.yaml │ │ ├── httproute-vmsingle.yaml │ │ ├── kustomization.yaml │ │ ├── ogenki-grafana-provisioning.yaml │ │ ├── vm-common-helm-values-configmap.yaml │ │ ├── vmrules │ │ ├── karpenter.yaml │ │ └── kustomization.yaml │ │ ├── vmscrapeconfigs │ │ ├── ec2.yaml │ │ └── kustomization.yaml │ │ └── vmservicecrapes │ │ ├── karpenter.yaml │ │ └── kustomization.yaml └── mycluster-0 │ ├── kustomization.yaml │ └── victoria-metrics-k8s-stack │ └── kustomization.yaml ├── opentofu ├── config.tm.hcl ├── eks │ ├── .trivyignore.yaml │ ├── README.md │ ├── backend.tf │ ├── data.tf │ ├── helm.tf │ ├── helm_values │ │ ├── aws-ebs-csi-driver.yaml │ │ ├── cilium.yaml │ │ └── karpenter.yaml │ ├── iam.tf │ ├── karpenter.tf │ ├── kubernetes-manifests │ │ ├── flux │ │ │ ├── cluster-vars-configmap.yaml │ │ │ └── instance.yaml │ │ └── karpenter │ │ │ ├── default-ec2nc.yaml │ │ │ ├── default-nodepool.yaml │ │ │ ├── io-ec2nc.yaml │ │ │ └── io-nodepool.yaml │ ├── kubernetes.tf │ ├── locals.tf │ ├── main.tf │ ├── providers.tf │ ├── stack.tm.hcl │ ├── variables.tf │ ├── variables.tfvars │ ├── versions.tf │ └── workflows.tm.hcl ├── network │ ├── .trivyignore.yaml │ ├── README.md │ ├── backend.tf │ ├── data.tf │ ├── locals.tf │ ├── network.tf │ ├── outputs.tf │ ├── providers.tf │ ├── route53.tf │ ├── stack.tm.hcl │ ├── 
tailscale.tf │ ├── variables.tf │ ├── variables.tfvars │ └── versions.tf ├── openbao │ ├── cluster │ │ ├── .trivyignore.yaml │ │ ├── README.md │ │ ├── autoscaling_group.tf │ │ ├── backend.tf │ │ ├── data.tf │ │ ├── docs │ │ │ ├── getting_started.md │ │ │ └── pki_requirements.md │ │ ├── iam.tf │ │ ├── kms.tf │ │ ├── load_balancer.tf │ │ ├── locals.tf │ │ ├── outputs.tf │ │ ├── providers.tf │ │ ├── route53.tf │ │ ├── scripts │ │ │ ├── cloudinit-config.yaml │ │ │ ├── setup-local-disks.sh │ │ │ └── startup_script.sh │ │ ├── security_group.tf │ │ ├── stack.tm.hcl │ │ ├── variables.tf │ │ ├── variables.tfvars │ │ └── versions.tf │ └── management │ │ ├── .tls │ │ ├── .trivyignore.yaml │ │ ├── README.md │ │ ├── auth.tf │ │ ├── backend.tf │ │ ├── docs │ │ ├── approle.md │ │ ├── backup_restore.md │ │ └── cert-manager.md │ │ ├── mounts.tf │ │ ├── outputs.tf │ │ ├── pki.tf │ │ ├── policies.tf │ │ ├── policies │ │ ├── admin.hcl │ │ ├── cert-manager.hcl │ │ └── snapshot.hcl │ │ ├── providers.tf │ │ ├── roles.tf │ │ ├── secrets.tf │ │ ├── stack.tm.hcl │ │ ├── variables.tf │ │ ├── variables.tfvars │ │ ├── versions.tf │ │ └── workflows.tm.hcl └── workflows.tm.hcl ├── scripts ├── eks-prepare-destroy.sh ├── openbao-config.sh └── openbao-snapshot.sh ├── security ├── base │ ├── cert-manager │ │ ├── helmrelease.yaml │ │ ├── kustomization.yaml │ │ ├── le-clusterissuer-prod.yaml │ │ ├── le-clusterissuer-staging.yaml │ │ ├── openbao-approle-externalsecret.yaml │ │ └── openbao-clusterissuer.yaml │ ├── epis │ │ ├── cert-manager.yaml │ │ ├── default-gha-runner-scale-set.yaml │ │ ├── external-dns.yaml │ │ ├── external-secrets.yaml │ │ ├── harbor.yaml │ │ ├── load-balancer-controller.yaml │ │ ├── openbao-snapshot.yaml │ │ └── victoriametrics.yaml │ ├── external-secrets │ │ ├── clustersecretstore.yaml │ │ ├── helmrelease.yaml │ │ └── kustomization.yaml │ ├── kyverno │ │ ├── helmrelease-controller.yaml │ │ ├── helmrelease-policies.yaml │ │ └── kustomization.yaml │ ├── openbao-snapshot │ │ ├── external-secrets.yaml │ │ ├── kms.yaml │ │ ├── kustomization.yaml │ │ ├── s3-bucket.yaml │ │ ├── serviceaccount.yaml │ │ ├── snapshot-cronjob.yaml │ │ └── snapshot-pvc.yaml │ ├── rbac │ │ ├── admin.yaml │ │ └── kustomization.yaml │ └── zitadel │ │ ├── certificate.yaml │ │ ├── externalsecret-sqlinstance-password.yaml │ │ ├── externalsecret-zitadel-envvars.yaml │ │ ├── externalsecret-zitadel-masterkey.yaml │ │ ├── gateway.yaml │ │ ├── helmrelease.yaml │ │ ├── kustomization.yaml │ │ ├── network-policy.yaml │ │ ├── sqlinstance.yaml │ │ └── tlsroute.yaml └── mycluster-0 │ ├── external-secrets │ ├── helmrelease.yaml │ └── kustomization.yaml │ ├── kustomization.yaml │ └── zitadel │ └── kustomization.yaml ├── terramate.tm.hcl └── tooling ├── base ├── dagger-engine │ ├── configmap.yaml │ ├── deployment.yaml │ ├── kustomization.yaml │ ├── network-policy.yaml │ ├── pdb.yaml │ └── service.yaml ├── gha-runners │ ├── controller-helmrelease.yaml │ ├── dagger-scale-set-helmrelease.yaml │ ├── default-scale-set-helmrelease.yaml │ ├── externalsecret.yaml │ ├── kustomization.yaml │ └── network-policy.yaml ├── harbor │ ├── externalsecret-admin-password.yaml │ ├── externalsecret-valkey-password.yaml │ ├── helmrelease-harbor.yaml │ ├── helmrelease-valkey.yaml │ ├── httproute.yaml │ ├── iam-user.yaml │ ├── kustomization.yaml │ ├── s3-bucket.yaml │ ├── serviceaccount-harbor.yaml │ └── sqlinstance.yaml └── headlamp │ ├── externalsecret-headlamp-envvars.yaml │ ├── helmrelease.yaml │ ├── httproute.yaml │ └── kustomization.yaml └── mycluster-0 └── 
kustomization.yaml /.assets/cloud-native-ref.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Smana/cloud-native-ref/44adb58f0deba23e63775726b658ce5c3b5d9884/.assets/cloud-native-ref.png -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Issue Report 3 | about: Create a report to help us improve 4 | title: 'Brief Description of Issue' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | ### Issue Title 11 | *A concise, descriptive title that summarizes the issue.* 12 | 13 | ### Description 14 | *Provide a clear and detailed explanation of the issue. Include any relevant logs, error messages, or screenshots.* 15 | 16 | ### Environment Details 17 | - **Infrastructure Component:** *(e.g., Kubernetes, Docker, VMs, Networking, etc.)* 18 | - **Version/Release:** *(e.g., Kubernetes v1.20)* 19 | - **Configuration Details:** *(relevant configuration details affecting the issue)* 20 | 21 | ### Reproduction Steps 22 | *Steps to reproduce the behavior:* 23 | 1. Go to '...' 24 | 2. Click on '....' 25 | 3. Scroll down to '....' 26 | 4. See error 27 | 28 | ### Expected Behavior 29 | *Describe what you expected to happen.* 30 | 31 | ### Actual Behavior 32 | *Describe what actually happened. Include full error messages and logs if applicable.* 33 | 34 | ### Possible Solutions or Ideas 35 | *If you have any ideas on how the issue could be resolved or the area of code that may be responsible, include those here.* 36 | 37 | ### Impact 38 | *Describe the impact of the issue, e.g., does it cause a crash, does it affect performance, data loss, etc.* 39 | 40 | ### Additional Context 41 | *Add any other context about the problem here, like links to similar issues, external resources, etc.* 42 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/enhancement.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Enhancement Suggestion 3 | about: Suggest an idea for this project 4 | title: 'Brief Description of Enhancement' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | ### Enhancement Title 11 | *A concise, descriptive title that summarizes the enhancement.* 12 | 13 | ### Summary 14 | *Provide a clear and simple summary of what the enhancement is about.* 15 | 16 | ### Motivation 17 | *Explain why this enhancement would be useful to the project or users.* 18 | *Describe the potential benefits of the enhancement, including possible impacts on performance, usability, and efficiency.* 19 | 20 | ### Detailed Explanation 21 | *Provide a detailed explanation of the proposed enhancement. Include any preliminary ideas you have about the implementation, and how it integrates with existing functionalities.* 22 | 23 | ### Possible Drawbacks 24 | *Consider any possible drawbacks or issues that might arise with the implementation of this enhancement.* 25 | 26 | ### Additional Context 27 | *Add any other context or screenshots about the feature request here. 
Links to similar existing features or implementations in other projects are welcome.* 28 | -------------------------------------------------------------------------------- /.github/renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:recommended" 5 | ], 6 | "dependencyDashboard": true, 7 | "dependencyDashboardTitle": "Renovate Dashboard", 8 | "labels": [ 9 | "renovatebot" 10 | ], 11 | "flux": { 12 | "fileMatch": [ 13 | ".+\\.ya?ml" 14 | ], 15 | "ignorePaths": [ 16 | "clusters", 17 | "opentofu" 18 | ] 19 | }, 20 | "helm-values": { 21 | "fileMatch": [ 22 | ".+\\.ya?ml" 23 | ], 24 | "ignorePaths": [ 25 | "clusters", 26 | "opentofu" 27 | ] 28 | }, 29 | "kubernetes": { 30 | "fileMatch": [ 31 | ".+\\.ya?ml" 32 | ], 33 | "ignorePaths": [ 34 | "clusters", 35 | "opentofu" 36 | ] 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: ["main"] 7 | 8 | jobs: 9 | pre-commit: 10 | name: Pre-commit checks 🛃 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v4 14 | with: 15 | fetch-depth: 0 16 | 17 | - name: Write required openbao files 18 | run: | 19 | mkdir -p opentofu/openbao/cluster/.tls 20 | echo 'keep' > opentofu/openbao/cluster/.tls/openbao.pem 21 | echo 'keep' > opentofu/openbao/cluster/.tls/openbao-key.pem 22 | echo 'keep' > opentofu/openbao/cluster/.tls/ca-chain.pem 23 | 24 | - name: Validate Opentofu configuration 25 | uses: dagger/dagger-for-github@v7 26 | with: 27 | version: "latest" 28 | verb: call 29 | module: github.com/Smana/daggerverse/pre-commit-tf@pre-commit-tf/v0.1.2 30 | args: run --dir "." --tf-binary="tofu" 31 | 32 | kubernetes-validation: 33 | name: Kubernetes validation ☸ 34 | runs-on: ubuntu-latest 35 | steps: 36 | - name: Checkout 37 | uses: actions/checkout@v4 38 | 39 | - name: Validate Flux clusters manifests 40 | uses: dagger/dagger-for-github@v7 41 | with: 42 | version: "latest" 43 | verb: call 44 | module: github.com/Smana/daggerverse/kubeconform@kubeconform/v0.1.3 45 | args: validate --manifests "./clusters" --exclude ".github/*,opentofu/*" --catalog 46 | 47 | - name: Validate Kubernetes manifests (Kustomize directories) 48 | uses: dagger/dagger-for-github@v7 49 | with: 50 | version: "latest" 51 | verb: call 52 | module: github.com/Smana/daggerverse/kubeconform@kubeconform/v0.1.3 53 | args: validate --manifests "." 
--exclude ".github/*,opentofu/*" --env "domain_name:cluster.local,cluster_name:foobar,region:eu-west-3,cert_manager_approle_id:random" --kustomize --flux --catalog 54 | 55 | shellcheck: 56 | name: Check the shell scripts 💻 57 | runs-on: ubuntu-latest 58 | steps: 59 | - uses: actions/checkout@v4 60 | - name: Run ShellCheck 61 | uses: ludeeus/action-shellcheck@master 62 | with: 63 | # Optional: specify severity level (style, info, warning, error) 64 | # severity: warning 65 | scandir: "./scripts" 66 | env: 67 | # Optional: If you want ShellCheck to follow sourced files 68 | SHELLCHECK_OPTS: -x 69 | -------------------------------------------------------------------------------- /.github/workflows/pr-agent.yaml: -------------------------------------------------------------------------------- 1 | on: 2 | pull_request: 3 | issue_comment: 4 | jobs: 5 | pr_agent_job: 6 | runs-on: ubuntu-latest 7 | permissions: 8 | issues: write 9 | pull-requests: write 10 | contents: write 11 | name: Run pr agent on every pull request, respond to user comments 12 | steps: 13 | - name: PR Agent action step 14 | id: pragent 15 | uses: Codium-ai/pr-agent@main 16 | env: 17 | OPENAI_KEY: ${{ secrets.OPENAI_KEY }} 18 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 19 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ## More on https://github.com/github/gitignore 2 | 3 | # Local .terraform directories 4 | **/.terraform/* 5 | 6 | # .tfstate files 7 | *.tfstate 8 | *.tfstate.* 9 | 10 | **.terraform.lock.hcl 11 | 12 | # Crash log files 13 | crash.log 14 | 15 | # Ignore override files as they are usually used to override resources locally and so 16 | # are not checked in 17 | override.tf 18 | override.tf.json 19 | *_override.tf 20 | *_override.tf.json 21 | 22 | # Include override files you do wish to add to version control using negated pattern 23 | # 24 | # !example_override.tf 25 | 26 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 27 | # example: *tfplan* 28 | 29 | # Ignore CLI configuration files 30 | .terraformrc 31 | terraform.rc 32 | 33 | # TFSec cache 34 | .tfsec 35 | 36 | # VSCode 37 | .vscode 38 | 39 | # Secrets directory 40 | **/.tls/* 41 | .secrets 42 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/antonbabenko/pre-commit-terraform.git 3 | rev: v1.99.0 4 | hooks: 5 | - id: terraform_fmt 6 | - id: terraform_validate 7 | - id: terraform_tflint 8 | args: 9 | - --args=--config=__GIT_WORKING_DIR__/.tflint.hcl 10 | 11 | - repo: https://github.com/pre-commit/pre-commit-hooks.git 12 | rev: v4.6.0 13 | hooks: 14 | - id: check-merge-conflict 15 | -------------------------------------------------------------------------------- /.tflint.hcl: -------------------------------------------------------------------------------- 1 | plugin "aws" { 2 | enabled = true 3 | version = "0.40.0" 4 | source = "github.com/terraform-linters/tflint-ruleset-aws" 5 | } 6 | -------------------------------------------------------------------------------- /.tool-versions: -------------------------------------------------------------------------------- 1 | terramate 0.13.2 2 | pre-commit 4.2.0 3 | opentofu 1.9.1 4 | trivy 0.63.0 5 | 
-------------------------------------------------------------------------------- /clusters/mycluster-0/crds.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.toolkit.fluxcd.io/v1 2 | kind: Kustomization 3 | metadata: 4 | name: crds 5 | namespace: flux-system 6 | spec: 7 | prune: true 8 | interval: 1m0s 9 | path: ./crds/base 10 | sourceRef: 11 | kind: GitRepository 12 | name: flux-system 13 | dependsOn: 14 | - name: namespaces 15 | -------------------------------------------------------------------------------- /clusters/mycluster-0/flux.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.toolkit.fluxcd.io/v1 2 | kind: Kustomization 3 | metadata: 4 | name: flux-operator 5 | namespace: flux-system 6 | spec: 7 | prune: true 8 | interval: 1m0s 9 | path: ./flux/operator 10 | sourceRef: 11 | kind: GitRepository 12 | name: flux-system 13 | --- 14 | apiVersion: kustomize.toolkit.fluxcd.io/v1 15 | kind: Kustomization 16 | metadata: 17 | name: flux-sources 18 | namespace: flux-system 19 | spec: 20 | prune: true 21 | interval: 1m0s 22 | path: ./flux/sources 23 | sourceRef: 24 | kind: GitRepository 25 | name: flux-system 26 | postBuild: 27 | substituteFrom: 28 | - kind: ConfigMap 29 | name: eks-mycluster-0-vars 30 | --- 31 | apiVersion: kustomize.toolkit.fluxcd.io/v1 32 | kind: Kustomization 33 | metadata: 34 | name: flux-observability 35 | namespace: flux-system 36 | spec: 37 | prune: true 38 | interval: 1m0s 39 | path: ./flux/observability 40 | sourceRef: 41 | kind: GitRepository 42 | name: flux-system 43 | postBuild: 44 | substituteFrom: 45 | - kind: ConfigMap 46 | name: eks-mycluster-0-vars 47 | dependsOn: 48 | - name: security 49 | --- 50 | apiVersion: kustomize.toolkit.fluxcd.io/v1 51 | kind: Kustomization 52 | metadata: 53 | name: flux-notifications 54 | namespace: flux-system 55 | spec: 56 | prune: true 57 | interval: 1m0s 58 | path: ./flux/notifications 59 | sourceRef: 60 | kind: GitRepository 61 | name: flux-system 62 | postBuild: 63 | substituteFrom: 64 | - kind: ConfigMap 65 | name: eks-mycluster-0-vars 66 | dependsOn: 67 | - name: security 68 | -------------------------------------------------------------------------------- /clusters/mycluster-0/namespaces.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.toolkit.fluxcd.io/v1 2 | kind: Kustomization 3 | metadata: 4 | name: namespaces 5 | namespace: flux-system 6 | spec: 7 | prune: true 8 | interval: 1m0s 9 | path: ./namespaces/base 10 | sourceRef: 11 | kind: GitRepository 12 | name: flux-system 13 | -------------------------------------------------------------------------------- /clusters/mycluster-0/tooling.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.toolkit.fluxcd.io/v1 2 | kind: Kustomization 3 | metadata: 4 | name: tooling 5 | namespace: flux-system 6 | spec: 7 | prune: true 8 | interval: 4m0s 9 | sourceRef: 10 | kind: GitRepository 11 | name: flux-system 12 | path: ./tooling/mycluster-0 13 | postBuild: 14 | substitute: 15 | domain_name: "cloud.ogenki.io" 16 | substituteFrom: 17 | - kind: ConfigMap 18 | name: eks-mycluster-0-vars 19 | dependsOn: 20 | - name: observability 21 | - name: infrastructure 22 | healthChecks: 23 | [] 24 | # - apiVersion: helm.toolkit.fluxcd.io/v2 25 | # kind: HelmRelease 26 | # name: harbor 27 | # namespace: tooling 28 | # - apiVersion: 
helm.toolkit.fluxcd.io/v2 29 | # kind: HelmRelease 30 | # name: gha-runner-scale-set-controller 31 | # namespace: tooling 32 | # - apiVersion: helm.toolkit.fluxcd.io/v2 33 | # kind: HelmRelease 34 | # name: gha-runner-scale-set 35 | # namespace: tooling 36 | -------------------------------------------------------------------------------- /crds/base/external-secrets/source.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: GitRepository 3 | metadata: 4 | name: external-secrets 5 | spec: 6 | interval: 5m0s 7 | url: https://github.com/external-secrets/external-secrets 8 | ref: 9 | tag: v0.17.0 10 | -------------------------------------------------------------------------------- /crds/base/helmrelease-prometheus-operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: crds-prometheus-operator 5 | namespace: observability 6 | spec: 7 | releaseName: crds-prometheus-operator 8 | chart: 9 | spec: 10 | chart: prometheus-operator-crds 11 | sourceRef: 12 | kind: HelmRepository 13 | name: prometheus-community 14 | version: "20.0.1" 15 | interval: 10m0s 16 | install: 17 | createNamespace: true 18 | remediation: 19 | retries: 3 20 | values: {} 21 | -------------------------------------------------------------------------------- /crds/base/kustomization-actions-runner-controller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.toolkit.fluxcd.io/v1 2 | kind: Kustomization 3 | metadata: 4 | name: crds-actions-runner-controller 5 | namespace: tooling 6 | spec: 7 | interval: 10m 8 | sourceRef: 9 | kind: GitRepository 10 | name: actions-runner-controller 11 | path: "./config/crd/bases" 12 | prune: true 13 | timeout: 10m 14 | -------------------------------------------------------------------------------- /crds/base/kustomization-cloudnative-pg.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.toolkit.fluxcd.io/v1 2 | kind: Kustomization 3 | metadata: 4 | name: crds-cloudnative-pg 5 | namespace: infrastructure 6 | spec: 7 | interval: 10m 8 | sourceRef: 9 | kind: GitRepository 10 | name: cloudnative-pg 11 | path: "./config/crd/bases" 12 | prune: true 13 | timeout: 10m 14 | -------------------------------------------------------------------------------- /crds/base/kustomization-external-secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.toolkit.fluxcd.io/v1 2 | kind: Kustomization 3 | metadata: 4 | name: crds-external-secrets 5 | namespace: security 6 | spec: 7 | interval: 2m 8 | targetNamespace: kube-system 9 | sourceRef: 10 | kind: GitRepository 11 | name: external-secrets 12 | path: "./deploy/crds" 13 | prune: true 14 | timeout: 1m 15 | -------------------------------------------------------------------------------- /crds/base/kustomization-gateway-api.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.toolkit.fluxcd.io/v1 2 | kind: Kustomization 3 | metadata: 4 | name: crds-gateway-api 5 | namespace: kube-system 6 | spec: 7 | interval: 10m 8 | targetNamespace: kube-system 9 | sourceRef: 10 | kind: GitRepository 11 | name: gateway-api 12 | path: "./config/crd/experimental" 13 | prune: true 14 | timeout: 1m 15 | 
-------------------------------------------------------------------------------- /crds/base/kustomization-grafana-operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.toolkit.fluxcd.io/v1 2 | kind: Kustomization 3 | metadata: 4 | name: crds-grafana-operator 5 | namespace: observability 6 | spec: 7 | interval: 10m 8 | targetNamespace: observability 9 | sourceRef: 10 | kind: GitRepository 11 | name: grafana-operator 12 | path: "./config/crd/bases" 13 | prune: true 14 | timeout: 1m 15 | -------------------------------------------------------------------------------- /crds/base/kustomization-kyverno.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.toolkit.fluxcd.io/v1 2 | kind: Kustomization 3 | metadata: 4 | name: crds-kyverno 5 | namespace: security 6 | spec: 7 | interval: 10m 8 | targetNamespace: kube-system 9 | sourceRef: 10 | kind: GitRepository 11 | name: kyverno 12 | path: "./config/crds" 13 | prune: true 14 | timeout: 1m 15 | -------------------------------------------------------------------------------- /crds/base/kustomization-victoria-metrics-operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.toolkit.fluxcd.io/v1 2 | kind: Kustomization 3 | metadata: 4 | name: crds-victoria-metrics-operator 5 | namespace: observability 6 | spec: 7 | interval: 10m 8 | targetNamespace: observability 9 | sourceRef: 10 | kind: GitRepository 11 | name: victoria-metrics-operator 12 | path: "./config/crd/overlay" 13 | prune: true 14 | timeout: 1m 15 | -------------------------------------------------------------------------------- /crds/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - crds-cert-manager.yaml 6 | - helmrelease-prometheus-operator.yaml 7 | - kustomization-actions-runner-controller.yaml 8 | - kustomization-external-secrets.yaml 9 | - kustomization-gateway-api.yaml 10 | - kustomization-grafana-operator.yaml 11 | - kustomization-kyverno.yaml 12 | - kustomization-cloudnative-pg.yaml 13 | - kustomization-victoria-metrics-operator.yaml 14 | -------------------------------------------------------------------------------- /flux/notifications/alert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: notification.toolkit.fluxcd.io/v1beta3 2 | kind: Alert 3 | metadata: 4 | name: slack 5 | namespace: flux-system 6 | spec: 7 | summary: "${cluster_name} cluster components impacted" 8 | providerRef: 9 | name: flux-slack-app 10 | eventSeverity: error 11 | eventSources: 12 | - kind: GitRepository 13 | name: "*" 14 | - kind: Kustomization 15 | name: "*" 16 | -------------------------------------------------------------------------------- /flux/notifications/externalsecret-flux-slack-app.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: flux-slack-app 5 | namespace: flux-system 6 | spec: 7 | dataFrom: 8 | - extract: 9 | conversionStrategy: Default 10 | key: observability/flux/slack-app 11 | refreshInterval: 1h 12 | secretStoreRef: 13 | kind: ClusterSecretStore 14 | name: clustersecretstore 15 | target: 16 | creationPolicy: Owner 17 | deletionPolicy: Retain 18 | name: flux-slack-app 19 | 
-------------------------------------------------------------------------------- /flux/notifications/provider.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: notification.toolkit.fluxcd.io/v1beta3 2 | kind: Provider 3 | metadata: 4 | name: flux-slack-app 5 | namespace: flux-system 6 | spec: 7 | type: slack 8 | channel: alerts 9 | address: https://slack.com/api/chat.postMessage 10 | secretRef: 11 | name: flux-slack-app 12 | -------------------------------------------------------------------------------- /flux/observability/controllers-vmpodscrape.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: operator.victoriametrics.com/v1beta1 2 | kind: VMPodScrape 3 | metadata: 4 | name: flux-system 5 | namespace: flux-system 6 | spec: 7 | namespaceSelector: 8 | matchNames: 9 | - flux-system 10 | selector: 11 | matchExpressions: 12 | - key: app 13 | operator: In 14 | values: 15 | - helm-controller 16 | - source-controller 17 | - kustomize-controller 18 | - notification-controller 19 | - image-automation-controller 20 | - image-reflector-controller 21 | podMetricsEndpoints: 22 | - targetPort: http-prom 23 | -------------------------------------------------------------------------------- /flux/observability/grafana-dashboards.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: grafana.integreatly.org/v1beta1 3 | kind: GrafanaDashboard 4 | metadata: 5 | name: flux-cluster 6 | namespace: flux-system 7 | spec: 8 | folderRef: "flux" 9 | allowCrossNamespaceImport: true 10 | datasources: 11 | - inputName: "DS_PROMETHEUS" 12 | datasourceName: "VictoriaMetrics" 13 | instanceSelector: 14 | matchLabels: 15 | dashboards: "grafana" 16 | url: "https://raw.githubusercontent.com/fluxcd/flux2-monitoring-example/refs/heads/main/monitoring/configs/dashboards/cluster.json" 17 | --- 18 | apiVersion: grafana.integreatly.org/v1beta1 19 | kind: GrafanaDashboard 20 | metadata: 21 | name: flux-control-plane 22 | namespace: flux-system 23 | spec: 24 | folderRef: "flux" 25 | allowCrossNamespaceImport: true 26 | datasources: 27 | - inputName: "DS_PROMETHEUS" 28 | datasourceName: "VictoriaMetrics" 29 | instanceSelector: 30 | matchLabels: 31 | dashboards: "grafana" 32 | url: "https://raw.githubusercontent.com/fluxcd/flux2-monitoring-example/refs/heads/main/monitoring/configs/dashboards/control-plane.json" -------------------------------------------------------------------------------- /flux/observability/grafana-folder.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: grafana.integreatly.org/v1beta1 2 | kind: GrafanaFolder 3 | metadata: 4 | name: flux 5 | namespace: flux-system 6 | spec: 7 | allowCrossNamespaceImport: true 8 | instanceSelector: 9 | matchLabels: 10 | dashboards: "grafana" 11 | -------------------------------------------------------------------------------- /flux/observability/operator-vmservicescrape.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: operator.victoriametrics.com/v1beta1 2 | kind: VMServiceScrape 3 | metadata: 4 | name: flux-operator 5 | namespace: flux-system 6 | labels: 7 | app.kubernetes.io/name: flux-operator 8 | spec: 9 | namespaceSelector: 10 | matchNames: 11 | - flux-system 12 | selector: 13 | matchLabels: 14 | app.kubernetes.io/name: flux-operator 15 | endpoints: 16 | - targetPort: 8080 17 | path: /metrics 18 | interval: 60s 
19 | scrapeTimeout: 30s 20 | -------------------------------------------------------------------------------- /flux/observability/vmrule.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: operator.victoriametrics.com/v1beta1 2 | kind: VMRule 3 | metadata: 4 | labels: 5 | prometheus-instance: main 6 | name: flux-system 7 | namespace: flux-system 8 | spec: 9 | groups: 10 | - name: flux-system 11 | rules: 12 | - alert: FluxReconciliationFailure 13 | annotations: 14 | message: Flux resource has been unhealthy for more than 5m 15 | description: "{{ $labels.kind }} {{ $labels.exported_namespace }}/{{ $labels.name }} reconciliation has been failing for more than ten minutes." 16 | runbook_url: "https://fluxcd.io/flux/cheatsheets/troubleshooting/" 17 | dashboard: "https://grafana.priv.${domain_name}/dashboards" 18 | expr: max(gotk_reconcile_condition{status="False",type="Ready"}) by (exported_namespace, name, kind) + on(exported_namespace, name, kind) (max(gotk_reconcile_condition{status="Deleted"}) by (exported_namespace, name, kind)) * 2 == 1 19 | for: 10m 20 | labels: 21 | severity: warning 22 | - alert: FluxHelmOperatorErrors 23 | annotations: 24 | message: Flux Helm operator errors 25 | description: > 26 | There is an issue deploying `{{ $labels.release_name }}` release helm chart. 27 | Errors count `{{ $value }}`. 28 | runbook_url: "https://fluxcd.io/flux/cheatsheets/troubleshooting/" 29 | dashboard: "https://grafana.priv.${domain_name}/dashboards" 30 | for: 5m 31 | expr: sum(increase(flux_helm_operator_release_duration_seconds_count{success="false"}[5m])) by (release_name) > 0 32 | labels: 33 | severity: warning 34 | - alert: FluxSuspended 35 | annotations: 36 | message: (Flux) Resource suspended for more than 45m 37 | description: "`{{ $labels.kind }}` `{{ $labels.name }}` in namespace `{{ $labels.exported_namespace }}` is suspended." 
38 | runbook_url: "https://fluxcd.io/flux/cheatsheets/troubleshooting/" 39 | dashboard: "https://grafana.priv.${domain_name}/dashboards" 40 | expr: sum(gotk_suspend_status) by (name, kind, exported_namespace) > 0 41 | for: 45m 42 | labels: 43 | severity: warning 44 | -------------------------------------------------------------------------------- /flux/operator/helmrelease.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: flux-operator 5 | namespace: flux-system 6 | spec: 7 | interval: 10m 8 | releaseName: flux-operator 9 | chartRef: 10 | kind: OCIRepository 11 | name: flux-operator 12 | -------------------------------------------------------------------------------- /flux/sources/gitrepo-actions-runner-controller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: GitRepository 3 | metadata: 4 | name: actions-runner-controller 5 | namespace: tooling 6 | spec: 7 | interval: 5m0s 8 | url: https://github.com/actions/actions-runner-controller 9 | ref: 10 | # renovate: regex:^gha-runner-scale-set-.*$ 11 | tag: gha-runner-scale-set-0.9.2 12 | -------------------------------------------------------------------------------- /flux/sources/gitrepo-cloudnative-pg.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: GitRepository 3 | metadata: 4 | name: cloudnative-pg 5 | namespace: infrastructure 6 | spec: 7 | interval: 5m0s 8 | url: https://github.com/cloudnative-pg/cloudnative-pg.git 9 | ref: 10 | tag: v1.26.0 11 | -------------------------------------------------------------------------------- /flux/sources/gitrepo-external-secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: GitRepository 3 | metadata: 4 | name: external-secrets 5 | namespace: security 6 | spec: 7 | interval: 5m0s 8 | url: https://github.com/external-secrets/external-secrets 9 | ref: 10 | tag: v0.17.0 11 | -------------------------------------------------------------------------------- /flux/sources/gitrepo-gateway-api.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: GitRepository 3 | metadata: 4 | name: gateway-api 5 | namespace: kube-system 6 | spec: 7 | interval: 5m0s 8 | url: https://github.com/kubernetes-sigs/gateway-api 9 | ref: 10 | tag: v1.3.0 11 | -------------------------------------------------------------------------------- /flux/sources/gitrepo-grafana-operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: GitRepository 3 | metadata: 4 | name: grafana-operator 5 | namespace: observability 6 | spec: 7 | interval: 5m0s 8 | url: https://github.com/grafana/grafana-operator 9 | ref: 10 | tag: v5.18.0 11 | -------------------------------------------------------------------------------- /flux/sources/gitrepo-kyverno.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: GitRepository 3 | metadata: 4 | name: kyverno 5 | namespace: security 6 | spec: 7 | interval: 5m0s 8 | url: https://github.com/kyverno/kyverno 9 | ref: 10 | tag: v1.14.2 11 | 
-------------------------------------------------------------------------------- /flux/sources/gitrepo-victoria-metrics.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: GitRepository 3 | metadata: 4 | name: victoria-metrics-operator 5 | namespace: observability 6 | spec: 7 | interval: 5m0s 8 | url: https://github.com/VictoriaMetrics/operator.git 9 | ref: 10 | tag: v0.59.1 11 | -------------------------------------------------------------------------------- /flux/sources/helmrepo-actions-runner-controller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: HelmRepository 3 | metadata: 4 | name: gha-runner-scale-set 5 | namespace: tooling 6 | spec: 7 | type: "oci" 8 | interval: 5m0s 9 | url: oci://ghcr.io/actions/actions-runner-controller-charts 10 | -------------------------------------------------------------------------------- /flux/sources/helmrepo-bitnami.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: HelmRepository 3 | metadata: 4 | name: bitnami 5 | namespace: flux-system 6 | spec: 7 | type: "oci" 8 | interval: 5m 9 | url: oci://registry-1.docker.io/bitnamicharts 10 | -------------------------------------------------------------------------------- /flux/sources/helmrepo-cloudnative-pg.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: HelmRepository 3 | metadata: 4 | name: cloudnative-pg 5 | namespace: infrastructure 6 | spec: 7 | interval: 5m 8 | url: https://cloudnative-pg.github.io/charts 9 | -------------------------------------------------------------------------------- /flux/sources/helmrepo-crossplane.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: HelmRepository 3 | metadata: 4 | name: crossplane 5 | namespace: crossplane-system 6 | spec: 7 | interval: 30m 8 | url: https://charts.crossplane.io/stable 9 | -------------------------------------------------------------------------------- /flux/sources/helmrepo-eks.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: HelmRepository 3 | metadata: 4 | name: eks 5 | namespace: kube-system 6 | spec: 7 | interval: 30m 8 | url: https://aws.github.io/eks-charts 9 | -------------------------------------------------------------------------------- /flux/sources/helmrepo-external-dns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: HelmRepository 3 | metadata: 4 | name: external-dns 5 | namespace: kube-system 6 | spec: 7 | interval: 30m 8 | url: https://kubernetes-sigs.github.io/external-dns/ 9 | -------------------------------------------------------------------------------- /flux/sources/helmrepo-external-secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: HelmRepository 3 | metadata: 4 | name: external-secrets 5 | namespace: security 6 | spec: 7 | interval: 30m 8 | url: https://charts.external-secrets.io 9 | -------------------------------------------------------------------------------- 
/flux/sources/helmrepo-grafana-oci.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: HelmRepository 3 | metadata: 4 | name: grafana-oci 5 | namespace: observability 6 | spec: 7 | interval: 5m 8 | type: oci 9 | url: oci://ghcr.io/grafana/helm-charts/ 10 | -------------------------------------------------------------------------------- /flux/sources/helmrepo-grafana.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: HelmRepository 3 | metadata: 4 | name: grafana 5 | namespace: observability 6 | spec: 7 | interval: 5m 8 | url: https://grafana.github.io/helm-charts 9 | -------------------------------------------------------------------------------- /flux/sources/helmrepo-harbor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: HelmRepository 3 | metadata: 4 | name: harbor 5 | namespace: tooling 6 | spec: 7 | interval: 5m 8 | url: https://helm.goharbor.io 9 | -------------------------------------------------------------------------------- /flux/sources/helmrepo-headlamp.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: HelmRepository 3 | metadata: 4 | name: headlamp 5 | namespace: tooling 6 | spec: 7 | interval: 24h 8 | url: https://kubernetes-sigs.github.io/headlamp/ 9 | -------------------------------------------------------------------------------- /flux/sources/helmrepo-jetstack.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: HelmRepository 3 | metadata: 4 | name: jetstack 5 | namespace: security 6 | spec: 7 | interval: 30m 8 | url: https://charts.jetstack.io 9 | -------------------------------------------------------------------------------- /flux/sources/helmrepo-kyverno.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: HelmRepository 3 | metadata: 4 | name: kyverno 5 | namespace: security 6 | spec: 7 | interval: 30m 8 | url: https://kyverno.github.io/kyverno/ 9 | -------------------------------------------------------------------------------- /flux/sources/helmrepo-loggen.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: HelmRepository 3 | metadata: 4 | name: loggen 5 | namespace: observability 6 | spec: 7 | interval: 30m 8 | url: https://smana.github.io/loggen/helm/loggen 9 | -------------------------------------------------------------------------------- /flux/sources/helmrepo-prometheus-community.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: HelmRepository 3 | metadata: 4 | name: prometheus-community 5 | namespace: observability 6 | spec: 7 | interval: 5m 8 | url: https://prometheus-community.github.io/helm-charts 9 | -------------------------------------------------------------------------------- /flux/sources/helmrepo-vector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: HelmRepository 3 | metadata: 4 | name: vector 5 | namespace: observability 6 | spec: 7 | interval: 30m 
8 | url: https://helm.vector.dev 9 | -------------------------------------------------------------------------------- /flux/sources/helmrepo-victoria-metrics.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: HelmRepository 3 | metadata: 4 | name: victoria-metrics 5 | namespace: observability 6 | spec: 7 | interval: 2h 8 | url: https://victoriametrics.github.io/helm-charts 9 | -------------------------------------------------------------------------------- /flux/sources/helmrepo-zitadel.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1 2 | kind: HelmRepository 3 | metadata: 4 | name: zitadel 5 | namespace: security 6 | spec: 7 | interval: 24h 8 | url: https://charts.zitadel.com 9 | -------------------------------------------------------------------------------- /flux/sources/ocirepo-flux-operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1beta2 2 | kind: OCIRepository 3 | metadata: 4 | name: flux-operator 5 | namespace: flux-system 6 | spec: 7 | interval: 10m 8 | url: oci://ghcr.io/controlplaneio-fluxcd/charts/flux-operator 9 | ref: 10 | semver: "0.18.0" 11 | -------------------------------------------------------------------------------- /infrastructure/base/aws-load-balancer-controller/helmrelease.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: aws-load-balancer-controller 5 | spec: 6 | releaseName: aws-load-balancer-controller 7 | driftDetection: 8 | mode: enabled 9 | chart: 10 | spec: 11 | chart: aws-load-balancer-controller 12 | sourceRef: 13 | kind: HelmRepository 14 | name: eks 15 | namespace: kube-system 16 | version: "1.13.2" 17 | interval: 3m0s 18 | install: 19 | remediation: 20 | retries: 3 21 | values: 22 | clusterName: ${cluster_name} 23 | serviceAccount: 24 | # Has to match the EKS Pod Identity ServiceAccount 25 | name: load-balancer-controller 26 | defaultTags: 27 | environment: ${environment} 28 | clusterName: ${cluster_name} 29 | replicaCount: 1 30 | resources: 31 | limits: 32 | cpu: 100m 33 | memory: 128Mi 34 | vpcId: ${vpc_id} 35 | -------------------------------------------------------------------------------- /infrastructure/base/aws-load-balancer-controller/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: kube-system 4 | 5 | resources: 6 | - helmrelease.yaml 7 | -------------------------------------------------------------------------------- /infrastructure/base/cilium/grafana-folder.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: grafana.integreatly.org/v1beta1 2 | kind: GrafanaFolder 3 | metadata: 4 | name: cilium 5 | spec: 6 | allowCrossNamespaceImport: true 7 | instanceSelector: 8 | matchLabels: 9 | dashboards: "grafana" 10 | -------------------------------------------------------------------------------- /infrastructure/base/cilium/httproute-hubble-ui.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: gateway.networking.k8s.io/v1 2 | kind: HTTPRoute 3 | metadata: 4 | name: hubble-ui 5 | namespace: kube-system 6 | spec: 7 | parentRefs: 8 | - 
name: platform-private 9 | namespace: infrastructure 10 | hostnames: 11 | - "hubble-${cluster_name}.priv.${domain_name}" 12 | rules: 13 | - backendRefs: 14 | - name: hubble-ui 15 | port: 80 16 | -------------------------------------------------------------------------------- /infrastructure/base/cilium/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: kube-system 4 | 5 | resources: 6 | - grafana-dashboards.yaml 7 | - grafana-folder.yaml 8 | - httproute-hubble-ui.yaml 9 | - vmrules.yaml 10 | - vmservicescrapes.yaml 11 | -------------------------------------------------------------------------------- /infrastructure/base/cloudnative-pg/grafana-dashboard.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: grafana.integreatly.org/v1beta1 2 | kind: GrafanaDashboard 3 | metadata: 4 | name: databases-cloudnative-pg 5 | spec: 6 | allowCrossNamespaceImport: true 7 | folderRef: "databases" 8 | datasources: 9 | - inputName: "DS_PROMETHEUS" 10 | datasourceName: "VictoriaMetrics" 11 | instanceSelector: 12 | matchLabels: 13 | dashboards: "grafana" 14 | url: "https://grafana.com/api/dashboards/20417/revisions/3/download" 15 | -------------------------------------------------------------------------------- /infrastructure/base/cloudnative-pg/helmrelease.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: cloudnative-pg 5 | spec: 6 | releaseName: cloudnative-pg 7 | driftDetection: 8 | mode: enabled 9 | chart: 10 | spec: 11 | chart: cloudnative-pg 12 | sourceRef: 13 | kind: HelmRepository 14 | name: cloudnative-pg 15 | namespace: infrastructure 16 | version: "0.24.0" 17 | interval: 10m0s 18 | install: 19 | remediation: 20 | retries: 3 21 | values: 22 | crds: 23 | create: false 24 | monitoring: 25 | podMonitorEnabled: true 26 | resources: 27 | limits: 28 | memory: 400Mi 29 | requests: 30 | cpu: 300m 31 | memory: 400Mi 32 | -------------------------------------------------------------------------------- /infrastructure/base/cloudnative-pg/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: infrastructure 4 | 5 | resources: 6 | - grafana-dashboard.yaml 7 | - helmrelease.yaml 8 | - s3-bucket.yaml 9 | -------------------------------------------------------------------------------- /infrastructure/base/cloudnative-pg/s3-bucket.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: s3.aws.upbound.io/v1beta1 2 | kind: Bucket 3 | metadata: 4 | name: cnpg-backups 5 | annotations: 6 | crossplane.io/external-name: ${region}-ogenki-cnpg-backups 7 | spec: 8 | deletionPolicy: Orphan # The bucket should not be deleted when the resource is deleted in Crossplane 9 | forProvider: 10 | region: ${region} 11 | -------------------------------------------------------------------------------- /infrastructure/base/crossplane/README.md: -------------------------------------------------------------------------------- 1 | # Crossplane Configuration 2 | 3 | ## Writing and Pushing Functions 4 | 5 | We have chosen [KCL (Kusion Configuration Language)](https://github.com/crossplane-contrib/function-kcl) to write most of the logic in our Crossplane 
compositions. These functions are packaged as OCI artifacts. For this repository, we use ephemeral OCI registries because we frequently destroy and recreate the platform. However, for production environments, more persistent solutions should be considered. 6 | 7 | Here is an example of creating and pushing a composition function for an RDS instance: 8 | 9 | ```console 10 | cd infrastructure/base/crossplane/configuration/kcl 11 | kcl mod init rdsinstance 12 | ``` 13 | 14 | After writing the code, we can render the output directly from the module directory with the following command: 15 | 16 | ```console 17 | cd rdsinstance 18 | kcl run -Y settings-example.yaml 19 | ``` 20 | 21 | Then you can push it to an OCI registry as follows: 22 | 23 | ```console 24 | cd rdsinstance 25 | kcl mod push oci://ttl.sh/ogenki-cnref/rdsinstance:v0.0.1-24h 26 | ``` 27 | 28 | Here we're using [TTL.sh](https://ttl.sh/) and the OCI artifact will be available for 24 hours, as specified in the tag. You can then reference it in your Crossplane composition: 29 | 30 | ```yaml 31 | ... 32 | spec: 33 | ... 34 | pipeline: 35 | ... 36 | - step: rds 37 | functionRef: 38 | name: function-kcl 39 | input: 40 | apiVersion: krm.kcl.dev/v1alpha1 41 | kind: KCLRun 42 | spec: 43 | target: Resources 44 | source: oci://ttl.sh/ogenki-cnref/rdsinstance:v0.0.1-24h 45 | ``` 46 | 47 | ## Validating a composition 48 | 49 | To validate a composition, such as `sqlinstance`, you can use Crossplane's `render` command with example inputs. Navigate to the Crossplane configuration directory and run the following command: 50 | 51 | ```console 52 | cd infrastructure/base/crossplane/configuration 53 | crossplane render --extra-resources examples/environmentconfig.yaml examples/sqlinstance.yaml sql-instance-composition.yaml functions.yaml 54 | ``` 55 | 56 | This will render and validate the composition based on the provided example configurations.
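
For orientation, below is a minimal, hypothetical `main.k` sketch showing the general shape of such a KCL module. The `rdsinstance` name, the `Instance` resource kind, and the `forProvider` fields are illustrative only; the real modules under `kcl/cloudnativepg` and `kcl/eks-pod-identity` are the authoritative references. The sketch assumes the usual function-kcl conventions: inputs are read from `option("params")` (the observed composite resource under `oxr`, the pipeline context under `ctx`), and the composed resources are returned in an `items` list.

```kcl
# Hypothetical sketch only -- not a module from this repository.
# function-kcl exposes the request under option("params"):
#   - oxr: the observed composite resource (XR)
#   - ctx: the pipeline context, including the EnvironmentConfig
#     injected by function-environment-configs
_params = option("params")
_oxr = _params.oxr
_env = _params.ctx["apiextensions.crossplane.io/environment"]

_name = _oxr.spec.claimRef.name

# Resources listed in `items` are handed back to Crossplane for creation.
items = [
    {
        apiVersion = "rds.aws.upbound.io/v1beta1"
        kind = "Instance"
        metadata.name = "${_name}-rds"
        spec.forProvider = {
            region = _env.region
            engine = "postgres"
            instanceClass = "db.t3.micro"   # illustrative values
            allocatedStorage = 20
        }
    }
]
```

Rendering such a module with `kcl run -Y settings-example.yaml` from its directory prints the composed manifests, which is a convenient way to iterate locally before pushing the OCI artifact and wiring it into a composition pipeline as shown above.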
57 | -------------------------------------------------------------------------------- /infrastructure/base/crossplane/configuration/environmentconfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.crossplane.io/v1beta1 2 | kind: EnvironmentConfig 3 | metadata: 4 | name: eks-environment 5 | data: 6 | clusterName: ${cluster_name} 7 | oidcUrl: ${oidc_issuer_url} 8 | oidcHost: ${oidc_issuer_host} 9 | oidcArn: ${oidc_provider_arn} 10 | accountId: ${aws_account_id} 11 | region: ${region} 12 | vpcId: ${vpc_id} 13 | CIDRBlock: ${vpc_cidr_block} 14 | -------------------------------------------------------------------------------- /infrastructure/base/crossplane/configuration/epi-composition.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.crossplane.io/v1 2 | kind: Composition 3 | metadata: 4 | creationTimestamp: "2024-02-18T18:06:04Z" 5 | name: xepis.cloud.ogenki.io 6 | spec: 7 | compositeTypeRef: 8 | apiVersion: cloud.ogenki.io/v1alpha1 9 | kind: XEPI 10 | mode: Pipeline 11 | pipeline: 12 | - step: environmentConfigs 13 | functionRef: 14 | name: function-environment-configs 15 | input: 16 | apiVersion: environmentconfigs.fn.crossplane.io/v1beta1 17 | kind: Input 18 | spec: 19 | environmentConfigs: 20 | - type: Reference 21 | ref: 22 | name: eks-environment 23 | 24 | - step: epi 25 | functionRef: 26 | name: function-kcl 27 | input: 28 | apiVersion: krm.kcl.dev/v1alpha1 29 | kind: KCLRun 30 | spec: 31 | target: Resources 32 | source: oci://ttl.sh/ogenki-cnref/eks-pod-identity:v0.1.1-24h 33 | 34 | - step: ready 35 | functionRef: 36 | name: function-auto-ready 37 | -------------------------------------------------------------------------------- /infrastructure/base/crossplane/configuration/examples/environmentconfig.yaml: -------------------------------------------------------------------------------- 1 | # This manifest is not deployed. It is just used for validating crossplane compositions using 2 | # crossplane render --extra-resources environmentconfig-example.yaml ... 
3 | apiVersion: apiextensions.crossplane.io/v1alpha1 4 | kind: EnvironmentConfig 5 | metadata: 6 | name: eks-environment 7 | data: 8 | clusterName: "mycluster-0" 9 | oidcUrl: "https://oidc.eks.eu-west-3.amazonaws.com/id/foobar" 10 | oidcHost: "oidc.eks.eu-west-3.amazonaws.com" 11 | oidcArn: "arn:aws:iam::123456789012:oidc-provider/oidc.eks.eu-west-3.amazonaws.com/id/foobar" 12 | accountId: "123456789012" 13 | region: "eu-west-3" 14 | vpcId: "vpc-0" 15 | CIDRBlock: "10.0.0.0/16" 16 | privateSubnetIds: "subnet-0,subnet-1,subnet-2" 17 | -------------------------------------------------------------------------------- /infrastructure/base/crossplane/configuration/examples/epi.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cloud.ogenki.io/v1alpha1 2 | kind: EPI 3 | metadata: 4 | name: xplane-foobar 5 | namespace: security 6 | spec: 7 | claimRef: 8 | name: xplane-foobar 9 | namespace: security 10 | parameters: 11 | clusters: 12 | - name: "mycluster-0" 13 | region: "eu-west-3" 14 | serviceAccount: 15 | name: foobar 16 | namespace: security 17 | policyDocument: | 18 | { 19 | "Version": "2012-10-17", 20 | "Statement": [ 21 | { 22 | "Effect": "Allow", 23 | "Action": "route53:GetChange", 24 | "Resource": "arn:aws:route53:::change/*" 25 | }, 26 | { 27 | "Effect": "Allow", 28 | "Action": [ 29 | "route53:ChangeResourceRecordSets", 30 | "route53:ListResourceRecordSets" 31 | ], 32 | "Resource": "arn:aws:route53:::hostedzone/*" 33 | }, 34 | { 35 | "Effect": "Allow", 36 | "Action": "route53:ListHostedZonesByName", 37 | "Resource": "*" 38 | } 39 | ] 40 | } 41 | additionalPolicyArns: 42 | - name: "r53admin" 43 | arn: "arn:aws:iam::aws:policy/AmazonRoute53FullAccess" 44 | -------------------------------------------------------------------------------- /infrastructure/base/crossplane/configuration/examples/sqlinstance.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cloud.ogenki.io/v1alpha1 2 | kind: SQLInstance 3 | metadata: 4 | name: xplane-foobar 5 | namespace: tooling 6 | spec: 7 | claimRef: 8 | name: "myname" 9 | namespace: "myns" 10 | instances: 3 11 | size: "small" 12 | storageSize: 20Gi 13 | storageClass: "gp3" 14 | databases: 15 | - owner: "foo" 16 | name: "bar" 17 | - owner: "titi" 18 | name: "toto" 19 | - owner: "tata" 20 | name: "tutu" 21 | roles: 22 | - name: "foo" 23 | comment: "User for the bar database" 24 | inRoles: 25 | - pg_monitor 26 | - name: "titi" 27 | comment: "Superuser for the toto database" 28 | superuser: true 29 | primaryUpdateStrategy: "unsupervised" 30 | createSuperuser: true 31 | backup: 32 | schedule: "0 0 * * *" 33 | bucketName: "myname-rds-instance-backups" 34 | initSQL: | 35 | CREATE EXTENSION pg_stat_statements; 36 | CREATE EXTENSION pg_trgm; 37 | CREATE EXTENSION pgcrypto; 38 | postgresql: 39 | parameters: 40 | max_connections: 100 41 | pg_hba: 42 | - "host all all" 43 | -------------------------------------------------------------------------------- /infrastructure/base/crossplane/configuration/functions.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: pkg.crossplane.io/v1beta1 3 | kind: Function 4 | metadata: 5 | name: function-environment-configs 6 | spec: 7 | package: xpkg.upbound.io/crossplane-contrib/function-environment-configs:v0.2.0 8 | --- 9 | apiVersion: pkg.crossplane.io/v1 10 | kind: Function 11 | metadata: 12 | name: function-kcl 13 | annotations: 14 | spec: 15 | package: 
xpkg.upbound.io/crossplane-contrib/function-kcl:v0.10.10 16 | --- 17 | apiVersion: pkg.crossplane.io/v1 18 | kind: Function 19 | metadata: 20 | name: function-auto-ready 21 | spec: 22 | package: xpkg.upbound.io/crossplane-contrib/function-auto-ready:v0.4.0 23 | -------------------------------------------------------------------------------- /infrastructure/base/crossplane/configuration/kcl/cloudnativepg/kcl.mod: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "cloudnativepg" 3 | edition = "v0.11.0" 4 | version = "0.1.1" 5 | -------------------------------------------------------------------------------- /infrastructure/base/crossplane/configuration/kcl/cloudnativepg/kcl.mod.lock: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Smana/cloud-native-ref/44adb58f0deba23e63775726b658ce5c3b5d9884/infrastructure/base/crossplane/configuration/kcl/cloudnativepg/kcl.mod.lock -------------------------------------------------------------------------------- /infrastructure/base/crossplane/configuration/kcl/cloudnativepg/settings-example.yaml: -------------------------------------------------------------------------------- 1 | kcl_options: 2 | - key: params 3 | value: 4 | ctx: 5 | "apiextensions.crossplane.io/environment": 6 | "clusterName": "mycluster-0" 7 | region: "eu-west-3" 8 | oxr: 9 | spec: 10 | claimRef: 11 | name: "myname" 12 | namespace: "myns" 13 | instances: 3 14 | size: "small" 15 | primaryUpdateStrategy: "unsupervised" 16 | createSuperuser: true 17 | backup: 18 | schedule: "0 0 * * *" 19 | bucketName: "myname-rds-instance-backups" 20 | retentionPolicy: "10d" 21 | initSQL: | 22 | CREATE EXTENSION pg_stat_statements; 23 | CREATE EXTENSION pg_trgm; 24 | CREATE EXTENSION pgcrypto; 25 | postgresql: 26 | parameters: 27 | max_connections: 100 28 | pg_hba: 29 | - "host all all" 30 | storageSize: 20Gi 31 | storageClass: "gp3" 32 | roles: 33 | - name: "myname-user" 34 | comment: "User for myname CNPG Instance" 35 | superuser: false 36 | inRoles: 37 | - pg_monitor 38 | - name: "myname-root" 39 | comment: "Root user for myname CNPG Instance" 40 | superuser: true 41 | databases: 42 | - name: "db1" 43 | owner: "myname-user" 44 | - name: "db2" 45 | owner: "myname-root" 46 | -------------------------------------------------------------------------------- /infrastructure/base/crossplane/configuration/kcl/eks-pod-identity/kcl.mod: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "eks-pod-identity" 3 | edition = "v0.10.7" 4 | version = "0.1.1" 5 | -------------------------------------------------------------------------------- /infrastructure/base/crossplane/configuration/kcl/eks-pod-identity/kcl.mod.lock: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Smana/cloud-native-ref/44adb58f0deba23e63775726b658ce5c3b5d9884/infrastructure/base/crossplane/configuration/kcl/eks-pod-identity/kcl.mod.lock -------------------------------------------------------------------------------- /infrastructure/base/crossplane/configuration/kcl/eks-pod-identity/settings-example.yaml: -------------------------------------------------------------------------------- 1 | kcl_options: 2 | - key: params 3 | value: 4 | ctx: 5 | "apiextensions.crossplane.io/environment": 6 | region: "eu-west-3" 7 | oxr: 8 | spec: 9 | claimRef: 10 | name: "myname" 11 | namespace: "myns" 12 | parameters: 13 | 
clusters: 14 | - name: "mycluster-0" 15 | region: "eu-west-3" 16 | - name: "mycluster-1" 17 | region: "eu-west-1" 18 | serviceAccount: 19 | name: foobar 20 | namespace: security 21 | policyDocument: | 22 | { 23 | "Version": "2012-10-17", 24 | "Statement": [ 25 | { 26 | "Effect": "Allow", 27 | "Action": "route53:GetChange", 28 | "Resource": "arn:aws:route53:::change/*" 29 | }, 30 | { 31 | "Effect": "Allow", 32 | "Action": [ 33 | "route53:ChangeResourceRecordSets", 34 | "route53:ListResourceRecordSets" 35 | ], 36 | "Resource": "arn:aws:route53:::hostedzone/*" 37 | }, 38 | { 39 | "Effect": "Allow", 40 | "Action": "route53:ListHostedZonesByName", 41 | "Resource": "*" 42 | } 43 | ] 44 | } 45 | additionalPolicyArns: 46 | - name: "r53admin" 47 | arn: "arn:aws:iam::aws:policy/AmazonRoute53FullAccess" 48 | -------------------------------------------------------------------------------- /infrastructure/base/crossplane/configuration/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: crossplane-system 4 | 5 | resources: 6 | - environmentconfig.yaml 7 | - epi-composition.yaml 8 | - epi-definition.yaml 9 | - functions.yaml 10 | - providerconfig-aws.yaml 11 | - providerconfig-k8s.yaml 12 | - sql-instance-composition.yaml 13 | - sql-instance-definition.yaml 14 | -------------------------------------------------------------------------------- /infrastructure/base/crossplane/configuration/providerconfig-aws.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: aws.upbound.io/v1beta1 2 | kind: ProviderConfig 3 | metadata: 4 | name: default 5 | spec: 6 | credentials: 7 | source: IRSA 8 | -------------------------------------------------------------------------------- /infrastructure/base/crossplane/configuration/providerconfig-k8s.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kubernetes.crossplane.io/v1alpha1 2 | kind: ProviderConfig 3 | metadata: 4 | name: default 5 | spec: 6 | credentials: 7 | source: InjectedIdentity 8 | -------------------------------------------------------------------------------- /infrastructure/base/crossplane/configuration/sql-instance-composition.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.crossplane.io/v1 2 | kind: Composition 3 | metadata: 4 | name: xsqlinstances.cloud.ogenki.io 5 | labels: 6 | provider: aws 7 | spec: 8 | compositeTypeRef: 9 | apiVersion: cloud.ogenki.io/v1alpha1 10 | kind: XSQLInstance 11 | mode: Pipeline 12 | writeConnectionSecretsToNamespace: crossplane-system 13 | pipeline: 14 | - step: environmentConfigs 15 | functionRef: 16 | name: function-environment-configs 17 | input: 18 | apiVersion: environmentconfigs.fn.crossplane.io/v1beta1 19 | kind: Input 20 | spec: 21 | environmentConfigs: 22 | - type: Reference 23 | ref: 24 | name: eks-environment 25 | 26 | - step: cloudnativepg 27 | functionRef: 28 | name: function-kcl 29 | input: 30 | apiVersion: krm.kcl.dev/v1alpha1 31 | kind: KCLRun 32 | spec: 33 | target: Resources 34 | source: oci://ttl.sh/ogenki-cnref/cloudnativepg:v0.1.1-24h 35 | 36 | - step: ready 37 | functionRef: 38 | name: function-auto-ready 39 | -------------------------------------------------------------------------------- /infrastructure/base/crossplane/controller/helmrelease.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: crossplane 5 | spec: 6 | releaseName: crossplane 7 | driftDetection: 8 | mode: enabled 9 | chart: 10 | spec: 11 | chart: crossplane 12 | sourceRef: 13 | kind: HelmRepository 14 | name: crossplane 15 | namespace: crossplane-system 16 | version: "1.20.0" 17 | interval: 2m0s 18 | install: 19 | remediation: 20 | retries: 3 21 | values: 22 | replicas: 1 23 | 24 | resourcesCrossplane: 25 | limits: 26 | cpu: 500m 27 | memory: 512Mi 28 | 29 | metrics: 30 | enabled: true 31 | -------------------------------------------------------------------------------- /infrastructure/base/crossplane/controller/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: crossplane-system 4 | 5 | resources: 6 | - helmrelease.yaml 7 | -------------------------------------------------------------------------------- /infrastructure/base/crossplane/providers/deploymentruntimeconfig-aws.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: pkg.crossplane.io/v1beta1 2 | kind: DeploymentRuntimeConfig 3 | metadata: 4 | name: aws-config 5 | spec: 6 | serviceAccountTemplate: 7 | metadata: 8 | annotations: 9 | eks.amazonaws.com/role-arn: arn:aws:iam::${aws_account_id}:role/${cluster_name}-crossplane 10 | -------------------------------------------------------------------------------- /infrastructure/base/crossplane/providers/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - deploymentruntimeconfig-aws.yaml 6 | - provider-ec2.yaml 7 | - provider-eks.yaml 8 | - provider-iam.yaml 9 | - provider-kms.yaml 10 | - provider-kubernetes.yaml 11 | - provider-s3.yaml 12 | -------------------------------------------------------------------------------- /infrastructure/base/crossplane/providers/provider-ec2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: pkg.crossplane.io/v1 2 | kind: Provider 3 | metadata: 4 | name: provider-aws-ec2 5 | spec: 6 | package: xpkg.upbound.io/upbound/provider-aws-ec2:v1 7 | runtimeConfigRef: 8 | apiVersion: pkg.crossplane.io/v1beta1 9 | kind: DeploymentRuntimeConfig 10 | name: aws-config 11 | -------------------------------------------------------------------------------- /infrastructure/base/crossplane/providers/provider-eks.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: pkg.crossplane.io/v1 2 | kind: Provider 3 | metadata: 4 | name: provider-aws-eks 5 | spec: 6 | package: xpkg.upbound.io/upbound/provider-aws-eks:v1 7 | runtimeConfigRef: 8 | apiVersion: pkg.crossplane.io/v1beta1 9 | kind: DeploymentRuntimeConfig 10 | name: aws-config 11 | -------------------------------------------------------------------------------- /infrastructure/base/crossplane/providers/provider-iam.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: pkg.crossplane.io/v1 2 | kind: Provider 3 | metadata: 4 | name: provider-aws-iam 5 | spec: 6 | package: xpkg.upbound.io/upbound/provider-aws-iam:v1 7 | runtimeConfigRef: 8 | apiVersion: pkg.crossplane.io/v1beta1 9 | kind: DeploymentRuntimeConfig 
10 | name: aws-config 11 | -------------------------------------------------------------------------------- /infrastructure/base/crossplane/providers/provider-kms.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: pkg.crossplane.io/v1 2 | kind: Provider 3 | metadata: 4 | name: provider-aws-kms 5 | spec: 6 | package: xpkg.upbound.io/upbound/provider-aws-kms:v1 7 | runtimeConfigRef: 8 | apiVersion: pkg.crossplane.io/v1beta1 9 | kind: DeploymentRuntimeConfig 10 | name: aws-config 11 | -------------------------------------------------------------------------------- /infrastructure/base/crossplane/providers/provider-kubernetes.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: pkg.crossplane.io/v1 3 | kind: Provider 4 | metadata: 5 | name: provider-kubernetes 6 | spec: 7 | package: xpkg.upbound.io/upbound/provider-kubernetes:v0 8 | runtimeConfigRef: 9 | apiVersion: pkg.crossplane.io/v1beta1 10 | kind: DeploymentRuntimeConfig 11 | name: provider-kubernetes 12 | --- 13 | apiVersion: pkg.crossplane.io/v1beta1 14 | kind: DeploymentRuntimeConfig 15 | metadata: 16 | name: provider-kubernetes 17 | spec: 18 | serviceAccountTemplate: 19 | metadata: 20 | name: crossplane-provider-kubernetes 21 | --- 22 | apiVersion: v1 23 | kind: ServiceAccount 24 | metadata: 25 | name: crossplane-provider-kubernetes 26 | namespace: crossplane-system 27 | --- 28 | apiVersion: rbac.authorization.k8s.io/v1 29 | kind: ClusterRole 30 | metadata: 31 | namespace: crossplane-system 32 | name: crossplane-kubernetes 33 | rules: 34 | # Create the RDS service 35 | - apiGroups: [""] 36 | resources: ["services"] 37 | verbs: ["get", "create", "delete", "update", "patch"] 38 | # Create the PostgreSQL provider config for the RDS service 39 | - apiGroups: ["postgresql.sql.crossplane.io"] 40 | resources: ["providerconfigs"] 41 | verbs: ["get", "create", "delete", "update", "patch"] 42 | - apiGroups: ["rds.aws.upbound.io"] 43 | resources: ["instances"] 44 | verbs: ["get"] 45 | # Manage CNPG PostgreSQL databases, clusters, roles, and scheduled backups 46 | - apiGroups: ["postgresql.cnpg.io"] 47 | resources: ["databases", "clusters", "roles", "scheduledbackups"] 48 | verbs: ["get", "create", "delete", "update", "patch"] 49 | # Manage external secrets 50 | - apiGroups: ["external-secrets.io"] 51 | resources: ["externalsecrets"] 52 | verbs: ["get", "create", "delete", "update", "patch"] 53 | --- 54 | apiVersion: rbac.authorization.k8s.io/v1 55 | kind: ClusterRoleBinding 56 | metadata: 57 | name: crossplane-kubernetes-services 58 | namespace: crossplane-system 59 | subjects: 60 | - kind: ServiceAccount 61 | name: crossplane-provider-kubernetes 62 | namespace: crossplane-system 63 | roleRef: 64 | kind: ClusterRole 65 | name: crossplane-kubernetes 66 | apiGroup: rbac.authorization.k8s.io 67 | -------------------------------------------------------------------------------- /infrastructure/base/crossplane/providers/provider-s3.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: pkg.crossplane.io/v1 2 | kind: Provider 3 | metadata: 4 | name: provider-aws-s3 5 | spec: 6 | package: xpkg.upbound.io/upbound/provider-aws-s3:v1 7 | runtimeConfigRef: 8 | apiVersion: pkg.crossplane.io/v1beta1 9 | kind: DeploymentRuntimeConfig 10 | name: aws-config 11 | -------------------------------------------------------------------------------- /infrastructure/base/external-dns/helmrelease.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: external-dns 5 | spec: 6 | releaseName: external-dns 7 | driftDetection: 8 | mode: enabled 9 | chart: 10 | spec: 11 | chart: external-dns 12 | sourceRef: 13 | kind: HelmRepository 14 | name: external-dns 15 | namespace: kube-system 16 | version: "1.16.1" 17 | interval: 10m0s 18 | install: 19 | remediation: 20 | retries: 3 21 | values: 22 | global: 23 | imageRegistry: public.ecr.aws 24 | 25 | # Requires a recent image tag in order to support EPI 26 | image: 27 | tag: "v0.14.1" 28 | 29 | aws: 30 | region: ${region} 31 | zoneType: "public" 32 | batchChangeSize: 1000 33 | 34 | domainFilters: ["${domain_name}"] 35 | logFormat: json 36 | txtOwnerId: "${cluster_name}" 37 | resources: 38 | limits: 39 | memory: 100Mi 40 | requests: 41 | cpu: 200m 42 | memory: 100Mi 43 | -------------------------------------------------------------------------------- /infrastructure/base/external-dns/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: kube-system 4 | 5 | resources: 6 | - helmrelease.yaml 7 | -------------------------------------------------------------------------------- /infrastructure/base/gapi/example-public-gateway.yaml: -------------------------------------------------------------------------------- 1 | # This manifest is not deployed. This is just an example of how to create a public gateway for the platform. 2 | # It uses cert-manager to provision a certificate for the gateway. The certificate is referenced in the gateway spec. 3 | 4 | apiVersion: gateway.networking.k8s.io/v1 5 | kind: Gateway 6 | metadata: 7 | name: platform-public 8 | annotations: 9 | cert-manager.io/cluster-issuer: letsencrypt-prod 10 | spec: 11 | gatewayClassName: cilium 12 | infrastructure: 13 | annotations: 14 | service.beta.kubernetes.io/aws-load-balancer-scheme: "internet-facing" 15 | service.beta.kubernetes.io/aws-load-balancer-name: "ogenki-platform-public-gateway" 16 | service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: instance 17 | service.beta.kubernetes.io/aws-load-balancer-type: "external" 18 | listeners: 19 | - name: http 20 | hostname: "*.${domain_name}" 21 | port: 443 22 | protocol: HTTPS 23 | allowedRoutes: 24 | namespaces: 25 | from: Selector 26 | selector: 27 | matchExpressions: 28 | - key: kubernetes.io/metadata.name 29 | operator: In 30 | values: 31 | - observability 32 | tls: 33 | mode: Terminate 34 | certificateRefs: 35 | - name: platform-public-tls 36 | -------------------------------------------------------------------------------- /infrastructure/base/gapi/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: infrastructure 4 | 5 | resources: 6 | - platform-private-gateway.yaml 7 | - platform-private-gateway-certificate.yaml 8 | -------------------------------------------------------------------------------- /infrastructure/base/gapi/platform-private-gateway-certificate.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: private-gateway-certificate 5 | spec: 6 | secretName: private-gateway-tls 7 | duration: 2160h # 90d 8 | renewBefore: 360h # 
15d 9 | commonName: private-gateway.priv.cloud.ogenki.io 10 | dnsNames: 11 | - grafana.priv.${domain_name} 12 | - harbor.priv.${domain_name} 13 | - vm.priv.${domain_name} 14 | - headlamp.priv.${domain_name} 15 | - hubble-${cluster_name}.priv.${domain_name} 16 | - vmalertmanager-${cluster_name}.priv.${domain_name} 17 | - vmagent-${cluster_name}.priv.${domain_name} 18 | - vl.priv.${domain_name} 19 | - oncall.priv.${domain_name} 20 | - oncall-rabbitmq.priv.${domain_name} 21 | issuerRef: 22 | name: openbao 23 | kind: ClusterIssuer 24 | group: cert-manager.io 25 | -------------------------------------------------------------------------------- /infrastructure/base/gapi/platform-private-gateway.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: gateway.networking.k8s.io/v1 2 | kind: Gateway 3 | metadata: 4 | name: platform-private 5 | spec: 6 | gatewayClassName: cilium 7 | infrastructure: 8 | annotations: 9 | service.beta.kubernetes.io/aws-load-balancer-name: "ogenki-platform-private-gateway" 10 | service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: instance 11 | service.beta.kubernetes.io/aws-load-balancer-scheme: "internal" 12 | service.beta.kubernetes.io/aws-load-balancer-type: "external" 13 | external-dns.alpha.kubernetes.io/hostname: "harbor.priv.${domain_name},grafana.priv.${domain_name},vm.priv.${domain_name},headlamp.priv.${domain_name},hubble-${cluster_name}.priv.${domain_name},vmalertmanager-${cluster_name}.priv.${domain_name},vmagent-${cluster_name}.priv.${domain_name},vl.priv.${domain_name},oncall.priv.${domain_name},oncall-rabbitmq.priv.${domain_name}" 14 | listeners: 15 | - name: http 16 | hostname: "*.priv.${domain_name}" 17 | port: 443 18 | protocol: HTTPS 19 | allowedRoutes: 20 | namespaces: 21 | from: Selector 22 | selector: 23 | matchExpressions: 24 | - key: kubernetes.io/metadata.name 25 | operator: In 26 | values: 27 | - kube-system 28 | - observability 29 | - tooling 30 | tls: 31 | mode: Terminate 32 | certificateRefs: 33 | - name: private-gateway-tls 34 | -------------------------------------------------------------------------------- /infrastructure/mycluster-0/crossplane/configuration/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - ../../../base/crossplane/configuration 6 | -------------------------------------------------------------------------------- /infrastructure/mycluster-0/crossplane/controller/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - ../../../base/crossplane/controller 6 | -------------------------------------------------------------------------------- /infrastructure/mycluster-0/crossplane/providers/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - ../../../base/crossplane/providers 6 | -------------------------------------------------------------------------------- /infrastructure/mycluster-0/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - ../base/aws-load-balancer-controller 6 | - ../base/cilium 7 | - 
../base/cloudnative-pg 8 | - ../base/external-dns 9 | - ../base/gapi 10 | -------------------------------------------------------------------------------- /namespaces/base/crossplane-system.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: crossplane-system 5 | -------------------------------------------------------------------------------- /namespaces/base/harbor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: harbor 5 | -------------------------------------------------------------------------------- /namespaces/base/infrastructure.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: infrastructure 5 | -------------------------------------------------------------------------------- /namespaces/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - crossplane-system.yaml 6 | - harbor.yaml 7 | - infrastructure.yaml 8 | - observability.yaml 9 | - security.yaml 10 | - tooling.yaml 11 | -------------------------------------------------------------------------------- /namespaces/base/observability.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: observability 5 | -------------------------------------------------------------------------------- /namespaces/base/security.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: security 5 | -------------------------------------------------------------------------------- /namespaces/base/tooling.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: tooling 5 | -------------------------------------------------------------------------------- /observability/base/grafana-oncall/externalsecret-admin.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: oncall-admin 5 | namespace: observability 6 | spec: 7 | dataFrom: 8 | - extract: 9 | conversionStrategy: Default 10 | key: observability/grafana/oncall-admin 11 | refreshInterval: 20m 12 | secretStoreRef: 13 | kind: ClusterSecretStore 14 | name: clustersecretstore 15 | target: 16 | creationPolicy: Owner 17 | deletionPolicy: Retain 18 | name: grafana-oncall 19 | -------------------------------------------------------------------------------- /observability/base/grafana-oncall/externalsecret-rabbitmq.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: oncall-rabbitmq 5 | spec: 6 | secretStoreRef: 7 | kind: ClusterSecretStore 8 | name: clustersecretstore 9 | refreshInterval: 1h 10 | target: 11 | creationPolicy: Owner 12 | deletionPolicy: Retain 13 | name: oncall-rabbitmq 14 | dataFrom: 15 | - extract: 16 | conversionStrategy: Default 17 | key: observability/grafana/oncall-rabbitmq 18 | 
-------------------------------------------------------------------------------- /observability/base/grafana-oncall/externalsecret-slackapp.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: oncall-slack-app 5 | spec: 6 | dataFrom: 7 | - extract: 8 | conversionStrategy: Default 9 | key: observability/grafana/oncall-slackapp 10 | refreshInterval: 20m 11 | secretStoreRef: 12 | kind: ClusterSecretStore 13 | name: clustersecretstore 14 | target: 15 | creationPolicy: Owner 16 | deletionPolicy: Retain 17 | name: oncall-slack-app 18 | -------------------------------------------------------------------------------- /observability/base/grafana-oncall/externalsecret-valkey.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: oncall-valkey 5 | namespace: observability 6 | spec: 7 | dataFrom: 8 | - extract: 9 | conversionStrategy: Default 10 | key: observability/grafana/oncall-valkey 11 | refreshInterval: 20m 12 | secretStoreRef: 13 | kind: ClusterSecretStore 14 | name: clustersecretstore 15 | target: 16 | creationPolicy: Owner 17 | deletionPolicy: Retain 18 | name: oncall-valkey 19 | -------------------------------------------------------------------------------- /observability/base/grafana-oncall/helmrelease-oncall.yaml: -------------------------------------------------------------------------------- 1 | # Based on https://grafana.com/docs/grafana-oncall/latest/setup/install/helm/install-scalable/ 2 | apiVersion: helm.toolkit.fluxcd.io/v2 3 | kind: HelmRelease 4 | metadata: 5 | name: oncall 6 | spec: 7 | releaseName: oncall 8 | driftDetection: 9 | mode: enabled 10 | chart: 11 | spec: 12 | chart: oncall 13 | sourceRef: 14 | kind: HelmRepository 15 | name: grafana 16 | namespace: observability 17 | version: "1.16.1" 18 | interval: 5m0s 19 | timeout: 15m 20 | install: 21 | remediation: 22 | retries: 3 23 | values: 24 | base_url: oncall.priv.${domain_name} 25 | base_url_protocol: https 26 | 27 | engine: 28 | replicaCount: 1 29 | resources: 30 | limits: 31 | cpu: 500m 32 | memory: 768Mi 33 | 34 | celery: 35 | replicaCount: 1 36 | resources: 37 | limits: 38 | cpu: 300m 39 | memory: 516Mi 40 | 41 | oncall: 42 | secrets: 43 | existingSecret: "grafana-oncall" 44 | secretKey: "secret_key" 45 | mirageSecretKey: "mirage_secret_key" 46 | slack: 47 | enabled: true 48 | existingSecret: "oncall-slack-app" 49 | clientIdKey: "client_id" 50 | clientSecretKey: "client_secret" 51 | signingSecretKey: "signing_secret" 52 | 53 | ingress: 54 | enabled: false 55 | 56 | ingress-nginx: 57 | enabled: false 58 | 59 | cert-manager: 60 | enabled: false 61 | 62 | database: 63 | type: postgresql 64 | 65 | mariadb: 66 | enabled: false 67 | 68 | externalPostgresql: 69 | host: xplane-oncall-cnpg-cluster-rw 70 | port: 5432 71 | db_name: "oncall" 72 | user: "oncall" 73 | existingSecret: "xplane-oncall-cnpg-role-oncall" 74 | passwordKey: "password" 75 | 76 | rabbitmq: 77 | enabled: false 78 | 79 | externalRabbitmq: 80 | host: oncall-rabbitmq 81 | port: 5672 82 | existingSecret: oncall-rabbitmq 83 | passwordKey: "password" 84 | usernameKey: "username" 85 | 86 | redis: 87 | enabled: false 88 | 89 | externalRedis: 90 | host: oncall-valkey-primary 91 | port: 6379 92 | username: default 93 | existingSecret: "oncall-valkey" 94 | passwordKey: password 95 | 96 | externalGrafana: 97 | url:
https://grafana.priv.${domain_name} 98 | 99 | networkPolicy: 100 | enabled: false 101 | -------------------------------------------------------------------------------- /observability/base/grafana-oncall/helmrelease-valkey.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: oncall-valkey 5 | spec: 6 | releaseName: oncall-valkey 7 | driftDetection: 8 | mode: enabled 9 | chart: 10 | spec: 11 | chart: valkey 12 | sourceRef: 13 | kind: HelmRepository 14 | name: bitnami 15 | namespace: flux-system 16 | version: "3.0.9" 17 | interval: 10m0s 18 | install: 19 | remediation: 20 | retries: 3 21 | values: 22 | auth: 23 | existingSecret: "oncall-valkey" 24 | existingSecretPasswordKey: "password" 25 | usePasswordFiles: false 26 | 27 | primary: 28 | ## Valkey master resource requests and limits 29 | ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ 30 | ## @param master.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if master.resources is set (master.resources is recommended for production). 31 | ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 32 | ## 33 | resourcesPreset: "nano" 34 | persistence: 35 | size: 4Gi 36 | 37 | replica: 38 | resourcesPreset: "nano" 39 | persistence: 40 | size: 4Gi 41 | 42 | metrics: 43 | enabled: true 44 | serviceMonitor: 45 | enabled: true 46 | 47 | useExternalDNS: 48 | enabled: true 49 | suffix: "priv.${domain_name}" 50 | additionalAnnotations: 51 | ttl: 10 52 | -------------------------------------------------------------------------------- /observability/base/grafana-oncall/httproute-oncall.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: gateway.networking.k8s.io/v1 2 | kind: HTTPRoute 3 | metadata: 4 | name: oncall 5 | namespace: observability 6 | spec: 7 | parentRefs: 8 | - name: platform-private 9 | namespace: infrastructure 10 | hostnames: 11 | - "oncall.priv.${domain_name}" 12 | rules: 13 | - backendRefs: 14 | - name: oncall-engine 15 | port: 8080 16 | -------------------------------------------------------------------------------- /observability/base/grafana-oncall/httproute-rabbitmq.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: gateway.networking.k8s.io/v1 2 | kind: HTTPRoute 3 | metadata: 4 | name: oncall-rabbitmq 5 | namespace: observability 6 | spec: 7 | parentRefs: 8 | - name: platform-private 9 | namespace: infrastructure 10 | hostnames: 11 | - "oncall-rabbitmq.priv.${domain_name}" 12 | rules: 13 | - backendRefs: 14 | - name: oncall-rabbitmq 15 | port: 15672 16 | -------------------------------------------------------------------------------- /observability/base/grafana-oncall/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: observability 4 | 5 | resources: 6 | - externalsecret-admin.yaml 7 | - externalsecret-rabbitmq.yaml 8 | - externalsecret-slackapp.yaml 9 | - externalsecret-valkey.yaml 10 | - helmrelease-oncall.yaml 11 | - helmrelease-rabbitmq.yaml 12 | - helmrelease-valkey.yaml 13 | - httproute-oncall.yaml 14 | - httproute-rabbitmq.yaml 15 | - sqlinstance.yaml 16 | 
-------------------------------------------------------------------------------- /observability/base/grafana-oncall/sqlinstance.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cloud.ogenki.io/v1alpha1 2 | kind: SQLInstance 3 | metadata: 4 | name: xplane-oncall 5 | namespace: "observability" 6 | spec: 7 | instances: 1 8 | size: "small" 9 | storageSize: "20Gi" 10 | databases: 11 | - owner: "oncall" 12 | name: "oncall" 13 | roles: 14 | - name: "oncall" 15 | comment: "User for oncall CNPG Instance" 16 | superuser: false 17 | backup: 18 | schedule: "0 0 * * *" 19 | bucketName: "eu-west-3-ogenki-cnpg-backups" 20 | -------------------------------------------------------------------------------- /observability/base/grafana-operator/dashboards/kubernetes-karpenter.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: grafana.integreatly.org/v1beta1 2 | kind: GrafanaDashboard 3 | metadata: 4 | name: kubernetes-karpenter 5 | namespace: infrastructure 6 | spec: 7 | allowCrossNamespaceImport: true 8 | folderRef: "kubernetes" 9 | datasources: 10 | - inputName: "DS_PROMETHEUS" 11 | datasourceName: "VictoriaMetrics" 12 | instanceSelector: 13 | matchLabels: 14 | dashboards: "grafana" 15 | url: "https://grafana.com/api/dashboards/20398/revisions/1/download" 16 | -------------------------------------------------------------------------------- /observability/base/grafana-operator/dashboards/kubernetes-node-exporter-full.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: grafana.integreatly.org/v1beta1 2 | kind: GrafanaDashboard 3 | metadata: 4 | name: kubernetes-node-exporter-full 5 | namespace: infrastructure 6 | spec: 7 | allowCrossNamespaceImport: true 8 | folderRef: "kubernetes" 9 | datasources: 10 | - inputName: "DS_PROMETHEUS" 11 | datasourceName: "VictoriaMetrics" 12 | instanceSelector: 13 | matchLabels: 14 | dashboards: "grafana" 15 | url: "https://grafana.com/api/dashboards/1860/revisions/37/download" 16 | -------------------------------------------------------------------------------- /observability/base/grafana-operator/dashboards/kubernetes-views-global.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: grafana.integreatly.org/v1beta1 2 | kind: GrafanaDashboard 3 | metadata: 4 | name: kubernetes-views-global 5 | namespace: infrastructure 6 | spec: 7 | allowCrossNamespaceImport: true 8 | folderRef: "kubernetes" 9 | datasources: 10 | - inputName: "DS_PROMETHEUS" 11 | datasourceName: "VictoriaMetrics" 12 | instanceSelector: 13 | matchLabels: 14 | dashboards: "grafana" 15 | url: "https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-global.json" 16 | -------------------------------------------------------------------------------- /observability/base/grafana-operator/dashboards/kubernetes-views-namespaces.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: grafana.integreatly.org/v1beta1 2 | kind: GrafanaDashboard 3 | metadata: 4 | name: kubernetes-views-namespaces 5 | namespace: infrastructure 6 | spec: 7 | allowCrossNamespaceImport: true 8 | folderRef: "kubernetes" 9 | datasources: 10 | - inputName: "DS_PROMETHEUS" 11 | datasourceName: "VictoriaMetrics" 12 | instanceSelector: 13 | matchLabels: 14 | dashboards: "grafana" 15 | url: 
"https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-namespaces.json" 16 | -------------------------------------------------------------------------------- /observability/base/grafana-operator/dashboards/kubernetes-views-nodes.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: grafana.integreatly.org/v1beta1 2 | kind: GrafanaDashboard 3 | metadata: 4 | name: kubernetes-views-nodes 5 | namespace: infrastructure 6 | spec: 7 | allowCrossNamespaceImport: true 8 | folderRef: "kubernetes" 9 | datasources: 10 | - inputName: "DS_PROMETHEUS" 11 | datasourceName: "VictoriaMetrics" 12 | instanceSelector: 13 | matchLabels: 14 | dashboards: "grafana" 15 | url: "https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-nodes.json" 16 | -------------------------------------------------------------------------------- /observability/base/grafana-operator/dashboards/kubernetes-views-pods.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: grafana.integreatly.org/v1beta1 2 | kind: GrafanaDashboard 3 | metadata: 4 | name: kubernetes-views-pods 5 | namespace: infrastructure 6 | spec: 7 | allowCrossNamespaceImport: true 8 | folderRef: "kubernetes" 9 | datasources: 10 | - inputName: "DS_PROMETHEUS" 11 | datasourceName: "VictoriaMetrics" 12 | instanceSelector: 13 | matchLabels: 14 | dashboards: "grafana" 15 | url: "https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-pods.json" 16 | -------------------------------------------------------------------------------- /observability/base/grafana-operator/dashboards/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - kubernetes-karpenter.yaml 6 | - kubernetes-node-exporter-full.yaml 7 | - kubernetes-views-global.yaml 8 | - kubernetes-views-namespaces.yaml 9 | - kubernetes-views-nodes.yaml 10 | - kubernetes-views-pods.yaml 11 | -------------------------------------------------------------------------------- /observability/base/grafana-operator/folders/databases.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: grafana.integreatly.org/v1beta1 2 | kind: GrafanaFolder 3 | metadata: 4 | name: databases 5 | namespace: infrastructure 6 | spec: 7 | allowCrossNamespaceImport: true 8 | instanceSelector: 9 | matchLabels: 10 | dashboards: "grafana" 11 | -------------------------------------------------------------------------------- /observability/base/grafana-operator/folders/kubernetes.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: grafana.integreatly.org/v1beta1 2 | kind: GrafanaFolder 3 | metadata: 4 | name: kubernetes 5 | namespace: infrastructure 6 | spec: 7 | allowCrossNamespaceImport: true 8 | instanceSelector: 9 | matchLabels: 10 | dashboards: "grafana" 11 | -------------------------------------------------------------------------------- /observability/base/grafana-operator/folders/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - databases.yaml 6 | - kubernetes.yaml 7 | 
-------------------------------------------------------------------------------- /observability/base/grafana-operator/grafana-victoriametrics.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: grafana.integreatly.org/v1beta1 2 | kind: Grafana 3 | metadata: 4 | name: grafana-victoriametrics 5 | namespace: observability 6 | labels: 7 | dashboards: "grafana" 8 | spec: 9 | external: 10 | url: http://victoria-metrics-k8s-stack-grafana 11 | adminPassword: 12 | name: victoria-metrics-k8s-stack-grafana-envvars 13 | key: GF_SECURITY_ADMIN_PASSWORD 14 | adminUser: 15 | name: victoria-metrics-k8s-stack-grafana-envvars 16 | key: GF_SECURITY_ADMIN_USER 17 | -------------------------------------------------------------------------------- /observability/base/grafana-operator/helmrelease.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: grafana-operator 5 | namespace: observability 6 | spec: 7 | releaseName: grafana-operator 8 | driftDetection: 9 | mode: enabled 10 | chart: 11 | spec: 12 | chart: grafana-operator 13 | sourceRef: 14 | kind: HelmRepository 15 | name: grafana-oci 16 | namespace: observability 17 | version: "v5.18.0" 18 | interval: 10m0s 19 | timeout: 30m 20 | install: 21 | crds: "Skip" 22 | remediation: 23 | retries: 3 24 | values: 25 | resources: 26 | limits: 27 | cpu: 500m 28 | memory: 100Mi 29 | requests: 30 | cpu: 100m 31 | memory: 100Mi 32 | 33 | serviceMonitor: 34 | enabled: true 35 | -------------------------------------------------------------------------------- /observability/base/grafana-operator/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - dashboards 6 | - folders 7 | - grafana-victoriametrics.yaml 8 | - helmrelease.yaml 9 | -------------------------------------------------------------------------------- /observability/base/loggen/helmrelease.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: loggen 5 | spec: 6 | interval: 30m 7 | driftDetection: 8 | mode: enabled 9 | chart: 10 | spec: 11 | chart: loggen 12 | version: "0.1.4" 13 | sourceRef: 14 | kind: HelmRepository 15 | name: loggen 16 | interval: 12h 17 | values: 18 | replicaCount: 2 19 | args: 20 | - --sleep 21 | - "0.1" 22 | - --error-rate 23 | - "0.1" 24 | - --format 25 | - "json" 26 | - --latency 27 | - "0.2" 28 | resources: 29 | requests: 30 | cpu: 100m 31 | memory: 100Mi 32 | limits: 33 | cpu: 100m 34 | memory: 100Mi 35 | -------------------------------------------------------------------------------- /observability/base/loggen/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: observability 4 | resources: 5 | - helmrelease.yaml 6 | -------------------------------------------------------------------------------- /observability/base/victoria-logs/grafana-dashboards.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: grafana.integreatly.org/v1beta1 2 | kind: GrafanaDashboard 3 | metadata: 4 | name: observability-victoria-logs-explorer 5 | namespace: observability 6 | spec: 7 | 
allowCrossNamespaceImport: true 8 | datasources: 9 | - inputName: "DS_VICTORIALOGS" 10 | datasourceName: "VictoriaLogs" 11 | instanceSelector: 12 | matchLabels: 13 | dashboards: "grafana" 14 | url: "https://grafana.com/api/dashboards/22759/revisions/6/download" 15 | -------------------------------------------------------------------------------- /observability/base/victoria-logs/grafana-datasource.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: grafana.integreatly.org/v1beta1 2 | kind: GrafanaDatasource 3 | metadata: 4 | name: vl-datasource 5 | namespace: observability 6 | spec: 7 | allowCrossNamespaceImport: true 8 | datasource: 9 | access: proxy 10 | type: victoriametrics-logs-datasource 11 | name: VictoriaLogs 12 | # Single 13 | url: http://victoria-logs-victoria-logs-single-server.observability:9428 14 | # Cluster 15 | # url: http://victoria-logs-victoria-logs-cluster-vlselect.observability:9471 16 | instanceSelector: 17 | matchLabels: 18 | dashboards: grafana 19 | -------------------------------------------------------------------------------- /observability/base/victoria-logs/helmrelease-vlsingle.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: victoria-logs 5 | namespace: observability 6 | spec: 7 | releaseName: victoria-logs 8 | chart: 9 | spec: 10 | chart: victoria-logs-single 11 | sourceRef: 12 | kind: HelmRepository 13 | name: victoria-metrics 14 | namespace: observability 15 | version: "0.11.2" 16 | interval: 4m0s 17 | timeout: 30m 18 | install: 19 | remediation: 20 | retries: 3 21 | values: 22 | printNotes: false 23 | 24 | server: 25 | resources: 26 | limits: 27 | cpu: 500m 28 | memory: 512Mi 29 | requests: 30 | cpu: 500m 31 | memory: 512Mi 32 | 33 | vmServiceScrape: 34 | enabled: true 35 | 36 | vector: 37 | enabled: true 38 | 39 | dashboards: 40 | enabled: true 41 | grafanaOperator: 42 | enabled: true 43 | spec: 44 | allowCrossNamespaceImport: true 45 | -------------------------------------------------------------------------------- /observability/base/victoria-logs/httproute-vlcluster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: gateway.networking.k8s.io/v1 2 | kind: HTTPRoute 3 | metadata: 4 | name: victoria-logs 5 | namespace: observability 6 | spec: 7 | parentRefs: 8 | - name: platform-private 9 | namespace: infrastructure 10 | hostnames: 11 | - "vl.priv.${domain_name}" 12 | rules: 13 | - backendRefs: 14 | - name: victoria-logs-victoria-logs-cluster-vlselect 15 | port: 9471 16 | -------------------------------------------------------------------------------- /observability/base/victoria-logs/httproute-vlsingle.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: gateway.networking.k8s.io/v1 2 | kind: HTTPRoute 3 | metadata: 4 | name: victoria-logs 5 | namespace: observability 6 | spec: 7 | parentRefs: 8 | - name: platform-private 9 | namespace: infrastructure 10 | hostnames: 11 | - "vl.priv.${domain_name}" 12 | rules: 13 | - backendRefs: 14 | - name: victoria-logs-victoria-logs-single-server 15 | port: 9428 16 | -------------------------------------------------------------------------------- /observability/base/victoria-logs/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: 
Kustomization 3 | 4 | resources: 5 | - grafana-dashboards.yaml 6 | - grafana-datasource.yaml 7 | 8 | # Choose between single or cluster helm release 9 | 10 | # VM Single 11 | - helmrelease-vlsingle.yaml 12 | - httproute-vlsingle.yaml 13 | 14 | # VM Cluster 15 | # - helmrelease-vlcluster.yaml 16 | # - httproute-vlcluster.yaml 17 | -------------------------------------------------------------------------------- /observability/base/victoria-metrics-k8s-stack/externalsecret-alertmanager-slack-app.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: victoria-metrics-k8s-stack-alertmanager-slack-app 5 | namespace: observability 6 | spec: 7 | dataFrom: 8 | - extract: 9 | conversionStrategy: Default 10 | key: observability/victoria-metrics-k8s-stack/alertmanager-slack-app 11 | refreshInterval: 1h 12 | secretStoreRef: 13 | kind: ClusterSecretStore 14 | name: clustersecretstore 15 | target: 16 | creationPolicy: Owner 17 | deletionPolicy: Retain 18 | name: victoria-metrics-k8s-stack-alertmanager-slack-app 19 | -------------------------------------------------------------------------------- /observability/base/victoria-metrics-k8s-stack/externalsecret-grafana-envvars.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: victoria-metrics-k8s-stack-grafana-envvars 5 | namespace: observability 6 | spec: 7 | dataFrom: 8 | - extract: 9 | conversionStrategy: Default 10 | key: observability/victoria-metrics-k8s-stack/grafana-envvars 11 | refreshInterval: 1h 12 | secretStoreRef: 13 | kind: ClusterSecretStore 14 | name: clustersecretstore 15 | target: 16 | creationPolicy: Owner 17 | deletionPolicy: Retain 18 | name: victoria-metrics-k8s-stack-grafana-envvars 19 | -------------------------------------------------------------------------------- /observability/base/victoria-metrics-k8s-stack/helmrelease-vmsingle.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: victoria-metrics-k8s-stack 5 | namespace: observability 6 | spec: 7 | releaseName: victoria-metrics-k8s-stack 8 | chart: 9 | spec: 10 | chart: victoria-metrics-k8s-stack 11 | sourceRef: 12 | kind: HelmRepository 13 | name: victoria-metrics 14 | namespace: observability 15 | version: "0.50.1" 16 | interval: 4m0s 17 | timeout: 30m 18 | install: 19 | remediation: 20 | retries: 3 21 | valuesFrom: 22 | - kind: ConfigMap 23 | name: vm-common-helm-values 24 | valuesKey: values.yaml 25 | values: 26 | vmsingle: 27 | spec: 28 | retentionPeriod: "1d" # Minimal retention, for tests only 29 | replicaCount: 1 30 | storage: 31 | accessModes: 32 | - ReadWriteOnce 33 | resources: 34 | requests: 35 | storage: 10Gi 36 | extraArgs: 37 | maxLabelsPerTimeseries: "50" 38 | # Todo authentication with Zitadel. 
Currently using admin user 39 | # grafana: 40 | # grafana.ini: 41 | # server: 42 | # root_url: "https://grafana.priv.${domain_name}" 43 | # domain: "grafana.priv.${domain_name}" 44 | # auth.generic_auth: 45 | # enabled: true 46 | # name: "Zitadel" 47 | # allow_sign_up: true 48 | # client_id: "293437355073802541" 49 | # client_secret: "3XPQdOtQedxEnAjaTbxsnQ2Fc0WT15rKU5nsgSWYzgktdPHm82whbzfu01J0c0ba" 50 | # scopes: "openid profile email" 51 | # auth_url: "https://auth.${domain_name}/oauth/v2/authorize" 52 | # token_url: "https://auth.${domain_name}/oauth/v2/token" 53 | # api_url: "https://auth.${domain_name}/oidc/v1/userinfo" 54 | # # role_attribute_path: "contains(groups[*], 'admin-group') && 'Admin' || 'Viewer'" 55 | -------------------------------------------------------------------------------- /observability/base/victoria-metrics-k8s-stack/httproute-grafana.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: gateway.networking.k8s.io/v1 2 | kind: HTTPRoute 3 | metadata: 4 | name: grafana 5 | namespace: observability 6 | spec: 7 | parentRefs: 8 | - name: platform-private 9 | namespace: infrastructure 10 | hostnames: 11 | - "grafana.priv.${domain_name}" 12 | rules: 13 | - backendRefs: 14 | - name: victoria-metrics-k8s-stack-grafana 15 | port: 80 16 | -------------------------------------------------------------------------------- /observability/base/victoria-metrics-k8s-stack/httproute-vmagent.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: gateway.networking.k8s.io/v1 2 | kind: HTTPRoute 3 | metadata: 4 | name: vmagent 5 | namespace: observability 6 | spec: 7 | parentRefs: 8 | - name: platform-private 9 | namespace: infrastructure 10 | hostnames: 11 | - "vmagent-${cluster_name}.priv.${domain_name}" 12 | rules: 13 | - backendRefs: 14 | - name: vmagent-victoria-metrics-k8s-stack 15 | port: 8429 16 | -------------------------------------------------------------------------------- /observability/base/victoria-metrics-k8s-stack/httproute-vmalertmanager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: gateway.networking.k8s.io/v1 2 | kind: HTTPRoute 3 | metadata: 4 | name: vmalertmanager 5 | namespace: observability 6 | spec: 7 | parentRefs: 8 | - name: platform-private 9 | namespace: infrastructure 10 | hostnames: 11 | - "vmalertmanager-${cluster_name}.priv.${domain_name}" 12 | rules: 13 | - backendRefs: 14 | - name: vmalertmanager-victoria-metrics-k8s-stack 15 | port: 9093 16 | -------------------------------------------------------------------------------- /observability/base/victoria-metrics-k8s-stack/httproute-vmcluster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: gateway.networking.k8s.io/v1 2 | kind: HTTPRoute 3 | metadata: 4 | name: victoriametrics 5 | namespace: observability 6 | spec: 7 | parentRefs: 8 | - name: platform-private 9 | namespace: infrastructure 10 | hostnames: 11 | - "vm.priv.${domain_name}" 12 | rules: 13 | - backendRefs: 14 | - name: vmselect-victoria-metrics-k8s-stack 15 | port: 8481 16 | -------------------------------------------------------------------------------- /observability/base/victoria-metrics-k8s-stack/httproute-vmsingle.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: gateway.networking.k8s.io/v1 2 | kind: HTTPRoute 3 | metadata: 4 | name: victoriametrics 5 | namespace: observability 6 
| spec: 7 | parentRefs: 8 | - name: platform-private 9 | namespace: infrastructure 10 | hostnames: 11 | - "vm.priv.${domain_name}" 12 | rules: 13 | - backendRefs: 14 | - name: vmsingle-victoria-metrics-k8s-stack 15 | port: 8429 16 | -------------------------------------------------------------------------------- /observability/base/victoria-metrics-k8s-stack/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - externalsecret-alertmanager-slack-app.yaml 6 | - externalsecret-grafana-envvars.yaml 7 | 8 | # HttpRoutes 9 | - httproute-grafana.yaml 10 | - httproute-vmagent.yaml 11 | - httproute-vmalertmanager.yaml 12 | 13 | # Victoria Metrics Operator resources 14 | - vmscrapeconfigs 15 | - vmservicecrapes 16 | - vmrules 17 | 18 | # Common Helm values for both single and cluster 19 | - vm-common-helm-values-configmap.yaml 20 | 21 | # Choose between single or cluster helm release 22 | 23 | # VM Single 24 | - helmrelease-vmsingle.yaml 25 | - httproute-vmsingle.yaml 26 | 27 | # VM Cluster 28 | # - helmrelease-vmcluster.yaml 29 | # - httproute-vmcluster.yaml 30 | 31 | # Grafana provisioning (Plugins config, RBAC, etc) 32 | - ogenki-grafana-provisioning.yaml 33 | -------------------------------------------------------------------------------- /observability/base/victoria-metrics-k8s-stack/ogenki-grafana-provisioning.yaml: -------------------------------------------------------------------------------- 1 | # Create a configmap for the Grafana provisioning plugin 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: grafana-provisioning 6 | namespace: observability 7 | data: 8 | "plugin-oncall.yaml": | 9 | apiVersion: 1 10 | apps: 11 | - type: grafana-oncall-app 12 | name: grafana-oncall-app 13 | jsonData: 14 | stackId: 5 15 | orgId: 100 16 | onCallApiUrl: "http://oncall-engine:8080/" 17 | grafanaUrl: "http://victoria-metrics-k8s-stack-grafana/" 18 | 19 | ## !! RBAC is not supported in Grafana Open Source version !! 
20 | # "rbac.yaml": | 21 | # apiVersion: 2 22 | 23 | # roles: 24 | # - name: 'custom:folder:backend_editor' 25 | # uid: 'backend_editor_role' 26 | # description: 'Edit permissions for the backend folder' 27 | # version: 1 28 | # global: true 29 | # permissions: 30 | # - action: 'folders:write' 31 | # scope: 'folders:name:backend' 32 | # - action: 'dashboards:write' 33 | # scope: 'folders:name:backend' 34 | 35 | # - name: 'custom:folder:frontend_editor' 36 | # uid: 'frontend_editor_role' 37 | # description: 'Edit permissions for the frontend folder' 38 | # version: 1 39 | # global: true 40 | # permissions: 41 | # - action: 'folders:write' 42 | # scope: 'folders:name:frontend' 43 | # - action: 'dashboards:write' 44 | # scope: 'folders:name:frontend' 45 | 46 | # teams: 47 | # - name: 'Backend' 48 | # orgId: 1 49 | # roles: 50 | # - uid: 'backend_editor_role' 51 | # global: true 52 | 53 | # - name: 'Frontend' 54 | # orgId: 1 55 | # roles: 56 | # - uid: 'frontend_editor_role' 57 | # global: true 58 | -------------------------------------------------------------------------------- /observability/base/victoria-metrics-k8s-stack/vmrules/karpenter.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: operator.victoriametrics.com/v1beta1 2 | kind: VMRule 3 | metadata: 4 | labels: 5 | app: karpenter 6 | name: karpenter 7 | namespace: karpenter 8 | spec: 9 | groups: 10 | - name: karpenter 11 | rules: 12 | - alert: KarpenterCanNotRegisterNewNodes 13 | expr: sum by (nodepool) (karpenter_nodeclaims_launched) - sum by (nodepool)(karpenter_nodeclaims_registered) != 0 14 | for: 15m 15 | labels: 16 | severity: warning 17 | annotations: 18 | message: Problem with registering new nodes in the cluster. 19 | description: | 20 | Karpenter in the nodepool {{ $labels.nodepool }} launched new nodes, but some of the nodes did not register in the cluster within 15 minutes. 21 | runbook_url: "https://karpenter.sh/docs/troubleshooting/" 22 | dashboard: "https://grafana.priv.${domain_name}/dashboards" 23 | 24 | - alert: KarpenterNodepoolAlmostFull 25 | expr: sum by (nodepool,resource_type) (karpenter_nodepool_usage) / sum by (nodepool,resource_type) (karpenter_nodepool_limit) * 100 > 80 26 | for: 15m 27 | labels: 28 | severity: warning 29 | annotations: 30 | message: Nodepool almost full, you should increase its limits. 31 | description: | 32 | Nodepool {{ $labels.nodepool }} is using {{ $value }}% of its {{ $labels.resource_type }} limit. 33 | runbook_url: "https://karpenter.sh/docs/troubleshooting/" 34 | dashboard: "https://grafana.priv.${domain_name}/dashboards" 35 | 36 | - alert: KarpenterCloudproviderErrors 37 | expr: increase(karpenter_cloudprovider_errors_total[10m]) > 0 38 | for: 1m 39 | labels: 40 | severity: warning 41 | annotations: 42 | message: Cloud provider errors detected by Karpenter. 43 | description: | 44 | Karpenter received an error during an API call to the cloud provider.
45 | runbook_url: "https://karpenter.sh/docs/troubleshooting/" 46 | dashboard: "https://grafana.priv.${domain_name}/dashboards" 47 | -------------------------------------------------------------------------------- /observability/base/victoria-metrics-k8s-stack/vmrules/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - karpenter.yaml 6 | -------------------------------------------------------------------------------- /observability/base/victoria-metrics-k8s-stack/vmscrapeconfigs/ec2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: operator.victoriametrics.com/v1beta1 2 | kind: VMScrapeConfig 3 | metadata: 4 | name: aws-ec2-node-exporter 5 | namespace: observability 6 | spec: 7 | ec2SDConfigs: 8 | - region: ${region} 9 | port: 9100 10 | filters: 11 | - name: tag:observability:node-exporter 12 | values: ["true"] 13 | relabelConfigs: 14 | - action: replace 15 | source_labels: [__meta_ec2_tag_Name] 16 | target_label: ec2_name 17 | - action: replace 18 | source_labels: [__meta_ec2_tag_app] 19 | target_label: ec2_application 20 | - action: replace 21 | source_labels: [__meta_ec2_availability_zone] 22 | target_label: ec2_az 23 | - action: replace 24 | source_labels: [__meta_ec2_instance_id] 25 | target_label: ec2_id 26 | - action: replace 27 | source_labels: [__meta_ec2_region] 28 | target_label: ec2_region 29 | -------------------------------------------------------------------------------- /observability/base/victoria-metrics-k8s-stack/vmscrapeconfigs/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - ec2.yaml 6 | -------------------------------------------------------------------------------- /observability/base/victoria-metrics-k8s-stack/vmservicecrapes/karpenter.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: operator.victoriametrics.com/v1beta1 2 | kind: VMServiceScrape 3 | metadata: 4 | name: karpenter 5 | namespace: karpenter 6 | spec: 7 | selector: 8 | matchLabels: 9 | app.kubernetes.io/name: karpenter 10 | endpoints: 11 | - port: http-metrics 12 | path: /metrics 13 | namespaceSelector: 14 | matchNames: 15 | - karpenter 16 | -------------------------------------------------------------------------------- /observability/base/victoria-metrics-k8s-stack/vmservicecrapes/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - karpenter.yaml 6 | -------------------------------------------------------------------------------- /observability/mycluster-0/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - ../base/victoria-logs 6 | # Log generator for demo purposes 7 | # - ../base/loggen 8 | -------------------------------------------------------------------------------- /observability/mycluster-0/victoria-metrics-k8s-stack/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - 
../../base/victoria-metrics-k8s-stack 6 | -------------------------------------------------------------------------------- /opentofu/config.tm.hcl: -------------------------------------------------------------------------------- 1 | # Global variables that are used in all scripts 2 | # Use your own values for these variables 3 | globals { 4 | provisioner = "tofu" 5 | region = "eu-west-3" 6 | profile = "" 7 | eks_cluster_name = "mycluster-0" 8 | openbao_url = "https://bao.priv.cloud.ogenki.io:8200" 9 | root_token_secret_name = "openbao/cloud-native-ref/tokens/root" 10 | root_ca_secret_name = "certificates/priv.cloud.ogenki.io/root-ca" 11 | cert_manager_approle_secret_name = "openbao/cloud-native-ref/approles/cert-manager" 12 | cert_manager_approle = "cert-manager" 13 | } 14 | -------------------------------------------------------------------------------- /opentofu/eks/.trivyignore.yaml: -------------------------------------------------------------------------------- 1 | # We need to work on these security issues 2 | misconfigurations: 3 | - id: AVD-KSV-0001 4 | - id: AVD-KSV-0003 5 | - id: AVD-KSV-0004 6 | - id: AVD-KSV-0011 7 | - id: AVD-KSV-0012 8 | - id: AVD-KSV-0014 9 | - id: AVD-KSV-0016 10 | - id: AVD-KSV-0018 11 | - id: AVD-KSV-0020 12 | - id: AVD-KSV-0021 13 | - id: AVD-KSV-0030 14 | - id: AVD-KSV-0032 15 | - id: AVD-KSV-0033 16 | - id: AVD-KSV-0035 17 | - id: AVD-KSV-0039 18 | - id: AVD-KSV-0040 19 | - id: AVD-KSV-0104 20 | - id: AVD-KSV-0106 21 | - id: AVD-KSV-0110 22 | - id: AVD-KSV-0118 23 | - id: AVD-KSV-0125 24 | -------------------------------------------------------------------------------- /opentofu/eks/backend.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "s3" { 3 | bucket = "demo-smana-remote-backend" 4 | key = "cloud-native-ref/eks/opentofu.tfstate" 5 | region = "eu-west-3" 6 | encrypt = true 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /opentofu/eks/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_caller_identity" "this" {} 2 | data "aws_vpc" "selected" { 3 | filter { 4 | name = "tag:project" 5 | values = ["cloud-native-ref"] 6 | } 7 | filter { 8 | name = "tag:owner" 9 | values = ["Smana"] 10 | } 11 | filter { 12 | name = "tag:environment" 13 | values = ["dev"] 14 | } 15 | } 16 | data "aws_subnets" "private" { 17 | filter { 18 | name = "vpc-id" 19 | values = [data.aws_vpc.selected.id] 20 | } 21 | filter { 22 | name = "tag:Name" 23 | values = ["vpc-${var.region}-${var.env}-private-*"] 24 | } 25 | } 26 | data "aws_subnets" "intra" { 27 | filter { 28 | name = "vpc-id" 29 | values = [data.aws_vpc.selected.id] 30 | } 31 | filter { 32 | name = "tag:Name" 33 | values = ["vpc-${var.region}-${var.env}-intra-*"] 34 | } 35 | } 36 | 37 | data "aws_security_group" "tailscale" { 38 | filter { 39 | name = "tag:project" 40 | values = ["cloud-native-ref"] 41 | } 42 | 43 | filter { 44 | name = "tag:owner" 45 | values = ["Smana"] 46 | } 47 | 48 | filter { 49 | name = "tag:environment" 50 | values = [var.env] 51 | } 52 | filter { 53 | name = "tag:app" 54 | values = ["tailscale"] 55 | } 56 | } 57 | 58 | #tflint-ignore: terraform_unused_declarations 59 | data "aws_ecrpublic_authorization_token" "token" { 60 | provider = aws.virginia 61 | } 62 | 63 | data "aws_eks_cluster_auth" "cluster_auth" { 64 | name = module.eks.cluster_name 65 | } 66 | 67 | data "aws_secretsmanager_secret_version" "github_app" { 68 | secret_id = 
var.github_app_secret_name 69 | } 70 | 71 | data "aws_secretsmanager_secret_version" "cert_manager_approle" { 72 | secret_id = var.cert_manager_approle_secret_name 73 | } 74 | 75 | data "http" "gateway_api_crds" { 76 | count = length(local.gateway_api_crds_urls) 77 | url = local.gateway_api_crds_urls[count.index] 78 | } 79 | 80 | # Kubernetes manifests 81 | data "kubectl_filename_list" "flux" { 82 | pattern = "${path.module}/kubernetes-manifests/flux/*.yaml" 83 | } 84 | 85 | data "kubectl_filename_list" "karpenter_default" { 86 | pattern = "${path.module}/kubernetes-manifests/karpenter/default-*.yaml" 87 | } 88 | 89 | data "kubectl_filename_list" "karpenter_io" { 90 | pattern = "${path.module}/kubernetes-manifests/karpenter/io-*.yaml" 91 | } 92 | -------------------------------------------------------------------------------- /opentofu/eks/helm_values/aws-ebs-csi-driver.yaml: -------------------------------------------------------------------------------- 1 | controller: 2 | k8sTagClusterId: ${cluster_name} 3 | serviceAccount: 4 | annotations: 5 | eks.amazonaws.com/role-arn: ${irsa_role_arn} 6 | replicaCount: 2 7 | resources: 8 | limits: 9 | cpu: 100m 10 | memory: 50Mi 11 | enableMetrics: true 12 | node: 13 | resources: 14 | limits: 15 | cpu: 200m 16 | memory: 200Mi 17 | serviceMonitor: 18 | forceEnable: true 19 | labels: 20 | prometheus-instance: main 21 | -------------------------------------------------------------------------------- /opentofu/eks/helm_values/cilium.yaml: -------------------------------------------------------------------------------- 1 | k8sServiceHost: "${cluster_endpoint}" 2 | k8sServicePort: "443" 3 | bandwidthManager: 4 | enabled: true 5 | bpf: 6 | preallocateMaps: true 7 | egressMasqueradeInterfaces: eth0 8 | eni: 9 | enabled: true 10 | installNoConntrackIptablesRules: true 11 | ipam: 12 | mode: eni 13 | kubeProxyReplacement: true 14 | operator: 15 | resources: 16 | limits: 17 | cpu: 100m 18 | memory: 100Mi 19 | rollOutPods: true # Reload pods when the configmap is updated 20 | # Can't enable servicemonitor as the CRD is not yet installed 21 | prometheus: 22 | enabled: true 23 | serviceMonitor: 24 | enabled: false 25 | resources: 26 | limits: 27 | cpu: 300m 28 | memory: 256Mi 29 | routingMode: native 30 | 31 | envoy: 32 | resources: 33 | limits: 34 | memory: 300Mi 35 | requests: 36 | cpu: 200m 37 | memory: 300Mi 38 | 39 | hubble: 40 | metrics: 41 | enabled: 42 | - dns:query;ignoreAAAA 43 | - drop 44 | - tcp 45 | - flow 46 | - icmp 47 | - http 48 | enableOpenMetrics: true 49 | relay: 50 | enabled: true 51 | ui: 52 | enabled: true 53 | 54 | gatewayAPI: 55 | enabled: true 56 | -------------------------------------------------------------------------------- /opentofu/eks/helm_values/karpenter.yaml: -------------------------------------------------------------------------------- 1 | settings: 2 | clusterName: ${cluster_name} 3 | clusterEndpoint: ${cluster_endpoint} 4 | interruptionQueue: ${queue_name} 5 | -------------------------------------------------------------------------------- /opentofu/eks/karpenter.tf: -------------------------------------------------------------------------------- 1 | #trivy:ignore:AVD-AWS-0342 2 | module "karpenter" { 3 | source = "terraform-aws-modules/eks/aws//modules/karpenter" 4 | version = "~> 20.0" 5 | 6 | cluster_name = module.eks.cluster_name 7 | 8 | node_iam_role_additional_policies = merge( 9 | var.enable_ssm ? 
{ ssm = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" } : {}, 10 | var.iam_role_additional_policies 11 | ) 12 | 13 | tags = var.tags 14 | } 15 | 16 | resource "aws_eks_pod_identity_association" "karpenter" { 17 | cluster_name = module.eks.cluster_name 18 | namespace = "karpenter" 19 | service_account = "karpenter" 20 | role_arn = module.karpenter.iam_role_arn 21 | } 22 | -------------------------------------------------------------------------------- /opentofu/eks/kubernetes-manifests/flux/cluster-vars-configmap.yaml: -------------------------------------------------------------------------------- 1 | # This ConfigMap will be used to store variables that are used in the Flux manifests 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: eks-${cluster_name}-vars 6 | namespace: flux-system 7 | data: 8 | cluster_name: "${cluster_name}" 9 | oidc_provider_arn: "${oidc_provider_arn}" 10 | oidc_issuer_url: "${oidc_issuer_url}" 11 | oidc_issuer_host: "${oidc_issuer_host}" 12 | aws_account_id: "${aws_account_id}" 13 | region: "${region}" 14 | environment: "${environment}" 15 | vpc_id: "${vpc_id}" 16 | vpc_cidr_block: "${vpc_cidr_block}" 17 | -------------------------------------------------------------------------------- /opentofu/eks/kubernetes-manifests/flux/instance.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: fluxcd.controlplane.io/v1 2 | kind: FluxInstance 3 | metadata: 4 | name: flux 5 | namespace: flux-system 6 | spec: 7 | distribution: 8 | version: "2.x" 9 | registry: "ghcr.io/fluxcd" 10 | artifact: "oci://ghcr.io/controlplaneio-fluxcd/flux-operator-manifests" 11 | components: 12 | - source-controller 13 | - kustomize-controller 14 | - helm-controller 15 | - notification-controller 16 | %{ if enable_flux_image_update_automation ~} 17 | - image-reflector-controller 18 | - image-automation-controller 19 | %{ endif ~} 20 | cluster: 21 | type: kubernetes 22 | networkPolicy: true 23 | storage: 24 | class: "gp3" 25 | size: "5Gi" 26 | sync: 27 | kind: GitRepository 28 | url: ${repository_sync_url} 29 | ref: ${git_ref} 30 | path: "clusters/${cluster_name}" 31 | pullSecret: "flux-system" 32 | kustomize: 33 | patches: 34 | - patch: | 35 | - op: add 36 | path: /spec/provider 37 | value: github 38 | target: 39 | kind: GitRepository 40 | -------------------------------------------------------------------------------- /opentofu/eks/kubernetes-manifests/karpenter/default-ec2nc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: karpenter.k8s.aws/v1 2 | kind: EC2NodeClass 3 | metadata: 4 | name: default 5 | spec: 6 | amiSelectorTerms: 7 | - alias: bottlerocket@1.37.0 8 | role: ${karpenter_node_iam_role_name} 9 | kubelet: 10 | maxPods: 100 11 | subnetSelectorTerms: 12 | - tags: 13 | karpenter.sh/discovery: ${env} 14 | securityGroupSelectorTerms: 15 | - tags: 16 | karpenter.sh/discovery: ${cluster_name} 17 | tags: 18 | karpenter.sh/discovery: ${cluster_name} 19 | -------------------------------------------------------------------------------- /opentofu/eks/kubernetes-manifests/karpenter/default-nodepool.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: karpenter.sh/v1 2 | kind: NodePool 3 | metadata: 4 | name: default 5 | spec: 6 | template: 7 | spec: 8 | nodeClassRef: 9 | group: karpenter.k8s.aws 10 | kind: EC2NodeClass 11 | name: default 12 | requirements: 13 | - key: "kubernetes.io/arch" 14 | operator: In 15 | values: ["amd64"] 
16 | - key: karpenter.sh/capacity-type 17 | operator: In 18 | values: ["spot"] 19 | # Instance types t are really too small 20 | - key: "karpenter.k8s.aws/instance-category" 21 | operator: NotIn 22 | values: ["t"] 23 | # Do not select big instance types in order to avoid blast radius 24 | - key: karpenter.k8s.aws/instance-cpu 25 | operator: Lt 26 | values: ["26"] 27 | - key: karpenter.k8s.aws/instance-memory 28 | operator: Lt 29 | values: ["50001"] 30 | disruption: 31 | consolidationPolicy: WhenEmpty 32 | consolidateAfter: 30s 33 | limits: 34 | cpu: ${default_nodepool_cpu_limits} 35 | memory: ${default_nodepool_memory_limits} 36 | -------------------------------------------------------------------------------- /opentofu/eks/kubernetes-manifests/karpenter/io-ec2nc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: karpenter.k8s.aws/v1 2 | kind: EC2NodeClass 3 | metadata: 4 | name: io 5 | spec: 6 | amiSelectorTerms: 7 | - alias: al2@latest 8 | instanceStorePolicy: "RAID0" 9 | role: ${karpenter_node_iam_role_name} 10 | subnetSelectorTerms: 11 | - tags: 12 | karpenter.sh/discovery: ${env} 13 | securityGroupSelectorTerms: 14 | - tags: 15 | karpenter.sh/discovery: ${cluster_name} 16 | userData: | 17 | #!/bin/bash 18 | /usr/bin/setup-local-disks raid0 19 | tags: 20 | karpenter.sh/discovery: ${cluster_name} 21 | -------------------------------------------------------------------------------- /opentofu/eks/kubernetes-manifests/karpenter/io-nodepool.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: karpenter.sh/v1 2 | kind: NodePool 3 | metadata: 4 | name: io 5 | spec: 6 | template: 7 | spec: 8 | nodeClassRef: 9 | group: karpenter.k8s.aws 10 | kind: EC2NodeClass 11 | name: io 12 | requirements: 13 | - key: karpenter.sh/capacity-type 14 | operator: In 15 | values: ["spot", "on-demand"] 16 | - key: kubernetes.io/arch 17 | operator: In 18 | values: ["amd64"] 19 | - key: karpenter.k8s.aws/instance-cpu 20 | operator: Lt 21 | values: ["26"] 22 | - key: karpenter.k8s.aws/instance-memory 23 | operator: Lt 24 | values: ["50001"] 25 | - key: karpenter.k8s.aws/instance-local-nvme 26 | operator: Gt 27 | values: ["100"] 28 | - key: karpenter.k8s.aws/instance-category 29 | operator: In 30 | values: ["c", "i", "m", "r"] 31 | taints: 32 | - key: ogenki/io 33 | value: "true" 34 | effect: NoSchedule 35 | disruption: 36 | consolidationPolicy: WhenEmptyOrUnderutilized 37 | consolidateAfter: 30s 38 | limits: 39 | cpu: ${io_nodepool_cpu_limits} 40 | memory: ${io_nodepool_memory_limits} 41 | -------------------------------------------------------------------------------- /opentofu/eks/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | gateway_api_crds_urls = [ 3 | "https://raw.githubusercontent.com/kubernetes-sigs/gateway-api/${var.gateway_api_version}/config/crd/experimental/gateway.networking.k8s.io_gatewayclasses.yaml", 4 | "https://raw.githubusercontent.com/kubernetes-sigs/gateway-api/${var.gateway_api_version}/config/crd/experimental/gateway.networking.k8s.io_gateways.yaml", 5 | "https://raw.githubusercontent.com/kubernetes-sigs/gateway-api/${var.gateway_api_version}/config/crd/experimental/gateway.networking.k8s.io_httproutes.yaml", 6 | "https://raw.githubusercontent.com/kubernetes-sigs/gateway-api/${var.gateway_api_version}/config/crd/experimental/gateway.networking.k8s.io_referencegrants.yaml", 7 | 
"https://raw.githubusercontent.com/kubernetes-sigs/gateway-api/${var.gateway_api_version}/config/crd/experimental/gateway.networking.k8s.io_tcproutes.yaml", 8 | "https://raw.githubusercontent.com/kubernetes-sigs/gateway-api/${var.gateway_api_version}/config/crd/experimental/gateway.networking.k8s.io_tlsroutes.yaml", 9 | "https://raw.githubusercontent.com/kubernetes-sigs/gateway-api/${var.gateway_api_version}/config/crd/experimental/gateway.networking.k8s.io_udproutes.yaml", 10 | "https://raw.githubusercontent.com/kubernetes-sigs/gateway-api/${var.gateway_api_version}/config/crd/experimental/gateway.networking.k8s.io_grpcroutes.yaml" 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- /opentofu/eks/providers.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = var.region 3 | } 4 | 5 | provider "aws" { 6 | region = "us-east-1" 7 | alias = "virginia" 8 | } 9 | 10 | provider "helm" { 11 | kubernetes { 12 | host = module.eks.cluster_endpoint 13 | cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) 14 | token = data.aws_eks_cluster_auth.cluster_auth.token 15 | } 16 | } 17 | 18 | provider "kubectl" { 19 | apply_retry_count = 15 20 | host = module.eks.cluster_endpoint 21 | token = data.aws_eks_cluster_auth.cluster_auth.token 22 | cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) 23 | load_config_file = false 24 | } 25 | 26 | provider "kubernetes" { 27 | host = module.eks.cluster_endpoint 28 | cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) 29 | token = data.aws_eks_cluster_auth.cluster_auth.token 30 | } 31 | -------------------------------------------------------------------------------- /opentofu/eks/stack.tm.hcl: -------------------------------------------------------------------------------- 1 | stack { 2 | name = "EKS cluster" 3 | description = "EKS cluster" 4 | id = "51322224-ac05-497c-bbaf-e2a821a9b2d8" 5 | 6 | after = [ 7 | "/opentofu/network", 8 | "/opentofu/openbao/management" 9 | ] 10 | 11 | tags = [ 12 | "aws", 13 | "eks", 14 | "kubernetes", 15 | "infrastructure" 16 | ] 17 | 18 | } 19 | -------------------------------------------------------------------------------- /opentofu/eks/variables.tfvars: -------------------------------------------------------------------------------- 1 | env = "dev" 2 | cluster_name = "mycluster-0" # Generated with petname 3 | 4 | flux_sync_repository_url = "https://github.com/Smana/cloud-native-ref.git" 5 | 6 | tags = { 7 | GithubRepo = "cloud-native-ref" 8 | GithubOrg = "Smana" 9 | } 10 | 11 | enable_ssm = true 12 | 13 | cert_manager_approle_secret_name = "openbao/cloud-native-ref/approles/cert-manager" 14 | 15 | karpenter_limits = { 16 | "default" = { 17 | cpu = "20" 18 | memory = "64Gi" 19 | } 20 | "io" = { 21 | cpu = "20" 22 | memory = "64Gi" 23 | } 24 | } 25 | 26 | cluster_identity_providers = { 27 | zitadel = { 28 | client_id = "293655038025345449" 29 | issuer_url = "https://auth.cloud.ogenki.io" 30 | username_claim = "email" 31 | groups_claim = "groups" 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /opentofu/eks/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.4" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "~> 5.0" 8 | } 9 | github = { 10 | source = 
"integrations/github" 11 | version = "~> 6.0" 12 | } 13 | kubernetes = { 14 | source = "hashicorp/kubernetes" 15 | version = ">= 2.20" 16 | } 17 | kubectl = { 18 | source = "alekc/kubectl" 19 | version = ">= 2.0.0" 20 | } 21 | helm = { 22 | source = "hashicorp/helm" 23 | version = ">= 2.7" 24 | } 25 | http = { 26 | source = "hashicorp/http" 27 | version = ">= 3.4" 28 | } 29 | random = { 30 | source = "hashicorp/random" 31 | version = "~> 3.5" 32 | } 33 | tls = { 34 | source = "hashicorp/tls" 35 | version = "~> 4.0" 36 | } 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /opentofu/eks/workflows.tm.hcl: -------------------------------------------------------------------------------- 1 | script "destroy" { 2 | description = "Destroy the EKS cluster" 3 | job { 4 | name = "eks-destroy" 5 | description = "Destroy the EKS cluster" 6 | commands = [ 7 | [ 8 | "bash", 9 | "../../scripts/eks-prepare-destroy.sh", 10 | "--cluster-name", 11 | global.eks_cluster_name, 12 | "--region", 13 | global.region, 14 | "--profile", 15 | global.profile, 16 | ], 17 | [ 18 | global.provisioner, "destroy", "-var-file=variables.tfvars" 19 | ] 20 | ] 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /opentofu/network/.trivyignore.yaml: -------------------------------------------------------------------------------- 1 | misconfigurations: 2 | - id: AVD-AWS-0099 3 | - id: AVD-AWS-0102 4 | - id: AVD-AWS-0105 5 | -------------------------------------------------------------------------------- /opentofu/network/README.md: -------------------------------------------------------------------------------- 1 | # Network and VPN server 2 | 3 | This module deploys several things: 4 | 5 | * Base network resources: VPC, subnets 6 | * A route53 private zone 7 | * A Tailscale [Subnet Router](https://tailscale.com/kb/1019/subnets) in order to access to securely access to private resources 8 | 9 | ## Prerequisites 10 | 11 | * Create a Tailscale account 12 | * Generate an API key 13 | 14 | Create the `variables.tfvars` file 15 | 16 | ```hcl 17 | env = "dev" 18 | region = "eu-west-3" 19 | private_domain_name = "priv.cloud.ogenki.io" 20 | 21 | tailscale = { 22 | subnet_router_name = "ogenki" 23 | tailnet = "smainklh@gmail.com" 24 | // api_key = "tskey-api-" # Generated in Tailscale Admin console. 
Sensitive value that should be defined using `export TF_VAR_tailscale_api_key=` 25 | prometheus_enabled = true 26 | ssm_enabled = true 27 | overwrite_existing_content = true # Be careful: it will replace the existing ACLs 28 | } 29 | 30 | tags = { 31 | project = "cloud-native-ref" 32 | owner = "Smana" 33 | } 34 | 35 | ``` 36 | 37 | ℹ️ The tags are important here as they are used later on to provision the EKS cluster 38 | 39 | ## Apply 40 | 41 | ```console 42 | cd opentofu/network 43 | tofu init 44 | tofu apply --var-file variables.tfvars 45 | ``` 46 | 47 | You can check that the instance has successfully joined the `tailnet` by running this command: 48 | 49 | ```console 50 | tailscale status 51 | 100.118.83.67 ogenki smainklh@ linux - 52 | 100.67.5.143 ip-10-0-10-77 smainklh@ linux active; relay "par", tx 9881456 rx 45693984 53 | ``` 54 | -------------------------------------------------------------------------------- /opentofu/network/backend.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "s3" { 3 | bucket = "demo-smana-remote-backend" 4 | key = "cloud-native-ref/network/opentofu.tfstate" 5 | region = "eu-west-3" 6 | encrypt = true 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /opentofu/network/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_availability_zones" "available" {} 2 | -------------------------------------------------------------------------------- /opentofu/network/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | azs = slice(data.aws_availability_zones.available.names, 0, 3) 3 | tags = { 4 | environment = var.env 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /opentofu/network/network.tf: -------------------------------------------------------------------------------- 1 | module "vpc" { 2 | source = "terraform-aws-modules/vpc/aws" 3 | version = "~> 5.0" 4 | 5 | name = "vpc-${var.region}-${var.env}" 6 | cidr = var.vpc_cidr 7 | 8 | azs = local.azs 9 | private_subnets = [for k, v in local.azs : cidrsubnet(var.vpc_cidr, 4, k)] 10 | public_subnets = [for k, v in local.azs : cidrsubnet(var.vpc_cidr, 8, k + 48)] 11 | intra_subnets = [for k, v in local.azs : cidrsubnet(var.vpc_cidr, 8, k + 52)] 12 | 13 | enable_nat_gateway = true 14 | single_nat_gateway = true 15 | enable_dns_hostnames = true 16 | 17 | enable_flow_log = true 18 | create_flow_log_cloudwatch_iam_role = true 19 | create_flow_log_cloudwatch_log_group = true 20 | 21 | public_subnet_tags = { 22 | "kubernetes.io/role/elb" = 1 23 | } 24 | 25 | private_subnet_tags = { 26 | "kubernetes.io/role/internal-elb" = 1 27 | "karpenter.sh/discovery" = var.env 28 | } 29 | 30 | tags = merge( 31 | local.tags, 32 | var.tags 33 | ) 34 | } 35 | -------------------------------------------------------------------------------- /opentofu/network/outputs.tf: -------------------------------------------------------------------------------- 1 | output "vpc_id" { 2 | description = "The ID of the VPC" 3 | value = module.vpc.vpc_id 4 | } 5 | 6 | output "public_subnets" { 7 | description = "List of IDs of public subnets" 8 | value = module.vpc.public_subnets 9 | } 10 | 11 | output "private_subnets" { 12 | description = "List of IDs of private subnets" 13 | value = module.vpc.private_subnets 14 | } 15 | 16 | output "intra_subnets" { 17 | description = "List of IDs of intra
subnets" 18 | value = module.vpc.intra_subnets 19 | } 20 | 21 | output "tailscale_security_group_id" { 22 | description = "value" 23 | value = module.tailscale_subnet_router.security_group_id 24 | } 25 | -------------------------------------------------------------------------------- /opentofu/network/providers.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = var.region 3 | } 4 | 5 | provider "tailscale" { 6 | api_key = var.tailscale_api_key 7 | tailnet = var.tailscale_config.tailnet 8 | } 9 | -------------------------------------------------------------------------------- /opentofu/network/route53.tf: -------------------------------------------------------------------------------- 1 | module "zones" { 2 | source = "terraform-aws-modules/route53/aws//modules/zones" 3 | version = "~> 5.0" 4 | 5 | zones = { 6 | "priv.cloud.ogenki.io" = { 7 | comment = "Internal zone for private DNS hosts" 8 | vpc = [ 9 | { 10 | vpc_id = module.vpc.vpc_id 11 | } 12 | ] 13 | tags = { 14 | env = var.env 15 | } 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /opentofu/network/stack.tm.hcl: -------------------------------------------------------------------------------- 1 | stack { 2 | name = "Network" 3 | description = "Tailscale VPN,VPC, subnets, etc." 4 | id = "3564c93f-543f-47c9-9a84-a1d4b5ed7461" 5 | 6 | tags = [ 7 | "aws", 8 | "network", 9 | "infrastructure" 10 | ] 11 | } 12 | -------------------------------------------------------------------------------- /opentofu/network/variables.tf: -------------------------------------------------------------------------------- 1 | variable "env" { 2 | description = "The environment of the VPC" 3 | type = string 4 | } 5 | 6 | variable "region" { 7 | description = "AWS Region" 8 | default = "eu-west-3" 9 | type = string 10 | } 11 | 12 | # Network 13 | variable "vpc_cidr" { 14 | description = "The IPv4 CIDR block for the VPC" 15 | default = "10.0.0.0/16" 16 | type = string 17 | } 18 | 19 | variable "private_domain_name" { 20 | description = "Route53 domain name for private records" 21 | type = string 22 | } 23 | 24 | variable "tailscale_api_key" { 25 | description = "Tailscale API Key" 26 | type = string 27 | sensitive = true 28 | } 29 | 30 | variable "tailscale_config" { 31 | type = map(any) 32 | default = { 33 | subnet_router_name = "" 34 | api_key = "" 35 | tailnet = "" 36 | prometheus_enabled = false 37 | } 38 | } 39 | 40 | variable "tags" { 41 | description = "A map of tags to add to all resources" 42 | type = map(string) 43 | default = {} 44 | } 45 | -------------------------------------------------------------------------------- /opentofu/network/variables.tfvars: -------------------------------------------------------------------------------- 1 | env = "dev" 2 | region = "eu-west-3" 3 | private_domain_name = "priv.cloud.ogenki.io" 4 | 5 | tailscale_config = { 6 | subnet_router_name = "ogenki" 7 | tailnet = "smainklh@gmail.com" 8 | prometheus_enabled = true 9 | ssm_enabled = true 10 | overwrite_existing_content = true 11 | } 12 | 13 | tags = { 14 | project = "cloud-native-ref" 15 | owner = "Smana" 16 | } 17 | -------------------------------------------------------------------------------- /opentofu/network/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.4" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "~> 5.0" 
8 | } 9 | random = { 10 | source = "hashicorp/random" 11 | version = "~> 3.5" 12 | } 13 | tailscale = { 14 | source = "tailscale/tailscale" 15 | version = "~> 0.20" 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /opentofu/openbao/cluster/.trivyignore.yaml: -------------------------------------------------------------------------------- 1 | misconfigurations: [] 2 | -------------------------------------------------------------------------------- /opentofu/openbao/cluster/backend.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "s3" { 3 | bucket = "demo-smana-remote-backend" 4 | key = "cloud-native-ref/openbao/cluster/opentofu.tfstate" 5 | region = "eu-west-3" 6 | encrypt = true 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /opentofu/openbao/cluster/iam.tf: -------------------------------------------------------------------------------- 1 | resource "aws_iam_instance_profile" "this" { 2 | name = local.name 3 | role = aws_iam_role.this.name 4 | tags = var.tags 5 | } 6 | 7 | resource "aws_iam_role" "this" { 8 | name = local.name 9 | tags = var.tags 10 | 11 | assume_role_policy = jsonencode({ 12 | Version = "2012-10-17", 13 | Statement = [ 14 | { 15 | Action = "sts:AssumeRole", 16 | Principal = { 17 | Service = "ec2.amazonaws.com" 18 | }, 19 | Effect = "Allow", 20 | Sid = "" 21 | } 22 | ] 23 | }) 24 | } 25 | 26 | # enable AWS Systems Manager service core functionality 27 | resource "aws_iam_role_policy_attachment" "ssm" { 28 | count = var.enable_ssm ? 1 : 0 29 | role = aws_iam_role.this.name 30 | policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" 31 | } 32 | 33 | # For the raft auto_join discovery 34 | resource "aws_iam_role_policy_attachment" "ec2_read_only" { 35 | role = aws_iam_role.this.name 36 | policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess" 37 | } 38 | 39 | 40 | # For the auto unseal using AWS KMS 41 | data "aws_iam_policy_document" "openbao-kms-unseal" { 42 | statement { 43 | sid = "VaultKMSUnseal" 44 | effect = "Allow" 45 | resources = [aws_kms_key.openbao.arn] 46 | 47 | actions = [ 48 | "kms:Decrypt", 49 | "kms:Encrypt", 50 | "kms:DescribeKey", 51 | "kms:ReEncrypt*", 52 | "kms:GenerateDataKey*" 53 | ] 54 | } 55 | } 56 | 57 | resource "aws_iam_role_policy" "openbao-kms-unseal" { 58 | name = "${local.name}-kms-unseal" 59 | role = aws_iam_role.this.id 60 | policy = data.aws_iam_policy_document.openbao-kms-unseal.json 61 | } 62 | -------------------------------------------------------------------------------- /opentofu/openbao/cluster/kms.tf: -------------------------------------------------------------------------------- 1 | #trivy:ignore:AVD-AWS-0104 trivy:ignore:AVD-AWS-0065 2 | resource "aws_kms_key" "openbao" { 3 | description = "OpenBao unseal key" 4 | deletion_window_in_days = 10 5 | 6 | tags = { 7 | Name = "openbao-kms-unseal-${local.name}" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /opentofu/openbao/cluster/load_balancer.tf: -------------------------------------------------------------------------------- 1 | resource "aws_lb" "this" { 2 | name = local.name 3 | internal = true 4 | load_balancer_type = "network" 5 | subnets = data.aws_subnets.private.ids 6 | 7 | enable_deletion_protection = false 8 | } 9 | 10 | resource "aws_lb_target_group" "this" { 11 | name = local.name 12 | port = 8200 13 | protocol = "TCP" 14 | vpc_id = 
data.aws_vpc.selected.id 15 | 16 | health_check { 17 | protocol = "TCP" 18 | port = "traffic-port" 19 | } 20 | } 21 | 22 | resource "aws_lb_listener" "this" { 23 | load_balancer_arn = aws_lb.this.arn 24 | port = 8200 25 | protocol = "TCP" 26 | 27 | default_action { 28 | type = "forward" 29 | target_group_arn = aws_lb_target_group.this.arn 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /opentofu/openbao/cluster/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | name = format("%s-%s-%s", var.region, var.env, var.name) 3 | tags = { 4 | "OpenBaoInstance" = local.name 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /opentofu/openbao/cluster/outputs.tf: -------------------------------------------------------------------------------- 1 | output "autoscaling_group_id" { 2 | value = module.openbao_asg.autoscaling_group_id 3 | } 4 | -------------------------------------------------------------------------------- /opentofu/openbao/cluster/providers.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = var.region 3 | } 4 | -------------------------------------------------------------------------------- /opentofu/openbao/cluster/route53.tf: -------------------------------------------------------------------------------- 1 | resource "aws_route53_record" "nlb" { 2 | zone_id = data.aws_route53_zone.this.zone_id 3 | name = var.leader_tls_servername 4 | type = "A" 5 | 6 | alias { 7 | name = aws_lb.this.dns_name 8 | zone_id = aws_lb.this.zone_id 9 | evaluate_target_health = true 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /opentofu/openbao/cluster/scripts/cloudinit-config.yaml: -------------------------------------------------------------------------------- 1 | write_files: 2 | - content: ${tls_key_b64} 3 | encoding: b64 4 | path: /opt/openbao/tls/tls.key 5 | permissions: "0640" 6 | - content: ${tls_cert_b64} 7 | encoding: b64 8 | path: /opt/openbao/tls/tls.crt 9 | permissions: "0644" 10 | - content: ${tls_cacert_b64} # intermediate CA 11 | encoding: b64 12 | path: /opt/openbao/tls/ca.pem 13 | permissions: "0644" 14 | 15 | package_upgrade: true 16 | 17 | packages: 18 | - jq 19 | - gpg 20 | - snapd 21 | - wget 22 | -------------------------------------------------------------------------------- /opentofu/openbao/cluster/stack.tm.hcl: -------------------------------------------------------------------------------- 1 | stack { 2 | name = "OpenBao cluster" 3 | description = "OpenBao cluster" 4 | id = "29c70276-6dfc-4bc5-935e-a6c32cebfce4" 5 | after = [ 6 | "/opentofu/network" 7 | ] 8 | tags = [ 9 | "aws", 10 | "openbao", 11 | "openbao-cluster", 12 | "security" 13 | ] 14 | } 15 | -------------------------------------------------------------------------------- /opentofu/openbao/cluster/variables.tfvars: -------------------------------------------------------------------------------- 1 | name = "ogenki-openbao" # Name of your Vault instance 2 | leader_tls_servername = "bao.priv.cloud.ogenki.io" # Vault domain name that will be exposed to users 3 | domain_name = "priv.cloud.ogenki.io" # Route53 private zone where the DNS records are provisioned 4 | env = "dev" # Environment used to tag resources 5 | mode = "dev" # Important: More about this setting in this documentation. 6 | region = "eu-west-3" # Where all the resources will be created 7 | enable_ssm = true # Allows access to the EC2 instances. Enabled for provisioning, but it should be disabled afterwards. 8 | openbao_certificates_secret_name = "certificates/priv.cloud.ogenki.io/openbao" # The name of the AWS Secrets Manager secret containing the OpenBao certificates 9 | oidc_enabled = true # Enable OIDC authentication 10 | oidc_secret_id = "openbao/cloud-native-ref/oidc-client-secret" # The ID of the AWS Secrets Manager secret containing the OIDC client secret 11 | 12 | # Prefer using a hardened AMI 13 | # ami_owner = "3xxxxxxxxx" # Account ID where the hardened AMI is 14 | # ami_filter = { 15 | # "name" = ["*hardened-ubuntu-*"] 16 | # } 17 | 18 | prometheus_node_exporter_enabled = true 19 | 20 | tags = { # In my case, these tags are also used to identify the supporting resources (VPC, subnets...) 21 | project = "cloud-native-ref" 22 | owner = "Smana" 23 | app = "openbao" 24 | "observability:node-exporter" = "true" 25 | } 26 | -------------------------------------------------------------------------------- /opentofu/openbao/cluster/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.4" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "~> 5.0" 8 | } 9 | cloudinit = { 10 | source = "hashicorp/cloudinit" 11 | version = "~> 2.3" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /opentofu/openbao/management/.tls: -------------------------------------------------------------------------------- 1 | ../cluster/.tls -------------------------------------------------------------------------------- /opentofu/openbao/management/.trivyignore.yaml: -------------------------------------------------------------------------------- 1 | misconfigurations: 2 | - id: AVD-AWS-0098 3 | -------------------------------------------------------------------------------- /opentofu/openbao/management/auth.tf: -------------------------------------------------------------------------------- 1 | resource "vault_auth_backend" "approle" { 2 | type = "approle" 3 | } 4 | 5 | resource "vault_approle_auth_backend_role" "snapshot" { 6 | backend = vault_auth_backend.approle.path 7 | role_name = "snapshot-agent" 8 | token_policies = ["snapshot"] 9 | token_bound_cidrs = var.allowed_cidr_blocks 10 | } 11 | 12 | resource "vault_approle_auth_backend_role" "cert_manager" { 13 | backend = vault_auth_backend.approle.path 14 | role_name = "cert-manager" 15 | token_policies = ["cert-manager"] 16 | token_bound_cidrs = var.allowed_cidr_blocks 17 | token_ttl = 600 18 | token_max_ttl = 1200 19 | } 20 | -------------------------------------------------------------------------------- /opentofu/openbao/management/backend.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "s3" { 3 | bucket = "demo-smana-remote-backend" 4 | key = "cloud-native-ref/openbao/management/opentofu.tfstate" 5 | region = "eu-west-3" 6 | encrypt = true 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /opentofu/openbao/management/docs/approle.md: -------------------------------------------------------------------------------- 1 | # 🤖 Configure an Approle 2 | 3 | An **approle** in HashiCorp Vault is a machine-based authentication method.
It assigns a unique RoleID and SecretID to an application or service, allowing it to securely authenticate and access specific secrets in Vault according to predefined policies. 4 | 5 | 1. Define the permissions needed by the application. Here we need to be able to take snapshots. 6 | 7 | `policies/snapshot.hcl` 8 | ```hcl 9 | path "sys/storage/raft/snapshot" { 10 | capabilities = ["read"] 11 | } 12 | ``` 13 | 14 | ```hcl 15 | resource "vault_policy" "snapshot" { 16 | name = "snapshot" 17 | policy = file("policies/snapshot.hcl") 18 | } 19 | ``` 20 | 21 | 2. Create the Approle 22 | ```hcl 23 | resource "vault_auth_backend" "approle" { 24 | type = "approle" 25 | } 26 | 27 | resource "vault_approle_auth_backend_role" "snapshot" { 28 | backend = vault_auth_backend.approle.path 29 | role_name = "snapshot-agent" 30 | token_policies = ["snapshot"] 31 | token_bound_cidrs = var.allowed_cidr_blocks 32 | } 33 | ``` 34 | 35 | 36 | 37 | 3. Retrieve the secrets that will be used by the application. 38 | 39 | ```console 40 | export APPROLE_ROLE_ID=$(bao read --field=role_id auth/approle/role/snapshot-agent/role-id) 41 | export APPROLE_SECRET_ID=$(bao write --field=secret_id -f auth/approle/role/snapshot-agent/secret-id) 42 | ``` 43 | 44 | We can create a token by running this command. 45 | ```console 46 | bao write auth/approle/login role_id=${APPROLE_ROLE_ID} secret_id=${APPROLE_SECRET_ID} 47 | ``` 48 | 49 | -------------------------------------------------------------------------------- /opentofu/openbao/management/mounts.tf: -------------------------------------------------------------------------------- 1 | resource "vault_mount" "secret" { 2 | path = "secret" 3 | type = "kv-v2" 4 | description = "Store sensitive data" 5 | } 6 | -------------------------------------------------------------------------------- /opentofu/openbao/management/outputs.tf: -------------------------------------------------------------------------------- 1 | output "cert_manager_approle_credentials_secret_arn" { 2 | description = "The ARN of the AWS Secrets Manager secret containing the cert-manager AppRole credentials" 3 | value = aws_secretsmanager_secret.cert_manager_approle_credentials.arn 4 | } 5 | 6 | output "cert_manager_approle_role_id" { 7 | description = "The role ID of the cert-manager AppRole" 8 | value = vault_approle_auth_backend_role.cert_manager.role_id 9 | } 10 | -------------------------------------------------------------------------------- /opentofu/openbao/management/pki.tf: -------------------------------------------------------------------------------- 1 | resource "vault_mount" "this" { 2 | path = var.pki_mount_path 3 | type = "pki" 4 | description = var.pki_common_name 5 | 6 | default_lease_ttl_seconds = var.pki_max_lease_ttl 7 | max_lease_ttl_seconds = var.pki_max_lease_ttl 8 | } 9 | 10 | # Configure PKI with the root CA 11 | resource "vault_pki_secret_backend_config_ca" "pki" { 12 | backend = "pki" 13 | pem_bundle = jsondecode(data.aws_secretsmanager_secret_version.root_ca.secret_string).bundle 14 | } 15 | 16 | # Generate a key 17 | resource "vault_pki_secret_backend_key" "this" { 18 | backend = vault_mount.this.path 19 | type = "internal" 20 | key_type = var.pki_key_type 21 | key_bits = var.pki_key_bits 22 | key_name = lower(replace(var.pki_common_name, " ", "-")) 23 | } 24 | 25 | # Create a CSR (Certificate Signing Request) 26 | resource "vault_pki_secret_backend_intermediate_cert_request" "this" { 27 | backend = vault_mount.this.path 28 | type = "existing" 29 | common_name = var.pki_common_name 30 
| key_ref = vault_pki_secret_backend_key.this.key_id 31 | } 32 | 33 | # Sign our CSR 34 | resource "vault_pki_secret_backend_root_sign_intermediate" "this" { 35 | backend = "pki" 36 | csr = vault_pki_secret_backend_intermediate_cert_request.this.csr 37 | common_name = var.pki_common_name 38 | exclude_cn_from_sans = true 39 | organization = var.pki_organization 40 | ttl = var.pki_max_lease_ttl 41 | } 42 | 43 | # Submits the CA certificate to the PKI Secret Backend. 44 | resource "vault_pki_secret_backend_intermediate_set_signed" "this" { 45 | backend = vault_mount.this.path 46 | # Chaining the certificate used by the Vault CA, the intermediate and the root that are both part of the ca-chain.pem file 47 | certificate = "${vault_pki_secret_backend_root_sign_intermediate.this.certificate}\n${jsondecode(data.aws_secretsmanager_secret_version.root_ca.secret_string).ca}" 48 | } 49 | 50 | resource "vault_pki_secret_backend_issuer" "this" { 51 | backend = vault_mount.this.path 52 | issuer_ref = vault_pki_secret_backend_intermediate_set_signed.this.imported_issuers[0] 53 | issuer_name = lower(replace(var.pki_common_name, " ", "-")) 54 | } 55 | 56 | -------------------------------------------------------------------------------- /opentofu/openbao/management/policies.tf: -------------------------------------------------------------------------------- 1 | # Vault administrators 2 | resource "vault_policy" "admin" { 3 | name = "admin" 4 | policy = file("policies/admin.hcl") 5 | } 6 | 7 | # Cert manager 8 | resource "vault_policy" "cert_manager" { 9 | name = "cert-manager" 10 | policy = file("policies/cert-manager.hcl") 11 | } 12 | 13 | # Creating snapshots 14 | resource "vault_policy" "snapshot" { 15 | name = "snapshot" 16 | policy = file("policies/snapshot.hcl") 17 | } 18 | -------------------------------------------------------------------------------- /opentofu/openbao/management/policies/admin.hcl: -------------------------------------------------------------------------------- 1 | # The following doc has been taken as reference: https://learn.hashicorp.com/tutorials/vault/policies#write-a-policy 2 | # Added the ability to manage identities 3 | 4 | # Read system health check 5 | path "sys/health" 6 | { 7 | capabilities = ["read", "sudo"] 8 | } 9 | 10 | # Create and manage ACL policies broadly across Vault 11 | 12 | # List existing policies 13 | path "sys/policies/acl" 14 | { 15 | capabilities = ["list"] 16 | } 17 | 18 | # Create and manage ACL policies 19 | path "sys/policies/acl/*" 20 | { 21 | capabilities = ["create", "read", "update", "delete", "list", "sudo"] 22 | } 23 | 24 | # Enable and manage authentication methods broadly across Vault 25 | 26 | # Manage auth methods broadly across Vault 27 | path "auth/*" 28 | { 29 | capabilities = ["create", "read", "update", "delete", "list", "sudo"] 30 | } 31 | 32 | # Manage identities broadly across Vault 33 | path "identity/*" 34 | { 35 | capabilities = ["create", "read", "update", "delete", "list", "sudo"] 36 | } 37 | 38 | # Manage PKI broadly across Vault 39 | path "pki/*" 40 | { 41 | capabilities = ["create", "read", "update", "delete", "list", "sudo"] 42 | } 43 | path "int_pki/*" 44 | { 45 | capabilities = ["create", "read", "update", "delete", "list", "sudo"] 46 | } 47 | 48 | # Create, update, and delete auth methods 49 | path "sys/auth/*" 50 | { 51 | capabilities = ["create", "update", "delete", "sudo"] 52 | } 53 | 54 | # List auth methods 55 | path "sys/auth" 56 | { 57 | capabilities = ["read"] 58 | } 59 | 60 | # Enable and manage the key/value 
secrets engine at `secret/` path 61 | 62 | # List, create, update, and delete key/value secrets 63 | path "secret/*" 64 | { 65 | capabilities = ["create", "read", "update", "patch", "delete", "list", "sudo"] 66 | } 67 | 68 | # Manage secrets engines 69 | path "sys/mounts/*" 70 | { 71 | capabilities = ["create", "read", "update", "delete", "list", "sudo"] 72 | } 73 | 74 | # List existing secrets engines. 75 | path "sys/mounts" 76 | { 77 | capabilities = ["read"] 78 | } -------------------------------------------------------------------------------- /opentofu/openbao/management/policies/cert-manager.hcl: -------------------------------------------------------------------------------- 1 | path "pki_private_issuer/*" { 2 | capabilities = ["create", "read", "update", "delete", "list"] 3 | } 4 | -------------------------------------------------------------------------------- /opentofu/openbao/management/policies/snapshot.hcl: -------------------------------------------------------------------------------- 1 | path "sys/storage/raft/snapshot" { 2 | capabilities = ["read"] 3 | } 4 | 5 | # Use to identify the leader 6 | path "sys/storage/raft/configuration" { 7 | capabilities = ["read"] 8 | } 9 | -------------------------------------------------------------------------------- /opentofu/openbao/management/providers.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = var.region 3 | } 4 | 5 | provider "vault" { 6 | address = var.openbao_domain_name == "" ? format("https://bao.%s:8200", var.domain_name) : var.openbao_domain_name 7 | token = jsondecode(data.aws_secretsmanager_secret_version.openbao_root_token_secret.secret_string)["token"] 8 | namespace = "openbao" 9 | skip_tls_verify = true 10 | } 11 | -------------------------------------------------------------------------------- /opentofu/openbao/management/roles.tf: -------------------------------------------------------------------------------- 1 | resource "vault_pki_secret_backend_role" "this" { 2 | backend = vault_mount.this.path 3 | name = lower(var.pki_organization) 4 | allowed_domains = var.pki_domains 5 | allow_subdomains = true 6 | organization = [var.pki_organization] 7 | country = [var.pki_country] 8 | key_usage = [ 9 | "DigitalSignature", 10 | "KeyAgreement", 11 | "KeyEncipherment", 12 | ] 13 | max_ttl = var.pki_max_lease_ttl 14 | ttl = var.pki_max_lease_ttl 15 | } 16 | -------------------------------------------------------------------------------- /opentofu/openbao/management/secrets.tf: -------------------------------------------------------------------------------- 1 | # Get the OpenBao root token from AWS Secrets Manager 2 | data "aws_secretsmanager_secret_version" "openbao_root_token_secret" { 3 | secret_id = var.openbao_root_token_secret_id 4 | } 5 | 6 | # Get the root CA bundle from AWS Secrets Manager 7 | data "aws_secretsmanager_secret" "root_ca" { 8 | name = var.root_ca_secret_name 9 | } 10 | 11 | data "aws_secretsmanager_secret_version" "root_ca" { 12 | secret_id = data.aws_secretsmanager_secret.root_ca.id 13 | } 14 | 15 | # Store AppRole credentials in AWS Secrets Manager 16 | resource "aws_secretsmanager_secret" "cert_manager_approle_credentials" { 17 | name = var.cert_manager_approle_secret_name 18 | recovery_window_in_days = 0 19 | } 20 | 21 | # Generate a new secret ID for the AppRole 22 | resource "vault_approle_auth_backend_role_secret_id" "cert_manager" { 23 | backend = vault_auth_backend.approle.path 24 | role_name = 
vault_approle_auth_backend_role.cert_manager.role_name 25 | } 26 | 27 | resource "aws_secretsmanager_secret_version" "cert_manager_approle_credentials" { 28 | secret_id = aws_secretsmanager_secret.cert_manager_approle_credentials.id 29 | secret_string = jsonencode({ 30 | cert_manager_approle_id = vault_approle_auth_backend_role.cert_manager.role_id 31 | cert_manager_approle_secret = vault_approle_auth_backend_role_secret_id.cert_manager.secret_id 32 | }) 33 | } 34 | -------------------------------------------------------------------------------- /opentofu/openbao/management/stack.tm.hcl: -------------------------------------------------------------------------------- 1 | stack { 2 | name = "OpenBao management" 3 | description = "Configure the OpenBao cluster" 4 | id = "17b0065c-171c-4bd0-90d9-17793673ff17" 5 | 6 | after = [ 7 | "/opentofu/openbao/cluster" 8 | ] 9 | 10 | tags = [ 11 | "aws", 12 | "openbao", 13 | "openbao-management", 14 | "security" 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /opentofu/openbao/management/variables.tf: -------------------------------------------------------------------------------- 1 | 2 | variable "region" { 3 | description = "The region to deploy the resources" 4 | type = string 5 | } 6 | 7 | variable "openbao_root_token_secret_id" { 8 | description = "The secret ID for the OpenBao root token" 9 | type = string 10 | } 11 | 12 | variable "domain_name" { 13 | description = "The domain name for which the certificate should be issued" 14 | type = string 15 | } 16 | 17 | variable "openbao_domain_name" { 18 | description = "Vault domain name (default: bao.)" 19 | type = string 20 | default = "" 21 | } 22 | 23 | variable "allowed_cidr_blocks" { 24 | description = "List of CIDR blocks allowed to reach Vault's API" 25 | type = list(string) 26 | default = ["10.0.0.0/16"] 27 | } 28 | 29 | variable "root_ca_secret_name" { 30 | description = "The name of the AWS Secrets Manager secret containing the root CA certificate bundle" 31 | type = string 32 | } 33 | 34 | variable "cert_manager_approle_secret_name" { 35 | description = "The name of the AWS Secrets Manager secret containing the cert-manager AppRole credentials" 36 | type = string 37 | } 38 | 39 | variable "pki_common_name" { 40 | description = "Common name to identify the Vault issuer" 41 | type = string 42 | default = "Private PKI - Vault Issuer" 43 | } 44 | 45 | variable "pki_mount_path" { 46 | description = "Vault Issuer PKI mount path" 47 | type = string 48 | default = "pki_private_issuer" 49 | } 50 | 51 | variable "pki_organization" { 52 | description = "The organization name used for generating certificates" 53 | type = string 54 | } 55 | 56 | variable "pki_country" { 57 | description = "The country name used for generating certificates" 58 | type = string 59 | } 60 | 61 | variable "pki_domains" { 62 | description = "List of domain names that can be used within the certificates" 63 | type = list(string) 64 | default = ["cluster.local"] 65 | } 66 | 67 | variable "pki_key_type" { 68 | description = "The generated key type" 69 | type = string 70 | default = "ec" 71 | } 72 | 73 | variable "pki_key_bits" { 74 | description = "The number of bits of generated keys" 75 | type = number 76 | default = 256 77 | } 78 | 79 | variable "pki_max_lease_ttl" { 80 | description = "Maximum TTL (in seconds) that can be requested for certificates (default 3 years)" 81 | type = number 82 | default = 94670856 83 | } 84 | 
-------------------------------------------------------------------------------- /opentofu/openbao/management/variables.tfvars: -------------------------------------------------------------------------------- 1 | region = "eu-west-3" 2 | openbao_root_token_secret_id = "openbao/cloud-native-ref/tokens/root" 3 | domain_name = "priv.cloud.ogenki.io" 4 | root_ca_secret_name = "certificates/priv.cloud.ogenki.io/root-ca" 5 | openbao_certificates_secret_name = "certificates/priv.cloud.ogenki.io/openbao" 6 | cert_manager_approle_secret_name = "openbao/cloud-native-ref/approles/cert-manager" 7 | pki_country = "France" 8 | pki_organization = "Ogenki" 9 | pki_domains = [ 10 | "cluster.local", 11 | "priv.cloud.ogenki.io" 12 | ] 13 | tags = { 14 | project = "cloud-native-ref" 15 | owner = "Smana" 16 | } 17 | -------------------------------------------------------------------------------- /opentofu/openbao/management/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.4" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "~> 5.0" 8 | } 9 | vault = { 10 | source = "hashicorp/vault" 11 | version = "~> 5.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /opentofu/openbao/management/workflows.tm.hcl: -------------------------------------------------------------------------------- 1 | script "deploy" { 2 | description = "Init OpenBao cluster and configure PKI" 3 | job { 4 | name = "openbao-configure" 5 | description = "OpenBao configuration" 6 | commands = [ 7 | # Initialize OpenBao cluster 8 | [ 9 | "bash", 10 | "../../../scripts/openbao-config.sh", 11 | "init", 12 | "--url", 13 | global.openbao_url, 14 | "--root-token-secret-name", 15 | global.root_token_secret_name, 16 | "--region", 17 | global.region, 18 | "--profile", 19 | global.profile, 20 | "--skip-verify", 21 | ], 22 | [ 23 | "bash", 24 | "../../../scripts/openbao-config.sh", 25 | "pki", 26 | "--url", 27 | global.openbao_url, 28 | "--root-token-secret-name", 29 | global.root_token_secret_name, 30 | "--root-ca-secret-name", 31 | global.root_ca_secret_name, 32 | "--region", 33 | global.region, 34 | "--profile", 35 | global.profile, 36 | ], 37 | # Module management: Configure OpenBao (SecretsEngine, Approles, PKI, etc.) 
38 | [global.provisioner, "init"], 39 | [global.provisioner, "validate"], 40 | [global.provisioner, "plan", "-out=out.tfplan", "-lock=false", "-var-file=variables.tfvars"], 41 | ["trivy", "config", "--exit-code=1", "--ignorefile=./.trivyignore.yaml", "."], 42 | [global.provisioner, "apply", "-auto-approve", "-var-file=variables.tfvars", 43 | { 44 | sync_deployment = true 45 | tofu_plan_file = "out.tfplan" 46 | } 47 | ], 48 | ] 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /security/base/cert-manager/helmrelease.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: cert-manager 5 | namespace: security 6 | spec: 7 | releaseName: cert-manager 8 | driftDetection: 9 | mode: enabled 10 | chart: 11 | spec: 12 | chart: cert-manager 13 | sourceRef: 14 | kind: HelmRepository 15 | name: jetstack 16 | version: "v1.17.2" 17 | interval: 2m0s 18 | install: 19 | createNamespace: true 20 | remediation: 21 | retries: 3 22 | values: 23 | fullnameOverride: "cert-manager" 24 | global: 25 | logLevel: 2 26 | leaderElection: 27 | namespace: "security" 28 | installCRDs: false 29 | resources: 30 | limits: 31 | cpu: 300m 32 | memory: 150Mi 33 | securityContext: 34 | fsGroup: 1001 35 | containerSecurityContext: 36 | capabilities: 37 | drop: 38 | - ALL 39 | readOnlyRootFilesystem: true 40 | runAsNonRoot: true 41 | prometheus: 42 | enabled: true 43 | servicemonitor: 44 | enabled: true 45 | labels: 46 | prometheus-instance: main 47 | 48 | webhook: 49 | replicaCount: 1 50 | timeoutSeconds: 10 51 | containerSecurityContext: 52 | capabilities: 53 | drop: 54 | - ALL 55 | readOnlyRootFilesystem: true 56 | runAsNonRoot: true 57 | resources: 58 | limits: 59 | cpu: 100m 60 | memory: 80Mi 61 | 62 | cainjector: 63 | enabled: true 64 | replicaCount: 1 65 | containerSecurityContext: 66 | capabilities: 67 | drop: 68 | - ALL 69 | readOnlyRootFilesystem: true 70 | runAsNonRoot: true 71 | resources: 72 | limits: 73 | cpu: 300m 74 | memory: 250Mi 75 | 76 | extraArgs: 77 | - --enable-gateway-api 78 | -------------------------------------------------------------------------------- /security/base/cert-manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - le-clusterissuer-prod.yaml 6 | - le-clusterissuer-staging.yaml 7 | - helmrelease.yaml 8 | - openbao-clusterissuer.yaml 9 | - openbao-approle-externalsecret.yaml 10 | -------------------------------------------------------------------------------- /security/base/cert-manager/le-clusterissuer-prod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: letsencrypt-prod 5 | spec: 6 | acme: 7 | email: smainklh@gmail.com 8 | server: https://acme-v02.api.letsencrypt.org/directory 9 | privateKeySecretRef: 10 | name: ogenki-issuer-account-key 11 | solvers: 12 | - selector: 13 | dnsZones: 14 | - "${domain_name}" 15 | dns01: 16 | route53: 17 | region: ${region} 18 | -------------------------------------------------------------------------------- /security/base/cert-manager/le-clusterissuer-staging.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: 
letsencrypt-staging 5 | spec: 6 | acme: 7 | email: smainklh@gmail.com 8 | server: https://acme-staging-v02.api.letsencrypt.org/directory 9 | privateKeySecretRef: 10 | name: ogenki-issuer-account-key 11 | solvers: 12 | - selector: 13 | dnsZones: 14 | - "${domain_name}" 15 | dns01: 16 | route53: 17 | region: ${region} 18 | -------------------------------------------------------------------------------- /security/base/cert-manager/openbao-approle-externalsecret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: cert-manager-bao-approle 5 | namespace: security 6 | spec: 7 | dataFrom: 8 | - extract: 9 | conversionStrategy: Default 10 | key: openbao/cloud-native-ref/approles/cert-manager 11 | refreshInterval: 1h 12 | secretStoreRef: 13 | kind: ClusterSecretStore 14 | name: clustersecretstore 15 | target: 16 | creationPolicy: Owner 17 | deletionPolicy: Retain 18 | name: cert-manager-openbao-approle 19 | -------------------------------------------------------------------------------- /security/base/epis/cert-manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cloud.ogenki.io/v1alpha1 2 | kind: EPI 3 | metadata: 4 | name: xplane-cert-manager 5 | namespace: security 6 | spec: 7 | parameters: 8 | clusters: 9 | - name: "mycluster-0" 10 | region: "eu-west-3" 11 | serviceAccount: 12 | name: cert-manager 13 | namespace: security 14 | # Reference: https://cert-manager.io/docs/configuration/acme/dns01/route53/ 15 | policyDocument: | 16 | { 17 | "Version": "2012-10-17", 18 | "Statement": [ 19 | { 20 | "Effect": "Allow", 21 | "Action": "route53:GetChange", 22 | "Resource": "arn:aws:route53:::change/*" 23 | }, 24 | { 25 | "Effect": "Allow", 26 | "Action": [ 27 | "route53:ChangeResourceRecordSets", 28 | "route53:ListResourceRecordSets" 29 | ], 30 | "Resource": "arn:aws:route53:::hostedzone/*" 31 | }, 32 | { 33 | "Effect": "Allow", 34 | "Action": "route53:ListHostedZonesByName", 35 | "Resource": "*" 36 | } 37 | ] 38 | } 39 | -------------------------------------------------------------------------------- /security/base/epis/default-gha-runner-scale-set.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cloud.ogenki.io/v1alpha1 2 | kind: EPI 3 | metadata: 4 | name: xplane-default-actions-runner 5 | namespace: tooling 6 | spec: 7 | parameters: 8 | clusters: 9 | - name: "mycluster-0" 10 | region: "eu-west-3" 11 | serviceAccount: 12 | name: default-gha-runner-scale-set 13 | namespace: tooling 14 | # Example policy, not used for the moment. 
15 | policyDocument: | 16 | { 17 | "Version": "2012-10-17", 18 | "Statement": [ 19 | { 20 | "Effect": "Allow", 21 | "Action": [ 22 | "ecr:GetDownloadUrlForLayer", 23 | "ecr:BatchGetImage", 24 | "ecr:BatchCheckLayerAvailability", 25 | "ecr:GetAuthorizationToken", 26 | "ecr:InitiateLayerUpload", 27 | "ecr:UploadLayerPart", 28 | "ecr:CompleteLayerUpload", 29 | "ecr:PutImage" 30 | ], 31 | "Resource": "arn:aws:ecr:us-west-2:123456789012:repository/my-repo" 32 | } 33 | ] 34 | } 35 | -------------------------------------------------------------------------------- /security/base/epis/external-dns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cloud.ogenki.io/v1alpha1 2 | kind: EPI 3 | metadata: 4 | name: xplane-external-dns 5 | namespace: kube-system 6 | spec: 7 | deletionPolicy: Delete 8 | parameters: 9 | clusters: 10 | - name: "mycluster-0" 11 | region: "eu-west-3" 12 | serviceAccount: 13 | name: external-dns 14 | namespace: kube-system 15 | # Reference: https://kubernetes-sigs.github.io/external-dns/v0.13.4/tutorials/aws/#iam-policy 16 | policyDocument: | 17 | { 18 | "Version": "2012-10-17", 19 | "Statement": [ 20 | { 21 | "Effect": "Allow", 22 | "Action": [ 23 | "route53:ChangeResourceRecordSets" 24 | ], 25 | "Resource": [ 26 | "arn:aws:route53:::hostedzone/*" 27 | ] 28 | }, 29 | { 30 | "Effect": "Allow", 31 | "Action": [ 32 | "route53:ListHostedZones", 33 | "route53:ListResourceRecordSets" 34 | ], 35 | "Resource": [ 36 | "*" 37 | ] 38 | } 39 | ] 40 | } 41 | -------------------------------------------------------------------------------- /security/base/epis/external-secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cloud.ogenki.io/v1alpha1 2 | kind: EPI 3 | metadata: 4 | name: xplane-external-secrets 5 | namespace: security 6 | spec: 7 | parameters: 8 | clusters: 9 | - name: "mycluster-0" 10 | region: "eu-west-3" 11 | serviceAccount: 12 | name: external-secrets 13 | namespace: security 14 | # Reference: https://github.com/external-secrets/external-secrets/blob/main/opentofu/aws/modules/cluster/irsa.tf 15 | policyDocument: | 16 | { 17 | "Version": "2012-10-17", 18 | "Statement": [ 19 | { 20 | "Effect": "Allow", 21 | "Action": [ 22 | "secretsmanager:GetResourcePolicy", 23 | "secretsmanager:GetSecretValue", 24 | "secretsmanager:DescribeSecret", 25 | "secretsmanager:ListSecretVersionIds" 26 | ], 27 | "Resource": [ 28 | "*" 29 | ] 30 | } 31 | ] 32 | } 33 | -------------------------------------------------------------------------------- /security/base/epis/harbor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cloud.ogenki.io/v1alpha1 2 | kind: EPI 3 | metadata: 4 | name: xplane-harbor 5 | namespace: tooling 6 | spec: 7 | deletionPolicy: Delete 8 | parameters: 9 | clusters: 10 | - name: "mycluster-0" 11 | region: "eu-west-3" 12 | serviceAccount: 13 | name: harbor 14 | namespace: tooling 15 | policyDocument: | 16 | { 17 | "Version": "2012-10-17", 18 | "Statement": [ 19 | { 20 | "Sid": "ReadPermissions", 21 | "Effect": "Allow", 22 | "Action": [ 23 | "s3:GetObject", 24 | "s3:ListBucket" 25 | ], 26 | "Resource": [ 27 | "arn:aws:s3:::${region}-ogenki-harbor", 28 | "arn:aws:s3:::${region}-ogenki-harbor/*" 29 | ] 30 | }, 31 | { 32 | "Sid": "WritePermissions", 33 | "Effect": "Allow", 34 | "Action": [ 35 | "s3:PutObject", 36 | "s3:DeleteObject" 37 | ], 38 | "Resource": "arn:aws:s3:::${region}-ogenki-harbor/*" 39 | } 40 | ] 41 | } 42 | 
-------------------------------------------------------------------------------- /security/base/epis/openbao-snapshot.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cloud.ogenki.io/v1alpha1 2 | kind: EPI 3 | metadata: 4 | name: xplane-openbao-snapshot 5 | namespace: security 6 | spec: 7 | parameters: 8 | clusters: 9 | - name: "mycluster-0" 10 | region: "eu-west-3" 11 | serviceAccount: 12 | name: openbao-snapshot 13 | namespace: security 14 | policyDocument: | 15 | { 16 | "Version": "2012-10-17", 17 | "Statement": [ 18 | { 19 | "Sid": "ReadPermissions", 20 | "Effect": "Allow", 21 | "Action": [ 22 | "s3:GetObject", 23 | "s3:ListBucket" 24 | ], 25 | "Resource": [ 26 | "arn:aws:s3:::${region}-ogenki-openbao-snapshot", 27 | "arn:aws:s3:::${region}-ogenki-openbao-snapshot/*" 28 | ] 29 | }, 30 | { 31 | "Sid": "WritePermissions", 32 | "Effect": "Allow", 33 | "Action": [ 34 | "s3:PutObject" 35 | ], 36 | "Resource": "arn:aws:s3:::${region}-ogenki-openbao-snapshot/*" 37 | }, 38 | { 39 | "Sid": "AllowKMSUsage", 40 | "Effect": "Allow", 41 | "Action": [ 42 | "kms:Decrypt", 43 | "kms:Encrypt", 44 | "kms:ReEncrypt*", 45 | "kms:GenerateDataKey*" 46 | ], 47 | "Resource": "*" 48 | } 49 | ] 50 | } 51 | -------------------------------------------------------------------------------- /security/base/epis/victoriametrics.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cloud.ogenki.io/v1alpha1 2 | kind: EPI 3 | metadata: 4 | name: xplane-victoriametrics 5 | namespace: observability 6 | spec: 7 | parameters: 8 | clusters: 9 | - name: "mycluster-0" 10 | region: "eu-west-3" 11 | serviceAccount: 12 | name: vmagent-victoria-metrics-k8s-stack 13 | namespace: observability 14 | policyDocument: | 15 | { 16 | "Version" : "2012-10-17", 17 | "Id" : "AllowEC2DescribeAndList", 18 | "Statement" : [ 19 | { 20 | "Sid" : "", 21 | "Effect" : "Allow", 22 | "Action" : [ 23 | "ec2:DescribeInstances", 24 | "ec2:DescribeAvailabilityZones" 25 | ], 26 | "Resource" : "*" 27 | } 28 | ] 29 | } 30 | -------------------------------------------------------------------------------- /security/base/external-secrets/clustersecretstore.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ClusterSecretStore 3 | metadata: 4 | name: clustersecretstore 5 | spec: 6 | provider: 7 | aws: 8 | region: ${region} 9 | service: SecretsManager 10 | refreshInterval: 0 11 | -------------------------------------------------------------------------------- /security/base/external-secrets/helmrelease.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: external-secrets 5 | namespace: security 6 | spec: 7 | releaseName: external-secrets 8 | driftDetection: 9 | mode: enabled 10 | chart: 11 | spec: 12 | chart: external-secrets 13 | sourceRef: 14 | kind: HelmRepository 15 | name: external-secrets 16 | version: "0.17.0" 17 | interval: 10m0s 18 | install: 19 | createNamespace: true 20 | remediation: 21 | retries: 3 22 | values: 23 | replicaCount: 1 24 | serviceAccount: 25 | name: "external-secrets" 26 | 27 | installCRDs: false 28 | 29 | securityContext: 30 | capabilities: 31 | drop: 32 | - ALL 33 | readOnlyRootFilesystem: true 34 | runAsNonRoot: true 35 | runAsUser: 1000 36 | 37 | resources: 38 | limits: 39 | cpu: 100m 40 | memory: 150Mi 41 | 42 | prometheus: 43 | 
# -- deprecated. will be removed with 0.7.0, use serviceMonitor instead. 44 | enabled: true 45 | # -- will be used when we'll deploy the prometheus-operator 46 | serviceMonitor: 47 | enabled: false 48 | additionalLabels: {} 49 | 50 | podDisruptionBudget: 51 | enabled: true 52 | minAvailable: 1 53 | 54 | webhook: 55 | replicaCount: 1 56 | prometheus: 57 | enabled: true 58 | serviceMonitor: 59 | enabled: false 60 | additionalLabels: {} 61 | resources: 62 | limits: 63 | cpu: 100m 64 | memory: 50Mi 65 | securityContext: 66 | capabilities: 67 | drop: 68 | - ALL 69 | readOnlyRootFilesystem: true 70 | runAsNonRoot: true 71 | runAsUser: 1000 72 | 73 | certController: 74 | replicaCount: 1 75 | prometheus: 76 | enabled: true 77 | serviceMonitor: 78 | enabled: false 79 | additionalLabels: {} 80 | 81 | securityContext: 82 | capabilities: 83 | drop: 84 | - ALL 85 | readOnlyRootFilesystem: true 86 | runAsNonRoot: true 87 | runAsUser: 1000 88 | 89 | resources: 90 | limits: 91 | cpu: 100m 92 | memory: 280Mi 93 | -------------------------------------------------------------------------------- /security/base/external-secrets/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - clustersecretstore.yaml 6 | - helmrelease.yaml 7 | -------------------------------------------------------------------------------- /security/base/kyverno/helmrelease-controller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: kyverno 5 | namespace: security 6 | spec: 7 | releaseName: kyverno 8 | driftDetection: 9 | mode: enabled 10 | chart: 11 | spec: 12 | chart: kyverno 13 | sourceRef: 14 | kind: HelmRepository 15 | name: kyverno 16 | version: "3.4.2" 17 | interval: 10m0s 18 | install: 19 | remediation: 20 | retries: 3 21 | values: 22 | fullnameOverride: kyverno 23 | crds: 24 | install: false 25 | # Need to set at least resources limits in a near future 26 | -------------------------------------------------------------------------------- /security/base/kyverno/helmrelease-policies.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: kyverno-policies 5 | namespace: security 6 | spec: 7 | releaseName: kyverno-policies 8 | driftDetection: 9 | mode: enabled 10 | chart: 11 | spec: 12 | chart: kyverno-policies 13 | sourceRef: 14 | kind: HelmRepository 15 | name: kyverno 16 | version: "3.4.2" 17 | interval: 10m0s 18 | install: 19 | remediation: 20 | retries: 3 21 | values: {} 22 | -------------------------------------------------------------------------------- /security/base/kyverno/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - helmrelease-controller.yaml 6 | - helmrelease-policies.yaml 7 | -------------------------------------------------------------------------------- /security/base/openbao-snapshot/external-secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: openbao-snapshot 5 | spec: 6 | dataFrom: 7 | - extract: 8 | conversionStrategy: Default 9 | key: 
security/openbao/openbao-snapshot 10 | refreshInterval: 1h 11 | secretStoreRef: 12 | kind: ClusterSecretStore 13 | name: clustersecretstore 14 | target: 15 | creationPolicy: Owner 16 | deletionPolicy: Retain 17 | name: openbao-snapshot 18 | -------------------------------------------------------------------------------- /security/base/openbao-snapshot/kms.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kms.aws.upbound.io/v1beta1 3 | kind: Key 4 | metadata: 5 | labels: 6 | cloud.ogenki.io/name: openbao-snapshot 7 | name: xplane-openbao-snapshot 8 | spec: 9 | forProvider: 10 | deletionWindowInDays: 7 11 | description: Used for the Vault s3 bucket 12 | region: ${region} 13 | --- 14 | apiVersion: kms.aws.upbound.io/v1beta1 15 | kind: Alias 16 | metadata: 17 | name: xplane-openbao-snapshot 18 | spec: 19 | forProvider: 20 | region: ${region} 21 | targetKeyIdRef: 22 | name: xplane-openbao-snapshot 23 | -------------------------------------------------------------------------------- /security/base/openbao-snapshot/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: security 4 | 5 | resources: 6 | - external-secrets.yaml 7 | - kms.yaml 8 | - s3-bucket.yaml 9 | - serviceaccount.yaml 10 | - snapshot-cronjob.yaml 11 | - snapshot-pvc.yaml 12 | -------------------------------------------------------------------------------- /security/base/openbao-snapshot/s3-bucket.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: s3.aws.upbound.io/v1beta1 3 | kind: Bucket 4 | metadata: 5 | name: openbao-snapshot 6 | labels: 7 | cloud.ogenki.io/name: openbao-snapshot 8 | annotations: 9 | crossplane.io/external-name: ${region}-ogenki-openbao-snapshot 10 | spec: 11 | deletionPolicy: Orphan # The bucket should not be deleted when the resource is deleted in Crossplane 12 | forProvider: 13 | region: ${region} 14 | --- 15 | apiVersion: s3.aws.upbound.io/v1beta1 16 | kind: BucketServerSideEncryptionConfiguration 17 | metadata: 18 | labels: 19 | cloud.ogenki.io/name: openbao-snapshot 20 | name: xplane-openbao-snapshot 21 | spec: 22 | forProvider: 23 | bucketSelector: 24 | matchLabels: 25 | cloud.ogenki.io/name: openbao-snapshot 26 | region: ${region} 27 | rule: 28 | - applyServerSideEncryptionByDefault: 29 | - kmsMasterKeyIdSelector: 30 | matchLabels: 31 | cloud.ogenki.io/name: openbao-snapshot 32 | sseAlgorithm: aws:kms 33 | --- 34 | apiVersion: s3.aws.upbound.io/v1beta1 35 | kind: BucketLifecycleConfiguration 36 | metadata: 37 | labels: 38 | cloud.ogenki.io/name: openbao-snapshot 39 | name: xplane-openbao-snapshot 40 | spec: 41 | forProvider: 42 | bucketSelector: 43 | matchLabels: 44 | cloud.ogenki.io/name: openbao-snapshot 45 | region: ${region} 46 | rule: 47 | - transition: 48 | - days: 30 49 | storageClass: GLACIER 50 | id: glacier 51 | status: Enabled 52 | - expiration: 53 | - days: 120 54 | id: expiration 55 | status: Enabled 56 | -------------------------------------------------------------------------------- /security/base/openbao-snapshot/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: openbao-snapshot 5 | -------------------------------------------------------------------------------- /security/base/openbao-snapshot/snapshot-cronjob.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: CronJob 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: openbao 6 | name: openbao-snapshot 7 | spec: 8 | concurrencyPolicy: Forbid 9 | failedJobsHistoryLimit: 3 10 | schedule: "0 4 * * *" 11 | successfulJobsHistoryLimit: 1 12 | suspend: false 13 | jobTemplate: 14 | metadata: 15 | labels: 16 | app.kubernetes.io/instance: openbao 17 | spec: 18 | template: 19 | metadata: 20 | labels: 21 | app.kubernetes.io/instance: openbao 22 | spec: 23 | volumes: 24 | - name: snapshot 25 | persistentVolumeClaim: 26 | claimName: openbao-snapshot 27 | securityContext: 28 | fsGroup: 1001 29 | runAsUser: 1000 30 | runAsGroup: 1001 31 | containers: 32 | - command: 33 | - sh 34 | - -c 35 | - "openbao-snapshot.sh save -b $(BUCKET_NAME) -s /snapshot/backup.snap -a $(VAULT_ADDR) -d 8" 36 | env: 37 | # The CA cert should be added to the container image 38 | - name: "VAULT_SKIP_VERIFY" 39 | value: "true" 40 | envFrom: 41 | - secretRef: 42 | name: openbao-snapshot 43 | image: smana/openbao-snapshot:v0.1.0 44 | imagePullPolicy: IfNotPresent 45 | name: openbao-snapshot 46 | resources: 47 | limits: 48 | cpu: 150m 49 | memory: 150Mi 50 | volumeMounts: 51 | - mountPath: "/snapshot" 52 | name: snapshot 53 | serviceAccountName: openbao-snapshot 54 | restartPolicy: Never 55 | -------------------------------------------------------------------------------- /security/base/openbao-snapshot/snapshot-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: openbao 6 | name: openbao-snapshot 7 | spec: 8 | accessModes: 9 | - ReadWriteOnce 10 | resources: 11 | requests: 12 | storage: 5Gi 13 | storageClassName: gp3 14 | -------------------------------------------------------------------------------- /security/base/rbac/admin.yaml: -------------------------------------------------------------------------------- 1 | # Permissions based on groups retrieved from Zitadel 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: ogenki-admin 6 | subjects: 7 | - kind: Group 8 | name: admin 9 | apiGroup: rbac.authorization.k8s.io 10 | roleRef: 11 | kind: ClusterRole 12 | name: cluster-admin 13 | apiGroup: rbac.authorization.k8s.io 14 | -------------------------------------------------------------------------------- /security/base/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: security 4 | 5 | resources: 6 | - admin.yaml 7 | -------------------------------------------------------------------------------- /security/base/zitadel/certificate.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: zitadel 5 | spec: 6 | secretName: zitadel-certificate 7 | duration: 2160h # 90d 8 | renewBefore: 360h # 15d 9 | commonName: auth.${domain_name} 10 | dnsNames: 11 | - auth.${domain_name} 12 | issuerRef: 13 | name: letsencrypt-prod 14 | kind: ClusterIssuer 15 | group: cert-manager.io 16 | -------------------------------------------------------------------------------- /security/base/zitadel/externalsecret-sqlinstance-password.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: zitadel-masterkey 5 | spec: 6 | data: 7 | - secretKey: ZITADEL_MASTERKEY 8 | remoteRef: 9 | key: zitadel/envvars 10 | property: ZITADEL_MASTERKEY 11 | refreshInterval: 20m 12 | secretStoreRef: 13 | kind: ClusterSecretStore 14 | name: clustersecretstore 15 | target: 16 | template: 17 | engineVersion: v2 18 | data: 19 | masterkey: "{{ .ZITADEL_MASTERKEY }}" 20 | creationPolicy: Owner 21 | deletionPolicy: Retain 22 | name: zitadel-masterkey 23 | -------------------------------------------------------------------------------- /security/base/zitadel/externalsecret-zitadel-envvars.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: zitadel-envvars 5 | spec: 6 | dataFrom: 7 | - extract: 8 | conversionStrategy: Default 9 | key: zitadel/envvars 10 | refreshInterval: 20m 11 | secretStoreRef: 12 | kind: ClusterSecretStore 13 | name: clustersecretstore 14 | target: 15 | creationPolicy: Owner 16 | deletionPolicy: Retain 17 | name: zitadel-envvars 18 | -------------------------------------------------------------------------------- /security/base/zitadel/externalsecret-zitadel-masterkey.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: zitadel-masterkey 5 | spec: 6 | data: 7 | - secretKey: ZITADEL_MASTERKEY 8 | remoteRef: 9 | key: zitadel/envvars 10 | property: ZITADEL_MASTERKEY 11 | refreshInterval: 20m 12 | secretStoreRef: 13 | kind: ClusterSecretStore 14 | name: clustersecretstore 15 | target: 16 | template: 17 | engineVersion: v2 18 | data: 19 | masterkey: "{{ .ZITADEL_MASTERKEY }}" 20 | creationPolicy: Owner 21 | deletionPolicy: Retain 22 | name: zitadel-masterkey 23 | -------------------------------------------------------------------------------- /security/base/zitadel/gateway.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: gateway.networking.k8s.io/v1 2 | kind: Gateway 3 | metadata: 4 | name: zitadel 5 | spec: 6 | gatewayClassName: cilium 7 | infrastructure: 8 | annotations: 9 | service.beta.kubernetes.io/aws-load-balancer-name: "ogenki-zitadel-gateway" 10 | service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: instance 11 | service.beta.kubernetes.io/aws-load-balancer-scheme: "internet-facing" 12 | service.beta.kubernetes.io/aws-load-balancer-type: "external" 13 | external-dns.alpha.kubernetes.io/hostname: "auth.${domain_name}" 14 | listeners: 15 | - name: auth 16 | hostname: "auth.${domain_name}" 17 | port: 443 18 | protocol: TLS 19 | allowedRoutes: 20 | namespaces: 21 | from: Same 22 | tls: 23 | mode: Passthrough 24 | -------------------------------------------------------------------------------- /security/base/zitadel/helmrelease.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: zitadel 5 | spec: 6 | interval: 30m 7 | timeout: 30m 8 | driftDetection: 9 | mode: enabled 10 | chart: 11 | spec: 12 | chart: zitadel 13 | version: "8.11.3" 14 | sourceRef: 15 | kind: HelmRepository 16 | name: zitadel 17 | interval: 12h 18 | values: 19 | replicaCount: 1 20 | initJob: 21 | backoffLimit: 30 # Wait for the 
CNPG database instance to be ready 22 | zitadel: 23 | masterkeySecretName: "zitadel-masterkey" # Populated from the zitadel-envvars secret 24 | configmapConfig: 25 | Log: 26 | Formatter: 27 | Format: json 28 | ExternalPort: 443 29 | ExternalSecure: true 30 | ExternalDomain: "auth.${domain_name}" 31 | TLS: 32 | Enabled: true 33 | KeyPath: /tls/tls.key 34 | CertPath: /tls/tls.crt 35 | Database: 36 | Postgres: 37 | Host: xplane-zitadel-cnpg-cluster-rw 38 | Port: 5432 39 | Database: zitadel 40 | MaxOpenConns: 20 41 | MaxIdleConns: 10 42 | MaxConnLifetime: 30m 43 | MaxConnIdleTime: 5m 44 | 45 | # reference: https://zitadel.com/docs/self-hosting/manage/configure 46 | # All configuration items are loaded from a secret 47 | # These are the keys that are expected in the secret 48 | # ZITADEL_DATABASE_POSTGRES_ADMIN_PASSWORD 49 | # ZITADEL_DATABASE_POSTGRES_ADMIN_SSL_MODE 50 | # ZITADEL_DATABASE_POSTGRES_ADMIN_USERNAME 51 | # ZITADEL_DATABASE_POSTGRES_USER_PASSWORD 52 | # ZITADEL_DATABASE_POSTGRES_USER_SSL_MODE 53 | # ZITADEL_DATABASE_POSTGRES_USER_USERNAME 54 | # ZITADEL_FIRSTINSTANCE_ORG_HUMAN_PASSWORD 55 | # ZITADEL_FIRSTINSTANCE_ORG_HUMAN_USERNAME 56 | # ZITADEL_MASTERKEY 57 | envVarsSecret: "zitadel-envvars" 58 | 59 | # Mount certificate generated by cert-manager 60 | extraVolumes: 61 | - name: zitadel-certificate 62 | secret: 63 | defaultMode: 420 64 | secretName: zitadel-certificate 65 | extraVolumeMounts: 66 | - name: zitadel-certificate 67 | mountPath: /tls 68 | readOnly: true 69 | -------------------------------------------------------------------------------- /security/base/zitadel/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: security 4 | resources: 5 | - externalsecret-zitadel-envvars.yaml 6 | - externalsecret-zitadel-masterkey.yaml 7 | - certificate.yaml 8 | - gateway.yaml 9 | - helmrelease.yaml 10 | - network-policy.yaml 11 | - sqlinstance.yaml 12 | - tlsroute.yaml 13 | -------------------------------------------------------------------------------- /security/base/zitadel/network-policy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cilium.io/v2 2 | kind: CiliumNetworkPolicy 3 | metadata: 4 | name: zitadel 5 | spec: 6 | description: "Limit traffic to and from the Zitadel application" 7 | endpointSelector: 8 | matchLabels: 9 | k8s:app.kubernetes.io/name: zitadel 10 | egress: 11 | - toEndpoints: 12 | - matchLabels: 13 | k8s:io.kubernetes.pod.namespace: kube-system 14 | k8s:k8s-app: kube-dns 15 | toPorts: 16 | - ports: 17 | - port: "53" 18 | protocol: UDP 19 | - port: "53" 20 | protocol: TCP 21 | - toEntities: 22 | - world 23 | toPorts: 24 | - ports: 25 | - port: "443" 26 | protocol: TCP 27 | - toEndpoints: 28 | - matchLabels: 29 | k8s:cnpg.io/cluster: xplane-zitadel-cnpg-cluster 30 | toPorts: 31 | - ports: 32 | - port: "5432" 33 | protocol: TCP 34 | ingress: 35 | - fromEntities: 36 | - ingress 37 | toPorts: 38 | - ports: 39 | - port: "8080" 40 | protocol: TCP 41 | -------------------------------------------------------------------------------- /security/base/zitadel/sqlinstance.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cloud.ogenki.io/v1alpha1 2 | kind: SQLInstance 3 | metadata: 4 | name: xplane-zitadel 5 | spec: 6 | instances: 2 7 | size: "small" 8 | storageSize: "20Gi" 9 | createSuperuser: true 10 | objectStoreRecovery: 11 | 
bucketName: "eu-west-3-ogenki-cnpg-backups" 12 | path: "zitadel-20241202" 13 | backup: 14 | schedule: "0 0 * * *" 15 | bucketName: "eu-west-3-ogenki-cnpg-backups" 16 | compositionRef: 17 | name: xsqlinstances.cloud.ogenki.io 18 | -------------------------------------------------------------------------------- /security/base/zitadel/tlsroute.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: gateway.networking.k8s.io/v1alpha2 2 | kind: TLSRoute 3 | metadata: 4 | name: zitadel 5 | spec: 6 | parentRefs: 7 | - name: zitadel 8 | hostnames: 9 | - "auth.${domain_name}" 10 | rules: 11 | - backendRefs: 12 | - name: zitadel 13 | port: 8080 14 | -------------------------------------------------------------------------------- /security/mycluster-0/external-secrets/helmrelease.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: external-secrets 5 | namespace: kube-system 6 | spec: 7 | values: 8 | resources: 9 | limits: 10 | cpu: 400m 11 | -------------------------------------------------------------------------------- /security/mycluster-0/external-secrets/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - ../../base/external-secrets 6 | 7 | patches: 8 | - path: helmrelease.yaml 9 | target: 10 | group: helm.toolkit.fluxcd.io 11 | kind: HelmRelease 12 | name: external-secrets 13 | namespace: kube-system 14 | version: v2beta2 15 | -------------------------------------------------------------------------------- /security/mycluster-0/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - ../base/kyverno 6 | - ../base/cert-manager 7 | - ../base/openbao-snapshot 8 | - ../base/rbac 9 | - external-secrets 10 | -------------------------------------------------------------------------------- /security/mycluster-0/zitadel/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - ../../base/zitadel 6 | -------------------------------------------------------------------------------- /terramate.tm.hcl: -------------------------------------------------------------------------------- 1 | terramate { 2 | config { 3 | cloud { 4 | organization = "ogenki" 5 | } 6 | experiments = [ 7 | "scripts" 8 | ] 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /tooling/base/dagger-engine/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: dagger-engine 5 | labels: 6 | app.kubernetes.io/name: dagger-engine 7 | app.kubernetes.io/version: "v0.11.9" 8 | data: 9 | engine.toml: | 10 | debug = true 11 | -------------------------------------------------------------------------------- /tooling/base/dagger-engine/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: tooling 4 | 5 | resources: 6 | - configmap.yaml 7 | - deployment.yaml 8 | - pdb.yaml 9 | - 
service.yaml 10 | - network-policy.yaml 11 | -------------------------------------------------------------------------------- /tooling/base/dagger-engine/network-policy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cilium.io/v2 2 | kind: CiliumNetworkPolicy 3 | metadata: 4 | name: dagger-engine 5 | spec: 6 | description: "Allow internal traffic to the Dagger Engine service." 7 | endpointSelector: 8 | matchLabels: 9 | k8s:app.kubernetes.io/name: dagger-engine 10 | egress: 11 | - toEndpoints: 12 | - matchLabels: 13 | k8s:io.kubernetes.pod.namespace: kube-system 14 | k8s:k8s-app: kube-dns 15 | toPorts: 16 | - ports: 17 | - port: "53" 18 | protocol: UDP 19 | - port: "53" 20 | protocol: TCP 21 | - toEntities: 22 | - world 23 | toPorts: 24 | - ports: 25 | - port: "80" 26 | protocol: TCP 27 | - port: "443" 28 | protocol: TCP 29 | ingress: 30 | - fromEndpoints: 31 | - matchLabels: 32 | k8s:actions.github.com/scale-set-name: default-gha-runner-scale-set 33 | - fromEndpoints: 34 | - matchLabels: 35 | k8s:actions.github.com/scale-set-name: dagger-gha-runner-scale-set 36 | toPorts: 37 | - ports: 38 | - port: "8080" 39 | protocol: TCP 40 | -------------------------------------------------------------------------------- /tooling/base/dagger-engine/pdb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: dagger-engine 5 | spec: 6 | minAvailable: 1 7 | selector: 8 | matchLabels: 9 | app.kubernetes.io/name: dagger-engine 10 | -------------------------------------------------------------------------------- /tooling/base/dagger-engine/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: dagger-engine 5 | labels: 6 | app.kubernetes.io/name: dagger-engine 7 | spec: 8 | selector: 9 | app.kubernetes.io/name: dagger-engine 10 | ports: 11 | - protocol: TCP 12 | port: 8080 13 | targetPort: 8080 14 | -------------------------------------------------------------------------------- /tooling/base/gha-runners/controller-helmrelease.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: gha-runner-scale-set-controller 5 | namespace: tooling 6 | spec: 7 | releaseName: gha-runner-scale-set-controller 8 | driftDetection: 9 | mode: enabled 10 | chart: 11 | spec: 12 | chart: gha-runner-scale-set-controller 13 | version: "0.11.0" 14 | sourceRef: 15 | kind: HelmRepository 16 | name: gha-runner-scale-set 17 | interval: 5m 18 | install: 19 | crds: "Skip" 20 | remediation: 21 | retries: 3 22 | values: 23 | resources: 24 | limits: 25 | memory: 128Mi 26 | requests: 27 | cpu: 100m 28 | memory: 128Mi 29 | flags: 30 | logLevel: "debug" 31 | logFormat: "json" 32 | -------------------------------------------------------------------------------- /tooling/base/gha-runners/dagger-scale-set-helmrelease.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: dagger-gha-runner-scale-set 5 | spec: 6 | releaseName: dagger-gha-runner-scale-set 7 | driftDetection: 8 | mode: enabled 9 | chart: 10 | spec: 11 | chart: gha-runner-scale-set 12 | version: "0.9.3" 13 | sourceRef: 14 | kind: HelmRepository 15 | name: gha-runner-scale-set 16 | interval: 
10m0s 17 | install: 18 | remediation: 19 | retries: 3 20 | values: 21 | runnerGroup: "default" 22 | githubConfigUrl: "https://github.com/Smana/cloud-native-ref" 23 | githubConfigSecret: gha-runner-scale-set 24 | maxRunners: 5 25 | 26 | containerMode: 27 | type: "dind" 28 | -------------------------------------------------------------------------------- /tooling/base/gha-runners/default-scale-set-helmrelease.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: default-gha-runner-scale-set 5 | spec: 6 | releaseName: default-gha-runner-scale-set 7 | driftDetection: 8 | mode: enabled 9 | chart: 10 | spec: 11 | chart: gha-runner-scale-set 12 | version: "0.9.3" 13 | sourceRef: 14 | kind: HelmRepository 15 | name: gha-runner-scale-set 16 | interval: 10m0s 17 | install: 18 | remediation: 19 | retries: 3 20 | values: 21 | runnerGroup: "default" 22 | githubConfigUrl: "https://github.com/Smana/cloud-native-ref" 23 | githubConfigSecret: gha-runner-scale-set 24 | maxRunners: 5 25 | 26 | ## We'll probably want to use our internal PKI for this. 27 | # githubServerTLS: 28 | # certificateFrom: 29 | # configMapKeyRef: 30 | # name: config-map-name 31 | # key: ca.crt 32 | # runnerMountPath: /usr/local/share/ca-certificates/ 33 | template: 34 | spec: 35 | securityContext: 36 | runAsUser: 1001 37 | runAsGroup: 123 38 | fsGroup: 1000 39 | 40 | containerMode: 41 | type: "kubernetes" 42 | kubernetesModeWorkVolumeClaim: 43 | accessModes: ["ReadWriteOnce"] 44 | storageClassName: "gp3" 45 | resources: 46 | requests: 47 | storage: 10Gi 48 | -------------------------------------------------------------------------------- /tooling/base/gha-runners/externalsecret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: gha-runner-scale-set 5 | spec: 6 | dataFrom: 7 | - extract: 8 | conversionStrategy: Default 9 | key: github/gha-runner-scale-set/default 10 | refreshInterval: 20m 11 | secretStoreRef: 12 | kind: ClusterSecretStore 13 | name: clustersecretstore 14 | target: 15 | creationPolicy: Owner 16 | deletionPolicy: Retain 17 | name: gha-runner-scale-set 18 | -------------------------------------------------------------------------------- /tooling/base/gha-runners/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: tooling 4 | 5 | resources: 6 | - controller-helmrelease.yaml 7 | - dagger-scale-set-helmrelease.yaml 8 | - default-scale-set-helmrelease.yaml 9 | - externalsecret.yaml 10 | - network-policy.yaml 11 | -------------------------------------------------------------------------------- /tooling/base/gha-runners/network-policy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cilium.io/v2 2 | kind: CiliumNetworkPolicy 3 | metadata: 4 | name: gha-runner-scale-set 5 | spec: 6 | description: "Restrict internal traffic to the GitHub Actions runner scale set." 
7 | endpointSelector: 8 | matchExpressions: 9 | - key: k8s:actions.github.com/scale-set-name 10 | operator: In 11 | values: 12 | - default-gha-runner-scale-set 13 | - dagger-gha-runner-scale-set 14 | egress: 15 | - toEndpoints: 16 | - matchLabels: 17 | k8s:io.kubernetes.pod.namespace: kube-system 18 | k8s:k8s-app: kube-dns 19 | toPorts: 20 | - ports: 21 | - port: "53" 22 | protocol: UDP 23 | - port: "53" 24 | protocol: TCP 25 | - toEndpoints: 26 | - matchLabels: 27 | k8s:app.kubernetes.io/name: dagger-engine 28 | toPorts: 29 | - ports: 30 | - port: "8080" 31 | protocol: TCP 32 | - toEntities: 33 | - world 34 | toPorts: 35 | - ports: 36 | - port: "80" 37 | protocol: TCP 38 | - port: "443" 39 | protocol: TCP 40 | -------------------------------------------------------------------------------- /tooling/base/harbor/externalsecret-admin-password.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: admin-password 5 | spec: 6 | dataFrom: 7 | - extract: 8 | conversionStrategy: Default 9 | key: harbor/admin/password 10 | refreshInterval: 20m 11 | secretStoreRef: 12 | kind: ClusterSecretStore 13 | name: clustersecretstore 14 | target: 15 | creationPolicy: Owner 16 | deletionPolicy: Retain 17 | name: harbor-admin-password 18 | -------------------------------------------------------------------------------- /tooling/base/harbor/externalsecret-valkey-password.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: harbor-valkey-password 5 | spec: 6 | dataFrom: 7 | - extract: 8 | conversionStrategy: Default 9 | key: harbor/valkey/password 10 | refreshInterval: 20m 11 | secretStoreRef: 12 | kind: ClusterSecretStore 13 | name: clustersecretstore 14 | target: 15 | creationPolicy: Owner 16 | deletionPolicy: Retain 17 | name: harbor-valkey-password 18 | -------------------------------------------------------------------------------- /tooling/base/harbor/helmrelease-valkey.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: harbor-valkey 5 | spec: 6 | releaseName: harbor-valkey 7 | driftDetection: 8 | mode: enabled 9 | chart: 10 | spec: 11 | chart: valkey 12 | sourceRef: 13 | kind: HelmRepository 14 | name: bitnami 15 | namespace: flux-system 16 | version: "3.0.9" 17 | interval: 10m0s 18 | install: 19 | remediation: 20 | retries: 3 21 | values: 22 | auth: 23 | existingSecret: "harbor-valkey-password" 24 | existingSecretPasswordKey: "REDIS_PASSWORD" 25 | usePasswordFiles: false 26 | 27 | primary: 28 | ## Valkey primary resource requests and limits 29 | ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ 30 | ## @param master.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if master.resources is set (master.resources is recommended for production). 
31 | ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 32 | ## 33 | resourcesPreset: "nano" 34 | persistence: 35 | size: 4Gi 36 | 37 | replica: 38 | resourcesPreset: "nano" 39 | persistence: 40 | size: 4Gi 41 | 42 | metrics: 43 | enabled: true 44 | serviceMonitor: 45 | enabled: true 46 | 47 | useExternalDNS: 48 | enabled: true 49 | suffix: "priv.${domain_name}" 50 | additionalAnnotations: 51 | ttl: 10 52 | -------------------------------------------------------------------------------- /tooling/base/harbor/httproute.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: gateway.networking.k8s.io/v1 2 | kind: HTTPRoute 3 | metadata: 4 | name: harbor 5 | spec: 6 | parentRefs: 7 | - name: platform-private 8 | namespace: infrastructure 9 | hostnames: 10 | - "harbor.priv.${domain_name}" 11 | rules: 12 | - backendRefs: 13 | - name: harbor-core 14 | port: 80 15 | matches: 16 | - path: 17 | type: PathPrefix 18 | value: /chartrepo/ 19 | - backendRefs: 20 | - name: harbor-core 21 | port: 80 22 | matches: 23 | - path: 24 | type: PathPrefix 25 | value: /c/ 26 | - backendRefs: 27 | - name: harbor-portal 28 | port: 80 29 | matches: 30 | - path: 31 | type: PathPrefix 32 | value: / 33 | - backendRefs: 34 | - name: harbor-core 35 | port: 80 36 | matches: 37 | - path: 38 | type: PathPrefix 39 | value: /api/ 40 | - backendRefs: 41 | - name: harbor-core 42 | port: 80 43 | matches: 44 | - path: 45 | type: PathPrefix 46 | value: /service/ 47 | - backendRefs: 48 | - name: harbor-core 49 | port: 80 50 | matches: 51 | - path: 52 | type: PathPrefix 53 | value: /v2/ 54 | -------------------------------------------------------------------------------- /tooling/base/harbor/iam-user.yaml: -------------------------------------------------------------------------------- 1 | # This is a workaround as I cannot use EKS Pod Identity for Harbor to write into S3 bucket 2 | apiVersion: iam.aws.upbound.io/v1beta1 3 | kind: User 4 | metadata: 5 | name: xplane-harbor 6 | labels: 7 | cloud.ogenki.io/name: harbor 8 | spec: 9 | forProvider: {} 10 | --- 11 | apiVersion: iam.aws.upbound.io/v1beta1 12 | kind: AccessKey 13 | metadata: 14 | name: xplane-harbor 15 | labels: 16 | cloud.ogenki.io/name: harbor 17 | spec: 18 | forProvider: 19 | userSelector: 20 | matchLabels: 21 | cloud.ogenki.io/name: harbor 22 | writeConnectionSecretToRef: 23 | name: xplane-harbor-access-key 24 | namespace: tooling 25 | --- 26 | apiVersion: iam.aws.upbound.io/v1beta1 27 | kind: Policy 28 | metadata: 29 | name: xplane-harbor-s3 30 | labels: 31 | cloud.ogenki.io/name: harbor-s3 32 | spec: 33 | forProvider: 34 | # Allow to read/write to the bucket ${region}-ogenki-harbor 35 | policy: | 36 | { 37 | "Version": "2012-10-17", 38 | "Statement": [ 39 | { 40 | "Effect": "Allow", 41 | "Action": [ 42 | "s3:ListBucket", 43 | "s3:GetBucketLocation", 44 | "s3:ListBucketMultipartUploads" 45 | ], 46 | "Resource": [ 47 | "arn:aws:s3:::${region}-ogenki-harbor" 48 | ] 49 | }, 50 | { 51 | "Effect": "Allow", 52 | "Action": [ 53 | "s3:PutObject", 54 | "s3:GetObject", 55 | "s3:DeleteObject" 56 | ], 57 | "Resource": [ 58 | "arn:aws:s3:::${region}-ogenki-harbor/*" 59 | ] 60 | } 61 | ] 62 | } 63 | --- 64 | apiVersion: iam.aws.upbound.io/v1beta1 65 | kind: UserPolicyAttachment 66 | metadata: 67 | name: xplane-harbor-s3 68 | spec: 69 | forProvider: 70 | policyArnSelector: 71 | matchLabels: 72 | cloud.ogenki.io/name: harbor-s3 73 | userSelector: 74 | matchLabels: 75 | 
cloud.ogenki.io/name: harbor 76 | -------------------------------------------------------------------------------- /tooling/base/harbor/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: tooling 4 | 5 | resources: 6 | - externalsecret-admin-password.yaml 7 | - externalsecret-valkey-password.yaml 8 | - helmrelease-harbor.yaml 9 | - serviceaccount-harbor.yaml 10 | - httproute.yaml 11 | - iam-user.yaml 12 | - helmrelease-valkey.yaml 13 | - s3-bucket.yaml 14 | - sqlinstance.yaml 15 | -------------------------------------------------------------------------------- /tooling/base/harbor/s3-bucket.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: s3.aws.upbound.io/v1beta1 2 | kind: Bucket 3 | metadata: 4 | name: harbor 5 | annotations: 6 | crossplane.io/external-name: ${region}-ogenki-harbor 7 | spec: 8 | deletionPolicy: Orphan # The bucket should not be deleted when the resource is deleted in Crossplane 9 | forProvider: 10 | region: ${region} 11 | -------------------------------------------------------------------------------- /tooling/base/harbor/serviceaccount-harbor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: harbor 5 | -------------------------------------------------------------------------------- /tooling/base/harbor/sqlinstance.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cloud.ogenki.io/v1alpha1 2 | kind: SQLInstance 3 | metadata: 4 | name: xplane-harbor 5 | spec: 6 | instances: 1 7 | size: "small" 8 | storageSize: 20Gi 9 | databases: 10 | - name: registry 11 | owner: harbor 12 | roles: 13 | - name: harbor 14 | comment: "Harbor admin user" 15 | superuser: true 16 | objectStoreRecovery: 17 | bucketName: "eu-west-3-ogenki-cnpg-backups" 18 | path: "harbor-20241111" 19 | backup: 20 | schedule: "0 1 * * *" 21 | bucketName: "eu-west-3-ogenki-cnpg-backups" 22 | compositionRef: 23 | name: xsqlinstances.cloud.ogenki.io 24 | -------------------------------------------------------------------------------- /tooling/base/headlamp/externalsecret-headlamp-envvars.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: headlamp-envvars 5 | spec: 6 | dataFrom: 7 | - extract: 8 | conversionStrategy: Default 9 | key: headlamp/envvars 10 | refreshInterval: 20m 11 | secretStoreRef: 12 | kind: ClusterSecretStore 13 | name: clustersecretstore 14 | target: 15 | creationPolicy: Owner 16 | deletionPolicy: Retain 17 | name: headlamp-envvars 18 | -------------------------------------------------------------------------------- /tooling/base/headlamp/helmrelease.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: headlamp 5 | spec: 6 | interval: 30m 7 | driftDetection: 8 | mode: enabled 9 | chart: 10 | spec: 11 | chart: headlamp 12 | version: "0.31.0" 13 | sourceRef: 14 | kind: HelmRepository 15 | name: headlamp 16 | interval: 12h 17 | values: 18 | config: 19 | pluginsDir: /build/plugins 20 | oidc: 21 | secret: 22 | create: false 23 | externalSecret: 24 | enabled: true 25 | name: "headlamp-envvars" 26 | initContainers: 27 | - 
command: 28 | - /bin/sh 29 | - -c 30 | - mkdir -p /build/plugins && cp -r /plugins/* /build/plugins/ 31 | image: ghcr.io/headlamp-k8s/headlamp-plugin-flux:latest 32 | imagePullPolicy: Always 33 | name: headlamp-plugins 34 | volumeMounts: 35 | - mountPath: /build/plugins 36 | name: headlamp-plugins 37 | 38 | resources: 39 | limits: 40 | memory: 256Mi 41 | requests: 42 | cpu: 300m 43 | 44 | volumeMounts: 45 | - mountPath: /build/plugins 46 | name: headlamp-plugins 47 | 48 | volumes: 49 | - name: headlamp-plugins 50 | emptyDir: {} 51 | -------------------------------------------------------------------------------- /tooling/base/headlamp/httproute.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: gateway.networking.k8s.io/v1 2 | kind: HTTPRoute 3 | metadata: 4 | name: headlamp 5 | spec: 6 | parentRefs: 7 | - name: platform-private 8 | namespace: infrastructure 9 | hostnames: 10 | - "headlamp.priv.${domain_name}" 11 | rules: 12 | - backendRefs: 13 | - name: headlamp 14 | port: 80 15 | -------------------------------------------------------------------------------- /tooling/base/headlamp/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: tooling 4 | resources: 5 | - externalsecret-headlamp-envvars.yaml 6 | - httproute.yaml 7 | - helmrelease.yaml 8 | -------------------------------------------------------------------------------- /tooling/mycluster-0/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - ../base/headlamp 6 | - ../base/harbor 7 | # Enabling only when needed for cost reasons 8 | # - ../base/harbor 9 | # - ../base/dagger-engine 10 | # - ../base/gha-runners 11 | --------------------------------------------------------------------------------