├── .github ├── CONTRIBUTING.md ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── PULL_REQUEST_TEMPLATE.md ├── renovate.json └── workflows │ ├── pr-title.yml │ ├── pre-commit.yml │ ├── release.yml │ ├── renovate.yaml │ └── stale-actions.yaml ├── .gitignore ├── .mergify.yml ├── .pre-commit-config.yaml ├── .python-version ├── .releaserc.json ├── .terraform-docs.yml ├── CODEOWNERS ├── LICENSE ├── README.md ├── admiralty.tf ├── cert-manager-csi-driver.tf ├── cert-manager.tf ├── csi-external-snapshotter.tf ├── flux2.tf ├── helm-dependencies.yaml ├── ingress-nginx.tf ├── k8gb.tf ├── karma.tf ├── keda.tf ├── kong-crds.tf ├── kong.tf ├── kube-prometheus-crd.tf ├── kube-prometheus.tf ├── linkerd-viz.tf ├── linkerd.tf ├── linkerd2-cni.tf ├── locals.tf ├── loki-stack.tf ├── metrics-server.tf ├── modules ├── aws │ ├── .terraform-docs.yml │ ├── README.md │ ├── admiralty.tf │ ├── aws-ebs-csi-driver.tf │ ├── aws-efs-csi-driver.tf │ ├── aws-for-fluent-bit.tf │ ├── aws-load-balancer-controller.tf │ ├── aws-node-termination-handler.tf │ ├── cert-manager-csi-driver.tf │ ├── cert-manager.tf │ ├── cluster-autoscaler.tf │ ├── cni-metrics-helper.tf │ ├── csi-external-snapshotter.tf │ ├── data.tf │ ├── examples │ │ └── README.md │ ├── external-dns.tf │ ├── flux2.tf │ ├── helm-dependencies.yaml │ ├── iam │ │ ├── aws-ebs-csi-driver.json │ │ ├── aws-ebs-csi-driver_kms.json │ │ ├── aws-efs-csi-driver.json │ │ └── aws-load-balancer-controller.json │ ├── ingress-nginx.tf │ ├── k8gb.tf │ ├── karma.tf │ ├── karpenter.tf │ ├── keda.tf │ ├── kong-crds.tf │ ├── kong.tf │ ├── kube-prometheus-crd.tf │ ├── kube-prometheus.tf │ ├── linkerd-viz.tf │ ├── linkerd.tf │ ├── linkerd2-cni.tf │ ├── locals-aws.tf │ ├── locals.tf │ ├── loki-stack.tf │ ├── metrics-server.tf │ ├── node-problem-detector.tf │ ├── priority-class.tf │ ├── prometheus-adapter.tf │ ├── prometheus-blackbox-exporter.tf │ ├── prometheus-cloudwatch-exporter.tf │ ├── promtail.tf │ ├── reloader.tf │ ├── s3-logging.tf │ ├── 
sealed-secrets.tf │ ├── secrets-store-csi-driver-provider-aws.tf │ ├── secrets-store-csi-driver.tf │ ├── templates │ │ ├── cert-manager-cluster-issuers.yaml.tpl │ │ ├── cert-manager-csi-driver.yaml.tpl │ │ └── cni-metrics-helper.yaml.tpl │ ├── thanos-memcached.tf │ ├── thanos-storegateway.tf │ ├── thanos-tls-querier.tf │ ├── thanos.tf │ ├── tigera-operator.tf │ ├── traefik.tf │ ├── variables-aws.tf │ ├── variables.tf │ ├── velero.tf │ ├── versions.tf │ ├── victoria-metrics-k8s-stack.tf │ └── yet-another-cloudwatch-exporter.tf ├── azure │ ├── .terraform-docs.yml │ ├── README.md │ ├── admiralty.tf │ ├── cert-manager-csi-driver.tf │ ├── cert-manager.tf │ ├── csi-external-snapshotter.tf │ ├── flux2.tf │ ├── helm-dependencies.yaml │ ├── ingress-nginx.tf │ ├── k8gb.tf │ ├── karma.tf │ ├── keda.tf │ ├── kong-crds.tf │ ├── kong.tf │ ├── kube-prometheus-crd.tf │ ├── kube-prometheus.tf │ ├── linkerd-viz.tf │ ├── linkerd.tf │ ├── linkerd2-cni.tf │ ├── locals.tf │ ├── loki-stack.tf │ ├── node-problem-detector.tf │ ├── priority-class.tf │ ├── prometheus-adapter.tf │ ├── prometheus-blackbox-exporter.tf │ ├── reloader.tf │ ├── sealed-secrets.tf │ ├── secrets-store-csi-driver.tf │ ├── templates │ │ ├── cert-manager-cluster-issuers.yaml.tpl │ │ └── cert-manager-csi-driver.yaml.tpl │ ├── tigera-operator.tf │ ├── traefik.tf │ ├── variables.tf │ ├── version.tf │ └── victoria-metrics-k8s-stack.tf ├── google │ ├── .terraform-docs.yml │ ├── README.md │ ├── admiralty.tf │ ├── cert-manager-csi-driver.tf │ ├── cert-manager.tf │ ├── data.tf │ ├── external-dns.tf │ ├── flux2.tf │ ├── helm-dependencies.yaml │ ├── ingress-nginx.tf │ ├── ip-masq-agent.tf │ ├── k8gb.tf │ ├── karma.tf │ ├── keda.tf │ ├── kube-prometheus-crd.tf │ ├── kube-prometheus.tf │ ├── linkerd-viz.tf │ ├── linkerd.tf │ ├── linkerd2-cni.tf │ ├── locals.tf │ ├── loki-stack.tf │ ├── manifests │ │ └── gke-ip-masq │ │ │ ├── ip-masq-agent-configmap.yaml │ │ │ └── ip-masq-agent-daemonset.yaml │ ├── node-problem-detector.tf │ ├── 
priority-class.tf │ ├── prometheus-adapter.tf │ ├── promtail.tf │ ├── reloader.tf │ ├── sealed-secrets.tf │ ├── secrets-store-csi-driver.tf │ ├── templates │ │ ├── cert-manager-cluster-issuers.yaml.j2 │ │ ├── cert-manager-cluster-issuers.yaml.tpl │ │ ├── cert-manager-csi-driver.yaml.tpl │ │ └── cni-metrics-helper.yaml.tpl │ ├── thanos-memcached.tf │ ├── thanos-receive.tf │ ├── thanos-storegateway.tf │ ├── thanos-tls-querier.tf │ ├── thanos.tf │ ├── traefik.tf │ ├── variables-google.tf │ ├── variables.tf │ ├── velero.tf │ ├── versions.tf │ └── victoria-metrics-k8s-stack.tf └── scaleway │ ├── .terraform-docs.yml │ ├── README.md │ ├── admiralty.tf │ ├── cert-manager-csi-driver.tf │ ├── cert-manager.tf │ ├── csi-external-snapshotter.tf │ ├── examples │ └── README.md │ ├── external-dns.tf │ ├── flux2.tf │ ├── helm-dependencies.yaml │ ├── ingress-nginx.tf │ ├── k8gb.tf │ ├── karma.tf │ ├── keda.tf │ ├── kong-crds.tf │ ├── kong.tf │ ├── kube-prometheus-crd.tf │ ├── kube-prometheus.tf │ ├── linkerd-viz.tf │ ├── linkerd.tf │ ├── linkerd2-cni.tf │ ├── locals-scaleway.tf │ ├── locals.tf │ ├── loki-stack.tf │ ├── priority-class.tf │ ├── prometheus-adapter.tf │ ├── prometheus-blackbox-exporter.tf │ ├── promtail.tf │ ├── reloader.tf │ ├── sealed-secrets.tf │ ├── templates │ ├── cert-manager-cluster-issuers.yaml.tpl │ └── cert-manager-csi-driver.yaml.tpl │ ├── thanos-memcached.tf │ ├── thanos-storegateway.tf │ ├── thanos-tls-querier.tf │ ├── thanos.tf │ ├── traefik.tf │ ├── variables-scaleway.tf │ ├── variables.tf │ ├── velero.tf │ ├── versions.tf │ └── victoria-metrics-k8s-stack.tf ├── node-problem-detector.tf ├── priority-class.tf ├── prometheus-adapter.tf ├── prometheus-blackbox-exporter.tf ├── promtail.tf ├── reloader.tf ├── sealed-secrets.tf ├── secrets-store-csi-driver.tf ├── templates ├── cert-manager-cluster-issuers.yaml.tpl └── cert-manager-csi-driver.yaml.tpl ├── tigera-operator.tf ├── traefik.tf ├── variables.tf ├── versions.tf └── victoria-metrics-k8s-stack.tf 
/.github/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | When contributing to this repository, please first discuss the change you wish to make via issue, 4 | email, or any other method with the owners of this repository before making a change. 5 | 6 | Please note we have a code of conduct; please follow it in all your interactions with the project. 7 | 8 | ## Pull Request Process 9 | 10 | 1. Ensure any install or build dependencies are removed before the end of the layer when doing a build. 11 | 2. Update the README.md with details of changes to the interface, this includes new environment variables, exposed ports, useful file locations, and container parameters. 12 | 3. Once all outstanding comments and checklist items have been addressed, your contribution will be merged! Merged PRs will trigger a new release. 13 | 14 | ## Checklists for contributions 15 | 16 | - [ ] Add a [semantic prefix](#semantic-pull-requests) to your PR or Commits (at least one of your commit groups) 17 | - [ ] CI tests are passing 18 | - [ ] README.md has been updated after any changes to variables and outputs. See https://github.com/terraform-aws-modules/terraform-aws-eks/#doc-generation 19 | 20 | ## Semantic Pull Requests 21 | 22 | To generate the changelog, Pull Requests or Commits must be semantic and must follow the conventional specs below: 23 | 24 | - `feat:` for new features 25 | - `fix:` for bug fixes 26 | - `improvement:` for enhancements 27 | - `docs:` for documentation and examples 28 | - `refactor:` for code refactoring 29 | - `test:` for tests 30 | - `ci:` for CI purposes 31 | - `chore:` for chores 32 | 33 | The `chore` prefix is skipped during changelog generation. It can be used, for example, for a `chore: update changelog` commit message. 
34 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: "[bug]" 5 | labels: bug 6 | assignees: ArchiFleKs 7 | 8 | --- 9 | 10 | ## Describe the bug 11 | 12 | A clear and concise description of what the bug is. 13 | 14 | ## What is the current behavior? 15 | 16 | 17 | ## How to reproduce? Please include a code sample if relevant. 18 | 19 | 20 | ## What's the expected behavior? 21 | 22 | 23 | ## Are you able to fix this problem and submit a PR? Link here if you have already. 24 | 25 | 26 | ## Environment details 27 | 28 | * Affected module version: 29 | * OS: 30 | * Terraform version: 31 | * Kubernetes version 32 | 33 | ## Any other relevant info 34 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: "[enhancement]" 5 | labels: enhancement 6 | assignees: ArchiFleKs 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 
21 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | # Pull request title 2 | 3 | ## Description 4 | 5 | Please explain the changes you made here and link to any relevant issues. 6 | 7 | ### Checklist 8 | 9 | - [ ] CI tests are passing 10 | - [ ] README.md has been updated after any changes to variables and outputs. See https://github.com/particuleio/terraform-kubernetes-addons/#doc-generation 11 | -------------------------------------------------------------------------------- /.github/renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": [ 3 | ":separateMajorReleases", 4 | ":ignoreUnstable", 5 | ":prImmediately", 6 | ":updateNotScheduled", 7 | ":automergeDisabled", 8 | ":disableRateLimiting", 9 | ":ignoreModulesAndTests", 10 | ":gitSignOff", 11 | "group:monorepos", 12 | "group:recommended", 13 | "helpers:disableTypesNodeMajor", 14 | "workarounds:all", 15 | ":automergeDigest", 16 | ":automergePatch", 17 | ":automergeMinor", 18 | ":dependencyDashboard" 19 | ], 20 | "baseBranches": [ 21 | "main" 22 | ], 23 | "enabledManagers": [ 24 | "helmv3", 25 | "github-actions", 26 | "pre-commit", 27 | "terraform" 28 | ], 29 | "semanticCommits": "enabled", 30 | "automergeType": "pr-comment", 31 | "automergeComment": "renovate:merge", 32 | "helmv3": { 33 | "enabled": true, 34 | "fileMatch": [ 35 | "(^|/)helm-dependencies.yaml$" 36 | ] 37 | }, 38 | "reviewers": [ 39 | "team:team" 40 | ], 41 | "commitMessageExtra": "to {{newVersion}} (was {{currentVersion}})", 42 | "prHourlyLimit": 0, 43 | "packageRules": [ 44 | { 45 | "matchManagers": [ 46 | "github-actions" 47 | ], 48 | "semanticCommitScope": "ci", 49 | "semanticCommitType": "chore" 50 | }, 51 | { 52 | "matchManagers": [ 53 | "pre-commit" 54 | ], 55 | "semanticCommitScope": "ci", 56 | "semanticCommitType": "chore" 57 
| }, 58 | { 59 | "matchManagers": [ 60 | "helmv3" 61 | ], 62 | "semanticCommitScope": "charts", 63 | "semanticCommitType": "fix", 64 | "matchUpdateTypes": [ 65 | "patch", 66 | "digest" 67 | ] 68 | }, 69 | { 70 | "matchManagers": [ 71 | "helmv3" 72 | ], 73 | "semanticCommitScope": "charts", 74 | "semanticCommitType": "feat", 75 | "matchUpdateTypes": [ 76 | "major", 77 | "minor" 78 | ] 79 | }, 80 | { 81 | "matchManagers": [ 82 | "terraform" 83 | ], 84 | "semanticCommitScope": "tf", 85 | "semanticCommitType": "feat", 86 | "automerge": false 87 | } 88 | ] 89 | } 90 | -------------------------------------------------------------------------------- /.github/workflows/pr-title.yml: -------------------------------------------------------------------------------- 1 | name: 'Validate PR title' 2 | 3 | on: 4 | pull_request_target: 5 | types: 6 | - opened 7 | - edited 8 | - synchronize 9 | 10 | jobs: 11 | main: 12 | name: Validate PR title 13 | runs-on: ubuntu-latest 14 | steps: 15 | # Please look up the latest version from 16 | # https://github.com/amannn/action-semantic-pull-request/releases 17 | - uses: amannn/action-semantic-pull-request@v5.5.3 18 | env: 19 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 20 | with: 21 | # Configure which types are allowed. 22 | # Default: https://github.com/commitizen/conventional-commit-types 23 | types: | 24 | fix 25 | feat 26 | docs 27 | ci 28 | chore 29 | # Configure that a scope must always be provided. 30 | requireScope: false 31 | # Configure additional validation for the subject based on a regex. 32 | # This example ensures the subject starts with an uppercase character. 33 | # subjectPattern: ^[A-Z].+$ 34 | # If `subjectPattern` is configured, you can use this property to override 35 | # the default error message that is shown when the pattern doesn't match. 36 | # The variables `subject` and `title` can be used within the message. 
37 | # subjectPatternError: | 38 | # The subject "{subject}" found in the pull request title "{title}" 39 | # didn't match the configured pattern. Please ensure that the subject 40 | # starts with an uppercase character. 41 | # For work-in-progress PRs you can typically use draft pull requests 42 | # from Github. However, private repositories on the free plan don't have 43 | # this option and therefore this action allows you to opt-in to using the 44 | # special "[WIP]" prefix to indicate this state. This will avoid the 45 | # validation of the PR title and the pull request checks remain pending. 46 | # Note that a second check will be reported if this is enabled. 47 | wip: true 48 | # When using "Squash and merge" on a PR with only one commit, GitHub 49 | # will suggest using that commit message instead of the PR title for the 50 | # merge commit, and it's easy to commit this by mistake. Enable this option 51 | # to also validate the commit message for one commit PRs. 52 | validateSingleCommit: false 53 | -------------------------------------------------------------------------------- /.github/workflows/pre-commit.yml: -------------------------------------------------------------------------------- 1 | name: Pre-Commit 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | - master 8 | workflow_dispatch: 9 | 10 | env: 11 | TERRAFORM_DOCS_VERSION: v0.16.0 12 | 13 | jobs: 14 | collectInputs: 15 | name: Collect workflow inputs 16 | runs-on: ubuntu-latest 17 | outputs: 18 | directories: ${{ steps.dirs.outputs.directories }} 19 | steps: 20 | - name: Checkout 21 | uses: actions/checkout@v4 22 | 23 | - name: Get root directories 24 | id: dirs 25 | uses: clowdhaus/terraform-composite-actions/directories@v1.11.1 26 | 27 | preCommitMinVersions: 28 | name: Min TF pre-commit 29 | needs: collectInputs 30 | runs-on: ubuntu-latest 31 | strategy: 32 | matrix: 33 | directory: ${{ fromJson(needs.collectInputs.outputs.directories) }} 34 | steps: 35 | - name: Checkout 36 | uses: 
actions/checkout@v4 37 | 38 | - name: Terraform min/max versions 39 | id: minMax 40 | uses: clowdhaus/terraform-min-max@v1.3.2 41 | with: 42 | directory: ${{ matrix.directory }} 43 | 44 | - name: Pre-commit Terraform ${{ steps.minMax.outputs.minVersion }} 45 | # Run only validate pre-commit check on min version supported 46 | if: ${{ matrix.directory != '.' }} 47 | uses: clowdhaus/terraform-composite-actions/pre-commit@v1.11.1 48 | with: 49 | terraform-version: ${{ steps.minMax.outputs.minVersion }} 50 | args: 'terraform_validate --color=always --show-diff-on-failure --files ${{ matrix.directory }}/*' 51 | 52 | - name: Pre-commit Terraform ${{ steps.minMax.outputs.minVersion }} 53 | # Run only validate pre-commit check on min version supported 54 | if: ${{ matrix.directory == '.' }} 55 | uses: clowdhaus/terraform-composite-actions/pre-commit@v1.11.1 56 | with: 57 | terraform-version: ${{ steps.minMax.outputs.minVersion }} 58 | args: 'terraform_validate --color=always --show-diff-on-failure --files $(ls *.tf)' 59 | 60 | preCommitMaxVersion: 61 | name: Max TF pre-commit 62 | runs-on: ubuntu-latest 63 | needs: collectInputs 64 | steps: 65 | - name: Checkout 66 | uses: actions/checkout@v4 67 | with: 68 | ref: ${{ github.event.pull_request.head.ref }} 69 | repository: ${{github.event.pull_request.head.repo.full_name}} 70 | 71 | - name: Terraform min/max versions 72 | id: minMax 73 | uses: clowdhaus/terraform-min-max@v1.3.2 74 | 75 | - name: Pre-commit Terraform ${{ steps.minMax.outputs.maxVersion }} 76 | uses: clowdhaus/terraform-composite-actions/pre-commit@v1.11.1 77 | with: 78 | terraform-version: ${{ steps.minMax.outputs.maxVersion }} 79 | terraform-docs-version: ${{ env.TERRAFORM_DOCS_VERSION }} 80 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | branches: 6 | - release 7 | 8 | 
jobs: 9 | terraform-release: 10 | if: github.ref == 'refs/heads/release' 11 | name: 'terraform:release' 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@v4 16 | 17 | - name: Semantic Release 18 | uses: cycjimmy/semantic-release-action@v3 19 | with: 20 | branches: | 21 | [ 22 | 'release' 23 | ] 24 | env: 25 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 26 | -------------------------------------------------------------------------------- /.github/workflows/renovate.yaml: -------------------------------------------------------------------------------- 1 | name: Renovate 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | 8 | jobs: 9 | renovate-config-check: 10 | name: 'renovate:config' 11 | runs-on: ubuntu-latest 12 | if: github.ref != 'refs/heads/release' 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@v4 16 | 17 | - name: Validate Renovate Config 18 | uses: suzuki-shunsuke/github-action-renovate-config-validator@v1.1.1 19 | -------------------------------------------------------------------------------- /.github/workflows/stale-actions.yaml: -------------------------------------------------------------------------------- 1 | name: 'Mark or close stale issues and PRs' 2 | on: 3 | schedule: 4 | - cron: '0 0 * * *' 5 | 6 | jobs: 7 | stale: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/stale@v9 11 | with: 12 | repo-token: ${{ secrets.GITHUB_TOKEN }} 13 | # Staling issues and PR's 14 | days-before-stale: 30 15 | stale-issue-label: stale 16 | stale-pr-label: stale 17 | stale-issue-message: | 18 | This issue has been automatically marked as stale because it has been open 30 days 19 | with no activity. Remove stale label or comment or this issue will be closed in 10 days 20 | stale-pr-message: | 21 | This PR has been automatically marked as stale because it has been open 30 days 22 | with no activity. 
Remove stale label or comment or this PR will be closed in 10 days 23 | # Not stale if they have these labels or are part of a milestone 24 | exempt-issue-labels: bug,wip,on-hold 25 | exempt-pr-labels: bug,wip,on-hold 26 | exempt-all-milestones: true 27 | # Close issue operations 28 | # Label will be automatically removed if the issues are no longer closed nor locked. 29 | days-before-close: 10 30 | delete-branch: true 31 | close-issue-message: This issue was automatically closed because it has been stale for 10 days 32 | close-pr-message: This PR was automatically closed because it has been stale for 10 days 33 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .terragrunt-cache 2 | .terraform 3 | .terraform.lock.hcl 4 | .idea 5 | -------------------------------------------------------------------------------- /.mergify.yml: -------------------------------------------------------------------------------- 1 | pull_request_rules: 2 | - name: Automatic merge on approval 3 | conditions: 4 | - base=main 5 | - "approved-reviews-by>=1" 6 | actions: 7 | merge: 8 | method: squash 9 | - name: Automatic merge on approval release 10 | conditions: 11 | - base=release 12 | - "approved-reviews-by>=1" 13 | actions: 14 | merge: 15 | method: merge 16 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/antonbabenko/pre-commit-terraform 3 | rev: v1.99.0 4 | hooks: 5 | - id: terraform_fmt 6 | - id: terraform_validate 7 | args: 8 | - --hook-config=--retry-once-with-cleanup=true 9 | - --tf-init-args=-upgrade 10 | - id: terraform_docs 11 | - repo: https://github.com/pre-commit/pre-commit-hooks 12 | rev: v5.0.0 13 | hooks: 14 | - id: check-merge-conflict 15 | - id: end-of-file-fixer 16 | - repo: 
https://github.com/renovatebot/pre-commit-hooks 17 | rev: 40.26.1 18 | hooks: 19 | - id: renovate-config-validator 20 | -------------------------------------------------------------------------------- /.python-version: -------------------------------------------------------------------------------- 1 | 3.x 2 | -------------------------------------------------------------------------------- /.releaserc.json: -------------------------------------------------------------------------------- 1 | { 2 | "plugins": [ 3 | "@semantic-release/commit-analyzer", 4 | "@semantic-release/release-notes-generator", 5 | "@semantic-release/github" 6 | ] 7 | } 8 | -------------------------------------------------------------------------------- /.terraform-docs.yml: -------------------------------------------------------------------------------- 1 | settings: 2 | lockfile: false 3 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # This is a comment. 2 | # Each line is a file pattern followed by one or more owners. 3 | 4 | # These owners will be the default owners for everything in 5 | # the repo. Unless a later match takes precedence, 6 | # @global-owner1 and @global-owner2 will be requested for 7 | # review when someone opens a pull request. 
8 | * @particuleio/team 9 | -------------------------------------------------------------------------------- /admiralty.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | admiralty = merge( 3 | local.helm_defaults, 4 | { 5 | name = local.helm_dependencies[index(local.helm_dependencies.*.name, "admiralty")].name 6 | chart = local.helm_dependencies[index(local.helm_dependencies.*.name, "admiralty")].name 7 | repository = local.helm_dependencies[index(local.helm_dependencies.*.name, "admiralty")].repository 8 | chart_version = local.helm_dependencies[index(local.helm_dependencies.*.name, "admiralty")].version 9 | namespace = "admiralty" 10 | enabled = false 11 | create_ns = true 12 | default_network_policy = true 13 | }, 14 | var.admiralty 15 | ) 16 | 17 | values_admiralty = <<-VALUES 18 | VALUES 19 | } 20 | 21 | resource "kubernetes_namespace" "admiralty" { 22 | count = local.admiralty["enabled"] && local.admiralty["create_ns"] ? 1 : 0 23 | 24 | metadata { 25 | labels = { 26 | name = local.admiralty["namespace"] 27 | } 28 | 29 | name = local.admiralty["namespace"] 30 | } 31 | } 32 | 33 | resource "helm_release" "admiralty" { 34 | count = local.admiralty["enabled"] ? 
1 : 0 35 | repository = local.admiralty["repository"] 36 | name = local.admiralty["name"] 37 | chart = local.admiralty["chart"] 38 | version = local.admiralty["chart_version"] 39 | timeout = local.admiralty["timeout"] 40 | force_update = local.admiralty["force_update"] 41 | recreate_pods = local.admiralty["recreate_pods"] 42 | wait = local.admiralty["wait"] 43 | atomic = local.admiralty["atomic"] 44 | cleanup_on_fail = local.admiralty["cleanup_on_fail"] 45 | dependency_update = local.admiralty["dependency_update"] 46 | disable_crd_hooks = local.admiralty["disable_crd_hooks"] 47 | disable_webhooks = local.admiralty["disable_webhooks"] 48 | render_subchart_notes = local.admiralty["render_subchart_notes"] 49 | replace = local.admiralty["replace"] 50 | reset_values = local.admiralty["reset_values"] 51 | reuse_values = local.admiralty["reuse_values"] 52 | skip_crds = local.admiralty["skip_crds"] 53 | verify = local.admiralty["verify"] 54 | values = [ 55 | local.values_admiralty, 56 | local.admiralty["extra_values"] 57 | ] 58 | namespace = local.admiralty["create_ns"] ? kubernetes_namespace.admiralty.*.metadata.0.name[count.index] : local.admiralty["namespace"] 59 | } 60 | 61 | resource "kubernetes_network_policy" "admiralty_default_deny" { 62 | count = local.admiralty["enabled"] && local.admiralty["default_network_policy"] ? 1 : 0 63 | 64 | metadata { 65 | name = "${local.admiralty["namespace"]}-${local.admiralty["name"]}-default-deny" 66 | namespace = local.admiralty["namespace"] 67 | } 68 | 69 | spec { 70 | pod_selector { 71 | } 72 | policy_types = ["Ingress"] 73 | } 74 | } 75 | 76 | resource "kubernetes_network_policy" "admiralty_allow_namespace" { 77 | count = local.admiralty["enabled"] && local.admiralty["default_network_policy"] ? 
1 : 0 78 | 79 | metadata { 80 | name = "${local.admiralty["namespace"]}-${local.admiralty["name"]}-default-namespace" 81 | namespace = local.admiralty["namespace"] 82 | } 83 | 84 | spec { 85 | pod_selector { 86 | } 87 | 88 | ingress { 89 | from { 90 | namespace_selector { 91 | match_labels = { 92 | name = local.admiralty["namespace"] 93 | } 94 | } 95 | } 96 | } 97 | 98 | policy_types = ["Ingress"] 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /cert-manager-csi-driver.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | 3 | cert-manager-csi-driver = merge( 4 | local.helm_defaults, 5 | { 6 | name = local.helm_dependencies[index(local.helm_dependencies.*.name, "cert-manager-csi-driver")].name 7 | chart = local.helm_dependencies[index(local.helm_dependencies.*.name, "cert-manager-csi-driver")].name 8 | repository = local.helm_dependencies[index(local.helm_dependencies.*.name, "cert-manager-csi-driver")].repository 9 | chart_version = local.helm_dependencies[index(local.helm_dependencies.*.name, "cert-manager-csi-driver")].version 10 | enabled = local.cert-manager.csi_driver 11 | default_network_policy = true 12 | namespace = local.cert-manager.namespace 13 | }, 14 | var.cert-manager-csi-driver 15 | ) 16 | 17 | values_cert-manager-csi-driver = < v.content } : {} 39 | yaml_body = each.value 40 | } 41 | -------------------------------------------------------------------------------- /flux2.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | 3 | # GITHUB_TOKEN should be set for Github provider to work 4 | # GITHUB_ORGANIZATION should be set if deploying in another ORG and not your 5 | # github user 6 | 7 | flux2 = merge( 8 | { 9 | enabled = false 10 | create_ns = true 11 | namespace = "flux-system" 12 | path = "gitops/clusters/${var.cluster-name}" 13 | version = "v2.4.0" 14 | create_github_repository = false 15 | 
repository = "gitops" 16 | repository_visibility = "public" 17 | branch = "main" 18 | components_extra = ["image-reflector-controller", "image-automation-controller"] 19 | read_only = false 20 | default_network_policy = true 21 | }, 22 | var.flux2 23 | ) 24 | } 25 | 26 | resource "kubernetes_namespace" "flux2" { 27 | count = local.flux2["enabled"] && local.flux2["create_ns"] ? 1 : 0 28 | 29 | metadata { 30 | labels = { 31 | name = local.flux2["namespace"] 32 | } 33 | 34 | name = local.flux2["namespace"] 35 | } 36 | lifecycle { 37 | ignore_changes = [ 38 | metadata[0].annotations, 39 | metadata[0].labels, 40 | ] 41 | } 42 | } 43 | 44 | resource "tls_private_key" "identity" { 45 | count = local.flux2["enabled"] ? 1 : 0 46 | algorithm = "ECDSA" 47 | ecdsa_curve = "P521" 48 | } 49 | 50 | data "github_repository" "main" { 51 | count = local.flux2["enabled"] && !local.flux2["create_github_repository"] ? 1 : 0 52 | name = local.flux2["repository"] 53 | } 54 | 55 | resource "github_repository" "main" { 56 | count = local.flux2["enabled"] && local.flux2["create_github_repository"] ? 1 : 0 57 | name = local.flux2["repository"] 58 | visibility = local.flux2["repository_visibility"] 59 | auto_init = true 60 | } 61 | 62 | resource "github_branch_default" "main" { 63 | count = local.flux2["enabled"] && local.flux2["create_github_repository"] ? 1 : 0 64 | repository = local.flux2["create_github_repository"] ? github_repository.main[0].name : data.github_repository.main[0].name 65 | branch = local.flux2["branch"] 66 | } 67 | 68 | resource "github_repository_deploy_key" "main" { 69 | count = local.flux2["enabled"] ? 1 : 0 70 | title = "flux-${local.flux2["create_github_repository"] ? github_repository.main[0].name : local.flux2["repository"]}-${local.flux2["branch"]}" 71 | repository = local.flux2["create_github_repository"] ? 
github_repository.main[0].name : data.github_repository.main[0].name 72 | key = tls_private_key.identity[0].public_key_openssh 73 | read_only = local.flux2["read_only"] 74 | } 75 | 76 | resource "flux_bootstrap_git" "flux" { 77 | count = local.flux2["enabled"] ? 1 : 0 78 | 79 | depends_on = [ 80 | github_repository_deploy_key.main, 81 | kubernetes_namespace.flux2 82 | ] 83 | 84 | path = local.flux2["path"] 85 | version = local.flux2["version"] 86 | namespace = local.flux2["namespace"] 87 | cluster_domain = try(local.flux2["cluster_domain"], null) 88 | components = try(local.flux2["components"], null) 89 | components_extra = try(local.flux2["components_extra"], null) 90 | disable_secret_creation = try(local.flux2["disable_secret_creation"], null) 91 | image_pull_secret = try(local.flux2["image_pull_secrets"], null) 92 | interval = try(local.flux2["interval"], null) 93 | kustomization_override = try(local.flux2["kustomization_override"], null) 94 | log_level = try(local.flux2["log_level"], null) 95 | network_policy = try(local.flux2["network_policy"], null) 96 | recurse_submodules = try(local.flux2["recurse_submodules"], null) 97 | registry = try(local.flux2["registry"], null) 98 | secret_name = try(local.flux2["secret_name"], null) 99 | toleration_keys = try(local.flux2["toleration_keys"], null) 100 | watch_all_namespaces = try(local.flux2["watch_all_namespaces"], null) 101 | 102 | } 103 | 104 | resource "kubernetes_network_policy" "flux2_allow_monitoring" { 105 | count = local.flux2["enabled"] && local.flux2["default_network_policy"] ? 1 : 0 106 | 107 | metadata { 108 | name = "${local.flux2["create_ns"] ? kubernetes_namespace.flux2.*.metadata.0.name[count.index] : local.flux2["namespace"]}-allow-monitoring" 109 | namespace = local.flux2["create_ns"] ? 
kubernetes_namespace.flux2.*.metadata.0.name[count.index] : local.flux2["namespace"] 110 | } 111 | 112 | spec { 113 | pod_selector { 114 | } 115 | 116 | ingress { 117 | ports { 118 | port = "8080" 119 | protocol = "TCP" 120 | } 121 | 122 | from { 123 | namespace_selector { 124 | match_labels = { 125 | "${local.labels_prefix}/component" = "monitoring" 126 | } 127 | } 128 | } 129 | } 130 | 131 | policy_types = ["Ingress"] 132 | } 133 | } 134 | 135 | resource "kubernetes_network_policy" "flux2_allow_namespace" { 136 | count = local.flux2["enabled"] && local.flux2["default_network_policy"] ? 1 : 0 137 | 138 | metadata { 139 | name = "${local.flux2["create_ns"] ? kubernetes_namespace.flux2.*.metadata.0.name[count.index] : local.flux2["namespace"]}-allow-namespace" 140 | namespace = local.flux2["create_ns"] ? kubernetes_namespace.flux2.*.metadata.0.name[count.index] : local.flux2["namespace"] 141 | } 142 | 143 | spec { 144 | pod_selector { 145 | } 146 | 147 | ingress { 148 | from { 149 | namespace_selector { 150 | match_labels = { 151 | name = local.flux2["create_ns"] ? 
kubernetes_namespace.flux2.*.metadata.0.name[count.index] : local.flux2["namespace"] 152 | } 153 | } 154 | } 155 | } 156 | 157 | policy_types = ["Ingress"] 158 | } 159 | } 160 | -------------------------------------------------------------------------------- /helm-dependencies.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: Handle terraform-kubernetes-addons helm chart dependencies update 3 | version: 1.0.0 4 | dependencies: 5 | - name: admiralty 6 | version: 0.13.2 7 | repository: https://charts.admiralty.io 8 | - name: secrets-store-csi-driver 9 | version: 1.5.1 10 | repository: https://kubernetes-sigs.github.io/secrets-store-csi-driver/charts 11 | - name: aws-ebs-csi-driver 12 | version: 2.44.0 13 | repository: https://kubernetes-sigs.github.io/aws-ebs-csi-driver 14 | - name: aws-efs-csi-driver 15 | version: 3.1.9 16 | repository: https://kubernetes-sigs.github.io/aws-efs-csi-driver 17 | - name: aws-for-fluent-bit 18 | version: 0.1.35 19 | repository: https://aws.github.io/eks-charts 20 | - name: aws-load-balancer-controller 21 | version: 1.13.2 22 | repository: https://aws.github.io/eks-charts 23 | - name: aws-node-termination-handler 24 | version: 0.21.0 25 | repository: https://aws.github.io/eks-charts 26 | - name: cert-manager 27 | version: v1.17.2 28 | repository: https://charts.jetstack.io 29 | - name: cert-manager-csi-driver 30 | version: v0.10.3 31 | repository: https://charts.jetstack.io 32 | - name: cluster-autoscaler 33 | version: 9.46.6 34 | repository: https://kubernetes.github.io/autoscaler 35 | - name: external-dns 36 | version: 1.16.1 37 | repository: https://kubernetes-sigs.github.io/external-dns/ 38 | - name: flux 39 | version: 1.13.3 40 | repository: https://charts.fluxcd.io 41 | - name: ingress-nginx 42 | version: 4.12.3 43 | repository: https://kubernetes.github.io/ingress-nginx 44 | - name: k8gb 45 | version: v0.14.0 46 | repository: https://www.k8gb.io 47 | - name: karma 
48 | version: 1.7.2 49 | repository: https://charts.helm.sh/stable 50 | - name: karpenter 51 | version: 1.5.0 52 | repository: oci://public.ecr.aws/karpenter 53 | - name: keda 54 | version: 2.17.1 55 | repository: https://kedacore.github.io/charts 56 | - name: kong 57 | version: 2.49.0 58 | repository: https://charts.konghq.com 59 | - name: kube-prometheus-stack 60 | version: 72.9.1 61 | repository: https://prometheus-community.github.io/helm-charts 62 | - name: linkerd2-cni 63 | version: 30.12.2 64 | repository: https://helm.linkerd.io/stable 65 | - name: linkerd-control-plane 66 | version: 1.16.11 67 | repository: https://helm.linkerd.io/stable 68 | - name: linkerd-crds 69 | version: 1.8.0 70 | repository: https://helm.linkerd.io/stable 71 | - name: linkerd-viz 72 | version: 30.12.11 73 | repository: https://helm.linkerd.io/stable 74 | - name: loki 75 | version: 6.30.1 76 | repository: https://grafana.github.io/helm-charts 77 | - name: promtail 78 | version: 6.17.0 79 | repository: https://grafana.github.io/helm-charts 80 | - name: metrics-server 81 | version: 3.12.2 82 | repository: https://kubernetes-sigs.github.io/metrics-server/ 83 | - name: node-problem-detector 84 | version: 2.3.14 85 | repository: https://charts.deliveryhero.io/ 86 | - name: prometheus-adapter 87 | version: 4.14.1 88 | repository: https://prometheus-community.github.io/helm-charts 89 | - name: prometheus-cloudwatch-exporter 90 | version: 0.27.0 91 | repository: https://prometheus-community.github.io/helm-charts 92 | - name: prometheus-blackbox-exporter 93 | version: 9.8.0 94 | repository: https://prometheus-community.github.io/helm-charts 95 | - name: scaleway-webhook 96 | version: v0.0.1 97 | repository: https://particuleio.github.io/charts 98 | - name: sealed-secrets 99 | version: 2.17.2 100 | repository: https://bitnami-labs.github.io/sealed-secrets 101 | - name: oci://registry-1.docker.io/bitnamicharts/thanos 102 | version: 15.9.2 103 | repository: "" 104 | - name: tigera-operator 105 
| version: v3.30.1 106 | repository: https://docs.projectcalico.org/charts 107 | - name: traefik 108 | version: 35.4.0 109 | repository: https://helm.traefik.io/traefik 110 | - name: oci://registry-1.docker.io/bitnamicharts/memcached 111 | version: 7.5.3 112 | repository: "" 113 | - name: velero 114 | version: 8.7.2 115 | repository: https://vmware-tanzu.github.io/helm-charts 116 | - name: victoria-metrics-k8s-stack 117 | version: 0.51.0 118 | repository: https://victoriametrics.github.io/helm-charts/ 119 | - name: yet-another-cloudwatch-exporter 120 | version: 0.14.0 121 | repository: https://nerdswords.github.io/yet-another-cloudwatch-exporter 122 | - name: reloader 123 | version: 2.1.3 124 | repository: https://stakater.github.io/stakater-charts 125 | -------------------------------------------------------------------------------- /k8gb.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | k8gb = merge( 3 | local.helm_defaults, 4 | { 5 | name = local.helm_dependencies[index(local.helm_dependencies.*.name, "k8gb")].name 6 | chart = local.helm_dependencies[index(local.helm_dependencies.*.name, "k8gb")].name 7 | repository = local.helm_dependencies[index(local.helm_dependencies.*.name, "k8gb")].repository 8 | chart_version = local.helm_dependencies[index(local.helm_dependencies.*.name, "k8gb")].version 9 | namespace = "k8gb" 10 | enabled = false 11 | create_ns = true 12 | default_network_policy = false 13 | }, 14 | var.k8gb 15 | ) 16 | 17 | values_k8gb = <<-VALUES 18 | VALUES 19 | } 20 | 21 | resource "kubernetes_namespace" "k8gb" { 22 | count = local.k8gb["enabled"] && local.k8gb["create_ns"] ? 1 : 0 23 | 24 | metadata { 25 | labels = { 26 | name = local.k8gb["namespace"] 27 | } 28 | 29 | name = local.k8gb["namespace"] 30 | } 31 | } 32 | 33 | resource "helm_release" "k8gb" { 34 | count = local.k8gb["enabled"] ? 
1 : 0 35 | repository = local.k8gb["repository"] 36 | name = local.k8gb["name"] 37 | chart = local.k8gb["chart"] 38 | version = local.k8gb["chart_version"] 39 | timeout = local.k8gb["timeout"] 40 | force_update = local.k8gb["force_update"] 41 | recreate_pods = local.k8gb["recreate_pods"] 42 | wait = local.k8gb["wait"] 43 | atomic = local.k8gb["atomic"] 44 | cleanup_on_fail = local.k8gb["cleanup_on_fail"] 45 | dependency_update = local.k8gb["dependency_update"] 46 | disable_crd_hooks = local.k8gb["disable_crd_hooks"] 47 | disable_webhooks = local.k8gb["disable_webhooks"] 48 | render_subchart_notes = local.k8gb["render_subchart_notes"] 49 | replace = local.k8gb["replace"] 50 | reset_values = local.k8gb["reset_values"] 51 | reuse_values = local.k8gb["reuse_values"] 52 | skip_crds = local.k8gb["skip_crds"] 53 | verify = local.k8gb["verify"] 54 | values = [ 55 | local.values_k8gb, 56 | local.k8gb["extra_values"] 57 | ] 58 | namespace = local.k8gb["create_ns"] ? kubernetes_namespace.k8gb.*.metadata.0.name[count.index] : local.k8gb["namespace"] 59 | } 60 | 61 | resource "kubernetes_network_policy" "k8gb_default_deny" { 62 | count = local.k8gb["enabled"] && local.k8gb["default_network_policy"] ? 1 : 0 63 | 64 | metadata { 65 | name = "${local.k8gb["namespace"]}-${local.k8gb["name"]}-default-deny" 66 | namespace = local.k8gb["namespace"] 67 | } 68 | 69 | spec { 70 | pod_selector { 71 | } 72 | policy_types = ["Ingress"] 73 | } 74 | } 75 | 76 | resource "kubernetes_network_policy" "k8gb_allow_namespace" { 77 | count = local.k8gb["enabled"] && local.k8gb["default_network_policy"] ? 
1 : 0 78 | 79 | metadata { 80 | name = "${local.k8gb["namespace"]}-${local.k8gb["name"]}-default-namespace" 81 | namespace = local.k8gb["namespace"] 82 | } 83 | 84 | spec { 85 | pod_selector { 86 | } 87 | 88 | ingress { 89 | from { 90 | namespace_selector { 91 | match_labels = { 92 | name = local.k8gb["namespace"] 93 | } 94 | } 95 | } 96 | } 97 | 98 | policy_types = ["Ingress"] 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /karma.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | karma = merge( 3 | local.helm_defaults, 4 | { 5 | name = local.helm_dependencies[index(local.helm_dependencies.*.name, "karma")].name 6 | chart = local.helm_dependencies[index(local.helm_dependencies.*.name, "karma")].name 7 | repository = local.helm_dependencies[index(local.helm_dependencies.*.name, "karma")].repository 8 | chart_version = local.helm_dependencies[index(local.helm_dependencies.*.name, "karma")].version 9 | namespace = "monitoring" 10 | create_ns = false 11 | enabled = false 12 | default_network_policy = true 13 | }, 14 | var.karma 15 | ) 16 | 17 | values_karma = < v.content } : {} 26 | yaml_body = each.value 27 | server_side_apply = true 28 | force_conflicts = true 29 | } 30 | -------------------------------------------------------------------------------- /kong.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | 3 | kong = merge( 4 | local.helm_defaults, 5 | { 6 | name = local.helm_dependencies[index(local.helm_dependencies.*.name, "kong")].name 7 | chart = local.helm_dependencies[index(local.helm_dependencies.*.name, "kong")].name 8 | repository = local.helm_dependencies[index(local.helm_dependencies.*.name, "kong")].repository 9 | chart_version = local.helm_dependencies[index(local.helm_dependencies.*.name, "kong")].version 10 | namespace = "kong" 11 | enabled = false 12 | default_network_policy = true 13 | 
ingress_cidrs = ["0.0.0.0/0"] 14 | manage_crds = true 15 | }, 16 | var.kong 17 | ) 18 | 19 | values_kong = < v.response_body 19 | } : null 20 | 21 | } 22 | 23 | data "http" "prometheus-operator_version" { 24 | count = (local.victoria-metrics-k8s-stack.enabled && local.victoria-metrics-k8s-stack.install_prometheus_operator_crds) || (local.kube-prometheus-stack.enabled && local.kube-prometheus-stack.manage_crds) ? 1 : 0 25 | url = local.prometheus-operator_chart 26 | } 27 | 28 | data "http" "prometheus-operator_crds" { 29 | for_each = (local.victoria-metrics-k8s-stack.enabled && local.victoria-metrics-k8s-stack.install_prometheus_operator_crds) || (local.kube-prometheus-stack.enabled && local.kube-prometheus-stack.manage_crds) ? toset(local.prometheus-operator_crds) : [] 30 | url = each.key 31 | } 32 | 33 | resource "kubectl_manifest" "prometheus-operator_crds" { 34 | for_each = (local.victoria-metrics-k8s-stack.enabled && local.victoria-metrics-k8s-stack.install_prometheus_operator_crds) || (local.kube-prometheus-stack.enabled && local.kube-prometheus-stack.manage_crds) ? 
local.prometheus-operator_crds_apply : {} 35 | yaml_body = each.value 36 | server_side_apply = true 37 | force_conflicts = true 38 | } 39 | -------------------------------------------------------------------------------- /linkerd2-cni.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | linkerd2-cni = merge( 3 | local.helm_defaults, 4 | { 5 | name = local.helm_dependencies[index(local.helm_dependencies.*.name, "linkerd2-cni")].name 6 | chart = local.helm_dependencies[index(local.helm_dependencies.*.name, "linkerd2-cni")].name 7 | repository = local.helm_dependencies[index(local.helm_dependencies.*.name, "linkerd2-cni")].repository 8 | chart_version = local.helm_dependencies[index(local.helm_dependencies.*.name, "linkerd2-cni")].version 9 | namespace = "linkerd-cni" 10 | create_ns = true 11 | enabled = local.linkerd.enabled 12 | cni_conflist_filename = "10-calico.conflist" 13 | default_network_policy = true 14 | }, 15 | var.linkerd2-cni 16 | ) 17 | 18 | values_linkerd2-cni = < v.content } : {} 30 | yaml_body = each.value 31 | } 32 | -------------------------------------------------------------------------------- /modules/aws/secrets-store-csi-driver.tf: -------------------------------------------------------------------------------- 1 | ../../secrets-store-csi-driver.tf -------------------------------------------------------------------------------- /modules/aws/templates/cert-manager-cluster-issuers.yaml.tpl: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cert-manager.io/v1 3 | kind: ClusterIssuer 4 | metadata: 5 | name: letsencrypt-staging 6 | spec: 7 | acme: 8 | server: https://acme-staging-v02.api.letsencrypt.org/directory 9 | email: '${acme_email}' 10 | privateKeySecretRef: 11 | name: letsencrypt-staging 12 | solvers: 13 | %{ if acme_dns01_enabled } 14 | - dns01: 15 | route53: 16 | region: '${aws_region}' 17 | %{ if role_arn != "" } 18 | role: 
'${role_arn}' 19 | %{ endif } 20 | %{ endif } 21 | %{ if acme_http01_enabled } 22 | - http01: 23 | ingress: 24 | class: '${acme_http01_ingress_class}' 25 | %{ if acme_dns01_enabled } 26 | selector: 27 | matchLabels: 28 | "use-http01-solver": "true" 29 | %{ endif } 30 | %{ endif } 31 | --- 32 | apiVersion: cert-manager.io/v1 33 | kind: ClusterIssuer 34 | metadata: 35 | name: letsencrypt 36 | spec: 37 | acme: 38 | server: https://acme-v02.api.letsencrypt.org/directory 39 | email: '${acme_email}' 40 | privateKeySecretRef: 41 | name: letsencrypt 42 | solvers: 43 | %{ if acme_dns01_enabled } 44 | - dns01: 45 | route53: 46 | region: '${aws_region}' 47 | %{ if role_arn != "" } 48 | role: '${role_arn}' 49 | %{ endif } 50 | %{ endif } 51 | %{ if acme_http01_enabled } 52 | - http01: 53 | ingress: 54 | class: '${acme_http01_ingress_class}' 55 | %{ if acme_dns01_enabled } 56 | selector: 57 | matchLabels: 58 | "use-http01-solver": "true" 59 | %{ endif } 60 | %{ endif } 61 | -------------------------------------------------------------------------------- /modules/aws/templates/cert-manager-csi-driver.yaml.tpl: -------------------------------------------------------------------------------- 1 | ../../../templates/cert-manager-csi-driver.yaml.tpl -------------------------------------------------------------------------------- /modules/aws/templates/cni-metrics-helper.yaml.tpl: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: cni-metrics-helper 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: cni-metrics-helper 10 | subjects: 11 | - kind: ServiceAccount 12 | name: cni-metrics-helper 13 | namespace: kube-system 14 | --- 15 | apiVersion: rbac.authorization.k8s.io/v1 16 | kind: ClusterRole 17 | metadata: 18 | name: cni-metrics-helper 19 | rules: 20 | - apiGroups: [""] 21 | resources: 22 | - nodes 23 | - pods 24 | 
- pods/proxy 25 | - services 26 | - resourcequotas 27 | - replicationcontrollers 28 | - limitranges 29 | - persistentvolumeclaims 30 | - persistentvolumes 31 | - namespaces 32 | - endpoints 33 | verbs: ["list", "watch", "get"] 34 | - apiGroups: ["extensions"] 35 | resources: 36 | - daemonsets 37 | - deployments 38 | - replicasets 39 | verbs: ["list", "watch"] 40 | - apiGroups: ["apps"] 41 | resources: 42 | - statefulsets 43 | verbs: ["list", "watch"] 44 | - apiGroups: ["batch"] 45 | resources: 46 | - cronjobs 47 | - jobs 48 | verbs: ["list", "watch"] 49 | - apiGroups: ["autoscaling"] 50 | resources: 51 | - horizontalpodautoscalers 52 | verbs: ["list", "watch"] 53 | --- 54 | kind: Deployment 55 | apiVersion: apps/v1 56 | metadata: 57 | name: cni-metrics-helper 58 | namespace: kube-system 59 | labels: 60 | k8s-app: cni-metrics-helper 61 | spec: 62 | selector: 63 | matchLabels: 64 | k8s-app: cni-metrics-helper 65 | template: 66 | metadata: 67 | labels: 68 | k8s-app: cni-metrics-helper 69 | spec: 70 | serviceAccountName: cni-metrics-helper 71 | containers: 72 | - image: 602401143452.dkr.ecr.us-west-2.amazonaws.com/cni-metrics-helper:${cni-metrics-helper_version} 73 | imagePullPolicy: Always 74 | name: cni-metrics-helper 75 | env: 76 | - name: USE_CLOUDWATCH 77 | value: "true" 78 | priorityClassName: "system-cluster-critical" 79 | --- 80 | apiVersion: v1 81 | kind: ServiceAccount 82 | metadata: 83 | name: cni-metrics-helper 84 | namespace: kube-system 85 | annotations: 86 | eks.amazonaws.com/role-arn: "${cni-metrics-helper_role_arn_irsa}" 87 | -------------------------------------------------------------------------------- /modules/aws/thanos-memcached.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | 3 | thanos-memcached = merge( 4 | local.helm_defaults, 5 | { 6 | chart = local.helm_dependencies[index(local.helm_dependencies.*.name, "oci://registry-1.docker.io/bitnamicharts/memcached")].name 7 | repository = "" 8 | 
chart_version = local.helm_dependencies[index(local.helm_dependencies.*.name, "oci://registry-1.docker.io/bitnamicharts/memcached")].version 9 | name = "thanos-memcached" 10 | namespace = local.thanos["namespace"] 11 | enabled = false 12 | }, 13 | var.thanos-memcached 14 | ) 15 | 16 | values_thanos-memcached = <<-VALUES 17 | architecture: "high-availability" 18 | replicaCount: 2 19 | podAntiAffinityPreset: hard 20 | metrics: 21 | enabled: ${local.kube-prometheus-stack["enabled"]} 22 | serviceMonitor: 23 | enabled: ${local.kube-prometheus-stack["enabled"]} 24 | VALUES 25 | } 26 | 27 | resource "helm_release" "thanos-memcached" { 28 | count = local.thanos-memcached["enabled"] ? 1 : 0 29 | repository = local.thanos-memcached["repository"] 30 | name = local.thanos-memcached["name"] 31 | chart = local.thanos-memcached["chart"] 32 | version = local.thanos-memcached["chart_version"] 33 | timeout = local.thanos-memcached["timeout"] 34 | force_update = local.thanos-memcached["force_update"] 35 | recreate_pods = local.thanos-memcached["recreate_pods"] 36 | wait = local.thanos-memcached["wait"] 37 | atomic = local.thanos-memcached["atomic"] 38 | cleanup_on_fail = local.thanos-memcached["cleanup_on_fail"] 39 | dependency_update = local.thanos-memcached["dependency_update"] 40 | disable_crd_hooks = local.thanos-memcached["disable_crd_hooks"] 41 | disable_webhooks = local.thanos-memcached["disable_webhooks"] 42 | render_subchart_notes = local.thanos-memcached["render_subchart_notes"] 43 | replace = local.thanos-memcached["replace"] 44 | reset_values = local.thanos-memcached["reset_values"] 45 | reuse_values = local.thanos-memcached["reuse_values"] 46 | skip_crds = local.thanos-memcached["skip_crds"] 47 | verify = local.thanos-memcached["verify"] 48 | values = compact([ 49 | local.values_thanos-memcached, 50 | local.thanos-memcached["extra_values"] 51 | ]) 52 | namespace = local.thanos-memcached["namespace"] 53 | 54 | depends_on = [ 55 | helm_release.kube-prometheus-stack, 56 | ] 
57 | } 58 | -------------------------------------------------------------------------------- /modules/aws/thanos-storegateway.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | 3 | thanos-storegateway = { for k, v in var.thanos-storegateway : k => merge( 4 | local.helm_defaults, 5 | { 6 | chart = local.helm_dependencies[index(local.helm_dependencies.*.name, "oci://registry-1.docker.io/bitnamicharts/thanos")].name 7 | repository = local.helm_dependencies[index(local.helm_dependencies.*.name, "oci://registry-1.docker.io/bitnamicharts/thanos")].repository 8 | chart_version = local.helm_dependencies[index(local.helm_dependencies.*.name, "oci://registry-1.docker.io/bitnamicharts/thanos")].version 9 | name = "${local.thanos["name"]}-storegateway-${k}" 10 | create_iam_resources_irsa = true 11 | iam_policy_override = null 12 | enabled = false 13 | default_global_requests = false 14 | default_global_limits = false 15 | bucket = null 16 | region = null 17 | name_prefix = "${var.cluster-name}-thanos-sg" 18 | }, 19 | v, 20 | ) } 21 | 22 | values_thanos-storegateway = { for k, v in local.thanos-storegateway : k => merge( 23 | { 24 | values = <<-VALUES 25 | objstoreConfig: 26 | type: S3 27 | config: 28 | bucket: ${v["bucket"]} 29 | region: ${v["region"] == null ? data.aws_region.current.name : v["region"]} 30 | endpoint: s3.${v["region"] == null ? data.aws_region.current.name : v["region"]}.amazonaws.com 31 | sse_config: 32 | type: "SSE-S3" 33 | metrics: 34 | enabled: true 35 | serviceMonitor: 36 | enabled: ${local.kube-prometheus-stack["enabled"] ? "true" : "false"} 37 | query: 38 | enabled: false 39 | queryFrontend: 40 | enabled: false 41 | compactor: 42 | enabled: false 43 | storegateway: 44 | replicaCount: 2 45 | extraFlags: 46 | - --ignore-deletion-marks-delay=24h 47 | enabled: true 48 | serviceAccount: 49 | annotations: 50 | eks.amazonaws.com/role-arn: "${v["enabled"] && v["create_iam_resources_irsa"] ? 
module.iam_assumable_role_thanos-storegateway[k].iam_role_arn : ""}" 51 | pdb: 52 | create: true 53 | minAvailable: 1 54 | VALUES 55 | }, 56 | v, 57 | ) } 58 | } 59 | 60 | module "iam_assumable_role_thanos-storegateway" { 61 | for_each = local.thanos-storegateway 62 | source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc" 63 | version = "~> 5.0" 64 | create_role = each.value["enabled"] && each.value["create_iam_resources_irsa"] 65 | role_name = "${each.value.name_prefix}-${each.key}" 66 | provider_url = replace(var.eks["cluster_oidc_issuer_url"], "https://", "") 67 | role_policy_arns = each.value["enabled"] && each.value["create_iam_resources_irsa"] ? [aws_iam_policy.thanos-storegateway[each.key].arn] : [] 68 | number_of_role_policy_arns = 1 69 | oidc_subjects_with_wildcards = ["system:serviceaccount:${local.thanos["namespace"]}:${each.value["name"]}-storegateway"] 70 | tags = local.tags 71 | } 72 | 73 | resource "aws_iam_policy" "thanos-storegateway" { 74 | for_each = { for k, v in local.thanos-storegateway : k => v if v["enabled"] && v["create_iam_resources_irsa"] } 75 | name = "${each.value.name_prefix}-${each.key}" 76 | policy = each.value["iam_policy_override"] == null ? 
data.aws_iam_policy_document.thanos-storegateway[each.key].json : each.value["iam_policy_override"] 77 | tags = local.tags 78 | } 79 | 80 | data "aws_iam_policy_document" "thanos-storegateway" { 81 | 82 | for_each = { for k, v in local.thanos-storegateway : k => v if v["enabled"] && v["create_iam_resources_irsa"] } 83 | 84 | statement { 85 | effect = "Allow" 86 | 87 | actions = [ 88 | "s3:ListBucket" 89 | ] 90 | 91 | resources = ["arn:${local.arn-partition}:s3:::${each.value["bucket"]}"] 92 | } 93 | 94 | statement { 95 | effect = "Allow" 96 | 97 | actions = [ 98 | "s3:*Object" 99 | ] 100 | 101 | resources = ["arn:${local.arn-partition}:s3:::${each.value["bucket"]}/*"] 102 | } 103 | } 104 | 105 | resource "helm_release" "thanos-storegateway" { 106 | for_each = { for k, v in local.thanos-storegateway : k => v if v["enabled"] } 107 | repository = each.value["repository"] 108 | name = each.value["name"] 109 | chart = each.value["chart"] 110 | version = each.value["chart_version"] 111 | timeout = each.value["timeout"] 112 | force_update = each.value["force_update"] 113 | recreate_pods = each.value["recreate_pods"] 114 | wait = each.value["wait"] 115 | atomic = each.value["atomic"] 116 | cleanup_on_fail = each.value["cleanup_on_fail"] 117 | dependency_update = each.value["dependency_update"] 118 | disable_crd_hooks = each.value["disable_crd_hooks"] 119 | disable_webhooks = each.value["disable_webhooks"] 120 | render_subchart_notes = each.value["render_subchart_notes"] 121 | replace = each.value["replace"] 122 | reset_values = each.value["reset_values"] 123 | reuse_values = each.value["reuse_values"] 124 | skip_crds = each.value["skip_crds"] 125 | verify = each.value["verify"] 126 | values = compact([ 127 | local.values_thanos-storegateway[each.key]["values"], 128 | each.value["default_global_requests"] ? local.values_thanos_global_requests : null, 129 | each.value["default_global_limits"] ? 
local.values_thanos_global_limits : null, 130 | each.value["extra_values"] 131 | ]) 132 | namespace = local.thanos["create_ns"] ? kubernetes_namespace.thanos.*.metadata.0.name[0] : local.thanos["namespace"] 133 | 134 | depends_on = [ 135 | helm_release.kube-prometheus-stack, 136 | ] 137 | } 138 | -------------------------------------------------------------------------------- /modules/aws/tigera-operator.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | tigera-operator = merge( 3 | local.helm_defaults, 4 | { 5 | name = local.helm_dependencies[index(local.helm_dependencies.*.name, "tigera-operator")].name 6 | chart = local.helm_dependencies[index(local.helm_dependencies.*.name, "tigera-operator")].name 7 | repository = local.helm_dependencies[index(local.helm_dependencies.*.name, "tigera-operator")].repository 8 | chart_version = local.helm_dependencies[index(local.helm_dependencies.*.name, "tigera-operator")].version 9 | namespace = "tigera-operator" 10 | create_ns = true 11 | manage_crds = true 12 | enabled = false 13 | default_network_policy = true 14 | }, 15 | var.tigera-operator 16 | ) 17 | 18 | tigera-operator_crds = "https://raw.githubusercontent.com/projectcalico/calico/${local.tigera-operator.chart_version}/manifests/operator-crds.yaml" 19 | 20 | calico_crds = "https://raw.githubusercontent.com/projectcalico/calico/${local.tigera-operator.chart_version}/manifests/crds.yaml" 21 | 22 | tigera-operator_crds_apply = local.tigera-operator.enabled && local.tigera-operator.manage_crds ? [for v in data.kubectl_file_documents.tigera-operator_crds.0.documents : { 23 | data : yamldecode(v) 24 | content : v 25 | } 26 | ] : null 27 | 28 | calico_crds_apply = local.tigera-operator.enabled && local.tigera-operator.manage_crds ? 
[for v in data.kubectl_file_documents.calico_crds.0.documents : { 29 | data : yamldecode(v) 30 | content : v 31 | } 32 | ] : null 33 | 34 | values_tigera-operator = <<-VALUES 35 | installation: 36 | kubernetesProvider: EKS 37 | VALUES 38 | } 39 | 40 | data "http" "tigera-operator_crds" { 41 | count = local.tigera-operator.enabled && local.tigera-operator.manage_crds ? 1 : 0 42 | url = local.tigera-operator_crds 43 | } 44 | 45 | data "http" "calico_crds" { 46 | count = local.tigera-operator.enabled && local.tigera-operator.manage_crds ? 1 : 0 47 | url = local.calico_crds 48 | } 49 | 50 | data "kubectl_file_documents" "tigera-operator_crds" { 51 | count = local.tigera-operator.enabled && local.tigera-operator.manage_crds ? 1 : 0 52 | content = data.http.tigera-operator_crds[0].response_body 53 | } 54 | 55 | data "kubectl_file_documents" "calico_crds" { 56 | count = local.tigera-operator.enabled && local.tigera-operator.manage_crds ? 1 : 0 57 | content = data.http.calico_crds[0].response_body 58 | } 59 | 60 | resource "kubectl_manifest" "tigera-operator_crds" { 61 | for_each = local.tigera-operator.enabled && local.tigera-operator.manage_crds ? { for v in local.tigera-operator_crds_apply : lower(join("/", compact([v.data.apiVersion, v.data.kind, lookup(v.data.metadata, "namespace", ""), v.data.metadata.name]))) => v.content } : {} 62 | yaml_body = each.value 63 | server_side_apply = true 64 | force_conflicts = true 65 | } 66 | 67 | resource "kubectl_manifest" "calico_crds" { 68 | for_each = local.tigera-operator.enabled && local.tigera-operator.manage_crds ?
{ for v in local.calico_crds_apply : lower(join("/", compact([v.data.apiVersion, v.data.kind, lookup(v.data.metadata, "namespace", ""), v.data.metadata.name]))) => v.content } : {} 69 | yaml_body = each.value 70 | server_side_apply = true 71 | force_conflicts = true 72 | } 73 | 74 | resource "kubernetes_namespace" "tigera-operator" { 75 | count = local.tigera-operator["enabled"] && local.tigera-operator["create_ns"] ? 1 : 0 76 | 77 | metadata { 78 | labels = { 79 | name = local.tigera-operator["namespace"] 80 | "${local.labels_prefix}/component" = "tigera-operator" 81 | } 82 | 83 | name = local.tigera-operator["namespace"] 84 | } 85 | } 86 | 87 | resource "helm_release" "tigera-operator" { 88 | count = local.tigera-operator["enabled"] ? 1 : 0 89 | repository = local.tigera-operator["repository"] 90 | name = local.tigera-operator["name"] 91 | chart = local.tigera-operator["chart"] 92 | version = local.tigera-operator["chart_version"] 93 | timeout = local.tigera-operator["timeout"] 94 | force_update = local.tigera-operator["force_update"] 95 | recreate_pods = local.tigera-operator["recreate_pods"] 96 | wait = local.tigera-operator["wait"] 97 | atomic = local.tigera-operator["atomic"] 98 | cleanup_on_fail = local.tigera-operator["cleanup_on_fail"] 99 | dependency_update = local.tigera-operator["dependency_update"] 100 | disable_crd_hooks = local.tigera-operator["disable_crd_hooks"] 101 | disable_webhooks = local.tigera-operator["disable_webhooks"] 102 | render_subchart_notes = local.tigera-operator["render_subchart_notes"] 103 | replace = local.tigera-operator["replace"] 104 | reset_values = local.tigera-operator["reset_values"] 105 | reuse_values = local.tigera-operator["reuse_values"] 106 | skip_crds = local.tigera-operator["skip_crds"] 107 | verify = local.tigera-operator["verify"] 108 | values = [ 109 | local.values_tigera-operator, 110 | local.tigera-operator["extra_values"] 111 | ] 112 | namespace = local.tigera-operator["create_ns"] ? 
kubernetes_namespace.tigera-operator.*.metadata.0.name[count.index] : local.tigera-operator["namespace"] 113 | 114 | depends_on = [ 115 | kubectl_manifest.prometheus-operator_crds 116 | ] 117 | } 118 | 119 | resource "kubernetes_network_policy" "tigera-operator_default_deny" { 120 | count = local.tigera-operator["create_ns"] && local.tigera-operator["enabled"] && local.tigera-operator["default_network_policy"] ? 1 : 0 121 | 122 | metadata { 123 | name = "${kubernetes_namespace.tigera-operator.*.metadata.0.name[count.index]}-default-deny" 124 | namespace = kubernetes_namespace.tigera-operator.*.metadata.0.name[count.index] 125 | } 126 | 127 | spec { 128 | pod_selector { 129 | } 130 | policy_types = ["Ingress"] 131 | } 132 | } 133 | 134 | resource "kubernetes_network_policy" "tigera-operator_allow_namespace" { 135 | count = local.tigera-operator["create_ns"] && local.tigera-operator["enabled"] && local.tigera-operator["default_network_policy"] ? 1 : 0 136 | 137 | metadata { 138 | name = "${kubernetes_namespace.tigera-operator.*.metadata.0.name[count.index]}-allow-namespace" 139 | namespace = kubernetes_namespace.tigera-operator.*.metadata.0.name[count.index] 140 | } 141 | 142 | spec { 143 | pod_selector { 144 | } 145 | 146 | ingress { 147 | from { 148 | namespace_selector { 149 | match_labels = { 150 | name = kubernetes_namespace.tigera-operator.*.metadata.0.name[count.index] 151 | } 152 | } 153 | } 154 | } 155 | 156 | policy_types = ["Ingress"] 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /modules/aws/traefik.tf: -------------------------------------------------------------------------------- 1 | ../../traefik.tf -------------------------------------------------------------------------------- /modules/aws/variables-aws.tf: -------------------------------------------------------------------------------- 1 | variable "arn-partition" { 2 | description = "ARN partition" 3 | default = "" 4 | type = string 5 | } 6 
| 7 | variable "aws" { 8 | description = "AWS provider customization" 9 | type = any 10 | default = {} 11 | } 12 | 13 | variable "aws-ebs-csi-driver" { 14 | description = "Customize aws-ebs-csi-driver helm chart, see `aws-ebs-csi-driver.tf`" 15 | type = any 16 | default = {} 17 | } 18 | 19 | variable "aws-efs-csi-driver" { 20 | description = "Customize aws-efs-csi-driver helm chart, see `aws-efs-csi-driver.tf`" 21 | type = any 22 | default = {} 23 | } 24 | 25 | variable "aws-for-fluent-bit" { 26 | description = "Customize aws-for-fluent-bit helm chart, see `aws-fluent-bit.tf`" 27 | type = any 28 | default = {} 29 | } 30 | 31 | variable "aws-load-balancer-controller" { 32 | description = "Customize aws-load-balancer-controller chart, see `aws-load-balancer-controller.tf` for supported values" 33 | type = any 34 | default = {} 35 | } 36 | 37 | variable "aws-node-termination-handler" { 38 | description = "Customize aws-node-termination-handler chart, see `aws-node-termination-handler.tf`" 39 | type = any 40 | default = {} 41 | } 42 | 43 | variable "cni-metrics-helper" { 44 | description = "Customize cni-metrics-helper deployment, see `cni-metrics-helper.tf` for supported values" 45 | type = any 46 | default = {} 47 | } 48 | 49 | variable "eks" { 50 | description = "EKS cluster inputs" 51 | type = any 52 | default = {} 53 | } 54 | 55 | variable "karpenter" { 56 | description = "Customize karpenter chart, see `karpenter.tf` for supported values" 57 | type = any 58 | default = {} 59 | } 60 | 61 | variable "prometheus-cloudwatch-exporter" { 62 | description = "Customize prometheus-cloudwatch-exporter chart, see `prometheus-cloudwatch-exporter.tf` for supported values" 63 | type = any 64 | default = {} 65 | } 66 | 67 | variable "s3-logging" { 68 | description = "Logging configuration for bucket created by this module" 69 | type = any 70 | default = {} 71 | } 72 | 73 | variable "secrets-store-csi-driver-provider-aws" { 74 | description = "Enable 
secrets-store-csi-driver-provider-aws" 75 | type = any 76 | default = {} 77 | } 78 | 79 | variable "tags" { 80 | description = "Map of tags for AWS resources" 81 | type = map(any) 82 | default = {} 83 | } 84 | 85 | variable "yet-another-cloudwatch-exporter" { 86 | description = "Customize yet-another-cloudwatch-exporter chart, see `yet-another-cloudwatch-exporter.tf` for supported values" 87 | type = any 88 | default = {} 89 | } 90 | -------------------------------------------------------------------------------- /modules/aws/variables.tf: -------------------------------------------------------------------------------- 1 | ../../variables.tf -------------------------------------------------------------------------------- /modules/aws/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3.2" 3 | required_providers { 4 | aws = ">= 5.27" 5 | helm = "~> 2.0" 6 | kubernetes = "~> 2.0, != 2.12" 7 | kubectl = { 8 | source = "alekc/kubectl" 9 | version = "~> 2.0" 10 | } 11 | flux = { 12 | source = "fluxcd/flux" 13 | version = "~> 1.0" 14 | } 15 | github = { 16 | source = "integrations/github" 17 | version = "~> 6.0" 18 | } 19 | tls = { 20 | source = "hashicorp/tls" 21 | version = "~> 4.0" 22 | } 23 | http = { 24 | source = "hashicorp/http" 25 | version = ">= 3" 26 | } 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /modules/azure/.terraform-docs.yml: -------------------------------------------------------------------------------- 1 | settings: 2 | lockfile: false 3 | -------------------------------------------------------------------------------- /modules/azure/admiralty.tf: -------------------------------------------------------------------------------- 1 | ../../admiralty.tf -------------------------------------------------------------------------------- /modules/azure/cert-manager-csi-driver.tf: 
--------------------------------------------------------------------------------
../../cert-manager-csi-driver.tf
--------------------------------------------------------------------------------
/modules/azure/cert-manager.tf:
--------------------------------------------------------------------------------
../../cert-manager.tf
--------------------------------------------------------------------------------
/modules/azure/csi-external-snapshotter.tf:
--------------------------------------------------------------------------------
../../csi-external-snapshotter.tf
--------------------------------------------------------------------------------
/modules/azure/flux2.tf:
--------------------------------------------------------------------------------
../../flux2.tf
--------------------------------------------------------------------------------
/modules/azure/helm-dependencies.yaml:
--------------------------------------------------------------------------------
../../helm-dependencies.yaml
--------------------------------------------------------------------------------
/modules/azure/ingress-nginx.tf:
--------------------------------------------------------------------------------
# ingress-nginx for AKS: chart metadata comes from helm-dependencies.yaml and is
# overridable through var.ingress-nginx.
locals {

  ingress-nginx = merge(
    local.helm_defaults,
    {
      name          = local.helm_dependencies[index(local.helm_dependencies.*.name, "ingress-nginx")].name
      chart         = local.helm_dependencies[index(local.helm_dependencies.*.name, "ingress-nginx")].name
      repository    = local.helm_dependencies[index(local.helm_dependencies.*.name, "ingress-nginx")].repository
      chart_version = local.helm_dependencies[index(local.helm_dependencies.*.name, "ingress-nginx")].version
      namespace     = "ingress-nginx"
    },
    var.ingress-nginx
  )
}

resource "kubernetes_namespace" "ingress-nginx" {
  count = local.ingress-nginx["enabled"] ? 1 : 0

  metadata {
    labels = {
      name                               = local.ingress-nginx["namespace"]
      "${local.labels_prefix}/component" = "ingress"
    }

    # Fix: the namespace name was hard-coded to "nginx-ingress", contradicting
    # the "ingress-nginx" default used in the `name` label above and silently
    # ignoring any user override of `namespace`. Use the configurable local.
    name = local.ingress-nginx["namespace"]
  }
}

resource "helm_release" "ingress-nginx" {
  count                 = local.ingress-nginx["enabled"] ? 1 : 0
  repository            = local.ingress-nginx["repository"]
  name                  = local.ingress-nginx["name"]
  chart                 = local.ingress-nginx["chart"]
  version               = local.ingress-nginx["chart_version"]
  timeout               = local.ingress-nginx["timeout"]
  force_update          = local.ingress-nginx["force_update"]
  recreate_pods         = local.ingress-nginx["recreate_pods"]
  wait                  = local.ingress-nginx["wait"]
  atomic                = local.ingress-nginx["atomic"]
  cleanup_on_fail       = local.ingress-nginx["cleanup_on_fail"]
  dependency_update     = local.ingress-nginx["dependency_update"]
  disable_crd_hooks     = local.ingress-nginx["disable_crd_hooks"]
  disable_webhooks      = local.ingress-nginx["disable_webhooks"]
  render_subchart_notes = local.ingress-nginx["render_subchart_notes"]
  replace               = local.ingress-nginx["replace"]
  reset_values          = local.ingress-nginx["reset_values"]
  reuse_values          = local.ingress-nginx["reuse_values"]
  skip_crds             = local.ingress-nginx["skip_crds"]
  verify                = local.ingress-nginx["verify"]
  values = [
    local.ingress-nginx["extra_values"],
  ]
  namespace = kubernetes_namespace.ingress-nginx.*.metadata.0.name[count.index]

  # The ingress controller needs to be scheduled on a Linux node. Windows Server
  # nodes shouldn't run the ingress controller.
  set {
    name  = "defaultBackend.nodeSelector.kubernetes\\.io/os"
    value = "linux"
  }

}
--------------------------------------------------------------------------------
/modules/azure/k8gb.tf:
--------------------------------------------------------------------------------
../../k8gb.tf
--------------------------------------------------------------------------------
/modules/azure/karma.tf:
--------------------------------------------------------------------------------
../../karma.tf
--------------------------------------------------------------------------------
/modules/azure/keda.tf:
--------------------------------------------------------------------------------
../../keda.tf
--------------------------------------------------------------------------------
/modules/azure/kong-crds.tf:
--------------------------------------------------------------------------------
../../kong-crds.tf
--------------------------------------------------------------------------------
/modules/azure/kong.tf:
--------------------------------------------------------------------------------
../../kong.tf
--------------------------------------------------------------------------------
/modules/azure/kube-prometheus-crd.tf:
--------------------------------------------------------------------------------
../../kube-prometheus-crd.tf
--------------------------------------------------------------------------------
/modules/azure/kube-prometheus.tf:
--------------------------------------------------------------------------------
../../kube-prometheus.tf
--------------------------------------------------------------------------------
/modules/azure/linkerd-viz.tf:
--------------------------------------------------------------------------------
../../linkerd-viz.tf
--------------------------------------------------------------------------------
/modules/azure/linkerd.tf:
-------------------------------------------------------------------------------- 1 | ../../linkerd.tf -------------------------------------------------------------------------------- /modules/azure/linkerd2-cni.tf: -------------------------------------------------------------------------------- 1 | ../../linkerd2-cni.tf -------------------------------------------------------------------------------- /modules/azure/locals.tf: -------------------------------------------------------------------------------- 1 | ../../locals.tf -------------------------------------------------------------------------------- /modules/azure/loki-stack.tf: -------------------------------------------------------------------------------- 1 | ../../loki-stack.tf -------------------------------------------------------------------------------- /modules/azure/node-problem-detector.tf: -------------------------------------------------------------------------------- 1 | ../../node-problem-detector.tf -------------------------------------------------------------------------------- /modules/azure/priority-class.tf: -------------------------------------------------------------------------------- 1 | ../../priority-class.tf -------------------------------------------------------------------------------- /modules/azure/prometheus-adapter.tf: -------------------------------------------------------------------------------- 1 | ../../prometheus-adapter.tf -------------------------------------------------------------------------------- /modules/azure/prometheus-blackbox-exporter.tf: -------------------------------------------------------------------------------- 1 | ../../prometheus-blackbox-exporter.tf -------------------------------------------------------------------------------- /modules/azure/reloader.tf: -------------------------------------------------------------------------------- 1 | ../../reloader.tf -------------------------------------------------------------------------------- 
/modules/azure/sealed-secrets.tf:
--------------------------------------------------------------------------------
../../sealed-secrets.tf
--------------------------------------------------------------------------------
/modules/azure/secrets-store-csi-driver.tf:
--------------------------------------------------------------------------------
../../secrets-store-csi-driver.tf
--------------------------------------------------------------------------------
/modules/azure/templates/cert-manager-cluster-issuers.yaml.tpl:
--------------------------------------------------------------------------------
../../../templates/cert-manager-cluster-issuers.yaml.tpl
--------------------------------------------------------------------------------
/modules/azure/templates/cert-manager-csi-driver.yaml.tpl:
--------------------------------------------------------------------------------
../../../templates/cert-manager-csi-driver.yaml.tpl
--------------------------------------------------------------------------------
/modules/azure/tigera-operator.tf:
--------------------------------------------------------------------------------
../../tigera-operator.tf
--------------------------------------------------------------------------------
/modules/azure/traefik.tf:
--------------------------------------------------------------------------------
../../traefik.tf
--------------------------------------------------------------------------------
/modules/azure/variables.tf:
--------------------------------------------------------------------------------
../../variables.tf
--------------------------------------------------------------------------------
/modules/azure/version.tf:
--------------------------------------------------------------------------------
# Provider version constraints for the Azure (AKS) module.
terraform {
  required_version = ">= 1.3.2"
  required_providers {
    azurerm = "~> 4.0"
    helm    = "~> 2.0"
    # 2.12 is excluded from the kubernetes provider range.
    kubernetes = "~> 2.0, != 2.12"
    kubectl = {
      source  = "alekc/kubectl"
      version = "~> 2.0"
    }
    flux = {
      source  = "fluxcd/flux"
      version = "~> 1.0"
    }
    github = {
      source  = "integrations/github"
      version = "~> 6.0"
    }
    tls = {
      source  = "hashicorp/tls"
      version = "~> 4.0"
    }
    http = {
      source  = "hashicorp/http"
      version = ">= 3"
    }
  }
}
--------------------------------------------------------------------------------
/modules/azure/victoria-metrics-k8s-stack.tf:
--------------------------------------------------------------------------------
../../victoria-metrics-k8s-stack.tf
--------------------------------------------------------------------------------
/modules/google/.terraform-docs.yml:
--------------------------------------------------------------------------------
settings:
  lockfile: false
--------------------------------------------------------------------------------
/modules/google/admiralty.tf:
--------------------------------------------------------------------------------
../../admiralty.tf
--------------------------------------------------------------------------------
/modules/google/cert-manager-csi-driver.tf:
--------------------------------------------------------------------------------
../../cert-manager-csi-driver.tf
--------------------------------------------------------------------------------
/modules/google/data.tf:
--------------------------------------------------------------------------------
# Data sources for the currently configured GCP project and client credentials,
# referenced by the other google-module resources.
data "google_project" "current" {}

data "google_client_config" "current" {}
--------------------------------------------------------------------------------
/modules/google/flux2.tf:
--------------------------------------------------------------------------------
../../flux2.tf
--------------------------------------------------------------------------------
/modules/google/helm-dependencies.yaml:
../../helm-dependencies.yaml
--------------------------------------------------------------------------------
/modules/google/ip-masq-agent.tf:
--------------------------------------------------------------------------------
# Deploys the GKE ip-masq-agent manifests (ConfigMap + DaemonSet) when enabled.
locals {
  ip-masq-agent = merge(
    {
      enabled = false
    },
    var.ip-masq-agent
  )
}

data "kubectl_filename_list" "ip_masq_agent_manifests" {
  # Fix: anchor the glob to the module directory. A "./" path is resolved
  # relative to the caller's working directory, so the manifests were not found
  # when this module is consumed from another root module or the registry.
  pattern = "${path.module}/manifests/gke-ip-masq/*.yaml"
}

resource "kubectl_manifest" "ip_masq_agent" {
  # One resource per manifest file found above; none when the feature is off.
  count     = local.ip-masq-agent.enabled ? length(data.kubectl_filename_list.ip_masq_agent_manifests.matches) : 0
  yaml_body = file(element(data.kubectl_filename_list.ip_masq_agent_manifests.matches, count.index))
}
--------------------------------------------------------------------------------
/modules/google/k8gb.tf:
--------------------------------------------------------------------------------
../../k8gb.tf
--------------------------------------------------------------------------------
/modules/google/karma.tf:
--------------------------------------------------------------------------------
../../karma.tf
--------------------------------------------------------------------------------
/modules/google/keda.tf:
--------------------------------------------------------------------------------
../../keda.tf
--------------------------------------------------------------------------------
/modules/google/kube-prometheus-crd.tf:
--------------------------------------------------------------------------------
../../kube-prometheus-crd.tf
--------------------------------------------------------------------------------
/modules/google/linkerd-viz.tf:
--------------------------------------------------------------------------------
../../linkerd-viz.tf
--------------------------------------------------------------------------------
/modules/google/linkerd.tf:
--------------------------------------------------------------------------------
../../linkerd.tf
--------------------------------------------------------------------------------
/modules/google/linkerd2-cni.tf:
--------------------------------------------------------------------------------
../../linkerd2-cni.tf
--------------------------------------------------------------------------------
/modules/google/locals.tf:
--------------------------------------------------------------------------------
../../locals.tf
--------------------------------------------------------------------------------
/modules/google/manifests/gke-ip-masq/ip-masq-agent-configmap.yaml:
--------------------------------------------------------------------------------
---
# Configuration consumed by the ip-masq-agent DaemonSet below. Traffic to the
# listed RFC1918 ranges is NOT masqueraded; everything else is SNATed.
apiVersion: v1
kind: ConfigMap
metadata:
  name: ip-masq-agent
  namespace: kube-system
data:
  config: |
    nonMasqueradeCIDRs:
      - 10.0.0.0/8
      - 172.16.0.0/12
      - 192.168.0.0/16
    resyncInterval: 60s
    masqLinkLocal: false
--------------------------------------------------------------------------------
/modules/google/manifests/gke-ip-masq/ip-masq-agent-daemonset.yaml:
--------------------------------------------------------------------------------
---
# Runs the GKE ip-masq-agent on every node (tolerates all taints) with host
# networking and privileged access, reading its config from the optional
# `ip-masq-agent` ConfigMap mounted at /etc/config.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: ip-masq-agent
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: ip-masq-agent
  template:
    metadata:
      labels:
        k8s-app: ip-masq-agent
    spec:
      hostNetwork: true
      containers:
        - name: ip-masq-agent
          image: gke.gcr.io/ip-masq-agent:v2.9.3-v0.2.4-gke.5
          args:
            # The masq-chain must be IP-MASQ
            - --masq-chain=IP-MASQ
            # To non-masquerade reserved IP ranges by default,
            # uncomment the following line.
            # - --nomasq-all-reserved-ranges
          securityContext:
            privileged: true
          volumeMounts:
            - name: config-volume
              mountPath: /etc/config
      volumes:
        - name: config-volume
          configMap:
            name: ip-masq-agent
            # optional: the agent falls back to defaults if the ConfigMap is absent
            optional: true
            items:
              - key: config
                path: ip-masq-agent
      tolerations:
        - effect: NoSchedule
          operator: Exists
        - effect: NoExecute
          operator: Exists
        - key: "CriticalAddonsOnly"
          operator: "Exists"
--------------------------------------------------------------------------------
/modules/google/node-problem-detector.tf:
--------------------------------------------------------------------------------
../../node-problem-detector.tf
--------------------------------------------------------------------------------
/modules/google/priority-class.tf:
--------------------------------------------------------------------------------
../../priority-class.tf
--------------------------------------------------------------------------------
/modules/google/prometheus-adapter.tf:
--------------------------------------------------------------------------------
../../prometheus-adapter.tf
--------------------------------------------------------------------------------
/modules/google/promtail.tf:
--------------------------------------------------------------------------------
../../promtail.tf
--------------------------------------------------------------------------------
/modules/google/reloader.tf:
--------------------------------------------------------------------------------
../../reloader.tf
--------------------------------------------------------------------------------
/modules/google/sealed-secrets.tf:
--------------------------------------------------------------------------------
../../sealed-secrets.tf
--------------------------------------------------------------------------------
/modules/google/secrets-store-csi-driver.tf:
-------------------------------------------------------------------------------- 1 | ../../secrets-store-csi-driver.tf -------------------------------------------------------------------------------- /modules/google/templates/cert-manager-cluster-issuers.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cert-manager.io/v1 3 | kind: ClusterIssuer 4 | metadata: 5 | name: letsencrypt-staging 6 | spec: 7 | acme: 8 | server: https://acme-staging-v02.api.letsencrypt.org/directory 9 | email: {{ acme_email }} 10 | privateKeySecretRef: 11 | name: letsencrypt-staging 12 | solvers: 13 | {%- if acme_dns01_enabled and acme_dns01_provider == "route53" %} 14 | - dns01: 15 | route53: 16 | region: "{{ acme_dns01_provider_route53.aws_region }}" 17 | {%- endif -%} 18 | {%- if acme_dns01_enabled and acme_dns01_provider == "clouddns" %} 19 | - dns01: 20 | cloudDNS: 21 | project: {{ acme_dns01_provider_clouddns.project_id }} 22 | hostedZoneName: {{ acme_dns01_provider_clouddns.dns_zone_name }} 23 | {%- endif -%} 24 | {%- if acme_http01_enabled %} 25 | - http01: 26 | ingress: 27 | class: {{ acme_http01_ingress_class }} 28 | {%- if acme_dns01_enabled %} 29 | selector: 30 | matchLabels: 31 | "use-http01-solver": "true" 32 | {%- endif %} 33 | {%- endif %} 34 | --- 35 | apiVersion: cert-manager.io/v1 36 | kind: ClusterIssuer 37 | metadata: 38 | name: letsencrypt 39 | spec: 40 | acme: 41 | server: https://acme-v02.api.letsencrypt.org/directory 42 | email: {{ acme_email }} 43 | privateKeySecretRef: 44 | name: letsencrypt 45 | solvers: 46 | {%- if acme_dns01_enabled and acme_dns01_provider == "route53" %} 47 | - dns01: 48 | route53: 49 | region: "{{ acme_dns01_provider_route53.aws_region }}" 50 | {%- endif -%} 51 | {%- if acme_dns01_enabled and acme_dns01_provider == "clouddns" %} 52 | - dns01: 53 | cloudDNS: 54 | project: {{ acme_dns01_provider_clouddns.project_id }} 55 | hostedZoneName: {{ 
acme_dns01_provider_clouddns.dns_zone_name }} 56 | {%- endif -%} 57 | {%- if acme_http01_enabled %} 58 | - http01: 59 | ingress: 60 | class: {{ acme_http01_ingress_class }} 61 | {%- if acme_dns01_enabled %} 62 | selector: 63 | matchLabels: 64 | "use-http01-solver": "true" 65 | {%- endif %} 66 | {%- endif %} 67 | -------------------------------------------------------------------------------- /modules/google/templates/cert-manager-cluster-issuers.yaml.tpl: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cert-manager.io/v1 3 | kind: ClusterIssuer 4 | metadata: 5 | name: letsencrypt-staging 6 | spec: 7 | acme: 8 | server: https://acme-staging-v02.api.letsencrypt.org/directory 9 | email: '${acme_email}' 10 | privateKeySecretRef: 11 | name: letsencrypt-staging 12 | solvers: 13 | %{ if acme_dns01_enabled } 14 | - dns01: 15 | route53: 16 | region: '${aws_region}' 17 | %{ endif } 18 | %{ if acme_http01_enabled } 19 | - http01: 20 | ingress: 21 | class: '${acme_http01_ingress_class}' 22 | %{ if acme_dns01_enabled } 23 | selector: 24 | matchLabels: 25 | "use-http01-solver": "true" 26 | %{ endif } 27 | %{ endif } 28 | --- 29 | apiVersion: cert-manager.io/v1 30 | kind: ClusterIssuer 31 | metadata: 32 | name: letsencrypt 33 | spec: 34 | acme: 35 | server: https://acme-v02.api.letsencrypt.org/directory 36 | email: '${acme_email}' 37 | privateKeySecretRef: 38 | name: letsencrypt 39 | solvers: 40 | %{ if acme_dns01_enabled } 41 | - dns01: 42 | route53: 43 | region: '${aws_region}' 44 | %{ endif } 45 | %{ if acme_http01_enabled } 46 | - http01: 47 | ingress: 48 | class: '${acme_http01_ingress_class}' 49 | %{ if acme_dns01_enabled } 50 | selector: 51 | matchLabels: 52 | "use-http01-solver": "true" 53 | %{ endif } 54 | %{ endif } 55 | -------------------------------------------------------------------------------- /modules/google/templates/cert-manager-csi-driver.yaml.tpl: 
--------------------------------------------------------------------------------
../../../templates/cert-manager-csi-driver.yaml.tpl
--------------------------------------------------------------------------------
/modules/google/templates/cni-metrics-helper.yaml.tpl:
--------------------------------------------------------------------------------
---
# NOTE(review): this template is AWS-specific (ECR image, CloudWatch, IRSA
# annotation) yet lives in the google module — presumably a copy from
# modules/aws; confirm whether it is used here at all.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cni-metrics-helper
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cni-metrics-helper
subjects:
  - kind: ServiceAccount
    name: cni-metrics-helper
    namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cni-metrics-helper
rules:
  - apiGroups: [""]
    resources:
      - nodes
      - pods
      - pods/proxy
      - services
      - resourcequotas
      - replicationcontrollers
      - limitranges
      - persistentvolumeclaims
      - persistentvolumes
      - namespaces
      - endpoints
    verbs: ["list", "watch", "get"]
  - apiGroups: ["extensions"]
    resources:
      - daemonsets
      - deployments
      - replicasets
    verbs: ["list", "watch"]
  - apiGroups: ["apps"]
    resources:
      - statefulsets
    verbs: ["list", "watch"]
  - apiGroups: ["batch"]
    resources:
      - cronjobs
      - jobs
    verbs: ["list", "watch"]
  - apiGroups: ["autoscaling"]
    resources:
      - horizontalpodautoscalers
    verbs: ["list", "watch"]
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: cni-metrics-helper
  namespace: kube-system
  labels:
    k8s-app: cni-metrics-helper
spec:
  selector:
    matchLabels:
      k8s-app: cni-metrics-helper
  template:
    metadata:
      labels:
        k8s-app: cni-metrics-helper
    spec:
      serviceAccountName: cni-metrics-helper
      containers:
        - image: 602401143452.dkr.ecr.us-west-2.amazonaws.com/cni-metrics-helper:${cni-metrics-helper_version}
          imagePullPolicy: Always
          name: cni-metrics-helper
          env:
            - name: USE_CLOUDWATCH
              value: "true"
      priorityClassName: "system-cluster-critical"
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cni-metrics-helper
  namespace: kube-system
  annotations:
    eks.amazonaws.com/role-arn: "${cni-metrics-helper_role_arn_irsa}"
--------------------------------------------------------------------------------
/modules/google/thanos-memcached.tf:
--------------------------------------------------------------------------------
# Optional memcached deployment used as a caching layer for Thanos, installed
# from the Bitnami OCI chart into the Thanos namespace.
locals {

  thanos-memcached = merge(
    local.helm_defaults,
    {
      chart         = local.helm_dependencies[index(local.helm_dependencies.*.name, "oci://registry-1.docker.io/bitnamicharts/memcached")].name
      repository    = ""
      chart_version = local.helm_dependencies[index(local.helm_dependencies.*.name, "oci://registry-1.docker.io/bitnamicharts/memcached")].version
      name          = "thanos-memcached"
      namespace     = local.thanos["namespace"]
      enabled       = false
    },
    var.thanos-memcached
  )

  values_thanos-memcached = <<-VALUES
    architecture: "high-availability"
    replicaCount: 2
    podAntiAffinityPreset: hard
    metrics:
      enabled: ${local.kube-prometheus-stack["enabled"]}
      serviceMonitor:
        enabled: ${local.kube-prometheus-stack["enabled"]}
    VALUES
}

resource "helm_release" "thanos-memcached" {
  count                 = local.thanos-memcached["enabled"] ? 1 : 0
  repository            = local.thanos-memcached["repository"]
  name                  = local.thanos-memcached["name"]
  chart                 = local.thanos-memcached["chart"]
  version               = local.thanos-memcached["chart_version"]
  timeout               = local.thanos-memcached["timeout"]
  force_update          = local.thanos-memcached["force_update"]
  recreate_pods         = local.thanos-memcached["recreate_pods"]
  wait                  = local.thanos-memcached["wait"]
  atomic                = local.thanos-memcached["atomic"]
  cleanup_on_fail       = local.thanos-memcached["cleanup_on_fail"]
  dependency_update     = local.thanos-memcached["dependency_update"]
  disable_crd_hooks     = local.thanos-memcached["disable_crd_hooks"]
  disable_webhooks      = local.thanos-memcached["disable_webhooks"]
  render_subchart_notes = local.thanos-memcached["render_subchart_notes"]
  replace               = local.thanos-memcached["replace"]
  reset_values          = local.thanos-memcached["reset_values"]
  reuse_values          = local.thanos-memcached["reuse_values"]
  skip_crds             = local.thanos-memcached["skip_crds"]
  verify                = local.thanos-memcached["verify"]
  values = compact([
    local.values_thanos-memcached,
    local.thanos-memcached["extra_values"]
  ])
  namespace = local.thanos-memcached["namespace"]

  depends_on = [
    helm_release.kube-prometheus-stack,
  ]
}
--------------------------------------------------------------------------------
/modules/google/thanos-storegateway.tf:
--------------------------------------------------------------------------------
# One Thanos store-gateway release per entry of var.thanos-storegateway, each
# reading a GCS bucket via a dedicated Workload Identity service account.
locals {

  thanos-storegateway = { for k, v in var.thanos-storegateway : k => merge(
    local.helm_defaults,
    {
      chart                   = local.helm_dependencies[index(local.helm_dependencies.*.name, "thanos")].name
      repository              = local.helm_dependencies[index(local.helm_dependencies.*.name, "thanos")].repository
      chart_version           = local.helm_dependencies[index(local.helm_dependencies.*.name, "thanos")].version
      name                    = "${local.thanos["name"]}-storegateway-${k}"
      create_iam_resources    = true
      iam_policy_override     = null
      enabled                 = false
      default_global_requests = false
      default_global_limits   = false
      bucket                  = null
      region                  = null
      name_prefix             = "${var.cluster-name}-thanos-sg"
    },
    v,
  ) }

  values_thanos-storegateway = { for k, v in local.thanos-storegateway : k => merge(
    {
      values = <<-VALUES
        objstoreConfig:
          type: GCS
          config:
            bucket: ${v["bucket"]}
            service_account: "${v["name_prefix"]}-thanos-sg"
        metrics:
          enabled: true
          serviceMonitor:
            enabled: ${local.kube-prometheus-stack["enabled"] ? "true" : "false"}
        query:
          enabled: false
        queryFrontend:
          enabled: false
        compactor:
          enabled: false
        storegateway:
          replicaCount: 2
          extraFlags:
            - --ignore-deletion-marks-delay=24h
          enabled: true
          serviceAccount:
            annotations:
              # Fix: dropped the leftover AWS `eks.amazonaws.com/role-arn`
              # annotation, which referenced `iam_role_arn` — an output the GCP
              # workload-identity module does not declare. GKE Workload Identity
              # binds via the GSA *email*, not its resource name.
              iam.gke.io/gcp-service-account: "${v["enabled"] && v["create_iam_resources"] ? module.iam_assumable_sa_thanos-storegateway[k].gcp_service_account_email : ""}"
          pdb:
            create: true
            minAvailable: 1
        VALUES
    },
    v,
  ) }
}

module "iam_assumable_sa_thanos-storegateway" {
  for_each  = local.thanos-storegateway
  source    = "terraform-google-modules/kubernetes-engine/google//modules/workload-identity"
  version   = "~> 36.0"
  namespace = each.value["namespace"]
  # Fix: google_project.current.id is "projects/<project_id>"; the
  # workload-identity module expects the bare project id.
  project_id = data.google_project.current.project_id
  name       = "${each.value["name_prefix"]}-${each.key}"
}


module "thanos-storegateway_bucket_iam" {
  for_each = local.thanos-storegateway
  source   = "terraform-google-modules/iam/google//modules/storage_buckets_iam"
  version  = "~> 8.0"

  mode            = "additive"
  storage_buckets = [each.value["bucket"]]
  bindings = {
    "roles/storage.objectViewer" = [
      "serviceAccount:${module.iam_assumable_sa_thanos-storegateway["${each.key}"].gcp_service_account_email}"
    ]
  }
}

resource "helm_release" "thanos-storegateway" {
  for_each              = { for k, v in local.thanos-storegateway : k => v if v["enabled"] }
  repository            = each.value["repository"]
  name                  = each.value["name"]
  chart                 = each.value["chart"]
  version               = each.value["chart_version"]
  timeout               = each.value["timeout"]
  force_update          = each.value["force_update"]
  recreate_pods         = each.value["recreate_pods"]
  wait                  = each.value["wait"]
  atomic                = each.value["atomic"]
  cleanup_on_fail       = each.value["cleanup_on_fail"]
  dependency_update     = each.value["dependency_update"]
  disable_crd_hooks     = each.value["disable_crd_hooks"]
  disable_webhooks      = each.value["disable_webhooks"]
  render_subchart_notes = each.value["render_subchart_notes"]
  replace               = each.value["replace"]
  reset_values          = each.value["reset_values"]
  reuse_values          = each.value["reuse_values"]
  skip_crds             = each.value["skip_crds"]
  verify                = each.value["verify"]
  values = compact([
    local.values_thanos-storegateway[each.key]["values"],
    each.value["default_global_requests"] ? local.values_thanos_global_requests : null,
    each.value["default_global_limits"] ? local.values_thanos_global_limits : null,
    each.value["extra_values"]
  ])
  namespace = local.thanos["create_ns"] ? kubernetes_namespace.thanos.*.metadata.0.name[0] : local.thanos["namespace"]

  depends_on = [
    helm_release.kube-prometheus-stack,
  ]
}
--------------------------------------------------------------------------------
/modules/google/traefik.tf:
--------------------------------------------------------------------------------
../../traefik.tf
--------------------------------------------------------------------------------
/modules/google/variables-google.tf:
--------------------------------------------------------------------------------
variable "google" {
  description = "GCP provider customization"
  type        = any
  default     = {}
}

variable "project_id" {
  description = "GCP project id"
  type        = string
  default     = ""
}

variable "cni-metrics-helper" {
  description = "Customize cni-metrics-helper deployment, see `cni-metrics-helper.tf` for supported values"
  type        = any
  default     = {}
}

variable "gke" {
  description = "GKE cluster inputs"
  type        = any
  default     = {}
}

variable "prometheus-cloudwatch-exporter" {
  description = "Customize prometheus-cloudwatch-exporter chart, see `prometheus-cloudwatch-exporter.tf` for supported values"
  type        = any
  default     = {}
}

variable "tags" {
  description = "Map of tags for Google resources"
  type        = map(any)
  default     = {}
}
--------------------------------------------------------------------------------
/modules/google/variables.tf:
--------------------------------------------------------------------------------
../../variables.tf -------------------------------------------------------------------------------- /modules/google/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3" 3 | required_providers { 4 | google = ">= 4.69" 5 | google-beta = ">= 4.69" 6 | helm = "~> 2.0" 7 | kubernetes = "~> 2.0, != 2.12" 8 | kubectl = { 9 | source = "alekc/kubectl" 10 | version = "~> 2.0" 11 | } 12 | jinja = { 13 | source = "NikolaLohinski/jinja" 14 | version = "~> 2.0" 15 | } 16 | flux = { 17 | source = "fluxcd/flux" 18 | version = "~> 1.0" 19 | } 20 | github = { 21 | source = "integrations/github" 22 | version = "~> 6.0" 23 | } 24 | tls = { 25 | source = "hashicorp/tls" 26 | version = "~> 4.0" 27 | } 28 | http = { 29 | source = "hashicorp/http" 30 | version = ">= 3" 31 | } 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /modules/scaleway/.terraform-docs.yml: -------------------------------------------------------------------------------- 1 | settings: 2 | lockfile: false 3 | -------------------------------------------------------------------------------- /modules/scaleway/admiralty.tf: -------------------------------------------------------------------------------- 1 | ../../admiralty.tf -------------------------------------------------------------------------------- /modules/scaleway/cert-manager-csi-driver.tf: -------------------------------------------------------------------------------- 1 | ../../cert-manager-csi-driver.tf -------------------------------------------------------------------------------- /modules/scaleway/csi-external-snapshotter.tf: -------------------------------------------------------------------------------- 1 | ../../csi-external-snapshotter.tf -------------------------------------------------------------------------------- /modules/scaleway/examples/README.md: 
-------------------------------------------------------------------------------- 1 | ## Examples 2 | 3 | Examples are located in [tkap](https://github.com/particuleio/tkap) repository. 4 | -------------------------------------------------------------------------------- /modules/scaleway/external-dns.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | 3 | external-dns = merge( 4 | local.helm_defaults, 5 | { 6 | name = local.helm_dependencies[index(local.helm_dependencies.*.name, "external-dns")].name 7 | chart = local.helm_dependencies[index(local.helm_dependencies.*.name, "external-dns")].name 8 | repository = local.helm_dependencies[index(local.helm_dependencies.*.name, "external-dns")].repository 9 | chart_version = local.helm_dependencies[index(local.helm_dependencies.*.name, "external-dns")].version 10 | namespace = "external-dns" 11 | service_account_name = "external-dns" 12 | enabled = false 13 | default_network_policy = true 14 | secret_name = "scaleway-credentials" 15 | }, 16 | var.external-dns 17 | ) 18 | 19 | values_external-dns = <<-VALUES 20 | provider: scaleway 21 | txtPrefix: "ext-dns-" 22 | txtOwnerId: ${var.cluster-name} 23 | logFormat: json 24 | policy: sync 25 | logFormat: json 26 | serviceMonitor: 27 | enabled: ${local.kube-prometheus-stack["enabled"] || local.victoria-metrics-k8s-stack["enabled"]} 28 | priorityClassName: ${local.priority-class["create"] ? 
kubernetes_priority_class.kubernetes_addons[0].metadata[0].name : ""} 29 | env: 30 | - name: SCW_ACCESS_KEY 31 | valueFrom: 32 | secretKeyRef: 33 | name: ${local.external-dns["secret_name"]} 34 | key: SCW_ACCESS_KEY 35 | - name: SCW_SECRET_KEY 36 | valueFrom: 37 | secretKeyRef: 38 | name: ${local.external-dns["secret_name"]} 39 | key: SCW_SECRET_KEY 40 | - name: SCW_DEFAULT_ORGANIZATION_ID 41 | valueFrom: 42 | secretKeyRef: 43 | name: ${local.external-dns["secret_name"]} 44 | key: SCW_DEFAULT_ORGANIZATION_ID 45 | VALUES 46 | } 47 | 48 | resource "kubernetes_namespace" "external-dns" { 49 | count = local.external-dns["enabled"] ? 1 : 0 50 | 51 | metadata { 52 | labels = { 53 | name = local.external-dns["namespace"] 54 | } 55 | 56 | name = local.external-dns["namespace"] 57 | } 58 | } 59 | 60 | resource "helm_release" "external-dns" { 61 | count = local.external-dns["enabled"] ? 1 : 0 62 | repository = local.external-dns["repository"] 63 | name = local.external-dns["name"] 64 | chart = local.external-dns["chart"] 65 | version = local.external-dns["chart_version"] 66 | timeout = local.external-dns["timeout"] 67 | force_update = local.external-dns["force_update"] 68 | recreate_pods = local.external-dns["recreate_pods"] 69 | wait = local.external-dns["wait"] 70 | atomic = local.external-dns["atomic"] 71 | cleanup_on_fail = local.external-dns["cleanup_on_fail"] 72 | dependency_update = local.external-dns["dependency_update"] 73 | disable_crd_hooks = local.external-dns["disable_crd_hooks"] 74 | disable_webhooks = local.external-dns["disable_webhooks"] 75 | render_subchart_notes = local.external-dns["render_subchart_notes"] 76 | replace = local.external-dns["replace"] 77 | reset_values = local.external-dns["reset_values"] 78 | reuse_values = local.external-dns["reuse_values"] 79 | skip_crds = local.external-dns["skip_crds"] 80 | verify = local.external-dns["verify"] 81 | values = [ 82 | local.values_external-dns, 83 | local.external-dns["extra_values"] 84 | ] 85 | 
namespace = kubernetes_namespace.external-dns.*.metadata.0.name[count.index] 86 | 87 | depends_on = [ 88 | kubectl_manifest.prometheus-operator_crds 89 | ] 90 | } 91 | 92 | resource "kubernetes_secret" "external-dns_scaleway_credentials" { 93 | count = local.external-dns["enabled"] ? 1 : 0 94 | metadata { 95 | name = local.external-dns["secret_name"] 96 | namespace = local.external-dns["namespace"] 97 | } 98 | data = { 99 | SCW_ACCESS_KEY = local.scaleway["scw_access_key"] 100 | SCW_SECRET_KEY = local.scaleway["scw_secret_key"] 101 | SCW_DEFAULT_ORGANIZATION_ID = local.scaleway["scw_default_organization_id"] 102 | } 103 | } 104 | 105 | resource "kubernetes_network_policy" "external-dns_default_deny" { 106 | count = local.external-dns["enabled"] && local.external-dns["default_network_policy"] ? 1 : 0 107 | 108 | metadata { 109 | name = "${kubernetes_namespace.external-dns.*.metadata.0.name[count.index]}-default-deny" 110 | namespace = kubernetes_namespace.external-dns.*.metadata.0.name[count.index] 111 | } 112 | 113 | spec { 114 | pod_selector { 115 | } 116 | policy_types = ["Ingress"] 117 | } 118 | } 119 | 120 | resource "kubernetes_network_policy" "external-dns_allow_namespace" { 121 | count = local.external-dns["enabled"] && local.external-dns["default_network_policy"] ? 
1 : 0 122 | 123 | metadata { 124 | name = "${kubernetes_namespace.external-dns.*.metadata.0.name[count.index]}-allow-namespace" 125 | namespace = kubernetes_namespace.external-dns.*.metadata.0.name[count.index] 126 | } 127 | 128 | spec { 129 | pod_selector { 130 | } 131 | 132 | ingress { 133 | from { 134 | namespace_selector { 135 | match_labels = { 136 | name = kubernetes_namespace.external-dns.*.metadata.0.name[count.index] 137 | } 138 | } 139 | } 140 | } 141 | 142 | policy_types = ["Ingress"] 143 | } 144 | } 145 | 146 | resource "kubernetes_network_policy" "external-dns_allow_monitoring" { 147 | count = local.external-dns["enabled"] && local.external-dns["default_network_policy"] && local.kube-prometheus-stack["enabled"] ? 1 : 0 148 | 149 | metadata { 150 | name = "${kubernetes_namespace.external-dns.*.metadata.0.name[count.index]}-allow-monitoring" 151 | namespace = kubernetes_namespace.external-dns.*.metadata.0.name[count.index] 152 | } 153 | 154 | spec { 155 | pod_selector { 156 | } 157 | 158 | ingress { 159 | ports { 160 | port = "http" 161 | protocol = "TCP" 162 | } 163 | 164 | from { 165 | namespace_selector { 166 | match_labels = { 167 | name = kubernetes_namespace.kube-prometheus-stack.*.metadata.0.name[count.index] 168 | } 169 | } 170 | } 171 | } 172 | 173 | policy_types = ["Ingress"] 174 | } 175 | } 176 | -------------------------------------------------------------------------------- /modules/scaleway/flux2.tf: -------------------------------------------------------------------------------- 1 | ../../flux2.tf -------------------------------------------------------------------------------- /modules/scaleway/helm-dependencies.yaml: -------------------------------------------------------------------------------- 1 | ../../helm-dependencies.yaml -------------------------------------------------------------------------------- /modules/scaleway/k8gb.tf: -------------------------------------------------------------------------------- 1 | ../../k8gb.tf 
-------------------------------------------------------------------------------- /modules/scaleway/karma.tf: -------------------------------------------------------------------------------- 1 | ../../karma.tf -------------------------------------------------------------------------------- /modules/scaleway/keda.tf: -------------------------------------------------------------------------------- 1 | ../../keda.tf -------------------------------------------------------------------------------- /modules/scaleway/kong-crds.tf: -------------------------------------------------------------------------------- 1 | ../../kong-crds.tf -------------------------------------------------------------------------------- /modules/scaleway/kong.tf: -------------------------------------------------------------------------------- 1 | ../../kong.tf -------------------------------------------------------------------------------- /modules/scaleway/kube-prometheus-crd.tf: -------------------------------------------------------------------------------- 1 | ../../kube-prometheus-crd.tf -------------------------------------------------------------------------------- /modules/scaleway/linkerd-viz.tf: -------------------------------------------------------------------------------- 1 | ../../linkerd-viz.tf -------------------------------------------------------------------------------- /modules/scaleway/linkerd.tf: -------------------------------------------------------------------------------- 1 | ../../linkerd.tf -------------------------------------------------------------------------------- /modules/scaleway/linkerd2-cni.tf: -------------------------------------------------------------------------------- 1 | ../../linkerd2-cni.tf -------------------------------------------------------------------------------- /modules/scaleway/locals-scaleway.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | 3 | scaleway_defaults = { 4 | scw_access_key = 
"" 5 | scw_secret_key = "" 6 | scw_default_organization_id = "" 7 | region = "" 8 | } 9 | 10 | scaleway = merge( 11 | local.scaleway_defaults, 12 | var.scaleway 13 | ) 14 | 15 | tags = var.tags 16 | 17 | 18 | } 19 | -------------------------------------------------------------------------------- /modules/scaleway/locals.tf: -------------------------------------------------------------------------------- 1 | ../../locals.tf -------------------------------------------------------------------------------- /modules/scaleway/priority-class.tf: -------------------------------------------------------------------------------- 1 | ../../priority-class.tf -------------------------------------------------------------------------------- /modules/scaleway/prometheus-adapter.tf: -------------------------------------------------------------------------------- 1 | ../../prometheus-adapter.tf -------------------------------------------------------------------------------- /modules/scaleway/prometheus-blackbox-exporter.tf: -------------------------------------------------------------------------------- 1 | ../../prometheus-blackbox-exporter.tf -------------------------------------------------------------------------------- /modules/scaleway/promtail.tf: -------------------------------------------------------------------------------- 1 | ../../promtail.tf -------------------------------------------------------------------------------- /modules/scaleway/reloader.tf: -------------------------------------------------------------------------------- 1 | ../../reloader.tf -------------------------------------------------------------------------------- /modules/scaleway/sealed-secrets.tf: -------------------------------------------------------------------------------- 1 | ../../sealed-secrets.tf -------------------------------------------------------------------------------- /modules/scaleway/templates/cert-manager-cluster-issuers.yaml.tpl: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cert-manager.io/v1 3 | kind: ClusterIssuer 4 | metadata: 5 | name: letsencrypt-staging 6 | spec: 7 | acme: 8 | server: https://acme-staging-v02.api.letsencrypt.org/directory 9 | email: '${acme_email}' 10 | privateKeySecretRef: 11 | name: letsencrypt-staging 12 | solvers: 13 | %{ if acme_dns01_enabled } 14 | %{ if acme_dns01_provider == "route53" } 15 | - dns01: 16 | route53: 17 | hostedZoneID: ${acme_dns01_hosted_zone_id} 18 | %{ if acme_dns01_region != "" } 19 | region: '${acme_dns01_region}' 20 | %{ endif } 21 | accessKeyIDSecretRef: 22 | name: ${acme_dns01_aws_secret} 23 | key: ${acme_dns01_aws_access_key_id} 24 | secretAccessKeySecretRef: 25 | name: ${acme_dns01_aws_secret} 26 | key: ${acme_dns01_aws_access_key_secret} 27 | %{ else } 28 | %{if acme_dns01_provider == "google" } 29 | - dns01: 30 | clouddns: 31 | project: '${acme_dns01_google_project}' 32 | serviceAccountSecretRef: 33 | name: '${acme_dns01_google_secret}' 34 | key: '${acme_dns01_google_service_account_key}' 35 | %{ else } 36 | - dns01: 37 | webhook: 38 | groupName: acme.scaleway.com 39 | solverName: scaleway 40 | config: 41 | accessKeySecretRef: 42 | key: SCW_ACCESS_KEY 43 | name: '${secret_name}' 44 | secretKeySecretRef: 45 | key: SCW_SECRET_KEY 46 | name: '${secret_name}' 47 | %{ endif } 48 | %{ endif } 49 | %{ endif } 50 | %{ if acme_http01_enabled } 51 | - http01: 52 | ingress: 53 | class: '${acme_http01_ingress_class}' 54 | %{ if acme_dns01_enabled } 55 | selector: 56 | matchLabels: 57 | "use-http01-solver": "true" 58 | %{ endif } 59 | %{ endif } 60 | --- 61 | apiVersion: cert-manager.io/v1 62 | kind: ClusterIssuer 63 | metadata: 64 | name: letsencrypt 65 | spec: 66 | acme: 67 | server: https://acme-v02.api.letsencrypt.org/directory 68 | email: '${acme_email}' 69 | privateKeySecretRef: 70 | name: letsencrypt 71 | solvers: 72 | %{ if acme_dns01_enabled } 73 | %{ if acme_dns01_provider == 
"route53" } 74 | - dns01: 75 | route53: 76 | hostedZoneID: ${acme_dns01_hosted_zone_id} 77 | %{ if acme_dns01_region != "" } 78 | region: '${acme_dns01_region}' 79 | %{ endif } 80 | accessKeyIDSecretRef: 81 | name: ${acme_dns01_aws_secret} 82 | key: ${acme_dns01_aws_access_key_id} 83 | secretAccessKeySecretRef: 84 | name: ${acme_dns01_aws_secret} 85 | key: ${acme_dns01_aws_access_key_secret} 86 | %{ else } 87 | %{if acme_dns01_provider == "google" } 88 | - dns01: 89 | clouddns: 90 | project: '${acme_dns01_google_project}' 91 | serviceAccountSecretRef: 92 | name: '${acme_dns01_google_secret}' 93 | key: '${acme_dns01_google_service_account_key}' 94 | %{ else } 95 | - dns01: 96 | webhook: 97 | groupName: acme.scaleway.com 98 | solverName: scaleway 99 | config: 100 | accessKeySecretRef: 101 | key: SCW_ACCESS_KEY 102 | name: '${secret_name}' 103 | secretKeySecretRef: 104 | key: SCW_SECRET_KEY 105 | name: '${secret_name}' 106 | %{ endif } 107 | %{ endif } 108 | %{ endif } 109 | %{ if acme_http01_enabled } 110 | - http01: 111 | ingress: 112 | class: '${acme_http01_ingress_class}' 113 | %{ if acme_dns01_enabled } 114 | selector: 115 | matchLabels: 116 | "use-http01-solver": "true" 117 | %{ endif } 118 | %{ endif } 119 | -------------------------------------------------------------------------------- /modules/scaleway/templates/cert-manager-csi-driver.yaml.tpl: -------------------------------------------------------------------------------- 1 | ../../../templates/cert-manager-csi-driver.yaml.tpl -------------------------------------------------------------------------------- /modules/scaleway/thanos-memcached.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | 3 | thanos-memcached = merge( 4 | local.helm_defaults, 5 | { 6 | chart = local.helm_dependencies[index(local.helm_dependencies.*.name, "oci://registry-1.docker.io/bitnamicharts/memcached")].name 7 | repository = "" 8 | chart_version = 
local.helm_dependencies[index(local.helm_dependencies.*.name, "oci://registry-1.docker.io/bitnamicharts/memcached")].version 9 | name = "thanos-memcached" 10 | namespace = local.thanos["namespace"] 11 | enabled = false 12 | }, 13 | var.thanos-memcached 14 | ) 15 | 16 | values_thanos-memcached = <<-VALUES 17 | architecture: "high-availability" 18 | replicaCount: 2 19 | podAntiAffinityPreset: hard 20 | metrics: 21 | enabled: ${local.kube-prometheus-stack["enabled"]} 22 | serviceMonitor: 23 | enabled: ${local.kube-prometheus-stack["enabled"]} 24 | VALUES 25 | } 26 | 27 | resource "helm_release" "thanos-memcached" { 28 | count = local.thanos-memcached["enabled"] ? 1 : 0 29 | repository = local.thanos-memcached["repository"] 30 | name = local.thanos-memcached["name"] 31 | chart = local.thanos-memcached["chart"] 32 | version = local.thanos-memcached["chart_version"] 33 | timeout = local.thanos-memcached["timeout"] 34 | force_update = local.thanos-memcached["force_update"] 35 | recreate_pods = local.thanos-memcached["recreate_pods"] 36 | wait = local.thanos-memcached["wait"] 37 | atomic = local.thanos-memcached["atomic"] 38 | cleanup_on_fail = local.thanos-memcached["cleanup_on_fail"] 39 | dependency_update = local.thanos-memcached["dependency_update"] 40 | disable_crd_hooks = local.thanos-memcached["disable_crd_hooks"] 41 | disable_webhooks = local.thanos-memcached["disable_webhooks"] 42 | render_subchart_notes = local.thanos-memcached["render_subchart_notes"] 43 | replace = local.thanos-memcached["replace"] 44 | reset_values = local.thanos-memcached["reset_values"] 45 | reuse_values = local.thanos-memcached["reuse_values"] 46 | skip_crds = local.thanos-memcached["skip_crds"] 47 | verify = local.thanos-memcached["verify"] 48 | values = compact([ 49 | local.values_thanos-memcached, 50 | local.thanos-memcached["extra_values"] 51 | ]) 52 | namespace = local.thanos-memcached["namespace"] 53 | 54 | depends_on = [ 55 | helm_release.kube-prometheus-stack, 56 | ] 57 | } 58 | 
-------------------------------------------------------------------------------- /modules/scaleway/thanos-storegateway.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | 3 | thanos-storegateway = { for k, v in var.thanos-storegateway : k => merge( 4 | local.helm_defaults, 5 | { 6 | chart = local.helm_dependencies[index(local.helm_dependencies.*.name, "thanos")].name 7 | repository = local.helm_dependencies[index(local.helm_dependencies.*.name, "thanos")].repository 8 | chart_version = local.helm_dependencies[index(local.helm_dependencies.*.name, "thanos")].version 9 | name = "${local.thanos["name"]}-storegateway-${k}" 10 | create_iam_resources_irsa = true 11 | iam_policy_override = null 12 | enabled = false 13 | default_global_requests = false 14 | default_global_limits = false 15 | bucket = null 16 | region = null 17 | }, 18 | v, 19 | ) } 20 | 21 | values_thanos-storegateway = { for k, v in local.thanos-storegateway : k => merge( 22 | { 23 | values = <<-VALUES 24 | objstoreConfig: 25 | type: S3 26 | config: 27 | bucket: ${v["bucket"]} 28 | region: ${v["region"] == null ? local.scaleway["region"] : v["region"]} 29 | endpoint: s3.${v["region"] == null ? local.scaleway["region"] : v["region"]}.scw.cloud 30 | signature_version2: false 31 | access_key: ${local.scaleway["scw_access_key"]} 32 | secret_key: ${local.scaleway["scw_secret_key"]} 33 | metrics: 34 | enabled: true 35 | serviceMonitor: 36 | enabled: ${local.kube-prometheus-stack["enabled"] ? 
"true" : "false"} 37 | query: 38 | enabled: false 39 | queryFrontend: 40 | enabled: false 41 | compactor: 42 | enabled: false 43 | storegateway: 44 | replicaCount: 2 45 | extraFlags: 46 | - --ignore-deletion-marks-delay=24h 47 | enabled: true 48 | pdb: 49 | create: true 50 | minAvailable: 1 51 | VALUES 52 | }, 53 | v, 54 | ) } 55 | } 56 | 57 | resource "helm_release" "thanos-storegateway" { 58 | for_each = { for k, v in local.thanos-storegateway : k => v if v["enabled"] } 59 | repository = each.value["repository"] 60 | name = each.value["name"] 61 | chart = each.value["chart"] 62 | version = each.value["chart_version"] 63 | timeout = each.value["timeout"] 64 | force_update = each.value["force_update"] 65 | recreate_pods = each.value["recreate_pods"] 66 | wait = each.value["wait"] 67 | atomic = each.value["atomic"] 68 | cleanup_on_fail = each.value["cleanup_on_fail"] 69 | dependency_update = each.value["dependency_update"] 70 | disable_crd_hooks = each.value["disable_crd_hooks"] 71 | disable_webhooks = each.value["disable_webhooks"] 72 | render_subchart_notes = each.value["render_subchart_notes"] 73 | replace = each.value["replace"] 74 | reset_values = each.value["reset_values"] 75 | reuse_values = each.value["reuse_values"] 76 | skip_crds = each.value["skip_crds"] 77 | verify = each.value["verify"] 78 | values = compact([ 79 | local.values_thanos-storegateway[each.key]["values"], 80 | each.value["default_global_requests"] ? local.values_thanos_global_requests : null, 81 | each.value["default_global_limits"] ? local.values_thanos_global_limits : null, 82 | each.value["extra_values"] 83 | ]) 84 | namespace = local.thanos["create_ns"] ? 
kubernetes_namespace.thanos.*.metadata.0.name[0] : local.thanos["namespace"] 85 | 86 | depends_on = [ 87 | helm_release.kube-prometheus-stack, 88 | ] 89 | } 90 | -------------------------------------------------------------------------------- /modules/scaleway/traefik.tf: -------------------------------------------------------------------------------- 1 | ../../traefik.tf -------------------------------------------------------------------------------- /modules/scaleway/variables-scaleway.tf: -------------------------------------------------------------------------------- 1 | variable "scaleway" { 2 | description = "Scaleway provider customization" 3 | type = any 4 | default = {} 5 | } 6 | 7 | variable "kapsule" { 8 | description = "Kapsule cluster inputs" 9 | type = any 10 | default = {} 11 | } 12 | 13 | variable "cert-manager_scaleway_webhook_dns" { 14 | description = "Scaleway webhook dns customization" 15 | type = any 16 | default = {} 17 | } 18 | 19 | variable "tags" { 20 | description = "Map of tags for Scaleway resources" 21 | type = map(any) 22 | default = {} 23 | } 24 | -------------------------------------------------------------------------------- /modules/scaleway/variables.tf: -------------------------------------------------------------------------------- 1 | ../../variables.tf -------------------------------------------------------------------------------- /modules/scaleway/velero.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | velero = merge( 3 | local.helm_defaults, 4 | { 5 | name = local.helm_dependencies[index(local.helm_dependencies.*.name, "velero")].name 6 | chart = local.helm_dependencies[index(local.helm_dependencies.*.name, "velero")].name 7 | repository = local.helm_dependencies[index(local.helm_dependencies.*.name, "velero")].repository 8 | chart_version = local.helm_dependencies[index(local.helm_dependencies.*.name, "velero")].version 9 | namespace = "velero" 10 | service_account_name 
= "velero" 11 | enabled = false 12 | create_bucket = true 13 | bucket = "${var.cluster-name}-velero" 14 | bucket_force_destroy = false 15 | default_network_policy = true 16 | name_prefix = "${var.cluster-name}-velero" 17 | secret_name = "velero-scaleway-credentials" 18 | }, 19 | var.velero 20 | ) 21 | 22 | values_velero = < v.content } : {} 62 | yaml_body = each.value 63 | server_side_apply = true 64 | force_conflicts = true 65 | } 66 | 67 | resource "kubectl_manifest" "calico_crds" { 68 | for_each = local.tigera-operator.enabled && local.tigera-operator.manage_crds ? { for v in local.calico_crds_apply : lower(join("/", compact([v.data.apiVersion, v.data.kind, lookup(v.data.metadata, "namespace", ""), v.data.metadata.name]))) => v.content } : {} 69 | yaml_body = each.value 70 | server_side_apply = true 71 | force_conflicts = true 72 | } 73 | 74 | resource "kubernetes_namespace" "tigera-operator" { 75 | count = local.tigera-operator["enabled"] && local.tigera-operator["create_ns"] ? 1 : 0 76 | 77 | metadata { 78 | labels = { 79 | name = local.tigera-operator["namespace"] 80 | "${local.labels_prefix}/component" = "tigera-operator" 81 | } 82 | 83 | name = local.tigera-operator["namespace"] 84 | } 85 | } 86 | 87 | resource "helm_release" "tigera-operator" { 88 | count = local.tigera-operator["enabled"] ? 
1 : 0 89 | repository = local.tigera-operator["repository"] 90 | name = local.tigera-operator["name"] 91 | chart = local.tigera-operator["chart"] 92 | version = local.tigera-operator["chart_version"] 93 | timeout = local.tigera-operator["timeout"] 94 | force_update = local.tigera-operator["force_update"] 95 | recreate_pods = local.tigera-operator["recreate_pods"] 96 | wait = local.tigera-operator["wait"] 97 | atomic = local.tigera-operator["atomic"] 98 | cleanup_on_fail = local.tigera-operator["cleanup_on_fail"] 99 | dependency_update = local.tigera-operator["dependency_update"] 100 | disable_crd_hooks = local.tigera-operator["disable_crd_hooks"] 101 | disable_webhooks = local.tigera-operator["disable_webhooks"] 102 | render_subchart_notes = local.tigera-operator["render_subchart_notes"] 103 | replace = local.tigera-operator["replace"] 104 | reset_values = local.tigera-operator["reset_values"] 105 | reuse_values = local.tigera-operator["reuse_values"] 106 | skip_crds = local.tigera-operator["skip_crds"] 107 | verify = local.tigera-operator["verify"] 108 | values = [ 109 | local.values_tigera-operator, 110 | local.tigera-operator["extra_values"] 111 | ] 112 | namespace = local.tigera-operator["create_ns"] ? kubernetes_namespace.tigera-operator.*.metadata.0.name[count.index] : local.tigera-operator["namespace"] 113 | 114 | depends_on = [ 115 | kubectl_manifest.prometheus-operator_crds 116 | ] 117 | } 118 | 119 | resource "kubernetes_network_policy" "tigera-operator_default_deny" { 120 | count = local.tigera-operator["create_ns"] && local.tigera-operator["enabled"] && local.tigera-operator["default_network_policy"] ? 
1 : 0 121 | 122 | metadata { 123 | name = "${kubernetes_namespace.tigera-operator.*.metadata.0.name[count.index]}-default-deny" 124 | namespace = kubernetes_namespace.tigera-operator.*.metadata.0.name[count.index] 125 | } 126 | 127 | spec { 128 | pod_selector { 129 | } 130 | policy_types = ["Ingress"] 131 | } 132 | } 133 | 134 | resource "kubernetes_network_policy" "tigera-operator_allow_namespace" { 135 | count = local.tigera-operator["create_ns"] && local.tigera-operator["enabled"] && local.tigera-operator["default_network_policy"] ? 1 : 0 136 | 137 | metadata { 138 | name = "${kubernetes_namespace.tigera-operator.*.metadata.0.name[count.index]}-allow-namespace" 139 | namespace = kubernetes_namespace.tigera-operator.*.metadata.0.name[count.index] 140 | } 141 | 142 | spec { 143 | pod_selector { 144 | } 145 | 146 | ingress { 147 | from { 148 | namespace_selector { 149 | match_labels = { 150 | name = kubernetes_namespace.tigera-operator.*.metadata.0.name[count.index] 151 | } 152 | } 153 | } 154 | } 155 | 156 | policy_types = ["Ingress"] 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /traefik.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | 3 | traefik = merge( 4 | local.helm_defaults, 5 | { 6 | name = local.helm_dependencies[index(local.helm_dependencies.*.name, "traefik")].name 7 | chart = local.helm_dependencies[index(local.helm_dependencies.*.name, "traefik")].name 8 | repository = local.helm_dependencies[index(local.helm_dependencies.*.name, "traefik")].repository 9 | chart_version = local.helm_dependencies[index(local.helm_dependencies.*.name, "traefik")].version 10 | namespace = "traefik" 11 | enabled = false 12 | ingress_cidrs = ["0.0.0.0/0"] 13 | default_network_policy = true 14 | manage_crds = true 15 | }, 16 | var.traefik 17 | ) 18 | 19 | values_traefik = <