├── .github
│   └── workflows
│       └── ci.yml
├── .gitignore
├── LICENSE
├── README.md
├── bin
│   ├── bootstrap-backend.sh
│   ├── bootstrap.sh
│   ├── get-kube-config.sh
│   ├── init-bucket.sh
│   ├── terraform-apply.sh
│   ├── terraform-destroy.sh
│   ├── terraform-init.sh
│   └── terraform-plan.sh
├── common
│   ├── Schedule_template.yml
│   ├── argocd-apps-values.yaml.tftpl
│   ├── argocd-values.yaml.tftpl
│   ├── argocd.tf
│   ├── cert-manager.tf
│   ├── hashicorp-vault.tf
│   ├── issuer.yml.tftpl
│   ├── locals.tf
│   ├── loki-values.yml
│   ├── loki.tf
│   ├── prom-grafana-values.yml
│   ├── prometheus-grafana.tf
│   ├── promtail-values.yml
│   ├── promtail.tf
│   ├── tls-vault.tf
│   ├── variables.tf
│   ├── vault-values.yml
│   ├── velero-credentials
│   ├── velero-values.yml
│   └── velero.tf
├── docker-compose.yaml
├── docs
│   ├── argocd.md
│   ├── cluster-auto.md
│   ├── cluster-manual.md
│   ├── hashicorp-vault.md
│   ├── index.md
│   ├── standalone.md
│   └── velero.md
├── examples
│   └── argocd-repo
│       ├── .gitignore
│       ├── apps
│       │   ├── external-app
│       │   │   └── test.json
│       │   ├── hello-world
│       │   │   ├── preprod.json
│       │   │   ├── prod.json
│       │   │   └── staging.json
│       │   └── secret-helm
│       │       ├── base.yaml
│       │       ├── dev.json
│       │       ├── prod.json
│       │       └── prod.yaml
│       └── helm
│           ├── hello-world
│           │   ├── .helmignore
│           │   ├── Chart.yaml
│           │   ├── README.md
│           │   ├── templates
│           │   │   ├── NOTES.txt
│           │   │   ├── _helpers.tpl
│           │   │   ├── deployment.yaml
│           │   │   ├── service.yaml
│           │   │   └── serviceaccount.yaml
│           │   └── values.yaml
│           └── secret-helm
│               ├── .helmignore
│               ├── Chart.yaml
│               ├── templates
│               │   └── secret.yaml
│               └── values.yaml
├── mkdocs.yml
├── ovh
│   ├── .terraform.lock.hcl
│   ├── argocd-apps-values.yaml.tftpl
│   ├── argocd-values.yaml.tftpl
│   ├── argocd.tf
│   ├── backend.conf.example
│   ├── cert-manager.tf
│   ├── credentials.auto.tfvars.json.template
│   ├── hashicorp-vault.tf
│   ├── ingress-nginx.tf
│   ├── issuer.yml.tftpl
│   ├── kubernetes.tf
│   ├── locals.tf
│   ├── loki-values.yml
│   ├── loki.tf
│   ├── prom-grafana-values.yml
│   ├── prometheus-grafana.tf
│   ├── promtail-values.yml
│   ├── promtail.tf
│   ├── terraform.tf
│   ├── terraform.tfvars.example
│   ├── terraform.tfvars.template
│   ├── tls-vault.tf
│   ├── variables-common.tf
│   ├── variables.tf
│   ├── vault-values.yml
│   ├── velero-credentials
│   ├── velero-values.yml
│   └── velero.tf
├── renovate.json
├── scaleway
│   ├── .terraform.lock.hcl
│   ├── README.md
│   ├── argocd-apps-values.yaml.tftpl
│   ├── argocd-values.yaml.tftpl
│   ├── argocd.tf
│   ├── backend.conf.example
│   ├── cert-manager.tf
│   ├── credentials.auto.tfvars.json.template
│   ├── hashicorp-vault.tf
│   ├── ingress-nginx.tf
│   ├── issuer.yml.tftpl
│   ├── kubernetes.tf
│   ├── locals.tf
│   ├── loki-values.yml
│   ├── loki.tf
│   ├── nginx-values.yml
│   ├── prom-grafana-values.yml
│   ├── prometheus-grafana.tf
│   ├── promtail-values.yml
│   ├── promtail.tf
│   ├── terraform.tf
│   ├── terraform.tfvars.example
│   ├── terraform.tfvars.template
│   ├── tls-vault.tf
│   ├── variables-common.tf
│   ├── variables.tf
│   ├── vault-values.yml
│   ├── velero-credentials
│   ├── velero-values.yml
│   └── velero.tf
├── standalone
│   ├── .terraform.lock.hcl
│   ├── Schedule_template.yml
│   ├── argocd-apps-values.yaml.tftpl
│   ├── argocd-values.yaml.tftpl
│   ├── argocd.tf
│   ├── backend.conf
│   ├── cert-manager.tf
│   ├── hashicorp-vault.tf
│   ├── issuer.yml.tftpl
│   ├── locals.tf
│   ├── prom-grafana-values.yml
│   ├── prometheus-grafana.tf
│   ├── terraform.tf
│   ├── terraform.tfvars.example
│   ├── terraform.tfvars.template
│   ├── tls-vault.tf
│   ├── variables.tf
│   ├── vault-values.yml
│   ├── velero-credentials
│   ├── velero-values.yml
│   └── velero.tf
├── state_bucket
│   ├── .terraform.lock.hcl
│   ├── bucket.tf
│   ├── terraform.tf
│   ├── terraform.tfvars.template
│   └── variables.tf
└── vault
    ├── .kube
    │   └── .gitkeep
    ├── .terraform.lock.hcl
    ├── init.sh
    ├── terraform.tf
    ├── terraform.tfvars.example
    ├── terraform.tfvars.template
    └── variables.tf

--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: ci
2 | on:
3 |   push:
4 |     branches:
5 |       - master
6 |       - main
7 | permissions:
8 |   contents: write
9 | jobs:
10 |   deploy:
11 |     runs-on: ubuntu-latest
12 |     steps:
13 |       - uses: actions/checkout@v4
14 |       - uses: actions/setup-python@v5
15 |         with:
16 |           python-version: 3.x
17 |       - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV
18 |       - uses: actions/cache@v4
19 |         with:
20 |           key: mkdocs-material-${{ env.cache_id }}
21 |           path: .cache
22 |           restore-keys: |
23 |             mkdocs-material-
24 |       - run: pip install mkdocs-material
25 |       - run: mkdocs gh-deploy --force
26 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | env/
3 | *.env
4 | *.conf
5 | .vscode
6 | .kube
7 | 
8 | # Local .terraform directories
9 | **/.terraform/*
10 | 
11 | # .tfstate files
12 | *.tfstate
13 | *.tfstate.*
14 | 
15 | # Crash log files
16 | crash.log
17 | crash.*.log
18 | 
19 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as
20 | # password, private keys, and other secrets. These should not be part of version
21 | # control as they are data points which are potentially sensitive and subject
22 | # to change depending on the environment.
23 | *.tfvars
24 | *.tfvars.json
25 | 
26 | # Ignore override files as they are usually used to override resources locally and so
27 | # are not checked in
28 | override.tf
29 | override.tf.json
30 | *_override.tf
31 | *_override.tf.json
32 | 
33 | # Include override files you do wish to add to version control using negated pattern
34 | # !example_override.tf
35 | 
36 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
37 | *tfplan*
38 | 
39 | # Ignore CLI configuration files
40 | .terraformrc
41 | terraform.rc
42 | 
43 | # Hashicorp Vault keys file
44 | cluster-keys.json
45 | 
46 | # standalone kubeconfig
47 | standalone/kubeconfig
48 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2023 France Université Numérique
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Kubic - Kubernetes Infrastructure as Code
2 | 
3 | [![Kubernetes](https://img.shields.io/static/v1?style=for-the-badge&message=Kubernetes&color=326CE5&logo=Kubernetes&logoColor=FFFFFF&label=)](https://kubernetes.io)
4 | [![NGINX](https://img.shields.io/static/v1?style=for-the-badge&message=NGINX&color=009639&logo=NGINX&logoColor=FFFFFF&label=)](https://kubernetes.github.io/ingress-nginx/)
5 | [![ArgoCD](https://img.shields.io/static/v1?style=for-the-badge&message=ArgoCD&color=EF7B4D&logo=Argo&logoColor=FFFFFF&label=)](https://argo-cd.readthedocs.io)
6 | [![Vault](https://img.shields.io/static/v1?style=for-the-badge&message=Vault&color=000000&logo=Vault&logoColor=FFFFFF&label=)](https://www.vaultproject.io)
7 | [![Terraform](https://img.shields.io/static/v1?style=for-the-badge&message=Terraform&color=7B42BC&logo=Terraform&logoColor=FFFFFF&label=)](https://www.terraform.io)
8 | 
9 | Available on:
10 | 
11 | [![Scaleway](https://img.shields.io/static/v1?style=for-the-badge&message=Scaleway&color=4F0599&logo=Scaleway&logoColor=FFFFFF&label=)](https://www.scaleway.com)
12 | [![OVH](https://img.shields.io/static/v1?style=for-the-badge&message=OVH&color=123F6D&logo=OVH&logoColor=FFFFFF&label=)](https://www.ovh.com)
13 | 
14 | ## Overview
15 | 
16 | Kubic is a cutting-edge, production-ready, multi-cloud-provider Kubernetes infrastructure as code. It integrates an ingress controller, a certificate manager, a monitoring stack, a GitOps tool with complete secret management, and a backup tool.
17 | 
18 | This Terraform code aims at creating a managed k8s cluster set up with:
19 | 
20 | - NGINX Ingress Controller
21 | - Cert-manager
22 | - Prometheus / Grafana
23 | - ArgoCD
24 | - Hashicorp Vault if needed
25 | - ArgoCD Vault Plugin if Vault is deployed
26 | - Velero for backing up the cluster
27 | 
28 | The cluster can be deployed either on OVHCloud or on Scaleway. A new provider can be added by creating a new folder at the root of the repository and following the same architecture as the existing providers.
29 | 
30 | ## Repository architecture
31 | 
32 | ```bash
33 | .
34 | ├── docs         # Folder containing the documentation
35 | ├── state_bucket # Folder containing the Terraform to create a S3 bucket for the Terraform state
36 | ├── vault        # Folder containing the Terraform to configure Hashicorp Vault
37 | ├── common       # Folder containing the Terraform which is common to all the providers
38 | ├── ovh          # Folder declaring Terraform to deploy a cluster on OVHCloud
39 | ├── scaleway     # Folder declaring Terraform to deploy a cluster on Scaleway
40 | ├── examples     # Folder containing examples of applications to deploy with ArgoCD
41 | ├── .gitignore
42 | ├── LICENSE
43 | └── README.md
44 | ```
45 | 
46 | All files contained in the `common` folder are symbolically linked into the `ovh` and `scaleway` folders to avoid code duplication.
47 | 
48 | ## Getting started
49 | 
50 | - Create your cluster:
51 |   - [Manual deployment](docs/cluster-manual.md)
52 |   - [Automatic deployment](docs/cluster-auto.md)
53 | - [Configure Hashicorp Vault](docs/hashicorp-vault.md)
54 | - [Configure ArgoCD](docs/argocd.md)
55 | - [Configure Velero](docs/velero.md)
56 | - [Standalone use](docs/standalone.md)
57 | 
58 | ## Contributing
59 | 
60 | Currently, only OVH and Scaleway are supported as providers.
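As a quick sketch of the expected layout (using a hypothetical provider name, `myprovider`, purely for illustration), scaffolding a new provider folder could start like this:

```bash
# "myprovider" is a hypothetical name used for illustration only
mkdir myprovider && cd myprovider

# Symlink every file from common, as the ovh and scaleway folders do
for f in ../common/*; do ln -s "$f" .; done

# Then add the provider-specific files described in the guidelines below
touch terraform.tf kubernetes.tf ingress-nginx.tf
```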
Here are the guidelines to add a new provider:
61 | 
62 | - Create a new folder in the root of the repository, with the name of the provider;
63 | - Create a symlink for all files in `common` to your new folder;
64 | - Create a `terraform.tf` file containing:
65 |   - Terraform configuration with a `s3` backend;
66 |   - The `helm`, `kubernetes` and `kubectl` providers along with the provider(s) you need, correctly configured;
67 | - A `kubernetes.tf` file creating the cluster, with an output named `kubeconfig` that contains the actual kubeconfig for the cluster;
68 | - An `ingress-nginx.tf` file, deploying the [ingress-nginx ingress controller](https://kubernetes.github.io/ingress-nginx) and configuring it with an external IP (you may need to create a load balancer on your provider). The ingress IP should be a Terraform output named `ingress_ip`;
69 |   - This must also create a `null_resource` named `ingress-nginx` that `depends_on` the node pool of your cluster (this is to get a consistent dependency chain for Terraform)
70 |   - The controller must have at least the following configuration:
71 | 
72 | ```yaml
73 | controller:
74 |   metrics:
75 |     enabled: true
76 |     serviceMonitor:
77 |       additionalLabels:
78 |         release: prometheus
79 |       enabled: true
80 |   extraArgs:
81 |     enable-ssl-passthrough: true
82 |   admissionWebhooks:
83 |     timeoutSeconds: 30
84 | ```
85 | 
86 | - Edit the `docker-compose.yaml` and create a service for your provider (simply adapt the existing code).
87 | 
--------------------------------------------------------------------------------
/bin/bootstrap-backend.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | # Retrieve the directory path from the first argument
4 | directory=$1
5 | 
6 | # Check if the directory exists
7 | if [ ! -d "$directory" ]; then
8 |     echo "The directory '$directory' does not exist."
9 |     exit 1
10 | fi
11 | 
12 | # Check if the backend.conf file already exists and ask if the user wants to overwrite it
13 | if [ -e "$directory/backend.conf" ]; then
14 |     read -p "Are you sure you want to overwrite your backend.conf? (y/n) " -n 1 -r
15 |     echo
16 |     if [[ ! $REPLY =~ ^[Yy]$ ]]; then
17 |         echo "Aborting..."
18 |         exit 1
19 |     fi
20 | fi
21 | 
22 | # Ask for the required values
23 | read -p "Bucket name: " bucket
24 | read -p "Region: " region
25 | read -p "Access Key: " access_key
26 | read -p "Secret Key: " secret_key
27 | read -p "Endpoint: " endpoint
28 | read -p "Skip Region Validation (true/false): " skip_region_validation
29 | read -p "Skip Credentials Validation (true/false): " skip_credentials_validation
30 | 
31 | echo "The key used will be terraform.tfstate by default."
32 | read -p "Would you like to use a different key? (y/n): " -n 1 -r
33 | echo
34 | if [[ ! $REPLY =~ ^[Nn]$ ]]; then
35 |     read -p "State key: " key
36 | else
37 |     key="terraform.tfstate"
38 | fi
39 | 
40 | # Write the backend configurations
41 | echo "bucket = \"$bucket\"" >$directory/backend.conf
42 | 
43 | echo "key = \"$key\"" >>$directory/backend.conf
44 | 
45 | echo "region = \"$region\"" >>$directory/backend.conf
46 | 
47 | echo "access_key = \"$access_key\"" >>$directory/backend.conf
48 | 
49 | echo "secret_key = \"$secret_key\"" >>$directory/backend.conf
50 | 
51 | echo "endpoint = \"$endpoint\"" >>$directory/backend.conf
52 | 
53 | echo "skip_region_validation = $skip_region_validation" >>$directory/backend.conf
54 | 
55 | echo "skip_credentials_validation = $skip_credentials_validation" >>$directory/backend.conf
56 | 
57 | echo "Congratulations! Your backend configuration has been written to $directory/backend.conf."
58 | 
--------------------------------------------------------------------------------
/bin/bootstrap.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | # Define function sed_inplace that works on both GNU and BSD sed
4 | function sed_inplace() {
5 |     local file=$2
6 |     local command=$1
7 |     local tmp_file=$(mktemp)
8 | 
9 |     sed -e "${command}" "${file}" >"${tmp_file}"
10 |     mv "${tmp_file}" "${file}"
11 | }
12 | 
13 | # Retrieve the directory path from the first argument
14 | directory=$1
15 | 
16 | # Check if the directory exists
17 | if [ ! -d "$directory" ]; then
18 |     echo "The directory '$directory' does not exist."
19 |     exit 1
20 | fi
21 | 
22 | var_files=("$directory/variables.tf" "$directory/variables-common.tf")
23 | tfvars_file="$directory/terraform.tfvars"
24 | 
25 | # Ensure docker is installed and running
26 | read -p "Please make sure you have docker installed and running. Press any key to continue..." -n 1 -r -s
27 | echo
28 | 
29 | # Create and clean the $all_variables file
30 | all_variables=$(mktemp)
31 | 
32 | # Concatenate the contents of the elements of var_files into a single file
33 | for var_file in "${var_files[@]}"; do
34 |     if [ -f "$var_file" ]; then
35 |         cat "$var_file" >>$all_variables
36 |         echo "" >>$all_variables
37 |     fi
38 | done
39 | 
40 | # Create the tfvars file from its template
41 | if [ ! -f "${tfvars_file}.template" ]; then
42 |     echo "The file '${tfvars_file}.template' does not exist."
43 | fi
44 | if [ -f "$tfvars_file" ]; then
45 |     read -p "Are you sure you want to overwrite $tfvars_file? (y/n) " -n 1 -r
46 |     echo
47 |     if [[ ! $REPLY =~ ^[Yy]$ ]]; then
48 |         echo "Aborting..."
49 |         exit 0
50 |     fi
51 | fi
52 | cat "${tfvars_file}.template" >"$tfvars_file"
53 | 
54 | # Retrieve the names of the variables declared in the file
55 | varnames=$(grep "^variable" $all_variables | sed 's/^.*variable "\(.*\)".*$/\1/p' | awk '!a[$0]++')
56 | 
57 | # Create a temporary file that will contain the sorted and unique variables
58 | tmp_file=$(mktemp)
59 | while read -r varname; do
60 |     sed -n "/variable \"$varname\"/,/^}/p" $all_variables | sed -n '1,/^}/p' >>$tmp_file
61 |     echo "" >>$tmp_file
62 | done <<<"$varnames"
63 | 
64 | # Delete duplicates
65 | cat $tmp_file >$all_variables
66 | # Delete the temporary file
67 | rm $tmp_file
68 | 
69 | # Parse the $all_variables file and extract the variable names and descriptions
70 | mapfile -t variable_names < <(sed -n 's/^.*variable "\(.*\)".*$/\1/p' $all_variables)
71 | mapfile -t variable_descs < <(sed -n 's/^.*description *= *"\(.*\)".*$/\1/p' $all_variables)
72 | mapfile -t variable_types < <(sed -n 's/^.*type *= *\(.*\)$/\1/p' $all_variables)
73 | 
74 | # Loop through each variable and prompt the user for a value
75 | for i in "${!variable_names[@]}"; do
76 |     var_name=${variable_names[i]}
77 |     var_desc=${variable_descs[i]}
78 |     var_type=${variable_types[i]}
79 | 
80 |     # Extract the default value for the current variable (if available)
81 |     var_default=$(sed -n "/variable \"$var_name\"/,/^}/p" $all_variables | grep "default" | sed -E 's/^.*default *= *"?([^"]*)"?.*$/\1/')
82 | 
83 |     # If the var_name doesn't exist in the .tfvars file, continue to the next loop iteration
84 |     if ! grep -q "$var_name" $tfvars_file; then
85 |         continue
86 |     fi
87 | 
88 |     # Check if the variable has a default value
89 |     if [ -n "$var_default" ]; then
90 |         default_variables+=("$var_name")
91 |         default_descs+=("$var_desc")
92 |         default_defaults+=("$var_default")
93 |         default_types+=("$var_type")
94 |     else
95 |         non_default_variables+=("$var_name")
96 |         non_default_descs+=("$var_desc")
97 |         non_default_types+=("$var_type")
98 |     fi
99 | done
100 | 
101 | # Loop through variables with no default values first
102 | for i in "${!non_default_variables[@]}"; do
103 |     var_name=${non_default_variables[i]}
104 |     var_desc=${non_default_descs[i]}
105 |     var_type=${non_default_types[i]}
106 | 
107 |     if [ "$var_name" == "argocd_password" ]; then
108 |         read -p "ArgoCD password : " argocd_password
109 |         while [ -z "$argocd_password" ]; do
110 |             read -p "You have to specify a value for \"argocd_password\": " argocd_password
111 |         done
112 |         DOCKER_USER="$(id -u):$(id -g)" \
113 |             argocd_password_hashed="$(docker-compose run --rm argocd-cli argocd account bcrypt --password $argocd_password)"
114 | 
115 |         # Store the variable name and value in the $tfvars_file file if the variable is declared in the file
116 |         if grep -q "argocd_password" "$tfvars_file"; then
117 |             sed_inplace "s%^argocd_password *= *\".*\"%argocd_password=\"$argocd_password_hashed\"%" "$tfvars_file"
118 |         fi
119 | 
120 |     else
121 |         # Add (true/false) to the description if the variable is a boolean
122 |         if [ "$var_type" == "bool" ]; then
123 |             var_desc="$var_desc (true/false)"
124 |         fi
125 | 
126 |         # Prompt the user for a value
127 |         read -p "$var_desc: " var_value
128 | 
129 |         # If the variable is a boolean and the user input is not true or false, prompt again
130 |         if [ "$var_type" == "bool" ]; then
131 |             while [ "$var_value" != "true" ] && [ "$var_value" != "false" ]; do
132 |                 read -p "Your value has to be true or false: " var_value
133 |             done
134 |         fi
135 | 
136 |         # If the user entered nothing, prompt again
137 |         while [ -z "$var_value" ]; do
138 |             read -p "You have to specify a value for \"$var_name\": " var_value
139 |         done
140 | 
141 |         # Store the variable name and value in the $tfvars_file file if the variable is declared in the file
142 |         if grep -q "$var_name" "$tfvars_file"; then
143 |             if [ "$var_type" == "string" ]; then
144 |                 sed_inplace "s%^$var_name *= *\".*\"%$var_name=\"$var_value\"%" "$tfvars_file"
145 |             else
146 |                 sed_inplace "s%^$var_name *= *.*%$var_name=$var_value%" "$tfvars_file"
147 |             fi
148 |         fi
149 | 
150 |     fi
151 | done
152 | 
153 | # Loop through variables with default values next
154 | for i in "${!default_variables[@]}"; do
155 |     var_name=${default_variables[i]}
156 |     var_desc=${default_descs[i]}
157 |     var_default=${default_defaults[i]}
158 |     var_type=${default_types[i]}
159 | 
160 |     if [ "$var_name" == "issuers" ]; then
161 |         read -p "Let's encrypt email (default \"admin@admin.com\", leave blank): " letsencrypt_email
162 |         if [ -z "$letsencrypt_email" ]; then
163 |             letsencrypt_email="admin@admin.com"
164 |         fi
165 |         sed_inplace "s%^ email *= *\".*\"% email=\"$letsencrypt_email\"%" "$tfvars_file"
166 |         continue
167 |     fi
168 | 
169 |     # Add (true/false) to the description if the variable is a boolean
170 |     if [ "$var_type" == "bool" ]; then
171 |         var_desc="$var_desc (true/false)"
172 |     fi
173 | 
174 |     # Display the default value to the user
175 |     read -p "$var_desc (default \"$var_default\", leave blank): " var_value
176 | 
177 |     # If the variable is a boolean and the user input is not true, false, or empty, prompt again
178 |     if [ "$var_type" == "bool" ]; then
179 |         while [ "$var_value" != "true" ] && [ "$var_value" != "false" ] && [ -n "$var_value" ]; do
180 |             read -p "Your value has to be true or false, leave blank for the default value: " var_value
181 |         done
182 |     fi
183 | 
184 |     # If the user entered nothing, use the default value
185 |     if [ -z "$var_value" ]; then
186 |         var_value="$var_default"
187 |     fi
188 | 
189 |     # Store the variable name and value in the $tfvars_file file if the variable is declared in the file
190 |     if grep -q "$var_name" "$tfvars_file"; then
191 |         if [ "$var_type" == "string" ]; then
192 |             sed_inplace "s%^$var_name *= *\".*\"%$var_name=\"$var_value\"%" "$tfvars_file"
193 |         else
194 |             sed_inplace "s%^$var_name *= *.*%$var_name=$var_value%" "$tfvars_file"
195 |         fi
196 |     fi
197 | done
198 | 
199 | # Ensure the temporary file is removed when the script exits
200 | echo "Congratulations! Your terraform.tfvars file has been written to $tfvars_file."
201 | 
202 | trap 'rm -f $all_variables' EXIT
203 | 
--------------------------------------------------------------------------------
/bin/get-kube-config.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -eo pipefail
3 | 
4 | # Retrieve the directory path from the first argument
5 | directory=$1
6 | 
7 | # Check if the directory exists
8 | if [ ! -d "$directory" ]; then
9 |     echo "The directory '$directory' does not exist."
10 |     exit 1
11 | fi
12 | 
13 | DOCKER_USER="$(id -u):$(id -g)" \
14 |     docker-compose run --rm tf-$directory output -raw kubeconfig
15 | 
--------------------------------------------------------------------------------
/bin/init-bucket.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -eo pipefail
3 | 
4 | echo "Initializing terraform..."
5 | # Launch terraform init
6 | DOCKER_USER="$(id -u):$(id -g)" \
7 |     docker-compose run --rm tf-bucket-ovh init -input=false -reconfigure
8 | 
9 | # Set AWS credentials to dummy values if not already defined (needed by OVH provider)
10 | 
11 | if [ -z "$AWS_ACCESS_KEY_ID" ]; then
12 |     export AWS_ACCESS_KEY_ID="no_need_to_define_an_access_key"
13 | fi
14 | if [ -z "$AWS_SECRET_ACCESS_KEY" ]; then
15 |     export AWS_SECRET_ACCESS_KEY="no_need_to_define_a_secret_key"
16 | fi
17 | 
18 | echo "Planning bucket creation and configuration..."
19 | # Launch terraform plan
20 | DOCKER_USER="$(id -u):$(id -g)" \
21 |     docker-compose run --rm tf-bucket-ovh plan -out=tfplan
22 | 
23 | echo "Applying the plan..."
24 | # Launch terraform apply
25 | DOCKER_USER="$(id -u):$(id -g)" \
26 |     docker-compose run --rm tf-bucket-ovh apply -input=false tfplan
27 | 
28 | echo "Bucket created, retrieving credentials..."
29 | 
30 | # Retrieve the access key, secret key and bucket name
31 | bucket_name=$(DOCKER_USER="$(id -u):$(id -g)" \
32 |     docker-compose run --rm tf-bucket-ovh output -raw bucket_name)
33 | 
34 | access_key=$(DOCKER_USER="$(id -u):$(id -g)" \
35 |     docker-compose run --rm tf-bucket-ovh output -raw access_key)
36 | 
37 | secret_key=$(DOCKER_USER="$(id -u):$(id -g)" \
38 |     docker-compose run --rm tf-bucket-ovh output -raw secret_key)
39 | 
40 | echo "Here are your credentials:"
41 | echo " - Bucket name: $bucket_name"
42 | echo " - Access key: $access_key"
43 | echo " - Secret key: $secret_key"
44 | 
45 | echo "You can now use these credentials to configure your S3 client."
46 | 
47 | # Unset AWS credentials if they were not defined before
48 | if [ "$AWS_ACCESS_KEY_ID" = "no_need_to_define_an_access_key" ]; then
49 |     unset AWS_ACCESS_KEY_ID
50 | fi
51 | if [ "$AWS_SECRET_ACCESS_KEY" = "no_need_to_define_a_secret_key" ]; then
52 |     unset AWS_SECRET_ACCESS_KEY
53 | fi
54 | 
--------------------------------------------------------------------------------
/bin/terraform-apply.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -eo pipefail
3 | 
4 | # Retrieve the directory path from the first argument
5 | directory=$1
6 | 
7 | # Check if the directory exists
8 | if [ ! -d "$directory" ]; then
9 |     echo "The directory '$directory' does not exist."
10 |     exit 1
11 | fi
12 | 
13 | # Ensure tfplan file exists
14 | if [ ! -f "$directory/tfplan" ]; then
15 |     echo "The tfplan file does not exist. Please run terraform plan first, with the following command: 'bin/terraform-plan.sh "$directory"'."
16 |     exit 1
17 | fi
18 | 
19 | DOCKER_USER="$(id -u):$(id -g)" \
20 |     docker-compose run --rm tf-$directory apply -input=false -auto-approve "/app/$directory/tfplan"
21 | 
22 | ingress_ip=$(DOCKER_USER="$(id -u):$(id -g)" \
23 |     docker-compose run --rm tf-$directory output -raw ingress_ip)
24 | 
25 | echo ""
26 | echo "Your ingress is now running and available at $ingress_ip, please update your DNS accordingly, at least for the following domains:"
27 | for domain in $(grep -Eo '^[^=]+hostname[^=]*=([^=]*)' $directory/terraform.tfvars | cut -d '"' -f 2); do
28 |     echo " - $domain"
29 | done
30 | echo ""
31 | echo "You may now get your kubeconfig file using the following command: 'bin/get-kube-config.sh "$directory"'."
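# A sketch of the typical end-to-end flow these helper scripts support
# (assuming the "ovh" provider folder; "scaleway" works the same way):
#
#   bin/bootstrap-backend.sh ovh   # interactively writes ovh/backend.conf
#   bin/bootstrap.sh ovh           # interactively writes ovh/terraform.tfvars
#   bin/terraform-init.sh ovh      # terraform init against the S3 backend
#   bin/terraform-plan.sh ovh      # writes the plan to ovh/tfplan
#   bin/terraform-apply.sh ovh     # applies ovh/tfplan and prints the ingress IP
#   bin/get-kube-config.sh ovh     # prints the cluster kubeconfig to stdout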
32 | 
--------------------------------------------------------------------------------
/bin/terraform-destroy.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -eo pipefail
3 | 
4 | # Retrieve the directory path from the first argument
5 | directory=$1
6 | 
7 | # Check if the directory exists
8 | if [ ! -d "$directory" ]; then
9 |     echo "The directory '$directory' does not exist."
10 |     exit 1
11 | fi
12 | 
13 | DOCKER_USER="$(id -u):$(id -g)" \
14 |     docker-compose run --rm tf-$directory destroy -input=false -auto-approve
15 | 
--------------------------------------------------------------------------------
/bin/terraform-init.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -eo pipefail
3 | 
4 | # Retrieve the directory path from the first argument
5 | directory=$1
6 | 
7 | # Check if the directory exists
8 | if [ ! -d "$directory" ]; then
9 |     echo "The directory '$directory' does not exist."
10 |     exit 1
11 | fi
12 | 
13 | DOCKER_USER="$(id -u):$(id -g)" \
14 |     docker-compose run --rm tf-$directory init -backend-config=backend.conf -reconfigure
15 | 
--------------------------------------------------------------------------------
/bin/terraform-plan.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -eo pipefail
3 | 
4 | # Retrieve the directory path from the first argument
5 | directory=$1
6 | 
7 | # Check if the directory exists
8 | if [ ! -d "$directory" ]; then
9 |     echo "The directory '$directory' does not exist."
10 |     exit 1
11 | fi
12 | 
13 | DOCKER_USER="$(id -u):$(id -g)" \
14 |     docker-compose run --rm tf-$directory plan -out=/app/$directory/tfplan -input=false
15 | 
--------------------------------------------------------------------------------
/common/Schedule_template.yml:
--------------------------------------------------------------------------------
1 | apiVersion: velero.io/v1
2 | kind: Schedule
3 | metadata:
4 |   name: daily-snapshot # ENTER NAME HERE
5 |   namespace: velero
6 | spec:
7 |   schedule: '0 0 * * *' # Every day at 00:00
8 |   template:
9 |     defaultVolumesToRestic: false
10 | 
11 |     includedNamespaces:
12 |       - hashicorp-vault # SET NAMESPACE HERE
13 | 
14 |     ttl: 168h0m0s # Keep the backup 7 days
15 |     storageLocation: default
16 | 
--------------------------------------------------------------------------------
/common/argocd-apps-values.yaml.tftpl:
--------------------------------------------------------------------------------
1 | ---
2 | applicationsets:
3 |   # See https://github.com/argoproj/argo-helm/commit/237493a4ab1478a0c33cb1253767f65ce4ec007c
4 |   %{ if tonumber(split(".", argocd_apps_version).0) < 2 }- name: apps%{ else }apps:%{ endif }
5 |     namespace: argocd
6 |     generators:
7 |       - git:
8 |           repoURL: ${repo_url}
9 |           revision: HEAD
10 |           files:
11 |             - path: "apps/**/*.json"
12 |     goTemplate: true
13 |     template:
14 |       metadata:
15 |         name: '{{ index .path.segments 1 }}{{ if ne (index .path.segments 1) (index (splitList "." .path.filename) 0) }}-{{ index (splitList "." .path.filename) 0 }}{{ end }}'
16 |       spec:
17 |         project: default
18 |         source:
19 |           repoURL: '{{ default "${repo_url}" .externalRepoURL }}'
20 |           targetRevision: HEAD
21 |           path: "helm/{{ index .path.segments 1 }}"
22 |           plugin:
23 |             name: avp-helm
24 |             env:
25 |               - name: HELM_ARGS
26 |                 value: "{{ range $i, $value := .valuesFiles }}{{ if $i }} {{ end }}-f ../../apps/{{ index $.path.segments 1 }}/{{ $value }}{{ end }}"
27 |               - name: AVP_SECRET
28 |                 value: '{{ default "default-vault-credentials" .vaultCredentials }}'
29 |         destination:
30 |           server: https://kubernetes.default.svc
31 |           namespace: '{{ index .path.segments 1 }}{{ if ne (index .path.segments 1) (index (splitList "." .path.filename) 0) }}-{{ index (splitList "." .path.filename) 0 }}{{ end }}'
32 |         syncPolicy:
33 |           automated:
34 |             prune: true
35 |             selfHeal: false
36 |           syncOptions:
37 |             - CreateNamespace=true
38 |     syncPolicy:
39 |       preserveResourcesOnDeletion: false
40 | 
--------------------------------------------------------------------------------
/common/argocd-values.yaml.tftpl:
--------------------------------------------------------------------------------
1 | ---
2 | crds:
3 |   keep: false
4 | 
5 | server:
6 |   ingress:
7 |     enabled: true
8 |     https: true
9 |     annotations:
10 |       cert-manager.io/cluster-issuer: ${cluster_issuer_name}
11 |       nginx.ingress.kubernetes.io/ssl-passthrough: "true"
12 |       nginx.ingress.kubernetes.io/backend-protocol: HTTPS
13 |     ingressClassName: nginx
14 |     hosts:
15 |       - ${host_name}
16 |     tls:
17 |       - hosts:
18 |           - ${host_name}
19 |         secretName: argocd-secret # do not change, this is provided by Argo CD
20 | 
21 | %{ if install_hashicorp_vault }
22 | repoServer:
23 |   # Each of the embedded YAMLs inside the argocd-cmp-cm ConfigMap will be mounted into its respective plugin sidecar
24 |   volumes:
25 |     - configMap:
26 |         name: argocd-cmp-cm
27 |       name: argocd-cmp-cm
28 |     - name: custom-tools
29 |       emptyDir: {}
30 | 
31 |   # Download tools
32 |   initContainers:
33 |     - name: download-tools
34 |       image: registry.access.redhat.com/ubi8
35 |       env:
36 |         - name: AVP_VERSION
37 |           value: ${avp_version}
38 |       command: [sh, -c]
39 |       args:
40 |         - >-
41 |           curl -L https://github.com/argoproj-labs/argocd-vault-plugin/releases/download/v$(AVP_VERSION)/argocd-vault-plugin_$(AVP_VERSION)_linux_amd64 -o argocd-vault-plugin &&
42 |           chmod +x argocd-vault-plugin &&
43 |           mv argocd-vault-plugin /custom-tools/
44 | 
45 |       volumeMounts:
46 |         - mountPath: /custom-tools
47 |           name: custom-tools
48 | 
49 |   extraContainers:
50 |     # argocd-vault-plugin with plain YAML
51 |     - name: avp
52 |       command: [/var/run/argocd/argocd-cmp-server]
53 |       image: quay.io/argoproj/argocd:{{ .Chart.AppVersion }}
54 |       securityContext:
55 |         runAsNonRoot: true
56 |         runAsUser: 999
57 |       volumeMounts:
58 |         - mountPath: /var/run/argocd
59 |           name: var-files
60 |         - mountPath: /home/argocd/cmp-server/plugins
61 |           name: plugins
62 |         - mountPath: /tmp
63 |           name: tmp
64 | 
65 |         # Register plugins into sidecar
66 |         - mountPath: /home/argocd/cmp-server/config/plugin.yaml
67 |           subPath: avp.yaml
68 |           name: argocd-cmp-cm
69 | 
70 |         # Important: Mount tools into $PATH
71 |         - name: custom-tools
72 |           subPath: argocd-vault-plugin
73 |           mountPath: /usr/local/bin/argocd-vault-plugin
74 | 
75 |     # argocd-vault-plugin with Helm
76 |     - name: avp-helm
77 |       command: [/var/run/argocd/argocd-cmp-server]
78 |       image: quay.io/argoproj/argocd:{{ .Chart.AppVersion }}
79 |       securityContext:
80 |         runAsNonRoot: true
81 |         runAsUser: 999
82 |       volumeMounts:
83 |         - mountPath: /var/run/argocd
84 |           name: var-files
85 |         - mountPath: /home/argocd/cmp-server/plugins
86 |           name: plugins
87 |         - mountPath: /tmp
88 |           name: tmp
89 | 
90 |         # Register plugins into sidecar
91 |         - mountPath: /home/argocd/cmp-server/config/plugin.yaml
92 |           subPath: avp-helm.yaml
93 |           name: argocd-cmp-cm
94 | 
95 |         # Important: Mount tools into $PATH
96 |         - name: custom-tools
97 |           subPath: argocd-vault-plugin
98 |           mountPath: /usr/local/bin/argocd-vault-plugin
99 | 
100 |     # argocd-vault-plugin with Kustomize
101 |     - name: avp-kustomize
102 |       command: [/var/run/argocd/argocd-cmp-server]
103 |       image: quay.io/argoproj/argocd:{{ .Chart.AppVersion }}
104 |       securityContext:
105 |         runAsNonRoot: true
106 |         runAsUser: 999
107 |       volumeMounts:
108 |         - mountPath: /var/run/argocd
109 |           name: var-files
110 |         - mountPath: /home/argocd/cmp-server/plugins
111 |           name: plugins
112 |         - mountPath: /tmp
113 |           name: tmp
114 | 
115 |         # Register plugins into sidecar
116 |         - mountPath: /home/argocd/cmp-server/config/plugin.yaml
117 |           subPath: avp-kustomize.yaml
118 |           name: argocd-cmp-cm
119 | 
120 |         # Important: Mount tools into $PATH
121 |         - name: custom-tools
122 |           subPath: argocd-vault-plugin
123 |           mountPath: /usr/local/bin/argocd-vault-plugin
124 | %{ endif }
125 | 
126 | configs:
127 |   secret:
128 |     argocdServerAdminPassword: ${password}
129 |   repositories:
130 |     applications-repo:
131 |       name: applications-repo
132 |       type: git
133 |       url: ${repo_url}
134 |       username: ${repo_username}
135 |       password: ${repo_password}
136 | %{ if install_hashicorp_vault }
137 |   cmp:
138 |     create: true
139 |     plugins:
140 |       avp:
141 |         allowConcurrency: true
142 |         discover:
143 |           find:
144 |             command:
145 |               - sh
146 |               - "-c"
147 |               - "find . -name '*.yaml' | xargs -I {} grep \" issuer }
19 |   provider  = kubectl
20 |   yaml_body = templatefile("issuer.yml.tftpl", {
21 |     name                    = each.value.name
22 |     email                   = each.value.email
23 |     server                  = each.value.server
24 |     private_key_secret_name = each.value.private_key_secret_name
25 |   })
26 | 
27 |   depends_on = [
28 |     helm_release.cert_manager
29 |   ]
30 | }
--------------------------------------------------------------------------------
/common/hashicorp-vault.tf:
--------------------------------------------------------------------------------
1 | resource "kubernetes_namespace" "hashicorp-vault" {
2 |   count = (var.install_hashicorp_vault) ? 1 : 0
3 |   metadata {
4 |     name = "hashicorp-vault"
5 |   }
6 | 
7 |   depends_on = [
8 |     null_resource.ingress-nginx
9 |   ]
10 | }
11 | 
12 | resource "helm_release" "hashicorp-vault" {
13 |   count     = (var.install_hashicorp_vault) ? 1 : 0
14 |   name      = "hashicorp-vault"
15 |   namespace = "hashicorp-vault"
16 | 
17 |   repository = "https://helm.releases.hashicorp.com"
18 |   chart      = "vault"
19 |   version    = var.vault_version
20 | 
21 |   values = [templatefile("${path.module}/vault-values.yml", {
22 |     kubernetes_secret_name_tls_ca    = kubernetes_secret.tls_ca.metadata.0.name
23 |     kubernetes_secret_name_tls_cert  = kubernetes_secret.tls.metadata.0.name
24 |     kubernetes_vault_ui_service_type = var.kubernetes_vault_ui_service_type
25 | 
26 |     vault_data_storage_size     = var.vault_data_storage_size
27 |     vault_leader_tls_servername = var.vault_leader_tls_servername
28 |     vault_seal_method           = var.vault_seal_method
29 |     vault_ui                    = var.vault_ui
30 | 
31 |     cluster_issuer_name         = var.main_cluster_issuer_name
32 |     vault_server_hostname       = var.vault_server_hostname
33 |     enable_vault_server_ingress = var.vault_server_hostname != ""
34 |   })]
35 | 
36 |   depends_on = [
37 |     kubernetes_namespace.hashicorp-vault
38 |   ]
39 | }
40 | 
--------------------------------------------------------------------------------
/common/issuer.yml.tftpl:
--------------------------------------------------------------------------------
1 | apiVersion: cert-manager.io/v1
2 | kind: ClusterIssuer
3 | metadata:
4 |   name: ${name}
5 | spec:
6 |   acme:
7 |     email: ${email}
8 |     server: ${server}
9 |     privateKeySecretRef:
10 |       name: ${private_key_secret_name}
11 |     solvers:
12 |       - http01:
13 |           ingress:
14 |             class: nginx
--------------------------------------------------------------------------------
/common/locals.tf:
--------------------------------------------------------------------------------
1 | locals {
2 |   generate_tls_certs = var.install_hashicorp_vault && (var.vault_api_ca_bundle == null || var.vault_api_signed_certificate == null || var.vault_api_private_key == null)
3 | }
4 | 
--------------------------------------------------------------------------------
/common/loki-values.yml:
--------------------------------------------------------------------------------
1 | loki:
2 |   auth_enabled: false
3 |   storage:
4 |     bucketNames:
5 |       chunks: ${loki_s3_chunks_bucket_name}
6 |       ruler: ${loki_s3_ruler_bucket_name}
7 |       admin: ${loki_s3_admin_bucket_name}
8 |     type: s3
9 |     s3:
10 |       endpoint: ${loki_s3_bucket_endpoint}
11 |       region: ${loki_s3_bucket_region}
12 |       secretAccessKey: ${loki_s3_secret_access_key}
13 |       accessKeyId: ${loki_s3_access_key_id}
14 |       s3ForcePathStyle: true
15 | 
--------------------------------------------------------------------------------
/common/loki.tf:
--------------------------------------------------------------------------------
1 | resource "helm_release" "loki" {
2 |   name             = "loki"
3 |   namespace        = "loki"
4 |   create_namespace = true
5 |   count            = var.loki_enabled ? 1 : 0
6 | 
7 |   repository = "https://grafana.github.io/helm-charts"
8 |   chart      = "loki"
9 |   version    = var.loki_version
10 | 
11 |   values = [templatefile("${path.module}/loki-values.yml", {
12 |     loki_s3_chunks_bucket_name = var.loki_s3_chunks_bucket_name
13 |     loki_s3_ruler_bucket_name  = var.loki_s3_ruler_bucket_name
14 |     loki_s3_admin_bucket_name  = var.loki_s3_admin_bucket_name
15 |     loki_s3_bucket_region      = var.loki_s3_bucket_region
16 |     loki_s3_bucket_endpoint    = var.loki_s3_bucket_endpoint
17 |     loki_s3_access_key_id      = var.loki_s3_access_key_id
18 |     loki_s3_secret_access_key  = var.loki_s3_secret_access_key
19 |   })]
20 | }
21 | 
--------------------------------------------------------------------------------
/common/prom-grafana-values.yml:
--------------------------------------------------------------------------------
1 | grafana:
2 |   ingress:
3 |     enabled: true
4 |     annotations:
5 |       kubernetes.io/ingress.class: nginx
6 |       cert-manager.io/cluster-issuer: ${issuer}
7 |     labels: {}
8 |     path: /
9 |     pathType: Prefix
10 | 
11 |     hosts:
12 |       - ${hostname}
13 |     tls:
14 |       - secretName: ${hostname}
15 |         hosts:
16 |           - ${hostname}
17 |   adminPassword: ${grafana_admin_password}
18 |   datasources:
19 |     datasources.yaml:
20 |       apiVersion: 1
21 |       datasources:
22 |         - name: Loki
23 |           type: loki
24 |           url: http://loki-gateway.loki.svc.cluster.local
25 |   persistence:
26 |     enabled: ${grafana_persistence_enabled}
27 |     size: ${grafana_persistence_size}
28 | 
29 | 
30 |   dashboardProviders:
31 |     dashboardproviders.yaml:
32 |       apiVersion: 1
33 |       providers:
34 |         - name: "k8s"
35 |           orgId: 1
36 |           options:
37 |             path: /var/lib/grafana/dashboards/k8s
38 |   dashboards:
39 |     k8s:
40 |       nginx:
41 |         url: https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/grafana/dashboards/nginx.json
42 |         datasource: Prometheus
43 |       request-handling-performance:
44 |         url: https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/grafana/dashboards/request-handling-performance.json
45 |         datasource: Prometheus
46 |       k8s-system-api-server:
47 |         url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-system-api-server.json
48 |         datasource: Prometheus
49 |       k8s-system-coredns:
50 |         url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-system-coredns.json
51 |         datasource: Prometheus
52 |       k8s-views-global:
53 |         url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-global.json
54 |         datasource: Prometheus
55 |       k8s-views-namespaces:
56 |         url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-namespaces.json
57 |         datasource: Prometheus
58 |       k8s-views-nodes:
59 |         url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-nodes.json
60 |         datasource: Prometheus
61 |       k8s-views-pods:
62 |         url: https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-pods.json
63 |         datasource: Prometheus
64 |   grafana.ini:
65 |     dashboards:
66 |       default_home_dashboard_path: /var/lib/grafana/dashboards/k8s/k8s-views-global.json
67 | prometheus:
68 |   prometheusSpec:
69 | %{ if prometheus_persistence_enabled }
70 |     storageSpec:
71 |       volumeClaimTemplate:
72 |         spec:
73 |           accessModes: ["ReadWriteOnce"]
74 |           resources:
75 |             requests:
76 |               storage: ${prometheus_persistence_size}
77 | %{ endif }
78 |     podMonitorSelectorNilUsesHelmValues: false
79 |     serviceMonitorSelectorNilUsesHelmValues: false
80 | 
--------------------------------------------------------------------------------
/common/prometheus-grafana.tf:
--------------------------------------------------------------------------------
1 | resource "helm_release" "kube-prometheus" {
2 |   name             = "kube-prometheus-stack"
3 |   namespace        = "prometheus"
4 |   create_namespace = true
5 | 
6 |   repository = "https://prometheus-community.github.io/helm-charts"
7 |   chart      = "kube-prometheus-stack"
8 | 
9 |   values = [templatefile("${path.module}/prom-grafana-values.yml", {
10 |     hostname                       = var.grafana_hostname
11 |     issuer                         = var.main_cluster_issuer_name
12 |     grafana_admin_password         = var.grafana_admin_password
13 |     grafana_persistence_enabled    = var.grafana_persistence_enabled
14 |     grafana_persistence_size       = var.grafana_persistence_size
15 |     prometheus_persistence_enabled = var.prometheus_persistence_enabled
16 |     prometheus_persistence_size    = var.prometheus_persistence_size
17 |   })]
18 | }
19 | 
--------------------------------------------------------------------------------
/common/promtail-values.yml:
--------------------------------------------------------------------------------
1 | serviceMonitor:
2 |   enabled: true
3 | config:
4 |   clients:
5 |     - url: http://loki-gateway.loki.svc.cluster.local/loki/api/v1/push
6 | 
--------------------------------------------------------------------------------
/common/promtail.tf:
--------------------------------------------------------------------------------
1 | resource "helm_release" "promtail" {
2 |   name             = "promtail"
3 |   namespace        = "promtail"
4 |   create_namespace = true
5 | 
6 |   repository = "https://grafana.github.io/helm-charts"
7 |   chart      = "promtail"
8 |   version    = var.promtail_version
9 | 
10 |   values = [templatefile("${path.module}/promtail-values.yml", {})]
11 | }
12 | 
--------------------------------------------------------------------------------
/common/tls-vault.tf:
--------------------------------------------------------------------------------
1 | #------------------------------------------------------------------------------
2 | # Certificate Authority
3 | #------------------------------------------------------------------------------
4 | resource "tls_private_key" "ca" {
5 |   count = local.generate_tls_certs ? 1 : 0
6 | 
7 |   algorithm   = "RSA"
8 |   ecdsa_curve = "P384"
9 |   rsa_bits    = "2048"
10 | }
11 | 
12 | resource "tls_self_signed_cert" "ca" {
13 |   count = local.generate_tls_certs ? 1 : 0
14 | 
15 |   #key_algorithm        = tls_private_key.ca[0].algorithm
16 |   private_key_pem       = tls_private_key.ca[0].private_key_pem
17 |   is_ca_certificate     = true
18 |   validity_period_hours = "168"
19 | 
20 |   allowed_uses = [
21 |     "cert_signing",
22 |     "key_encipherment",
23 |     "digital_signature"
24 |   ]
25 | 
26 |   subject {
27 |     organization = "HashiCorp (NonTrusted)"
28 |     common_name  = "HashiCorp (NonTrusted) Private Certificate Authority"
29 |     country      = "CA"
30 |   }
31 | }
32 | 
33 | #------------------------------------------------------------------------------
34 | # Certificate
35 | #------------------------------------------------------------------------------
36 | resource "tls_private_key" "vault_private_key" {
37 |   count = local.generate_tls_certs ? 1 : 0
38 | 
39 |   algorithm   = "RSA"
40 |   ecdsa_curve = "P384"
41 |   rsa_bits    = "2048"
42 | }
43 | 
44 | resource "tls_cert_request" "vault_cert_request" {
45 |   count = local.generate_tls_certs ? 1 : 0
46 | 
47 |   #key_algorithm  = tls_private_key.vault_private_key[0].algorithm
48 |   private_key_pem = tls_private_key.vault_private_key[0].private_key_pem
49 | 
50 |   dns_names = [for i in range(3) : format("hashicorp-vault-%s.hashicorp-vault-internal", i)]
51 | 
52 |   subject {
53 |     common_name  = "HashiCorp Vault Certificate"
54 |     organization = "HashiCorp Vault Certificate"
55 |   }
56 | }
57 | 
58 | resource "tls_locally_signed_cert" "vault_signed_certificate" {
59 |   count = local.generate_tls_certs ? 1 : 0
60 | 
61 |   cert_request_pem = tls_cert_request.vault_cert_request[0].cert_request_pem
62 |   #ca_key_algorithm  = tls_private_key.ca[0].algorithm
63 |   ca_private_key_pem = tls_private_key.ca[0].private_key_pem
64 |   ca_cert_pem        = tls_self_signed_cert.ca[0].cert_pem
65 | 
66 |   validity_period_hours = "168"
67 | 
68 |   allowed_uses = [
69 |     "key_encipherment",
70 |     "digital_signature",
71 |   ]
72 | }
73 | 
74 | resource "kubernetes_secret" "tls" {
75 |   metadata {
76 |     name      = "tls"
77 |     namespace = "hashicorp-vault"
78 |   }
79 | 
80 |   data = {
81 |     "tls.crt" = local.generate_tls_certs ? tls_locally_signed_cert.vault_signed_certificate[0].cert_pem : var.vault_api_signed_certificate
82 |     "tls.key" = local.generate_tls_certs ? tls_private_key.vault_private_key[0].private_key_pem : var.vault_api_private_key
83 |   }
84 | 
85 |   type = "kubernetes.io/tls"
86 | 
87 |   depends_on = [
88 |     kubernetes_namespace.hashicorp-vault
89 |   ]
90 | }
91 | 
92 | resource "kubernetes_secret" "tls_ca" {
93 |   metadata {
94 |     name      = "tls-ca"
95 |     namespace = "hashicorp-vault"
96 |   }
97 | 
98 |   data = {
99 |     "ca.crt" = local.generate_tls_certs ? tls_self_signed_cert.ca[0].cert_pem : var.vault_api_ca_bundle
100 |   }
101 | 
102 |   depends_on = [
103 |     kubernetes_namespace.hashicorp-vault
104 |   ]
105 | }
106 | 
--------------------------------------------------------------------------------
/common/variables.tf:
--------------------------------------------------------------------------------
1 | variable "argocd_hostname" {
2 |   type        = string
3 |   description = "The hostname to use for the ArgoCD ingress"
4 | }
5 | 
6 | variable "argocd_password" {
7 |   type        = string
8 |   description = "ArgoCD password hash, can be defined with `argocd account bcrypt --password change_me` after installing ArgoCD CLI"
9 | }
10 | 
11 | variable "argocd_repo_url" {
12 |   type        = string
13 |   description = "ArgoCD applications repo URL"
14 | }
15 | 
16 | variable "argocd_repo_username" {
17 |   type        = string
18 |   description = "ArgoCD applications repo username"
19 | }
20 | 
21 | variable "argocd_repo_password" {
22 |   type        = string
23 |   description = "ArgoCD applications repo password"
24 | }
25 | 
26 | variable "argocd_version" {
27 |   type        = string
28 |   description = "The version of ArgoCD helm release to install"
29 |   default     = "5.33.1"
30 | }
31 | 
32 | variable "argocd_apps_version" {
33 |   type        = string
34 |   description = "ArgoCD apps version"
35 |   default     = "1.6.2"
36 | }
37 | 
38 | variable "argocd_avp_version" {
39 |   type        = string
40 |   description = "ArgoCD argo-vault-plugin version"
41 |   default     = "1.14.0"
42 | }
43 | 
44 | variable "main_cluster_issuer_name" {
45 |   type        = string
46 |   description = "Name of the clusterIssuer"
47 |   default     = "letsencrypt-prod"
48 | }
49 | 
50 | variable "issuers" {
51 |   type = list(object({
52 |     name                    = string
53 |     email                   = string
54 |     server                  = string
55 |     private_key_secret_name = string
56 |   }))
57 |   description = "List of issuers to create"
58 |   default = [
59 |     {
60 |       name                    = "letsencrypt-prod"
61 |       server                  = "https://acme-v02.api.letsencrypt.org/directory"
62 |       email                   = "admin@admin.fr"
63 |       private_key_secret_name = "letsencrypt-prod"
64 |     }, {
65 |       name                    = "letsencrypt-staging"
66 |       server                  = "https://acme-staging-v02.api.letsencrypt.org/directory"
67 |       email                   = "admin@admin.fr"
68 |       private_key_secret_name = "letsencrypt-staging"
69 |     }
70 |   ]
71 | }
72 | 
73 | variable "grafana_hostname" {
74 |   type        = string
75 |   description = "The hostname to use for the Grafana ingress"
76 | }
77 | 
78 | variable "grafana_admin_password" {
79 |   type        = string
80 |   description = "The password of the Grafana UI"
81 |   sensitive   = true
82 | }
83 | 
84 | variable "grafana_persistence_enabled" {
85 |   type        = bool
86 |   description = "Enable Grafana persistence"
87 |   default     = false
88 | }
89 | 
90 | variable "grafana_persistence_size" {
91 |   type        = string
92 |   description = "Grafana persistence size"
93 |   default     = "10Gi"
94 | }
95 | 
96 | variable "prometheus_persistence_enabled" {
97 |   type        = bool
98 |   description = "Enable Prometheus persistence"
99 |   default     = false
100 | }
101 | 
102 | variable "prometheus_persistence_size" {
103 |   type        = string
104 |   description = "Prometheus persistence size"
105 |   default     = "20Gi"
106 | }
107 | 
108 | variable "vault_server_hostname" {
109 |   type        = string
110 |   description = "The hostname to use for the Vault server ingress"
111 |   default     = ""
112 | }
113 | 
114 | variable "install_hashicorp_vault" {
115 |   type        = bool
116 |   description = "Install Hashicorp Vault"
117 | }
118 | 
119 | variable "vault_leader_tls_servername" {
120 |   type        = string
121 |   description = "The servername to use for the TLS certificate"
122 |   default     = null
123 | }
124 | 
125 | variable "vault_data_storage_size" {
126 |   type        = string
127 |   description = "The size, in Gi, of the data storage volume"
128 |   default     = "10"
129 | }
130 | 
131 | variable "vault_api_signed_certificate" {
132 |   type        = string
133 |   description = "The signed certificate secret in Secrets Manager (not the filename)"
134 |   default     = null
135 |   sensitive   = true
136 | }
137 | 
138 | variable "vault_api_private_key" {
139 |   type        = string
140 |   description = "The certificate private key secret in Secrets Manager (not the filename)"
141 |   default     = null
142 |   sensitive   = true
143 | }
144 | 
145 | variable "vault_api_ca_bundle" {
146 |   type        = string
147 |   description = "The CA bundle secret in Secrets Manager (not the filename)"
148 |   default     = null
149 |   sensitive   = true
150 | }
151 | 
152 | variable "vault_ui" {
153 |   type        = bool
154 |   description = "Enable the Vault UI"
155 |   default     = true
156 | }
157 | 
158 | variable "kubernetes_vault_ui_service_type" {
159 |   type        = string
160 |   description = "The Kubernetes service type to use for the Vault UI"
161 |   default     = "ClusterIP"
162 | }
163 | 
164 | variable "vault_seal_method" {
165 |   type        = string
166 |   description = "The Vault seal method to use"
167 |   default     = "shamir"
168 | }
169 | 
170 | variable "vault_version" {
171 |   type        = string
172 |   description = "The version of Hashicorp vault helm release to install"
173 |   default     = "0.24.1"
174 | }
175 | 
176 | variable "velero_version" {
177 |   type        = string
178 |   description = "The version of Velero to install"
179 |   default     = "4.0.2"
180 | }
181 | 
182 | variable "velero_s3_bucket_name" {
183 |   type        = string
184 |   description = "The name of the S3 bucket to use for Velero backups"
185 | }
186 | 
187 | variable "velero_s3_bucket_region" {
188 |   type        = string
189 |   description = "The region of the S3 bucket to use for Velero backups"
190 | }
191 | 
192 | variable "velero_s3_bucket_endpoint" {
193 |   type        = string
194 |   description = "The endpoint of the S3 bucket to use for Velero backups"
195 |   default     = "s3.amazonaws.com"
196 | }
197 | 
198 | variable "velero_s3_access_key_id" {
199 |   type        = string
200 |   description = "The access key of the S3 bucket to use for Velero backups"
201 |   sensitive   = true
202 | }
203 | 
204 | variable "velero_s3_secret_access_key" {
205 |   type        = string
206 |   description = "The secret key of the S3 bucket to use for Velero backups"
207 |   sensitive   = true
208 | }
209 | 
210 | variable "velero_default_volumes_to_fs_backup" {
211 |   type        = bool
212 |   description = "Enable volume filesystem backups by default"
213 |   default     = false
214 | }
215 | 
216 | variable "promtail_version" {
217 |   type        = string
218 |   description = "The chart version of promtail to install"
219 |   default     = "6.11.5"
220 | }
221 | 
222 | variable "loki_enabled" {
223 |   type        = bool
224 |   description = "Enable the log aggregation with Loki"
225 |   default     = false
226 | }
227 | 
228 | variable "loki_version" {
229 |   type        = string
230 |   description = "The chart version of loki to install"
231 |   default     = "5.8.9"
232 | }
233 | 
234 | variable "loki_s3_chunks_bucket_name" {
235 |   type        = string
236 |   description = "The name of the S3 chunks bucket to use for loki"
237 | }
238 | 
239 | variable "loki_s3_ruler_bucket_name" {
240 |   type        = string
241 |   description = "The name of the S3 ruler bucket to use for loki"
242 | }
243 | 
244 | variable "loki_s3_admin_bucket_name" {
245 |   type        = string
246 |   description = "The name of the S3 admin bucket to use for loki"
247 | }
248 | 
249 | variable "loki_s3_bucket_region" {
250 |   type        = string
251 |   description = "The region of the S3 bucket to use for loki"
252 | }
253 | 
254 | variable "loki_s3_bucket_endpoint" {
255 |   type        = string
256 |   description = "The endpoint of the S3 bucket to use for loki"
257 |   default     = "s3.amazonaws.com"
258 | }
259 | 
260 | variable "loki_s3_access_key_id" {
261 |   type        = string
262 |   description = "The access key of the S3 bucket to use for loki"
263 |   sensitive   = true
264 | }
265 | 
266 | variable "loki_s3_secret_access_key" {
267 |   type        = string
268 |   description = "The secret key of the S3 bucket to use for loki"
269 |   sensitive   = true
270 | }
271 | 
--------------------------------------------------------------------------------
/common/vault-values.yml:
--------------------------------------------------------------------------------
1 | global:
2 |   enabled: true
3 |   tlsDisable: false
4 | 
5 | server:
6 |   dataStorage:
7 |     size: ${vault_data_storage_size}Gi
8 |   affinity: ""
9 |   ha:
10 |     enabled: true
11 |     raft:
12 |       enabled: true
13 |       setNodeId: true
14 |       config: |
15 |         ui = ${vault_ui}
16 | 
17 |         listener "tcp" {
18 |           address = "[::]:8200"
19 |           cluster_address = "[::]:8201"
20 | 
21 |           tls_disable = false
22 |           tls_cert_file = "/vault/userconfig/${kubernetes_secret_name_tls_cert}/tls.crt"
23 |           tls_key_file = "/vault/userconfig/${kubernetes_secret_name_tls_cert}/tls.key"
24 | 
25 |           tls_require_and_verify_client_cert = false
26 |           tls_disable_client_certs = true
27 |         }
28 | 
29 |         storage "raft" {
30 |           path = "/vault/data"
31 | 
32 |           retry_join {
33 |             auto_join = "provider=k8s namespace=hashicorp-vault label_selector=\"component=server,app.kubernetes.io/name=vault\""
34 |             %{ if vault_leader_tls_servername != null }
35 |             leader_tls_servername = "${vault_leader_tls_servername}"
36 |             %{ else }
37 |             leader_tls_servername = "HOSTNAME.hashicorp-vault-internal"
38 |             %{ endif }
39 |             leader_ca_cert_file = "/vault/userconfig/${kubernetes_secret_name_tls_ca}/ca.crt"
40 |           }
41 |         }
42 | 
43 |         service_registration "kubernetes" {}
44 | 
45 |         seal "${vault_seal_method}" {}
46 |   ui:
47 |     enabled: ${vault_ui}
48 |     serviceType: ${kubernetes_vault_ui_service_type}
49 | %{ if enable_vault_server_ingress }
50 |   ingress:
51 |     enabled: true
52 |     activeService: true
53 |     ingressClassName: nginx
54 |     annotations:
55 |       cert-manager.io/cluster-issuer: ${cluster_issuer_name}
56 |       nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
57 |     tls:
58 |       - hosts:
59 |           - ${vault_server_hostname}
60 |         secretName: ${vault_server_hostname}
61 |     hosts:
62 |       - host: ${vault_server_hostname}
63 | %{ endif }
64 |   affinity: |
65 |     podAntiAffinity:
66 |       preferredDuringSchedulingIgnoredDuringExecution:
67 |         - weight: 1
68 |           podAffinityTerm:
69 |             labelSelector:
70 |               matchLabels:
71 |                 app.kubernetes.io/name: vault
72 |                 app.kubernetes.io/instance: hashicorp-vault
73 |                 component: server
74 |             topologyKey: kubernetes.io/hostname
75 |   extraVolumes:
76 |     - type: secret
77 |       name: ${kubernetes_secret_name_tls_cert}
78 |     - type: secret
79 |       name: ${kubernetes_secret_name_tls_ca}
80 |   extraEnvironmentVars:
81 |     VAULT_CAPATH: /vault/userconfig/tls-ca/ca.crt
82 |     VAULT_SKIP_VERIFY: true
--------------------------------------------------------------------------------
/common/velero-credentials:
--------------------------------------------------------------------------------
1 | [default]
2 | aws_access_key_id=${velero_s3_access_key_id}
3 | aws_secret_access_key=${velero_s3_secret_access_key}
--------------------------------------------------------------------------------
/common/velero-values.yml:
--------------------------------------------------------------------------------
1 | ##
2 | ## Configuration settings that directly affect the Velero deployment YAML.
3 | ##
4 | 
5 | # Details of the container image to use in the Velero deployment & daemonset (if
6 | # enabling node-agent). Required.
7 | image:
8 |   repository: velero/velero
9 |   tag: v1.11.0
10 | 
11 | initContainers:
12 |   - name: velero-plugin-for-aws
13 |     image: velero/velero-plugin-for-aws:v1.7.0
14 |     imagePullPolicy: IfNotPresent
15 |     volumeMounts:
16 |       - mountPath: /target
17 |         name: plugins
18 | 
19 | configuration:
20 |   # Parameters for the BackupStorageLocation(s). Configure multiple by adding other element(s) to the backupStorageLocation slice.
21 |   # See https://velero.io/docs/v1.6/api-types/backupstoragelocation/
22 |   backupStorageLocation:
23 |     # name is the name of the backup storage location where backups should be stored. If a name is not provided,
24 |     # a backup storage location will be created with the name "default". Optional.
25 |     - name: default
26 |       # provider is the name for the backup storage location provider. If omitted
27 |       # `configuration.provider` will be used instead.
28 |       provider: aws
29 |       # bucket is the name of the bucket to store backups in. Required.
30 |       bucket: ${velero_s3_bucket_name}
31 |       # Additional provider-specific configuration. See link above
32 |       # for details of required/optional fields for your provider.
33 |       config:
34 |         region: ${velero_s3_bucket_region}
35 |         s3ForcePathStyle: true
36 |         s3Url: ${velero_s3_bucket_endpoint}
37 |   # Parameters for the VolumeSnapshotLocation(s). Configure multiple by adding other element(s) to the volumeSnapshotLocation slice.
38 | # See https://velero.io/docs/v1.6/api-types/volumesnapshotlocation/ 39 | volumeSnapshotLocation: 40 | # name is the name of the volume snapshot location where snapshots are being taken. Required. 41 | - name: default 42 | # provider is the name for the volume snapshot provider. If omitted 43 | # `configuration.provider` will be used instead. 44 | provider: aws 45 | # Additional provider-specific configuration. See link above 46 | # for details of required/optional fields for your provider. 47 | config: 48 | region: ${velero_s3_bucket_region} 49 | # apiTimeout: 50 | # resourceGroup: 51 | # The ID of the subscription where volume snapshots should be stored, if different from the cluster’s subscription. If specified, also requires `configuration.volumeSnapshotLocation.config.resourceGroup`to be set. (Azure only) 52 | # subscriptionId: 53 | # incremental: 54 | # snapshotLocation: 55 | # project: 56 | defaultVolumesToFsBackup: ${velero_default_volumes_to_fs_backup} 57 | 58 | # Info about the secret to be used by the Velero deployment, which 59 | # should contain credentials for the cloud provider IAM account you've 60 | # set up for Velero. 61 | credentials: 62 | # Whether a secret should be used. Set to false if, for examples: 63 | # - using kube2iam or kiam to provide AWS IAM credentials instead of providing the key file. (AWS only) 64 | # - using workload identity instead of providing the key file. (GCP only) 65 | useSecret: true 66 | # Name of the secret to create if `useSecret` is true and `existingSecret` is empty 67 | name: cloud-credentials 68 | # Data to be stored in the Velero secret, if `useSecret` is true and `existingSecret` is empty. 69 | # As of the current Velero release, Velero only uses one secret key/value at a time. 70 | # The key must be named `cloud`, and the value corresponds to the entire content of your IAM credentials file. 71 | # Note that the format will be different for different providers, please check their documentation. 
72 | # Here is a list of documentation for plugins maintained by the Velero team: 73 | # [AWS] https://github.com/vmware-tanzu/velero-plugin-for-aws/blob/main/README.md 74 | # [GCP] https://github.com/vmware-tanzu/velero-plugin-for-gcp/blob/main/README.md 75 | # [Azure] https://github.com/vmware-tanzu/velero-plugin-for-microsoft-azure/blob/main/README.md 76 | secretContents: 77 | cloud: | 78 | [default] 79 | aws_access_key_id=${velero_s3_access_key_id} 80 | aws_secret_access_key=${velero_s3_secret_access_key} 81 | 82 | deployNodeAgent: true ## Enables the creation of the Restic (node-agent) pods -------------------------------------------------------------------------------- /common/velero.tf: -------------------------------------------------------------------------------- 1 | resource "helm_release" "velero" { 2 | name = "velero" 3 | namespace = "velero" 4 | create_namespace = true 5 | 6 | repository = "https://vmware-tanzu.github.io/helm-charts" 7 | chart = "velero" 8 | version = var.velero_version 9 | timeout = 600 10 | 11 | values = [templatefile("${path.module}/velero-values.yml", { 12 | velero_s3_bucket_name = var.velero_s3_bucket_name 13 | velero_s3_bucket_region = var.velero_s3_bucket_region 14 | velero_s3_bucket_endpoint = var.velero_s3_bucket_endpoint 15 | velero_s3_access_key_id = var.velero_s3_access_key_id 16 | velero_s3_secret_access_key = var.velero_s3_secret_access_key 17 | velero_default_volumes_to_fs_backup = var.velero_default_volumes_to_fs_backup 18 | })] 19 | } -------------------------------------------------------------------------------- /docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.4' 2 | services: 3 | tf-scaleway: 4 | image: hashicorp/terraform:1.5.7 5 | user: ${DOCKER_USER:-1000} 6 | working_dir: /app/scaleway 7 | environment: 8 | # Without this value, the Helm provider fails to fetch chart repository data 9 | - XDG_CACHE_HOME=/tmp 10 | # 11 | volumes: 12 | - ./scaleway:/app/scaleway 13 | - ./common:/app/common 14 | tf-ovh: 15 | image: hashicorp/terraform:1.5.7 16 | user: ${DOCKER_USER:-1000} 17 | working_dir: /app/ovh 18 | environment: 19 | # Without this value, the Helm provider fails to fetch chart repository data 20 | - XDG_CACHE_HOME=/tmp 21 | # 22 | volumes: 23 | - ./ovh:/app/ovh 24 | - ./common:/app/common 25 | tf-standalone: 26 | image: hashicorp/terraform:1.5.7 27 | user: ${DOCKER_USER:-1000} 28 | working_dir: /app/standalone 29 | environment: 30 | # Without this value, the Helm provider fails to fetch chart repository data 31 | - XDG_CACHE_HOME=/tmp 32 | # 33 | volumes: 34 | - ./standalone:/app/standalone 35 | - ./common:/app/common 36 | tf-bucket-ovh: 37 | image: hashicorp/terraform:1.5.7 38 | user: ${DOCKER_USER:-1000} 39 | working_dir: /app/state_bucket 40 | environment: 41 | # Without this value, the Helm provider fails to fetch chart repository data 42 | - XDG_CACHE_HOME=/tmp 43 | - AWS_ACCESS_KEY_ID="no_need_to_define_an_access_key" 44 | - AWS_SECRET_ACCESS_KEY="no_need_to_define_a_secret_key" 45 | volumes: 46 | - ./state_bucket:/app/state_bucket 47 | bash: 48 | image: bash 49 | user: ${DOCKER_USER:-1000} 50 | working_dir: /app 51 | argocd-cli: 52 | image: argoproj/argocd:v2.6.15 53 | user: ${DOCKER_USER:-1000} 54 | kubectld: 55 | image: bitnami/kubectl:1.29.2 56 | user: ${DOCKER_USER:-1000} 57 | working_dir: /app 58 | volumes: 59 | - ./vault/.kube:/app/.kube -------------------------------------------------------------------------------- /docs/argocd.md:
-------------------------------------------------------------------------------- 1 | # ArgoCD 2 | 3 | **Before reading this section, please note that disabling the installation of Hashicorp Vault will also disable the installation of ArgoCD Vault Plugin. You are still able to use ArgoCD the way you want, but you will have to use your own repo structure.** 4 | 5 | ## The mono-repo 6 | 7 | The mono-repo is a git repository containing all the applications you want to deploy on your cluster. It is used by ArgoCD to deploy your applications. It is a good practice to have one mono-repo per cluster. 8 | 9 | This project shares a mono-repo structure which was specifically designed to ease the deployment of applications for new k8s users. It is available [here](examples/argocd-repo). However, you are free to use your own repository structure. 10 | 11 | The repository structure is the following: 12 | 13 | ```bash 14 | . 15 | ├── .gitignore 16 | ├── apps # Folder containing all the applications to declare 17 | │ ├── external-app # Folder declaring the external-app application 18 | │ │ └── test.json 19 | │ ├── hello-world # Folder declaring the hello-world application 20 | │ │ ├── preprod.json 21 | │ │ ├── prod.json 22 | │ │ └── staging.json 23 | │ └── secret-helm # Folder declaring the secret-helm application 24 | │ ├── base.yaml 25 | │ ├── dev.json 26 | │ ├── dev.yaml 27 | │ ├── prod.json 28 | │ └── prod.yaml 29 | └── helm # Folder containing all the helm charts 30 | ├── hello-world # Folder containing the hello-world helm chart 31 | │ ├── .helmignore 32 | │ ├── Chart.yaml 33 | │ ├── README.md 34 | │ ├── templates 35 | │ │ ├── NOTES.txt 36 | │ │ ├── _helpers.tpl 37 | │ │ ├── deployment.yaml 38 | │ │ ├── service.yaml 39 | │ │ └── serviceaccount.yaml 40 | │ └── values.yaml 41 | └── secret-helm # Folder containing the secret-helm helm chart 42 | ├── .DS_Store 43 | ├── .helmignore 44 | ├── Chart.yaml 45 | ├── templates 46 | │ ├── .DS_Store 47 | │ └── secret.yaml 48 | └── values.yaml 49 | ``` 50 | 51 | ## Usage 52 | 53 | 1. Create a new repository with the same structure as the mono-repo 54 | 2. Create read credentials for the repository (see [here](https://argo-cd.readthedocs.io/en/stable/user-guide/private-repositories/#access-token) for different providers) 55 | 3. Configure the Terraform variables `argocd_repo_url`, `argocd_repo_username` and `argocd_repo_password` accordingly (see [variables.tf](common/variables.tf)). **Terraform expects HTTP git credentials, not SSH.** 56 | 4. Define the variables `argocd_hostname` and `argocd_password` (see [variables.tf](common/variables.tf)). The variable `argocd_password` is used to define the password of the `admin` user of ArgoCD. Terraform expects a **hash** of the password. To generate it, you can use the following command after installing the ArgoCD CLI: `argocd account bcrypt --password P@$sw0rd`. 57 | 58 | ## ArgoCD Vault Plugin 59 | 60 | The ArgoCD Vault Plugin is a plugin for ArgoCD which allows you to use secrets stored in Hashicorp Vault in your applications. It is installed by default on the cluster. You can fine-tune its version by changing the variable `argocd_avp_version` (see [variables.tf](common/variables.tf)). It is highly recommended to read the plugin's [documentation](http://argocd-vault-plugin.readthedocs.io) before using it, as it has many features not documented in this README that may suit your needs. 61 | 62 | By default, ArgoCD Vault Plugin is configured to use the Kubernetes auth backend of Vault.
The authentication is done with the Kubernetes service account of ArgoCD in the `argocd` namespace. The service account has read access on the path `kv/*`. We'll see later how to restrict access to the secrets for specific applications. 63 | 64 | ArgoCD Vault Plugin works by taking a directory of YAML files that have been templated out using the pattern `<placeholder>`, and then using the values from Vault to replace the placeholders. The plugin will then apply the YAML files to the cluster. You can use generic or inline placeholders. However, inline placeholders are more straightforward to use. An inline-path placeholder allows you to specify the path, key, and optionally, the version to use for a specific placeholder. This means you can inject values from multiple distinct secrets in your secrets manager into the same YAML. 65 | 66 | Valid examples: 67 | 68 | ``` 69 | - <path:kv/data/my-secret#my-key> 70 | - <path:kv/data/my-secret#my-key#2> 71 | ``` 72 | 73 | If the version is omitted (first example), the latest version of the secret is retrieved. 74 | By default, Vault creates a KV-V2 backend. For KV-V2 backends, the path needs to be specified as `<path:vault-kvv2-backend-path/data/path-to-secret#secret-key>`, where `vault-kvv2-backend-path` is the path to the KV-V2 backend and `path-to-secret` is the path to the secret in Vault. 75 | 76 | Again, **it is highly recommended to read the [placeholders documentation](https://argocd-vault-plugin.readthedocs.io/en/stable/howitworks/) of the plugin before using it**. 77 | 78 | ## Examples 79 | 80 | ### Basic example - hello-world application 81 | 82 | This example shows how to deploy a simple application with ArgoCD. The application is a simple nginx server, deployed in 3 environments (staging, preprod and prod) and in 3 different namespaces, one namespace per application and per environment. 83 | 84 | The application is deployed with the following instructions: 85 | 86 | - Add the `hello-world` helm chart to the `helm` folder of the mono-repo 87 | - Declare the application in the `apps` folder of the mono-repo by creating a folder named `hello-world`. **Beware: the name of the folder must be the same as the name of the helm chart.** 88 | - Add a JSON file that fits your needs: 89 | - Add a JSON file per environment and name the file according to the following pattern: `<environment>.json`. For instance, for the staging environment, the file must be named `staging.json`. 90 | - Add a JSON file named after the application folder. For instance `hello-world.json`. This will create a standalone application without any environment. 91 | 92 | This file **must be a valid JSON file and must contain at least**: 93 | 94 | ```json 95 | {} 96 | ``` 97 | 98 | ### ArgoCD Vault Plugin example - secret-helm application 99 | 100 | This example shows how to use the ArgoCD Vault Plugin to deploy a helm chart with secrets stored in Hashicorp Vault. The application is a simple chart which creates a secret with various keys and values, deployed in 2 environments (dev and prod) and in 2 different namespaces, one namespace per application and per environment. 101 | 102 | The application configuration refers to specific helm values per environment. The used value files are declared for each environment using the JSON file.
The JSON file must contain the following: 103 | 104 | ```json 105 | { 106 | "valuesFiles": ["<values-file>"] 107 | } 108 | ``` 109 | 110 | For instance, the prod environment uses the `prod.json` file with: 111 | 112 | ```json 113 | { 114 | "valuesFiles": ["base.yaml", "prod.yaml"] 115 | } 116 | ``` 117 | 118 | ### Multi-tenancy example - external-app application 119 | 120 | This example shows how to deploy an application in a multi-tenant environment. The cluster administrator is responsible for declaring the application on the cluster and the developers are responsible for maintaining the application helm chart. This is achieved by specifying the `externalRepoURL` in the JSON file. 121 | 122 | For instance, the test environment uses the `test.json` file with: 123 | 124 | ```json 125 | { 126 | "externalRepoURL": "https://github.com/example/externalRepo.git" 127 | } 128 | ``` 129 | 130 | Beware: the remote repository must be public, or the cluster must have access to it. Please refer to the [ArgoCD documentation](https://argo-cd.readthedocs.io/en/stable/user-guide/private-repositories) for more information. 131 | 132 | Please also note that the remote repository must have the exact same structure as the mono-repo: it must contain a `helm` folder with the helm charts and an `apps` folder with the application configuration: 133 | 134 | ```bash 135 | . 136 | ├── apps 137 | │ └── external-app 138 | │ └── test.yaml 139 | └── helm 140 | └── external-app 141 | ├── .helmignore 142 | ├── Chart.yaml 143 | ├── charts 144 | ├── templates 145 | └── values.yaml 146 | ``` 147 | 148 | With this setup, the developer who controls the helm chart is able to request any secret contained in the vault simply by using the correct path 149 | of a secret in the vault. Therefore, the cluster administrator must restrict access to the secrets for specific applications. This is achieved by following this procedure: 150 | 151 | 1. Create a specific policy in Vault for the application which only gives access to the secrets needed by the application 152 | 153 | RW policy example: 154 | 155 | ```hcl 156 | path "kv/metadata/my_app/*" { 157 | capabilities = ["list", "read", "delete"] 158 | } 159 | path "kv/data/my_app/*" { 160 | capabilities = ["create", "update", "read", "delete"] 161 | } 162 | path "kv/delete/my_app/*" { 163 | capabilities = ["update"] 164 | } 165 | path "kv/undelete/my_app/*" { 166 | capabilities = ["update"] 167 | } 168 | path "kv/destroy/my_app/*" { 169 | capabilities = ["update"] 170 | } 171 | ``` 172 | 173 | 2. Attach the policy to the Vault Authentication Method 174 | 3. Create an ArgoCD Vault Plugin configuration secret which uses the Vault Authentication Method. Please refer to the [ArgoCD Vault Plugin backend documentation](https://argocd-vault-plugin.readthedocs.io/en/stable/backends/) and the [ArgoCD Vault Plugin configuration documentation](https://argocd-vault-plugin.readthedocs.io/en/stable/config/) for more information. Here is an example of a configuration secret for AppRole authentication: 175 | 176 | ```yaml 177 | apiVersion: v1 178 | kind: Secret 179 | metadata: 180 | name: external-vault-credentials 181 | namespace: argocd 182 | type: Opaque 183 | stringData: 184 | VAULT_ADDR: Your HashiCorp Vault Address 185 | AVP_TYPE: vault 186 | AVP_AUTH_TYPE: approle 187 | AVP_ROLE_ID: Your AppRole Role ID 188 | AVP_SECRET_ID: Your AppRole Secret ID 189 | ``` 190 | 191 | Beware, the secret **must** be created in the `argocd` namespace. 192 | 193 | 4.
Finally, reference the vault credentials secret in the JSON file: 194 | 195 | ```json 196 | { 197 | "vaultCredentials": "external-vault-credentials" 198 | } 199 | ``` 200 | 201 | Please note that if you do not want to use external repositories, you can still declare a helm chart in the mono-repo that wraps an external chart stored on a Helm repository. 202 | 203 | Next step → [Use Velero](./velero.md) 204 | -------------------------------------------------------------------------------- /docs/cluster-auto.md: -------------------------------------------------------------------------------- 1 | # [AUTOMATIC] Deployment steps 2 | 3 | _Every command must be run at the root of the repository_ 4 | 5 | ## Prerequisites 6 | 7 | Docker and docker-compose must be installed on your computer. 8 | 9 | ## Create Terraform's state 10 | 11 | First, we need an S3 bucket to store Terraform's state, so that it is available everywhere (and not only on your computer). If you already have a bucket, you can skip this step. 12 | 13 | This repository provides a Terraform configuration to create a bucket on OVH, but not on Scaleway. For this step, you will need OVH API credentials (`application key`, `secret key` and `consumer key`, as well as the project id in which you will create the bucket, see [here to generate a token](https://www.ovh.com/auth/api/createToken)). You must add the following rights, replacing `{serviceName}` with your OVH Public Cloud project id: 14 | 15 | - GET /cloud/project/{serviceName}/\* 16 | - PUT /cloud/project/{serviceName}/\* 17 | - POST /cloud/project/{serviceName}/\* 18 | - DELETE /cloud/project/{serviceName}/\* 19 | 20 | Then execute the corresponding script: `bin/init-bucket.sh`. After you enter all the required information, it will create a bucket on OVH. 21 | 22 | Finally, save the provided credentials (`access_key`, `secret_key` and `bucket_name`); you will need them for the next step. 23 | 24 | ## Create and provision the cluster 25 | 26 | ### Configure the backend 27 | 28 | Now that we have our S3 bucket, we have to set up Terraform's backend, where it stores its state. For this, we will use the S3 bucket we just created (or the one you already have). 29 | 30 | Run `bin/bootstrap-backend.sh <provider>` to create the backend. It will create a `backend.conf` file, which will be used by Terraform to store its state in the S3 bucket. Replace `<provider>` with either `ovh` or `scaleway`. 31 | 32 | If you used the previous script to generate the bucket, here is some information you will need: 33 | 34 | - Region: `gra` 35 | - Endpoint: `https://s3.gra.io.cloud.ovh.net/` 36 | - Skip region validation: `true` 37 | - Skip credentials validation: `true` 38 | 39 | ### Provide the correct information 40 | 41 | Terraform needs a few variables to create your cluster; please run `bin/bootstrap.sh <provider>` and provide the desired values for each parameter. You will need: 42 | 43 | - The hostname for several services: ArgoCD, Grafana, Vault (if installed) 44 | - An **already existing** S3 bucket for Velero (you can use the state_bucket Terraform script to create an S3 bucket for Velero) 45 | - ArgoCD needs a Git repository with HTTPS credentials for access. You can use a private repository, or a public one. If you use a private repository, you will need to provide the HTTPS credentials (username and password). If you use a public repository, you can leave the username and password empty.
46 | - API keys for your provider: 47 | - For OVH, see [here](https://www.ovh.com/auth/api/createToken) 48 | - For Scaleway, see [here](https://www.scaleway.com/en/docs/identity-and-access-management/iam/how-to/create-api-keys/) 49 | 50 | **The script will prompt for the most common variables. By default, some variables are not prompted (and their default value is then used). If you wish, you can look into the `variables.tf` and the `variables-common.tf` files to see all the variables that can be set. Simply add them to the `terraform.tfvars` file.** 51 | 52 | ### Deploy the cluster 53 | 54 | After your `terraform.tfvars` file has been successfully created, you can now deploy the cluster. Run `bin/terraform-init.sh <provider>` to initialize Terraform. After this, run `bin/terraform-plan.sh <provider>`; the output shows you what Terraform will do. If you are satisfied with the plan, run `bin/terraform-apply.sh <provider>` to deploy the cluster. _(Please ignore the output of the command beginning with 'To perform exactly these actions...')_ 55 | 56 | While running, the `terraform-apply.sh` script may crash (especially with OVH). If so, analyze the error. If it is related to timeouts or server errors, simply re-run the script (if you encounter errors re-running `terraform-apply.sh`, try running `terraform-plan.sh` first). The script may run for more than 10 minutes, so please be patient. 57 | 58 | **Warning: if the script crashes, make sure Terraform has not created resources (e.g. a k8s cluster) in the background that were not linked to the state because of the crash. If so, you will have to delete them manually.** 59 | 60 | At the end of the script, please make the needed changes to your DNS (adding the ingress IP to the needed domains). You can then retrieve your kubeconfig file with the following command: `bin/get-kube-config.sh <provider>`. 61 | 62 | ### Destroy the cluster 63 | 64 | With: `bin/terraform-destroy.sh <provider>`. **Warning: there is no confirmation; it will destroy the cluster immediately.** 65 | 66 | Next step → [Configure Hashicorp Vault](./hashicorp-vault.md) 67 | -------------------------------------------------------------------------------- /docs/cluster-manual.md: -------------------------------------------------------------------------------- 1 | # [MANUAL] Deployment steps 2 | 3 | _This assumes you have the Terraform CLI installed on your computer._ 4 | ## Create Terraform's state 5 | 6 | First, we need an S3 bucket to store Terraform's state, so that it is available everywhere (and not only on your computer). If you already have a bucket, you can skip this step. 7 | 8 | This repository provides a Terraform configuration to create a bucket on OVH. For this step, you will need OVH API credentials (`application key`, `secret key` and `consumer key`, as well as the project id in which you will create the bucket, see [here if you do not know how to get them](https://help.ovhcloud.com/csm/en-api-getting-started-ovhcloud-api?id=kb_article_view&sysparm_article=KB0042777#advanced-usage-pair-ovhcloud-apis-with-an-application)). 9 | 10 | - Go to `/state_bucket`, and do a `terraform init` 11 | - Copy the `terraform.tfvars.template` into `terraform.tfvars` and provide the correct variables in it.
(a description of the variables is available in the `variables.tf` file) 12 | - At this step, we need to do a tiny trick coming from [OVH](https://github.com/yomovh/tf-at-ovhcloud/blob/main/s3_bucket_only/README.md): 13 | 14 | _If you have AWS CLI already configured, you are good to go!_ 15 | 16 | _Otherwise, due to a limitation in Terraform's dependency graph for provider initialization (see this long-lasting issue), it is required to have the following environment variables defined (even if they are dummy values that get overridden during the script execution): AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY_ 17 | 18 | _If they are not already defined you can use the following:_ 19 | 20 | ```bash 21 | export AWS_ACCESS_KEY_ID="no_need_to_define_an_access_key" 22 | export AWS_SECRET_ACCESS_KEY="no_need_to_define_a_secret_key" 23 | ``` 24 | 25 | - Then create the bucket with a `terraform plan` followed by a `terraform apply` 26 | - Save the provided `access_key` and `secret_key` (because the `secret_key` is a secret, you need to use the `terraform output secret_key` command to get it) 27 | 28 | ## Create and provision the cluster 29 | 30 | _Go to the folder corresponding to the provider you want_ 31 | 32 | Now that we have our S3 bucket, copy the `backend.conf.example` into a `backend.conf` file and fill it with the information you previously obtained. You may choose a name for your state file (using the `key` field). Terraform needs this information to know in what state your cluster is. 33 | 34 | Next: 35 | 36 | - Provide the correct variables in a `terraform.tfvars` file. The list of variables is available in the `variables.tf` and `variables-common.tf` files, along with descriptions and default values; 37 | - For Hashicorp Vault: if you do not have a custom certificate, just leave the following variables empty: `vault_api_signed_certificate`, `vault_api_private_key`, `vault_api_ca_bundle`. 38 | - You will need API credentials for the provider you choose: 39 | - For OVH, see [here](https://help.ovhcloud.com/csm/en-api-getting-started-ovhcloud-api?id=kb_article_view&sysparm_article=KB0042777#advanced-usage-pair-ovhcloud-apis-with-an-application) 40 | - For Scaleway, see [here](https://www.scaleway.com/en/docs/identity-and-access-management/iam/how-to/create-api-keys/) 41 | - Do a `terraform init -backend-config=backend.conf`, then `terraform plan`, then `terraform apply` to create your cluster. This way, your Terraform state will be saved in the S3 bucket. 42 | 43 | _Using the OVH provider, you may encounter timeouts or other errors coming from OVH. If so, simply re-run the `terraform apply` command. It will continue where it stopped and will eventually complete._ 44 | 45 | Next step → [Scripted cluster creation](./cluster-auto.md) -------------------------------------------------------------------------------- /docs/hashicorp-vault.md: -------------------------------------------------------------------------------- 1 | # Hashicorp Vault 2 | 3 | Once the cluster has been set up, Hashicorp Vault (referred to as "vault" from now on) is not yet ready for use. It has to be initialized and unsealed. _Secrets will be handled in the following steps._ 4 | To ensure HA on the cluster, the deployment consists of 3 pods spread across 3 nodes (the node autoscaling feature is used here). More pods can be created by modifying Terraform's variables (HPA is not available though). 5 | 6 | _To perform the following steps, you need to have every pod in the `Running` state.
You can check this with `kubectl get pods -n hashicorp-vault` (they won't be marked as ready yet, however)._ 7 | 8 | ## Manual initialization 9 | 10 | **Initialization of the vault** 11 | 12 | Shamir's secret sharing algorithm is used to seal the vault: _n_ keys (with _n_ > 0) are generated, and _m_ keys (with 0 < _m_ <= _n_) are needed to unseal the vault. This is achieved with the following command (using `kubectl` in the `hashicorp-vault` namespace): 13 | 14 | ```bash 15 | kubectl exec hashicorp-vault-0 -- vault operator init \ 16 | -key-shares=n \ 17 | -key-threshold=m \ 18 | -format=json > cluster-keys.json 19 | ``` 20 | 21 | This command generates a `cluster-keys.json` file containing: 22 | 23 | - the _n_ generated keys 24 | - a root token, used to authenticate to the vault (once unsealed) 25 | 26 | _If you read the official documentation, you might want to make the pods join the Raft cluster manually. The vault is configured here to join the Raft cluster by itself, so no action is required from the user._ 27 | 28 | **Unsealing of the vault** 29 | 30 | The vault is still not available: each pod must be _unsealed_ to be operational. This can be achieved as follows (still in the `hashicorp-vault` namespace), here with _n_ = _m_ = 1: 31 | 32 | `kubectl exec hashicorp-vault-i -- vault operator unseal $VAULT_UNSEAL_KEY`, with _i_ ranging from 0 to the number of pods minus one. 33 | 34 | Now your vault is fully operational. A first authentication is possible with the root token. The vault has to be unsealed every time a pod is destroyed, or for any other reason detailed in Hashicorp Vault's documentation. 35 | 36 | ## Automatic configuration 37 | 38 | The Vault may be automatically initialized and unsealed. This is done by executing the script `init.sh` in the `vault` folder, with the following command: `./init.sh`. Then follow the instructions and your Vault should be ready to use at the end. 39 | 40 | **Initial configuration** 41 | 42 | This part is not mandatory. It deploys the Key/Value engine on the Vault, as well as a Kubernetes backend for authentication (used, for instance, by the ArgoCD Vault Plugin). 43 | The k8s backend has read access on the path `kv/*`. 44 | 45 | Go to the `vault` folder, copy `terraform.tfvars.template` to `terraform.tfvars` and fill it with the required variables (descriptions may be found in the `variables.tf` file). The `vault_root_token` may be found in the previously generated `cluster-keys.json` file. Then do a `terraform init`, followed by `terraform plan`, then `terraform apply`. 46 | 47 | **Congratulations!
Your Hashicorp Vault is now ready to use, enjoy!** 48 | 49 | Next step → [Configure ArgoCD](./argocd.md) -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # Kubic - Kubernetes Infrastructure as Code 2 | 3 | [![Kubernetes](https://img.shields.io/static/v1?style=for-the-badge&message=Kubernetes&color=326CE5&logo=Kubernetes&logoColor=FFFFFF&label=)](https://kubernetes.io) 4 | [![NGINX](https://img.shields.io/static/v1?style=for-the-badge&message=NGINX&color=009639&logo=NGINX&logoColor=FFFFFF&label=)](https://kubernetes.github.io/ingress-nginx/) 5 | [![ArgoCD](https://img.shields.io/static/v1?style=for-the-badge&message=ArgoCD&color=EF7B4D&logo=Argo&logoColor=FFFFFF&label=)](https://argo-cd.readthedocs.io) 6 | [![Vault](https://img.shields.io/static/v1?style=for-the-badge&message=Vault&color=000000&logo=Vault&logoColor=FFFFFF&label=)](https://www.vaultproject.io) 7 | [![Terraform](https://img.shields.io/static/v1?style=for-the-badge&message=Terraform&color=7B42BC&logo=Terraform&logoColor=FFFFFF&label=)](https://www.terraform.io) 8 | 9 | Available on: 10 | 11 | [![Scaleway](https://img.shields.io/static/v1?style=for-the-badge&message=Scaleway&color=4F0599&logo=Scaleway&logoColor=FFFFFF&label=)](https://www.scaleway.com) 12 | [![OVH](https://img.shields.io/static/v1?style=for-the-badge&message=OVH&color=123F6D&logo=OVH&logoColor=FFFFFF&label=)](https://www.ovh.com) 13 | 14 | ## Overview 15 | 16 | Kubic is a cutting-edge, production-ready, multi-cloud-provider Kubernetes infrastructure as code. It integrates an ingress controller, a certificate manager, a monitoring stack, a GitOps tool with complete secret management, and a backup tool. 17 | 18 | This Terraform configuration aims at creating a managed k8s cluster set up with: 19 | 20 | - NGINX Ingress Controller 21 | - Cert-manager 22 | - Prometheus / Grafana 23 | - ArgoCD 24 | - Hashicorp Vault if needed 25 | - ArgoCD Vault Plugin if Vault is deployed 26 | - Velero for backing up the cluster 27 | - Loki if enabled 28 | 29 | The cluster can be deployed either on OVHCloud or on Scaleway. New providers can be added by creating a new folder at the root of the repository and following the same architecture as the existing providers. 30 | 31 | ## Repository architecture 32 | 33 | ```bash 34 | . 35 | ├── docs # Folder containing the documentation 36 | ├── state_bucket # Folder containing the Terraform to create a S3 bucket for the Terraform state 37 | ├── vault # Folder containing the Terraform to configure Hashicorp Vault 38 | ├── common # Folder containing the Terraform which is common to all the providers 39 | ├── ovh # Folder declaring Terraform to deploy a cluster on OVHCloud 40 | ├── scaleway # Folder declaring Terraform to deploy a cluster on Scaleway 41 | ├── examples # Folder containing examples of applications to deploy with ArgoCD 42 | ├── .gitignore 43 | ├── LICENSE 44 | └── README.md 45 | ``` 46 | 47 | All files contained in the folder `common` are symbolically linked in the folders `ovh` and `scaleway` to avoid code duplication.
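For illustration, here is a minimal sketch of how such links can be created from the repository root (the two file names are examples taken from the tree above; in practice every file in `common` is linked into each provider folder):

```bash
# Link shared Terraform files from common/ into a provider folder.
# The link target is relative to the directory containing the link.
ln -s ../common/argocd.tf ovh/argocd.tf
ln -s ../common/cert-manager.tf ovh/cert-manager.tf
```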
48 | 49 | ## Getting started 50 | 51 | - Create your cluster: 52 | - [Manual deployment](cluster-manual.md) 53 | - [Automatic deployment](cluster-auto.md) 54 | - [Configure Hashicorp Vault](hashicorp-vault.md) 55 | - [Configure ArgoCD](argocd.md) 56 | - [Configure Velero](velero.md) 57 | - [Standalone use](standalone.md) 58 | 59 | ## Contributing 60 | 61 | Currently, only OVH and Scaleway are supported as providers. Here are the guidelines to add a new provider: 62 | 63 | - Create a new folder at the root of the repository, with the name of the provider; 64 | - Create a symlink in your new folder for every file in `common`; 65 | - Create a `terraform.tf` file containing: 66 | - Terraform configuration with an `s3` backend; 67 | - The `helm`, `kubernetes` and `kubectl` providers along with the provider(s) you need, correctly configured; 68 | - A `kubernetes.tf` file creating the cluster, with an output named `kubeconfig` that contains the actual kubeconfig for the cluster; 69 | - An `ingress-nginx.tf` file, deploying the [ingress-nginx ingress controller](https://kubernetes.github.io/ingress-nginx) and configuring it with an external IP (you may need to create a load balancer on your provider). The ingress IP should be a Terraform output named `ingress_ip`; 70 | - This must also create a `null_resource` named `ingress-nginx` that `depends_on` the node pool of your cluster (this is to get a consistent dependency chain for Terraform) 71 | - The controller must have at least the following configuration: 72 | 73 | ```yaml 74 | controller: 75 | metrics: 76 | enabled: true 77 | serviceMonitor: 78 | additionalLabels: 79 | release: prometheus 80 | enabled: true 81 | extraArgs: 82 | enable-ssl-passthrough: true 83 | admissionWebhooks: 84 | timeoutSeconds: 30 85 | ``` 86 | 87 | - Edit the `docker-compose.yaml` and create a service for your provider (simply adapt the existing code). 88 | -------------------------------------------------------------------------------- /docs/standalone.md: -------------------------------------------------------------------------------- 1 | # Standalone use 2 | 3 | If you already have a cluster (with another provider, for instance), you can still use this Terraform configuration to deploy all the mentioned tools on it, **if and only if you use ingress-nginx as your ingress controller**. For this, you will need a `kubeconfig` file to access your cluster (or your credentials; in that case, you will have to modify the `terraform.tf` file yourself). 4 | 5 | This part will install: 6 | 7 | - Cert-manager, provisioned with issuers 8 | - Prometheus along with Grafana 9 | - ArgoCD provisioned with a default repository 10 | - Velero 11 | - Hashicorp Vault 12 | - Loki 13 | 14 | ## Requirements 15 | 16 | Ingress-nginx must be configured with the following values: 17 | 18 | ```yaml 19 | controller: 20 | metrics: 21 | enabled: true 22 | serviceMonitor: 23 | additionalLabels: 24 | release: prometheus 25 | enabled: true 26 | extraArgs: 27 | enable-ssl-passthrough: true 28 | ``` 29 | 30 | ## Steps 31 | 32 | Follow these steps (every command must be run at the root of the repository): 33 | 34 | - Run `bin/bootstrap.sh standalone` and fill in the requested variables; 35 | - Only the most common variables are prompted; if you want to change other variables, you will have to edit the `standalone/terraform.tfvars` file yourself.
(the complete list of variables is available in the `standalone/variables.tf` file) 36 | - Run `bin/terraform-init.sh standalone` to initialize the Terraform state; 37 | - Put your `kubeconfig` file in the `standalone` folder; 38 | - Run `bin/terraform-plan.sh standalone` to see what will be deployed; 39 | - Run `bin/terraform-apply.sh standalone` to deploy. 40 | -------------------------------------------------------------------------------- /docs/velero.md: -------------------------------------------------------------------------------- 1 | # Velero 2 | 3 | Velero is a backup and restore tool for Kubernetes. It is used to back up the cluster's resources and to restore them in case of disaster. It can also be used to migrate the cluster to another provider. 4 | 5 | Before using Velero, all you need is an external S3 bucket to store your backups. You can use any S3-compatible storage provider. 6 | When creating the cluster, all Velero variables are to be set either manually in the `terraform.tfvars` file or during the automatic setup, according to the type of installation you choose. 7 | 8 | ## Configuration 9 | 10 | **Set persistent volumes backup** 11 | 12 | We use Velero's opt-in approach to back up persistent volumes (more information [here](https://velero.io/docs/main/file-system-backup/)). This means that you need to add the following annotation to your pods when you want their PVCs to be saved: `backup.velero.io/backup-volumes: <volume-name-1>,<volume-name-2>,...`. This will back up the persistent volume claims and the persistent volumes associated with them. 13 | 14 | You can use the opt-out approach by setting `velero_default_volumes_to_fs_backup` to `true` in the `terraform.tfvars`. 15 | 16 | ## Velero's CLI 17 | 18 | Velero comes with a CLI to manage the backups. You can install it by following the instructions [here](https://velero.io/docs/v1.6/basic-install/). To bind the CLI to your cluster, just set the `--kubeconfig` flag when you run a command. Otherwise, Velero will use your default kubeconfig file. 19 | 20 | **Manual backup** 21 | 22 | To back up the cluster, you need to create a backup. This is done with the following command: `velero backup create BACKUP_NAME`. You can list your backups with `velero backup get`. 23 | 24 | **Auto backup** 25 | 26 | To schedule a backup of your namespace, refer to the template `common/Schedule_template.yml` and fill it with the correct values. Then apply it with `kubectl apply -f Schedule_template.yml`. 27 | 28 | **Restore from backup** 29 | 30 | To restore from a backup, run the following command, with *BACKUP_NAME* being the name of the backup you want to restore from: `velero restore create --from-backup BACKUP_NAME`.
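For instance, a minimal backup-and-restore round trip with the CLI could look like this (the backup and namespace names below are illustrative):

```bash
# Back up a single namespace, check that the backup completed, then restore it.
velero backup create my-backup --include-namespaces my-app
velero backup get
velero restore create --from-backup my-backup
```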
31 | 32 | Next step → [Standalone use](./standalone.md) -------------------------------------------------------------------------------- /examples/argocd-repo/.gitignore: -------------------------------------------------------------------------------- 1 | .vscode/ 2 | .idea/ 3 | .DS_Store 4 | vendor/ 5 | dist/* 6 | ui/dist/app/* 7 | !ui/dist/app/gitkeep 8 | site/ 9 | *.iml 10 | # delve debug binaries 11 | cmd/**/debug 12 | debug.test 13 | coverage.out 14 | test-results 15 | .scannerwork 16 | .scratch 17 | node_modules/ 18 | .kube/ 19 | ./test/cmp/*.sock 20 | .envrc.remote 21 | .*.swp -------------------------------------------------------------------------------- /examples/argocd-repo/apps/external-app/test.json: -------------------------------------------------------------------------------- 1 | { 2 | "externalRepoURL": "https://github.com/example/externalRepo.git", 3 | "valuesFiles": ["test.yaml"], 4 | "vaultCredentials": "external-vault-credentials" 5 | } 6 | -------------------------------------------------------------------------------- /examples/argocd-repo/apps/hello-world/preprod.json: -------------------------------------------------------------------------------- 1 | {} 2 | -------------------------------------------------------------------------------- /examples/argocd-repo/apps/hello-world/prod.json: -------------------------------------------------------------------------------- 1 | {} 2 | -------------------------------------------------------------------------------- /examples/argocd-repo/apps/hello-world/staging.json: -------------------------------------------------------------------------------- 1 | {} 2 | -------------------------------------------------------------------------------- /examples/argocd-repo/apps/secret-helm/base.yaml: -------------------------------------------------------------------------------- 1 | secret2: 2 | -------------------------------------------------------------------------------- /examples/argocd-repo/apps/secret-helm/dev.json: -------------------------------------------------------------------------------- 1 | { 2 | "valuesFiles": ["base.yaml"] 3 | } 4 | -------------------------------------------------------------------------------- /examples/argocd-repo/apps/secret-helm/prod.json: -------------------------------------------------------------------------------- 1 | { 2 | "valuesFiles": ["base.yaml", "prod.yaml"] 3 | } 4 | -------------------------------------------------------------------------------- /examples/argocd-repo/apps/secret-helm/prod.yaml: -------------------------------------------------------------------------------- 1 | secret3: 2 | -------------------------------------------------------------------------------- /examples/argocd-repo/helm/hello-world/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /examples/argocd-repo/helm/hello-world/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | appVersion: 1.16.0 3 | description: A Helm chart for Kubernetes 4 | name: hello-world 5 | type: application 6 | version: 0.1.0 7 | -------------------------------------------------------------------------------- /examples/argocd-repo/helm/hello-world/README.md: -------------------------------------------------------------------------------- 1 | # Hello World 2 | -------------------------------------------------------------------------------- /examples/argocd-repo/helm/hello-world/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 1. Get the application URL by running these commands: 2 | {{- if contains "NodePort" .Values.service.type }} 3 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "hello-world.fullname" . }}) 4 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 5 | echo http://$NODE_IP:$NODE_PORT 6 | {{- else if contains "LoadBalancer" .Values.service.type }} 7 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 8 | You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "hello-world.fullname" . }}' 9 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "hello-world.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") 10 | echo http://$SERVICE_IP:{{ .Values.service.port }} 11 | {{- else if contains "ClusterIP" .Values.service.type }} 12 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "hello-world.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 13 | export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") 14 | echo "Visit http://127.0.0.1:8080 to use your application" 15 | kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /examples/argocd-repo/helm/hello-world/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "hello-world.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name. 
12 | */}} 13 | {{- define "hello-world.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Create chart name and version as used by the chart label. 28 | */}} 29 | {{- define "hello-world.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Common labels 35 | */}} 36 | {{- define "hello-world.labels" -}} 37 | helm.sh/chart: {{ include "hello-world.chart" . }} 38 | {{ include "hello-world.selectorLabels" . }} 39 | {{- if .Chart.AppVersion }} 40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 41 | {{- end }} 42 | app.kubernetes.io/managed-by: {{ .Release.Service }} 43 | {{- end }} 44 | 45 | {{/* 46 | Selector labels 47 | */}} 48 | {{- define "hello-world.selectorLabels" -}} 49 | app.kubernetes.io/name: {{ include "hello-world.name" . }} 50 | app.kubernetes.io/instance: {{ .Release.Name }} 51 | {{- end }} 52 | 53 | {{/* 54 | Create the name of the service account to use 55 | */}} 56 | {{- define "hello-world.serviceAccountName" -}} 57 | {{- if .Values.serviceAccount.create }} 58 | {{- default (include "hello-world.fullname" .) .Values.serviceAccount.name }} 59 | {{- else }} 60 | {{- default "default" .Values.serviceAccount.name }} 61 | {{- end }} 62 | {{- end }} 63 | -------------------------------------------------------------------------------- /examples/argocd-repo/helm/hello-world/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ include "hello-world.fullname" . }} 5 | labels: 6 | {{- include "hello-world.labels" . | nindent 4 }} 7 | spec: 8 | replicas: {{ .Values.replicaCount }} 9 | selector: 10 | matchLabels: 11 | {{- include "hello-world.selectorLabels" . | nindent 6 }} 12 | template: 13 | metadata: 14 | labels: 15 | {{- include "hello-world.selectorLabels" . | nindent 8 }} 16 | spec: 17 | serviceAccountName: {{ include "hello-world.serviceAccountName" . }} 18 | containers: 19 | - name: {{ .Chart.Name }} 20 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" 21 | imagePullPolicy: {{ .Values.image.pullPolicy }} 22 | ports: 23 | - name: http 24 | containerPort: 80 25 | protocol: TCP 26 | livenessProbe: 27 | httpGet: 28 | path: / 29 | port: http 30 | readinessProbe: 31 | httpGet: 32 | path: / 33 | port: http 34 | -------------------------------------------------------------------------------- /examples/argocd-repo/helm/hello-world/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "hello-world.fullname" . }} 5 | labels: 6 | {{- include "hello-world.labels" . | nindent 4 }} 7 | spec: 8 | type: {{ .Values.service.type }} 9 | ports: 10 | - port: {{ .Values.service.port }} 11 | targetPort: http 12 | protocol: TCP 13 | name: http 14 | selector: 15 | {{- include "hello-world.selectorLabels" . 
| nindent 4 }} 16 | -------------------------------------------------------------------------------- /examples/argocd-repo/helm/hello-world/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "hello-world.serviceAccountName" . }} 6 | labels: 7 | {{- include "hello-world.labels" . | nindent 4 }} 8 | {{- with .Values.serviceAccount.annotations }} 9 | annotations: 10 | {{- toYaml . | nindent 4 }} 11 | {{- end }} 12 | {{- end }} 13 | -------------------------------------------------------------------------------- /examples/argocd-repo/helm/hello-world/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for hello-world. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | replicaCount: 1 6 | 7 | image: 8 | repository: nginx 9 | pullPolicy: IfNotPresent 10 | # Overrides the image tag whose default is the chart appVersion. 11 | tag: "" 12 | 13 | nameOverride: "" 14 | fullnameOverride: "" 15 | 16 | serviceAccount: 17 | # Specifies whether a service account should be created 18 | create: true 19 | # Annotations to add to the service account 20 | annotations: {} 21 | # The name of the service account to use. 22 | # If not set and create is true, a name is generated using the fullname template 23 | name: "" 24 | 25 | service: 26 | type: ClusterIP 27 | port: 80 28 | -------------------------------------------------------------------------------- /examples/argocd-repo/helm/secret-helm/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /examples/argocd-repo/helm/secret-helm/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: secret-helm 3 | description: A Helm chart for Kubernetes 4 | 5 | # A chart can be either an 'application' or a 'library' chart. 6 | # 7 | # Application charts are a collection of templates that can be packaged into versioned archives 8 | # to be deployed. 9 | # 10 | # Library charts provide useful utilities or functions for the chart developer. They're included as 11 | # a dependency of application charts to inject those utilities and functions into the rendering 12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed. 13 | type: application 14 | 15 | # This is the chart version. This version number should be incremented each time you make changes 16 | # to the chart and its templates, including the app version. 17 | # Versions are expected to follow Semantic Versioning (https://semver.org/) 18 | version: 0.1.0 19 | 20 | # This is the version number of the application being deployed. This version number should be 21 | # incremented each time you make changes to the application. 
Versions are not expected to 22 | # follow Semantic Versioning. They should reflect the version the application is using. 23 | # It is recommended to use it with quotes. 24 | appVersion: "1.16.0" 25 | -------------------------------------------------------------------------------- /examples/argocd-repo/helm/secret-helm/templates/secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: secret-helm 5 | type: Opaque 6 | data: 7 | secret1: {{ .Values.secret1 | b64enc }} 8 | {{- if .Values.secret2 }} 9 | secret2: {{ .Values.secret2 | b64enc }} 10 | {{- end }} 11 | {{- if .Values.secret3 }} 12 | secret3: {{ .Values.secret3 | b64enc }} 13 | {{- end }} -------------------------------------------------------------------------------- /examples/argocd-repo/helm/secret-helm/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for secret-helm. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | secret1: 6 | secret2: "" 7 | secret3: "" 8 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: Kubic 2 | site_url: https://openfun.github.io/kubic/ 3 | site_description: A Kubernetes deployment as code to OVH or Scaleway, with Prometheus, ArgoCD, Velero and Hashicorp Vault 4 | site_author: Open FUN (France Universite Numerique) 5 | 6 | repo_name: openfun/kubic 7 | repo_url: https://github.com/openfun/kubic/ 8 | 9 | copyright: 2023-present France Université Numérique 10 | 11 | theme: 12 | name: material 13 | palette: 14 | primary: deep purple 15 | 16 | nav: 17 | - Getting started: "index.md" 18 | - Deployments: 19 | - Manual: "cluster-manual.md" 20 | - Automatic: "cluster-auto.md" 21 | - Hashicorp Vault: "hashicorp-vault.md" 22 | - ArgoCD: "argocd.md" 23 | - Velero: "velero.md" 24 | - Standalone: "standalone.md" 25 | 26 | plugins: 27 | - search 28 | -------------------------------------------------------------------------------- /ovh/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/gavinbunney/kubectl" { 5 | version = "1.14.0" 6 | constraints = "~> 1.14.0" 7 | hashes = [ 8 | "h1:ItrWfCZMzM2JmvDncihBMalNLutsAk7kyyxVRaipftY=", 9 | "h1:gLFn+RvP37sVzp9qnFCwngRjjFV649r6apjxvJ1E/SE=", 10 | "h1:mX2AOFIMIxJmW5kM8DT51gloIOKCr9iT6W8yodnUyfs=", 11 | "zh:0350f3122ff711984bbc36f6093c1fe19043173fad5a904bce27f86afe3cc858", 12 | "zh:07ca36c7aa7533e8325b38232c77c04d6ef1081cb0bac9d56e8ccd51f12f2030", 13 | "zh:0c351afd91d9e994a71fe64bbd1662d0024006b3493bb61d46c23ea3e42a7cf5", 14 | "zh:39f1a0aa1d589a7e815b62b5aa11041040903b061672c4cfc7de38622866cbc4", 15 | "zh:428d3a321043b78e23c91a8d641f2d08d6b97f74c195c654f04d2c455e017de5", 16 | "zh:4baf5b1de2dfe9968cc0f57fd4be5a741deb5b34ee0989519267697af5f3eee5", 17 | "zh:6131a927f9dffa014ab5ca5364ac965fe9b19830d2bbf916a5b2865b956fdfcf", 18 | "zh:c62e0c9fd052cbf68c5c2612af4f6408c61c7e37b615dc347918d2442dd05e93", 19 | "zh:f0beffd7ce78f49ead612e4b1aefb7cb6a461d040428f514f4f9cc4e5698ac65", 20 | ] 21 | } 22 | 23 | provider "registry.terraform.io/hashicorp/helm" { 24 | version = "2.12.1" 25 | constraints = "~> 2.12.0" 26 | hashes = [ 27 | "h1:7wfYOAeSEchHB8idNl+2jf+OkFi9zFSOLWkEZFuTCik=", 28 | "h1:E2QwIJNtRF/ePP/3Jr5ur/SJRSPqljzUCJDfQGLgMq4=", 29 | "h1:HGilqNGo1KArhsJ9QJDenXhTmOlnA4t9RVQUeEFxi/4=", 30 | "h1:OABttY6YdBeCNK5xNuVxCK7sJtneHnHxCYvYNEeNSqE=", 31 | "h1:aBfcqM4cbywa7TAxfT1YoFS+Cst9waerlm4XErFmJlk=", 32 | "h1:fBTUyEb263IVI8j2aBbioEBjQ4jnnKo4MM3Y0fLPYsk=", 33 | "h1:iPGKXwn/PxspdFvbo3TnaK7W2YKhIzP7t9THn5ZPJQk=", 34 | "h1:pEDM3vsn4FOExaGacenYfyLhV9m3I+PGwITjFDg5rko=", 35 | "h1:sgYI7lwGqJqPopY3NGmhb1eQ0YbH8PIXaAZAmnJrAvw=", 36 | "h1:sjzfyNQAjtF9zXHxB67geryjGkHaPDMMVw9iqPP5pkE=", 37 | "h1:xwHVa6ab/XVfDrZ3h35OzLJ6g0Zte4VAvSnyKw3f9AI=", 38 | "zh:1d623fb1662703f2feb7860e3c795d849c77640eecbc5a776784d08807b15004", 39 | "zh:253a5bc62ba2c4314875139e3fbd2feaad5ef6b0fb420302a474ab49e8e51a38", 40 | "zh:282358f4ad4f20d0ccaab670b8645228bfad1c03ac0d0df5889f0aea8aeac01a", 41 | "zh:4fd06af3091a382b3f0d8f0a60880f59640d2b6d9d6a31f9a873c6f1bde1ec50", 42 | "zh:6816976b1830f5629ae279569175e88b497abbbac30ee809948a1f923c67a80d", 43 | "zh:7d82c4150cdbf48cfeec867be94c7b9bd7682474d4df0ebb7e24e148f964844f", 44 | "zh:83f062049eea2513118a4c6054fb06c8600bac96196f25aed2cc21898ec86e93", 45 | "zh:a79eec0cf4c08fca79e44033ec6e470f25ff23c3e2c7f9bc707ed7771c1072c0", 46 | "zh:b2b2d904b2821a6e579910320605bc478bbef063579a23fbfdd6fcb5871b81f8", 47 | "zh:e91177ca06a15487fc570cb81ecef6359aa399459ea2aa7c4f7367ba86f6fcad", 48 | "zh:e976bcb82996fc4968f8382bbcb6673efb1f586bf92074058a232028d97825b1", 49 | "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", 50 | ] 51 | } 52 | 53 | provider "registry.terraform.io/hashicorp/kubernetes" { 54 | version = "2.26.0" 55 | constraints = "~> 2.26.0" 56 | hashes = [ 57 | "h1:+Vny6dC1+K6fa+23qF2bh+9pjOCh/zo+EPkry7SL+0g=", 58 | "h1:0MjgW/qkJH4udWwTiSpjqA8AmEjVwrQzpkV2bu8+LhY=", 59 | "h1:1FzOq58nk7VxtO94l0Q3L1NmvzGqrRBpUc6UKv4fyqY=", 60 | "h1:HG0vDDKiBPRJK/2p5EvrON0H3nCl8tW7yDZ7WPt3Zq8=", 61 | "h1:LxZk5Vc0TnfeLYnp7HXZui53PEV+gFd+mznBzdNm+po=", 62 | "h1:MMxX/EY9AEGwp5DbGQ+LTd3c9YmjwrnPJHLlyc9u0eU=", 63 | "h1:cqXQ+gBtpLOObtuxc7JK3zStu6jR1wzzGDe5AuDsIrg=", 64 | "h1:h+KamkYSY9zaq6qtmb4y7iqegTjZ0z8GZ54lm7vKJMg=", 65 | "h1:lq3nuBjqNrRXtdhAmyhMM579BRZwhzW9W+LC5jinP4c=", 66 | "h1:vTbi/tiJQS8Wto3LLxZ/WWPcptqaMpQlT33s61WTV9Q=", 67 | "h1:wSFDvzim4kD1iieFFuQJ+cd/TqmpHJTomnK4ktj1mrw=", 68 | "zh:3f8ee1bffab1ba4f6ae549daae1648974214880d3606b6821cb0aceb365284a4", 69 | 
"zh:5596b1248231cc3b8f6a98f5b78df7120cd3153fd2b34b369dc20356a75bf35b", 70 | "zh:64420c9e4aa49c5e443afcd60f3e8d293ea6bd78797d402e21e23605f7757954", 71 | "zh:8327a488854e15f8d7eaf8272c3b9d6d1d9a6e68212a8dcb111d7b4023aac6b5", 72 | "zh:94c1c9b65280847d28a3e90e5046650858ac0bf87feefd2349336444e21e68e8", 73 | "zh:a3fb0b0b4bfd1844bb94011ae80111cedc188085235cf466313ca2151e75c8ca", 74 | "zh:ab5e381928144e0c2a9d9768a48e38797642e5c5fb2184370c7c08df500e5db3", 75 | "zh:da78995e8d6daf3acfd4c455ebbd12f6bf154cadf455f14ef35c0862e58dd2ec", 76 | "zh:e24cdd5b90196df93215f40d821af3a7b4473c53992be4c3038940d117a50eb4", 77 | "zh:e632efb3bce6d089b7c08507660af8b2c5e3f94c34fe401bfa228f154405e26e", 78 | "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", 79 | "zh:f5aea9da0eba25d35fee49db193c4b44cd3746a5578065092c62a53077e50b84", 80 | ] 81 | } 82 | 83 | provider "registry.terraform.io/hashicorp/null" { 84 | version = "3.2.1" 85 | hashes = [ 86 | "h1:FbGfc+muBsC17Ohy5g806iuI1hQc4SIexpYCrQHQd8w=", 87 | "h1:tSj1mL6OQ8ILGqR2mDu7OYYYWf+hoir0pf9KAQ8IzO8=", 88 | "h1:ydA0/SNRVB1o95btfshvYsmxA+jZFRZcvKzZSB+4S1M=", 89 | "zh:58ed64389620cc7b82f01332e27723856422820cfd302e304b5f6c3436fb9840", 90 | "zh:62a5cc82c3b2ddef7ef3a6f2fedb7b9b3deff4ab7b414938b08e51d6e8be87cb", 91 | "zh:63cff4de03af983175a7e37e52d4bd89d990be256b16b5c7f919aff5ad485aa5", 92 | "zh:74cb22c6700e48486b7cabefa10b33b801dfcab56f1a6ac9b6624531f3d36ea3", 93 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 94 | "zh:79e553aff77f1cfa9012a2218b8238dd672ea5e1b2924775ac9ac24d2a75c238", 95 | "zh:a1e06ddda0b5ac48f7e7c7d59e1ab5a4073bbcf876c73c0299e4610ed53859dc", 96 | "zh:c37a97090f1a82222925d45d84483b2aa702ef7ab66532af6cbcfb567818b970", 97 | "zh:e4453fbebf90c53ca3323a92e7ca0f9961427d2f0ce0d2b65523cc04d5d999c2", 98 | "zh:e80a746921946d8b6761e77305b752ad188da60688cfd2059322875d363be5f5", 99 | "zh:fbdb892d9822ed0e4cb60f2fedbdbb556e4da0d88d3b942ae963ed6ff091e48f", 100 | "zh:fca01a623d90d0cad0843102f9b8b9fe0d3ff8244593bd817f126582b52dd694", 101 | ] 102 | } 103 | 104 | provider "registry.terraform.io/hashicorp/tls" { 105 | version = "4.0.4" 106 | hashes = [ 107 | "h1:GZcFizg5ZT2VrpwvxGBHQ/hO9r6g0vYdQqx3bFD3anY=", 108 | "h1:Wd3RqmQW60k2QWPN4sK5CtjGuO1d+CRNXgC+D4rKtXc=", 109 | "h1:pe9vq86dZZKCm+8k1RhzARwENslF3SXb9ErHbQfgjXU=", 110 | "zh:23671ed83e1fcf79745534841e10291bbf34046b27d6e68a5d0aab77206f4a55", 111 | "zh:45292421211ffd9e8e3eb3655677700e3c5047f71d8f7650d2ce30242335f848", 112 | "zh:59fedb519f4433c0fdb1d58b27c210b27415fddd0cd73c5312530b4309c088be", 113 | "zh:5a8eec2409a9ff7cd0758a9d818c74bcba92a240e6c5e54b99df68fff312bbd5", 114 | "zh:5e6a4b39f3171f53292ab88058a59e64825f2b842760a4869e64dc1dc093d1fe", 115 | "zh:810547d0bf9311d21c81cc306126d3547e7bd3f194fc295836acf164b9f8424e", 116 | "zh:824a5f3617624243bed0259d7dd37d76017097dc3193dac669be342b90b2ab48", 117 | "zh:9361ccc7048be5dcbc2fafe2d8216939765b3160bd52734f7a9fd917a39ecbd8", 118 | "zh:aa02ea625aaf672e649296bce7580f62d724268189fe9ad7c1b36bb0fa12fa60", 119 | "zh:c71b4cd40d6ec7815dfeefd57d88bc592c0c42f5e5858dcc88245d371b4b8b1e", 120 | "zh:dabcd52f36b43d250a3d71ad7abfa07b5622c69068d989e60b79b2bb4f220316", 121 | "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", 122 | ] 123 | } 124 | 125 | provider "registry.terraform.io/ovh/ovh" { 126 | version = "0.34.0" 127 | constraints = "~> 0.34.0" 128 | hashes = [ 129 | "h1:6OoAOkhmwTAfwJ/VsB0CaYHX0h3PLe7KSVgrG5dwlkE=", 130 | "h1:9L+ybN4n8LJ3mko3FQKob1AsFAQ2tmlgXE0S4XwZeTs=", 131 | "h1:E5u8Fr+1guCxkyeM1uc0BFA2+pcxz9Dlg5eRaDB+7+I=", 
132 | "h1:FUIC8w9zXLq+FNYal7v3dSJeeEWdNTW1zyzzthqRRN8=", 133 | "h1:N8MZPCfc6ULXtQwbNBc4tk98qSMkmtfwPfnuQK0nRUM=", 134 | "h1:R/xzm4e8sUCzI2MJod0E0JPpwVEkm4DmNoxgJTMAbn8=", 135 | "h1:WbC0rsOT9B731cJiBcI7XSknitdficD+Bsc5lU8C80g=", 136 | "h1:clrbGr1KvFmXtInwGMRhWv+xJw5R8TffUFnJhGMZmRM=", 137 | "h1:g5xFbjYvkcc987hh8ZhsWTcUutvR1t0YRFXoT6vwtGE=", 138 | "h1:kq5IVpK8CyqpZk71kVmoj4KSc22U2S9Y+0EIJL+1vsE=", 139 | "h1:sQW+xVAca6PP3Zm4qJuJYVpPlEfhCekoKYf/XIZFi/Y=", 140 | "h1:uT79Tz+G9wb9RzWDt8q/tGIvoYU1H7acoAIcPcfi7CE=", 141 | "h1:vatrn1vnc9D6Y7MZ2ZeOvgnWrTa+WNQD4V9kJlHUKU4=", 142 | ] 143 | } 144 | -------------------------------------------------------------------------------- /ovh/argocd-apps-values.yaml.tftpl: -------------------------------------------------------------------------------- 1 | ../common/argocd-apps-values.yaml.tftpl -------------------------------------------------------------------------------- /ovh/argocd-values.yaml.tftpl: -------------------------------------------------------------------------------- 1 | ../common/argocd-values.yaml.tftpl -------------------------------------------------------------------------------- /ovh/argocd.tf: -------------------------------------------------------------------------------- 1 | ../common/argocd.tf -------------------------------------------------------------------------------- /ovh/backend.conf.example: -------------------------------------------------------------------------------- 1 | bucket = "" 2 | key = "terraform-ovh.tfstate" 3 | region = "gra" # The region of your bucket, default is gra for OVHcloud 4 | access_key = "" 5 | secret_key = "" 6 | endpoint = "https://s3.gra.io.cloud.ovh.net" # The endpoint of your bucket, default is https://s3.gra.io.cloud.ovh.net for OVHcloud 7 | skip_region_validation = true # AWS does not recognize GRA or SBG as valid regions 8 | skip_credentials_validation = true # OVH does not owns a STS service 9 | -------------------------------------------------------------------------------- /ovh/cert-manager.tf: -------------------------------------------------------------------------------- 1 | ../common/cert-manager.tf -------------------------------------------------------------------------------- /ovh/credentials.auto.tfvars.json.template: -------------------------------------------------------------------------------- 1 | { 2 | "application_key": "", 3 | "application_secret": "", 4 | "consumer_key": "" 5 | } -------------------------------------------------------------------------------- /ovh/hashicorp-vault.tf: -------------------------------------------------------------------------------- 1 | ../common/hashicorp-vault.tf -------------------------------------------------------------------------------- /ovh/ingress-nginx.tf: -------------------------------------------------------------------------------- 1 | resource "helm_release" "ingress-nginx" { 2 | name = "ingress-nginx" 3 | namespace = "ingress-nginx" 4 | create_namespace = true 5 | 6 | repository = "https://kubernetes.github.io/ingress-nginx" 7 | chart = "ingress-nginx" 8 | 9 | set { 10 | name = "controller.service.type" 11 | value = "LoadBalancer" 12 | } 13 | 14 | set { 15 | name = "controller.metrics.enabled" 16 | value = "true" 17 | } 18 | 19 | set { 20 | name = "controller.metrics.serviceMonitor.enabled" 21 | value = "true" 22 | } 23 | 24 | set { 25 | name = "controller.metrics.serviceMonitor.additionalLabels.release" 26 | value = "prometheus" 27 | } 28 | 29 | set { 30 | name = "controller.extraArgs.enable-ssl-passthrough" 31 | value = "true" 
32 | } 33 | depends_on = [ 34 | helm_release.kube-prometheus, 35 | helm_release.cert_manager 36 | ] 37 | } 38 | 39 | data "kubernetes_service" "ingress-svc" { 40 | metadata { 41 | name = "ingress-nginx-controller" 42 | namespace = helm_release.ingress-nginx.namespace 43 | } 44 | depends_on = [ 45 | helm_release.ingress-nginx 46 | ] 47 | } 48 | 49 | output "ingress_ip" { 50 | value = data.kubernetes_service.ingress-svc.status[0].load_balancer[0].ingress[0].ip 51 | description = "Address of the load balancer" 52 | } 53 | 54 | resource "null_resource" "ingress-nginx" { 55 | depends_on = [ 56 | ovh_cloud_project_kube_nodepool.pool 57 | ] 58 | } 59 | 60 | -------------------------------------------------------------------------------- /ovh/issuer.yml.tftpl: -------------------------------------------------------------------------------- 1 | ../common/issuer.yml.tftpl -------------------------------------------------------------------------------- /ovh/kubernetes.tf: -------------------------------------------------------------------------------- 1 | resource "ovh_cloud_project_kube" "cluster" { 2 | service_name = var.ovh_public_cloud_project_id 3 | name = var.k8s_cluster_name 4 | region = var.k8s_cluster_region 5 | version = var.k8s_cluster_version 6 | } 7 | 8 | resource "ovh_cloud_project_kube_nodepool" "pool" { 9 | service_name = var.ovh_public_cloud_project_id 10 | kube_id = ovh_cloud_project_kube.cluster.id 11 | name = var.k8s_nodepool_name 12 | flavor_name = var.k8s_nodepool_flavor 13 | monthly_billed = var.k8s_nodepool_monthly_billed 14 | min_nodes = var.k8s_nodepool_min_nodes 15 | max_nodes = var.k8s_nodepool_max_nodes 16 | desired_nodes = var.k8s_nodepool_desired_nodes 17 | autoscale = var.k8s_nodepool_autoscale 18 | 19 | timeouts { 20 | create = "30m" 21 | update = "30m" 22 | delete = "30m" 23 | } 24 | } 25 | 26 | output "kubeconfig" { 27 | value = ovh_cloud_project_kube.cluster.kubeconfig 28 | description = "The kubeconfig to access the cluster" 29 | sensitive = true 30 | } 31 | 32 | output "nodesurl" { 33 | description = "The URL to access the cluster nodes" 34 | value = ovh_cloud_project_kube.cluster.nodes_url 35 | } 36 | 37 | output "url" { 38 | value = ovh_cloud_project_kube.cluster.url 39 | description = "The URL to access the cluster" 40 | } 41 | -------------------------------------------------------------------------------- /ovh/locals.tf: -------------------------------------------------------------------------------- 1 | ../common/locals.tf -------------------------------------------------------------------------------- /ovh/loki-values.yml: -------------------------------------------------------------------------------- 1 | ../common/loki-values.yml -------------------------------------------------------------------------------- /ovh/loki.tf: -------------------------------------------------------------------------------- 1 | ../common/loki.tf -------------------------------------------------------------------------------- /ovh/prom-grafana-values.yml: -------------------------------------------------------------------------------- 1 | ../common/prom-grafana-values.yml -------------------------------------------------------------------------------- /ovh/prometheus-grafana.tf: -------------------------------------------------------------------------------- 1 | ../common/prometheus-grafana.tf -------------------------------------------------------------------------------- /ovh/promtail-values.yml: -------------------------------------------------------------------------------- 1
| ../common/promtail-values.yml -------------------------------------------------------------------------------- /ovh/promtail.tf: -------------------------------------------------------------------------------- 1 | ../common/promtail.tf -------------------------------------------------------------------------------- /ovh/terraform.tf: -------------------------------------------------------------------------------- 1 | # This Terraform project is used to create a Kubernetes cluster on OVHcloud, along with an ingress controller and a default node pool. 2 | 3 | terraform { 4 | 5 | backend "s3" { 6 | } 7 | required_providers { 8 | ovh = { 9 | source = "ovh/ovh" 10 | version = "~> 0.37.0" 11 | } 12 | helm = { 13 | source = "hashicorp/helm" 14 | version = "~> 2.12.0" 15 | } 16 | kubectl = { 17 | source = "gavinbunney/kubectl" 18 | version = "~> 1.14.0" 19 | } 20 | kubernetes = { 21 | source = "hashicorp/kubernetes" 22 | version = "~> 2.26.0" 23 | } 24 | } 25 | } 26 | 27 | provider "ovh" { 28 | endpoint = "ovh-eu" 29 | application_key = var.application_key 30 | application_secret = var.application_secret 31 | consumer_key = var.consumer_key 32 | } 33 | 34 | provider "helm" { 35 | kubernetes { 36 | host = ovh_cloud_project_kube.cluster.kubeconfig_attributes[0].host 37 | client_certificate = base64decode(ovh_cloud_project_kube.cluster.kubeconfig_attributes[0].client_certificate) 38 | client_key = base64decode(ovh_cloud_project_kube.cluster.kubeconfig_attributes[0].client_key) 39 | cluster_ca_certificate = base64decode(ovh_cloud_project_kube.cluster.kubeconfig_attributes[0].cluster_ca_certificate) 40 | } 41 | } 42 | provider "kubectl" { 43 | host = ovh_cloud_project_kube.cluster.kubeconfig_attributes[0].host 44 | client_certificate = base64decode(ovh_cloud_project_kube.cluster.kubeconfig_attributes[0].client_certificate) 45 | client_key = base64decode(ovh_cloud_project_kube.cluster.kubeconfig_attributes[0].client_key) 46 | cluster_ca_certificate = base64decode(ovh_cloud_project_kube.cluster.kubeconfig_attributes[0].cluster_ca_certificate) 47 | load_config_file = false 48 | } 49 | provider "kubernetes" { 50 | host = ovh_cloud_project_kube.cluster.kubeconfig_attributes[0].host 51 | client_certificate = base64decode(ovh_cloud_project_kube.cluster.kubeconfig_attributes[0].client_certificate) 52 | client_key = base64decode(ovh_cloud_project_kube.cluster.kubeconfig_attributes[0].client_key) 53 | cluster_ca_certificate = base64decode(ovh_cloud_project_kube.cluster.kubeconfig_attributes[0].cluster_ca_certificate) 54 | } 55 | -------------------------------------------------------------------------------- /ovh/terraform.tfvars.example: -------------------------------------------------------------------------------- 1 | ######################################################################################### 2 | # # 3 | # This file contains example values to help during manual configuration # 4 | # # 5 | ######################################################################################### 6 | 7 | application_key = "a73f55cccbff3fdb" 8 | application_secret = "3cfec173af31c5686298c0c149b3b4ad" 9 | consumer_key = "359f134dfafaa3894b75c212113f7705" 10 | ovh_public_cloud_project_id = "2b33cd4c4eb3c1c7d8e9a95541aef0dd" 11 | k8s_cluster_name = "my_cluster" 12 | k8s_cluster_region = "SBG5" 13 | k8s_cluster_version = "1.26" 14 | k8s_nodepool_name = "default-pool" 15 | k8s_nodepool_flavor = "d2-8" 16 | k8s_nodepool_monthly_billed = false 17 | k8s_nodepool_min_nodes = "2" 18 | k8s_nodepool_max_nodes = "10" 19 | 
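# argocd_password is a bcrypt hash of the ArgoCD admin password, not the
# clear-text value; one way to generate such a hash, assuming the argocd CLI
# is installed:
#   argocd account bcrypt --password 'your-password'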
argocd_hostname = "argocd.kubic.example" 20 | argocd_password = "$2a$10$QAlYJhAr2QMETwDeqyEKReXttl3P1BUbMvoukKCL9nZwauYbg0mN6" 21 | argocd_repo_url = "https://github.com/example/argocd.git" 22 | argocd_repo_username = "argocd" 23 | argocd_repo_password = "access_token" 24 | main_cluster_issuer_name = "letsencrypt-prod" 25 | issuers = [ 26 | { 27 | name = "letsencrypt-prod" 28 | server = "https://acme-v02.api.letsencrypt.org/directory" 29 | email = "yourname@example.com", 30 | private_key_secret_name = "letsencrypt-prod" 31 | }, { 32 | name = "letsencrypt-staging" 33 | server = "https://acme-staging-v02.api.letsencrypt.org/directory" 34 | email = "yourname+1@example.com" 35 | private_key_secret_name = "letsencrypt-staging" 36 | } 37 | ] 38 | grafana_hostname = "grafana.kubic.example" 39 | grafana_admin_password = "change_me" 40 | grafana_persistence_enabled = true 41 | grafana_persistence_size = "10Gi" 42 | prometheus_persistence_enabled = true 43 | prometheus_persistence_size = "20Gi" 44 | vault_server_hostname = "vault.kubic.example" 45 | install_hashicorp_vault = true 46 | vault_ui = true 47 | velero_version = "4.0.2" 48 | velero_s3_bucket_name = "velero_bucket" 49 | velero_s3_bucket_region = "gra" 50 | velero_s3_bucket_endpoint = "https://s3.gra.io.cloud.ovh.net" 51 | velero_s3_access_key_id = "acb2c7815132b7157b881b83a002b3f" 52 | velero_s3_secret_access_key = "2d0f550387fa8a2073f3a8fc2062e28" 53 | velero_default_volumes_to_fs_backup = false 54 | loki_enabled = "true" 55 | loki_s3_chunks_bucket_name = "my_cluster-loki-chunks" 56 | loki_s3_ruler_bucket_name = "my_cluster-loki-ruler" 57 | loki_s3_admin_bucket_name = "my_cluster-loki-admin" 58 | loki_s3_bucket_region = "gra" 59 | loki_s3_bucket_endpoint = "https://s3.gra.io.cloud.ovh.net/" 60 | loki_s3_access_key_id = "ebc294815132b7157b881b83a009f5ac" 61 | loki_s3_secret_access_key = "2d0f550387fa8a2073f3a8fc29e4ab6" 62 | -------------------------------------------------------------------------------- /ovh/terraform.tfvars.template: -------------------------------------------------------------------------------- 1 | application_key="" 2 | application_secret="" 3 | consumer_key="" 4 | ovh_public_cloud_project_id="" 5 | k8s_cluster_name="" 6 | k8s_cluster_region="" 7 | k8s_cluster_version="" 8 | k8s_nodepool_name="" 9 | k8s_nodepool_flavor="" 10 | k8s_nodepool_monthly_billed="" 11 | k8s_nodepool_min_nodes="" 12 | k8s_nodepool_max_nodes="" 13 | argocd_hostname="" 14 | argocd_password="" 15 | argocd_repo_url="" 16 | argocd_repo_username="" 17 | argocd_repo_password="" 18 | main_cluster_issuer_name="" 19 | issuers = [ 20 | { 21 | name = "letsencrypt-prod" 22 | server = "https://acme-v02.api.letsencrypt.org/directory" 23 | email = "", 24 | private_key_secret_name = "letsencrypt-prod" 25 | }, { 26 | name = "letsencrypt-staging" 27 | server = "https://acme-staging-v02.api.letsencrypt.org/directory" 28 | email = "" 29 | private_key_secret_name = "letsencrypt-staging" 30 | } 31 | ] 32 | grafana_hostname="" 33 | grafana_admin_password="" 34 | grafana_persistence_enabled = 35 | grafana_persistence_size = "" 36 | prometheus_persistence_enabled = 37 | prometheus_persistence_size = "" 38 | vault_server_hostname="" 39 | install_hashicorp_vault=true 40 | vault_ui=true 41 | velero_version="" 42 | velero_s3_bucket_name="" 43 | velero_s3_bucket_region="" 44 | velero_s3_bucket_endpoint="" 45 | velero_s3_access_key_id="" 46 | velero_s3_secret_access_key="" 47 | velero_default_volumes_to_fs_backup=false 48 | loki_enabled = "" 49 | loki_s3_chunks_bucket_name = 
"" 50 | loki_s3_ruler_bucket_name = "" 51 | loki_s3_admin_bucket_name = "" 52 | loki_s3_bucket_region = "" 53 | loki_s3_bucket_endpoint = "" 54 | loki_s3_access_key_id = "" 55 | loki_s3_secret_access_key = "" 56 | -------------------------------------------------------------------------------- /ovh/tls-vault.tf: -------------------------------------------------------------------------------- 1 | ../common/tls-vault.tf -------------------------------------------------------------------------------- /ovh/variables-common.tf: -------------------------------------------------------------------------------- 1 | ../common/variables.tf -------------------------------------------------------------------------------- /ovh/variables.tf: -------------------------------------------------------------------------------- 1 | variable "application_key" { 2 | type = string 3 | description = "The application key to use for OVH API calls" 4 | sensitive = true 5 | } 6 | 7 | variable "application_secret" { 8 | type = string 9 | description = "The application secret to use for OVH API calls" 10 | sensitive = true 11 | } 12 | 13 | variable "consumer_key" { 14 | type = string 15 | description = "The consumer key to use for OVH API calls" 16 | sensitive = true 17 | } 18 | 19 | variable "ovh_public_cloud_project_id" { 20 | type = string 21 | description = "The OVH public cloud project id" 22 | } 23 | 24 | variable "k8s_cluster_name" { 25 | type = string 26 | description = "The name to use for the cluster" 27 | default = "my_cluster" 28 | } 29 | 30 | variable "k8s_cluster_region" { 31 | type = string 32 | description = "The region to use for the cluster" 33 | default = "SBG5" 34 | } 35 | 36 | variable "k8s_cluster_version" { 37 | type = string 38 | description = "The version to use for the cluster" 39 | default = "1.26" 40 | } 41 | 42 | variable "k8s_nodepool_name" { 43 | type = string 44 | description = "The name to use for the nodepool" 45 | default = "default-pool" 46 | } 47 | 48 | variable "k8s_nodepool_flavor" { 49 | type = string 50 | description = "The flavor to use for the nodepool" 51 | default = "d2-8" 52 | } 53 | 54 | variable "k8s_nodepool_monthly_billed" { 55 | type = bool 56 | description = "Whether the nodepool should be billed monthly or hourly" 57 | default = false 58 | } 59 | 60 | variable "k8s_nodepool_min_nodes" { 61 | type = number 62 | description = "The minimum number of nodes to use for the nodepool" 63 | default = 2 64 | } 65 | 66 | variable "k8s_nodepool_max_nodes" { 67 | type = number 68 | description = "The maximum number of nodes to use for the nodepool" 69 | default = 10 70 | } 71 | 72 | variable "k8s_nodepool_desired_nodes" { 73 | type = number 74 | description = "The desired number of nodes to use for the nodepool" 75 | default = 2 76 | } 77 | 78 | variable "k8s_nodepool_autoscale" { 79 | type = bool 80 | description = "Enable autoscaling feature (WIP)" 81 | default = false 82 | } 83 | -------------------------------------------------------------------------------- /ovh/vault-values.yml: -------------------------------------------------------------------------------- 1 | ../common/vault-values.yml -------------------------------------------------------------------------------- /ovh/velero-credentials: -------------------------------------------------------------------------------- 1 | ../common/velero-credentials -------------------------------------------------------------------------------- /ovh/velero-values.yml: 
-------------------------------------------------------------------------------- 1 | ../common/velero-values.yml -------------------------------------------------------------------------------- /ovh/velero.tf: -------------------------------------------------------------------------------- 1 | ../common/velero.tf -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:base" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /scaleway/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/gavinbunney/kubectl" { 5 | version = "1.14.0" 6 | constraints = "~> 1.14.0" 7 | hashes = [ 8 | "h1:ItrWfCZMzM2JmvDncihBMalNLutsAk7kyyxVRaipftY=", 9 | "h1:gLFn+RvP37sVzp9qnFCwngRjjFV649r6apjxvJ1E/SE=", 10 | "h1:mX2AOFIMIxJmW5kM8DT51gloIOKCr9iT6W8yodnUyfs=", 11 | "zh:0350f3122ff711984bbc36f6093c1fe19043173fad5a904bce27f86afe3cc858", 12 | "zh:07ca36c7aa7533e8325b38232c77c04d6ef1081cb0bac9d56e8ccd51f12f2030", 13 | "zh:0c351afd91d9e994a71fe64bbd1662d0024006b3493bb61d46c23ea3e42a7cf5", 14 | "zh:39f1a0aa1d589a7e815b62b5aa11041040903b061672c4cfc7de38622866cbc4", 15 | "zh:428d3a321043b78e23c91a8d641f2d08d6b97f74c195c654f04d2c455e017de5", 16 | "zh:4baf5b1de2dfe9968cc0f57fd4be5a741deb5b34ee0989519267697af5f3eee5", 17 | "zh:6131a927f9dffa014ab5ca5364ac965fe9b19830d2bbf916a5b2865b956fdfcf", 18 | "zh:c62e0c9fd052cbf68c5c2612af4f6408c61c7e37b615dc347918d2442dd05e93", 19 | "zh:f0beffd7ce78f49ead612e4b1aefb7cb6a461d040428f514f4f9cc4e5698ac65", 20 | ] 21 | } 22 | 23 | provider "registry.terraform.io/hashicorp/helm" { 24 | version = "2.12.1" 25 | constraints = "~> 2.12.0" 26 | hashes = [ 27 | "h1:7wfYOAeSEchHB8idNl+2jf+OkFi9zFSOLWkEZFuTCik=", 28 | "h1:E2QwIJNtRF/ePP/3Jr5ur/SJRSPqljzUCJDfQGLgMq4=", 29 | "h1:HGilqNGo1KArhsJ9QJDenXhTmOlnA4t9RVQUeEFxi/4=", 30 | "h1:OABttY6YdBeCNK5xNuVxCK7sJtneHnHxCYvYNEeNSqE=", 31 | "h1:aBfcqM4cbywa7TAxfT1YoFS+Cst9waerlm4XErFmJlk=", 32 | "h1:fBTUyEb263IVI8j2aBbioEBjQ4jnnKo4MM3Y0fLPYsk=", 33 | "h1:iPGKXwn/PxspdFvbo3TnaK7W2YKhIzP7t9THn5ZPJQk=", 34 | "h1:pEDM3vsn4FOExaGacenYfyLhV9m3I+PGwITjFDg5rko=", 35 | "h1:sgYI7lwGqJqPopY3NGmhb1eQ0YbH8PIXaAZAmnJrAvw=", 36 | "h1:sjzfyNQAjtF9zXHxB67geryjGkHaPDMMVw9iqPP5pkE=", 37 | "h1:xwHVa6ab/XVfDrZ3h35OzLJ6g0Zte4VAvSnyKw3f9AI=", 38 | "zh:1d623fb1662703f2feb7860e3c795d849c77640eecbc5a776784d08807b15004", 39 | "zh:253a5bc62ba2c4314875139e3fbd2feaad5ef6b0fb420302a474ab49e8e51a38", 40 | "zh:282358f4ad4f20d0ccaab670b8645228bfad1c03ac0d0df5889f0aea8aeac01a", 41 | "zh:4fd06af3091a382b3f0d8f0a60880f59640d2b6d9d6a31f9a873c6f1bde1ec50", 42 | "zh:6816976b1830f5629ae279569175e88b497abbbac30ee809948a1f923c67a80d", 43 | "zh:7d82c4150cdbf48cfeec867be94c7b9bd7682474d4df0ebb7e24e148f964844f", 44 | "zh:83f062049eea2513118a4c6054fb06c8600bac96196f25aed2cc21898ec86e93", 45 | "zh:a79eec0cf4c08fca79e44033ec6e470f25ff23c3e2c7f9bc707ed7771c1072c0", 46 | "zh:b2b2d904b2821a6e579910320605bc478bbef063579a23fbfdd6fcb5871b81f8", 47 | "zh:e91177ca06a15487fc570cb81ecef6359aa399459ea2aa7c4f7367ba86f6fcad", 48 | "zh:e976bcb82996fc4968f8382bbcb6673efb1f586bf92074058a232028d97825b1", 49 | 
"zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", 50 | ] 51 | } 52 | 53 | provider "registry.terraform.io/hashicorp/kubernetes" { 54 | version = "2.26.0" 55 | constraints = "~> 2.26.0" 56 | hashes = [ 57 | "h1:+Vny6dC1+K6fa+23qF2bh+9pjOCh/zo+EPkry7SL+0g=", 58 | "h1:0MjgW/qkJH4udWwTiSpjqA8AmEjVwrQzpkV2bu8+LhY=", 59 | "h1:1FzOq58nk7VxtO94l0Q3L1NmvzGqrRBpUc6UKv4fyqY=", 60 | "h1:HG0vDDKiBPRJK/2p5EvrON0H3nCl8tW7yDZ7WPt3Zq8=", 61 | "h1:LxZk5Vc0TnfeLYnp7HXZui53PEV+gFd+mznBzdNm+po=", 62 | "h1:MMxX/EY9AEGwp5DbGQ+LTd3c9YmjwrnPJHLlyc9u0eU=", 63 | "h1:cqXQ+gBtpLOObtuxc7JK3zStu6jR1wzzGDe5AuDsIrg=", 64 | "h1:h+KamkYSY9zaq6qtmb4y7iqegTjZ0z8GZ54lm7vKJMg=", 65 | "h1:lq3nuBjqNrRXtdhAmyhMM579BRZwhzW9W+LC5jinP4c=", 66 | "h1:vTbi/tiJQS8Wto3LLxZ/WWPcptqaMpQlT33s61WTV9Q=", 67 | "h1:wSFDvzim4kD1iieFFuQJ+cd/TqmpHJTomnK4ktj1mrw=", 68 | "zh:3f8ee1bffab1ba4f6ae549daae1648974214880d3606b6821cb0aceb365284a4", 69 | "zh:5596b1248231cc3b8f6a98f5b78df7120cd3153fd2b34b369dc20356a75bf35b", 70 | "zh:64420c9e4aa49c5e443afcd60f3e8d293ea6bd78797d402e21e23605f7757954", 71 | "zh:8327a488854e15f8d7eaf8272c3b9d6d1d9a6e68212a8dcb111d7b4023aac6b5", 72 | "zh:94c1c9b65280847d28a3e90e5046650858ac0bf87feefd2349336444e21e68e8", 73 | "zh:a3fb0b0b4bfd1844bb94011ae80111cedc188085235cf466313ca2151e75c8ca", 74 | "zh:ab5e381928144e0c2a9d9768a48e38797642e5c5fb2184370c7c08df500e5db3", 75 | "zh:da78995e8d6daf3acfd4c455ebbd12f6bf154cadf455f14ef35c0862e58dd2ec", 76 | "zh:e24cdd5b90196df93215f40d821af3a7b4473c53992be4c3038940d117a50eb4", 77 | "zh:e632efb3bce6d089b7c08507660af8b2c5e3f94c34fe401bfa228f154405e26e", 78 | "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", 79 | "zh:f5aea9da0eba25d35fee49db193c4b44cd3746a5578065092c62a53077e50b84", 80 | ] 81 | } 82 | 83 | provider "registry.terraform.io/hashicorp/null" { 84 | version = "3.2.1" 85 | hashes = [ 86 | "h1:FbGfc+muBsC17Ohy5g806iuI1hQc4SIexpYCrQHQd8w=", 87 | "h1:tSj1mL6OQ8ILGqR2mDu7OYYYWf+hoir0pf9KAQ8IzO8=", 88 | "h1:ydA0/SNRVB1o95btfshvYsmxA+jZFRZcvKzZSB+4S1M=", 89 | "zh:58ed64389620cc7b82f01332e27723856422820cfd302e304b5f6c3436fb9840", 90 | "zh:62a5cc82c3b2ddef7ef3a6f2fedb7b9b3deff4ab7b414938b08e51d6e8be87cb", 91 | "zh:63cff4de03af983175a7e37e52d4bd89d990be256b16b5c7f919aff5ad485aa5", 92 | "zh:74cb22c6700e48486b7cabefa10b33b801dfcab56f1a6ac9b6624531f3d36ea3", 93 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 94 | "zh:79e553aff77f1cfa9012a2218b8238dd672ea5e1b2924775ac9ac24d2a75c238", 95 | "zh:a1e06ddda0b5ac48f7e7c7d59e1ab5a4073bbcf876c73c0299e4610ed53859dc", 96 | "zh:c37a97090f1a82222925d45d84483b2aa702ef7ab66532af6cbcfb567818b970", 97 | "zh:e4453fbebf90c53ca3323a92e7ca0f9961427d2f0ce0d2b65523cc04d5d999c2", 98 | "zh:e80a746921946d8b6761e77305b752ad188da60688cfd2059322875d363be5f5", 99 | "zh:fbdb892d9822ed0e4cb60f2fedbdbb556e4da0d88d3b942ae963ed6ff091e48f", 100 | "zh:fca01a623d90d0cad0843102f9b8b9fe0d3ff8244593bd817f126582b52dd694", 101 | ] 102 | } 103 | 104 | provider "registry.terraform.io/hashicorp/tls" { 105 | version = "4.0.4" 106 | hashes = [ 107 | "h1:GZcFizg5ZT2VrpwvxGBHQ/hO9r6g0vYdQqx3bFD3anY=", 108 | "h1:Wd3RqmQW60k2QWPN4sK5CtjGuO1d+CRNXgC+D4rKtXc=", 109 | "h1:pe9vq86dZZKCm+8k1RhzARwENslF3SXb9ErHbQfgjXU=", 110 | "zh:23671ed83e1fcf79745534841e10291bbf34046b27d6e68a5d0aab77206f4a55", 111 | "zh:45292421211ffd9e8e3eb3655677700e3c5047f71d8f7650d2ce30242335f848", 112 | "zh:59fedb519f4433c0fdb1d58b27c210b27415fddd0cd73c5312530b4309c088be", 113 | "zh:5a8eec2409a9ff7cd0758a9d818c74bcba92a240e6c5e54b99df68fff312bbd5", 114 | 
"zh:5e6a4b39f3171f53292ab88058a59e64825f2b842760a4869e64dc1dc093d1fe", 115 | "zh:810547d0bf9311d21c81cc306126d3547e7bd3f194fc295836acf164b9f8424e", 116 | "zh:824a5f3617624243bed0259d7dd37d76017097dc3193dac669be342b90b2ab48", 117 | "zh:9361ccc7048be5dcbc2fafe2d8216939765b3160bd52734f7a9fd917a39ecbd8", 118 | "zh:aa02ea625aaf672e649296bce7580f62d724268189fe9ad7c1b36bb0fa12fa60", 119 | "zh:c71b4cd40d6ec7815dfeefd57d88bc592c0c42f5e5858dcc88245d371b4b8b1e", 120 | "zh:dabcd52f36b43d250a3d71ad7abfa07b5622c69068d989e60b79b2bb4f220316", 121 | "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", 122 | ] 123 | } 124 | 125 | provider "registry.terraform.io/scaleway/scaleway" { 126 | version = "2.37.0" 127 | constraints = "~> 2.37.0" 128 | hashes = [ 129 | "h1:0VhiSEG/ImxVGM6X+KcUUDgEKZ4SVGJeudHK7WdC8w4=", 130 | "h1:5nWpaBz5aqe1PYzcrTaB90Y6nE1EaN2UoRcdI/ZANZs=", 131 | "h1:677t1iodHvl0GIxyYQJrc/bkq7ikmvrnno+XJbpRxhE=", 132 | "h1:CrlE7zZFWJYDAlRuZR74j28VHhqveRvOy4HUg9PKbrA=", 133 | "h1:KM5MuS2H6fhUzz5o69GtXTjjbxhOum4mASXcyhGFN/g=", 134 | "h1:OaEXjsXfL25F4ij16jJHCwJoxjx6xXn7M9bifZav9xE=", 135 | "h1:Vn6zkmkxTwanF5jlp1OVbzJSGKGaEu1xdtX6RaJQwWs=", 136 | "h1:VoyDduQx9n/HU6l5A0JCHJu+Jwg+OY4Z0jhAAjRhNZY=", 137 | "h1:at9Q7aPEVuSgqCYRaLZ5nve7bafHjiIOi6ySJbSmmnw=", 138 | "h1:eETE+i/slDmyXxR0OoxGONZEhk+dFI4tTD4z+97zTr0=", 139 | "h1:jQzHl9qADhn+WKLT2fuhcLoaJJszmTXiB6+g/H/kHjo=", 140 | "h1:w1SWubtEZ/c3PbMqCWbbJECe3SuNTa4xXe4aY8noK7s=", 141 | "h1:yprVcTsxvYreNzxYdkBBN0qEDzk1VBw6JLsih21gAXE=", 142 | "h1:zn4V1XZtdr4zM0F6Ucv0t7iAJM3o32aDQVerbMLp+Nk=", 143 | "zh:0034c7cfb1a6f2b689cbcb6d24a8ad31bac11bf03a915fba4d0f425c96538f55", 144 | "zh:110182494af10021cf35620776638a43e0845a8a2c2b743daf31be924caa0b2f", 145 | "zh:219f06a69581c68d6171fa3f2bfa61a5071a3cf7ba0875e1c733268d3dcb2640", 146 | "zh:227ec0ca1d5f1d42b5c8fb7a0c2c05c90c9d2d3b51ebdef2132f14be673c1984", 147 | "zh:2eac5b68cc3fab806102e3e38ca613915dc9dc552baca6034243387ceeb74a4f", 148 | "zh:45b0b99050551d1a0f3555f2fbc786f6e960d73f6a08e1ccceaea62d5769d267", 149 | "zh:65dc9de5ed4b77aca5aed329cda9c747d61d2e8381a98e19240376f40edb7ac6", 150 | "zh:89ef9958f89779bcca54a92d838d72a21108357d6802be16b5cdad22ad5dc49c", 151 | "zh:8aa92f43ca86888bb334bec1fb1cf13ecdb5121a2296dde0b4f657f339d09144", 152 | "zh:8d9fda08888dd35b26f557768b35dfdd035149df803e521572cec4499b468fa9", 153 | "zh:afc9f5665fcf89c5e09672fe117f92beec7cab431758c72d28cabfc6299295ed", 154 | "zh:c31895cee41e85adeb500a94c21108e95fddc4946eadd9399c6a1c0ca4a9927d", 155 | "zh:c849cbfc5421a81df944d5930709a78254e9cd7fee38f02a0d7e9324eaad9a0d", 156 | "zh:dc10c1c651c292277e64ed2bf80de332e7eb2a220cc096c1c72a31b924905222", 157 | ] 158 | } 159 | -------------------------------------------------------------------------------- /scaleway/README.md: -------------------------------------------------------------------------------- 1 | # How to test 2 | 3 | It should work to test : 4 | 5 | ```yaml 6 | kind: Deployment 7 | apiVersion: apps/v1 8 | metadata: 9 | name: whoami 10 | labels: 11 | app: myapp 12 | name: whoami 13 | 14 | spec: 15 | replicas: 2 16 | selector: 17 | matchLabels: 18 | app: myapp 19 | task: whoami 20 | template: 21 | metadata: 22 | labels: 23 | app: myapp 24 | task: whoami 25 | spec: 26 | containers: 27 | - name: whoami 28 | image: traefik/whoami 29 | ports: 30 | - containerPort: 80 31 | resources: 32 | 33 | --- 34 | apiVersion: v1 35 | kind: Service 36 | metadata: 37 | name: whoami 38 | 39 | spec: 40 | ports: 41 | - name: http 42 | port: 80 43 | selector: 44 | app: myapp 45 | task: whoami 46 | 47 | --- 48 | 
apiVersion: networking.k8s.io/v1 49 | kind: Ingress 50 | metadata: 51 | name: whoami-ingress 52 | annotations: 53 | kubernetes.io/ingress.class: nginx 54 | cert-manager.io/issuer: "cert-manager-global" 55 | spec: 56 | tls: 57 | - hosts: 58 | - whoami.scw-tf.fun-plus.fr 59 | secretName: whoami.scw-tf.fun-plus.fr 60 | rules: 61 | - host: whoami.scw-tf.fun-plus.fr 62 | http: 63 | paths: 64 | - pathType: Prefix 65 | path: "/" 66 | backend: 67 | service: 68 | name: whoami 69 | port: 70 | number: 80 71 | 72 | ``` 73 | -------------------------------------------------------------------------------- /scaleway/argocd-apps-values.yaml.tftpl: -------------------------------------------------------------------------------- 1 | ../common/argocd-apps-values.yaml.tftpl -------------------------------------------------------------------------------- /scaleway/argocd-values.yaml.tftpl: -------------------------------------------------------------------------------- 1 | ../common/argocd-values.yaml.tftpl -------------------------------------------------------------------------------- /scaleway/argocd.tf: -------------------------------------------------------------------------------- 1 | ../common/argocd.tf -------------------------------------------------------------------------------- /scaleway/backend.conf.example: -------------------------------------------------------------------------------- 1 | bucket = "" 2 | key = "terraform-scaleway.tfstate" 3 | region = "gra" # The region of your bucket, default is gra for OVHcloud 4 | access_key = "" 5 | secret_key = "" 6 | endpoint = "https://s3.gra.io.cloud.ovh.net" # The endpoint of your bucket, default is https://s3.gra.io.cloud.ovh.net for OVHcloud 7 | skip_region_validation = true # AWS does not recognize GRA or SBG as valid regions 8 | skip_credentials_validation = true # OVH does not provide an STS service 9 | -------------------------------------------------------------------------------- /scaleway/cert-manager.tf: -------------------------------------------------------------------------------- 1 | ../common/cert-manager.tf -------------------------------------------------------------------------------- /scaleway/credentials.auto.tfvars.json.template: -------------------------------------------------------------------------------- 1 | { 2 | "scaleway_access_key": "", 3 | "scaleway_secret_key": "" 4 | } -------------------------------------------------------------------------------- /scaleway/hashicorp-vault.tf: -------------------------------------------------------------------------------- 1 | ../common/hashicorp-vault.tf -------------------------------------------------------------------------------- /scaleway/ingress-nginx.tf: -------------------------------------------------------------------------------- 1 | 2 | resource "scaleway_lb_ip" "nginx_ip" { 3 | zone = "fr-par-1" 4 | project_id = scaleway_k8s_cluster.k8s_cluster.project_id 5 | } 6 | 7 | output "ingress_ip" { 8 | value = scaleway_lb_ip.nginx_ip.ip_address 9 | description = "Address of the load balancer" 10 | } 11 | 12 | resource "helm_release" "ingress-nginx" { 13 | name = "ingress-nginx" 14 | namespace = "nginx" 15 | create_namespace = true 16 | 17 | repository = "https://kubernetes.github.io/ingress-nginx" 18 | chart = "ingress-nginx" 19 | 20 | values = [templatefile("${path.module}/nginx-values.yml", { 21 | zone = scaleway_lb_ip.nginx_ip.zone 22 | ip_address = scaleway_lb_ip.nginx_ip.ip_address 23 | })] 24 | 25 | depends_on = [ 26 | helm_release.kube-prometheus, 27 | helm_release.cert_manager
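      # ordering note: the chart creates ServiceMonitor objects and TLS-related
      # resources, so the Prometheus Operator CRDs (installed with
      # kube-prometheus) and cert-manager must be in place before this release.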
28 | ] 29 | } 30 | 31 | resource "null_resource" "ingress-nginx" { 32 | depends_on = [ 33 | scaleway_k8s_pool.k8s_pool 34 | ] 35 | } -------------------------------------------------------------------------------- /scaleway/issuer.yml.tftpl: -------------------------------------------------------------------------------- 1 | ../common/issuer.yml.tftpl -------------------------------------------------------------------------------- /scaleway/kubernetes.tf: -------------------------------------------------------------------------------- 1 | resource "scaleway_k8s_cluster" "k8s_cluster" { 2 | name = var.k8s_cluster_name 3 | version = var.k8s_cluster_version 4 | cni = "cilium" 5 | delete_additional_resources = true 6 | } 7 | 8 | resource "scaleway_k8s_pool" "k8s_pool" { 9 | cluster_id = scaleway_k8s_cluster.k8s_cluster.id 10 | name = var.k8s_nodepool_name 11 | node_type = var.k8s_nodepool_flavor 12 | size = var.k8s_nodepool_size 13 | } 14 | 15 | resource "null_resource" "kubeconfig" { 16 | depends_on = [scaleway_k8s_pool.k8s_pool] # at least one pool here 17 | triggers = { 18 | host = scaleway_k8s_cluster.k8s_cluster.kubeconfig[0].host 19 | token = scaleway_k8s_cluster.k8s_cluster.kubeconfig[0].token 20 | cluster_ca_certificate = scaleway_k8s_cluster.k8s_cluster.kubeconfig[0].cluster_ca_certificate 21 | } 22 | } 23 | 24 | output "kubeconfig" { 25 | value = scaleway_k8s_cluster.k8s_cluster.kubeconfig[0].config_file 26 | description = "kubeconfig for kubectl access." 27 | sensitive = true 28 | } -------------------------------------------------------------------------------- /scaleway/locals.tf: -------------------------------------------------------------------------------- 1 | ../common/locals.tf -------------------------------------------------------------------------------- /scaleway/loki-values.yml: -------------------------------------------------------------------------------- 1 | ../common/loki-values.yml -------------------------------------------------------------------------------- /scaleway/loki.tf: -------------------------------------------------------------------------------- 1 | ../common/loki.tf -------------------------------------------------------------------------------- /scaleway/nginx-values.yml: -------------------------------------------------------------------------------- 1 | --- 2 | controller: 3 | service: 4 | annotations: 5 | service.beta.kubernetes.io/scw-loadbalancer-use-hostname: "true" 6 | service.beta.kubernetes.io/scw-loadbalancer-zone: ${zone} 7 | externalTrafficPolicy: Local 8 | loadBalancerIP: ${ip_address} 9 | metrics: 10 | enabled: true 11 | serviceMonitor: 12 | additionalLabels: 13 | release: prometheus 14 | enabled: true 15 | extraArgs: 16 | enable-ssl-passthrough: "true" 17 | admissionWebhooks: 18 | timeoutSeconds: 30 19 | -------------------------------------------------------------------------------- /scaleway/prom-grafana-values.yml: -------------------------------------------------------------------------------- 1 | ../common/prom-grafana-values.yml -------------------------------------------------------------------------------- /scaleway/prometheus-grafana.tf: -------------------------------------------------------------------------------- 1 | ../common/prometheus-grafana.tf -------------------------------------------------------------------------------- /scaleway/promtail-values.yml: -------------------------------------------------------------------------------- 1 | ../common/promtail-values.yml
-------------------------------------------------------------------------------- /scaleway/promtail.tf: -------------------------------------------------------------------------------- 1 | ../common/promtail.tf -------------------------------------------------------------------------------- /scaleway/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "s3" { 3 | } 4 | required_providers { 5 | scaleway = { 6 | source = "scaleway/scaleway" 7 | version = "~> 2.37.0" 8 | } 9 | helm = { 10 | source = "hashicorp/helm" 11 | version = "~> 2.12.0" 12 | } 13 | kubernetes = { 14 | source = "hashicorp/kubernetes" 15 | version = "~> 2.26.0" 16 | } 17 | kubectl = { 18 | source = "gavinbunney/kubectl" 19 | version = "~> 1.14.0" 20 | } 21 | } 22 | } 23 | 24 | provider "scaleway" { 25 | access_key = var.scaleway_access_key 26 | secret_key = var.scaleway_secret_key 27 | project_id = var.scaleway_project_id 28 | region = var.scaleway_region 29 | } 30 | 31 | provider "kubectl" { 32 | host = null_resource.kubeconfig.triggers.host 33 | token = null_resource.kubeconfig.triggers.token 34 | cluster_ca_certificate = base64decode( 35 | null_resource.kubeconfig.triggers.cluster_ca_certificate 36 | ) 37 | load_config_file = false 38 | } 39 | 40 | provider "helm" { 41 | kubernetes { 42 | host = null_resource.kubeconfig.triggers.host 43 | token = null_resource.kubeconfig.triggers.token 44 | cluster_ca_certificate = base64decode( 45 | null_resource.kubeconfig.triggers.cluster_ca_certificate 46 | ) 47 | } 48 | } 49 | 50 | provider "kubernetes" { 51 | host = null_resource.kubeconfig.triggers.host 52 | token = null_resource.kubeconfig.triggers.token 53 | cluster_ca_certificate = base64decode( 54 | null_resource.kubeconfig.triggers.cluster_ca_certificate 55 | ) 56 | } 57 | -------------------------------------------------------------------------------- /scaleway/terraform.tfvars.example: -------------------------------------------------------------------------------- 1 | ######################################################################################### 2 | # # 3 | # This file contains example values to help during manual configuration # 4 | # # 5 | ######################################################################################### 6 | 7 | scaleway_access_key = "SCWXXXXXXXXXXXXXXXXX" 8 | scaleway_secret_key = "3caf2873-0ff0-4626-a174-73620869c8d4" 9 | scaleway_project_id = "23d0332d-4917-43e3-bdb5-ff43e9e57ba4" 10 | scaleway_region = "fr-par" 11 | k8s_cluster_name = "my_cluster" 12 | k8s_cluster_version = "1.27.1" 13 | k8s_nodepool_name = "my_pool" 14 | k8s_nodepool_flavor = "DEV1-M" 15 | k8s_nodepool_size = "1" 16 | argocd_hostname = "argocd.kubic.example" 17 | argocd_password = "$2a$10$QAlYJhAr2QMETwDeqyEKReXttl3P1BUbMvoukKCL9nZwauYbg0mN6" 18 | argocd_repo_url = "https://github.com/example/argocd.git" 19 | argocd_repo_username = "argocd" 20 | argocd_repo_password = "access_token" 21 | main_cluster_issuer_name = "letsencrypt-prod" 22 | issuers = [ 23 | { 24 | name = "letsencrypt-prod" 25 | server = "https://acme-v02.api.letsencrypt.org/directory" 26 | email = "yourname@example.com", 27 | private_key_secret_name = "letsencrypt-prod" 28 | }, { 29 | name = "letsencrypt-staging" 30 | server = "https://acme-staging-v02.api.letsencrypt.org/directory" 31 | email = "yourname+1@example.com" 32 | private_key_secret_name = "letsencrypt-staging" 33 | } 34 | ] 35 | grafana_hostname = "grafana.kubic.example" 36 | grafana_admin_password = "change_me" 37 | 
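# The persistence flags below create one PVC per component, sized by the
# *_persistence_size values; they rely on the cluster's default StorageClass
# (on Scaleway Kapsule this is typically scw-bssd).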
grafana_persistence_enabled = true 38 | grafana_persistence_size = "10Gi" 39 | prometheus_persistence_enabled = true 40 | prometheus_persistence_size = "20Gi" 41 | vault_server_hostname = "vault.kubic.example" 42 | install_hashicorp_vault = true 43 | vault_ui = true 44 | velero_version = "4.0.2" 45 | velero_s3_bucket_name = "velero_bucket" 46 | velero_s3_bucket_region = "fr-par" 47 | velero_s3_bucket_endpoint = "https://rg.fr-par.scw.cloud/namespace/" 48 | velero_s3_access_key_id = "acb2c7815132b7157b881b83a002b3f" 49 | velero_s3_secret_access_key = "2d0f550387fa8a2073f3a8fc2062e28" 50 | velero_default_volumes_to_fs_backup = false 51 | loki_enabled = true 52 | loki_s3_chunks_bucket_name = "my_cluster-loki-chunks" 53 | loki_s3_ruler_bucket_name = "my_cluster-loki-ruler" 54 | loki_s3_admin_bucket_name = "my_cluster-loki-admin" 55 | loki_s3_bucket_region = "fr-par" 56 | loki_s3_bucket_endpoint = "https://rg.fr-par.scw.cloud/namespace/" 57 | loki_s3_access_key_id = "ebc294815132b7157b881b83a009f5ac" 58 | loki_s3_secret_access_key = "2d0f550387fa8a2073f3a8fc29e4ab6" 59 | -------------------------------------------------------------------------------- /scaleway/terraform.tfvars.template: -------------------------------------------------------------------------------- 1 | scaleway_access_key = "" 2 | scaleway_secret_key = "" 3 | scaleway_project_id = "" 4 | scaleway_region = "" 5 | k8s_cluster_name = "" 6 | k8s_cluster_version = "" 7 | k8s_nodepool_name = "" 8 | k8s_nodepool_flavor = "" 9 | k8s_nodepool_size = "" 10 | argocd_hostname = "" 11 | argocd_password = "" 12 | argocd_repo_url = "" 13 | argocd_repo_username = "" 14 | argocd_repo_password = "" 15 | main_cluster_issuer_name = "" 16 | issuers = [ 17 | { 18 | name = "letsencrypt-prod" 19 | server = "https://acme-v02.api.letsencrypt.org/directory" 20 | email = "", 21 | private_key_secret_name = "letsencrypt-prod" 22 | }, { 23 | name = "letsencrypt-staging" 24 | server = "https://acme-staging-v02.api.letsencrypt.org/directory" 25 | email = "" 26 | private_key_secret_name = "letsencrypt-staging" 27 | } 28 | ] 29 | grafana_hostname = "" 30 | grafana_admin_password = "" 31 | grafana_persistence_enabled = 32 | grafana_persistence_size = "" 33 | prometheus_persistence_enabled = 34 | prometheus_persistence_size = "" 35 | vault_server_hostname = "" 36 | install_hashicorp_vault = 37 | vault_ui = 38 | velero_version = "" 39 | velero_s3_bucket_name = "" 40 | velero_s3_bucket_region = "" 41 | velero_s3_bucket_endpoint = "" 42 | velero_s3_access_key_id = "" 43 | velero_s3_secret_access_key = "" 44 | velero_default_volumes_to_fs_backup = 45 | loki_enabled = "" 46 | loki_s3_chunks_bucket_name = "" 47 | loki_s3_ruler_bucket_name = "" 48 | loki_s3_admin_bucket_name = "" 49 | loki_s3_bucket_region = "" 50 | loki_s3_bucket_endpoint = "" 51 | loki_s3_access_key_id = "" 52 | loki_s3_secret_access_key = "" 53 | -------------------------------------------------------------------------------- /scaleway/tls-vault.tf: -------------------------------------------------------------------------------- 1 | ../common/tls-vault.tf -------------------------------------------------------------------------------- /scaleway/variables-common.tf: -------------------------------------------------------------------------------- 1 | ../common/variables.tf -------------------------------------------------------------------------------- /scaleway/variables.tf: -------------------------------------------------------------------------------- 1 | variable "scaleway_access_key" { 2 | type = 
string 3 | description = "The access key to use for Scaleway API calls" 4 | sensitive = true 5 | } 6 | 7 | variable "scaleway_secret_key" { 8 | type = string 9 | description = "The secret key to use for Scaleway API calls" 10 | sensitive = true 11 | } 12 | 13 | variable "scaleway_project_id" { 14 | type = string 15 | description = "The project id to use for Scaleway API calls" 16 | sensitive = true 17 | } 18 | 19 | variable "scaleway_region" { 20 | type = string 21 | description = "The region to use for the cluster" 22 | default = "fr-par" 23 | } 24 | 25 | variable "k8s_cluster_name" { 26 | type = string 27 | description = "The name of the cluster" 28 | default = "my_cluster" 29 | } 30 | 31 | variable "k8s_cluster_version" { 32 | type = string 33 | description = "The version of the cluster" 34 | default = "1.27.1" 35 | } 36 | 37 | variable "k8s_nodepool_name" { 38 | type = string 39 | description = "The name of the pool" 40 | default = "my_pool" 41 | } 42 | 43 | variable "k8s_nodepool_flavor" { 44 | type = string 45 | description = "The flavor of the pool" 46 | default = "DEV1-M" 47 | } 48 | 49 | variable "k8s_nodepool_size" { 50 | type = number 51 | description = "The size of the pool" 52 | default = 1 53 | } 54 | -------------------------------------------------------------------------------- /scaleway/vault-values.yml: -------------------------------------------------------------------------------- 1 | ../common/vault-values.yml -------------------------------------------------------------------------------- /scaleway/velero-credentials: -------------------------------------------------------------------------------- 1 | ../common/velero-credentials -------------------------------------------------------------------------------- /scaleway/velero-values.yml: -------------------------------------------------------------------------------- 1 | ../common/velero-values.yml -------------------------------------------------------------------------------- /scaleway/velero.tf: -------------------------------------------------------------------------------- 1 | ../common/velero.tf -------------------------------------------------------------------------------- /standalone/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/gavinbunney/kubectl" { 5 | version = "1.14.0" 6 | constraints = "~> 1.14.0" 7 | hashes = [ 8 | "h1:gLFn+RvP37sVzp9qnFCwngRjjFV649r6apjxvJ1E/SE=", 9 | "zh:0350f3122ff711984bbc36f6093c1fe19043173fad5a904bce27f86afe3cc858", 10 | "zh:07ca36c7aa7533e8325b38232c77c04d6ef1081cb0bac9d56e8ccd51f12f2030", 11 | "zh:0c351afd91d9e994a71fe64bbd1662d0024006b3493bb61d46c23ea3e42a7cf5", 12 | "zh:39f1a0aa1d589a7e815b62b5aa11041040903b061672c4cfc7de38622866cbc4", 13 | "zh:428d3a321043b78e23c91a8d641f2d08d6b97f74c195c654f04d2c455e017de5", 14 | "zh:4baf5b1de2dfe9968cc0f57fd4be5a741deb5b34ee0989519267697af5f3eee5", 15 | "zh:6131a927f9dffa014ab5ca5364ac965fe9b19830d2bbf916a5b2865b956fdfcf", 16 | "zh:c62e0c9fd052cbf68c5c2612af4f6408c61c7e37b615dc347918d2442dd05e93", 17 | "zh:f0beffd7ce78f49ead612e4b1aefb7cb6a461d040428f514f4f9cc4e5698ac65", 18 | ] 19 | } 20 | 21 | provider "registry.terraform.io/hashicorp/helm" { 22 | version = "2.12.1" 23 | constraints = "~> 2.12.0" 24 | hashes = [ 25 | "h1:7wfYOAeSEchHB8idNl+2jf+OkFi9zFSOLWkEZFuTCik=", 26 | "h1:E2QwIJNtRF/ePP/3Jr5ur/SJRSPqljzUCJDfQGLgMq4=", 27 | "h1:HGilqNGo1KArhsJ9QJDenXhTmOlnA4t9RVQUeEFxi/4=", 28 | "h1:OABttY6YdBeCNK5xNuVxCK7sJtneHnHxCYvYNEeNSqE=", 29 | "h1:aBfcqM4cbywa7TAxfT1YoFS+Cst9waerlm4XErFmJlk=", 30 | "h1:fBTUyEb263IVI8j2aBbioEBjQ4jnnKo4MM3Y0fLPYsk=", 31 | "h1:iPGKXwn/PxspdFvbo3TnaK7W2YKhIzP7t9THn5ZPJQk=", 32 | "h1:pEDM3vsn4FOExaGacenYfyLhV9m3I+PGwITjFDg5rko=", 33 | "h1:sgYI7lwGqJqPopY3NGmhb1eQ0YbH8PIXaAZAmnJrAvw=", 34 | "h1:sjzfyNQAjtF9zXHxB67geryjGkHaPDMMVw9iqPP5pkE=", 35 | "h1:xwHVa6ab/XVfDrZ3h35OzLJ6g0Zte4VAvSnyKw3f9AI=", 36 | "zh:1d623fb1662703f2feb7860e3c795d849c77640eecbc5a776784d08807b15004", 37 | "zh:253a5bc62ba2c4314875139e3fbd2feaad5ef6b0fb420302a474ab49e8e51a38", 38 | "zh:282358f4ad4f20d0ccaab670b8645228bfad1c03ac0d0df5889f0aea8aeac01a", 39 | "zh:4fd06af3091a382b3f0d8f0a60880f59640d2b6d9d6a31f9a873c6f1bde1ec50", 40 | "zh:6816976b1830f5629ae279569175e88b497abbbac30ee809948a1f923c67a80d", 41 | "zh:7d82c4150cdbf48cfeec867be94c7b9bd7682474d4df0ebb7e24e148f964844f", 42 | "zh:83f062049eea2513118a4c6054fb06c8600bac96196f25aed2cc21898ec86e93", 43 | "zh:a79eec0cf4c08fca79e44033ec6e470f25ff23c3e2c7f9bc707ed7771c1072c0", 44 | "zh:b2b2d904b2821a6e579910320605bc478bbef063579a23fbfdd6fcb5871b81f8", 45 | "zh:e91177ca06a15487fc570cb81ecef6359aa399459ea2aa7c4f7367ba86f6fcad", 46 | "zh:e976bcb82996fc4968f8382bbcb6673efb1f586bf92074058a232028d97825b1", 47 | "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", 48 | ] 49 | } 50 | 51 | provider "registry.terraform.io/hashicorp/kubernetes" { 52 | version = "2.26.0" 53 | constraints = "~> 2.26.0" 54 | hashes = [ 55 | "h1:+Vny6dC1+K6fa+23qF2bh+9pjOCh/zo+EPkry7SL+0g=", 56 | "h1:0MjgW/qkJH4udWwTiSpjqA8AmEjVwrQzpkV2bu8+LhY=", 57 | "h1:1FzOq58nk7VxtO94l0Q3L1NmvzGqrRBpUc6UKv4fyqY=", 58 | "h1:HG0vDDKiBPRJK/2p5EvrON0H3nCl8tW7yDZ7WPt3Zq8=", 59 | "h1:LxZk5Vc0TnfeLYnp7HXZui53PEV+gFd+mznBzdNm+po=", 60 | "h1:MMxX/EY9AEGwp5DbGQ+LTd3c9YmjwrnPJHLlyc9u0eU=", 61 | "h1:cqXQ+gBtpLOObtuxc7JK3zStu6jR1wzzGDe5AuDsIrg=", 62 | "h1:h+KamkYSY9zaq6qtmb4y7iqegTjZ0z8GZ54lm7vKJMg=", 63 | "h1:lq3nuBjqNrRXtdhAmyhMM579BRZwhzW9W+LC5jinP4c=", 64 | "h1:vTbi/tiJQS8Wto3LLxZ/WWPcptqaMpQlT33s61WTV9Q=", 65 | "h1:wSFDvzim4kD1iieFFuQJ+cd/TqmpHJTomnK4ktj1mrw=", 66 | "zh:3f8ee1bffab1ba4f6ae549daae1648974214880d3606b6821cb0aceb365284a4", 67 | "zh:5596b1248231cc3b8f6a98f5b78df7120cd3153fd2b34b369dc20356a75bf35b", 68 | "zh:64420c9e4aa49c5e443afcd60f3e8d293ea6bd78797d402e21e23605f7757954", 69 | 
"zh:8327a488854e15f8d7eaf8272c3b9d6d1d9a6e68212a8dcb111d7b4023aac6b5", 70 | "zh:94c1c9b65280847d28a3e90e5046650858ac0bf87feefd2349336444e21e68e8", 71 | "zh:a3fb0b0b4bfd1844bb94011ae80111cedc188085235cf466313ca2151e75c8ca", 72 | "zh:ab5e381928144e0c2a9d9768a48e38797642e5c5fb2184370c7c08df500e5db3", 73 | "zh:da78995e8d6daf3acfd4c455ebbd12f6bf154cadf455f14ef35c0862e58dd2ec", 74 | "zh:e24cdd5b90196df93215f40d821af3a7b4473c53992be4c3038940d117a50eb4", 75 | "zh:e632efb3bce6d089b7c08507660af8b2c5e3f94c34fe401bfa228f154405e26e", 76 | "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", 77 | "zh:f5aea9da0eba25d35fee49db193c4b44cd3746a5578065092c62a53077e50b84", 78 | ] 79 | } 80 | 81 | provider "registry.terraform.io/hashicorp/null" { 82 | version = "3.2.2" 83 | hashes = [ 84 | "h1:zT1ZbegaAYHwQa+QwIFugArWikRJI9dqohj8xb0GY88=", 85 | "zh:3248aae6a2198f3ec8394218d05bd5e42be59f43a3a7c0b71c66ec0df08b69e7", 86 | "zh:32b1aaa1c3013d33c245493f4a65465eab9436b454d250102729321a44c8ab9a", 87 | "zh:38eff7e470acb48f66380a73a5c7cdd76cc9b9c9ba9a7249c7991488abe22fe3", 88 | "zh:4c2f1faee67af104f5f9e711c4574ff4d298afaa8a420680b0cb55d7bbc65606", 89 | "zh:544b33b757c0b954dbb87db83a5ad921edd61f02f1dc86c6186a5ea86465b546", 90 | "zh:696cf785090e1e8cf1587499516b0494f47413b43cb99877ad97f5d0de3dc539", 91 | "zh:6e301f34757b5d265ae44467d95306d61bef5e41930be1365f5a8dcf80f59452", 92 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 93 | "zh:913a929070c819e59e94bb37a2a253c228f83921136ff4a7aa1a178c7cce5422", 94 | "zh:aa9015926cd152425dbf86d1abdbc74bfe0e1ba3d26b3db35051d7b9ca9f72ae", 95 | "zh:bb04798b016e1e1d49bcc76d62c53b56c88c63d6f2dfe38821afef17c416a0e1", 96 | "zh:c23084e1b23577de22603cff752e59128d83cfecc2e6819edadd8cf7a10af11e", 97 | ] 98 | } 99 | 100 | provider "registry.terraform.io/hashicorp/tls" { 101 | version = "4.0.5" 102 | hashes = [ 103 | "h1:e4LBdJoZJNOQXPWgOAG0UuPBVhCStu98PieNlqJTmeU=", 104 | "zh:01cfb11cb74654c003f6d4e32bbef8f5969ee2856394a96d127da4949c65153e", 105 | "zh:0472ea1574026aa1e8ca82bb6df2c40cd0478e9336b7a8a64e652119a2fa4f32", 106 | "zh:1a8ddba2b1550c5d02003ea5d6cdda2eef6870ece86c5619f33edd699c9dc14b", 107 | "zh:1e3bb505c000adb12cdf60af5b08f0ed68bc3955b0d4d4a126db5ca4d429eb4a", 108 | "zh:6636401b2463c25e03e68a6b786acf91a311c78444b1dc4f97c539f9f78de22a", 109 | "zh:76858f9d8b460e7b2a338c477671d07286b0d287fd2d2e3214030ae8f61dd56e", 110 | "zh:a13b69fb43cb8746793b3069c4d897bb18f454290b496f19d03c3387d1c9a2dc", 111 | "zh:a90ca81bb9bb509063b736842250ecff0f886a91baae8de65c8430168001dad9", 112 | "zh:c4de401395936e41234f1956ebadbd2ed9f414e6908f27d578614aaa529870d4", 113 | "zh:c657e121af8fde19964482997f0de2d5173217274f6997e16389e7707ed8ece8", 114 | "zh:d68b07a67fbd604c38ec9733069fbf23441436fecf554de6c75c032f82e1ef19", 115 | "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", 116 | ] 117 | } 118 | -------------------------------------------------------------------------------- /standalone/Schedule_template.yml: -------------------------------------------------------------------------------- 1 | ../common/Schedule_template.yml -------------------------------------------------------------------------------- /standalone/argocd-apps-values.yaml.tftpl: -------------------------------------------------------------------------------- 1 | ../common/argocd-apps-values.yaml.tftpl -------------------------------------------------------------------------------- /standalone/argocd-values.yaml.tftpl: 
-------------------------------------------------------------------------------- 1 | ../common/argocd-values.yaml.tftpl -------------------------------------------------------------------------------- /standalone/argocd.tf: -------------------------------------------------------------------------------- 1 | ../common/argocd.tf -------------------------------------------------------------------------------- /standalone/backend.conf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openfun/kubic/c326f47a4a9079453242e13e2d1829cc640020f9/standalone/backend.conf -------------------------------------------------------------------------------- /standalone/cert-manager.tf: -------------------------------------------------------------------------------- 1 | ../common/cert-manager.tf -------------------------------------------------------------------------------- /standalone/hashicorp-vault.tf: -------------------------------------------------------------------------------- 1 | ../common/hashicorp-vault.tf -------------------------------------------------------------------------------- /standalone/issuer.yml.tftpl: -------------------------------------------------------------------------------- 1 | ../common/issuer.yml.tftpl -------------------------------------------------------------------------------- /standalone/locals.tf: -------------------------------------------------------------------------------- 1 | ../common/locals.tf -------------------------------------------------------------------------------- /standalone/prom-grafana-values.yml: -------------------------------------------------------------------------------- 1 | ../common/prom-grafana-values.yml -------------------------------------------------------------------------------- /standalone/prometheus-grafana.tf: -------------------------------------------------------------------------------- 1 | ../common/prometheus-grafana.tf -------------------------------------------------------------------------------- /standalone/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "local" {} 3 | required_providers { 4 | helm = { 5 | source = "hashicorp/helm" 6 | version = "~> 2.12.0" 7 | } 8 | kubernetes = { 9 | source = "hashicorp/kubernetes" 10 | version = "~> 2.26.0" 11 | } 12 | kubectl = { 13 | source = "gavinbunney/kubectl" 14 | version = "~> 1.14.0" 15 | } 16 | } 17 | } 18 | 19 | provider "kubectl" { 20 | config_path = "./kubeconfig" 21 | } 22 | 23 | provider "helm" { 24 | kubernetes { 25 | config_path = "./kubeconfig" 26 | } 27 | } 28 | 29 | provider "kubernetes" { 30 | config_path = "./kubeconfig" 31 | } 32 | 33 | resource "null_resource" "ingress-nginx" {} -------------------------------------------------------------------------------- /standalone/terraform.tfvars.example: -------------------------------------------------------------------------------- 1 | ######################################################################################### 2 | # # 3 | # This file contains example values to help during manual configuration # 4 | # # 5 | ######################################################################################### 6 | 7 | argocd_hostname = "argocd.kubic.example" 8 | argocd_password = "$2a$10$QAlYJhAr2QMETwDeqyEKReXttl3P1BUbMvoukKCL9nZwauYbg0mN6" 9 | argocd_repo_url = "https://github.com/example/argocd.git" 10 | argocd_repo_username = "argocd" 11 | argocd_repo_password = "access_token" 12 |
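# main_cluster_issuer_name must match the "name" of one of the issuers below;
# while testing, pointing it at letsencrypt-staging keeps you clear of Let's
# Encrypt's production rate limits (staging certificates are not browser-trusted).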
main_cluster_issuer_name             = "letsencrypt-prod" 13 | issuers = [ 14 |   { 15 |     name                    = "letsencrypt-prod" 16 |     server                  = "https://acme-v02.api.letsencrypt.org/directory" 17 |     email                   = "yourname@example.com", 18 |     private_key_secret_name = "letsencrypt-prod" 19 |   }, { 20 |     name                    = "letsencrypt-staging" 21 |     server                  = "https://acme-staging-v02.api.letsencrypt.org/directory" 22 |     email                   = "yourname+1@example.com" 23 |     private_key_secret_name = "letsencrypt-staging" 24 |   } 25 | ] 26 | grafana_hostname                     = "grafana.kubic.example" 27 | grafana_admin_password               = "change_me" 28 | grafana_persistence_enabled          = true 29 | grafana_persistence_size             = "10Gi" 30 | prometheus_persistence_enabled       = true 31 | prometheus_persistence_size          = "20Gi" 32 | vault_server_hostname                = "vault.kubic.example" 33 | install_hashicorp_vault              = true 34 | vault_ui                             = true 35 | velero_version                       = "4.0.2" 36 | velero_s3_bucket_name                = "velero-bucket" 37 | velero_s3_bucket_region              = "gra" 38 | velero_s3_bucket_endpoint            = "https://s3.gra.io.cloud.ovh.net" 39 | velero_s3_access_key_id              = "acb2c7815132b7157b881b83a002b3f" 40 | velero_s3_secret_access_key          = "2d0f550387fa8a2073f3a8fc2062e28" 41 | velero_default_volumes_to_fs_backup  = false 42 | loki_enabled                         = "true" 43 | loki_s3_chunks_bucket_name           = "my-cluster-loki-chunks" 44 | loki_s3_ruler_bucket_name            = "my-cluster-loki-ruler" 45 | loki_s3_admin_bucket_name            = "my-cluster-loki-admin" 46 | loki_s3_bucket_region                = "gra" 47 | loki_s3_bucket_endpoint              = "https://s3.gra.io.cloud.ovh.net/" 48 | loki_s3_access_key_id                = "ebc294815132b7157b881b83a009f5ac" 49 | loki_s3_secret_access_key            = "2d0f550387fa8a2073f3a8fc29e4ab6" 50 | -------------------------------------------------------------------------------- /standalone/terraform.tfvars.template: -------------------------------------------------------------------------------- 1 | argocd_hostname                      = "" 2 | argocd_password                      = "" 3 | argocd_repo_url                      = "" 4 | argocd_repo_username                 = "" 5 | argocd_repo_password                 = "" 6 | main_cluster_issuer_name             = "" 7 | issuers = [ 8 |   { 9 |     name                    = "letsencrypt-prod" 10 |     server                  = "https://acme-v02.api.letsencrypt.org/directory" 11 |     email                   = "", 12 |     private_key_secret_name = "letsencrypt-prod" 13 |   }, { 14 |     name                    = "letsencrypt-staging" 15 |     server                  = "https://acme-staging-v02.api.letsencrypt.org/directory" 16 |     email                   = "" 17 |     private_key_secret_name = "letsencrypt-staging" 18 |   } 19 | ] 20 | grafana_hostname                     = "" 21 | grafana_admin_password               = "" 22 | vault_server_hostname                = "" 23 | install_hashicorp_vault              = 24 | vault_ui                             = 25 | velero_version                       = "" 26 | velero_s3_bucket_name                = "" 27 | velero_s3_bucket_region              = "" 28 | velero_s3_bucket_endpoint            = "" 29 | velero_s3_access_key_id              = "" 30 | velero_s3_secret_access_key          = "" 31 | velero_default_volumes_to_fs_backup  = 32 | loki_enabled                         = "" 33 | loki_s3_chunks_bucket_name           = "" 34 | loki_s3_ruler_bucket_name            = "" 35 | loki_s3_admin_bucket_name            = "" 36 | loki_s3_bucket_region                = "" 37 | loki_s3_bucket_endpoint              = "" 38 | loki_s3_access_key_id                = "" 39 | loki_s3_secret_access_key            = "" 40 | -------------------------------------------------------------------------------- /standalone/tls-vault.tf: -------------------------------------------------------------------------------- 1 | ../common/tls-vault.tf -------------------------------------------------------------------------------- /standalone/variables.tf: -------------------------------------------------------------------------------- 1 | ../common/variables.tf --------------------------------------------------------------------------------
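Note on the standalone workspace above: Terraform state stays on disk (backend "local" {}) and all three providers read cluster credentials from the local ./kubeconfig file. Below is a minimal shell sketch of bringing the stack up; the kubeconfig source and the use of raw terraform commands (rather than the repository's bin/ wrapper scripts) are illustrative assumptions:

    # Hypothetical bring-up of the standalone stack against an existing cluster.
    cd standalone
    cp ~/.kube/config ./kubeconfig     # every provider is configured with config_path = "./kubeconfig"
    cp terraform.tfvars.template terraform.tfvars
    "$EDITOR" terraform.tfvars         # fill in the blanks; terraform.tfvars.example shows plausible values
    terraform init                     # the "local" backend keeps state in this directory
    terraform plan -out=tfplan
    terraform apply tfplan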
/standalone/vault-values.yml: -------------------------------------------------------------------------------- 1 | ../common/vault-values.yml -------------------------------------------------------------------------------- /standalone/velero-credentials: -------------------------------------------------------------------------------- 1 | ../common/velero-credentials -------------------------------------------------------------------------------- /standalone/velero-values.yml: -------------------------------------------------------------------------------- 1 | ../common/velero-values.yml -------------------------------------------------------------------------------- /standalone/velero.tf: -------------------------------------------------------------------------------- 1 | ../common/velero.tf -------------------------------------------------------------------------------- /state_bucket/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "5.45.0" 6 | constraints = "~> 5.0" 7 | hashes = [ 8 | "h1:4Vgk51R7iTY1oczaTQDG+DkA9nE8TmjlUtecqXX6qDU=", 9 | "zh:1379bcf45aef3d486ee18b4f767bfecd40a0056510d26107f388be3d7994c368", 10 | "zh:1615a6f5495acfb3a0cb72324587261dd4d72711a3cc51aff13167b14531501e", 11 | "zh:18b69a0f33f8b1862fbd3f200756b7e83e087b73687085f2cf9c7da4c318e3e6", 12 | "zh:2c5e7aecd197bc3d3b19290bad8cf4c390c2c6a77bb165da4e11f53f2dfe2e54", 13 | "zh:3794da9bef97596e3bc60e12cdd915bda5ec2ed62cd1cd93723d58b4981905fe", 14 | "zh:40a5e45ed91801f83db76dffd467dcf425ea2ca8642327cf01119601cb86021c", 15 | "zh:4abfc3f53d0256a7d5d1fa5e931e4601b02db3d1da28f452341d3823d0518f1a", 16 | "zh:4eb0e98078f79aeb06b5ff6115286dc2135d12a80287885698d04036425494a2", 17 | "zh:75470efbadea4a8d783642497acaeec5077fc4a7f3df3340defeaa1c7de29bf7", 18 | "zh:8861a0b4891d5fa2fa7142f236ae613cea966c45b5472e3915a4ac3abcbaf487", 19 | "zh:8bf6f21cd9390b742ca0b4393fde92616ca9e6553fb75003a0999006ad233d35", 20 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 21 | "zh:ad73008a044e75d337acda910fb54d8b81a366873c8a413fec1291034899a814", 22 | "zh:bf261713b0b8bebfe8c199291365b87d9043849f28a2dc764bafdde73ae43693", 23 | "zh:da3bafa1fd830be418dfcc730e85085fe67c0d415c066716f2ac350a2306f40a", 24 | ] 25 | } 26 | 27 | provider "registry.terraform.io/ovh/ovh" { 28 | version = "0.37.0" 29 | constraints = "~> 0.37.0" 30 | hashes = [ 31 | "h1:sY7MpVOBwhppzwpT0bYZB8+yLGIgAAWO3KwdD0m8+F0=", 32 | "zh:2e174acf0f9d7c57ad080a502624838057bbf77880a15a524797a6cc011daafc", 33 | "zh:3d690d48eb14e9d0a25266eed7ec1cfa748cb345ef4ae8dec73e437ab20d5516", 34 | "zh:3f792598220f7fa91612985c39dda1f8d27d6e6dc6773af1e8ad6a7cf33554d3", 35 | "zh:4044b45d5fd495bd2fae4f317755c1bbe7a1f2b57b5ed3cca50993aa3908c781", 36 | "zh:500aeecd463232fde8f36aa98199174f7b6469005373484044b5f51efc500da0", 37 | "zh:63276ae47999d11a6a58a9db38f8b609108285ca978cc13bc29eeaefd55fa19a", 38 | "zh:72d16516bc53b36a5827b5abb71b01298f39c5abc9a87852c2095f8b824857c5", 39 | "zh:7e31575445ad0234f844ec91fb989092ef9e4c3a673b92c0272f50a321458f54", 40 | "zh:bae7e1949229aa67aeda0e618b18dd9ad3d5fd655347e478c32f000d35bfd5b6", 41 | "zh:d5d2cddc6df0b34250d855676438d6e847ec38c5804217d94140bee84ba14c75", 42 | "zh:f1b519316ad1d927b5f1079a048ea166f6c7a7bdd1c563f20236238b09047ed6", 43 | "zh:f4f95917c0a02d1cd2077bd91693a5840c535402180f7ce4132e8e69e97b2a11", 44 |
"zh:f97481c0d3e8f2ec1ffb861f186f1438afc8a92f17e4183a312e67c5a03d8888", 45 | ] 46 | } 47 | -------------------------------------------------------------------------------- /state_bucket/bucket.tf: -------------------------------------------------------------------------------- 1 | ######################################################################################## 2 | # This script creates an S3 bucket along with two S3 users 3 | ######################################################################################## 4 | 5 | ######################################################################################## 6 | # User / Credential 7 | ######################################################################################## 8 | 9 | # Used to create the bucket 10 | resource "ovh_cloud_project_user" "s3_admin_user" { 11 |   service_name = var.ovh_public_cloud_project_id 12 |   description  = "${var.user_desc_prefix} that is used to create the S3 bucket" 13 |   role_name    = "objectstore_operator" 14 | } 15 | resource "ovh_cloud_project_user_s3_credential" "s3_admin_cred" { 16 |   service_name = var.ovh_public_cloud_project_id 17 |   user_id      = ovh_cloud_project_user.s3_admin_user.id 18 | } 19 | 20 | # This user's credentials are handed to the operator running Terraform 21 | resource "ovh_cloud_project_user" "write_user" { 22 |   service_name = var.ovh_public_cloud_project_id 23 |   description  = "${var.user_desc_prefix} that will have write access to the bucket" 24 |   role_name    = "objectstore_operator" 25 | } 26 | 27 | resource "ovh_cloud_project_user_s3_credential" "write_cred" { 28 |   service_name = var.ovh_public_cloud_project_id 29 |   user_id      = ovh_cloud_project_user.write_user.id 30 | } 31 | 32 | ######################################################################################## 33 | # Bucket 34 | ######################################################################################## 35 | resource "aws_s3_bucket" "bucket" { 36 |   bucket = var.bucket_name 37 | } 38 | 39 | ######################################################################################## 40 | # Policy 41 | ######################################################################################## 42 | 43 | resource "ovh_cloud_project_user_s3_policy" "write_policy" { 44 |   service_name = var.ovh_public_cloud_project_id 45 |   user_id      = ovh_cloud_project_user.write_user.id 46 |   policy = jsonencode({ 47 |     "Statement":[{ 48 |       "Sid": "RWContainer", 49 |       "Effect": "Allow", 50 |       "Action":["s3:GetObject", "s3:PutObject", "s3:DeleteObject", "s3:ListBucket", "s3:ListMultipartUploadParts", "s3:ListBucketMultipartUploads", "s3:AbortMultipartUpload", "s3:GetBucketLocation"], 51 |       "Resource":["arn:aws:s3:::${aws_s3_bucket.bucket.bucket}", "arn:aws:s3:::${aws_s3_bucket.bucket.bucket}/*"] 52 |     }] 53 |   }) 54 | } 55 | 56 | ######################################################################################## 57 | # Output 58 | ######################################################################################## 59 | output "access_key" { 60 |   description = "The access key that has been created by the Terraform script" 61 |   value       = ovh_cloud_project_user_s3_credential.write_cred.access_key_id 62 | } 63 | 64 | output "secret_key" { 65 |   description = "The secret key that has been created by the Terraform script" 66 |   value       = ovh_cloud_project_user_s3_credential.write_cred.secret_access_key 67 |   sensitive   = true 68 | } 69 | 70 | # Redundant, since the bucket name is provided as a variable 71 | output "bucket_name" { 72 |   description = "The name of the bucket that has been created by the Terraform script" 73 |   value       = aws_s3_bucket.bucket.bucket 74 | } -------------------------------------------------------------------------------- /state_bucket/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 |   required_providers { 3 |     aws = { 4 |       source  = "hashicorp/aws" 5 |       version = "~> 5.0" 6 |     } 7 |     ovh = { 8 |       source  = "ovh/ovh" 9 |       version = "~> 0.37.0" 10 |     } 11 |   } 12 | } 13 | 14 | provider "ovh" { 15 |   endpoint           = "ovh-eu" 16 |   application_key    = var.application_key 17 |   application_secret = var.application_secret 18 |   consumer_key       = var.consumer_key 19 | } 20 | 21 | # Configure the AWS Provider 22 | provider "aws" { 23 |   region     = var.s3_region 24 |   access_key = ovh_cloud_project_user_s3_credential.s3_admin_cred.access_key_id 25 |   secret_key = ovh_cloud_project_user_s3_credential.s3_admin_cred.secret_access_key 26 | 27 |   # OVH implementation has no STS service 28 |   skip_credentials_validation = true 29 |   skip_requesting_account_id  = true 30 |   # The gra region is unknown to AWS, hence region validation must be skipped. 31 |   skip_region_validation = true 32 |   endpoints { 33 |     s3 = var.s3_endpoint 34 |   } 35 | } 36 | -------------------------------------------------------------------------------- /state_bucket/terraform.tfvars.template: -------------------------------------------------------------------------------- 1 | ovh_public_cloud_project_id = "" 2 | application_key             = "" 3 | application_secret          = "" 4 | consumer_key                = "" 5 | s3_region                   = "" 6 | s3_endpoint                 = "" 7 | user_desc_prefix            = "" 8 | bucket_name                 = "" -------------------------------------------------------------------------------- /state_bucket/variables.tf: -------------------------------------------------------------------------------- 1 | variable "ovh_public_cloud_project_id" { 2 |   type        = string 3 |   description = "The OVH public cloud project id" 4 | } 5 | 6 | variable "application_key" { 7 |   type        = string 8 |   description = "The application key to use for OVH API calls" 9 |   sensitive   = true 10 | } 11 | 12 | variable "application_secret" { 13 |   type        = string 14 |   description = "The application secret to use for OVH API calls" 15 |   sensitive   = true 16 | } 17 | 18 | variable "consumer_key" { 19 |   type        = string 20 |   description = "The consumer key to use for OVH API calls" 21 |   sensitive   = true 22 | } 23 | 24 | variable "s3_region" { 25 |   type        = string 26 |   description = "The region for the S3 bucket" 27 |   default     = "gra" 28 | } 29 | 30 | variable "s3_endpoint" { 31 |   type        = string 32 |   description = "The endpoint for the S3 bucket" 33 |   default     = "https://s3.gra.io.cloud.ovh.net/" 34 | } 35 | 36 | variable "user_desc_prefix" { 37 |   type    = string 38 |   default = "User for TF backend state storage of shared-k8s" 39 | } 40 | 41 | variable "bucket_name" { 42 |   type        = string 43 |   default     = "tf-k8s-state-storage" 44 |   description = "Name of the bucket. Warning: only lowercase alphanumeric characters and hyphens are allowed" 45 | } 46 | -------------------------------------------------------------------------------- /vault/.kube/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openfun/kubic/c326f47a4a9079453242e13e2d1829cc640020f9/vault/.kube/.gitkeep -------------------------------------------------------------------------------- /vault/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init".
2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/hashicorp/vault" { 5 | version = "3.25.0" 6 | constraints = "3.25.0" 7 | hashes = [ 8 | "h1:2zCa1Cj3mwK+4iin4NM8+nv9lUdJRP3BjRSZsloyZOc=", 9 | "h1:3GN5k6zxDAI5gfcKENb/jnJyFGWA/0JoumnD6eyVZjs=", 10 | "h1:4B34OwtP7NttC1lMHzlx/PkylaFarXoQnoxeg7hZW6M=", 11 | "h1:4KzkyK0u0A8az1Fud/AcW/NYaz3jzJQ6k2hd/LIwlWU=", 12 | "h1:9y2/GemP5TP+16AxhVoDl+4p/22zVlyccCJVZYANwNc=", 13 | "h1:QjkFFwx5M4cnNi3oDgJMFQWmVgCbUisAN2SXfhmfsac=", 14 | "h1:XGuVzeZwCLqLqytBpkPEe240qKtpiQX7nwyZoUkSHMw=", 15 | "h1:bKBTVT1YpUG6Mm/YGRfnwzg6EjdI4QxhzY8ntCBrS4U=", 16 | "h1:ico8CTFKaeML6y5bB/uAZaXqUjWKEwf1W6tMCeQAbOk=", 17 | "h1:kFwB7LM5hdvkkZSrB6zxD0utStl4Aq8NQFQCGIZxtEU=", 18 | "h1:vr1E5WSsSiRuLLIMla5NrAbw+hzDvFX3UpyJPhxl/eY=", 19 | "zh:430308f5dbd322a2e5afafd2be810f44eb35e28afa0aa0ac30b270cd6f413da8", 20 | "zh:6c36da504c4af84ea9fbaf1e6c2560f691dc3d2d7f0b382a937bfae78830fa17", 21 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 22 | "zh:7bc39cb2a7d91631cb8be54b0b06de29fb47910923e54f779e74d8b218b1ab70", 23 | "zh:7e4a5bebcfa19b9f1e3a6bbda5c6771b6dd28b3dfa19fdf3d4fced419cfa416f", 24 | "zh:7ea473203b37d006a0d2b1cdc8bff55c96b3c5819dbb62862cdabff6f2f0e2f2", 25 | "zh:9ad136feece62f0c545fefa4592b2cdaa896a39acb697fb129233dce880a69aa", 26 | "zh:ad0c9980295c902804af23da0250830b912eb13089349bf5c7be0649fac2689c", 27 | "zh:b305835cc13dcd9ec996d49d23163c6311f30786296f86ca5657b93aea4f3591", 28 | "zh:d8fe6ab7da12efbb5b122ae9b6856375c5a3759add9df577a8fb448898ceffe3", 29 | "zh:ef59ef2c06a55571e64fdd5888a074ed9556436738e9737e32bacab93ca133ff", 30 | "zh:f59c2605d916e1806dc494241467dd430194f5e7bdbf331c5aca904873347ad8", 31 | ] 32 | } 33 | -------------------------------------------------------------------------------- /vault/init.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Define a kubectld function that imitates the real kubectl command by running it inside a Docker container 4 | function kubectld() { 5 |     DOCKER_USER="$(id -u):$(id -g)" \ 6 |     docker-compose run --rm kubectld --kubeconfig=/app/.kube/config "$@" 7 | } 8 | 9 | printf " __ __ __ _ \n / / / /___ ______/ /_ (_)________ _________ \n / /_/ / __ \`/ ___/ __ \/ / ___/ __ \/ ___/ __ \\n / __ / /_/ (__ ) / / / / /__/ /_/ / / / /_/ /\n/_/ /_/\__,_/____/_/ /_/_/\___/\____/_/ / .___/ \n _ _____ __ ____ ______ /_/ \n | | / / | / / / / / /_ __/ \n| | / / /| |/ / / / / / / \n| |/ / ___ / /_/ / /___/ / \n|___/_/ |_\____/_____/_/\n\n\n" 10 | printf "This script will help you initialize your Hashicorp Vault. In order to do so, the following requirements must be met: 11 | * an uninitialized Hashicorp Vault 12 | * the kubeconfig file of the cluster you want to use, placed in the vault/.kube/config file; you can get it by running: bin/get-kube-config.sh (your-provider) > vault/.kube/config 13 | 14 | If these requirements are met, the script will initialize the Vault and unseal it. By default, the Vault's unseal keys are generated with Shamir's secret sharing: \e[1mn\e[0m keys (with n > 0) are generated, and \e[1mm\e[0m keys (with \e[1m0 < m <= n\e[0m) are needed to unseal the vault. The script will generate a cluster-keys.json file containing the keys and a root token to authenticate to the Vault (which you may need for the next steps of the tutorial). 15 | 16 | If a cluster-keys.json file already exists, the script will use it to unseal the vault and will not generate any new keys.
17 | " 18 | read -p "Are the requirements all met? (y/Y) " -n 1 -r 19 | echo # (optional) move to a new line 20 | if [[ ! $REPLY =~ ^[Yy]$ ]]; then 21 |     echo 'Requirements not met. Exiting.' 22 |     exit 1 23 | fi 24 | 25 | echo 26 | 27 | kubectld cluster-info 28 | 29 | echo 30 | echo "This is the output of the command: kubectl cluster-info." 31 | read -p "Do you confirm this is your cluster? (y/Y) " -n 1 -r 32 | echo # (optional) move to a new line 33 | if [[ ! $REPLY =~ ^[Yy]$ ]]; then 34 |     echo 'Aborting.' 35 |     exit 1 36 | fi 37 | echo 38 | read -p "How many keys (n) do you want to generate? " -r key_nb 39 | if [[ ! $key_nb =~ ^[0-9]+$ ]]; then 40 |     echo 'This is not a number. Exiting.' 41 |     exit 1 42 | fi 43 | echo 44 | read -p "How many keys (m) should be required to unseal the vault? " -r key_nd 45 | if [[ ! $key_nd =~ ^[0-9]+$ ]]; then 46 |     echo 'This is not a number. Exiting.' 47 |     exit 1 48 | fi 49 | echo 50 | nb_replicas=$(kubectld get pods -n hashicorp-vault | grep -cE 'hashicorp-vault-[0-9]+') 51 | 52 | if [ -s "cluster-keys.json" ]; then 53 |     echo "A cluster-keys.json file already exists and is not empty." 54 |     read -p "Do you want to use it to unseal the vault? (y/Y) " -r -n 1 55 |     echo # (optional) move to a new line 56 |     if [[ ! $REPLY =~ ^[Yy]$ ]]; then 57 |         echo 'Aborting.' 58 |         exit 1 59 |     fi 60 |     echo "Using existing keys..." 61 | else 62 |     echo "Generating the keys..." 63 |     kubectld exec -n hashicorp-vault hashicorp-vault-0 -- vault operator init \ 64 |         -key-shares="$key_nb" \ 65 |         -key-threshold="$key_nd" \ 66 |         -format=json > cluster-keys.json 67 | fi 68 | 69 | # Unseal every replica with the first m keys (the base64 unseal keys start at line 3 of cluster-keys.json). 70 | for ((j = 0; j < $nb_replicas; j++)); do 71 |     for ((i = 3; i <= $((2 + $key_nd)); i++)); do 72 |         key=$(sed "${i}q;d" cluster-keys.json | sed 's/ //g' | sed 's/\"//g' | sed 's/,//g') 73 |         number=$(($i - 2)) 74 |         echo "Unsealing pod ${j} with key ${number} out of ${key_nd} needed..." 75 |         kubectld exec -n hashicorp-vault hashicorp-vault-$j -- vault operator unseal "$key" 76 |         sleep 2 77 |     done 78 | done 79 | 80 | echo 81 | echo "Vault completely unsealed. You can now authenticate to the Vault with the root token in the cluster-keys.json file." 82 | exit 0 83 | -------------------------------------------------------------------------------- /vault/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 |   required_providers { 3 |     vault = { 4 |       source  = "hashicorp/vault" 5 |       version = "3.25.0" 6 |     } 7 |   } 8 | } 9 | 10 | provider "vault" { 11 |   address = var.vault_url 12 |   token   = var.vault_root_token 13 | } 14 | 15 | # Enable the kv secret engine to store key/value secrets 16 | resource "vault_mount" "kvv2" { 17 |   path        = "kv" 18 |   type        = "kv" 19 |   options     = { version = "2" } 20 |   description = "KV Version 2 secret engine mount" 21 | } 22 | 23 | resource "vault_kv_secret_backend_v2" "example" { 24 |   mount        = vault_mount.kvv2.path 25 |   max_versions = 5 26 | } 27 | 28 | resource "vault_auth_backend" "kubernetes" { 29 |   type = "kubernetes" 30 | } 31 | 32 | resource "vault_kubernetes_auth_backend_role" "vault_backend" { 33 |   backend                          = vault_auth_backend.kubernetes.path 34 |   role_name                        = "argocd" 35 |   bound_service_account_names      = ["argocd-repo-server"] 36 |   bound_service_account_namespaces = ["argocd"] 37 |   token_ttl                        = 3600 38 |   token_policies                   = ["argocd"] 39 | } 40 | 41 | resource "vault_policy" "vault_policy" { 42 |   name = "argocd" 43 | 44 |   policy = <
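For context, here is a manual equivalent of what vault/init.sh automates. The 5/3 key split, the three-replica Vault deployment, and the use of jq (instead of the script's line-based sed parsing) are illustrative assumptions, not values fixed by the repository:

    # Initialize the first Vault pod; capture the Shamir key shares and the root token.
    kubectl exec -n hashicorp-vault hashicorp-vault-0 -- vault operator init \
      -key-shares=5 -key-threshold=3 -format=json > cluster-keys.json
    # Each replica must be unsealed with any 3 of the 5 generated key shares.
    for pod in hashicorp-vault-0 hashicorp-vault-1 hashicorp-vault-2; do
      for i in 0 1 2; do
        kubectl exec -n hashicorp-vault "$pod" -- \
          vault operator unseal "$(jq -r ".unseal_keys_b64[$i]" cluster-keys.json)"
      done
    done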