├── .github └── workflows │ ├── main_workflow.yml │ └── pr_cluster_workflow.yml ├── .gitignore ├── README.md ├── config.yaml ├── modules └── argocd │ ├── app-of-apps.yaml.tpl │ ├── argocd.yaml │ ├── main.tf │ └── variables.tf ├── scripts ├── bash │ ├── create_kubeconfig.sh │ ├── destroy_cluster_nodes.sh │ ├── merge_kubeconfigs.sh │ └── talos_cli.sh ├── python │ ├── filter_clusters.py │ └── validate_yaml.py └── terraform │ ├── apply.sh │ └── destroy.sh ├── terraform ├── cluster │ ├── .terraform.lock.hcl │ ├── argocd.tf │ ├── main.tf │ ├── netris-operator.tf │ ├── terraform.tf │ └── variables.tf └── infrastructure │ ├── .terraform.lock.hcl │ ├── controlplane.tf │ ├── destroy-cluster-nodes.tf │ ├── github.tf │ ├── kubeconfig.tf │ ├── locals.tf │ ├── main.tf │ ├── outputs.tf │ ├── talos.tf │ ├── templates │ ├── argocd_application.yaml.tpl │ ├── controlplane.yaml.tpl │ └── worker.yaml.tpl │ ├── terraform.tf │ ├── variables.tf │ └── worker.tf └── test_config.yaml /.github/workflows/main_workflow.yml: -------------------------------------------------------------------------------- 1 | # This workflow is designed for creating clusters using "config.yaml" file to implement GitOps solution with the help of "turnk8s". 2 | # It is started to run when PRs are merged into the 'main' branch. 
3 | 4 | 5 | name: 'Automated Terraform Cluster Setup and Cleanup' 6 | 7 | on: 8 | push: 9 | branches: 10 | - 'main' 11 | paths: 12 | - 'config.yaml' 13 | 14 | env: 15 | TF_CLOUD_ORGANIZATION: "infraheads" 16 | TF_API_TOKEN: "${{ secrets.TF_API_TOKEN }}" 17 | TF_VAR_proxmox_token_id: "${{ secrets.PROXMOX_TOKEN_ID }}" 18 | TF_VAR_proxmox_token_secret: "${{ secrets.PROXMOX_TOKEN_SECRET }}" 19 | TF_VAR_github_token: "${{ secrets.TOKEN_GITHUB }}" 20 | TF_VAR_netris_controller_host: "${{ vars.NETRIS_CONTROLLER_HOST }}" 21 | TF_VAR_netris_controller_login: "${{ secrets.NETRIS_CONTROLLER_LOGIN }}" 22 | TF_VAR_netris_controller_password: "${{ secrets.NETRIS_CONTROLLER_PASSWORD }}" 23 | TF_VAR_argocd_admin_password: "${{ secrets.ARGOCD_ADMIN_PASSWORD }}" 24 | 25 | jobs: 26 | main_workflow: 27 | runs-on: self-hosted 28 | container: 29 | image: ${{ vars.RUNNER_IMAGE }} 30 | permissions: 31 | contents: read 32 | pull-requests: write 33 | defaults: 34 | run: 35 | working-directory: "terraform/infrastructure" 36 | steps: 37 | - uses: actions/checkout@v4 38 | 39 | - name: Setup Terraform 40 | uses: hashicorp/setup-terraform@v3 41 | with: 42 | cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }} 43 | terraform_version: 1.7.5 44 | 45 | - name: Configure Terraform Cache 46 | run: echo "TF_PLUGIN_CACHE_DIR=$HOME/.terraform.d/plugin-cache" >> "$GITHUB_ENV" 47 | 48 | - name: Initializing Terraform 49 | run: | 50 | terraform init -upgrade 51 | env: 52 | TF_WORKSPACE: "default-ws" 53 | 54 | - name: Filter desired and removable clusters 55 | id: filter_clusters 56 | run: | 57 | filtered_clusters=$(python3 ${GITHUB_WORKSPACE}/scripts/python/filter_clusters.py --yaml-path=${GITHUB_WORKSPACE}/config.yaml --existing-clusters="$(terraform workspace list)") 58 | # Get clusters from config.yaml file 59 | echo "desired_clusters=$(echo $filtered_clusters | cut -d',' -f1)" >> $GITHUB_OUTPUT 60 | # Get all cluster must be removed 61 | echo "removable_clusters=$(echo $filtered_clusters | cut 
-d',' -f2)" >> $GITHUB_OUTPUT 62 | 63 | # Destroy clusters 64 | - name: Destroy Clusters using Terraform 65 | if: ${{ steps.filter_clusters.outputs.removable_clusters != '' }} 66 | # change list to strings 67 | run: | 68 | bash ${GITHUB_WORKSPACE}/scripts/terraform/destroy.sh ${{ steps.filter_clusters.outputs.removable_clusters }} 69 | 70 | # Apply cluster's infrastructure changes 71 | - name: Infrastructure updates 72 | if: ${{ steps.filter_clusters.outputs.desired_clusters != '' }} 73 | run: | 74 | bash ${GITHUB_WORKSPACE}/scripts/terraform/apply.sh infrastructure ${{ steps.filter_clusters.outputs.desired_clusters }} 75 | 76 | - name: Initializing Terraform 77 | if: ${{ steps.filter_clusters.outputs.desired_clusters != '' }} 78 | run: | 79 | cd ${GITHUB_WORKSPACE}/terraform/cluster 80 | terraform init -upgrade 81 | env: 82 | TF_WORKSPACE: "default-ws" 83 | 84 | # Apply cluster's applications and tools changes 85 | - name: Cluster updates 86 | if: ${{ steps.filter_clusters.outputs.desired_clusters != '' }} 87 | run: | 88 | cd ${GITHUB_WORKSPACE}/terraform/cluster 89 | bash ${GITHUB_WORKSPACE}/scripts/terraform/apply.sh cluster ${{ steps.filter_clusters.outputs.desired_clusters }} 90 | 91 | - name: Merging kube-configs into one file 92 | run: bash ${GITHUB_WORKSPACE}/scripts/bash/merge_kubeconfigs.sh 93 | 94 | - name: Generating kube-config artifact 95 | uses: actions/upload-artifact@v4 96 | with: 97 | name: kubeconfig 98 | path: ~/.kube/config 99 | compression-level: 0 100 | 101 | - name: Generating Markdown 102 | run: | 103 | echo "### turnk8s" >> $GITHUB_STEP_SUMMARY 104 | echo "" >> $GITHUB_STEP_SUMMARY 105 | echo "Push your Kubernetes service manifests to the following GitHub repositories to get them deployed on the cluster. 
:star_struck:" >> $GITHUB_STEP_SUMMARY 106 | for cluster_name in ${{ steps.filter_clusters.outputs.desired_clusters }}; 107 | do 108 | echo "[$cluster_name](https://github.com/infraheads/$cluster_name)" >> $GITHUB_STEP_SUMMARY 109 | done 110 | echo "Use the 'kubeconfig' file(s) to connect to the cluster, which is(are) attached in 'Artifacts' section." >> $GITHUB_STEP_SUMMARY 111 | -------------------------------------------------------------------------------- /.github/workflows/pr_cluster_workflow.yml: -------------------------------------------------------------------------------- 1 | # This workflow is designed for creating clusters using "config.yaml" and "test_config.yaml" files based on purpose of implementing GitOps solution with the help of "turnk8s". 2 | # 3 | # It is possible to create or delete clusters based on the creating PRs, by updating the configuration files. 4 | # The "config.yaml" file can be updated modifications(creating, updating, or deleting clusters) of cluster(s) described within the "config.yaml" file. In simple words, it describes the existing clusters on the server. 5 | # **Note: The clusters must not start with the "turnk8s-" prefix. 6 | # Modifications take effect upon merging a PRs into the main branch. 7 | # All clusters are destroyed if the "config.yaml" file is empty. 8 | # 9 | # For testing new features, the "test_config.yaml" file should only contain one cluster description(referred to as "test-cluster"), which must be in the "turnk8s-" format. 10 | # Only one cluster can be described in test_config.yaml file. 11 | # **Note: For merging a PR, test_config.yaml file must be empty. 12 | # Test cluster is destroyed if "test_config.yaml" file is empty. 13 | # 14 | # **Attention: The workflow contains a job called "enable_merge_pr", which enable ability of merging PRs. 15 | # As a result of the workflow, the cluster's kube-config file will be found attached in the "Artifacts" section. 
16 | 17 | 18 | name: 'Automated Cluster Deployment for Pull Requests' 19 | 20 | on: 21 | pull_request: 22 | branches: 23 | - '*' 24 | paths: 25 | - 'test_config.yaml' 26 | - 'config.yaml' 27 | 28 | env: 29 | TF_CLOUD_ORGANIZATION: "infraheads" 30 | TF_API_TOKEN: "${{ secrets.TF_API_TOKEN }}" 31 | TF_VAR_proxmox_token_id: "${{ secrets.PROXMOX_TOKEN_ID }}" 32 | TF_VAR_proxmox_token_secret: "${{ secrets.PROXMOX_TOKEN_SECRET }}" 33 | TF_VAR_github_token: "${{ secrets.TOKEN_GITHUB }}" 34 | TF_VAR_netris_controller_host: "${{ vars.NETRIS_CONTROLLER_HOST }}" 35 | TF_VAR_netris_controller_login: "${{ secrets.NETRIS_CONTROLLER_LOGIN }}" 36 | TF_VAR_netris_controller_password: "${{ secrets.NETRIS_CONTROLLER_PASSWORD }}" 37 | TF_VAR_argocd_admin_password: "${{ secrets.ARGOCD_ADMIN_PASSWORD }}" 38 | TF_VAR_config_file_path: "../../test_config.yaml" 39 | TF_VAR_cluster_name: "turnk8s-${{ github.event.number }}" 40 | 41 | jobs: 42 | pr_workflow: 43 | runs-on: self-hosted 44 | permissions: 45 | contents: read 46 | pull-requests: write 47 | container: 48 | image: ${{ vars.RUNNER_IMAGE }} 49 | defaults: 50 | run: 51 | working-directory: "terraform/infrastructure" 52 | outputs: 53 | config_is_empty: ${{ steps.check_config.outputs.config_is_empty }} 54 | steps: 55 | - uses: actions/checkout@v4 56 | 57 | - name: Checks if test config is empty 58 | id: check_config 59 | shell: bash 60 | run: | 61 | set -e 62 | if [ -z "$(grep -v '^\s*$' ${GITHUB_WORKSPACE}/test_config.yaml)" ]; 63 | then 64 | echo "config_is_empty=true" >> $GITHUB_OUTPUT 65 | # check how many clusters should be updated: only one cluster must be updated through each PR 66 | echo "The test_config.yaml file is empty and the PR is ready to merge." 67 | else 68 | echo "config_is_empty=false" >> $GITHUB_OUTPUT 69 | echo "The test_config.yaml file is not empty. For merging PRs it must be empty." 70 | fi 71 | 72 | # Validates YAML configuration files: structure, empty lines, keys, etc. 
73 | - name: Ensure validity of the configuration files 74 | run: | 75 | # in case of empty test_confi file, it must validate config.yaml file 76 | if ${{ steps.check_config.outputs.config_is_empty == 'true' }}; then 77 | python3 ${GITHUB_WORKSPACE}/scripts/python/validate_yaml.py --yaml-path=${GITHUB_WORKSPACE}/config.yaml 78 | else 79 | python3 ${GITHUB_WORKSPACE}/scripts/python/validate_yaml.py --yaml-path=${GITHUB_WORKSPACE}/test_config.yaml --cluster-name=${{ env.TF_VAR_cluster_name }} 80 | fi 81 | 82 | - name: Setup Terraform 83 | uses: hashicorp/setup-terraform@v3 84 | with: 85 | cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }} 86 | terraform_version: 1.7.5 87 | 88 | - name: Configure Terraform Cache 89 | run: echo "TF_PLUGIN_CACHE_DIR=$HOME/.terraform.d/plugin-cache" >> "$GITHUB_ENV" 90 | 91 | - name: Initializing Terraform 92 | run: | 93 | terraform init -upgrade 94 | env: 95 | TF_WORKSPACE: "default-ws" 96 | 97 | # Test PR cluster is removed in case of empty test_config.yaml file 98 | - name: Destroying test cluster 99 | if: ${{ steps.check_config.outputs.config_is_empty == 'true' }} 100 | run: | 101 | if terraform workspace list | grep -w "${{ github.event.number }}-infrastructure"; then 102 | bash ${GITHUB_WORKSPACE}/scripts/terraform/destroy.sh ${{ env.TF_VAR_cluster_name }} 103 | fi 104 | 105 | # Apply cluster's infrastructure changes 106 | - name: Infrastructure updates 107 | if: ${{ steps.check_config.outputs.config_is_empty == 'false' }} 108 | run: | 109 | bash ${GITHUB_WORKSPACE}/scripts/terraform/apply.sh infrastructure ${{ env.TF_VAR_cluster_name }} 110 | 111 | - name: Initializing Terraform 112 | if: ${{ steps.check_config.outputs.config_is_empty == 'false' }} 113 | run: | 114 | cd ${GITHUB_WORKSPACE}/terraform/cluster 115 | terraform init -upgrade 116 | env: 117 | TF_WORKSPACE: "default-ws" 118 | 119 | # Apply cluster's applications and tools changes 120 | - name: Cluster updates 121 | if: ${{ 
steps.check_config.outputs.config_is_empty == 'false' }} 122 | run: | 123 | cd ${GITHUB_WORKSPACE}/terraform/cluster 124 | bash ${GITHUB_WORKSPACE}/scripts/terraform/apply.sh cluster ${{ env.TF_VAR_cluster_name }} 125 | 126 | - name: Generating kube-config as Artifact 127 | if: ${{ steps.check_config.outputs.config_is_empty == 'false' }} 128 | uses: actions/upload-artifact@v4 129 | with: 130 | name: kube-config-${{ env.TF_VAR_cluster_name }} 131 | path: /opt/kubeconfig/${{ env.TF_VAR_cluster_name }} 132 | compression-level: 0 133 | 134 | - name: Generating Markdown 135 | if: ${{ steps.check_config.outputs.config_is_empty == 'false' }} 136 | run: | 137 | echo "### turnk8s" >> $GITHUB_STEP_SUMMARY 138 | echo "" >> $GITHUB_STEP_SUMMARY 139 | echo "Push your Kubernetes service manifests to [GitHub URL](https://github.com/infraheads/${{ env.TF_VAR_cluster_name }}) to get them deployed on the cluster. :star_struck:" >> $GITHUB_STEP_SUMMARY 140 | echo "Use the 'kubeconfig' file to connect to the cluster, which is attached in 'Artifacts' section." >> $GITHUB_STEP_SUMMARY 141 | 142 | enable_merge_pr: 143 | needs: pr_workflow 144 | runs-on: self-hosted 145 | permissions: 146 | contents: read 147 | pull-requests: write 148 | container: 149 | image: ${{ vars.RUNNER_IMAGE }} 150 | steps: 151 | # PR can be merged in case of empty test_config.yaml 152 | - name: Enable merge PR 153 | run: | 154 | set -e 155 | if ${{ needs.pr_workflow.outputs.config_is_empty == 'false' }}; then 156 | echo "The test_config.yaml file is not empty. For merging PRs the file must be empty." 
157 | exit 1 158 | fi 159 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Local .terraform directories 2 | **/.terraform/* 3 | 4 | # .tfstate files 5 | *.tfstate 6 | *.tfstate.* 7 | 8 | # Crash log files 9 | crash.log 10 | crash.*.log 11 | 12 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as 13 | # password, private keys, and other secrets. These should not be part of version 14 | # control as they are data points which are potentially sensitive and subject 15 | # to change depending on the environment. 16 | *.tfvars 17 | *.tfvars.json 18 | 19 | # Ignore override files as they are usually used to override resources locally and so 20 | # are not checked in 21 | override.tf 22 | override.tf.json 23 | *_override.tf 24 | *_override.tf.json 25 | 26 | # Include override files you do wish to add to version control using negated pattern 27 | # !example_override.tf 28 | 29 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 30 | # example: *tfplan* 31 | 32 | # Ignore CLI configuration files 33 | .terraformrc 34 | terraform.rc 35 | 36 | # ignore .idea from PyCharm 37 | .idea -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Introduction 2 | 3 | **turnk8s** is a toolset for On-Prem, turnkey Kubernetes deployments based on [Talos Linux](https://www.talos.dev) and [Proxmox](https://www.proxmox.com). 4 | 5 | 6 | ## Before using turnk8s 7 | 8 | You must fork turnk8s, set up a self-hosted GitHub runner inside your infrastructure, and create infrastructure-specific environmental variables. See the list of the repository's environmental variables. 9 | - ARGOCD_ADMIN_PASSWORD - Used as ArgoCD web interface password. 
10 | - NETRIS_CONTROLLER_LOGIN - [Your Netris controller login](https://www.netris.io/docs/en/latest/tutorials/installing-netris-controller.html) 11 | - NETRIS_CONTROLLER_PASSWORD - [Your Netris controller password](https://www.netris.io/docs/en/latest/tutorials/installing-netris-controller.html) 12 | - PROXMOX_IP - Proxmox host IP address 13 | - PROXMOX_TOKEN_ID - [Proxmox api token id](https://www.netris.io/docs/en/latest/tutorials/installing-netris-controller.html) 14 | - PROXMOX_TOKEN_SECRET - [Proxmox api token secret](https://www.netris.io/docs/en/latest/tutorials/installing-netris-controller.html) 15 | - TF_API_TOKEN - [Terraform cloud API token](https://developer.hashicorp.com/terraform/tutorials/automation/github-actions) 16 | - TOKEN_GITHUB - GitHub token 17 | 18 | Please note that the GitHub token should have repository creation privileges. 19 | 20 | ## Using turnk8s 21 | 22 | Adhering to GitOps, you can manage all the clusters with the `config.yaml` configuration file. turnk8s ensures that the `config.yaml` represents the state of your running clusters. 23 |
24 | Click here to see the structure of config.yaml file: 25 | 26 | ```yaml 27 | turnk8s-cluster: 28 | versions: 29 | talos: v1.7.1 30 | k8s: v1.30.0 31 | controlplane: 32 | cpu_cores: 2 33 | memory: 4096 34 | disk_size: 20 35 | worker_nodes: 36 | cpu_cores: 2 37 | memory: 4096 38 | disk_size: 20 39 | ``` 40 |
41 | 42 | 43 | ### Creating a Cluster 44 | You can create one or many clusters at once. All you need to do is add the turnk8s cluster configurations to the config.yaml file and push it to your turnk8s. 45 | Please note that you need Proxmox hosts deployed and available for your GitHub runner and Netris controller and softgate nodes available for your Kubernetes cluster pods. 46 | 47 | Configuration parameters: 48 | * **the main key is the cluster name:(Required)** - A unique cluster name 49 | * **versions.talos:(Required)** - Talos Linux version: Supported versions are v1.7.1, v1.7.0, v1.6.7 50 | * **versions.k8s:(Required)** - Kubernetes version: Supported versions are v1.30.0, v1.29.3 51 | * **controlplane.cpu_cores:(Required)** - controlplane node cores :(min 2) 52 | * **controlplane.memory:(Required)** - controlpalne node RAM (min 2048) 53 | * **controlplane.disk_size:(Required)** - controlplane node disk size:(min 10) 54 | * **worker_nodes.cpu_cores:(Required)** - worker node cores:(min 1) 55 | * **worker_nodes.memory:(Required)** - worker node RAM:(min 2048) 56 | * **worker_nodes.disk_size:(Required)** - worker node disk size:(min 10) 57 | 58 | Pushing config.yaml triggers a GitHub actions workflow. The Kubernetes configuration files and the Kubernetes services repo URL are shown on the summary page when the workflow is complete. 59 | 60 | Screenshot 2024-05-24 at 17 11 48 61 | 62 | 63 | 64 | ### Destroying a Cluster 65 | To destroy a cluster, you must remove the cluster configuration from the `config.yaml` file and push it to your turnk8s repository. 66 | 67 | ## Kubectl 68 | 69 | Download the kubeconfig file from the summary page, unzip it, and export its path to the `KUBECONFIG` variable. 70 |
71 | Try `kubectl get nodes` to check if Kubernetes is running. 72 | -------------------------------------------------------------------------------- /config.yaml: -------------------------------------------------------------------------------- 1 | internal: 2 | controlplane: 3 | cpu_cores: 2 4 | memory: 4096 5 | disk_size: 10 6 | worker_nodes: 7 | count: 1 8 | cpu_cores: 4 9 | memory: 4096 10 | disk_size: 30 11 | -------------------------------------------------------------------------------- /modules/argocd/app-of-apps.yaml.tpl: -------------------------------------------------------------------------------- 1 | applications: 2 | app-of-apps: 3 | namespace: argocd 4 | finalizers: 5 | - resources-finalizer.argocd.argoproj.io 6 | project: default 7 | source: 8 | repoURL: ${repoURL} 9 | targetRevision: HEAD 10 | path: argocd_applications 11 | destination: 12 | server: https://kubernetes.default.svc 13 | namespace: argocd 14 | syncPolicy: 15 | automated: 16 | prune: true 17 | selfHeal: true -------------------------------------------------------------------------------- /modules/argocd/argocd.yaml: -------------------------------------------------------------------------------- 1 | configs: 2 | params: 3 | server.insecure: true 4 | server: 5 | service: 6 | type: "LoadBalancer" -------------------------------------------------------------------------------- /modules/argocd/main.tf: -------------------------------------------------------------------------------- 1 | resource "helm_release" "argocd" { 2 | name = "argocd" 3 | namespace = "argocd" 4 | chart = var.argocd_chart_name 5 | version = var.argocd_chart_version 6 | repository = var.argocd_chart_repository 7 | create_namespace = true 8 | recreate_pods = true 9 | force_update = true 10 | 11 | values = [file("${path.module}/argocd.yaml")] 12 | 13 | set { 14 | name = "configs.secret.argocdServerAdminPassword" 15 | value = var.argocd_admin_password 16 | } 17 | 18 | set { 19 | name = "global.image.repository" 20 | value 
= "${var.registry}/argoproj/argocd" 21 | } 22 | 23 | set { 24 | name = "dex.image.repository" 25 | value = "${var.registry}/dexidp/dex" 26 | } 27 | 28 | set { 29 | name = "redis.image.repository" 30 | value = "${var.registry}/docker/library/redis" 31 | } 32 | } 33 | 34 | resource "helm_release" "argocd-apps" { 35 | depends_on = [helm_release.argocd] 36 | 37 | name = "argocd-apps" 38 | namespace = helm_release.argocd.namespace 39 | chart = var.app_of_apps_chart_name 40 | version = var.app_of_apps_chart_version 41 | repository = var.app_of_apps_chart_repository 42 | 43 | values = [ 44 | templatefile("${path.module}/app-of-apps.yaml.tpl", 45 | { 46 | repoURL = var.git_repository_ssh_url 47 | } 48 | ) 49 | ] 50 | } -------------------------------------------------------------------------------- /modules/argocd/variables.tf: -------------------------------------------------------------------------------- 1 | variable "git_repository_ssh_url" { 2 | description = "Git repository ssh url contains for workload" 3 | type = string 4 | default = "git@github.com:example/project.git" 5 | } 6 | 7 | variable "registry" { 8 | description = "The registry from which images will be downloaded" 9 | type = string 10 | } 11 | 12 | # ArgoCD variables 13 | variable "argocd_chart_name" { 14 | type = string 15 | default = "argo-cd" 16 | } 17 | 18 | variable "argocd_chart_version" { 19 | type = string 20 | default = "6.7.18" 21 | } 22 | 23 | variable "argocd_chart_repository" { 24 | type = string 25 | default = "https://argoproj.github.io/argo-helm" 26 | } 27 | 28 | variable "argocd_admin_password" { 29 | description = "Encrypted password for admin user" 30 | type = string 31 | } 32 | 33 | # ArgoCD AppOfApps variables 34 | variable "app_of_apps_chart_name" { 35 | type = string 36 | default = "argocd-apps" 37 | } 38 | 39 | variable "app_of_apps_chart_version" { 40 | type = string 41 | default = "1.6.2" 42 | } 43 | 44 | variable "app_of_apps_chart_repository" { 45 | type = string 46 | default = 
"https://argoproj.github.io/argo-helm" 47 | } 48 | -------------------------------------------------------------------------------- /scripts/bash/create_kubeconfig.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cat < "/opt/kubeconfig/$2" 4 | $(echo "$1" | tail -n +2 | head -n -1) 5 | EOF 6 | -------------------------------------------------------------------------------- /scripts/bash/destroy_cluster_nodes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cluster_name=$1 4 | desired_worker_nodes_count=$2 5 | existing_worker_nodes_count=$(terraform state list | grep "proxmox_vm_qemu.worker" | wc -l) 6 | removable_worker_nodes_count=$(expr "$existing_worker_nodes_count" - "$desired_worker_nodes_count") 7 | 8 | if [ "$removable_worker_nodes_count" -gt 0 ]; then 9 | export KUBECONFIG="/opt/kubeconfig/$cluster_name" 10 | for (( i="$desired_worker_nodes_count"; i<"$existing_worker_nodes_count"; i++ )) 11 | do 12 | kubectl delete node "$cluster_name-wn-$i" 13 | done 14 | fi -------------------------------------------------------------------------------- /scripts/bash/merge_kubeconfigs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export KUBECONFIG=$(find /opt/kubeconfig -type f | tr '\n' ':') 4 | mkdir ~/.kube 5 | kubectl config view --flatten > ~/.kube/config -------------------------------------------------------------------------------- /scripts/bash/talos_cli.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | talosctl gen config talos-proxmrox https://$CONTROLPLANE_IP:6443 -o _out --force 4 | talosctl apply-config -n $CONTROLPLANE_IP --insecure -f _out/controlplane.yaml 5 | talosctl apply-config -n $WORKER_NODE_IP --insecure -f _out/worker.yaml 6 | 7 | # Run after booting vm 8 | talosctl bootstrap -e $CONTROLPLANE_IP -n 
$CONTROLPLANE_IP --talosconfig _out/talosconfig 9 | talosctl kubeconfig -e $CONTROLPLANE_IP -n $CONTROLPLANE_IP --talosconfig _out/talosconfig -------------------------------------------------------------------------------- /scripts/python/filter_clusters.py: -------------------------------------------------------------------------------- 1 | # This script based on config.yaml file and existing terraform workspaces, decided which clusters must be deleted and/or updated 2 | import os 3 | import re 4 | import yaml 5 | import pathlib 6 | import argparse 7 | from collections import Counter 8 | 9 | 10 | def main(): 11 | # Collect values from out of the file 12 | parser = argparse.ArgumentParser() 13 | parser.add_argument("--yaml-path", type=pathlib.Path, help="YAML configuration file path.", required=True) 14 | parser.add_argument("--existing-clusters", type=str, help="Existing clusters name.", required=True) 15 | args = parser.parse_args() 16 | 17 | if not os.path.isfile(args.yaml_path): 18 | raise FileNotFoundError(f"File {args.yaml_path} does not exist.") 19 | 20 | with open(args.yaml_path, 'r') as file: 21 | yaml_content = file.read() 22 | 23 | loaded_yaml_content = yaml.safe_load(yaml_content) or dict() 24 | 25 | # Desired clusters must be applied 26 | desired_clusters = Counter([str(cluster) for cluster in loaded_yaml_content.keys()]) 27 | # Existing clusters filtered from "terraform workspace list" and remove prefixes 28 | existing_clusters = Counter([re.sub(r'(-infrastructure|-cluster)$', '', cluster) for cluster in args.existing_clusters.split() if re.compile(r'^(?!\d).*(-infrastructure|-cluster)$').match(cluster)]) 29 | # Removed unique name 30 | existing_clusters = Counter([cluster for cluster, count in existing_clusters.items() if count == 2]) 31 | # The clusters must be destroyed 32 | removable_clusters = existing_clusters - desired_clusters 33 | 34 | # print the output as comma separated 35 | print(" ".join(desired_clusters), " ".join(removable_clusters), 
sep=",") 36 | 37 | if __name__ == '__main__': 38 | main() 39 | -------------------------------------------------------------------------------- /scripts/python/validate_yaml.py: -------------------------------------------------------------------------------- 1 | # This script checks config.yaml and test_config.yaml files validity. 2 | import os 3 | import sys 4 | import yaml 5 | import pathlib 6 | import argparse 7 | 8 | from typing import Optional 9 | from schema import Schema, And, Use, Or, SchemaError 10 | 11 | 12 | # Validate YAML empty lines 13 | def check_empty_lines(yaml_content): 14 | 15 | # Check for empty lines 16 | lines = yaml_content.splitlines() 17 | empty_lines = [i + 1 for i in range(len(lines)) if not lines[i].strip()] 18 | 19 | if empty_lines: 20 | raise yaml.YAMLError(f"Empty lines found in YAML file at: {', '.join(map(str, empty_lines))} lines.") 21 | 22 | return True 23 | 24 | 25 | # Custom validator for the cluster names 26 | def validate_cluster_names(cluster_config: dict, cluster_name: Optional[str]): 27 | 28 | if not isinstance(cluster_config, dict): 29 | raise SchemaError(f"Cluster config contains unstructured lines.") 30 | 31 | if cluster_name: 32 | if cluster_name not in cluster_config: 33 | raise SchemaError(f"Cluster name must be match with \"turnk8s-\" format.") 34 | elif len(cluster_config) != 1: 35 | raise SchemaError(f"Only one cluster must be described within test_config.yaml file.") 36 | else: 37 | for cluster_name, cluster_info in cluster_config.items(): 38 | if cluster_name.startswith("turnk8s-"): 39 | raise SchemaError(f"Cluster name {cluster_name} does not start with \"turnk8s-\" prefix.") 40 | 41 | return cluster_config 42 | 43 | 44 | cluster_schema = { 45 | "controlplane": { 46 | "cpu_cores": Or(2, 4, 6, 8, 47 | error="The number of CPU cores for the ControlPlane must be one of the following: 2, 4, 6, or 8."), 48 | "memory": Or(4096, 6144, 8192, 49 | error="The RAM memory size for the ControlPlane must be one of the 
following: 4096, 6144, or 8192."), 50 | "disk_size": And(Use(int), lambda n: 10 <= n <= 60, 51 | error="The DiskSize for the ControlPlane must be within the range of 10 to 60.") 52 | }, 53 | "worker_nodes": { 54 | "count": And(Use(int), lambda n: 1 <= n <= 5, 55 | error="The Count for the WorkerNodes must be within the range of 1 to 5."), 56 | "cpu_cores": Or(2, 4, 6, 8, 57 | error="The number of CPU cores for the WorkerNodes must be one of the following: 2, 4, 6, or 8."), 58 | "memory": Or(2048, 4096, 6144, 59 | error="The RAM memory size for the WorkerNodes must be one of the following: 2048, 4096 or 6144."), 60 | "disk_size": And(Use(int), lambda n: 10 <= n <= 60, 61 | error="The DiskSize for the WorkerNodes must be within the range of 10 to 60.") 62 | } 63 | } 64 | 65 | 66 | def main(): 67 | # Collect values from out of the file 68 | parser = argparse.ArgumentParser() 69 | parser.add_argument("--yaml-path", type=pathlib.Path, help="YAML configuration file path.", required=True) 70 | parser.add_argument("--cluster-name", type=str, help="A cluster name for checking the validity.", default=None) 71 | args = parser.parse_args() 72 | 73 | if not os.path.isfile(args.yaml_path): 74 | raise FileNotFoundError(f"File {args.yaml_path} does not exist.") 75 | 76 | with open(args.yaml_path, 'r') as file: 77 | yaml_content = file.read() 78 | 79 | try: 80 | # Check if file is not empty 81 | if len(yaml_content.strip()): 82 | loaded_yaml_content = yaml.safe_load(yaml_content) 83 | check_empty_lines(yaml_content=yaml_content) 84 | # Wrap the cluster schema with the cluster names validator 85 | schema = Schema(And(lambda cluster_schema: validate_cluster_names(cluster_schema, args.cluster_name), {str: cluster_schema})) 86 | schema.validate(loaded_yaml_content) 87 | print("YAML configuration file is valid.") 88 | except yaml.YAMLError as e: 89 | print(f"Error parsing YAML configuration file: {e}") 90 | sys.exit(1) 91 | except SchemaError as e: 92 | print(f"Invalid YAML 
configuration: {e}") 93 | sys.exit(1) 94 | 95 | 96 | if __name__ == '__main__': 97 | main() 98 | -------------------------------------------------------------------------------- /scripts/terraform/apply.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | type=$1 5 | clusters=( "${@:2}" ) 6 | 7 | for cluster in "${clusters[@]}" 8 | do 9 | export TF_VAR_cluster_name="$cluster" 10 | workspace="${cluster#turnk8s-}-$type" 11 | terraform workspace select -or-create "$workspace" 12 | terraform validate -no-color 13 | terraform plan -out="tfplan-$workspace" 14 | terraform apply "tfplan-$workspace" 15 | done -------------------------------------------------------------------------------- /scripts/terraform/destroy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | clusters=( "$@" ) 5 | 6 | for cluster in "${clusters[@]}" 7 | do 8 | export TF_VAR_cluster_name="$cluster" 9 | cluster_without_prefix="${cluster#turnk8s-}" 10 | terraform workspace select "$cluster_without_prefix-infrastructure" 11 | terraform destroy -auto-approve 12 | terraform workspace select "default-ws" 13 | terraform workspace delete -force "$cluster_without_prefix-infrastructure" 14 | terraform workspace delete -force "$cluster_without_prefix-cluster" 15 | done -------------------------------------------------------------------------------- /terraform/cluster/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/hashicorp/helm" { 5 | version = "2.13.2" 6 | constraints = ">= 2.13.0" 7 | hashes = [ 8 | "h1:nlSqCo0PajJzjSlx0lXNUq1YcOr8p9b3ahcUUYN2pEg=", 9 | "zh:06c0663031ef5aa19e238fe50be5d3cbf5fb00548d2b26e779c607dfd2dc69a7", 10 | "zh:1850b8f2e729553ba8b96d69dce035b814ce959c6805c25484f407c4e720c497", 11 | "zh:1ec76814a99461cd79ee4c879ed455ab338a3cb9e63fbe9308f91b5515e72e42", 12 | "zh:78546b2f0b2e9072370c017d8056a2ffda908c2e463d2792244e4be6562ab772", 13 | "zh:9205eef438aa3d5e49505655b7c300f7cecfa30f8fa37ed84679f674420403f2", 14 | "zh:9335c7300675e5088ab4090af3c8150701c0bb8ea67ad23ebd753f6ab3a922a9", 15 | "zh:9722d8b419e9615a04b8fc9acb50e52d6ba988c7565cc517bc16faa0a9e895b3", 16 | "zh:aa93d9fc7db91f261b6e41970453926341eaa4222c1b8d507cdeabd0be0af4eb", 17 | "zh:c59a2af538de99c37e4ffe988f33633a9fb064e5360230adac5f6eb0fd473be8", 18 | "zh:d6323f61f255131a7d9f5a645982eb0f0d12f685270f54beade95c0b51a7a6c9", 19 | "zh:e7f46dd2aac9537d20aaac217806f2ebb3a347aaf6bbd28192c042286103635c", 20 | "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", 21 | ] 22 | } 23 | -------------------------------------------------------------------------------- /terraform/cluster/argocd.tf: -------------------------------------------------------------------------------- 1 | module "argocd" { 2 | 3 | source = "../../modules/argocd" 4 | 5 | git_repository_ssh_url = data.terraform_remote_state.infrastructure.outputs.github_repo_url.http_clone_url 6 | registry = var.image_registry 7 | 8 | argocd_chart_name = var.argocd_chart_name 9 | argocd_chart_version = var.argocd_chart_version 10 | argocd_chart_repository = var.argocd_chart_repository 11 | argocd_admin_password = var.argocd_admin_password 12 | 13 | app_of_apps_chart_name = var.argocd_app_of_apps_chart_name 14 | app_of_apps_chart_version = var.argocd_app_of_apps_chart_version 15 | app_of_apps_chart_repository = var.argocd_app_of_apps_chart_repository 16 | } 
-------------------------------------------------------------------------------- /terraform/cluster/main.tf: -------------------------------------------------------------------------------- 1 | provider "helm" { 2 | kubernetes { 3 | host = data.terraform_remote_state.infrastructure.outputs.cluster_kubeconfig[var.cluster_name].kubernetes_client_configuration.host 4 | client_certificate = base64decode(data.terraform_remote_state.infrastructure.outputs.cluster_kubeconfig[var.cluster_name].kubernetes_client_configuration.client_certificate) 5 | client_key = base64decode(data.terraform_remote_state.infrastructure.outputs.cluster_kubeconfig[var.cluster_name].kubernetes_client_configuration.client_key) 6 | cluster_ca_certificate = base64decode(data.terraform_remote_state.infrastructure.outputs.cluster_kubeconfig[var.cluster_name].kubernetes_client_configuration.ca_certificate) 7 | } 8 | } 9 | 10 | data "terraform_remote_state" "infrastructure" { 11 | backend = "remote" 12 | 13 | config = { 14 | organization = "infraheads" 15 | workspaces = { 16 | # trimprefix is a no-op when the prefix is absent, so both prefixed and unprefixed cluster names are handled 
17 | name = "turnk8s-${trimprefix(var.cluster_name, "turnk8s-")}-infrastructure" 18 | } 19 | } 20 | } -------------------------------------------------------------------------------- /terraform/cluster/netris-operator.tf: -------------------------------------------------------------------------------- 1 | resource "helm_release" "netris-operator" { 2 | 3 | name = "netris-operator" 4 | namespace = "netris-operator" 5 | chart = "netris-operator" 6 | version = "2.0.0" 7 | repository = "https://netrisai.github.io/charts" 8 | create_namespace = true 9 | recreate_pods = true 10 | force_update = true 11 | 12 | set { 13 | name = "controller.host" 14 | value = var.netris_controller_host 15 | } 16 | 17 | set { 18 | name = "controller.login" 19 | value = var.netris_controller_login 20 | } 21 | 22 | set { 23 | name = "controller.password" 24 | value = var.netris_controller_password 25 | } 26 | 27 | set { 28 | name = "controller.insecure" 29 | value = false 30 | } 31 | 32 | set { 33 | name = "image.repository" 34 | value = "${var.image_registry}/netris-operator" 35 | } 36 | 37 | set { 38 | name = "image.tag" 39 | value = "v3.0.0" 40 | } 41 | } -------------------------------------------------------------------------------- /terraform/cluster/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.7" 3 | backend "remote" { 4 | hostname = "app.terraform.io" 5 | organization = "infraheads" 6 | 7 | workspaces { 8 | prefix = "turnk8s-" 9 | } 10 | } 11 | 12 | required_providers { 13 | helm = { 14 | source = "hashicorp/helm" 15 | version = ">= 2.13" 16 | } 17 | } 18 | } -------------------------------------------------------------------------------- /terraform/cluster/variables.tf: -------------------------------------------------------------------------------- 1 | variable "cluster_name" { 2 | description = "The cluster name exists in config file." 
3 | type = string 4 | default = "turnk8s-cluster" 5 | } 6 | 7 | variable "talos_version" { 8 | description = "Talos version to be used" 9 | type = string 10 | default = "v1.7.1" 11 | } 12 | 13 | variable "image_registry" { 14 | description = "The registry from which images should be downloaded for cluster" 15 | type = string 16 | default = "192.168.2.4:6000" 17 | } 18 | 19 | # ArgoCD variables 20 | variable "argocd_chart_name" { 21 | type = string 22 | default = "argo-cd" 23 | } 24 | 25 | variable "argocd_chart_version" { 26 | type = string 27 | default = "7.3.4" 28 | } 29 | 30 | variable "argocd_chart_repository" { 31 | type = string 32 | default = "https://argoproj.github.io/argo-helm" 33 | } 34 | 35 | variable "argocd_admin_password" { 36 | description = "Encrypted password for Argocd admin" 37 | type = string 38 | } 39 | 40 | # ArgoCD Apps variables 41 | variable "argocd_app_of_apps_chart_name" { 42 | type = string 43 | default = "argocd-apps" 44 | } 45 | 46 | variable "argocd_app_of_apps_chart_version" { 47 | type = string 48 | default = "2.0.0" 49 | } 50 | 51 | variable "argocd_app_of_apps_chart_repository" { 52 | type = string 53 | default = "https://argoproj.github.io/argo-helm" 54 | } 55 | 56 | # Netris Configuration 57 | variable "netris_controller_host" { 58 | description = "Netris controller host." 59 | type = string 60 | } 61 | 62 | variable "netris_controller_login" { 63 | description = "Netris controller login" 64 | type = string 65 | sensitive = true 66 | } 67 | 68 | variable "netris_controller_password" { 69 | description = "Netris controller password" 70 | type = string 71 | sensitive = true 72 | } 73 | -------------------------------------------------------------------------------- /terraform/infrastructure/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/integrations/github" { 5 | version = "6.2.1" 6 | constraints = "6.2.1" 7 | hashes = [ 8 | "h1:ip7024qn1ewDqlNucxh07DHvuhSLZSqtTGewxNLeYYU=", 9 | "zh:172aa5141c525174f38504a0d2e69d0d16c0a0b941191b7170fe6ae4d7282e30", 10 | "zh:1a098b731fa658c808b591d030cc17cc7dfca1bf001c3c32e596f8c1bf980e9f", 11 | "zh:245d6a1c7e632d8ae4bdd2da2516610c50051e81505cf420a140aa5fa076ea90", 12 | "zh:43c61c230fb4ed26ff1b04b857778e65be3d8f80292759abbe2a9eb3c95f6d97", 13 | "zh:59bb7dd509004921e4322a196be476a2f70471b462802f09d03d6ce96f959860", 14 | "zh:5cb2ab8035d015c0732107c109210243650b6eb115e872091b0f7b98c2763777", 15 | "zh:69d2a6acfcd686f7e859673d1c8a07fc1fc1598a881493f19d0401eb74c0f325", 16 | "zh:77f36d3f46911ace5c50dee892076fddfd64a289999a5099f8d524c0143456d1", 17 | "zh:87df41097dfcde72a1fbe89caca882af257a4763c2e1af669c74dcb8530f9932", 18 | "zh:899dbe621f32d58cb7c6674073a6db8328a9db66eecfb0cc3fc13299fd4e62e7", 19 | "zh:ad2eb7987f02f7dd002076f65a685730705d04435313b5cf44d3a6923629fb29", 20 | "zh:b2145ae7134dba893c7f74ad7dfdc65fdddf6c7b1d0ce7e2f3baa96212322fd8", 21 | "zh:bd6bae3ac5c3f96ad9219d3404aa006ef1480e9041d4c95df1808737e37d911b", 22 | "zh:e89758b20ae59f1b9a6d32c107b17846ddca9634b868cf8f5c927cbb894b1b1f", 23 | ] 24 | } 25 | 26 | provider "registry.terraform.io/siderolabs/talos" { 27 | version = "0.5.0" 28 | constraints = "0.5.0" 29 | hashes = [ 30 | "h1:xogkLLCrJJmd278E+vNMnmQgaMD05Gd1QXN914xgVec=", 31 | "zh:0f71f2624576224c9bc924b136b601b734243efa7a7ad8280dfd8bd583e4afa5", 32 | "zh:0fa82a384b25a58b65523e0ea4768fa1212b1f5cfc0c9379d31162454fedcc9d", 33 | "zh:33c50dacc5029fa20caed702001fb1439899c94f203b1f37dccb970f504bca45", 34 | "zh:3c97a6e2692b88d3f4631a3f8769146f602c210e881b46fa1b3b82c545e51cd1", 35 | "zh:44077a137613bcfe29eef00315b5aa50d83390c3c727580a4ff0f4b87f22d228", 36 | "zh:5bd02f278aec5567f94dd057d1c758363998ce581ff17b0869515bb682c02186", 37 | "zh:80f40939bc3b55f0005c03b77122ceea86ec4deb82f5557950a97ad96fbb1557", 38 | 
"zh:94c1b17f25bc30eacde926e46f196f1f135032674730d9f50c986ef6b7a854f0", 39 | "zh:95ad665b2fdeed38180f5c471164833a34d07c1ef0470c1652565fe8cf4e9c4a", 40 | "zh:a50ef6088afcb129c176dd4ba86c345e9be7b14358bb3b21c34f06930d8f39ef", 41 | "zh:aa71da1da00ed66f1dddf1b69c10b829f24ac89e207de07d32c455dd04482096", 42 | "zh:abb7eeb2b089081b4814ed80a295673e1a92f82ce092dde37b5bc92e75efec2c", 43 | "zh:db9b9b54a0db5ae151376d5a73e0d28497c3e06181840e71ef8349213ac03e50", 44 | "zh:e50ed8aa90b736508fce63680e8339240cecb74709ab9563d34d2c2ce7bc8445", 45 | "zh:f3a279723ff31a095d7bfff21857abfcc9a2cfdeeea8521d179630ae6565d581", 46 | ] 47 | } 48 | 49 | provider "registry.terraform.io/telmate/proxmox" { 50 | version = "3.0.1-rc1" 51 | constraints = "3.0.1-rc1" 52 | hashes = [ 53 | "h1:4xZeGV+uRpYX6Boe0kWI3Dw3B+x8P4tT4JTnUpE1FJU=", 54 | "zh:4c4a5739ed8b0fdec644632de9cc3219a31022b03aaaf6b77d49efe58541d5c1", 55 | "zh:5c97c58a1d15d3b77bade630c70c75f24cf884560625afa78f408f682c09cc05", 56 | "zh:6b3b8a410cdf39a1cd9dffc2e62806ff91e23a77ccc310fd1ea130560a2f6634", 57 | "zh:73fb750e3363cb1eefacd5fc714a93b9cd65f55459981eb27dd7f4ab7ae5aed7", 58 | "zh:7b4bd5db2188cd21df1d7a49cbf893a18aaa915add400242b20f82cba41d3606", 59 | "zh:8427be708a485325bb0cf70ff9470256e388353b80a554772f19103edf208107", 60 | "zh:9bd7ffdcf8e19efcc90bdef55871d9e6c6d8bcaf46d6873d7faa01709154129c", 61 | "zh:9f7dfe0f4c59fb593f936c67901e851fdfa279fa2aa5ae8f5fff29e763487861", 62 | "zh:a61fd2386c116dd4ed1e202c10b4a3378290d29411f47f0460ba7b8d13e14c53", 63 | "zh:cbe1be50efe3608d014c05503d65d8a3a98cec87962a8a0fdd95065b85db6d4f", 64 | "zh:cdb175a0cb863a11090edbd50500b7d55137dbbb0d31fd119d727e12cadc6b4a", 65 | "zh:cee3e0ed0ecaec22e58e5cec4fb202fb4f2a6779a87ef57c464c7b9c825f4c37", 66 | "zh:e5d4e4fc9619bcfaaed159f9404457f29385dcb2f8fc42cf1b2dab4de7bdbf21", 67 | "zh:f39ec72a5e8624949592d48dec11ef6abb811d0ae5a80b6fe356a47392a38a6a", 68 | ] 69 | } 70 | -------------------------------------------------------------------------------- 
/terraform/infrastructure/controlplane.tf: -------------------------------------------------------------------------------- 1 | resource "proxmox_vm_qemu" "controlplane" { 2 | for_each = local.clusters 3 | 4 | name = "${var.cluster_name}-cp" 5 | target_node = local.proxmox_target_node 6 | iso = local.talos_iso 7 | 8 | cores = each.value.controlplane.cpu_cores 9 | sockets = var.controlplane_sockets 10 | cpu = var.controlplane_cpu 11 | 12 | qemu_os = var.controlplane_qemu_os 13 | scsihw = var.controlplane_scsihw 14 | memory = each.value.controlplane.memory 15 | onboot = true 16 | agent = 1 17 | 18 | disks { 19 | scsi { 20 | scsi0 { 21 | disk { 22 | storage = var.controlplane_disk_storage 23 | size = each.value.controlplane.disk_size 24 | iothread = true 25 | asyncio = "native" 26 | } 27 | } 28 | } 29 | } 30 | 31 | network { 32 | bridge = var.controlplane_network_bridge 33 | model = var.controlplane_network_model 34 | firewall = var.controlplane_network_firewall 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /terraform/infrastructure/destroy-cluster-nodes.tf: -------------------------------------------------------------------------------- 1 | resource "terraform_data" "delete_nodes" { 2 | depends_on = [terraform_data.kubeconfig] 3 | 4 | # Ensure to delete worker nodes when cluster is scaled down 5 | triggers_replace = [ 6 | length(local.workers) 7 | ] 8 | 9 | provisioner "local-exec" { 10 | command = "bash ../../scripts/bash/destroy_cluster_nodes.sh ${var.cluster_name} ${length(local.workers)}" 11 | } 12 | } -------------------------------------------------------------------------------- /terraform/infrastructure/github.tf: -------------------------------------------------------------------------------- 1 | resource "github_repository" "argocd_applications" { 2 | depends_on = [proxmox_vm_qemu.controlplane, proxmox_vm_qemu.worker] 3 | 4 | name = var.cluster_name 5 | description = "This repo is for the ArgoCD 
Applications." 6 | vulnerability_alerts = true 7 | 8 | template { 9 | owner = "infraheads" 10 | repository = "turnk8s_template_repo" 11 | include_all_branches = false 12 | } 13 | } 14 | 15 | resource "github_repository_file" "argocd_application" { 16 | repository = github_repository.argocd_applications.name 17 | branch = "main" 18 | file = "argocd_applications/infraheads.yaml" 19 | content = templatefile("${path.module}/templates/argocd_application.yaml.tpl", 20 | { 21 | sourceRepoURL = github_repository.argocd_applications.http_clone_url 22 | } 23 | ) 24 | } -------------------------------------------------------------------------------- /terraform/infrastructure/kubeconfig.tf: -------------------------------------------------------------------------------- 1 | resource "terraform_data" "kubeconfig" { 2 | depends_on = [data.talos_cluster_kubeconfig.cp_ck] 3 | for_each = local.clusters 4 | 5 | # Ensure to retrieve kubeconfig when worker nodes count is changed 6 | triggers_replace = [ 7 | length(local.workers) 8 | ] 9 | 10 | provisioner "local-exec" { 11 | command = "bash ../../scripts/bash/create_kubeconfig.sh \"${yamlencode(data.talos_cluster_kubeconfig.cp_ck[each.key].kubeconfig_raw)}\" ${var.cluster_name}" 12 | } 13 | } -------------------------------------------------------------------------------- /terraform/infrastructure/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | proxmox_api_url = "https://${var.proxmox_ip}:8006/api2/json" 3 | proxmox_target_node = var.proxmox_ip == "192.168.1.5" ? 
"pve01" : "pve02" 4 | 5 | clusters = try({ tostring(var.cluster_name) = yamldecode(file(var.config_file_path))[var.cluster_name] }, {}) 6 | talos_iso = "local:iso/metal-amd64-qemu-${var.talos_version}.iso" 7 | 8 | workers = flatten([ 9 | for cluster_key, cluster in local.clusters : [ 10 | for i in range(cluster.worker_nodes.count): 11 | { 12 | cpu_cores = cluster.worker_nodes.cpu_cores 13 | disk_size = cluster.worker_nodes.disk_size 14 | memory = cluster.worker_nodes.memory 15 | } 16 | ] 17 | ]) 18 | } 19 | -------------------------------------------------------------------------------- /terraform/infrastructure/main.tf: -------------------------------------------------------------------------------- 1 | provider "proxmox" { 2 | pm_api_url = local.proxmox_api_url 3 | pm_api_token_id = var.proxmox_token_id 4 | pm_api_token_secret = var.proxmox_token_secret 5 | pm_tls_insecure = true 6 | } 7 | 8 | provider "github" { 9 | token = var.github_token 10 | owner = "infraheads" 11 | } 12 | -------------------------------------------------------------------------------- /terraform/infrastructure/outputs.tf: -------------------------------------------------------------------------------- 1 | output "cluster_kubeconfig" { 2 | value = data.talos_cluster_kubeconfig.cp_ck 3 | sensitive = true 4 | } 5 | 6 | output "github_repo_url" { 7 | value = github_repository.argocd_applications 8 | } -------------------------------------------------------------------------------- /terraform/infrastructure/talos.tf: -------------------------------------------------------------------------------- 1 | # Generates machine secrets for Talos cluster 2 | resource "talos_machine_secrets" "talos_secrets" { 3 | talos_version = var.talos_version 4 | } 5 | 6 | # Generates client configuration for a Talos cluster (talosconfig) 7 | data "talos_client_configuration" "cp_cc" { 8 | for_each = local.clusters 9 | 10 | cluster_name = each.key 11 | client_configuration = 
talos_machine_secrets.talos_secrets.client_configuration 12 | nodes = [proxmox_vm_qemu.controlplane[each.key].default_ipv4_address] 13 | endpoints = [proxmox_vm_qemu.controlplane[each.key].default_ipv4_address] 14 | } 15 | 16 | # Generates a machine configuration for the control plane (controlplane.yaml) 17 | data "talos_machine_configuration" "cp_mc" { 18 | for_each = local.clusters 19 | 20 | cluster_name = data.talos_client_configuration.cp_cc[each.key].cluster_name 21 | machine_type = "controlplane" 22 | cluster_endpoint = "https://${proxmox_vm_qemu.controlplane[each.key].default_ipv4_address}:6443" 23 | machine_secrets = talos_machine_secrets.talos_secrets.machine_secrets 24 | kubernetes_version = var.k8s_version 25 | talos_version = var.talos_version 26 | config_patches = [ 27 | templatefile("${path.module}/templates/controlplane.yaml.tpl", 28 | { 29 | talos-version = var.talos_version, 30 | kubernetes-version = var.k8s_version, 31 | registry = var.image_registry 32 | node-name = "${var.cluster_name}-cp" 33 | } 34 | ) 35 | ] 36 | } 37 | 38 | # Applies machine configuration to the control plane 39 | resource "talos_machine_configuration_apply" "cp_mca" { 40 | for_each = local.clusters 41 | 42 | client_configuration = talos_machine_secrets.talos_secrets.client_configuration 43 | machine_configuration_input = data.talos_machine_configuration.cp_mc[each.key].machine_configuration 44 | node = proxmox_vm_qemu.controlplane[each.key].default_ipv4_address 45 | } 46 | 47 | # Bootstraps the etcd cluster on the control plane 48 | resource "talos_machine_bootstrap" "cp_mb" { 49 | depends_on = [talos_machine_configuration_apply.cp_mca] 50 | for_each = local.clusters 51 | 52 | node = proxmox_vm_qemu.controlplane[each.key].default_ipv4_address 53 | client_configuration = talos_machine_secrets.talos_secrets.client_configuration 54 | } 55 | 56 | # Retrieves the kubeconfig for a Talos cluster 57 | data "talos_cluster_kubeconfig" "cp_ck" { 58 | depends_on = 
[talos_machine_bootstrap.cp_mb] 59 | for_each = local.clusters 60 | 61 | client_configuration = talos_machine_secrets.talos_secrets.client_configuration 62 | node = proxmox_vm_qemu.controlplane[each.key].default_ipv4_address 63 | } 64 | 65 | # Generates a machine configuration for the worker (worker.yaml) 66 | data "talos_machine_configuration" "worker_mc" { 67 | depends_on = [proxmox_vm_qemu.worker] 68 | for_each = local.clusters 69 | 70 | cluster_name = data.talos_client_configuration.cp_cc[each.key].cluster_name 71 | machine_type = "worker" 72 | cluster_endpoint = data.talos_machine_configuration.cp_mc[each.key].cluster_endpoint 73 | machine_secrets = talos_machine_secrets.talos_secrets.machine_secrets 74 | kubernetes_version = var.k8s_version 75 | talos_version = var.talos_version 76 | } 77 | 78 | # Applies machine configuration to the worker node 79 | resource "talos_machine_configuration_apply" "worker_mca" { 80 | depends_on = [data.talos_machine_configuration.worker_mc] 81 | for_each = { for idx, worker in local.workers : idx => worker } 82 | 83 | client_configuration = talos_machine_secrets.talos_secrets.client_configuration 84 | machine_configuration_input = data.talos_machine_configuration.worker_mc[var.cluster_name].machine_configuration 85 | node = proxmox_vm_qemu.worker[each.key].default_ipv4_address 86 | 87 | config_patches = [ 88 | templatefile("${path.module}/templates/worker.yaml.tpl", 89 | { 90 | talos-version = var.talos_version, 91 | kubernetes-version = var.k8s_version, 92 | registry = var.image_registry 93 | node-name = "${var.cluster_name}-wn-${each.key}" 94 | } 95 | ) 96 | ] 97 | } 98 | 99 | data "talos_cluster_health" "cluster_health" { 100 | depends_on = [data.talos_cluster_kubeconfig.cp_ck] 101 | 102 | client_configuration = talos_machine_secrets.talos_secrets.client_configuration 103 | control_plane_nodes = [for controlplane in proxmox_vm_qemu.controlplane : controlplane.default_ipv4_address] 104 | worker_nodes = [for worker in 
proxmox_vm_qemu.worker : worker.default_ipv4_address] 105 | endpoints = [for controlplane in proxmox_vm_qemu.controlplane : controlplane.default_ipv4_address] 106 | timeouts = { 107 | read = "1h" 108 | } 109 | } -------------------------------------------------------------------------------- /terraform/infrastructure/templates/argocd_application.yaml.tpl: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: infraheads 5 | namespace: argocd 6 | finalizers: 7 | - resources-finalizer.argocd.argoproj.io 8 | spec: 9 | project: default # each application belongs to a single project. if unspecified, an application belongs to the "default" projects 10 | 11 | source: 12 | repoURL: ${sourceRepoURL} 13 | targetRevision: main 14 | path: kubernetes 15 | destination: 16 | server: https://kubernetes.default.svc # endpoint of Kubernetes API Server 17 | namespace: default 18 | 19 | syncPolicy: 20 | automated: 21 | selfHeal: true # by default, changes made to the live cluster will not trigger automated sync (override manual changes on cluster) 22 | prune: true # by default, automatic sync will not delete resources -------------------------------------------------------------------------------- /terraform/infrastructure/templates/controlplane.yaml.tpl: -------------------------------------------------------------------------------- 1 | machine: 2 | kubelet: 3 | image: ghcr.io/siderolabs/kubelet:${kubernetes-version} 4 | install: 5 | image: ghcr.io/siderolabs/installer-qemu:${talos-version} 6 | registries: 7 | mirrors: 8 | '*': 9 | endpoints: 10 | - http://${registry} 11 | network: 12 | hostname: ${node-name} 13 | cluster: 14 | apiServer: 15 | image: registry.k8s.io/kube-apiserver:${kubernetes-version} 16 | controllerManager: 17 | image: registry.k8s.io/kube-controller-manager:${kubernetes-version} 18 | proxy: 19 | image: registry.k8s.io/kube-proxy:${kubernetes-version} 20 | 
scheduler: 21 | image: registry.k8s.io/kube-scheduler:${kubernetes-version} -------------------------------------------------------------------------------- /terraform/infrastructure/templates/worker.yaml.tpl: -------------------------------------------------------------------------------- 1 | machine: 2 | kubelet: 3 | image: ghcr.io/siderolabs/kubelet:${kubernetes-version} 4 | install: 5 | image: ghcr.io/siderolabs/installer-qemu:${talos-version} 6 | registries: 7 | mirrors: 8 | '*': 9 | endpoints: 10 | - http://${registry} 11 | network: 12 | hostname: ${node-name} -------------------------------------------------------------------------------- /terraform/infrastructure/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.7" 3 | backend "remote" { 4 | hostname = "app.terraform.io" 5 | organization = "infraheads" 6 | 7 | workspaces { 8 | prefix = "turnk8s-" 9 | } 10 | } 11 | 12 | required_providers { 13 | proxmox = { 14 | source = "telmate/proxmox" 15 | version = "3.0.1-rc1" 16 | } 17 | talos = { 18 | source = "siderolabs/talos" 19 | version = "0.5.0" 20 | } 21 | github = { 22 | source = "integrations/github" 23 | version = "6.2.1" 24 | } 25 | } 26 | } -------------------------------------------------------------------------------- /terraform/infrastructure/variables.tf: -------------------------------------------------------------------------------- 1 | variable "proxmox_ip" { 2 | description = "IP of the Proxmox server" 3 | type = string 4 | default = "192.168.1.5" 5 | } 6 | 7 | variable "proxmox_token_id" { 8 | description = "This is an API token you have previously created for a specific user." 9 | type = string 10 | sensitive = true 11 | } 12 | 13 | variable "proxmox_token_secret" { 14 | description = "This uuid is only available when the token was initially created." 
15 | type = string 16 | sensitive = true 17 | } 18 | 19 | variable "talos_version" { 20 | description = "Talos version to be used" 21 | type = string 22 | default = "v1.7.1" 23 | } 24 | 25 | variable "k8s_version" { 26 | description = "K8s version to be used" 27 | type = string 28 | default = "v1.30.0" 29 | } 30 | 31 | variable "image_registry" { 32 | description = "The registry from which images should be downloaded for cluster" 33 | type = string 34 | default = "192.168.2.4:6000" 35 | } 36 | 37 | variable "github_token" { 38 | description = "Git repository token" 39 | type = string 40 | } 41 | 42 | variable "cluster_name" { 43 | description = "The cluster name exists in config file." 44 | type = string 45 | default = "turnk8s-cluster" 46 | } 47 | 48 | variable "config_file_path" { 49 | description = "The config.yaml file, where clusters are described." 50 | type = string 51 | default = "../../config.yaml" 52 | } 53 | 54 | variable "controlplane_cores" { 55 | description = "The number of CPU cores per CPU socket to allocate to the VM." 56 | type = number 57 | default = 2 58 | } 59 | 60 | variable "controlplane_sockets" { 61 | description = "The number of CPU sockets to allocate to the VM." 62 | type = number 63 | default = 1 64 | } 65 | 66 | variable "controlplane_cpu" { 67 | description = "The type of CPU to emulate in the Guest." 68 | type = string 69 | default = "x86-64-v2-AES" 70 | } 71 | 72 | variable "controlplane_qemu_os" { 73 | description = "The type of OS in the guest." 74 | type = string 75 | default = "l26" 76 | } 77 | 78 | variable "controlplane_scsihw" { 79 | description = "The SCSI controller to emulate." 80 | type = string 81 | default = "virtio-scsi-single" 82 | } 83 | 84 | variable "controlplane_memory" { 85 | description = "The amount of memory to allocate to the VM in Megabytes." 
86 | type = number 87 | default = 4096 88 | } 89 | 90 | variable "controlplane_network_bridge" { 91 | description = "Bridge to which the network device should be attached." 92 | type = string 93 | default = "vmbr1" 94 | } 95 | 96 | variable "controlplane_network_model" { 97 | description = "Network Card Model" 98 | type = string 99 | default = "virtio" 100 | } 101 | 102 | variable "controlplane_network_firewall" { 103 | description = "Whether to enable the Proxmox firewall on this network device." 104 | type = bool 105 | default = false 106 | } 107 | 108 | variable "controlplane_disk_storage" { 109 | description = "The name of the storage pool on which to store the disk." 110 | type = string 111 | default = "local-lvm" 112 | } 113 | 114 | variable "controlplane_disk_size" { 115 | description = "The size of the created disk in Gigabytes." 116 | type = number 117 | default = 32 118 | } 119 | 120 | # Node variables 121 | variable "worker_nodes_count" { 122 | description = "Count of the Worker Nodes." 123 | type = number 124 | default = 1 125 | } 126 | 127 | variable "worker_cores" { 128 | description = "The number of CPU cores per CPU socket to allocate to the VM." 129 | type = number 130 | default = 2 131 | } 132 | 133 | variable "worker_sockets" { 134 | description = "The number of CPU sockets to allocate to the VM." 135 | type = number 136 | default = 1 137 | } 138 | 139 | variable "worker_cpu" { 140 | description = "The type of CPU to emulate in the Guest." 141 | type = string 142 | default = "x86-64-v2-AES" 143 | } 144 | 145 | variable "worker_qemu_os" { 146 | description = "The type of OS in the guest." 147 | type = string 148 | default = "l26" 149 | } 150 | 151 | variable "worker_scsihw" { 152 | description = "The SCSI controller to emulate." 153 | type = string 154 | default = "virtio-scsi-single" 155 | } 156 | 157 | variable "worker_memory" { 158 | description = "The amount of memory to allocate to the VM in Megabytes." 
159 | type = number 160 | default = 4096 161 | } 162 | 163 | variable "worker_network_bridge" { 164 | description = "Bridge to which the network device should be attached." 165 | type = string 166 | default = "vmbr1" 167 | } 168 | 169 | variable "worker_network_model" { 170 | description = "Network Card Model" 171 | type = string 172 | default = "virtio" 173 | } 174 | 175 | variable "worker_network_firewall" { 176 | description = "Whether to enable the Proxmox firewall on this network device." 177 | type = bool 178 | default = false 179 | } 180 | 181 | variable "worker_disk_storage" { 182 | description = "The name of the storage pool on which to store the disk." 183 | type = string 184 | default = "local-lvm" 185 | } 186 | 187 | variable "worker_disk_size" { 188 | description = "The size of the created disk in Gigabytes." 189 | type = number 190 | default = 32 191 | } 192 | -------------------------------------------------------------------------------- /terraform/infrastructure/worker.tf: -------------------------------------------------------------------------------- 1 | resource "proxmox_vm_qemu" "worker" { 2 | for_each = { for idx, worker in local.workers : idx => worker } 3 | 4 | name = "${var.cluster_name}-worker-${each.key}" 5 | target_node = local.proxmox_target_node 6 | iso = local.talos_iso 7 | 8 | cores = each.value.cpu_cores 9 | sockets = var.worker_sockets 10 | cpu = var.worker_cpu 11 | 12 | qemu_os = var.worker_qemu_os 13 | scsihw = var.worker_scsihw 14 | memory = each.value.memory 15 | onboot = true 16 | agent = 1 17 | 18 | disks { 19 | scsi { 20 | scsi0 { 21 | disk { 22 | storage = var.worker_disk_storage 23 | size = each.value.disk_size 24 | iothread = true 25 | asyncio = "native" 26 | } 27 | } 28 | } 29 | } 30 | 31 | network { 32 | bridge = var.worker_network_bridge 33 | model = var.worker_network_model 34 | firewall = var.worker_network_firewall 35 | } 36 | } 37 | -------------------------------------------------------------------------------- 
/test_config.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/infraheads/turnk8s/ec469ecdb96996c0cb22a08b96effda6b412dd21/test_config.yaml --------------------------------------------------------------------------------