├── .github └── workflows │ ├── documentation.yaml │ ├── pre-commit.yaml │ └── release.yaml ├── .gitignore ├── .mdl_style.rb ├── .mdlrc ├── .pre-commit-config.yaml ├── .releaserc.json ├── .tflint.hcl ├── CHANGELOG.md ├── CODEOWNERS ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── examples └── demo_cluster │ ├── README.md │ ├── clusters-to-rancher.sh │ ├── main.tf │ ├── outputs.tf │ ├── terraform.tfvars.example │ ├── variables.tf │ └── versions.tf ├── main.tf ├── modules └── kube_cluster │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── templates │ └── user-data.tftpl │ ├── variables.tf │ └── versions.tf ├── outputs.tf ├── rancher-clusters-imported.png ├── renovate.json ├── variables.tf └── versions.tf /.github/workflows/documentation.yaml: -------------------------------------------------------------------------------- 1 | name: generate-terraform-docs 2 | # This workflow will generate terraform docs into README.md in the root, examples, and modules folders. 3 | # Source: https://github.com/equinix-labs/terraform-equinix-kubernetes-addons/blob/main/.github/workflows/documentation.yaml 4 | 5 | on: 6 | push: 7 | branches: 8 | - main 9 | paths: 10 | - '**/*.tpl' 11 | - '**/*.tf' 12 | 13 | jobs: 14 | tf-docs: 15 | name: TF docs 16 | runs-on: ubuntu-latest 17 | steps: 18 | - uses: actions/checkout@v4 # v4 19 | id: actions-checkout 20 | with: 21 | ref: main 22 | 23 | - name: Render terraform docs inside the main and the modules README.md files and push changes back to PR branch 24 | id: terraform-docs 25 | uses: terraform-docs/gh-actions@v1 26 | with: 27 | find-dir: . 28 | args: --sort-by required 29 | indention: 3 30 | git-push: "false" 31 | 32 | # terraform-docs/gh-actions@v1.0.0 modifies .git files with owner root:root, and the following steps fail with 33 | # insufficient permission for adding an object to repository database .git/objects 34 | # since the expected user is runner:docker. 
See https://github.com/terraform-docs/gh-actions/issues/90 35 | - name: Fix .git owner 36 | run: sudo chown runner:docker -R .git 37 | 38 | - name: Create Pull Request 39 | if: steps.terraform-docs.outputs.num_changed != '0' 40 | uses: peter-evans/create-pull-request@v7 41 | with: 42 | commit-message: 'generate-terraform-docs: automated action' 43 | committer: GitHub 44 | author: ${{ github.actor }} <${{ github.actor }}@users.noreply.github.com> 45 | title: 'generate-terraform-docs: automated action' 46 | body: | 47 | Update terraform docs 48 | branch-suffix: timestamp 49 | base: main 50 | signoff: true 51 | delete-branch: true 52 | 53 | # TODO(ocobleseqx): https://github.com/peter-evans/enable-pull-request-automerge 54 | -------------------------------------------------------------------------------- /.github/workflows/pre-commit.yaml: -------------------------------------------------------------------------------- 1 | name: 'run-pre-commit-hooks' 2 | # This workflow runs the pre-commit hooks defined in .pre-commit-config.yaml 3 | 4 | on: 5 | pull_request: 6 | branches: [main] 7 | 8 | concurrency: 9 | group: ${{ github.workflow }}-${{ github.ref }} 10 | cancel-in-progress: true 11 | 12 | jobs: 13 | pre-commit: 14 | runs-on: ${{ matrix.os }} 15 | env: 16 | TF_VERSION: ${{ matrix.tf }} 17 | TFLINT_VERSION: ${{ matrix.tflint }} 18 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 19 | strategy: 20 | matrix: 21 | os: [ubuntu-latest] 22 | tf: [1.9.0] 23 | tflint: [v0.44.1] 24 | permissions: 25 | pull-requests: write 26 | id-token: write 27 | contents: read 28 | steps: 29 | - name: Checkout from Github 30 | uses: actions/checkout@v4 # v4 31 | 32 | - name: Install Python3 33 | uses: actions/setup-python@v5 34 | with: 35 | python-version: '3.x' 36 | 37 | - name: Install tflint 38 | uses: terraform-linters/setup-tflint@v4 39 | with: 40 | tflint_version: ${{ env.TFLINT_VERSION }} 41 | 42 | - name: Cache tflint plugin dir 43 | uses: actions/cache@v4 44 | with: 45 | path: 
~/.tflint.d/plugins 46 | key: ${{ matrix.os }}-tflint-${{ hashFiles('.tflint.hcl') }} 47 | 48 | - name: Install Terraform 49 | uses: hashicorp/setup-terraform@v3 50 | with: 51 | terraform_version: ${{ env.TF_VERSION }} 52 | 53 | - name: Config Terraform plugin cache 54 | run: | 55 | echo 'plugin_cache_dir="$HOME/.terraform.d/plugin-cache"' >~/.terraformrc 56 | mkdir --parents ~/.terraform.d/plugin-cache 57 | 58 | - name: Cache Terraform 59 | uses: actions/cache@v4 60 | with: 61 | path: | 62 | ~/.terraform.d/plugin-cache 63 | key: ${{ runner.os }}-terraform-${{ hashFiles('**/.terraform.lock.hcl') }} 64 | restore-keys: | 65 | ${{ runner.os }}-terraform- 66 | 67 | - name: Install tfsec 68 | uses: jaxxstorm/action-install-gh-release@v1.12.0 69 | with: 70 | repo: aquasecurity/tfsec 71 | platform: linux 72 | arch: amd64 73 | 74 | - name: Install shfmt 75 | uses: mfinelli/setup-shfmt@v3 76 | 77 | - uses: pre-commit/action@v3.0.1 78 | -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | name: generate-release 2 | # This workflow will generate changelog and release notes. 
3 | # Source: https://github.com/terraform-aws-modules/terraform-aws-vpc/blob/master/.github/workflows/release.yml 4 | 5 | on: 6 | workflow_dispatch: 7 | 8 | jobs: 9 | release: 10 | name: Release 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout 14 | uses: actions/checkout@v4 # v4 15 | with: 16 | persist-credentials: false 17 | fetch-depth: 0 18 | 19 | - name: Release 20 | uses: cycjimmy/semantic-release-action@v3 21 | with: 22 | semantic_version: 19.0.5 23 | extra_plugins: | 24 | @semantic-release/changelog@6.0.0 25 | @semantic-release/git@10.0.0 26 | conventional-changelog-conventionalcommits@4.6.3 27 | env: 28 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 29 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # OSX leaves these everywhere on SMB shares 2 | ._* 3 | 4 | # OSX trash 5 | **/.DS_Store 6 | *.pyc* 7 | 8 | # Emacs save files 9 | *~ 10 | \#*\# 11 | .\#* 12 | 13 | # Vim-related files 14 | [._]*.s[a-w][a-z] 15 | [._]s[a-w][a-z] 16 | *.un~ 17 | Session.vim 18 | .netrwhist 19 | 20 | # Local .terraform directories 21 | **/.terraform/* 22 | **/*/.terraform/* 23 | .terraform* 24 | 25 | # .tfstate files 26 | *.tfstate 27 | *.tfstate.* 28 | 29 | .terraform.lock.hcl 30 | 31 | # Crash log files 32 | crash.log 33 | 34 | # Ignore any .tfvars files that are generated automatically for each Terraform run. Most 35 | # .tfvars files are managed as part of configuration and so should be included in 36 | # version control. 
37 | # 38 | *.tfvars 39 | 40 | # Ignore override files as they are usually used to override resources locally and so 41 | # are not checked in 42 | override.tf 43 | override.tf.json 44 | *_override.tf 45 | *_override.tf.json 46 | 47 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 48 | # example: *tfplan* 49 | **/terraform.tfvars 50 | util/keys 51 | 52 | *-kubeconfig 53 | -------------------------------------------------------------------------------- /.mdl_style.rb: -------------------------------------------------------------------------------- 1 | all 2 | 3 | exclude_rule 'MD013' 4 | rule 'MD029', style: ['ordered'] 5 | exclude_rule 'MD033' 6 | exclude_rule 'MD041' 7 | exclude_rule 'MD047' 8 | -------------------------------------------------------------------------------- /.mdlrc: -------------------------------------------------------------------------------- 1 | style '.mdl_style.rb' 2 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fail_fast: false 3 | repos: 4 | 5 | - repo: https://github.com/antonbabenko/pre-commit-terraform 6 | rev: v1.77.1 7 | hooks: 8 | - id: terraform_fmt 9 | args: 10 | - "--args=-recursive" 11 | - id: terraform_validate 12 | exclude: "^[^/]+$" 13 | - id: terraform_tflint 14 | args: 15 | - "--args=--config=__GIT_WORKING_DIR__/.tflint.hcl" 16 | - id: terraform_tfsec 17 | args: 18 | - "--args=--soft-fail" 19 | 20 | - repo: https://github.com/pre-commit/pre-commit-hooks 21 | rev: v4.4.0 22 | hooks: 23 | # Git style 24 | - id: check-added-large-files 25 | - id: check-merge-conflict 26 | - id: check-vcs-permalinks 27 | - id: forbid-new-submodules 28 | - id: no-commit-to-branch 29 | args: ['--branch', 'master'] 30 | 31 | # Common errors 32 | - id: end-of-file-fixer 33 | - id: trailing-whitespace 34 | args: 35 | - "--markdown-linebreak-ext=md" 36 | 
exclude: CHANGELOG.md 37 | - id: check-yaml 38 | args: 39 | - "--allow-multiple-documents" 40 | exclude: | 41 | (?x)^( 42 | examples/| 43 | \.*?.yaml$" 44 | )$ 45 | - id: check-json 46 | - id: check-symlinks 47 | - id: check-executables-have-shebangs 48 | 49 | # Cross platform 50 | - id: check-case-conflict 51 | - id: mixed-line-ending 52 | args: 53 | - "--fix=lf" 54 | 55 | # Security 56 | - id: detect-private-key 57 | 58 | # Shell Script Formatter and Markdown Linter 59 | - repo: https://github.com/jumanjihouse/pre-commit-hooks 60 | rev: 3.0.0 61 | hooks: 62 | - id: shfmt 63 | exclude: | 64 | (?x)^( 65 | helpers/helper-script.sh| 66 | scripts/template-script.sh 67 | )$ 68 | - id: shellcheck 69 | args: 70 | - "--severity=warning" 71 | - "--source-path=SCRIPTDIR scripts/* helpers/*" 72 | - "--shell=bash" 73 | exclude: | 74 | (?x)^( 75 | helpers/helper-script.sh| 76 | scripts/template-script.sh 77 | )$ 78 | - id: markdownlint 79 | exclude: "CHANGELOG.md" 80 | 81 | # JSON5 and Yaml Prettyfier 82 | - repo: https://github.com/pre-commit/mirrors-prettier 83 | rev: v3.0.0-alpha.4 84 | hooks: 85 | - id: prettier 86 | types: [json5, yaml] 87 | exclude: "^examples/" 88 | -------------------------------------------------------------------------------- /.releaserc.json: -------------------------------------------------------------------------------- 1 | { 2 | "branches": [ 3 | "main" 4 | ], 5 | "ci": false, 6 | "plugins": [ 7 | [ 8 | "@semantic-release/commit-analyzer", 9 | { 10 | "preset": "conventionalcommits" 11 | } 12 | ], 13 | [ 14 | "@semantic-release/release-notes-generator", 15 | { 16 | "preset": "conventionalcommits" 17 | } 18 | ], 19 | [ 20 | "@semantic-release/github", 21 | { 22 | "successComment": "This ${issue.pull_request ? 
'PR is included' : 'issue has been resolved'} in version ${nextRelease.version} :tada:", 23 | "labels": false, 24 | "releasedLabels": false 25 | } 26 | ], 27 | [ 28 | "@semantic-release/changelog", 29 | { 30 | "changelogFile": "CHANGELOG.md", 31 | "changelogTitle": "# Changelog\n\nAll notable changes to this project will be documented in this file." 32 | } 33 | ], 34 | [ 35 | "@semantic-release/git", 36 | { 37 | "assets": [ 38 | "CHANGELOG.md" 39 | ], 40 | "message": "chore(release): version ${nextRelease.version} [skip ci]\n\n${nextRelease.notes}" 41 | } 42 | ] 43 | ] 44 | } 45 | -------------------------------------------------------------------------------- /.tflint.hcl: -------------------------------------------------------------------------------- 1 | plugin "terraform" { 2 | enabled = true 3 | version = "0.9.1" 4 | source = "github.com/terraform-linters/tflint-ruleset-terraform" 5 | } 6 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 
4 | 5 | ## [0.1.0](https://github.com/equinix-labs/terraform-equinix-metal-k3s/compare/v0.0.2...v0.1.0) (2023-06-05) 6 | 7 | 8 | ### Features 9 | 10 | * use implicit provider inheritance and examples/ ([30bf0a9](https://github.com/equinix-labs/terraform-equinix-metal-k3s/commit/30bf0a97fd656393d85093c98733ad5f7b67c0a7)) 11 | 12 | 13 | ### Bug Fixes 14 | 15 | * extra newline breaks markdown linting in examples/demo_cluster ([42db8a6](https://github.com/equinix-labs/terraform-equinix-metal-k3s/commit/42db8a6fff13b09e81d6f9b411df56d5d0852e73)) 16 | * outputs.tf should be used in examples/demo_cluster ([75f02a4](https://github.com/equinix-labs/terraform-equinix-metal-k3s/commit/75f02a436da29772c8b7db6a8b188eae3032b47b)) 17 | * remove unused metal_auth_token parameter from examples/demo_cluster ([2b0302e](https://github.com/equinix-labs/terraform-equinix-metal-k3s/commit/2b0302e77695625baf8f39758283e6ca1412969b)) 18 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # TEMPLATE: add your username after terraform 2 | # TEMPLATE: * equinix-labs/terraform myusername 3 | * @equinix-labs/terraform 4 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 
11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. 
Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at devrel [at] packet [dot] com. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | 77 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | This document provides guidelines for contributing to the module. 4 | 5 | Contributors to this project must abide by the [Contributor Covenant Code of Conduct](CODE_OF_CONDUCT.md). 6 | 7 | ## File structure 8 | 9 | The project has the following folders and files: 10 | 11 | - /: root folder. 12 | - /docs: Examples for using this module. 13 | - /examples: Examples for using this module. 14 | - /files: Static files referenced but not executed by Terraform. 15 | - /helpers: Helper scripts NOT called by Terraform. 16 | - /modules: Inline local modules called by this module. 17 | - /scripts: Scripts for specific tasks on module. 
18 | - /templates: Template files used to be execute by data sources. 19 | - /main.tf: Main file for this module, contains all the resources to operate the module. 20 | - /variables.tf: All the variables necessary for run the module. 21 | - /output.tf: The outputs generate from the module. 22 | - /README.md: Main repo README document. 23 | - /CHANGELOG.md: Module release changelog file. 24 | - /CODEOWNERS: Module contributing developers. 25 | - /CODE_OF_CONDUCT.md: Code of Conduct file. 26 | - /CONTRIBUTING.md: This file. 27 | 28 | ## Issues and Change Requests 29 | 30 | Please submit change requests and / or features via [Issues](https://github.com/equinix-labs/equinix-labs/issues). 31 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # K3s/RKE2 on Equinix Metal 2 | 3 | [![GitHub release](https://img.shields.io/github/release/equinix-labs/terraform-equinix-metal-k3s/all.svg?style=flat-square)](https://github.com/equinix-labs/terraform-equinix-metal-k3s/releases) 4 | ![](https://img.shields.io/badge/Stability-Experimental-red.svg) 5 | [![Equinix Community](https://img.shields.io/badge/Equinix%20Community%20-%20%23E91C24?logo=equinixmetal)](https://community.equinix.com) 6 | 7 | ## Table of content 8 | 9 |
Table of content

10 | 11 | - [K3s/RKE2 on Equinix Metal](#k3srke2-on-equinix-metal) 12 | - [Table of content](#table-of-content) 13 | - [Introduction](#introduction) 14 | - [Prerequisites](#prerequisites) 15 | - [Variable requirements](#variable-requirements) 16 | - [Demo application](#demo-application) 17 | - [Example scenarios](#example-scenarios) 18 | - [Single node in default Metro](#single-node-in-default-metro) 19 | - [Single node in 2 different Metros](#single-node-in-2-different-metros) 20 | - [1 x All-in-one cluster with Rancher (stable), a custom K3s version \& 1 public IP (+1 for Ingress) + 1 x All-in-one with 1 extra node \& a custom RKE2 version + 1 x HA cluster with 3 nodes \& 4 public IPs. Global IPV4 and demo app deployed](#1-x-all-in-one-cluster-with-rancher-stable-a-custom-k3s-version--1-public-ip-1-for-ingress--1-x-all-in-one-with-1-extra-node--a-custom-rke2-version--1-x-ha-cluster-with-3-nodes--4-public-ips-global-ipv4-and-demo-app-deployed) 21 | - [Usage](#usage) 22 | - [Accessing the clusters](#accessing-the-clusters) 23 | - [Rancher bootstrap and add all clusters to Rancher](#rancher-bootstrap-and-add-all-clusters-to-rancher) 24 | - [Terraform module documentation](#terraform-module-documentation) 25 | - [Requirements](#requirements) 26 | - [Providers](#providers) 27 | - [Modules](#modules) 28 | - [Resources](#resources) 29 | - [Inputs](#inputs) 30 | - [Outputs](#outputs) 31 | - [Contributing](#contributing) 32 | - [License](#license) 33 | 34 |

35 | 36 | ## Introduction 37 | 38 | This is a [Terraform](https://registry.terraform.io/providers/equinix/metal/latest/docs) project for deploying [K3s](https://k3s.io) (or [RKE2](https://docs.rke2.io/)) on [Equinix Metal](https://metal.equinix.com) intended to allow you to quickly spin-up and down K3s/RKE2 clusters. 39 | 40 | [K3s](https://docs.k3s.io/) is a fully compliant and lightweight Kubernetes distribution focused on Edge, IoT, ARM or just for situations where a PhD in K8s clusterology is infeasible. [RKE2](https://docs.rke2.io/) is Rancher’s next-generation Kubernetes distribution; it combines the best-of-both-worlds from the 1.x version of RKE (hereafter referred to as RKE1) and K3s. From K3s, it inherits the usability, ease-of-operations, and deployment model. From RKE1, it inherits close alignment with upstream Kubernetes. In places K3s has diverged from upstream Kubernetes in order to optimize for edge deployments, but RKE1 and RKE2 can stay closely aligned with upstream. 41 | 42 | > :warning: This repository is [Experimental](https://github.com/packethost/standards/blob/master/experimental-statement.md) meaning that it's based on untested ideas or techniques and not yet established or finalized or involves a radically new and innovative style! This means that support is best effort (at best!) and we strongly encourage you to NOT use this in production. 43 | 44 | This terraform project supports a wide variety of scenarios and is mostly focused on Edge, such as: 45 | 46 | * Single node K3s/RKE2 cluster on a single Equinix Metal Metro. 47 | * HA K3s/RKE2 cluster (3 control plane nodes) using [MetalLB](https://metallb.universe.tf/) + BGP to provide an HA K3s/RKE2 API entrypoint. 48 | * Any number of worker nodes (both for single node or HA scenarios). 49 | * Any number of public IPv4s to be used to expose services to the outside using `LoadBalancer` services via MetalLB. 50 | * Optionally it can deploy Rancher Manager on top of the cluster.
51 | * All those previous scenarios but deploying multiple clusters on multiple Equinix Metal metros. 52 | * A Global IPv4 that is shared in all clusters among all Equinix Metal Metros and can be used to expose an example application to demonstrate load balancing between different Equinix Metal Metros. 53 | 54 | More on that later. 55 | 56 | ## Prerequisites 57 | 58 | * An [Equinix Metal account](https://deploy.equinix.com/get-started/) 59 |
Show more details

60 | An Equinix Metal account needs to be created. You can sign up for free (credit card required). 61 |

62 | * An [Equinix Metal project](https://deploy.equinix.com/developers/docs/metal/accounts/projects/) 63 |
Show more details

64 | Equinix Metal is organized in Projects. They can be created either via the Web UI, via the CLI or the API. Check the above link for instructions on how to create it. 65 |

66 | * An [Equinix Metal API Key](https://deploy.equinix.com/developers/docs/metal/accounts/api-keys/) 67 |
Show more details

68 | In order to be able to interact with the Equinix Metal API, an API Key is needed. Check the above link for instructions on how to get it. 69 | For this project to work, the API Key requires write permissions. 70 |

71 | * [BGP](https://deploy.equinix.com/developers/docs/metal/bgp/local-bgp/) enabled in the project. 72 |
Show more details

73 | Equinix Metal supports Local BGP for advertising routes to your Equinix Metal servers in a local environment, and this will be used to provide a single entrypoint for the K3s API in HA deployments as well as to provide `LoadBalancer` services using MetalLB. Check the above link for instructions on how to enable it. 74 |

75 | * An [SSH Key](https://deploy.equinix.com/developers/docs/metal/accounts/ssh-keys/) configured. 76 |
Show more details

77 | Having an SSH key in your account or project makes the provisioning procedure inject it automatically into the host being provisioned, so you can ssh into it. Keys can be created either via the Web UI, via the CLI or the API; check the above link for instructions on how to get one. 78 |

79 | * [Terraform](https://developer.hashicorp.com/terraform/downloads?product_intent=terraform) 80 |
Show more details

81 | Terraform is just a single binary. Visit their download page, choose your operating system, make the binary executable, and move it into your path. 82 |

83 | * [git](https://git-scm.com/) to download the content of this repository 84 | 85 | > :warning: Before creating the assets, verify there are enough servers in the chosen Metros by visiting the [Capacity Dashboard](https://deploy.equinix.com/developers/capacity-dashboard/). See more about the inventory and capacity [in the official documentation](https://deploy.equinix.com/developers/docs/metal/locations/capacity/) 86 | 87 | ## Variable requirements 88 | 89 | There is a lot of flexibility in the module to allow customization of the different scenarios. There can be as many clusters with different topologies as wanted but mainly, as defined in [examples/demo_cluster](examples/demo_cluster/README.md): 90 | 91 | | Name | Description | Type | Default | Required | 92 | |------|-------------|------|---------|:--------:| 93 | | [metal\_auth\_token](#input\_metal\_auth\_token) | Your Equinix Metal API key | `string` | n/a | yes | 94 | | [metal\_project\_id](#input\_metal\_project\_id) | Your Equinix Metal Project ID | `string` | n/a | yes | 95 | | [clusters](#input\_clusters) | Kubernetes cluster definition | `list of kubernetes cluster objects` | n/a | yes | 96 | 97 | > :note: The Equinix Metal Auth Token should be defined in a `provider` block in your own Terraform config. In this project, that is done in `examples/demo_cluster/`, not in the root. This pattern facilitates [Implicit Provider Inheritance](https://developer.hashicorp.com/terraform/language/modules/develop/providers#implicit-provider-inheritance) and better reuse of Terraform modules. 98 | 99 | For more details on the variables, see the [Terraform module documentation](#terraform-module-documentation) section. 100 | 101 | The default variables are set to deploy a single node K3s (latest K3s version available) cluster in the FR Metro, using an Equinix Metal c3.small.x86. 
You just need to add the cluster name as: 102 | 103 | ```bash 104 | metal_auth_token = "redacted" 105 | metal_project_id = "redacted" 106 | clusters = [ 107 | { 108 | name = "FR DEV Cluster" 109 | } 110 | ] 111 | ``` 112 | 113 | Change each default variable at your own risk, see [Example scenarios](#example-scenarios) and the [kube_cluster module README.md file](modules/kube_cluster/README.md) for more details. 114 | 115 | > :warning: The hostnames are created based on the Cluster Name and the `control_plane_hostnames` & `node_hostnames` variables (normalized), beware the length of those variables. 116 | 117 | You can create a [terraform.tfvars](https://developer.hashicorp.com/terraform/language/values/variables#variable-definitions-tfvars-files) file with the appropriate content or use the [`TF_VAR_` environment variables](https://developer.hashicorp.com/terraform/language/values/variables#environment-variables). 118 | 119 | > :warning: The only OS that has been tested is Debian 11. 120 | 121 | ## Demo application 122 | 123 | If enabled (`deploy_demo = true`), a demo application ([hello-kubernetes](https://github.com/paulbouwer/hello-kubernetes)) will be deployed on all the clusters. An extra [Ingress-NGINX Controller](https://github.com/kubernetes/ingress-nginx) is deployed on each cluster to expose that application and the load will be spread among all the clusters. This means that different requests will be routed to different clusters. See [the MetalLB documentation](https://metallb.universe.tf/concepts/bgp/#load-balancing-behavior) for more information about how BGP load balancing works. 
124 | 125 | ## Example scenarios 126 | 127 | ### Single node in default Metro 128 | 129 | ```bash 130 | metal_auth_token = "redacted" 131 | metal_project_id = "redacted" 132 | clusters = [ 133 | { 134 | name = "FR DEV Cluster" 135 | } 136 | ] 137 | ``` 138 | 139 | This will produce something similar to: 140 | 141 | ```bash 142 | Outputs: 143 | 144 | clusters_output = { 145 | "cluster_details" = { 146 | "FR DEV Cluster" = { 147 | "api" = "147.28.184.239" 148 | "nodes" = { 149 | "fr-dev-cluster-cp-aio" = { 150 | "node_private_ipv4" = "10.25.49.1" 151 | "node_public_ipv4" = "147.28.184.239" 152 | } 153 | } 154 | } 155 | } 156 | } 157 | ``` 158 | 159 | ### Single node in 2 different Metros 160 | 161 | ```bash 162 | metal_auth_token = "redacted" 163 | metal_project_id = "redacted" 164 | clusters = [ 165 | { 166 | name = "FR DEV Cluster" 167 | }, 168 | { 169 | name = "SV DEV Cluster" 170 | metro = "SV" 171 | } 172 | ] 173 | ``` 174 | 175 | This will produce something similar to: 176 | 177 | ```bash 178 | Outputs: 179 | 180 | clusters_output = { 181 | "cluster_details" = { 182 | "FR DEV Cluster" = { 183 | "api" = "147.28.184.239" 184 | "nodes" = { 185 | "fr-dev-cluster-cp-aio" = { 186 | "node_private_ipv4" = "10.25.49.1" 187 | "node_public_ipv4" = "147.28.184.239" 188 | } 189 | } 190 | } 191 | "SV DEV Cluster" = { 192 | "api" = "139.178.70.53" 193 | "nodes" = { 194 | "sv-dev-cluster-cp-aio" = { 195 | "node_private_ipv4" = "10.67.31.129" 196 | "node_public_ipv4" = "139.178.70.53" 197 | } 198 | } 199 | } 200 | } 201 | } 202 | ``` 203 | 204 | ### 1 x All-in-one cluster with Rancher (stable), a custom K3s version & 1 public IP (+1 for Ingress) + 1 x All-in-one with 1 extra node & a custom RKE2 version + 1 x HA cluster with 3 nodes & 4 public IPs. 
Global IPV4 and demo app deployed 205 | 206 | ```bash 207 | metal_auth_token = "redacted" 208 | metal_project_id = "redacted" 209 | clusters = [ 210 | { 211 | name = "FR DEV Cluster" 212 | rancher_flavor = "stable" 213 | ip_pool_count = 1 214 | kube_version = "v1.29.9+k3s1" 215 | }, 216 | { 217 | name = "SV DEV Cluster" 218 | metro = "SV" 219 | node_count = 1 220 | kube_version = "v1.30.3+rke2r1" 221 | }, 222 | { 223 | name = "SV Production" 224 | ip_pool_count = 4 225 | ha = true 226 | metro = "SV" 227 | node_count = 3 228 | } 229 | ] 230 | 231 | global_ip = true 232 | deploy_demo = true 233 | ``` 234 | 235 | This will produce something similar to: 236 | 237 | ```bash 238 | Outputs: 239 | 240 | clusters_output = { 241 | "anycast_ip" = "147.75.40.34" 242 | "cluster_details" = { 243 | "FR DEV Cluster" = { 244 | "api" = "147.28.184.239" 245 | "ingress" = "147.28.184.119" 246 | "ip_pool_cidr" = "147.28.184.118/32" 247 | "nodes" = { 248 | "fr-dev-cluster-cp-aio" = { 249 | "node_private_ipv4" = "10.25.49.1" 250 | "node_public_ipv4" = "147.28.184.239" 251 | } 252 | } 253 | } 254 | "SV DEV Cluster" = { 255 | "api" = "139.178.70.53" 256 | "nodes" = { 257 | "sv-dev-cluster-cp-aio" = { 258 | "node_private_ipv4" = "10.67.31.129" 259 | "node_public_ipv4" = "139.178.70.53" 260 | } 261 | "sv-dev-cluster-node-00" = { 262 | "node_private_ipv4" = "10.67.31.131" 263 | "node_public_ipv4" = "86.109.11.115" 264 | } 265 | } 266 | } 267 | "SV Production" = { 268 | "api" = "86.109.11.239" 269 | "ingress" = "86.109.11.53" 270 | "ip_pool_cidr" = "139.178.70.68/30" 271 | "nodes" = { 272 | "sv-production-cp-0" = { 273 | "node_private_ipv4" = "10.67.31.133" 274 | "node_public_ipv4" = "139.178.70.141" 275 | } 276 | "sv-production-cp-1" = { 277 | "node_private_ipv4" = "10.67.31.137" 278 | "node_public_ipv4" = "136.144.54.109" 279 | } 280 | "sv-production-cp-2" = { 281 | "node_private_ipv4" = "10.67.31.143" 282 | "node_public_ipv4" = "139.178.94.11" 283 | } 284 | "sv-production-node-00" = { 285 | 
"node_private_ipv4" = "10.67.31.141" 286 | "node_public_ipv4" = "136.144.54.113" 287 | } 288 | "sv-production-node-01" = { 289 | "node_private_ipv4" = "10.67.31.135" 290 | "node_public_ipv4" = "139.178.70.233" 291 | } 292 | "sv-production-node-02" = { 293 | "node_private_ipv4" = "10.67.31.139" 294 | "node_public_ipv4" = "136.144.54.111" 295 | } 296 | } 297 | } 298 | } 299 | "demo_url" = "http://hellok3s.147.75.40.34.sslip.io" 300 | "rancher_urls" = { 301 | "FR DEV Cluster" = { 302 | "rancher_initial_password_base64" = "Zm9vdmFsdWU=" 303 | "rancher_url" = "https://rancher.147.28.184.119.sslip.io" 304 | } 305 | } 306 | } 307 | ``` 308 | 309 | ## Usage 310 | 311 | * Download the repository: 312 | 313 | ```bash 314 | git clone https://github.com/equinix-labs/terraform-equinix-metal-k3s.git 315 | cd terraform-equinix-metal-k3s/examples/demo_cluster 316 | ``` 317 | 318 | * Initialize terraform: 319 | 320 | ```bash 321 | terraform init -upgrade 322 | ``` 323 | 324 | * Optionally, configure a proper backend to [store the Terraform state file](https://spacelift.io/blog/terraform-state) 325 | 326 | * Modify your variables. Depending on the scenario, some variables are needed and some others are optional but let you customize the scenario as wanted. 327 | 328 | * Review the deployment before submitting it with `terraform plan` (or using environment variables) as: 329 | 330 | ```bash 331 | terraform plan -var-file="foobar.tfvars" 332 | ``` 333 | 334 | * Deploy it 335 | 336 | ```bash 337 | terraform apply -var-file="foobar.tfvars" --auto-approve 338 | ``` 339 | 340 | * Profit! 341 | 342 | The output will show the required IPs or hostnames to use the clusters: 343 | 344 | ```bash 345 | ... 346 | Apply complete! Resources: 3 added, 0 changed, 0 destroyed. 
347 | 348 | Outputs: 349 | 350 | clusters_output = { 351 | "cluster_details" = { 352 | "FR DEV Cluster" = { 353 | "api" = "147.28.184.239" 354 | "nodes" = { 355 | "fr-dev-cluster-cp-aio" = { 356 | "node_private_ipv4" = "10.25.49.1" 357 | "node_public_ipv4" = "147.28.184.239" 358 | } 359 | } 360 | } 361 | } 362 | } 363 | ``` 364 | 365 | ## Accessing the clusters 366 | 367 | As the SSH key for the project has been injected, the clusters can be accessed as: 368 | 369 | ```bash 370 | ( 371 | OUTPUT=$(terraform output -json) 372 | IFS=$'\n' 373 | for cluster in $(echo ${OUTPUT} | jq -r ".clusters_output.value.cluster_details | keys[]"); do 374 | FIRSTHOST=$(echo ${OUTPUT} | jq -r "first(.clusters_output.value.cluster_details[\"${cluster}\"].nodes[].node_public_ipv4)") 375 | echo "=== ${cluster} ===" 376 | ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@${FIRSTHOST} -tt 'bash -l -c "kubectl get nodes -o wide"' 377 | done 378 | ) 379 | 380 | === FR DEV Cluster === 381 | NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME 382 | fr-dev-cluster-cp-aio Ready control-plane,master 4m31s v1.29.9+k3s1 10.25.49.1 147.28.184.239 Debian GNU/Linux 11 (bullseye) 5.10.0-32-amd64 containerd://1.7.21-k3s2 383 | === SV DEV Cluster === 384 | NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME 385 | sv-dev-cluster-cp-aio Ready control-plane,etcd,master 4m3s v1.30.3+rke2r1 10.67.31.129 139.178.70.53 Debian GNU/Linux 11 (bullseye) 5.10.0-32-amd64 containerd://1.7.17-k3s1 386 | sv-dev-cluster-node-00 Ready 2m29s v1.30.3+rke2r1 10.67.31.133 139.178.70.233 Debian GNU/Linux 11 (bullseye) 5.10.0-32-amd64 containerd://1.7.17-k3s1 387 | === SV Production === 388 | NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME 389 | sv-production-cp-0 Ready control-plane,etcd,master 2m46s v1.30.5+k3s1 10.67.31.131 139.178.70.141 Debian GNU/Linux 11 
(bullseye) 5.10.0-32-amd64 containerd://1.7.21-k3s2 390 | sv-production-cp-1 Ready control-plane,etcd,master 42s v1.30.5+k3s1 10.67.31.137 136.144.54.111 Debian GNU/Linux 11 (bullseye) 5.10.0-32-amd64 containerd://1.7.21-k3s2 391 | sv-production-cp-2 Ready control-plane,etcd,master 26s v1.30.5+k3s1 10.67.31.139 136.144.54.113 Debian GNU/Linux 11 (bullseye) 5.10.0-32-amd64 containerd://1.7.21-k3s2 392 | sv-production-node-00 Ready 63s v1.30.5+k3s1 10.67.31.135 136.144.54.109 Debian GNU/Linux 11 (bullseye) 5.10.0-32-amd64 containerd://1.7.21-k3s2 393 | sv-production-node-01 Ready 59s v1.30.5+k3s1 10.67.31.141 139.178.94.11 Debian GNU/Linux 11 (bullseye) 5.10.0-32-amd64 containerd://1.7.21-k3s2 394 | sv-production-node-02 Ready 57s v1.30.5+k3s1 10.67.31.143 139.178.94.19 Debian GNU/Linux 11 (bullseye) 5.10.0-32-amd64 containerd://1.7.21-k3s2 395 | ``` 396 | 397 | To access from outside, the kubeconfig file can be copied to any host and replace the `server` field with the IP of the kubernetes API: 398 | 399 | ```bash 400 | ( 401 | OUTPUT=$(terraform output -json) 402 | IFS=$'\n' 403 | for cluster in $(echo ${OUTPUT} | jq -r ".clusters_output.value.cluster_details | keys[]"); do 404 | FIRSTHOST=$(echo ${OUTPUT} | jq -r "first(.clusters_output.value.cluster_details[\"${cluster}\"].nodes[].node_public_ipv4)") 405 | API=$(echo ${OUTPUT} | jq -r ".clusters_output.value.cluster_details[\"${cluster}\"].api") 406 | export KUBECONFIG="./$(echo ${cluster}| tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9 ]/-/g' | sed 's/ /-/g' | sed 's/^-*\|-*$/''/g')-kubeconfig" 407 | scp -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@${FIRSTHOST}:/root/.kube/config ${KUBECONFIG} 408 | sed -i "s/127.0.0.1/${API}/g" ${KUBECONFIG} 409 | chmod 600 ${KUBECONFIG} 410 | echo "=== ${cluster} ===" 411 | kubectl get nodes 412 | done 413 | ) 414 | 415 | === FR DEV Cluster === 416 | NAME STATUS ROLES AGE VERSION 417 | fr-dev-cluster-cp-aio Ready control-plane,master 10m v1.29.9+k3s1 418 | 
=== SV DEV Cluster === 419 | NAME STATUS ROLES AGE VERSION 420 | sv-dev-cluster-cp-aio Ready control-plane,etcd,master 10m v1.30.3+rke2r1 421 | sv-dev-cluster-node-00 Ready 8m43s v1.30.3+rke2r1 422 | === SV Production === 423 | NAME STATUS ROLES AGE VERSION 424 | sv-production-cp-0 Ready control-plane,etcd,master 9m v1.30.5+k3s1 425 | sv-production-cp-1 Ready control-plane,etcd,master 6m56s v1.30.5+k3s1 426 | sv-production-cp-2 Ready control-plane,etcd,master 6m40s v1.30.5+k3s1 427 | sv-production-node-00 Ready 7m17s v1.30.5+k3s1 428 | sv-production-node-01 Ready 7m13s v1.30.5+k3s1 429 | sv-production-node-02 Ready 7m11s v1.30.5+k3s1 430 | ``` 431 | 432 | > :warning: OSX sed is different, it needs to be used as `sed -i "" "s/127.0.0.1/${API}/g" ${KUBECONFIG}` instead. 433 | 434 | ```bash 435 | ( 436 | OUTPUT=$(terraform output -json) 437 | IFS=$'\n' 438 | for cluster in $(echo ${OUTPUT} | jq -r ".clusters_output.value.cluster_details | keys[]"); do 439 | FIRSTHOST=$(echo ${OUTPUT} | jq -r "first(.clusters_output.value.cluster_details[\"${cluster}\"].nodes[].node_public_ipv4)") 440 | API=$(echo ${OUTPUT} | jq -r ".clusters_output.value.cluster_details[\"${cluster}\"].api") 441 | export KUBECONFIG="./$(echo ${cluster}| tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9 ]/-/g' | sed 's/ /-/g' | sed 's/^-*\|-*$/''/g')-kubeconfig" 442 | scp -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@${FIRSTHOST}:/root/.kube/config ${KUBECONFIG} 443 | sed -i "" "s/127.0.0.1/${API}/g" ${KUBECONFIG} 444 | chmod 600 ${KUBECONFIG} 445 | echo "=== ${cluster} ===" 446 | kubectl get nodes 447 | done 448 | ) 449 | 450 | === FR DEV Cluster === 451 | NAME STATUS ROLES AGE VERSION 452 | fr-dev-cluster-cp-aio Ready control-plane,master 10m v1.29.9+k3s1 453 | === SV DEV Cluster === 454 | NAME STATUS ROLES AGE VERSION 455 | sv-dev-cluster-cp-aio Ready control-plane,etcd,master 10m v1.30.3+rke2r1 456 | sv-dev-cluster-node-00 Ready 8m43s v1.30.3+rke2r1 457 | === SV Production === 458 
| NAME STATUS ROLES AGE VERSION 459 | sv-production-cp-0 Ready control-plane,etcd,master 9m v1.30.5+k3s1 460 | sv-production-cp-1 Ready control-plane,etcd,master 6m56s v1.30.5+k3s1 461 | sv-production-cp-2 Ready control-plane,etcd,master 6m40s v1.30.5+k3s1 462 | sv-production-node-00 Ready 7m17s v1.30.5+k3s1 463 | sv-production-node-01 Ready 7m13s v1.30.5+k3s1 464 | sv-production-node-02 Ready 7m11s v1.30.5+k3s1 465 | ``` 466 | 467 | ## Rancher bootstrap and add all clusters to Rancher 468 | 469 | There is a helper script [clusters-to-rancher.sh](./examples/demo_cluster/clusters-to-rancher.sh) that will perform the 470 | [Rancher first login process](https://ranchermanager.docs.rancher.com/how-to-guides/new-user-guides/authentication-permissions-and-global-configuration#first-log-in) 471 | automatically based on the Terraform output. 472 | 473 | The script also imports all the other clusters where rancher wasn't deployed. 474 | 475 | It only requires the admin password (>=12 characters) that Rancher will use moving forward: 476 | 477 | ```bash 478 | ./clusters-to-rancher.sh -p 479 | ``` 480 | 481 | ![Awesome Rancher screenshot](./rancher-clusters-imported.png?raw=true "Clusters imported") 482 | 483 | ## Terraform module documentation 484 | 485 | 486 | 487 | 488 | ### Requirements 489 | 490 | | Name | Version | 491 | |------|---------| 492 | | [terraform](#requirement\_terraform) | >= 1.9 | 493 | | [equinix](#requirement\_equinix) | >= 1.14.2 | 494 | 495 | ### Providers 496 | 497 | | Name | Version | 498 | |------|---------| 499 | | [equinix](#provider\_equinix) | >= 1.14.2 | 500 | 501 | ### Modules 502 | 503 | | Name | Source | Version | 504 | |------|--------|---------| 505 | | [kube\_cluster](#module\_kube\_cluster) | ./modules/kube_cluster | n/a | 506 | 507 | ### Resources 508 | 509 | | Name | Type | 510 | |------|------| 511 | | 
[equinix_metal_reserved_ip_block.global_ip](https://registry.terraform.io/providers/equinix/equinix/latest/docs/resources/metal_reserved_ip_block) | resource | 512 | 513 | ### Inputs 514 | 515 | | Name | Description | Type | Default | Required | 516 | |------|-------------|------|---------|:--------:| 517 | | [metal\_project\_id](#input\_metal\_project\_id) | Equinix Metal Project ID | `string` | n/a | yes | 518 | | [clusters](#input\_clusters) | Cluster definition |
list(object({
name = optional(string, "Demo cluster")
metro = optional(string, "FR")
plan_control_plane = optional(string, "c3.small.x86")
plan_node = optional(string, "c3.small.x86")
node_count = optional(number, 0)
ha = optional(bool, false)
os = optional(string, "debian_11")
control_plane_hostnames = optional(string, "cp")
node_hostnames = optional(string, "node")
custom_token = optional(string, "")
ip_pool_count = optional(number, 0)
kube_version = optional(string, "")
metallb_version = optional(string, "")
rancher_flavor = optional(string, "")
rancher_version = optional(string, "")
custom_rancher_password = optional(string, "")
}))
|
[
{}
]
| no | 519 | | [deploy\_demo](#input\_deploy\_demo) | Deploys a simple demo using a global IP as ingress and a hello-kubernetes pods | `bool` | `false` | no | 520 | | [global\_ip](#input\_global\_ip) | Enables a global anycast IPv4 that will be shared for all clusters in all metros | `bool` | `false` | no | 521 | 522 | ### Outputs 523 | 524 | | Name | Description | 525 | |------|-------------| 526 | | [anycast\_ip](#output\_anycast\_ip) | Global IP shared across Metros | 527 | | [cluster\_details](#output\_cluster\_details) | List of Clusters => K8s details | 528 | | [demo\_url](#output\_demo\_url) | URL of the demo application to demonstrate a global IP shared across Metros | 529 | | [rancher\_urls](#output\_rancher\_urls) | List of Clusters => Rancher details | 530 | 531 | 532 | ## Contributing 533 | 534 | If you would like to contribute to this module, see [CONTRIBUTING](CONTRIBUTING.md) page. 535 | 536 | ## License 537 | 538 | Apache License, Version 2.0. See [LICENSE](LICENSE). 539 | -------------------------------------------------------------------------------- /examples/demo_cluster/README.md: -------------------------------------------------------------------------------- 1 | # Demo Cluster Examples 2 | 3 | This example demonstrates usage of the Equinix Metal K3s/RKE2 module. A Demo application is installed. 4 | 5 | ## Usage 6 | 7 | ```bash 8 | terraform init 9 | terraform apply 10 | ``` 11 | 12 | 13 | 14 | ### Requirements 15 | 16 | | Name | Version | 17 | |------|---------| 18 | | [terraform](#requirement\_terraform) | >= 1.3 | 19 | | [equinix](#requirement\_equinix) | >= 1.14.2 | 20 | 21 | ### Providers 22 | 23 | No providers. 24 | 25 | ### Modules 26 | 27 | | Name | Source | Version | 28 | |------|--------|---------| 29 | | [demo](#module\_demo) | ../.. | n/a | 30 | 31 | ### Resources 32 | 33 | No resources. 
34 | 35 | ### Inputs 36 | 37 | | Name | Description | Type | Default | Required | 38 | |------|-------------|------|---------|:--------:| 39 | | [metal\_auth\_token](#input\_metal\_auth\_token) | Your Equinix Metal API key | `string` | n/a | yes | 40 | | [metal\_project\_id](#input\_metal\_project\_id) | Your Equinix Metal Project ID | `string` | n/a | yes | 41 | | [clusters](#input\_clusters) | Cluster definition |
list(object({
name = optional(string, "Demo cluster")
metro = optional(string, "FR")
plan_control_plane = optional(string, "c3.small.x86")
plan_node = optional(string, "c3.small.x86")
node_count = optional(number, 0)
ha = optional(bool, false)
os = optional(string, "debian_11")
control_plane_hostnames = optional(string, "cp")
node_hostnames = optional(string, "node")
custom_token = optional(string, "")
ip_pool_count = optional(number, 0)
kube_version = optional(string, "")
metallb_version = optional(string, "")
rancher_version = optional(string, "")
rancher_flavor = optional(string, "")
custom_rancher_password = optional(string, "")
}))
|
[
{}
]
| no | 42 | | [deploy\_demo](#input\_deploy\_demo) | Deploys a simple demo using a global IP as ingress and a hello-kubernetes pods | `bool` | `false` | no | 43 | | [global\_ip](#input\_global\_ip) | Enables a global anycast IPv4 that will be shared for all clusters in all metros | `bool` | `false` | no | 44 | 45 | ### Outputs 46 | 47 | | Name | Description | 48 | |------|-------------| 49 | | [clusters\_output](#output\_clusters\_output) | Passthrough of the root module output | 50 | 51 | -------------------------------------------------------------------------------- /examples/demo_cluster/clusters-to-rancher.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | usage() { 5 | echo "Usage: $0 -p " 6 | exit 1 7 | } 8 | 9 | die() { 10 | echo ${1} 1>&2 11 | exit ${2} 12 | } 13 | 14 | prechecks() { 15 | command -v kubectl >/dev/null 2>&1 || die "Error: kubectl not found" 1 16 | command -v curl >/dev/null 2>&1 || die "Error: curl not found" 1 17 | command -v jq >/dev/null 2>&1 || die "Error: jq not found" 1 18 | command -v scp >/dev/null 2>&1 || die "Error: scp not found" 1 19 | } 20 | 21 | wait_for_rancher() { 22 | while ! 
curl -k "${RANCHERURL}/ping" >/dev/null 2>&1; do sleep 1; done 23 | } 24 | 25 | bootstrap_rancher() { 26 | # Get token 27 | TOKEN=$(curl -sk -X POST ${RANCHERURL}/v3-public/localProviders/local?action=login -H 'content-type: application/json' -d "{\"username\":\"admin\",\"password\":\"${RANCHERPASS}\"}" | jq -r .token) 28 | 29 | # Set password 30 | curl -q -sk ${RANCHERURL}/v3/users?action=changepassword -H 'content-type: application/json' -H "Authorization: Bearer ${TOKEN}" -d "{\"currentPassword\":\"${RANCHERPASS}\",\"newPassword\":\"${PASSWORD}\"}" 31 | 32 | # Create a temporary API token (ttl=60 minutes) 33 | APITOKEN=$(curl -sk ${RANCHERURL}/v3/token -H 'content-type: application/json' -H "Authorization: Bearer ${TOKEN}" -d '{"type":"token","description":"automation","ttl":3600000}' | jq -r .token) 34 | 35 | # Set the Rancher URL 36 | curl -q -sk ${RANCHERURL}/v3/settings/server-url -H 'content-type: application/json' -H "Authorization: Bearer ${APITOKEN}" -X PUT -d "{\"name\":\"server-url\",\"value\":\"${RANCHERURL}\"}" 37 | } 38 | 39 | get_cluster_kubeconfig() { 40 | cluster="${1}" 41 | FIRSTHOST=$(echo ${OUTPUT} | jq -r "first(.clusters_output.value.cluster_details[\"${cluster}\"].nodes[].node_public_ipv4)") 42 | API=$(echo ${OUTPUT} | jq -r ".clusters_output.value.cluster_details[\"${cluster}\"].api") 43 | KUBECONFIG="$(mktemp)" 44 | export KUBECONFIG 45 | scp -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@${FIRSTHOST}:/root/.kube/config ${KUBECONFIG} 46 | # Linux 47 | [ "$(uname -o)" == "GNU/Linux" ] && sed -i "s/127.0.0.1/${API}/g" ${KUBECONFIG} 48 | # OSX 49 | [ "$(uname -o)" == "Darwin" ] && sed -i "" "s/127.0.0.1/${API}/g" ${KUBECONFIG} 50 | chmod 600 ${KUBECONFIG} 51 | echo ${KUBECONFIG} 52 | } 53 | 54 | clusters_to_rancher() { 55 | RANCHERKUBE=$(get_cluster_kubeconfig "${RANCHERCLUSTER}") 56 | 57 | IFS=$'\n' 58 | for clustername in ${OTHERCLUSTERS}; do 59 | export KUBECONFIG=${RANCHERKUBE} 60 | normalizedname=$(echo 
${clustername} | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9 ]/-/g' | sed 's/ /-/g' | sed 's/^-*\|-*$/''/g') 61 | cat <<-EOF | kubectl apply -f - >/dev/null 2>&1 62 | apiVersion: provisioning.cattle.io/v1 63 | kind: Cluster 64 | metadata: 65 | name: ${normalizedname} 66 | namespace: fleet-default 67 | spec: {} 68 | EOF 69 | MANIFEST="$(kubectl get clusterregistrationtokens.management.cattle.io -n "$(kubectl get clusters.provisioning.cattle.io -n fleet-default "${normalizedname}" -o jsonpath='{.status.clusterName}')" default-token -o jsonpath='{.status.manifestUrl}')" 70 | DESTKUBECONFIG=$(get_cluster_kubeconfig "${clustername}") 71 | curl --insecure -sfL ${MANIFEST} | kubectl --kubeconfig ${DESTKUBECONFIG} apply -f - >/dev/null 2>&1 72 | rm -f "${DESTKUBECONFIG}" 73 | done 74 | 75 | rm -f "${RANCHERKUBE}" 76 | } 77 | 78 | PASSWORD="" 79 | while getopts ":p:" opt; do 80 | case $opt in 81 | p) 82 | PASSWORD=$OPTARG 83 | ;; 84 | \?) 85 | echo "Invalid option: -$OPTARG" >&2 86 | usage 87 | ;; 88 | :) 89 | echo "Option -$OPTARG requires an argument." >&2 90 | usage 91 | ;; 92 | esac 93 | done 94 | 95 | if [ -z "$PASSWORD" ]; then 96 | echo "Error: Password is required." 1>&2 97 | usage 98 | fi 99 | 100 | if [ ${#PASSWORD} -lt 12 ]; then 101 | die "Error: Password must be at least 12 characters long." 1 102 | fi 103 | 104 | [ ! -f "./terraform.tfstate" ] && die "Error: ./terraform.tfstate does not exist." 1 105 | 106 | OUTPUT=$(terraform output -json) 107 | 108 | [ "${OUTPUT}" == "{}" ] && die "Error. terraform output is '{}'." 
1 109 | 110 | RANCHERCLUSTER=$(echo ${OUTPUT} | jq -r 'first(.clusters_output.value.rancher_urls | keys[])') 111 | RANCHERURL=$(echo ${OUTPUT} | jq -r ".clusters_output.value.rancher_urls[\"${RANCHERCLUSTER}\"].rancher_url") 112 | RANCHERPASS=$(echo ${OUTPUT} | jq -r ".clusters_output.value.rancher_urls[\"${RANCHERCLUSTER}\"].rancher_initial_password_base64" | base64 -d) 113 | OTHERCLUSTERS=$(echo ${OUTPUT} | jq -r ".clusters_output.value.cluster_details | keys[] | select(. != \"${RANCHERCLUSTER}\")") 114 | 115 | prechecks 116 | wait_for_rancher 117 | bootstrap_rancher 118 | clusters_to_rancher 119 | -------------------------------------------------------------------------------- /examples/demo_cluster/main.tf: -------------------------------------------------------------------------------- 1 | provider "equinix" { 2 | auth_token = var.metal_auth_token 3 | } 4 | 5 | module "demo" { 6 | # source = "equinix-labs/metal-k3s/equinix" 7 | source = "../.." 8 | 9 | metal_project_id = var.metal_project_id 10 | global_ip = var.global_ip 11 | deploy_demo = var.deploy_demo 12 | clusters = var.clusters 13 | } 14 | -------------------------------------------------------------------------------- /examples/demo_cluster/outputs.tf: -------------------------------------------------------------------------------- 1 | output "clusters_output" { 2 | description = "Passthrough of the root module output" 3 | value = module.demo 4 | } 5 | -------------------------------------------------------------------------------- /examples/demo_cluster/terraform.tfvars.example: -------------------------------------------------------------------------------- 1 | metal_auth_token="your_token_here" #This must be a user API token 2 | metal_project_id="your_project_id" 3 | clusters = [ 4 | { 5 | name = "FR DEV Cluster" 6 | rancher_flavor = "stable" 7 | ip_pool_count = 1 8 | kube_version = "v1.29.9+k3s1" 9 | }, 10 | { 11 | name = "SV DEV Cluster" 12 | metro = "SV" 13 | node_count = 1 14 | kube_version = 
"v1.30.3+rke2r1" 15 | }, 16 | { 17 | name = "SV Production" 18 | ip_pool_count = 4 19 | ha = true 20 | metro = "SV" 21 | node_count = 3 22 | } 23 | ] 24 | 25 | global_ip = true 26 | deploy_demo = true 27 | -------------------------------------------------------------------------------- /examples/demo_cluster/variables.tf: -------------------------------------------------------------------------------- 1 | variable "metal_auth_token" { 2 | type = string 3 | sensitive = true 4 | description = "Your Equinix Metal API key" 5 | } 6 | 7 | variable "metal_project_id" { 8 | type = string 9 | description = "Your Equinix Metal Project ID" 10 | } 11 | 12 | variable "global_ip" { 13 | type = bool 14 | description = "Enables a global anycast IPv4 that will be shared for all clusters in all metros" 15 | default = false 16 | } 17 | 18 | variable "deploy_demo" { 19 | type = bool 20 | description = "Deploys a simple demo using a global IP as ingress and a hello-kubernetes pods" 21 | default = false 22 | } 23 | 24 | variable "clusters" { 25 | description = "Cluster definition" 26 | type = list(object({ 27 | name = optional(string, "Demo cluster") 28 | metro = optional(string, "FR") 29 | plan_control_plane = optional(string, "c3.small.x86") 30 | plan_node = optional(string, "c3.small.x86") 31 | node_count = optional(number, 0) 32 | ha = optional(bool, false) 33 | os = optional(string, "debian_11") 34 | control_plane_hostnames = optional(string, "cp") 35 | node_hostnames = optional(string, "node") 36 | custom_token = optional(string, "") 37 | ip_pool_count = optional(number, 0) 38 | kube_version = optional(string, "") 39 | metallb_version = optional(string, "") 40 | rancher_version = optional(string, "") 41 | rancher_flavor = optional(string, "") 42 | custom_rancher_password = optional(string, "") 43 | })) 44 | default = [{}] 45 | } 46 | -------------------------------------------------------------------------------- /examples/demo_cluster/versions.tf: 
-------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3" 3 | required_providers { 4 | equinix = { 5 | source = "equinix/equinix" 6 | version = ">= 1.14.2" 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | global_ip_cidr = var.global_ip ? equinix_metal_reserved_ip_block.global_ip[0].cidr_notation : "" 3 | } 4 | 5 | ################################################################################ 6 | # K8s Cluster In-line Module 7 | ################################################################################ 8 | 9 | module "kube_cluster" { 10 | source = "./modules/kube_cluster" 11 | 12 | for_each = { for cluster in var.clusters : cluster.name => cluster } 13 | 14 | cluster_name = each.key 15 | metal_metro = each.value.metro 16 | plan_control_plane = each.value.plan_control_plane 17 | plan_node = each.value.plan_node 18 | node_count = each.value.node_count 19 | ha = each.value.ha 20 | os = each.value.os 21 | control_plane_hostnames = each.value.control_plane_hostnames 22 | node_hostnames = each.value.node_hostnames 23 | custom_token = each.value.custom_token 24 | kube_version = each.value.kube_version 25 | metallb_version = each.value.metallb_version 26 | ip_pool_count = each.value.ip_pool_count 27 | rancher_flavor = each.value.rancher_flavor 28 | rancher_version = each.value.rancher_version 29 | custom_rancher_password = each.value.custom_rancher_password 30 | metal_project_id = var.metal_project_id 31 | deploy_demo = var.deploy_demo 32 | global_ip_cidr = local.global_ip_cidr 33 | } 34 | 35 | ################################################################################ 36 | # Global IP 37 | ################################################################################ 38 | 39 | resource "equinix_metal_reserved_ip_block" 
"global_ip" { 40 | project_id = var.metal_project_id 41 | type = "global_ipv4" 42 | quantity = 1 43 | count = var.global_ip ? 1 : 0 44 | description = "Global IP to Load Balance between all metros" 45 | } 46 | -------------------------------------------------------------------------------- /modules/kube_cluster/README.md: -------------------------------------------------------------------------------- 1 | # K3s/RKE2 Cluster In-line Module 2 | 3 | This in-line module deploys the K3s/RKE2 cluster. 4 | 5 | ## Notes 6 | 7 | * Terraform tries to replace all variables within the templated script, so it fails 8 | 9 | As a workaround, an extra dollar symbol ($) has been added to the variables that doesn't need to be replaced by terraform templating. 10 | 11 | See [this](https://discuss.hashicorp.com/t/invalid-value-for-vars-parameter-vars-map-does-not-contain-key-issue/12074/4) and [this](https://github.com/hashicorp/terraform/issues/23384) for more information. 12 | 13 | * ServiceLB disabled 14 | 15 | `--disable servicelb` is required for metallb to work 16 | 17 | ### Requirements 18 | 19 | | Name | Version | 20 | |------|---------| 21 | | [terraform](#requirement\_terraform) | >= 1.3 | 22 | | [equinix](#requirement\_equinix) | >= 1.14.2 | 23 | | [random](#requirement\_random) | >= 3.5.1 | 24 | 25 | ### Providers 26 | 27 | | Name | Version | 28 | |------|---------| 29 | | [equinix](#provider\_equinix) | >= 1.14.2 | 30 | | [random](#provider\_random) | >= 3.5.1 | 31 | 32 | ### Modules 33 | 34 | No modules. 
35 | 36 | ### Resources 37 | 38 | | Name | Type | 39 | |------|------| 40 | | [equinix_metal_bgp_session.all_in_one](https://registry.terraform.io/providers/equinix/equinix/latest/docs/resources/metal_bgp_session) | resource | 41 | | [equinix_metal_bgp_session.control_plane_master](https://registry.terraform.io/providers/equinix/equinix/latest/docs/resources/metal_bgp_session) | resource | 42 | | [equinix_metal_bgp_session.control_plane_second](https://registry.terraform.io/providers/equinix/equinix/latest/docs/resources/metal_bgp_session) | resource | 43 | | [equinix_metal_bgp_session.control_plane_third](https://registry.terraform.io/providers/equinix/equinix/latest/docs/resources/metal_bgp_session) | resource | 44 | | [equinix_metal_device.all_in_one](https://registry.terraform.io/providers/equinix/equinix/latest/docs/resources/metal_device) | resource | 45 | | [equinix_metal_device.control_plane_master](https://registry.terraform.io/providers/equinix/equinix/latest/docs/resources/metal_device) | resource | 46 | | [equinix_metal_device.control_plane_others](https://registry.terraform.io/providers/equinix/equinix/latest/docs/resources/metal_device) | resource | 47 | | [equinix_metal_device.nodes](https://registry.terraform.io/providers/equinix/equinix/latest/docs/resources/metal_device) | resource | 48 | | [equinix_metal_reserved_ip_block.api_vip_addr](https://registry.terraform.io/providers/equinix/equinix/latest/docs/resources/metal_reserved_ip_block) | resource | 49 | | [equinix_metal_reserved_ip_block.ingress_addr](https://registry.terraform.io/providers/equinix/equinix/latest/docs/resources/metal_reserved_ip_block) | resource | 50 | | [equinix_metal_reserved_ip_block.ip_pool](https://registry.terraform.io/providers/equinix/equinix/latest/docs/resources/metal_reserved_ip_block) | resource | 51 | | [random_string.random_password](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource | 52 | | 
[random_string.random_token](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource | 53 | 54 | ### Inputs 55 | 56 | | Name | Description | Type | Default | Required | 57 | |------|-------------|------|---------|:--------:| 58 | | [metal\_metro](#input\_metal\_metro) | Equinix Metal Metro | `string` | n/a | yes | 59 | | [metal\_project\_id](#input\_metal\_project\_id) | Equinix Metal Project ID | `string` | n/a | yes | 60 | | [cluster\_name](#input\_cluster\_name) | Cluster name | `string` | `"Cluster"` | no | 61 | | [control\_plane\_hostnames](#input\_control\_plane\_hostnames) | Control plane hostname prefix | `string` | `"cp"` | no | 62 | | [custom\_rancher\_password](#input\_custom\_rancher\_password) | Rancher initial password (autogenerated if not provided) | `string` | `null` | no | 63 | | [custom\_token](#input\_custom\_token) | Token used for nodes to join the cluster (autogenerated otherwise) | `string` | `null` | no | 64 | | [deploy\_demo](#input\_deploy\_demo) | Deploys a simple demo using a global IP as ingress and a hello-kubernetes pods | `bool` | `false` | no | 65 | | [global\_ip\_cidr](#input\_global\_ip\_cidr) | Global Anycast IP that will be mapped on all metros via BGP | `string` | `null` | no | 66 | | [ha](#input\_ha) | HA (aka 3 control plane nodes) | `bool` | `false` | no | 67 | | [ip\_pool\_count](#input\_ip\_pool\_count) | Number of public IPv4 per metro to be used as LoadBalancers with MetalLB (it needs to be power of 2 between 0 and 256 as required by Equinix Metal) | `number` | `0` | no | 68 | | [kube\_version](#input\_kube\_version) | K3s/RKE2 version to be installed. Empty for latest K3s | `string` | `""` | no | 69 | | [metallb\_version](#input\_metallb\_version) | MetalLB version to be installed. 
Empty for latest | `string` | `""` | no | 70 | | [node\_count](#input\_node\_count) | Number of nodes | `number` | `"0"` | no | 71 | | [node\_hostnames](#input\_node\_hostnames) | Node hostname prefix | `string` | `"node"` | no | 72 | | [os](#input\_os) | Operating system | `string` | `"debian_11"` | no | 73 | | [plan\_control\_plane](#input\_plan\_control\_plane) | Control plane type/size | `string` | `"c3.small.x86"` | no | 74 | | [plan\_node](#input\_plan\_node) | Node type/size | `string` | `"c3.small.x86"` | no | 75 | | [rancher\_flavor](#input\_rancher\_flavor) | Rancher flavor to be installed (prime, latest, stable or alpha). Empty to not install it | `string` | `""` | no | 76 | | [rancher\_version](#input\_rancher\_version) | Rancher version to be installed (vX.Y.Z). Empty for latest | `string` | `""` | no | 77 | 78 | ### Outputs 79 | 80 | | Name | Description | 81 | |------|-------------| 82 | | [ingress\_ip](#output\_ingress\_ip) | Ingress IP | 83 | | [ip\_pool\_cidr](#output\_ip\_pool\_cidr) | IP Pool for LoadBalancer SVCs | 84 | | [kube\_api\_ip](#output\_kube\_api\_ip) | K8s API IPs | 85 | | [nodes\_details](#output\_nodes\_details) | Nodes external and internal IPs | 86 | | [rancher\_address](#output\_rancher\_address) | Rancher URL | 87 | | [rancher\_password](#output\_rancher\_password) | Rancher initial password | 88 | 89 | -------------------------------------------------------------------------------- /modules/kube_cluster/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | token = coalesce(var.custom_token, random_string.random_token.result) 3 | rancher_pass = var.custom_rancher_password != null ? coalesce(var.custom_rancher_password, random_string.random_password.result) : null 4 | api_vip = var.ha ? equinix_metal_reserved_ip_block.api_vip_addr[0].address : equinix_metal_device.all_in_one[0].network[0].address 5 | ingress_ip = var.ip_pool_count > 0 ? 
equinix_metal_reserved_ip_block.ingress_addr[0].address : "" 6 | ip_pool_cidr = var.ip_pool_count > 0 ? equinix_metal_reserved_ip_block.ip_pool[0].cidr_notation : "" 7 | } 8 | 9 | resource "random_string" "random_token" { 10 | length = 16 11 | special = false 12 | } 13 | 14 | resource "random_string" "random_password" { 15 | length = 16 16 | special = false 17 | } 18 | 19 | ################################################################################ 20 | # Control Plane 21 | ################################################################################ 22 | 23 | resource "equinix_metal_device" "control_plane_master" { 24 | hostname = "${lower(replace(var.cluster_name, "/\\W|_|\\s/", "-"))}-${var.control_plane_hostnames}-0" 25 | plan = var.plan_control_plane 26 | metro = var.metal_metro 27 | operating_system = var.os 28 | billing_cycle = "hourly" 29 | project_id = var.metal_project_id 30 | count = var.ha ? 1 : 0 31 | description = var.cluster_name 32 | user_data = templatefile("${path.module}/templates/user-data.tftpl", { 33 | token = local.token, 34 | API_IP = local.api_vip, 35 | ingress_ip = local.ingress_ip, 36 | global_ip_cidr = var.global_ip_cidr, 37 | ip_pool = local.ip_pool_cidr, 38 | kube_version = var.kube_version, 39 | metallb_version = var.metallb_version, 40 | deploy_demo = var.deploy_demo, 41 | rancher_flavor = var.rancher_flavor, 42 | rancher_version = var.rancher_version, 43 | rancher_pass = local.rancher_pass, 44 | node_type = "control-plane-master" }) 45 | } 46 | 47 | resource "equinix_metal_bgp_session" "control_plane_master" { 48 | device_id = equinix_metal_device.control_plane_master[0].id 49 | address_family = "ipv4" 50 | count = var.ha ? 1 : 0 51 | } 52 | 53 | resource "equinix_metal_reserved_ip_block" "api_vip_addr" { 54 | count = var.ha ? 
1 : 0 55 | project_id = var.metal_project_id 56 | metro = var.metal_metro 57 | type = "public_ipv4" 58 | quantity = 1 59 | description = "Kubernetes API IP for the ${var.cluster_name} cluster" 60 | } 61 | 62 | resource "equinix_metal_reserved_ip_block" "ingress_addr" { 63 | count = var.ip_pool_count > 0 ? 1 : 0 64 | project_id = var.metal_project_id 65 | metro = var.metal_metro 66 | type = "public_ipv4" 67 | quantity = 1 68 | description = "Ingress IP for the ${var.cluster_name} cluster" 69 | } 70 | 71 | resource "equinix_metal_device" "control_plane_others" { 72 | hostname = format("%s-%d", "${lower(replace(var.cluster_name, "/\\W|_|\\s/", "-"))}-${var.control_plane_hostnames}", count.index + 1) 73 | plan = var.plan_control_plane 74 | metro = var.metal_metro 75 | operating_system = var.os 76 | billing_cycle = "hourly" 77 | project_id = var.metal_project_id 78 | count = var.ha ? 2 : 0 79 | description = var.cluster_name 80 | depends_on = [equinix_metal_device.control_plane_master] 81 | user_data = templatefile("${path.module}/templates/user-data.tftpl", { 82 | token = local.token, 83 | API_IP = local.api_vip, 84 | ingress_ip = local.ingress_ip, 85 | global_ip_cidr = "", 86 | ip_pool = "", 87 | kube_version = var.kube_version, 88 | metallb_version = var.metallb_version, 89 | rancher_flavor = var.rancher_flavor, 90 | rancher_version = var.rancher_version, 91 | rancher_pass = local.rancher_pass, 92 | deploy_demo = false, 93 | node_type = "control-plane" }) 94 | } 95 | 96 | resource "equinix_metal_bgp_session" "control_plane_second" { 97 | device_id = equinix_metal_device.control_plane_others[0].id 98 | address_family = "ipv4" 99 | count = var.ha ? 1 : 0 100 | } 101 | 102 | resource "equinix_metal_bgp_session" "control_plane_third" { 103 | device_id = equinix_metal_device.control_plane_others[1].id 104 | address_family = "ipv4" 105 | count = var.ha ? 
1 : 0 106 | } 107 | 108 | ################################################################################ 109 | # IP Pool 110 | ################################################################################ 111 | 112 | resource "equinix_metal_reserved_ip_block" "ip_pool" { 113 | project_id = var.metal_project_id 114 | type = "public_ipv4" 115 | quantity = var.ip_pool_count 116 | metro = var.metal_metro 117 | count = var.ip_pool_count > 0 ? 1 : 0 118 | description = "IP Pool to be used for LoadBalancers via MetalLB on the ${var.cluster_name} cluster" 119 | } 120 | 121 | ################################################################################ 122 | # Nodes 123 | ################################################################################ 124 | 125 | resource "equinix_metal_device" "nodes" { 126 | hostname = format("%s-%02d", "${lower(replace(var.cluster_name, "/\\W|_|\\s/", "-"))}-${var.node_hostnames}", count.index) 127 | plan = var.plan_node 128 | metro = var.metal_metro 129 | operating_system = var.os 130 | billing_cycle = "hourly" 131 | project_id = var.metal_project_id 132 | count = var.node_count 133 | description = var.cluster_name 134 | depends_on = [equinix_metal_device.control_plane_master] 135 | user_data = templatefile("${path.module}/templates/user-data.tftpl", { 136 | token = local.token, 137 | API_IP = local.api_vip, 138 | ingress_ip = local.ingress_ip, 139 | global_ip_cidr = "", 140 | ip_pool = "", 141 | kube_version = var.kube_version, 142 | metallb_version = var.metallb_version, 143 | rancher_flavor = var.rancher_flavor, 144 | rancher_version = var.rancher_version, 145 | rancher_pass = local.rancher_pass, 146 | deploy_demo = false, 147 | node_type = "node" }) 148 | } 149 | 150 | ################################################################################ 151 | # All in One 152 | ################################################################################ 153 | 154 | resource "equinix_metal_device" "all_in_one" { 155 | hostname 
= "${lower(replace(var.cluster_name, "/\\W|_|\\s/", "-"))}-${var.control_plane_hostnames}-aio" 156 | plan = var.plan_control_plane 157 | metro = var.metal_metro 158 | operating_system = var.os 159 | billing_cycle = "hourly" 160 | project_id = var.metal_project_id 161 | count = var.ha ? 0 : 1 162 | description = var.cluster_name 163 | user_data = templatefile("${path.module}/templates/user-data.tftpl", { 164 | token = local.token, 165 | global_ip_cidr = var.global_ip_cidr, 166 | ip_pool = local.ip_pool_cidr, 167 | API_IP = "", 168 | ingress_ip = local.ingress_ip, 169 | kube_version = var.kube_version, 170 | metallb_version = var.metallb_version, 171 | deploy_demo = var.deploy_demo, 172 | rancher_flavor = var.rancher_flavor, 173 | rancher_version = var.rancher_version, 174 | rancher_pass = local.rancher_pass, 175 | node_type = "all-in-one" }) 176 | } 177 | 178 | # Despite being used or not, enable BGP just in case 179 | resource "equinix_metal_bgp_session" "all_in_one" { 180 | device_id = equinix_metal_device.all_in_one[0].id 181 | address_family = "ipv4" 182 | count = var.ha ? 0 : 1 183 | } 184 | -------------------------------------------------------------------------------- /modules/kube_cluster/outputs.tf: -------------------------------------------------------------------------------- 1 | output "kube_api_ip" { 2 | value = local.api_vip 3 | description = "K8s API IPs" 4 | } 5 | 6 | output "rancher_address" { 7 | value = var.rancher_flavor != "" ? "https://rancher.${local.ingress_ip}.sslip.io" : null 8 | description = "Rancher URL" 9 | } 10 | 11 | output "rancher_password" { 12 | value = var.rancher_flavor != "" ? local.rancher_pass : null 13 | description = "Rancher initial password" 14 | } 15 | 16 | output "ingress_ip" { 17 | value = var.ip_pool_count > 0 ? local.ingress_ip : null 18 | description = "Ingress IP" 19 | } 20 | 21 | output "ip_pool_cidr" { 22 | value = var.ip_pool_count > 0 ? 
local.ip_pool_cidr : null 23 | description = "IP Pool for LoadBalancer SVCs" 24 | } 25 | 26 | output "nodes_details" { 27 | value = { 28 | for node in flatten([equinix_metal_device.control_plane_master, equinix_metal_device.control_plane_others, equinix_metal_device.nodes, equinix_metal_device.all_in_one]) : node.hostname => { 29 | node_private_ipv4 = node.access_private_ipv4 30 | node_public_ipv4 = node.access_public_ipv4 31 | } 32 | } 33 | description = "Nodes external and internal IPs" 34 | } 35 | -------------------------------------------------------------------------------- /modules/kube_cluster/templates/user-data.tftpl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | die(){ 5 | echo $${1} >&2 6 | exit $${2} 7 | } 8 | 9 | prechecks(){ 10 | # Set OS 11 | source /etc/os-release 12 | case $${ID} in 13 | "debian") 14 | export PKGMANAGER="apt" 15 | ;; 16 | "sles") 17 | export PKGMANAGER="zypper" 18 | ;; 19 | "sle-micro") 20 | export PKGMANAGER="transactional-update" 21 | ;; 22 | *) 23 | die "Unsupported OS $${ID}" 1 24 | ;; 25 | esac 26 | # Set ARCH 27 | ARCH=$(uname -m) 28 | case $${ARCH} in 29 | "amd64") 30 | export ARCH=amd64 31 | export SUFFIX= 32 | ;; 33 | "x86_64") 34 | export ARCH=amd64 35 | export SUFFIX= 36 | ;; 37 | "arm64") 38 | export ARCH=arm64 39 | export SUFFIX=-$${ARCH} 40 | ;; 41 | "s390x") 42 | export ARCH=s390x 43 | export SUFFIX=-$${ARCH} 44 | ;; 45 | "aarch64") 46 | export ARCH=arm64 47 | export SUFFIX=-$${ARCH} 48 | ;; 49 | arm*) # unquoted glob so 32-bit ARM values from uname -m (e.g. "armv7l") match; the previous quoted "arm*" only matched that literal string and was dead code 50 | export ARCH=arm 51 | export SUFFIX=-$${ARCH}hf 52 | ;; 53 | *) 54 | die "Unsupported architecture $${ARCH}" 1 55 | ;; 56 | esac 57 | } 58 | 59 | prereqs(){ 60 | # Required packages 61 | case $${PKGMANAGER} in 62 | "apt") 63 | apt update 64 | apt install -y jq curl 65 | ;; 66 | "zypper") 67 | zypper refresh 68 | zypper install -y jq curl 69 | ;; 70 | esac 71 | } 72 | 73 | wait_for_kube_api(){ 74 | # Wait for the node to be
available, meaning the K8s API is available 75 | while ! kubectl wait --for condition=ready node $(cat /etc/hostname | tr '[:upper:]' '[:lower:]') --timeout=60s; do sleep 2 ; done 76 | } 77 | 78 | install_eco(){ 79 | # Wait for K3s to be up. It should be up already but just in case. 80 | wait_for_kube_api 81 | 82 | # Download helm as required to install endpoint-copier-operator 83 | command -v helm || curl -fsSL https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 |bash 84 | 85 | # Add the SUSE Edge charts and deploy ECO 86 | helm repo add suse-edge https://suse-edge.github.io/charts 87 | helm repo update 88 | helm install --create-namespace -n endpoint-copier-operator endpoint-copier-operator suse-edge/endpoint-copier-operator 89 | 90 | # Configure the MetalLB IP Address pool for the VIP 91 | cat <<-EOF | kubectl apply -f - 92 | apiVersion: metallb.io/v1beta1 93 | kind: IPAddressPool 94 | metadata: 95 | name: kubernetes-vip-ip-pool 96 | namespace: metallb-system 97 | spec: 98 | addresses: 99 | - ${API_IP}/32 100 | serviceAllocation: 101 | priority: 100 102 | namespaces: 103 | - default 104 | EOF 105 | 106 | # Create the kubernetes-vip service that will be updated by e-c-o with the control plane hosts 107 | if [[ $${KUBETYPE} == "k3s" ]]; then 108 | cat <<-EOF | kubectl apply -f - 109 | apiVersion: v1 110 | kind: Service 111 | metadata: 112 | name: kubernetes-vip 113 | namespace: default 114 | spec: 115 | internalTrafficPolicy: Cluster 116 | ipFamilies: 117 | - IPv4 118 | ipFamilyPolicy: SingleStack 119 | ports: 120 | - name: k8s-api 121 | port: 6443 122 | protocol: TCP 123 | targetPort: 6443 124 | type: LoadBalancer 125 | EOF 126 | fi 127 | if [[ $${KUBETYPE} == "rke2" ]]; then 128 | cat <<-EOF | kubectl apply -f - 129 | apiVersion: v1 130 | kind: Service 131 | metadata: 132 | name: kubernetes-vip 133 | namespace: default 134 | spec: 135 | internalTrafficPolicy: Cluster 136 | ipFamilies: 137 | - IPv4 138 | ipFamilyPolicy: SingleStack 139 | ports: 
140 | - name: k8s-api 141 | port: 6443 142 | protocol: TCP 143 | targetPort: 6443 144 | - name: rke2-api 145 | port: 9345 146 | protocol: TCP 147 | targetPort: 9345 148 | type: LoadBalancer 149 | EOF 150 | fi 151 | } 152 | 153 | install_metallb(){ 154 | %{ if metallb_version != "" ~} 155 | export METALLB_VERSION=${metallb_version} 156 | %{ else ~} 157 | export METALLB_VERSION=$(curl --silent "https://api.github.com/repos/metallb/metallb/releases/latest" | jq -r .tag_name) 158 | %{ endif ~} 159 | 160 | # Wait for K3s to be up. It should be up already but just in case. 161 | wait_for_kube_api 162 | 163 | # Apply the MetalLB manifest 164 | kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/$${METALLB_VERSION}/config/manifests/metallb-native.yaml 165 | 166 | # Wait for MetalLB to be up 167 | while ! kubectl wait --for condition=ready -n metallb-system $(kubectl get pods -n metallb-system -l component=controller -o name) --timeout=10s; do sleep 2 ; done 168 | 169 | # In order to configure MetalLB, the metadata information is required. 170 | # BGP info can take a few seconds to be populated, retry if that's the case 171 | INTERNAL_IP="null" 172 | while [ $${INTERNAL_IP} == "null" ]; do 173 | echo "BGP data still not available..." 
174 | sleep 5 175 | METADATA=$(curl -s https://metadata.platformequinix.com/metadata) 176 | INTERNAL_IP=$(echo $${METADATA} | jq -r '.bgp_neighbors[0].customer_ip') 177 | done 178 | PEER_IP_1=$(echo $${METADATA} | jq -r '.bgp_neighbors[0].peer_ips[0]') 179 | PEER_IP_2=$(echo $${METADATA} | jq -r '.bgp_neighbors[0].peer_ips[1]') 180 | ASN=$(echo $${METADATA} | jq -r '.bgp_neighbors[0].customer_as') 181 | ASN_AS=$(echo $${METADATA} | jq -r '.bgp_neighbors[0].peer_as') 182 | 183 | %{ if global_ip_cidr != "" ~} 184 | # Configure the IPAddressPool for the Global IP if present 185 | cat <<- EOF | kubectl apply -f - 186 | apiVersion: metallb.io/v1beta1 187 | kind: IPAddressPool 188 | metadata: 189 | name: anycast-ip 190 | namespace: metallb-system 191 | spec: 192 | addresses: 193 | - ${global_ip_cidr} 194 | autoAssign: true 195 | avoidBuggyIPs: false 196 | serviceAllocation: 197 | namespaces: 198 | - ingress-nginx-global 199 | priority: 100 200 | serviceSelectors: 201 | - matchExpressions: 202 | - key: ingress-type 203 | operator: In 204 | values: 205 | - ingress-nginx-global 206 | EOF 207 | %{ endif ~} 208 | 209 | %{ if ingress_ip != "" ~} 210 | if [ "$${KUBETYPE}" == "k3s" ]; then 211 | # Configure an IPAddressPool for Ingress only 212 | cat <<- EOF | kubectl apply -f - 213 | apiVersion: metallb.io/v1beta1 214 | kind: IPAddressPool 215 | metadata: 216 | name: ingress 217 | namespace: metallb-system 218 | spec: 219 | addresses: 220 | - ${ingress_ip}/32 221 | serviceAllocation: 222 | priority: 100 223 | serviceSelectors: 224 | - matchExpressions: 225 | - {key: app.kubernetes.io/name, operator: In, values: [traefik]} 226 | EOF 227 | fi 228 | if [ "$${KUBETYPE}" == "rke2" ]; then 229 | # Configure an IPAddressPool for Ingress only 230 | cat <<- EOF | kubectl apply -f - 231 | apiVersion: metallb.io/v1beta1 232 | kind: IPAddressPool 233 | metadata: 234 | name: ingress 235 | namespace: metallb-system 236 | spec: 237 | addresses: 238 | - ${ingress_ip}/32 239 | 
serviceAllocation: 240 | priority: 100 241 | serviceSelectors: 242 | - matchExpressions: 243 | - {key: app.kubernetes.io/name, operator: In, values: [rke2-ingress-nginx]} 244 | EOF 245 | fi 246 | %{ endif ~} 247 | 248 | %{ if ip_pool != "" ~} 249 | # Configure the IPAddressPool for the IP pool if present 250 | cat <<- EOF | kubectl apply -f - 251 | apiVersion: metallb.io/v1beta1 252 | kind: IPAddressPool 253 | metadata: 254 | name: ippool 255 | namespace: metallb-system 256 | spec: 257 | addresses: 258 | - ${ip_pool} 259 | autoAssign: false 260 | EOF 261 | %{ endif ~} 262 | 263 | # Configure the BGPPeer for each peer IP (each peer needs a unique metadata.name; reusing the same name makes the second apply overwrite the first, leaving a single BGP peer) 264 | cat <<- EOF | kubectl apply -f - 265 | apiVersion: metallb.io/v1beta2 266 | kind: BGPPeer 267 | metadata: 268 | name: equinix-metal-peer-1 269 | namespace: metallb-system 270 | spec: 271 | peerASN: $${ASN_AS} 272 | myASN: $${ASN} 273 | peerAddress: $${PEER_IP_1} 274 | sourceAddress: $${INTERNAL_IP} 275 | EOF 276 | 277 | cat <<- EOF | kubectl apply -f - 278 | apiVersion: metallb.io/v1beta2 279 | kind: BGPPeer 280 | metadata: 281 | name: equinix-metal-peer-2 282 | namespace: metallb-system 283 | spec: 284 | peerASN: $${ASN_AS} 285 | myASN: $${ASN} 286 | peerAddress: $${PEER_IP_2} 287 | sourceAddress: $${INTERNAL_IP} 288 | EOF 289 | 290 | # Enable the BGPAdvertisement, only to be executed in the control-plane nodes 291 | cat <<- EOF | kubectl apply -f - 292 | apiVersion: metallb.io/v1beta1 293 | kind: BGPAdvertisement 294 | metadata: 295 | name: bgp-peers 296 | namespace: metallb-system 297 | spec: 298 | nodeSelectors: 299 | - matchLabels: 300 | node-role.kubernetes.io/control-plane: "true" 301 | EOF 302 | } 303 | 304 | install_k3s(){ 305 | # Download the K3s installer script 306 | curl -L --output k3s_installer.sh https://get.k3s.io && install -m755 k3s_installer.sh /usr/local/bin/ 307 | 308 | %{ if node_type == "control-plane" ~} 309 | # If the node to be installed is the second or third control plane or extra nodes, wait for the API to be up
310 | # Wait for the first control plane node to be up 311 | while ! curl -m 10 -s -k -o /dev/null https://${API_IP}:6443 ; do echo "API still not reachable"; sleep 2 ; done 312 | %{ endif ~} 313 | %{ if node_type == "node" ~} 314 | # Wait for the first control plane node to be up 315 | while ! curl -m 10 -s -k -o /dev/null https://${API_IP}:6443 ; do echo "API still not reachable"; sleep 2 ; done 316 | %{ endif ~} 317 | 318 | export INSTALL_K3S_SKIP_ENABLE=false 319 | export INSTALL_K3S_SKIP_START=false 320 | export K3S_TOKEN="${token}" 321 | export NODE_IP=$(curl -s https://metadata.platformequinix.com/metadata | jq -r '.network.addresses[] | select(.public == false and .address_family == 4) |.address') 322 | export NODE_EXTERNAL_IP=$(curl -s https://metadata.platformequinix.com/metadata | jq -r '.network.addresses[] | select(.public == true and .address_family == 4) |.address') 323 | %{ if node_type == "all-in-one" ~} 324 | %{ if global_ip_cidr != "" ~} 325 | export INSTALL_K3S_EXEC="server --write-kubeconfig-mode=644 --disable=servicelb --node-ip $${NODE_IP} --node-external-ip $${NODE_EXTERNAL_IP}" 326 | %{ else ~} 327 | %{ if ip_pool != "" ~} 328 | export INSTALL_K3S_EXEC="server --write-kubeconfig-mode=644 --disable=servicelb --node-ip $${NODE_IP} --node-external-ip $${NODE_EXTERNAL_IP}" 329 | %{ else ~} 330 | export INSTALL_K3S_EXEC="server --write-kubeconfig-mode=644 --node-ip $${NODE_IP} --node-external-ip $${NODE_EXTERNAL_IP}" 331 | %{ endif ~} 332 | %{ endif ~} 333 | %{ endif ~} 334 | %{ if node_type == "control-plane-master" ~} 335 | export INSTALL_K3S_EXEC="server --cluster-init --write-kubeconfig-mode=644 --tls-san=${API_IP} --tls-san=${API_IP}.sslip.io --disable=servicelb --node-ip $${NODE_IP} --node-external-ip $${NODE_EXTERNAL_IP}" 336 | %{ endif ~} 337 | %{ if node_type == "control-plane" ~} 338 | export INSTALL_K3S_EXEC="server --server https://${API_IP}:6443 --write-kubeconfig-mode=644 --node-ip $${NODE_IP} --node-external-ip 
$${NODE_EXTERNAL_IP}" 339 | %{ endif ~} 340 | %{ if node_type == "node" ~} 341 | export INSTALL_K3S_EXEC="agent --server https://${API_IP}:6443 --node-ip $${NODE_IP} --node-external-ip $${NODE_EXTERNAL_IP}" 342 | %{ endif ~} 343 | %{ if kube_version != "" ~} 344 | export INSTALL_K3S_VERSION="${kube_version}" 345 | %{ endif ~} 346 | /usr/local/bin/k3s_installer.sh 347 | } 348 | 349 | install_rke2(){ 350 | # Download the RKE2 installer script 351 | curl -L --output rke2_installer.sh https://get.rke2.io && install -m755 rke2_installer.sh /usr/local/bin/ 352 | 353 | # RKE2 configuration is set via config.yaml file 354 | mkdir -p /etc/rancher/rke2/ 355 | 356 | %{ if node_type == "control-plane" ~} 357 | # If the node to be installed is the second or third control plane or extra nodes, wait for the API to be up 358 | # Wait for the first control plane node to be up 359 | while ! curl -m 10 -s -k -o /dev/null https://${API_IP}:6443 ; do echo "API still not reachable"; sleep 2 ; done 360 | %{ endif ~} 361 | %{ if node_type == "node" ~} 362 | # Wait for the first control plane node to be up 363 | while ! 
curl -m 10 -s -k -o /dev/null https://${API_IP}:6443 ; do echo "API still not reachable"; sleep 2 ; done 364 | %{ endif ~} 365 | 366 | export RKE2_TOKEN="${token}" 367 | export NODE_IP=$(curl -s https://metadata.platformequinix.com/metadata | jq -r '.network.addresses[] | select(.public == false and .address_family == 4) |.address') 368 | export NODE_EXTERNAL_IP=$(curl -s https://metadata.platformequinix.com/metadata | jq -r '.network.addresses[] | select(.public == true and .address_family == 4) |.address') 369 | %{ if node_type == "all-in-one" ~} 370 | export INSTALL_RKE2_TYPE="server" 371 | cat <<- EOF >> /etc/rancher/rke2/config.yaml 372 | token: $${RKE2_TOKEN} 373 | write-kubeconfig-mode: "0644" 374 | node-ip: $${NODE_IP} 375 | node-external-ip: $${NODE_EXTERNAL_IP} 376 | EOF 377 | %{ endif ~} 378 | %{ if node_type == "control-plane-master" ~} 379 | export INSTALL_RKE2_TYPE="server" 380 | cat <<- EOF >> /etc/rancher/rke2/config.yaml 381 | token: $${RKE2_TOKEN} 382 | write-kubeconfig-mode: "0644" 383 | node-ip: $${NODE_IP} 384 | node-external-ip: $${NODE_EXTERNAL_IP} 385 | tls-san: 386 | - "${API_IP}" 387 | - "${API_IP}.sslip.io" 388 | EOF 389 | %{ endif ~} 390 | %{ if node_type == "control-plane" ~} 391 | export INSTALL_RKE2_TYPE="server" 392 | cat <<- EOF >> /etc/rancher/rke2/config.yaml 393 | server: https://${API_IP}:9345 394 | token: $${RKE2_TOKEN} 395 | write-kubeconfig-mode: "0644" 396 | node-ip: $${NODE_IP} 397 | node-external-ip: $${NODE_EXTERNAL_IP} 398 | EOF 399 | %{ endif ~} 400 | %{ if node_type == "node" ~} 401 | export INSTALL_RKE2_TYPE="agent" 402 | cat <<- EOF >> /etc/rancher/rke2/config.yaml 403 | server: https://${API_IP}:9345 404 | token: $${RKE2_TOKEN} 405 | write-kubeconfig-mode: "0644" 406 | node-ip: $${NODE_IP} 407 | node-external-ip: $${NODE_EXTERNAL_IP} 408 | EOF 409 | %{ endif ~} 410 | %{ if ingress_ip != "" ~} 411 | mkdir -p /var/lib/rancher/rke2/server/manifests/ 412 | cat <<- EOF >> 
/var/lib/rancher/rke2/server/manifests/rke2-ingress-config.yaml 413 | apiVersion: helm.cattle.io/v1 414 | kind: HelmChartConfig 415 | metadata: 416 | name: rke2-ingress-nginx 417 | namespace: kube-system 418 | spec: 419 | valuesContent: |- 420 | controller: 421 | config: 422 | use-forwarded-headers: "true" 423 | enable-real-ip: "true" 424 | publishService: 425 | enabled: true 426 | service: 427 | enabled: true 428 | type: LoadBalancer 429 | externalTrafficPolicy: Local 430 | EOF 431 | %{ endif ~} 432 | %{ if kube_version != "" ~} 433 | export INSTALL_RKE2_VERSION="${kube_version}" 434 | %{ endif ~} 435 | /usr/local/bin/rke2_installer.sh 436 | systemctl enable --now rke2-$${INSTALL_RKE2_TYPE} 437 | } 438 | 439 | deploy_demo(){ 440 | # Check if the demo is already deployed 441 | if kubectl get deployment -n hello-kubernetes hello-kubernetes -o name > /dev/null 2>&1; then exit 0; fi 442 | 443 | if [ "$${KUBETYPE}" == "rke2" ]; then 444 | # Wait for the rke2-ingress-nginx-controller DS to be available if using RKE2 445 | while ! 
kubectl rollout status daemonset -n kube-system rke2-ingress-nginx-controller --timeout=60s; do sleep 2 ; done 446 | fi 447 | # I cannot make split work in Terraform templates 448 | IP=$(echo ${global_ip_cidr} | cut -d/ -f1) 449 | cat <<- EOF | kubectl apply -f - 450 | --- 451 | apiVersion: v1 452 | kind: Namespace 453 | metadata: 454 | name: hello-kubernetes 455 | --- 456 | apiVersion: v1 457 | kind: ServiceAccount 458 | metadata: 459 | name: hello-kubernetes 460 | namespace: hello-kubernetes 461 | labels: 462 | app.kubernetes.io/name: hello-kubernetes 463 | --- 464 | apiVersion: v1 465 | kind: Service 466 | metadata: 467 | name: hello-kubernetes 468 | namespace: hello-kubernetes 469 | labels: 470 | app.kubernetes.io/name: hello-kubernetes 471 | spec: 472 | type: ClusterIP 473 | ports: 474 | - port: 80 475 | targetPort: http 476 | protocol: TCP 477 | name: http 478 | selector: 479 | app.kubernetes.io/name: hello-kubernetes 480 | --- 481 | apiVersion: apps/v1 482 | kind: Deployment 483 | metadata: 484 | name: hello-kubernetes 485 | namespace: hello-kubernetes 486 | labels: 487 | app.kubernetes.io/name: hello-kubernetes 488 | spec: 489 | replicas: 2 490 | selector: 491 | matchLabels: 492 | app.kubernetes.io/name: hello-kubernetes 493 | template: 494 | metadata: 495 | labels: 496 | app.kubernetes.io/name: hello-kubernetes 497 | spec: 498 | serviceAccountName: hello-kubernetes 499 | containers: 500 | - name: hello-kubernetes 501 | image: "paulbouwer/hello-kubernetes:1.10" 502 | imagePullPolicy: IfNotPresent 503 | ports: 504 | - name: http 505 | containerPort: 8080 506 | protocol: TCP 507 | livenessProbe: 508 | httpGet: 509 | path: / 510 | port: http 511 | readinessProbe: 512 | httpGet: 513 | path: / 514 | port: http 515 | env: 516 | - name: HANDLER_PATH_PREFIX 517 | value: "" 518 | - name: RENDER_PATH_PREFIX 519 | value: "" 520 | - name: KUBERNETES_NAMESPACE 521 | valueFrom: 522 | fieldRef: 523 | fieldPath: metadata.namespace 524 | - name: KUBERNETES_POD_NAME 525 | 
valueFrom: 526 | fieldRef: 527 | fieldPath: metadata.name 528 | - name: KUBERNETES_NODE_NAME 529 | valueFrom: 530 | fieldRef: 531 | fieldPath: spec.nodeName 532 | - name: CONTAINER_IMAGE 533 | value: "paulbouwer/hello-kubernetes:1.10" 534 | --- 535 | apiVersion: networking.k8s.io/v1 536 | kind: Ingress 537 | metadata: 538 | name: hello-kubernetes-ingress 539 | namespace: hello-kubernetes 540 | spec: 541 | ingressClassName: ingress-nginx-global 542 | rules: 543 | - host: hellok3s.$${IP}.sslip.io 544 | http: 545 | paths: 546 | - path: "/" 547 | pathType: Prefix 548 | backend: 549 | service: 550 | name: hello-kubernetes 551 | port: 552 | name: http 553 | EOF 554 | } 555 | 556 | install_rancher(){ 557 | # Wait for Kube API to be up. It should be up already but just in case. 558 | wait_for_kube_api 559 | 560 | # Download helm as required to install Rancher 561 | command -v helm || curl -fsSL https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 |bash 562 | 563 | # Get latest Cert-manager version 564 | CMVERSION=$(curl -s "https://api.github.com/repos/cert-manager/cert-manager/releases/latest" | jq -r '.tag_name') 565 | 566 | RANCHERFLAVOR=${rancher_flavor} 567 | # https://ranchermanager.docs.rancher.com/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster 568 | case $${RANCHERFLAVOR} in 569 | "latest" | "stable" | "alpha") 570 | helm repo add rancher https://releases.rancher.com/server-charts/$${RANCHERFLAVOR} 571 | ;; 572 | "prime") 573 | helm repo add rancher https://charts.rancher.com/server-charts/prime 574 | ;; 575 | *) 576 | echo "Rancher flavor not detected, using latest" 577 | helm repo add rancher https://releases.rancher.com/server-charts/latest 578 | ;; 579 | esac 580 | 581 | helm repo add jetstack https://charts.jetstack.io 582 | helm repo update 583 | 584 | # Install the cert-manager Helm chart 585 | helm install cert-manager jetstack/cert-manager \ 586 | --namespace cert-manager \ 587 | --create-namespace \ 588 | --set 
crds.enabled=true \ 589 | --version $${CMVERSION} 590 | 591 | IP="" 592 | # https://github.com/rancher/rke2/issues/3958 593 | if [ "$${KUBETYPE}" == "rke2" ]; then 594 | # Wait for the rke2-ingress-nginx-controller DS to be available if using RKE2 595 | while ! kubectl rollout status daemonset -n kube-system rke2-ingress-nginx-controller --timeout=60s; do sleep 2 ; done 596 | IP=$(kubectl get svc -n kube-system rke2-ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}') 597 | fi 598 | 599 | # Get the IP of the ingress object if provided 600 | if [ "$${KUBETYPE}" == "k3s" ]; then 601 | IP=$(kubectl get svc -n kube-system traefik -o jsonpath='{.status.loadBalancer.ingress[0].ip}') 602 | fi 603 | 604 | if [[ $${IP} == "" ]]; then 605 | # Just use internal IPs 606 | IP=$(hostname -I | awk '{print $1}') 607 | fi 608 | 609 | # Install rancher using sslip.io as hostname and with just a single replica 610 | helm install rancher rancher/rancher \ 611 | --namespace cattle-system \ 612 | --create-namespace \ 613 | --set hostname=rancher.$${IP}.sslip.io \ 614 | --set bootstrapPassword="${rancher_pass}" \ 615 | --set replicas=1 \ 616 | --set global.cattle.psp.enabled=false %{ if rancher_version != "" ~}--version "${rancher_version}"%{ endif ~} 617 | 618 | while ! 
kubectl wait --for condition=ready -n cattle-system $(kubectl get pods -n cattle-system -l app=rancher -o name) --timeout=10s; do sleep 2 ; done 619 | } 620 | 621 | install_global_ingress(){ 622 | command -v helm || curl -fsSL https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 |bash 623 | 624 | cat <<- EOF > ingress-nginx-global.yaml 625 | controller: 626 | ingressClassResource: 627 | name: ingress-nginx-global 628 | controllerValue: k8s.io/ingress-nginx-global 629 | service: 630 | labels: 631 | ingress-type: ingress-nginx-global 632 | admissionWebhooks: 633 | enabled: false 634 | EOF 635 | 636 | helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx 637 | helm repo update 638 | helm install -f ingress-nginx-global.yaml ingress-nginx-global --namespace ingress-nginx-global --create-namespace ingress-nginx/ingress-nginx 639 | } 640 | 641 | prechecks 642 | prereqs 643 | 644 | if [[ "${kube_version}" =~ .*"k3s".* ]] || [[ "${kube_version}" == "" ]]; then 645 | export KUBETYPE="k3s" 646 | export KUBECONFIG=/etc/rancher/k3s/k3s.yaml 647 | echo "export KUBECONFIG=/etc/rancher/k3s/k3s.yaml" >> /etc/profile.d/k3s.sh 648 | install_k3s 649 | mkdir -p /root/.kube/ 650 | ln -s /etc/rancher/k3s/k3s.yaml /root/.kube/config 651 | elif [[ "${kube_version}" =~ .*"rke2".* ]]; then 652 | export KUBETYPE="rke2" 653 | ln -s /var/lib/rancher/rke2/bin/kubectl /usr/local/bin/kubectl 654 | export KUBECONFIG=/etc/rancher/rke2/rke2.yaml 655 | echo "export KUBECONFIG=/etc/rancher/rke2/rke2.yaml" >> /etc/profile.d/rke2.sh 656 | install_rke2 657 | mkdir -p /root/.kube/ 658 | ln -s /etc/rancher/rke2/rke2.yaml /root/.kube/config 659 | else 660 | die "Kubernetes version ${kube_version} not valid" 2 661 | fi 662 | 663 | DEPLOY_DEMO=false 664 | INSTALL_METALLB=false 665 | INSTALL_RANCHER=false 666 | INSTALL_GLOBAL_INGRESS=false 667 | 668 | %{ if node_type == "control-plane-master" ~} 669 | INSTALL_METALLB=true 670 | %{ if global_ip_cidr != "" ~} 671 | 
INSTALL_GLOBAL_INGRESS=true 672 | %{ endif ~} 673 | %{ if deploy_demo != "false" ~} 674 | DEPLOY_DEMO=true 675 | %{ endif ~} 676 | %{ if rancher_flavor != "" ~} 677 | INSTALL_RANCHER=true 678 | %{ endif ~} 679 | %{ endif ~} 680 | 681 | %{ if node_type == "all-in-one" ~} 682 | %{ if global_ip_cidr != "" ~} 683 | INSTALL_METALLB=true 684 | INSTALL_GLOBAL_INGRESS=true 685 | %{ endif ~} 686 | %{ if ip_pool != "" ~} 687 | INSTALL_METALLB=true 688 | %{ endif ~} 689 | %{ if deploy_demo != "false" ~} 690 | DEPLOY_DEMO=true 691 | %{ endif ~} 692 | %{ if rancher_flavor != "" ~} 693 | INSTALL_RANCHER=true 694 | %{ endif ~} 695 | %{ endif ~} 696 | 697 | [ $${INSTALL_METALLB} == true ] && install_metallb || true 698 | 699 | %{ if API_IP != "" ~} 700 | %{ if node_type == "control-plane-master" ~} 701 | install_eco 702 | %{ endif ~} 703 | %{ endif ~} 704 | 705 | [ $${INSTALL_GLOBAL_INGRESS} == true ] && install_global_ingress || true 706 | [ $${DEPLOY_DEMO} == true ] && deploy_demo || true 707 | [ $${INSTALL_RANCHER} == true ] && install_rancher || true 708 | -------------------------------------------------------------------------------- /modules/kube_cluster/variables.tf: -------------------------------------------------------------------------------- 1 | variable "metal_metro" { 2 | type = string 3 | description = "Equinix Metal Metro" 4 | } 5 | 6 | variable "metal_project_id" { 7 | type = string 8 | description = "Equinix Metal Project ID" 9 | } 10 | 11 | variable "deploy_demo" { 12 | type = bool 13 | description = "Deploys a simple demo using a global IP as ingress and hello-kubernetes pods" 14 | default = false 15 | } 16 | 17 | variable "cluster_name" { 18 | type = string 19 | description = "Cluster name" 20 | default = "Cluster" 21 | } 22 | 23 | variable "plan_control_plane" { 24 | type = string 25 | description = "Control plane type/size" 26 | default = "c3.small.x86" 27 | } 28 | 29 | variable "plan_node" { 30 | type = string 31 | description = "Node type/size" 32 | 
default = "c3.small.x86" 33 | } 34 | 35 | variable "node_count" { 36 | type = number 37 | description = "Number of nodes" 38 | default = "0" 39 | } 40 | 41 | variable "ha" { 42 | type = bool 43 | description = "HA (aka 3 control plane nodes)" 44 | default = false 45 | } 46 | 47 | variable "os" { 48 | type = string 49 | description = "Operating system" 50 | default = "debian_11" 51 | } 52 | 53 | variable "control_plane_hostnames" { 54 | type = string 55 | description = "Control plane hostname prefix" 56 | default = "cp" 57 | } 58 | 59 | variable "node_hostnames" { 60 | type = string 61 | description = "Node hostname prefix" 62 | default = "node" 63 | } 64 | 65 | variable "custom_token" { 66 | type = string 67 | description = "Token used for nodes to join the cluster (autogenerated otherwise)" 68 | default = null 69 | } 70 | 71 | variable "ip_pool_count" { 72 | type = number 73 | description = "Number of public IPv4 per metro to be used as LoadBalancers with MetalLB (it needs to be power of 2 between 0 and 256 as required by Equinix Metal)" 74 | default = 0 75 | validation { 76 | condition = contains([0, 1, 2, 4, 8, 16, 32, 64, 128, 256], var.ip_pool_count) 77 | error_message = "The value must be a power of 2 between 0 and 256." 78 | } 79 | } 80 | 81 | variable "global_ip_cidr" { 82 | type = string 83 | description = "Global Anycast IP that will be mapped on all metros via BGP" 84 | default = null 85 | } 86 | 87 | variable "kube_version" { 88 | type = string 89 | description = "K3s/RKE2 version to be installed. Empty for latest K3s" 90 | default = "" 91 | } 92 | 93 | variable "metallb_version" { 94 | type = string 95 | description = "MetalLB version to be installed. Empty for latest" 96 | default = "" 97 | } 98 | 99 | variable "rancher_version" { 100 | type = string 101 | description = "Rancher version to be installed (vX.Y.Z). 
Empty for latest" 102 | default = "" 103 | } 104 | 105 | variable "rancher_flavor" { 106 | type = string 107 | description = "Rancher flavor to be installed (prime, latest, stable or alpha). Empty to not install it" 108 | default = "" 109 | } 110 | 111 | variable "custom_rancher_password" { 112 | type = string 113 | description = "Rancher initial password (autogenerated if not provided)" 114 | default = null 115 | } 116 | -------------------------------------------------------------------------------- /modules/kube_cluster/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3" 3 | required_providers { 4 | equinix = { 5 | source = "equinix/equinix" 6 | version = ">= 1.14.2" 7 | } 8 | random = { 9 | source = "hashicorp/random" 10 | version = ">= 3.5.1" 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /outputs.tf: -------------------------------------------------------------------------------- 1 | output "anycast_ip" { 2 | value = try(equinix_metal_reserved_ip_block.global_ip[0].address, null) 3 | description = "Global IP shared across Metros" 4 | } 5 | 6 | output "demo_url" { 7 | value = try("http://hellok3s.${equinix_metal_reserved_ip_block.global_ip[0].address}.sslip.io", null) 8 | description = "URL of the demo application to demonstrate a global IP shared across Metros" 9 | } 10 | 11 | output "cluster_details" { 12 | value = { 13 | for cluster in var.clusters : cluster.name => { 14 | api = module.kube_cluster[cluster.name].kube_api_ip 15 | ingress = module.kube_cluster[cluster.name].ingress_ip 16 | ip_pool_cidr = module.kube_cluster[cluster.name].ip_pool_cidr 17 | nodes = module.kube_cluster[cluster.name].nodes_details 18 | } 19 | } 20 | description = "List of Clusters => K8s details" 21 | } 22 | 23 | output "rancher_urls" { 24 | value = { 25 | for cluster in var.clusters : cluster.name => { 26 | rancher_url = 
cluster.rancher_flavor != "" ? module.kube_cluster[cluster.name].rancher_address : null 27 | rancher_initial_password_base64 = cluster.rancher_flavor != "" ? base64encode(module.kube_cluster[cluster.name].rancher_password) : null 28 | } 29 | if module.kube_cluster[cluster.name].rancher_address != null 30 | } 31 | description = "List of Clusters => Rancher details" 32 | } 33 | -------------------------------------------------------------------------------- /rancher-clusters-imported.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/equinix-labs/terraform-equinix-metal-k3s/d5119d0e1abbf3371146ed9949c070de6c8c1ac8/rancher-clusters-imported.png -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:recommended" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /variables.tf: -------------------------------------------------------------------------------- 1 | variable "metal_project_id" { 2 | type = string 3 | description = "Equinix Metal Project ID" 4 | } 5 | 6 | variable "global_ip" { 7 | type = bool 8 | description = "Enables a global anycast IPv4 that will be shared for all clusters in all metros" 9 | default = false 10 | } 11 | 12 | variable "deploy_demo" { 13 | type = bool 14 | description = "Deploys a simple demo using a global IP as ingress and hello-kubernetes pods" 15 | default = false 16 | validation { 17 | condition = !var.deploy_demo || var.global_ip 18 | error_message = "When deploy_demo is true, global_ip must be true as well." 
19 | } 20 | } 21 | 22 | variable "clusters" { 23 | description = "Cluster definition" 24 | type = list(object({ 25 | name = optional(string, "Demo cluster") 26 | metro = optional(string, "FR") 27 | plan_control_plane = optional(string, "c3.small.x86") 28 | plan_node = optional(string, "c3.small.x86") 29 | node_count = optional(number, 0) 30 | ha = optional(bool, false) 31 | os = optional(string, "debian_11") 32 | control_plane_hostnames = optional(string, "cp") 33 | node_hostnames = optional(string, "node") 34 | custom_token = optional(string, "") 35 | ip_pool_count = optional(number, 0) 36 | kube_version = optional(string, "") 37 | metallb_version = optional(string, "") 38 | rancher_flavor = optional(string, "") 39 | rancher_version = optional(string, "") 40 | custom_rancher_password = optional(string, "") 41 | })) 42 | default = [{}] 43 | } 44 | -------------------------------------------------------------------------------- /versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.9" 3 | required_providers { 4 | equinix = { 5 | source = "equinix/equinix" 6 | version = ">= 1.14.2" 7 | } 8 | } 9 | provider_meta "equinix" { 10 | module_name = "equinix/k3s" 11 | } 12 | } 13 | --------------------------------------------------------------------------------