├── .github └── workflows │ └── proton-run.yml ├── .gitignore ├── .pre-commit-config.yaml ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── env_config.json ├── images ├── cluster_summary.png ├── configure_cluster_deployment.png ├── edit_cluster_params.png ├── eksclusterview.png ├── environt_template.png ├── proton_registry.png ├── update_environment.png └── updated_environment_template.png ├── scripts ├── README.md ├── cloudformation │ ├── GitHubConfiguration.yaml │ ├── README.md │ └── iac.sh └── terraform │ ├── README.md │ ├── iac.sh │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf └── templates └── eks-mng-karpenter-with-new-vpc └── v1 ├── infrastructure ├── README.md ├── eks.tf ├── main.tf ├── manifest.yaml ├── outputs.tf ├── variables.tf ├── versions.tf └── vpc.tf └── schema └── schema.yaml /.github/workflows/proton-run.yml: -------------------------------------------------------------------------------- 1 | # This is a workflow created to run based on a commit made by AWS Proton 2 | # It only works if there is only one resource modified as part of the commit. 3 | 4 | name: 'proton-run' 5 | 6 | on: 7 | pull_request: 8 | types: 9 | - opened 10 | - reopened 11 | paths: 12 | - '**/.proton/deployment-metadata.json' 13 | push: 14 | branches: 15 | - main 16 | paths: 17 | - '**/.proton/deployment-metadata.json' 18 | 19 | jobs: 20 | get-deployment-data: 21 | name: Get Deployment Data 22 | runs-on: ubuntu-latest 23 | 24 | outputs: 25 | role_arn: ${{ steps.get-data.outputs.role_arn }} 26 | environment: ${{ steps.get-data.outputs.environment }} 27 | resource_arn: ${{ steps.get-data.outputs.resource_arn }} 28 | working_directory: ${{ steps.get-data.outputs.working_directory }} 29 | deployment_id: ${{ steps.get-data.outputs.deployment_id }} 30 | target_region: ${{ steps.get-data.outputs.target_region }} 31 | proton_region: ${{ steps.get-data.outputs.proton_region }} 32 | state_bucket: ${{ steps.get-data.outputs.state_bucket }} 33 | is_deleted: ${{ steps.get-data.outputs.is_deleted }} 34 | 35 | permissions: 36 | id-token: write 37 | contents: read 38 | 39 | continue-on-error: true 40 | 41 | steps: 42 | # Checkout the repository to the GitHub Actions runner 43 | - name: Checkout 44 | uses: actions/checkout@v2 45 | 46 | - name: Verify env_config updated 47 | run: | 48 | if grep -q REPLACE_ME env_config.json; then 49 | echo "You must update env_config.json or update this workflow to not require it." 50 | exit 1 51 | fi 52 | 53 | - name: Get changed files 54 | id: files 55 | uses: jitterbit/get-changed-files@v1 56 | 57 | - name: Find modified resource 58 | id: find-modified 59 | run: | 60 | found=false 61 | for changed_file in ${{ steps.files.outputs.all }}; do 62 | if [[ "$changed_file" == *".proton/deployment-metadata.json" ]]; then 63 | echo "found file" 64 | if [[ "$found" == true ]]; then 65 | echo "More than one resource found to have a new deployment, I'm not sure which one to update, exiting." 
66 | exit 1 67 | fi 68 | echo "setting found to true" 69 | found=true 70 | echo "setting outputs" 71 | echo "::set-output name=deployment-metadata-path::$changed_file" 72 | fi 73 | done 74 | if [[ "$found" == false ]]; then 75 | echo "No change made to deployment-metadata.json, exiting" 76 | exit 1 77 | fi 78 | 79 | - name: Get data 80 | id: get-data 81 | run: | 82 | modified_resource_arn=$(jq -r '.resourceMetadata.arn' ${{ steps.find-modified.outputs.deployment-metadata-path }}) 83 | echo "::set-output name=resource_arn::$modified_resource_arn" 84 | 85 | IFS=':' 86 | read -a split_arn <<< "$modified_resource_arn" 87 | proton_region=${split_arn[3]} 88 | echo "::set-output name=proton_region::$proton_region" 89 | 90 | deployment_id=$(jq -r '.deploymentId' ${{ steps.find-modified.outputs.deployment-metadata-path }}) 91 | echo "::set-output name=deployment_id::$deployment_id" 92 | 93 | is_deleted=$(jq -r '.isResourceDeleted' ${{ steps.find-modified.outputs.deployment-metadata-path }}) 94 | echo "::set-output name=is_deleted::$is_deleted" 95 | 96 | 97 | if [[ "$modified_resource_arn" == *":environment/"* ]]; then 98 | environment_name=${modified_resource_arn##*/} 99 | working_directory="$environment_name/" 100 | elif [[ "$modified_resource_arn" == *"/service-instance/"* ]]; then 101 | environment_arn=$(jq -r '.resourceMetadata.environmentArn' ${{ steps.find-modified.outputs.deployment-metadata-path }}) 102 | environment_name=${environment_arn##*/} 103 | 104 | resource_portion=${modified_resource_arn##*:} 105 | IFS='/' 106 | read -a split_resources <<< "$resource_portion" 107 | 108 | service_name=${split_resources[1]} 109 | instance_name=${split_resources[3]} 110 | 111 | working_directory=$environment_name/$service_name-$instance_name/ 112 | elif [[ "$modified_resource_arn" == *"/pipeline"* ]]; then 113 | environment_name="pipeline" 114 | 115 | resource_portion=${modified_resource_arn##*:} 116 | IFS='/' 117 | read -a split_resources <<< "$resource_portion" 118 | 119 | service_name=${split_resources[1]} 120 | 121 | working_directory=$service_name/pipeline 122 | fi 123 | 124 | if [[ $(jq -r --arg env $environment_name 'has($env)' env_config.json) = "true" ]]; then 125 | role_arn=$(jq -r --arg env $environment_name '.[$env]["role"]' env_config.json) 126 | target_region=$(jq -r --arg env $environment_name '.[$env]["region"]' env_config.json) 127 | state_bucket=$(jq -r --arg env $environment_name '.[$env]["state_bucket"]' env_config.json) 128 | else 129 | if [[ $(jq -r --arg env $environment_name 'has("*")' env_config.json) = "true" ]]; then 130 | role_arn=$(jq -r --arg env $environment_name '.["*"]["role"]' env_config.json) 131 | target_region=$(jq -r --arg env $environment_name '.["*"]["region"]' env_config.json) 132 | state_bucket=$(jq -r --arg env $environment_name '.["*"]["state_bucket"]' env_config.json) 133 | else 134 | echo "Missing $environment_name or * from env_config.json, exiting" 135 | exit 1 136 | fi 137 | fi 138 | 139 | echo "::set-output name=working_directory::$working_directory" 140 | echo "::set-output name=environment::$environment_name" 141 | 142 | echo "::set-output name=role_arn::$role_arn" 143 | echo "::set-output name=target_region::$target_region" 144 | echo "::set-output name=state_bucket::$state_bucket" 145 | 146 | terraform: 147 | name: 'Terraform' 148 | needs: get-deployment-data 149 | runs-on: ubuntu-latest 150 | environment: ${{ needs.get-deployment-data.outputs.environment }} 151 | 152 | permissions: 153 | id-token: write 154 | contents: read 155 | 156 | defaults: 
157 | run: 158 | working-directory: ${{ needs.get-deployment-data.outputs.working_directory }} 159 | shell: bash # Use the Bash shell regardless whether the GitHub Actions runner is ubuntu-latest, macos-latest, or windows-latest 160 | 161 | if: needs.get-deployment-data.result == 'success' && needs.get-deployment-data.outputs.is_deleted == 'false' 162 | 163 | continue-on-error: true 164 | 165 | outputs: 166 | success: ${{ steps.mark_success.outputs.success }} 167 | 168 | steps: 169 | # Checkout the repository to the GitHub Actions runner 170 | - name: Checkout 171 | uses: actions/checkout@v2 172 | 173 | - name: Configure AWS Credentials 174 | id: assume_role 175 | uses: aws-actions/configure-aws-credentials@v1 176 | with: 177 | aws-region: ${{ needs.get-deployment-data.outputs.target_region }} 178 | role-to-assume: ${{ needs.get-deployment-data.outputs.role_arn }} 179 | role-session-name: TF-Github-Actions 180 | mask-aws-account-id: 'no' 181 | 182 | # Install the latest version of Terraform CLI and configure the Terraform CLI configuration file with a Terraform Cloud user API token 183 | - name: Setup Terraform 184 | id: tf_setup 185 | uses: hashicorp/setup-terraform@v1 186 | with: 187 | terraform_version: 1.0.7 188 | terraform_wrapper: false 189 | 190 | # Initialize a new or existing Terraform working directory by creating initial files, loading any remote state, downloading modules, etc. 191 | - name: Terraform Init 192 | id: tf_init 193 | run: terraform init -backend-config="bucket=${{ needs.get-deployment-data.outputs.state_bucket }}" -backend-config="key=${{ needs.get-deployment-data.outputs.working_directory }}terraform.tfstate" -backend-config="region=${{ needs.get-deployment-data.outputs.target_region }}" 194 | 195 | # Checks that all Terraform configuration files adhere to a canonical format 196 | - name: Terraform Format 197 | id: tf_fmt 198 | run: terraform fmt -diff -check 199 | 200 | # Generates an execution plan for Terraform 201 | - name: Terraform Plan 202 | id: tf_plan 203 | run: terraform plan -var="aws_region=${{ needs.get-deployment-data.outputs.target_region }}" 204 | 205 | # On push to main, build or change infrastructure according to Terraform configuration files 206 | # Note: It is recommended to set up a required "strict" status check in your repository for "Terraform Cloud". 
See the documentation on "strict" required status checks for more information: https://help.github.com/en/github/administering-a-repository/types-of-required-status-checks 207 | - name: Terraform Apply 208 | id: tf_apply 209 | if: github.ref == 'refs/heads/main' && github.event_name == 'push' 210 | run: terraform apply -auto-approve -var="aws_region=${{ needs.get-deployment-data.outputs.target_region }}" 211 | 212 | # If this completes, then the entire workflow has successfully completed 213 | - name: Mark Success 214 | id: mark_success 215 | run: echo "::set-output name=success::True" 216 | 217 | notify-proton: 218 | name: 'Notify Proton' 219 | needs: 220 | - get-deployment-data 221 | - terraform 222 | runs-on: ubuntu-latest 223 | environment: ${{ needs.get-deployment-data.outputs.environment }} 224 | 225 | if: github.event_name == 'push' && github.ref == 'refs/heads/main' && needs.get-deployment-data.outputs.is_deleted == 'false' 226 | 227 | permissions: 228 | id-token: write 229 | contents: read 230 | 231 | defaults: 232 | run: 233 | working-directory: ${{ needs.get-deployment-data.outputs.working_directory }} 234 | shell: bash # Use the Bash shell regardless whether the GitHub Actions runner is ubuntu-latest, macos-latest, or windows-latest 235 | 236 | steps: 237 | # Checkout the repository to the GitHub Actions runner 238 | - name: Checkout 239 | uses: actions/checkout@v2 240 | 241 | - name: Configure AWS Credentials 242 | id: assume_role 243 | uses: aws-actions/configure-aws-credentials@v1 244 | with: 245 | aws-region: ${{ needs.get-deployment-data.outputs.target_region }} 246 | role-to-assume: ${{ needs.get-deployment-data.outputs.role_arn }} 247 | role-session-name: TF-Github-Actions-Notify-Proton 248 | mask-aws-account-id: 'no' 249 | 250 | # Initialize a new or existing Terraform working directory by creating initial files, loading any remote state, downloading modules, etc. 251 | - name: Terraform Init 252 | id: tf_init 253 | continue-on-error: true 254 | run: terraform init -backend-config="bucket=${{ needs.get-deployment-data.outputs.state_bucket }}" -backend-config="key=${{ needs.get-deployment-data.outputs.working_directory }}terraform.tfstate" -backend-config="region=${{ needs.get-deployment-data.outputs.target_region }}" 255 | 256 | - name: Notify Proton Success 257 | id: notify_success 258 | if: needs.terraform.outputs.success == 'True' && steps.tf_init.outcome == 'success' 259 | run: | 260 | # Get outputs as json 261 | outputs_json=$(terraform output -json) 262 | 263 | # Map Terraform output JSON to Proton outputs JSON 264 | formatted_outputs=( $(echo $outputs_json | jq "to_entries|map({key: .key, valueString: .value.value})") ) 265 | 266 | # Notify proton 267 | aws proton notify-resource-deployment-status-change --region ${{ needs.get-deployment-data.outputs.proton_region }} --resource-arn ${{ needs.get-deployment-data.outputs.resource_arn }} --status SUCCEEDED --deployment-id ${{ needs.get-deployment-data.outputs.deployment_id }} --outputs "${formatted_outputs[*]}" 268 | echo "Notify success!" 269 | 270 | - name: Notify Proton Failure 271 | if: needs.terraform.outputs.success != 'True' || steps.tf_init.outcome != 'success' 272 | run: | 273 | aws proton notify-resource-deployment-status-change --region ${{ needs.get-deployment-data.outputs.proton_region }} --resource-arn ${{ needs.get-deployment-data.outputs.resource_arn }} --status FAILED --deployment-id ${{ needs.get-deployment-data.outputs.deployment_id }} 274 | echo "Notify failure!" 
275 | 276 | terraform-destroy: 277 | name: 'Run terraform destroy' 278 | needs: 279 | - get-deployment-data 280 | runs-on: ubuntu-latest 281 | environment: ${{ needs.get-deployment-data.outputs.environment }} 282 | 283 | if: github.event_name == 'push' && github.ref == 'refs/heads/main' && needs.get-deployment-data.outputs.is_deleted == 'true' 284 | 285 | permissions: 286 | id-token: write 287 | contents: read 288 | 289 | defaults: 290 | run: 291 | working-directory: ${{ needs.get-deployment-data.outputs.working_directory }} 292 | shell: bash # Use the Bash shell regardless whether the GitHub Actions runner is ubuntu-latest, macos-latest, or windows-latest 293 | 294 | steps: 295 | # Checkout the repository to the GitHub Actions runner 296 | - name: Checkout 297 | uses: actions/checkout@v2 298 | 299 | - name: Configure AWS Credentials 300 | id: assume_role 301 | uses: aws-actions/configure-aws-credentials@v1 302 | with: 303 | aws-region: ${{ needs.get-deployment-data.outputs.target_region }} 304 | role-to-assume: ${{ needs.get-deployment-data.outputs.role_arn }} 305 | role-session-name: TF-Github-Actions-Notify-Proton 306 | mask-aws-account-id: 'no' 307 | 308 | # Initialize a new or existing Terraform working directory by creating initial files, loading any remote state, downloading modules, etc. 309 | - name: Terraform Init 310 | id: tf_init 311 | run: terraform init -backend-config="bucket=${{ needs.get-deployment-data.outputs.state_bucket }}" -backend-config="key=${{ needs.get-deployment-data.outputs.working_directory }}terraform.tfstate" -backend-config="region=${{ needs.get-deployment-data.outputs.target_region }}" 312 | 313 | - name: Terraform Destroy (Add-ons) 314 | id: tf_destroy_addons 315 | run: terraform apply -auto-approve -destroy -target="module.kubernetes_addons" -var="aws_region=${{ needs.get-deployment-data.outputs.target_region }}" 316 | 317 | - name: Terraform Destroy (Cluster) 318 | id: tf_destroy_cluster 319 | run: terraform apply -auto-approve -destroy -target="module.eks_blueprints" -var="aws_region=${{ needs.get-deployment-data.outputs.target_region }}" 320 | 321 | - name: Terraform Destroy (Everything) 322 | id: tf_destroy_everything 323 | run: terraform apply -auto-approve -destroy -var="aws_region=${{ needs.get-deployment-data.outputs.target_region }}" 324 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | *.swp 3 | 4 | # Local .terraform directories 5 | **/.terraform/* 6 | 7 | # .tfstate files 8 | *.tfstate 9 | *.tfstate.* 10 | 11 | # terraform lockfile 12 | .terraform.lock.hcl 13 | 14 | # Crash log files 15 | crash.log 16 | 17 | # Exclude all .tfvars files, which are likely to contain sentitive data, such as 18 | # password, private keys, and other secrets. These should not be part of version 19 | # control as they are data points which are potentially sensitive and subject 20 | # to change depending on the environment. 
21 | # 22 | *.tfvars 23 | 24 | # Ignore override files as they are usually used to override resources locally and so 25 | # are not checked in 26 | override.tf 27 | override.tf.json 28 | *_override.tf 29 | *_override.tf.json 30 | 31 | # Include override files you do wish to add to version control using negated pattern 32 | # 33 | # !example_override.tf 34 | 35 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 36 | # example: *tfplan* 37 | 38 | # Ignore CLI configuration files 39 | .terraformrc 40 | terraform.rc 41 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v4.3.0 4 | hooks: 5 | - id: trailing-whitespace 6 | args: ['--markdown-linebreak-ext=md'] 7 | - id: end-of-file-fixer 8 | - id: check-merge-conflict 9 | - id: detect-private-key 10 | - id: detect-aws-credentials 11 | args: ['--allow-missing-credentials'] 12 | - repo: https://github.com/antonbabenko/pre-commit-terraform 13 | rev: v1.76.0 14 | hooks: 15 | - id: terraform_fmt 16 | - id: terraform_docs 17 | args: 18 | - '--args=--lockfile=false' 19 | - id: terraform_tflint 20 | args: 21 | - '--args=--only=terraform_deprecated_interpolation' 22 | - '--args=--only=terraform_deprecated_index' 23 | - '--args=--only=terraform_unused_declarations' 24 | - '--args=--only=terraform_comment_syntax' 25 | - '--args=--only=terraform_documented_outputs' 26 | - '--args=--only=terraform_documented_variables' 27 | - '--args=--only=terraform_typed_variables' 28 | - '--args=--only=terraform_module_pinned_source' 29 | - '--args=--only=terraform_naming_convention' 30 | - '--args=--only=terraform_required_version' 31 | - '--args=--only=terraform_required_providers' 32 | - '--args=--only=terraform_standard_module_structure' 33 | - '--args=--only=terraform_workspace_remote' 34 | # Proton creates the variable definition for the `environment` variable and therefore it cannot be included in the 35 | # variable definition file of this project; this breaks the Terraform validate command 36 | # See https://docs.aws.amazon.com/proton/latest/userguide/ag-infrastructure-tmp-files-terraform.html#compiled-tform 37 | # - id: terraform_validate 38 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 5 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 
8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *main* branch. 27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 38 | 39 | GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. 45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 51 | 52 | 53 | ## Security issue notifications 54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. 55 | 56 | 57 | ## Licensing 58 | 59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 60 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | this software and associated documentation files (the "Software"), to deal in 5 | the Software without restriction, including without limitation the rights to 6 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software is furnished to do so. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 10 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 11 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 12 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 13 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 14 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 15 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | #### What is the goal of this tutorial? 2 | 3 | This repository includes a template example to configure AWS Proton as a vending machine for EKS clusters using Terraform. For more information about this use case please read this [blog post](https://aws.amazon.com/blogs/containers/using-aws-proton-as-a-provisioning-mechanism-for-amazon-eks-clusters/). This tutorial is not intended to be used as-is for production. It provides an example of how to use Proton for this specific scenario. The template is only provided to describe "the art of possible". 4 | 5 | #### Getting started and one-off configurations 6 | 7 | To get started with this tutorial you need to have an AWS account with administrative privileges and a GitHub account. 8 | 9 | > Note: Terraform provisioning happens outside the context of Proton and for that we are providing a sample GitHub Actions workflow as part of the same repository. However, you could use any Terraform pipeline you want to accomplish the same result with a similar flow. 10 | 11 | Login to the AWS console and select the AWS region where you want to exercise this tutorial. 12 | 13 | Create a repository (in your GitHub account) off of this template: https://github.com/aws-samples/eks-blueprints-for-proton and keep the same name (you can also fork the repository if you prefer). Remember to enable GitHub Actions on your repository because it will be required later. 14 | 15 | Set up an AWS CodeStar connection following [these instructions](https://docs.aws.amazon.com/proton/latest/adminguide/setting-up-for-service.html#setting-up-vcontrol). This will allow you to access your GitHub account (and your repos) from Proton. 16 | 17 | Once you are done, go to the Proton console and switch to the `Settings/Repositories` page. Add the repository you created (or forked) above and point to the `main` branch. You should now have a repository listed that looks like this (your GitHub name will be different): 18 | 19 | ![proton_registry](images/proton_registry.png) 20 | 21 | For Terraform to be able to deploy (or "vend") EKS clusters, it needs to assume a proper IAM role. In addition, since for our solution we will use Terraform open source, we also need an S3 bucket to save the Terraform state. To do this please follow the instructions [at this page](./scripts/README.md). 
22 | 23 | Retrieve the role ARN and the S3 bucket name from the output of the IaC above and update the [env_config.json](./env_config.json) file in your GitHub repository. Make sure to update the `region` parameter to the region you are using. 24 | 25 | > Remember to commit and push these changes to your GitHub repository now 26 | 27 | Create an IAM user that you will be using to represent the developer persona (i.e. the person that will request the cluster). Call it `protondev` and attach the AWS managed `AWSProtonDeveloperAccess` policy. 28 | 29 | This user needs a bit more power than the managed `AWSProtonDeveloperAccess` IAM policy grants: because a developer with the standard `AWSProtonDeveloperAccess` policy is not allowed to deploy an environment in Proton, you need to add the ability to `CreateEnvironment`, `UpdateEnvironment`, and `DeleteEnvironment`, as well as to `PassRole` (to the Proton service). In addition, to use the `aws eks update-kubeconfig` command to create the `config` file that kubectl will use, the `protondev` user must be able to `DescribeCluster`. Lastly, if you want to use Cloud Shell with the `protondev` user for convenience, Cloud Shell access must be allowed explicitly. 30 | 31 | The following inline policy for the `protondev` user adds these required additional permissions: 32 | ``` 33 | { 34 | "Version": "2012-10-17", 35 | "Statement": [ 36 | { 37 | "Effect": "Allow", 38 | "Action": "iam:PassRole", 39 | "Resource": "*", 40 | "Condition": { 41 | "StringEquals": { 42 | "iam:PassedToService": "proton.amazonaws.com" 43 | } 44 | } 45 | }, 46 | { 47 | "Effect": "Allow", 48 | "Action": [ 49 | "proton:CreateEnvironment", 50 | "proton:UpdateEnvironment", 51 | "proton:DeleteEnvironment", 52 | "iam:ListRoles" 53 | ], 54 | "Resource": "*" 55 | }, 56 | { 57 | "Effect": "Allow", 58 | "Action": "eks:DescribeCluster", 59 | "Resource": "*" 60 | }, 61 | { 62 | "Effect": "Allow", 63 | "Action": "cloudshell:*", 64 | "Resource": "*" 65 | } 66 | ] 67 | } 68 | ``` 69 | 70 | #### Create the environment template in Proton 71 | 72 | Now that you have configured the core requirements in your accounts, create the environment template that Proton will use to vend clusters. 73 | 74 | > To do so we will continue to use the administrative user or role we have used so far in the tutorial. Some customers may want this part to be done by Proton administrators (i.e. entities with the `AWSProtonFullAccess` IAM policy, or a variation of it, attached). 75 | 76 | Switch to the `Templates/Environment templates` page in the Proton console and click `Create environment template`. Leave all defaults except for the following fields: (1) in the `Template bundle source` select `Sync templates from Git`; (2) pick the repository in your account; (3) set the `Branch name` to `main`; (4) in the Template details set `eks-mng-karpenter-with-new-vpc` as the `Template name`. 77 | 78 | > It is important that you set the name exactly to `eks-mng-karpenter-with-new-vpc` because Proton will scan the repo for that exact folder name (and version structure). 79 | 80 | Set a `Template display name` for convenience and an optional description; leave everything else as default and click `Create environment template`. 81 | 82 | Within a few seconds, upon a refresh, you should see a template version `1.0` appear. It's in `Draft` state. Click `Publish` and it will move into `Published` state. 
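If you prefer to script this step, a rough AWS CLI equivalent is sketched below. This assumes you have the AWS CLI configured with the same administrative credentials and that your CLI version supports these Proton subcommands; verify the exact flags against your CLI documentation before relying on them.

```
# Inspect the versions Proton has registered for the template
aws proton list-environment-template-versions \
  --template-name eks-mng-karpenter-with-new-vpc

# Publish the draft 1.0 version (equivalent to clicking Publish in the console)
aws proton update-environment-template-version \
  --template-name eks-mng-karpenter-with-new-vpc \
  --major-version 1 \
  --minor-version 0 \
  --status PUBLISHED
```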
83 | 84 | In the console you should see something like this: 85 | 86 | ![environment_template](images/environt_template.png) 87 | 88 | #### Deploy the cluster via Proton 89 | 90 | Now that a platform administrator has configured the template that represents the organization standard for an EKS cluster, log out of the console with the administrative principal and log back in with the `protondev` user you created earlier. 91 | 92 | > Before moving forward, double-check that GitHub Actions are enabled for your repository because the next steps will eventually trigger the workflows. To check, in your repository go to `Settings`, `Actions` then `General` and make sure `Allow all actions and reusable workflows` is selected. 93 | 94 | Navigate to the `Environments` page in the Proton console and click `Create environment`. Select the environment template you created above and click `Configure`. In the `Provisioning` section select `Self-managed provisioning`. In the `Provisioning repository details` select `Existing repository`, select the GitHub repo you created (or forked) above as the `Repository name`, and select `main` as the `Branch name`. Provide an `Environment name` and an optional `Environment description` of your choice and click `Next`. 95 | 96 | Give your cluster a name, leave the `vpc_cidr` as is and add your AWS IAM user (`protondev`) to the input `user`. You should now see something like this: 97 | 98 | ![configure_cluster_deployment](images/configure_cluster_deployment.png) 99 | 100 | 101 | The EKS Blueprints will enable the user you enter to assume an IAM role that has been defined as a Kubernetes cluster admin in the K8s RBAC (we'll play with this later). The list of add-ons has been provided as an example. Flag or unflag them at your discretion. Should you use this solution in production, you may want to check all the add-ons that EKS Blueprints supports and include what you need in your own Proton template. For example, you may want to expose the size of the cluster either in terms of nodes (min, max) or in t-shirt sizes (small, medium, large). This example template hard-codes the size of the cluster, but it's really all up to the platform team to decide what parameters to expose to developers. 102 | 103 | This is where the magic happens. The input parameters you see here (which are obviously related to the EKS cluster you are about to provision) are part of the sample template provided in the repo, but you can fully customize them based on your needs. Specifically, the [main.tf](https://github.com/aws-samples/eks-blueprints-for-proton/blob/main/templates/eks-mng-karpenter-with-new-vpc/v1/infrastructure/main.tf) file is where the [EKS Blueprints](https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/docs/getting-started.md) module is imported and where the core configuration is defined. The [schema.yaml](https://github.com/aws-samples/eks-blueprints-for-proton/blob/main/templates/eks-mng-karpenter-with-new-vpc/v1/schema/schema.yaml) file is where all the inputs get defined. The [outputs.tf](https://github.com/aws-samples/eks-blueprints-for-proton/blob/main/templates/eks-mng-karpenter-with-new-vpc/v1/infrastructure/outputs.tf) file is where all the outputs that will be presented in the Proton console get defined. Note that in the sample template in the repo the only Kubernetes version you can pick is 1.20, because we pretend this is the only version that the platform team at your org has vetted and supports internally. 
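For illustration only, the fragment of `schema.yaml` that enforces this vetted version looks roughly like the following (the real schema defines several more inputs; the exact format of this input is shown again later in the upgrade section):

```
kubernetes_version:
  type: string
  description: Kubernetes Version
  enum: ["1.20"]
  default: "1.20"
```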
104 | 105 | Click `Next` and in the next summary form click `Create`. This will kick off your cluster creation. 106 | 107 | This will trigger the following process: 108 | - Proton will merge the Terraform template with your inputs and create a PR in the repository you specified (in our tutorial it's the same repository that hosts the template, but you probably want these to be two separate repositories in a production setup - just remember to add them both to the Proton `Repositories` page you configured at the beginning) 109 | - The [GitHub Actions workflow example that ships with this repository](.github/workflows/proton-run.yml) will trigger, run a plan, and check that everything is in good shape 110 | - The PR will be created (its merging can be a manual step performed by a platform administrator upon a code review or, because Proton creates enough guardrails for the PR to be legitimate, the repository can be configured to perform an auto-merge) 111 | - Once the PR is merged, the GitHub Actions workflow provided as an example in the repository will kick off again, and this time it will go all the way to the `terraform apply` stage, effectively deploying the cluster 112 | - When the `apply` has completed, the workflow will notify Proton with the `output`, which includes, among other things, the `aws eks update-kubeconfig` command to configure the `config` file that points `kubectl` to the cluster you just deployed. 113 | 114 | > Note: if you want to know more about how Terraform templating and Git provisioning work in Proton, please refer to these two blog posts: [AWS Proton Terraform Templates](https://aws.amazon.com/blogs/containers/aws-proton-terraform-templates/) and [AWS Proton Self-Managed Provisioning](https://aws.amazon.com/blogs/containers/aws-proton-self-managed-provisioning/). 115 | 116 | It will take roughly 15-20 minutes to deploy, and you can watch progress in the GitHub Actions workflow. When the workflow completes you should see something like this in your Proton console for the environment you have just deployed: 117 | 118 | ![cluster_summary](images/cluster_summary.png) 119 | 120 | Congratulations. You have just witnessed Proton vending an EKS cluster. 121 | 122 | #### Interacting with the cluster 123 | 124 | You should still be logged in as `protondev`. You can now open a Cloud Shell and run the `aws eks update-kubeconfig` command as reported in the Proton `Outputs` section. This will configure `kubectl` in your shell to communicate with this cluster. 
If you have `kubectl` installed in your shell you can start interacting with the cluster (you can grab the `kubectl` binary from [here](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/)): 125 | 126 | ``` 127 | [cloudshell-user@ip-10-0-70-52 ~]$ 128 | [cloudshell-user@ip-10-0-70-52 ~]$ aws eks --region us-west-2 update-kubeconfig --name 6946-myekscluster --role-arn arn:aws:iam::336419811389:role/6946-aws001-preprod-dev-platform-team-Access 129 | Added new context arn:aws:eks:us-west-2:336419811389:cluster/6946-myekscluster to /home/cloudshell-user/.kube/config 130 | 131 | [cloudshell-user@ip-10-0-70-52 ~]$ kubectl get pods -A 132 | NAMESPACE NAME READY STATUS RESTARTS AGE 133 | NAMESPACE NAME READY STATUS RESTARTS AGE 134 | aws-for-fluent-bit aws-for-fluent-bit-42954 1/1 Running 0 103m 135 | aws-for-fluent-bit aws-for-fluent-bit-9srpk 1/1 Running 0 103m 136 | aws-for-fluent-bit aws-for-fluent-bit-rzj9m 1/1 Running 0 103m 137 | cert-manager cert-manager-5dbb9d7955-tcrfp 1/1 Running 0 103m 138 | cert-manager cert-manager-cainjector-7d55bf8f78-s54mm 1/1 Running 0 103m 139 | cert-manager cert-manager-webhook-5c888754d5-rsw5f 1/1 Running 0 103m 140 | karpenter karpenter-7b9cc6dc5b-wrdlz 2/2 Running 0 104m 141 | kube-system aws-load-balancer-controller-7686bcf8b-bp7rv 1/1 Running 0 104m 142 | kube-system aws-load-balancer-controller-7686bcf8b-m47m6 1/1 Running 0 104m 143 | kube-system aws-node-52hmg 1/1 Running 0 105m 144 | kube-system aws-node-bxqt5 1/1 Running 0 105m 145 | kube-system aws-node-zmhrr 1/1 Running 0 105m 146 | kube-system coredns-86d9946576-5g46r 1/1 Running 0 106m 147 | kube-system coredns-86d9946576-vrldf 1/1 Running 0 106m 148 | kube-system kube-proxy-f5ph4 1/1 Running 0 105m 149 | kube-system kube-proxy-nzwbj 1/1 Running 0 105m 150 | kube-system kube-proxy-qz55d 1/1 Running 0 105m 151 | kube-system metrics-server-679944f8f6-264jl 1/1 Running 0 104m 152 | vpa vpa-recommender-fb896949d-xnknm 1/1 Running 0 104m 153 | vpa vpa-updater-7447f7657-9zkwc 1/1 Running 0 104m 154 | 155 | [cloudshell-user@ip-10-0-70-52 ~]$ kubectl get nodes 156 | NAME STATUS ROLES AGE VERSION 157 | ip-10-0-10-125.us-west-2.compute.internal Ready 105m v1.20.11-eks-f17b81 158 | ip-10-0-11-68.us-west-2.compute.internal Ready 105m v1.20.11-eks-f17b81 159 | ip-10-0-12-132.us-west-2.compute.internal Ready 105m v1.20.11-eks-f17b81 160 | 161 | [cloudshell-user@ip-10-0-70-52 ~]$ kubectl cluster-info 162 | Kubernetes control plane is running at https://FEEEFE9EC8CE64A2F90269827F8CB045.gr7.us-west-2.eks.amazonaws.com 163 | CoreDNS is running at https://FEEEFE9EC8CE64A2F90269827F8CB045.gr7.us-west-2.eks.amazonaws.com/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy 164 | 165 | To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'. 
166 | 167 | [cloudshell-user@ip-10-0-70-52 ~]$ kubectl version 168 | Client Version: version.Info{Major:"1", Minor:"23", GitVersion:"v1.23.5", GitCommit:"c285e781331a3785a7f436042c65c5641ce8a9e9", GitTreeState:"clean", BuildDate:"2022-03-16T15:58:47Z", GoVersion:"go1.17.8", Compiler:"gc", Platform:"linux/amd64"} 169 | Server Version: version.Info{Major:"1", Minor:"20+", GitVersion:"v1.20.15-eks-14c7a48", GitCommit:"eb09fc479c1b2bfcc35c47416efb36f1b9052d58", GitTreeState:"clean", BuildDate:"2022-04-01T03:14:50Z", GoVersion:"go1.15.15", Compiler:"gc", Platform:"linux/amd64"} 170 | WARNING: version difference between client (1.23) and server (1.21) exceeds the supported minor version skew of +/-1 171 | [cloudshell-user@ip-10-0-70-52 ~]$ 172 | ``` 173 | From now on it's just standard Kubernetes. 174 | 175 | You can also access the cluster via the AWS console. To do so you need to assume the role that has been added to the K8s RBAC. The `protondev` user is allowed to assume that role (this is what we have done with the `aws eks update-kubeconfig` command above). In our example this role is `arn:aws:iam::336419811389:role/6946-aws001-preprod-dev-platform-team-Access` (available as part of the command string in the Proton environment output). In the console, while logged in as `protondev` you can [switch to this role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-console.html), move to the EKS console and navigate in the UI to discover all cluster objects: 176 | 177 | ![eksclusterview](images/eksclusterview.png) 178 | 179 | > Fun fact: the developer may see an `Update now` button in the console if there is a new Kubernetes cluster version available in EKS. While the `arn:aws:iam::336419811389:role/6946-aws001-preprod-dev-platform-team-Access` role is a Kubernetes administrator (in other words you can do pretty much anything you want with kubectl), it has very limited IAM permissions when it comes to EKS (mostly limited to list and describe operations). This is why you can use the console to list and describe the cluster and discover all objects in it. Simply put, clicking on the `Update now` button will just generate a message saying the operation cannot be performed. 180 | 181 | How do you maintain this cluster then? We are glad you asked. 182 | 183 | #### Updating the Proton cluster template 184 | 185 | Remember, this solution allows a central platform team to maintain a set of standards (one of which is the Kubernetes cluster version). As a platform administrator you may get to the point where you bless another Kubernetes version. In this case, let's simulate that you have verified K8s version `1.21` adheres to your organization standards, and you want to make it available to your developers. The only thing you need to do is update the [schema.yaml](https://github.com/aws-samples/eks-blueprints-for-proton/blob/main/templates/eks-mng-karpenter-with-new-vpc/v1/schema/schema.yaml) file in your own repository (the repository you are using with Proton) to include the new version. Specifically, you need to configure the `kubernetes_version` variable to accept both `1.20` and `1.21` and change the default to `1.21` as follows: 186 | ``` 187 | kubernetes_version: 188 | type: string 189 | description: Kubernetes Version 190 | enum: ["1.20", "1.21"] 191 | default: "1.21" 192 | ``` 193 | 194 | When you push this commit to your repository in GitHub, Proton will detect the change in the template. 
If you log in with the administrative user you will see in the details of your `Environment template` that there is a new version in the `Draft` stage. You can click `Publish`, and it will become the new default minor version for that template. 195 | 196 | This means that every cluster a developer will deploy with this template can be deployed with either version (because `1.20` and `1.21` are both valid options). However, it is also possible to upgrade an existing 1.20 cluster to the new 1.21 version. 197 | 198 | Your environment template should now look like this: 199 | 200 | ![updated_environment_template](images/updated_environment_template.png) 201 | 202 | > Note the new `Template version` (`1.1`) and also note that there is an environment deployed that references this template at version `1.0`. 203 | 204 | #### Updating an existing cluster 205 | 206 | Now that a Proton administrator has updated the template, log back in as the `protondev` user and open your Proton environment that represents the cluster you deployed above. You will notice a message in the `Template version` field saying that a new template version is available. You can now update your environment to apply the new template and change the inputs. 207 | 208 | > Note: in this example we are updating both the template and an input parameter in that template. In general these can be separate processes. That is, you can update the template functionality without necessarily exposing new or different parameters to the user, or you can update input parameters without having to update a template. Refer to [this Proton documentation page](https://docs.aws.amazon.com/proton/latest/adminguide/ag-env-update.html) for more details about environment updates. Also remember that this EKS Blueprints template is only provided as part of a demonstration tutorial. If you are deep into Terraform and EKS/Kubernetes you can build a template that better fits your own needs. Refer to the [EKS Blueprints repo](https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/docs/getting-started.md) for all the options available. 209 | 210 | In our scenario, you have to go to the Proton environment and click `Update minor`: 211 | 212 | ![update_environment](images/update_environment.png) 213 | 214 | At the next screen leave everything unchanged and click `Edit` to access and update your cluster parameters. Here you can set the cluster version to 1.21: 215 | 216 | ![edit_cluster_params](images/edit_cluster_params.png) 217 | 218 | Click `Next` and then `Update`. 219 | 220 | This will trigger a workflow identical to the one we triggered with the deployment. Terraform, in this case, will `apply` the configuration to an existing cluster, and the logic inside the EKS Blueprints module will know how to upgrade an EKS cluster. At the end of this process the GitHub Actions workflow will notify Proton that the upgrade has completed. 221 | 222 | Please note that Kubernetes versions are a moving target. The examples in this README file refer to setting up version 1.20 and upgrading it to 1.21. Over time, your setup will require you to use new EKS Blueprints module versions (as defined in the [main.tf](https://github.com/aws-samples/eks-blueprints-for-proton/blob/main/templates/eks-mng-karpenter-with-new-vpc/v1/infrastructure/main.tf) file) and different Kubernetes cluster versions (as defined in the [schema.yaml](https://github.com/aws-samples/eks-blueprints-for-proton/blob/main/templates/eks-mng-karpenter-with-new-vpc/v1/schema/schema.yaml) file); a sketch of where these settings live is shown below. 
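As a purely illustrative sketch (this is not a verbatim copy of the template's main.tf, and the variable names below are assumptions), the two knobs typically look like this: the Git `ref` pins the EKS Blueprints module release, and the cluster version is wired to the `kubernetes_version` input from `schema.yaml`:

```
module "eks_blueprints" {
  # Bump this ref to adopt a newer EKS Blueprints release
  source = "github.com/aws-ia/terraform-aws-eks-blueprints?ref=v4.16.0"

  cluster_name    = var.cluster_name       # assumed variable name
  cluster_version = var.kubernetes_version # driven by the schema.yaml input
  # ... VPC, managed node group, and other settings omitted ...
}
```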
223 | 224 | > Note: Kubernetes cluster upgrades are often heavy operations that have ramifications into add-on components and modules you use. In some cases they also have ramifications into how applications running on Kubernetes are defined. While EKS Blueprints help in that regard, it's important that you test these upgrades throughout. Also consider that there are a number of strategies you can use to surface Kubernetes versions in your Proton templates. In our quick example we have shown how to have multiple Kubernetes versions in the same template but another tactic could be to separate Kubernetes versions in dedicated Proton major template versions (in a 1:1 mapping between the Kubernetes version and the Proton template major version). Each approach has advantages and disadvantages that you need to factor in. You can read more about Proton templates versions [here](https://docs.aws.amazon.com/proton/latest/adminguide/ag-template-versions.html). 225 | 226 | #### Deleting the cluster 227 | 228 | When you are done with the test you may want to delete the cluster to avoid incurring into undesired infrastructure costs. From the Proton console go into the environment you have deployed and select `Delete`. This will trigger the same workflow of the deployment and the update. Proton will open a PR against the repository which, when merged, will call the Terraform destroy workflows defined in GitHub Actions. 229 | -------------------------------------------------------------------------------- /env_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "*": { 3 | "role": "arn:aws:iam::111111111111:role/REPLACE_ME", 4 | "region": "us-west-2", 5 | "state_bucket":"REPLACE_ME" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /images/cluster_summary.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-blueprints-for-proton/b2c7706fc5623d2e109a632b59941718ec2d0ab6/images/cluster_summary.png -------------------------------------------------------------------------------- /images/configure_cluster_deployment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-blueprints-for-proton/b2c7706fc5623d2e109a632b59941718ec2d0ab6/images/configure_cluster_deployment.png -------------------------------------------------------------------------------- /images/edit_cluster_params.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-blueprints-for-proton/b2c7706fc5623d2e109a632b59941718ec2d0ab6/images/edit_cluster_params.png -------------------------------------------------------------------------------- /images/eksclusterview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-blueprints-for-proton/b2c7706fc5623d2e109a632b59941718ec2d0ab6/images/eksclusterview.png -------------------------------------------------------------------------------- /images/environt_template.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-blueprints-for-proton/b2c7706fc5623d2e109a632b59941718ec2d0ab6/images/environt_template.png -------------------------------------------------------------------------------- 
/images/proton_registry.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-blueprints-for-proton/b2c7706fc5623d2e109a632b59941718ec2d0ab6/images/proton_registry.png -------------------------------------------------------------------------------- /images/update_environment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-blueprints-for-proton/b2c7706fc5623d2e109a632b59941718ec2d0ab6/images/update_environment.png -------------------------------------------------------------------------------- /images/updated_environment_template.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/eks-blueprints-for-proton/b2c7706fc5623d2e109a632b59941718ec2d0ab6/images/updated_environment_template.png -------------------------------------------------------------------------------- /scripts/README.md: -------------------------------------------------------------------------------- 1 | # Core Resources 2 | 3 | In order to provision our Proton environments via Terraform from within Github Actions, we'll need to provision two core resources up front. 4 | 5 | 1. IAM Identity Provider and an IAM Role used by Github Actions to authenticate with AWS and run Terraform. 6 | 7 | 2. S3 Bucket to serve as our Terraform [remote state backend](https://www.terraform.io/language/state/remote). 8 | 9 | 10 | We have provided infrastructure as code to provision these core resources in both Terraform and CloudFormation. Feel free to use whichever you prefer. 11 | 12 | - [Terraform](./terraform/README.md) 13 | 14 | - [CloudFormation](./cloudformation/README.md) 15 | -------------------------------------------------------------------------------- /scripts/cloudformation/GitHubConfiguration.yaml: -------------------------------------------------------------------------------- 1 | Parameters: 2 | FullRepoName: 3 | Type: String 4 | ThumbprintList: 5 | Type: List 6 | 7 | Resources: 8 | Role: 9 | Type: AWS::IAM::Role 10 | # Creates the role that will be passed to Terraform to be able to deploy 11 | # infrastructure in your AWS account. You will enter this role in your 12 | # env_config.json file 13 | # Note that this role has administrator access to your account, so that 14 | # it can be used to provision any infrastructure in your templates. 
We 15 | # recommend you scope down the role permissions to the resources that will be used 16 | # in your Proton templates 17 | Properties: 18 | RoleName: ExampleGithubRole 19 | ManagedPolicyArns: [arn:aws:iam::aws:policy/AdministratorAccess] 20 | AssumeRolePolicyDocument: 21 | Statement: 22 | - Effect: Allow 23 | Action: sts:AssumeRoleWithWebIdentity 24 | Principal: 25 | Federated: !Ref GithubOidc 26 | Condition: 27 | StringLike: 28 | token.actions.githubusercontent.com:sub: !Sub repo:${FullRepoName}:* 29 | 30 | GithubOidc: 31 | # This identity provider is required to accept OpenID Connect credentials 32 | # from GitHub 33 | Type: AWS::IAM::OIDCProvider 34 | Properties: 35 | Url: https://token.actions.githubusercontent.com 36 | ThumbprintList: !Ref ThumbprintList 37 | ClientIdList: 38 | - sts.amazonaws.com 39 | 40 | StateFileBucket: 41 | # This bucket will be used to store your Terraform Open Source state files 42 | Type: AWS::S3::Bucket 43 | Properties: 44 | BucketName: !Join 45 | - '' 46 | - - 'aws-proton-terraform-bucket-' 47 | - !Ref AWS::AccountId 48 | Outputs: 49 | Role: 50 | Description: "The role that will be used to provision infrastructure. Enter it in your env_config.json file" 51 | Value: !GetAtt Role.Arn 52 | 53 | BucketName: 54 | Description: "Name of the bucket for our state files. Enter it in your env_config.json file" 55 | Value: !Ref StateFileBucket 56 | -------------------------------------------------------------------------------- /scripts/cloudformation/README.md: -------------------------------------------------------------------------------- 1 | # CloudFormation Setup 2 | 3 | Run `GitHubConfiguration.yaml` through CloudFormation (https://aws.amazon.com/cloudformation/). This will create an IAM role that GitHub Actions will use to provision resources into your account, as well as an S3 bucket to store our Proton environment's Terraform remote state. Make sure you use all lowercase names in the stack name, as we will use it to create an S3 bucket to save your state files. 4 | 5 | We have provided a script that launches the CFN Stack. This script will require the following utility installed on your system: 6 | - AWS CLI (with proper administrative credentials configured) 7 | - jq 8 | - openssl 9 | - sed 10 | 11 | One of the easiest ways to run this script is by leveraging the AWS Cloud Shell (note you need to add `openssl` using `sudo yum install openssl -y` because openssl is not part of the default Cloud Shell packages available). 12 | 13 | > :warning: **This sample template uses AdministratorAccess managed policy and is used for demo purpose only. You should use a more scoped down version of the policy** 14 | 15 | First, clone your forked repo (for me it's `git clone https://github.com/mreferre/eks-blueprints-for-proton`) and then export the `GITHUB_USER` variable with your own GH org/user name (this will point to your own fork of the repo): 16 | ```sh 17 | export GITHUB_USER= 18 | ``` 19 | Then launch the CFN stack and extract its outputs by changing into the `scripts/cloudformation` directory and running the following script (remember to install the `openssl` package before you run it if you are using Cloud Shell): 20 | 21 | ```sh 22 | cd eks-blueprints-for-proton/scripts/cloudformation/ 23 | ./iac.sh 24 | ``` 25 | 26 | Once this part of the setup is completed, please resume the steps in the main README from [where you left](../../README.md#getting-started-and-one-off-configurations). 
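For reference, the two stack outputs map onto `env_config.json` roughly as follows. The account ID and region below are placeholders; use the role ARN and bucket name actually printed by the script (the stack names the role `ExampleGithubRole` and the bucket `aws-proton-terraform-bucket-<account-id>`):

```json
{
  "*": {
    "role": "arn:aws:iam::111111111111:role/ExampleGithubRole",
    "region": "us-west-2",
    "state_bucket": "aws-proton-terraform-bucket-111111111111"
  }
}
```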
27 | -------------------------------------------------------------------------------- /scripts/cloudformation/iac.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Get the Thumbprint for GitHub OIDC provider 5 | HOST=$(curl -s https://vstoken.actions.githubusercontent.com/.well-known/openid-configuration | jq -r '.jwks_uri | split("/")[2]') 6 | 7 | THUMBPRINT=$(echo | openssl s_client -servername $HOST -showcerts -connect $HOST:443 2> /dev/null | sed -n -e '/BEGIN/h' -e '/BEGIN/,/END/H' -e '$x' -e '$p' | tail -n +2 | openssl x509 -fingerprint -noout | sed -e "s/.*=//" -e "s/://g" | tr "ABCDEF" "abcdef") 8 | 9 | 10 | # Create CloudFormation Stack 11 | aws cloudformation create-stack --stack-name aws-proton-terraform-role-stack \ 12 | --template-body file:///$PWD/GitHubConfiguration.yaml \ 13 | --parameters ParameterKey=FullRepoName,ParameterValue=$GITHUB_USER/eks-blueprints-for-proton \ 14 | ParameterKey=ThumbprintList,ParameterValue=$THUMBPRINT \ 15 | --capabilities CAPABILITY_NAMED_IAM 16 | 17 | # Wait for Stack creation to complete 18 | aws cloudformation wait stack-create-complete --stack-name aws-proton-terraform-role-stack 19 | 20 | # Get the ARN of the IAM Role created by the stack 21 | aws cloudformation describe-stacks --stack-name aws-proton-terraform-role-stack | jq -r '.Stacks[0].Outputs[] | select(.OutputKey=="Role") | .OutputValue' 22 | 23 | # Get the bucket name created by the stack 24 | aws cloudformation describe-stacks --stack-name aws-proton-terraform-role-stack | jq -r '.Stacks[0].Outputs[] | select(.OutputKey=="BucketName") | .OutputValue' 25 | -------------------------------------------------------------------------------- /scripts/terraform/README.md: -------------------------------------------------------------------------------- 1 | # Terraform Setup 2 | 3 | Apply this Terraform module to create a role that GitHub Actions will use to provision resources into your account, as well as an S3 bucket to store our Proton environment's Terraform remote state. 4 | 5 | This script requires that you have [Terraform installed](https://learn.hashicorp.com/tutorials/terraform/install-cli) as well as proper admin [credentials configured](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#environment-variables). 6 | 7 | > :warning: **This sample template uses AdministratorAccess managed policy and is used for demo purpose only. You should use a more scoped down version of the policy** 8 | 9 | First export the `GITHUB_USER` variable with your own GH org/user name (this will point to your own fork of the repo): 10 | 11 | ```sh 12 | export GITHUB_USER= 13 | ``` 14 | 15 | Then run the script in this directory to use Terraform to provision the resources. 16 | 17 | ```sh 18 | cd scripts/terraform/ 19 | ./iac.sh 20 | ``` 21 | 22 | Once this part of the setup is completed, please resume the steps in the main README from [where you left](../../README.md#getting-started-and-one-off-configurations). 
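If you need to recover the two values for `env_config.json` later, you can read them back from the Terraform state. This assumes you run the commands from this directory, where `iac.sh` initialized the state; the output names match those declared in `outputs.tf`:

```sh
# Print the IAM role ARN and the state bucket name created above
terraform output role
terraform output bucket
```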
23 | -------------------------------------------------------------------------------- /scripts/terraform/iac.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | terraform init -upgrade=true 6 | TF_VAR_github_org=$GITHUB_USER \ 7 | TF_VAR_github_repo=eks-blueprints-for-proton \ 8 | terraform apply -auto-approve 9 | -------------------------------------------------------------------------------- /scripts/terraform/main.tf: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Common Locals 3 | ################################################################################ 4 | 5 | locals { 6 | tags = { 7 | GithubRepo = "github.com/aws-samples/eks-blueprints-for-proton" 8 | } 9 | } 10 | 11 | ################################################################################ 12 | # Common Data 13 | ################################################################################ 14 | 15 | data "aws_caller_identity" "current" {} 16 | 17 | ################################################################################ 18 | # GitHub OIDC IAM role 19 | /* 20 | Creates the role that will be passed to Terraform to be able to deploy 21 | infrastructure in your AWS account. You will enter this role in your 22 | env_config.json file. 23 | 24 | Note that this role has administrator access to your account, so that 25 | it can be used to provision any infrastructure in your templates. We 26 | recommend you scope down the role permissions to the resources that will be used 27 | in your Proton templates. 28 | */ 29 | ################################################################################ 30 | 31 | data "tls_certificate" "github_actions_oidc_provider" { 32 | url = "https://token.actions.githubusercontent.com/.well-known/openid-configuration" 33 | } 34 | 35 | # This identity provider is required to accept OpenID Connect credentials 36 | resource "aws_iam_openid_connect_provider" "github" { 37 | url = "https://token.actions.githubusercontent.com" 38 | client_id_list = ["sts.amazonaws.com"] 39 | thumbprint_list = [data.tls_certificate.github_actions_oidc_provider.certificates[0].sha1_fingerprint] 40 | } 41 | 42 | data "aws_iam_policy_document" "github_actions_assume_role" { 43 | statement { 44 | actions = ["sts:AssumeRoleWithWebIdentity"] 45 | principals { 46 | type = "Federated" 47 | identifiers = [aws_iam_openid_connect_provider.github.arn] 48 | } 49 | condition { 50 | test = "StringLike" 51 | variable = "token.actions.githubusercontent.com:sub" 52 | values = ["repo:${var.github_org}/${var.github_repo}:*"] 53 | } 54 | } 55 | } 56 | 57 | resource "aws_iam_role" "github_actions" { 58 | name = "ExampleGithubRole" 59 | assume_role_policy = data.aws_iam_policy_document.github_actions_assume_role.json 60 | } 61 | 62 | resource "aws_iam_role_policy_attachment" "github_actions" { 63 | role = aws_iam_role.github_actions.name 64 | policy_arn = "arn:aws:iam::aws:policy/AdministratorAccess" 65 | } 66 | 67 | ################################################################################ 68 | # Terraform remote state S3 bucket 69 | ################################################################################ 70 | 71 | module "terraform_state_s3_bucket" { 72 | source = "terraform-aws-modules/s3-bucket/aws" 73 | version = "~> 3.0" 74 | 75 | bucket = 
"aws-proton-terraform-bucket-${data.aws_caller_identity.current.account_id}" 76 | 77 | attach_deny_insecure_transport_policy = true 78 | attach_require_latest_tls_policy = true 79 | 80 | acl = "private" 81 | 82 | block_public_acls = true 83 | block_public_policy = true 84 | ignore_public_acls = true 85 | restrict_public_buckets = true 86 | 87 | control_object_ownership = true 88 | object_ownership = "BucketOwnerPreferred" 89 | 90 | versioning = { 91 | status = true 92 | mfa_delete = false 93 | } 94 | 95 | server_side_encryption_configuration = { 96 | rule = { 97 | apply_server_side_encryption_by_default = { 98 | sse_algorithm = "AES256" 99 | } 100 | } 101 | } 102 | 103 | tags = local.tags 104 | } 105 | -------------------------------------------------------------------------------- /scripts/terraform/outputs.tf: -------------------------------------------------------------------------------- 1 | output "role" { 2 | description = "AWS IAM role ARN for GitHub OIDC integration" 3 | value = aws_iam_role.github_actions.arn 4 | } 5 | 6 | output "bucket" { 7 | description = "Name of the Terraform state S3 bucket" 8 | value = module.terraform_state_s3_bucket.s3_bucket_id 9 | } 10 | -------------------------------------------------------------------------------- /scripts/terraform/variables.tf: -------------------------------------------------------------------------------- 1 | variable "github_org" { 2 | description = "the name of the github organization" 3 | type = string 4 | } 5 | 6 | variable "github_repo" { 7 | description = "the name of the github repo" 8 | type = string 9 | } 10 | -------------------------------------------------------------------------------- /scripts/terraform/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "~> 4.0" 8 | } 9 | tls = { 10 | source = "hashicorp/tls" 11 | version = ">= 4.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /templates/eks-mng-karpenter-with-new-vpc/v1/infrastructure/README.md: -------------------------------------------------------------------------------- 1 | # EKS Managed Node Group w/ Karpenter Template 2 | 3 | 4 | ## Requirements 5 | 6 | | Name | Version | 7 | |------|---------| 8 | | [terraform](#requirement\_terraform) | >= 1.0 | 9 | | [aws](#requirement\_aws) | >= 4.10 | 10 | | [helm](#requirement\_helm) | >= 2.4.1 | 11 | | [kubernetes](#requirement\_kubernetes) | >= 2.6.1 | 12 | 13 | ## Providers 14 | 15 | | Name | Version | 16 | |------|---------| 17 | | [aws](#provider\_aws) | >= 4.10 | 18 | 19 | ## Modules 20 | 21 | | Name | Source | Version | 22 | |------|--------|---------| 23 | | [eks\_blueprints](#module\_eks\_blueprints) | github.com/aws-ia/terraform-aws-eks-blueprints | v4.16.0 | 24 | | [kubernetes\_addons](#module\_kubernetes\_addons) | github.com/aws-ia/terraform-aws-eks-blueprints//modules/kubernetes-addons | v4.16.0 | 25 | | [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | 26 | 27 | ## Resources 28 | 29 | | Name | Type | 30 | |------|------| 31 | | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | 32 | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | 33 | | 
[aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | 34 | | [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | 35 | 36 | ## Inputs 37 | 38 | | Name | Description | Type | Default | Required | 39 | |------|-------------|------|---------|:--------:| 40 | | [aws\_region](#input\_aws\_region) | AWS region where resources will be provisioned | `string` | `"us-west-2"` | no | 41 | 42 | ## Outputs 43 | 44 | | Name | Description | 45 | |------|-------------| 46 | | [cluster\_version](#output\_cluster\_version) | The version of the EKS cluster | 47 | | [eks\_cluster\_id](#output\_eks\_cluster\_id) | The name of the EKS cluster | 48 | | [enable\_aws\_for\_fluentbit](#output\_enable\_aws\_for\_fluentbit) | The flag for the AWS for Fluent Bit add-on | 49 | | [enable\_aws\_load\_balancer\_controller](#output\_enable\_aws\_load\_balancer\_controller) | The flag for the AWS Load Balancer Controller add-on | 50 | | [enable\_cert\_manager](#output\_enable\_cert\_manager) | The flag for the cert-manager add-on | 51 | | [enable\_karpenter](#output\_enable\_karpenter) | The flag for the Karpenter add-on | 52 | | [enable\_metrics\_server](#output\_enable\_metrics\_server) | The flag for the Metrics Server add-on | 53 | | [enable\_vpa](#output\_enable\_vpa) | The flag for the Vertical Pod Autoscaler (VPA) add-on | 54 | | [platform\_teams\_configure\_kubectl](#output\_platform\_teams\_configure\_kubectl) | The command to use to configure the kubeconfig file to be used with kubectl. | 55 | 56 | -------------------------------------------------------------------------------- /templates/eks-mng-karpenter-with-new-vpc/v1/infrastructure/eks.tf: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------------------- 2 | # EKS Cluster 3 | #------------------------------------------------------------------- 4 | 5 | module "eks_blueprints" { 6 | source = "github.com/aws-ia/terraform-aws-eks-blueprints?ref=v4.16.0" 7 | 8 | cluster_name = local.name 9 | cluster_version = var.environment.inputs.kubernetes_version 10 | 11 | vpc_id = module.vpc.vpc_id 12 | private_subnet_ids = module.vpc.private_subnets 13 | 14 | managed_node_groups = { 15 | default = { 16 | node_group_name = "default" 17 | instance_types = ["m5.xlarge"] 18 | min_size = 1 19 | max_size = 5 20 | desired_size = 3 21 | subnet_ids = module.vpc.private_subnets 22 | } 23 | } 24 | 25 | platform_teams = { 26 | platform-team = { 27 | users = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:user/${var.environment.inputs.user}"] 28 | } 29 | } 30 | 31 | tags = local.tags 32 | } 33 | 34 | #------------------------------------------------------------------- 35 | # Kubernetes Addons 36 | #------------------------------------------------------------------- 37 | 38 | module "kubernetes_addons" { 39 | source = "github.com/aws-ia/terraform-aws-eks-blueprints//modules/kubernetes-addons?ref=v4.16.0" 40 | 41 | eks_cluster_id = module.eks_blueprints.eks_cluster_id 42 | eks_cluster_endpoint = module.eks_blueprints.eks_cluster_endpoint 43 | eks_oidc_provider = module.eks_blueprints.oidc_provider 44 | eks_cluster_version = module.eks_blueprints.eks_cluster_version 45 | 46 | # EKS Managed Add-ons 47 | enable_amazon_eks_vpc_cni = true 48 | enable_amazon_eks_coredns = true 49 | enable_amazon_eks_kube_proxy = true 50 | enable_amazon_eks_aws_ebs_csi_driver = true
51 | 52 | # Add-ons 53 | enable_aws_for_fluentbit = var.environment.inputs.aws_for_fluentbit 54 | enable_aws_load_balancer_controller = var.environment.inputs.aws_load_balancer_controller 55 | enable_cert_manager = var.environment.inputs.cert_manager 56 | enable_karpenter = var.environment.inputs.karpenter 57 | enable_metrics_server = var.environment.inputs.metrics_server 58 | enable_vpa = var.environment.inputs.vpa 59 | 60 | tags = local.tags 61 | } 62 | -------------------------------------------------------------------------------- /templates/eks-mng-karpenter-with-new-vpc/v1/infrastructure/main.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = var.aws_region 3 | } 4 | 5 | provider "kubernetes" { 6 | host = module.eks_blueprints.eks_cluster_endpoint 7 | cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) 8 | token = data.aws_eks_cluster_auth.this.token 9 | } 10 | 11 | provider "helm" { 12 | kubernetes { 13 | host = module.eks_blueprints.eks_cluster_endpoint 14 | cluster_ca_certificate = base64decode(module.eks_blueprints.eks_cluster_certificate_authority_data) 15 | token = data.aws_eks_cluster_auth.this.token 16 | } 17 | } 18 | 19 | data "aws_eks_cluster_auth" "this" { 20 | name = module.eks_blueprints.eks_cluster_id 21 | } 22 | 23 | # Update for remote backend S3 bucket, region, and key 24 | terraform { 25 | backend "s3" {} 26 | } 27 | 28 | ################################################################################ 29 | # Common Locals 30 | ################################################################################ 31 | 32 | locals { 33 | name = var.environment.inputs.cluster_name 34 | 35 | tags = { 36 | Name = var.environment.inputs.cluster_name 37 | GithubRepo = "github.com/aws-samples/eks-blueprints-for-proton" 38 | } 39 | } 40 | 41 | ################################################################################ 42 | # Common Data 43 | ################################################################################ 44 | 45 | data "aws_availability_zones" "available" {} 46 | data "aws_partition" "current" {} 47 | data "aws_caller_identity" "current" {} 48 | -------------------------------------------------------------------------------- /templates/eks-mng-karpenter-with-new-vpc/v1/infrastructure/manifest.yaml: -------------------------------------------------------------------------------- 1 | infrastructure: 2 | templates: 3 | - file: "*" 4 | rendering_engine: hcl 5 | template_language: terraform 6 | -------------------------------------------------------------------------------- /templates/eks-mng-karpenter-with-new-vpc/v1/infrastructure/outputs.tf: -------------------------------------------------------------------------------- 1 | output "platform_teams_configure_kubectl" { 2 | description = "The command to use to configure the kubeconfig file to be used with kubectl." 
3 | value = tomap({ 4 | for k, v in module.eks_blueprints.teams[0].platform_teams_iam_role_arn : k => "aws eks --region ${var.aws_region} update-kubeconfig --name ${module.eks_blueprints.eks_cluster_id} --role-arn ${v}" 5 | })["platform-team"] 6 | } 7 | 8 | output "eks_cluster_id" { 9 | description = "The name of the EKS cluster" 10 | value = module.eks_blueprints.eks_cluster_id 11 | } 12 | 13 | output "cluster_version" { 14 | description = "The version of the EKS cluster" 15 | value = var.environment.inputs.kubernetes_version 16 | } 17 | 18 | output "enable_aws_load_balancer_controller" { 19 | description = "The flag for the AWS Load Balancer Controller add-on" 20 | value = var.environment.inputs.aws_load_balancer_controller 21 | } 22 | 23 | output "enable_karpenter" { 24 | description = "The flag for the Karpenter add-on" 25 | value = var.environment.inputs.karpenter 26 | } 27 | 28 | output "enable_metrics_server" { 29 | description = "The flag for the Metrics Server add-on" 30 | value = var.environment.inputs.metrics_server 31 | } 32 | 33 | output "enable_aws_for_fluentbit" { 34 | description = "The flag for the AWS for Fluent Bit add-on" 35 | value = var.environment.inputs.aws_for_fluentbit 36 | } 37 | 38 | output "enable_cert_manager" { 39 | description = "The flag for the cert-manager add-on" 40 | value = var.environment.inputs.cert_manager 41 | } 42 | 43 | output "enable_vpa" { 44 | description = "The flag for the Vertical Pod Autoscaler (VPA) add-on" 45 | value = var.environment.inputs.vpa 46 | } 47 | -------------------------------------------------------------------------------- /templates/eks-mng-karpenter-with-new-vpc/v1/infrastructure/variables.tf: -------------------------------------------------------------------------------- 1 | variable "aws_region" { 2 | description = "AWS region where resources will be provisioned" 3 | type = string 4 | default = "us-west-2" 5 | } 6 | 7 | # Proton creates the variable definition for this variable and therefore it should not be included 8 | # in this variable definition file.
9 | # See https://docs.aws.amazon.com/proton/latest/userguide/ag-infrastructure-tmp-files-terraform.html#compiled-tform 10 | # variable "environment" { 11 | # description = "Map of attributes passed from Proton to Terraform configuration" 12 | # type = any 13 | # } 14 | -------------------------------------------------------------------------------- /templates/eks-mng-karpenter-with-new-vpc/v1/infrastructure/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 4.10" 8 | } 9 | kubernetes = { 10 | source = "hashicorp/kubernetes" 11 | version = ">= 2.6.1" 12 | } 13 | helm = { 14 | source = "hashicorp/helm" 15 | version = ">= 2.4.1" 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /templates/eks-mng-karpenter-with-new-vpc/v1/infrastructure/vpc.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | vpc_cidr = var.environment.inputs.vpc_cidr 3 | azs = slice(data.aws_availability_zones.available.names, 0, 3) 4 | } 5 | 6 | module "vpc" { 7 | source = "terraform-aws-modules/vpc/aws" 8 | version = "~> 3.0" 9 | 10 | name = local.name 11 | cidr = local.vpc_cidr 12 | 13 | azs = local.azs 14 | public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k)] 15 | private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 10)] 16 | 17 | enable_nat_gateway = true 18 | single_nat_gateway = true 19 | enable_dns_hostnames = true 20 | 21 | # Manage the default VPC resources so they can be named and tagged 22 | manage_default_network_acl = true 23 | default_network_acl_tags = { Name = "${local.name}-default" } 24 | manage_default_route_table = true 25 | default_route_table_tags = { Name = "${local.name}-default" } 26 | manage_default_security_group = true 27 | default_security_group_tags = { Name = "${local.name}-default" } 28 | 29 | public_subnet_tags = { 30 | "kubernetes.io/role/elb" = "1" 31 | } 32 | 33 | private_subnet_tags = { 34 | "kubernetes.io/role/internal-elb" = "1" 35 | } 36 | 37 | tags = local.tags 38 | } 39 | -------------------------------------------------------------------------------- /templates/eks-mng-karpenter-with-new-vpc/v1/schema/schema.yaml: -------------------------------------------------------------------------------- 1 | schema: 2 | format: 3 | openapi: "3.0.0" 4 | environment_input_type: "EnvironmentInput" 5 | types: 6 | EnvironmentInput: 7 | type: object 8 | description: "Input properties for the EKS environment."
9 | required: 10 | - cluster_name 11 | - vpc_cidr 12 | - user 13 | - kubernetes_version 14 | properties: 15 | cluster_name: 16 | title: "Cluster name" 17 | type: string 18 | vpc_cidr: 19 | title: "VPC CIDR" 20 | type: string 21 | pattern: ([0-9]{1,3}\.){3}[0-9]{1,3}($|/(16|24)) 22 | default: 10.0.0.0/16 23 | user: 24 | title: "User" 25 | type: string 26 | description: "IAM User for cluster access" 27 | kubernetes_version: 28 | title: "Kubernetes version" 29 | type: string 30 | enum: ["1.23"] 31 | default: "1.23" 32 | aws_load_balancer_controller: 33 | title: "Enable AWS Load Balancer Controller Add-On" 34 | type: boolean 35 | default: true 36 | metrics_server: 37 | title: "Enable Metrics Server Add-On" 38 | type: boolean 39 | default: true 40 | aws_for_fluentbit: 41 | title: "Enable AWS for FluentBit Add-On" 42 | type: boolean 43 | default: true 44 | cert_manager: 45 | title: "Enable Cert Manager Add-On" 46 | type: boolean 47 | default: true 48 | vpa: 49 | title: "Enable VPA Add-On" 50 | type: boolean 51 | default: true 52 | karpenter: 53 | title: "Enable Karpenter Add-On" 54 | type: boolean 55 | default: true 56 | --------------------------------------------------------------------------------