├── .circleci └── config.yml ├── .gitignore ├── .pre-commit-config.yaml ├── CODEOWNERS ├── CONTRIBUTING.md ├── GRUNTWORK_PHILOSOPHY.md ├── LICENSE ├── README.md ├── examples ├── k8s-namespace-with-service-account │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── k8s-tiller-kubergrunt-minikube │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ └── variables.tf └── k8s-tiller-minikube │ └── README.md ├── main.tf ├── modules ├── k8s-helm-client-tls-certs │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── k8s-namespace-roles │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── k8s-namespace │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── k8s-service-account │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── k8s-tiller-tls-certs │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ └── variables.tf └── k8s-tiller │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── outputs.tf ├── test ├── Gopkg.lock ├── Gopkg.toml ├── README.md ├── k8s_namespace_with_service_account_test.go ├── k8s_tiller_kubergrunt_test.go ├── k8s_tiller_test.go ├── kubefixtures │ ├── curl-kubeapi-as-service-account.yml.tpl │ ├── namespace-check-create-pod.json.tpl │ └── namespace-check-list-pod.json.tpl └── terratest_options.go └── variables.tf /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | defaults: &defaults 2 | machine: 3 | enabled: true 4 | image: "ubuntu-1604:201903-01" 5 | environment: 6 | GRUNTWORK_INSTALLER_VERSION: v0.0.21 7 | TERRATEST_LOG_PARSER_VERSION: v0.13.13 8 | KUBERGRUNT_VERSION: v0.5.8 9 | HELM_VERSION: v2.12.2 10 | MODULE_CI_VERSION: v0.14.1 11 | TERRAFORM_VERSION: 0.12.11 12 | TERRAGRUNT_VERSION: NONE 13 | PACKER_VERSION: NONE 14 | GOLANG_VERSION: 1.11.2 15 | K8S_VERSION: v1.10.0 # Same as EKS 16 | KUBECONFIG: /home/circleci/.kube/config 17 | 18 | 19 | install_helm_client: &install_helm_client 20 | name: install 
helm 21 | command: | 22 | # install helm 23 | curl -Lo helm.tar.gz https://storage.googleapis.com/kubernetes-helm/helm-${HELM_VERSION}-linux-amd64.tar.gz 24 | tar -xvf helm.tar.gz 25 | chmod +x linux-amd64/helm 26 | sudo mv linux-amd64/helm /usr/local/bin/ 27 | 28 | 29 | install_gruntwork_utils: &install_gruntwork_utils 30 | name: install gruntwork utils 31 | command: | 32 | curl -Ls https://raw.githubusercontent.com/gruntwork-io/gruntwork-installer/master/bootstrap-gruntwork-installer.sh | bash /dev/stdin --version "${GRUNTWORK_INSTALLER_VERSION}" 33 | gruntwork-install --module-name "gruntwork-module-circleci-helpers" --repo "https://github.com/gruntwork-io/module-ci" --tag "${MODULE_CI_VERSION}" 34 | gruntwork-install --module-name "kubernetes-circleci-helpers" --repo "https://github.com/gruntwork-io/module-ci" --tag "${MODULE_CI_VERSION}" 35 | gruntwork-install --binary-name "terratest_log_parser" --repo "https://github.com/gruntwork-io/terratest" --tag "${TERRATEST_LOG_PARSER_VERSION}" 36 | configure-environment-for-gruntwork-module \ 37 | --circle-ci-2-machine-executor \ 38 | --terraform-version ${TERRAFORM_VERSION} \ 39 | --terragrunt-version ${TERRAGRUNT_VERSION} \ 40 | --packer-version ${PACKER_VERSION} \ 41 | --use-go-dep \ 42 | --go-version ${GOLANG_VERSION} \ 43 | --go-src-path test \ 44 | 45 | 46 | version: 2 47 | jobs: 48 | setup: 49 | <<: *defaults 50 | steps: 51 | - checkout 52 | - restore_cache: 53 | keys: 54 | - dep-{{ checksum "test/Gopkg.lock" }} 55 | 56 | # Install gruntwork utilities 57 | - run: 58 | <<: *install_gruntwork_utils 59 | 60 | - save_cache: 61 | key: dep-{{ checksum "test/Gopkg.lock" }} 62 | paths: 63 | - ./test/vendor 64 | 65 | # Fail the build if the pre-commit hooks don't pass. Note: if you run pre-commit install locally, these hooks will 66 | # execute automatically every time before you commit, ensuring the build never fails at this step! 
67 | - run: pip install pre-commit==1.11.2 68 | - run: pre-commit install 69 | - run: pre-commit run --all-files 70 | 71 | - persist_to_workspace: 72 | root: /home/circleci 73 | paths: 74 | - project 75 | - terraform 76 | - packer 77 | 78 | integration_tests: 79 | <<: *defaults 80 | steps: 81 | - attach_workspace: 82 | at: /home/circleci 83 | 84 | # The weird way you have to set PATH in Circle 2.0 85 | - run: echo 'export PATH=$HOME/terraform:$HOME/packer:$PATH' >> $BASH_ENV 86 | 87 | - run: 88 | <<: *install_gruntwork_utils 89 | 90 | - run: 91 | command: setup-minikube 92 | 93 | - run: 94 | <<: *install_helm_client 95 | 96 | - run: 97 | name: Install kubergrunt 98 | command: gruntwork-install --binary-name "kubergrunt" --repo "https://github.com/gruntwork-io/kubergrunt" --tag "${KUBERGRUNT_VERSION}" 99 | 100 | # Execute main terratests 101 | - run: 102 | name: run integration tests 103 | command: | 104 | mkdir -p /tmp/logs 105 | run-go-tests --path test --timeout 60m | tee /tmp/logs/all.log 106 | no_output_timeout: 3600s 107 | 108 | - run: 109 | command: terratest_log_parser --testlog /tmp/logs/all.log --outputdir /tmp/logs 110 | when: always 111 | - store_artifacts: 112 | path: /tmp/logs 113 | - store_test_results: 114 | path: /tmp/logs 115 | 116 | integration_tests_without_kubergrunt: 117 | <<: *defaults 118 | steps: 119 | - attach_workspace: 120 | at: /home/circleci 121 | 122 | # The weird way you have to set PATH in Circle 2.0 123 | - run: echo 'export PATH=$HOME/terraform:$HOME/packer:$PATH' >> $BASH_ENV 124 | 125 | - run: 126 | <<: *install_gruntwork_utils 127 | 128 | - run: 129 | command: setup-minikube 130 | 131 | # Execute main terratests 132 | - run: 133 | name: run integration tests 134 | command: | 135 | mkdir -p /tmp/logs 136 | run-go-tests --path test --timeout 60m --packages "-run TestK8STillerNoKubergrunt$ ." 
| tee /tmp/logs/all.log 137 | no_output_timeout: 3600s 138 | 139 | - run: 140 | command: terratest_log_parser --testlog /tmp/logs/all.log --outputdir /tmp/logs 141 | when: always 142 | - store_artifacts: 143 | path: /tmp/logs 144 | - store_test_results: 145 | path: /tmp/logs 146 | 147 | workflows: 148 | version: 2 149 | test-and-deploy: 150 | jobs: 151 | - setup: 152 | filters: 153 | tags: 154 | only: /^v.*/ 155 | 156 | - integration_tests: 157 | requires: 158 | - setup 159 | filters: 160 | tags: 161 | only: /^v.*/ 162 | 163 | - integration_tests_without_kubergrunt: 164 | requires: 165 | - setup 166 | filters: 167 | tags: 168 | only: /^v.*/ 169 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Terraform files 2 | .terraform 3 | terraform.tfstate 4 | terraform.tfvars 5 | *.tfstate* 6 | 7 | # OS X files 8 | .history 9 | .DS_Store 10 | 11 | # lambda zip files 12 | lambda.zip 13 | 14 | # IntelliJ files 15 | .idea_modules 16 | *.iml 17 | *.iws 18 | *.ipr 19 | .idea/ 20 | build/ 21 | */build/ 22 | out/ 23 | 24 | # Module artifacts 25 | os.txt 26 | 27 | # Go best practices dictate that libraries should not include the vendor directory 28 | vendor 29 | 30 | # Python stuff 31 | dist 32 | aws_auth_configmap_generator.* 33 | .python-version 34 | .tox 35 | __pycache__ 36 | *.pyc 37 | 38 | # Folder used to store temporary test data by Terratest 39 | .test-data 40 | 41 | # Generic temporary files 42 | /tmp 43 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/gruntwork-io/pre-commit 3 | sha: v0.0.2 4 | hooks: 5 | - id: terraform-fmt 6 | -------------------------------------------------------------------------------- /CODEOWNERS: 
-------------------------------------------------------------------------------- 1 | * @yorinasub17 2 | # Team: @bwhaley @robmorgan 3 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contribution Guidelines 2 | 3 | Contributions to this Package are very welcome! We follow a fairly standard [pull request process]( 4 | https://help.github.com/articles/about-pull-requests/) for contributions, subject to the following guidelines: 5 | 6 | 1. [File a GitHub issue](#file-a-github-issue) 7 | 1. [Update the documentation](#update-the-documentation) 8 | 1. [Update the tests](#update-the-tests) 9 | 1. [Update the code](#update-the-code) 10 | 1. [Create a pull request](#create-a-pull-request) 11 | 1. [Merge and release](#merge-and-release) 12 | 13 | ## File a GitHub issue 14 | 15 | Before starting any work, we recommend filing a GitHub issue in this repo. This is your chance to ask questions and 16 | get feedback from the maintainers and the community before you sink a lot of time into writing (possibly the wrong) 17 | code. If there is anything you're unsure about, just ask! 18 | 19 | ## Update the documentation 20 | 21 | We recommend updating the documentation *before* updating any code (see [Readme Driven 22 | Development](http://tom.preston-werner.com/2010/08/23/readme-driven-development.html)). This ensures the documentation 23 | stays up to date and allows you to think through the problem at a high level before you get lost in the weeds of 24 | coding. 25 | 26 | ## Update the tests 27 | 28 | We also recommend updating the automated tests *before* updating any code (see [Test Driven 29 | Development](https://en.wikipedia.org/wiki/Test-driven_development)). That means you add or update a test case, 30 | verify that it's failing with a clear error message, and *then* make the code changes to get that test to pass. 
This 31 | ensures the tests stay up to date and verify all the functionality in this Module, including whatever new 32 | functionality you're adding in your contribution. Check out the [tests](https://github.com/gruntwork-io/terraform-kubernetes-helm/tree/master/test) folder for instructions on running the 33 | automated tests. 34 | 35 | ## Update the code 36 | 37 | At this point, make your code changes and use your new test case to verify that everything is working. As you work, 38 | keep in mind two things: 39 | 40 | 1. Backwards compatibility 41 | 1. Downtime 42 | 43 | ### Backwards compatibility 44 | 45 | Please make every effort to avoid unnecessary backwards incompatible changes. With Terraform code, this means: 46 | 47 | 1. Do not delete, rename, or change the type of input variables. 48 | 1. If you add an input variable, it should have a `default`. 49 | 1. Do not delete, rename, or change the type of output variables. 50 | 1. Do not delete or rename a module in the `modules` folder. 51 | 52 | If a backwards incompatible change cannot be avoided, please make sure to call that out when you submit a pull request, 53 | explaining why the change is absolutely necessary. 54 | 55 | ### Downtime 56 | 57 | Bear in mind that the Terraform code in this Module is used by real companies to run real infrastructure in 58 | production, and certain types of changes could cause downtime. For example, consider the following: 59 | 60 | 1. If you rename a resource (e.g. `aws_instance "foo"` -> `aws_instance "bar"`), Terraform will see that as deleting 61 | the old resource and creating a new one. 62 | 1. If you change certain attributes of a resource (e.g. the `name` of an `aws_elb`), the cloud provider (e.g. AWS) may 63 | treat that as an instruction to delete the old resource and create a new one. 64 | 65 | Deleting certain types of resources (e.g. virtual servers, load balancers) can cause downtime, so when making code 66 | changes, think carefully about how to avoid that.
For example, can you avoid downtime by using 67 | [create_before_destroy](https://www.terraform.io/docs/configuration/resources.html#create_before_destroy)? Or via 68 | the `terraform state` command? If so, make sure to note this in our pull request. If downtime cannot be avoided, 69 | please make sure to call that out when you submit a pull request. 70 | 71 | 72 | ### Formatting and pre-commit hooks 73 | 74 | You must run `terraform fmt` on the code before committing. You can configure your computer to do this automatically 75 | using pre-commit hooks managed using [pre-commit](http://pre-commit.com/): 76 | 77 | 1. [Install pre-commit](http://pre-commit.com/#install). E.g.: `brew install pre-commit`. 78 | 1. Install the hooks: `pre-commit install`. 79 | 80 | That's it! Now just write your code, and every time you commit, `terraform fmt` will be run on the files you're 81 | committing. 82 | 83 | 84 | ## Create a pull request 85 | 86 | [Create a pull request](https://help.github.com/articles/creating-a-pull-request/) with your changes. Please make sure 87 | to include the following: 88 | 89 | 1. A description of the change, including a link to your GitHub issue. 90 | 1. The output of your automated test run, preferably in a [GitHub Gist](https://gist.github.com/). We cannot run 91 | automated tests for pull requests automatically due to [security 92 | concerns](https://circleci.com/docs/fork-pr-builds/#security-implications), so we need you to manually provide this 93 | test output so we can verify that everything is working. 94 | 1. Any notes on backwards incompatibility or downtime. 95 | 96 | ## Merge and release 97 | 98 | The maintainers for this repo will review your code and provide feedback. If everything looks good, they will merge the 99 | code and release a new version, which you'll be able to find in the [releases page](../../releases). 
100 | -------------------------------------------------------------------------------- /GRUNTWORK_PHILOSOPHY.md: -------------------------------------------------------------------------------- 1 | # Gruntwork Philosophy 2 | 3 | At Gruntwork, we strive to accelerate the deployment of production grade infrastructure by providing a library of 4 | stable, reusable, and battle tested infrastructure as code organized into a series of [modules](#what-is-a-module) with 5 | [submodules](#what-is-a-submodule). Each module represents a particular set of infrastructure that is componentized into 6 | smaller pieces represented by the submodules within the module. By doing so, we have built a composable library that can 7 | be combined into building out everything from simple single service deployments to complicated microservice setups so 8 | that your infrastructure can grow with your business needs. Every module we provide is built with the [production grade 9 | infrastructure checklist](#production-grade-infrastructure-checklist) in mind, ensuring that the services you deploy are 10 | resilient, fault tolerant, and scalable. 11 | 12 | 13 | ## What is a Module? 14 | 15 | A Module is a reusable, tested, documented, configurable, best-practices definition of a single piece of Infrastructure 16 | (e.g., Docker cluster, VPC, Jenkins, Consul), written using a combination of [Terraform](https://www.terraform.io/), Go, 17 | and Bash. A module contains a set of automated tests, documentation, and examples that have been proven in production, 18 | providing the underlying infrastructure for [Gruntwork's customers](https://www.gruntwork.io/customers). 19 | 20 | Instead of figuring out the details of how to run a piece of infrastructure from scratch, you can reuse existing code 21 | that has been proven in production.
And instead of maintaining all that infrastructure code yourself, you can leverage 22 | the work of the community to pick up infrastructure improvements through a version number bump. 23 | 24 | 25 | ## What is a Submodule? 26 | 27 | Each Infrastructure Module consists of one or more orthogonal Submodules that handle some specific aspect of that 28 | Infrastructure Module's functionality. Breaking the code up into multiple submodules makes it easier to reuse and 29 | compose to handle many different use cases. Although Modules are designed to provide an end to end solution to manage 30 | the relevant infrastructure by combining the Submodules defined in the Module, Submodules can be used independently for 31 | specific functionality that you need in your infrastructure code. 32 | 33 | 34 | ## Production Grade Infrastructure Checklist 35 | 36 | At Gruntwork, we have learned over the years that it is not enough to just get the services up and running in a publicly 37 | accessible space to call your application "production-ready." There are many more things to consider, and oftentimes 38 | many of these considerations are missing in the deployment plan of applications. These topics come up as afterthoughts, 39 | and are learned the hard way after the fact. That is why we codified all of them into a checklist that can be used as a 40 | reference to help ensure that they are considered before your application goes to production, and conscious decisions 41 | are made to neglect particular components if needed, as opposed to accidentally omitting them from consideration. 42 | 43 | 47 | 48 | | Task | Description | Example tools | 49 | |--------------------|-------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------| 50 | | Install | Install the software binaries and all dependencies. 
| Bash, Chef, Ansible, Puppet | 51 | | Configure | Configure the software at runtime. Includes port settings, TLS certs, service discovery, leaders, followers, replication, etc. | Bash, Chef, Ansible, Puppet | 52 | | Provision | Provision the infrastructure. Includes EC2 instances, load balancers, network topology, security gr oups, IAM permissions, etc. | Terraform, CloudFormation | 53 | | Deploy | Deploy the service on top of the infrastructure. Roll out updates with no downtime. Includes blue-green, rolling, and canary deployments. | Scripts, Orchestration tools (ECS, k8s, Nomad) | 54 | | High availability | Withstand outages of individual processes, EC2 instances, services, Availability Zones, and regions. | Multi AZ, multi-region, replication, ASGs, ELBs | 55 | | Scalability | Scale up and down in response to load. Scale horizontally (more servers) and/or vertically (bigger servers). | ASGs, replication, sharding, caching, divide and conquer | 56 | | Performance | Optimize CPU, memory, disk, network, GPU, and usage. Includes query tuning, benchmarking, load testing, and profiling. | Dynatrace, valgrind, VisualVM, ab, Jmeter | 57 | | Networking | Configure static and dynamic IPs, ports, service discovery, firewalls, DNS, SSH access, and VPN access. | EIPs, ENIs, VPCs, NACLs, SGs, Route 53, OpenVPN | 58 | | Security | Encryption in transit (TLS) and on disk, authentication, authorization, secrets management, server hardening. | ACM, EBS Volumes, Cognito, Vault, CIS | 59 | | Metrics | Availability metrics, business metrics, app metrics, server metrics, events, observability, tracing, and alerting. | CloudWatch, DataDog, New Relic, Honeycomb | 60 | | Logs | Rotate logs on disk. Aggregate log data to a central location. | CloudWatch logs, ELK, Sumo Logic, Papertrail | 61 | | Backup and Restore | Make backups of DBs, caches, and other data on a scheduled basis. Replicate to separate region/account. 
| RDS, ElastiCache, ec2-snapper, Lambda | 62 | | Cost optimization | Pick proper instance types, use spot and reserved instances, use auto scaling, and nuke unused resources. | ASGs, spot instances, reserved instances | 63 | | Documentation | Document your code, architecture, and practices. Create playbooks to respond to incidents. | READMEs, wikis, Slack | 64 | | Tests | Write automated tests for your infrastructure code. Run tests after every commit and nightly. | Terratest | 65 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2019 Gruntwork, Inc 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Maintained by Gruntwork.io](https://img.shields.io/badge/maintained%20by-gruntwork.io-%235849a6.svg)](https://gruntwork.io/?ref=repo_terraform_kubernetes_helm) 2 | [![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/gruntwork-io/terraform-kubernetes-helm.svg?label=latest)](https://github.com/gruntwork-io/terraform-kubernetes-helm/releases/latest) 3 | ![Terraform Version](https://img.shields.io/badge/tf-%3E%3D0.12.0-blue.svg) 4 | 5 | # Tiller Module 6 | 7 | 10 | 11 | **DEPRECATION NOTICE: This repo has been deprecated. The Namespace modules have been moved to [terraform-kubernetes-namespace](https://github.com/gruntwork-io/terraform-kubernetes-namespace). Refer to the [v0.1.0](https://github.com/gruntwork-io/terraform-kubernetes-namespace/releases/v0.1.0) release notes for migration instructions. 
Please use that module for continued functionality of managing Service Accounts and Namespaces.** 12 | 13 | **NOTE: This is for deploying Tiller, a major component of Helm v2. Tiller has been removed in Helm v3 and is no longer necessary. You do NOT need this module to use Helm v3.** 14 | 15 | This repo contains a Module for deploying Tiller (the server component of Helm) on Kubernetes clusters with 16 | [Terraform](https://www.terraform.io). This repo is a part of [the Gruntwork Infrastructure as Code 17 | Library](https://gruntwork.io/infrastructure-as-code-library/), a collection of reusable, battle-tested, production 18 | ready infrastructure code. Read the [Gruntwork 19 | Philosophy](https://github.com/gruntwork-io/terraform-kubernetes-helm/blob/master/GRUNTWORK_PHILOSOPHY.md) document to 20 | learn more about how Gruntwork builds production grade infrastructure code. 21 | 22 | 23 | ## Quickstart Guide 24 | 25 | The general idea is to: 26 | 27 | 1. Deploy a Kubernetes cluster. You can use one of the following: 28 | - [minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/) 29 | - [Our GKE module](https://github.com/gruntwork-io/terraform-google-gke/) 30 | - [Our EKS module](https://github.com/gruntwork-io/terraform-aws-eks/) 31 | 1. Setup a `kubectl` config context that is configured to authenticate to the deployed cluster. 32 | 1. Install the necessary prerequisite tools: 33 | - [`helm` client](https://docs.helm.sh/using_helm/#install-helm) 34 | - (Optional) [`kubergrunt`](https://github.com/gruntwork-io/kubergrunt#installation) 35 | 1. Provision a [`Namespace`](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) and 36 | [`ServiceAccount`](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) to house the 37 | Tiller instance. 38 | 1. Deploy Tiller.
39 | 40 | You can check out the [`k8s-tiller-minikube` example 41 | documentation](https://github.com/gruntwork-io/terraform-kubernetes-helm/tree/master/examples/k8s-tiller-minikube) for 42 | detailed instructions on deploying against `minikube`. 43 | 44 | 45 | ## What is in this repo 46 | 47 | This repo provides a Gruntwork IaC Package and has the following folder structure: 48 | 49 | * [root](https://github.com/gruntwork-io/terraform-kubernetes-helm): The root folder contains an example of how to 50 | deploy Tiller using [`kubergrunt`](https://github.com/gruntwork-io/kubergrunt), which implements all the logic for 51 | deploying Tiller with all the security best practices. 52 | * [modules](https://github.com/gruntwork-io/terraform-kubernetes-helm/tree/master/modules): This folder contains the 53 | main implementation code for this Module, broken down into multiple standalone Submodules. 54 | 55 | The primary module is: 56 | 57 | * [k8s-tiller](https://github.com/gruntwork-io/terraform-kubernetes-helm/tree/master/modules/k8s-tiller): Deploy 58 | Tiller with all the security features turned on. This includes using `Secrets` for storing state and enabling TLS 59 | verification. 60 | 61 | The deployed Tiller requires TLS certificate key pairs to operate. Additionally, clients will each need their 62 | own TLS certificate key pairs to authenticate to the deployed Tiller instance. This is based on [the kubergrunt model of 63 | deploying helm](https://github.com/gruntwork-io/kubergrunt/blob/master/HELM_GUIDE.md). 64 | 65 | There are also several supporting modules that help with setting up the deployment: 66 | 67 | * [k8s-namespace](https://github.com/gruntwork-io/terraform-kubernetes-helm/tree/master/modules/k8s-namespace): 68 | Provision a Kubernetes `Namespace` with a default set of RBAC roles.
69 | * [k8s-namespace-roles](https://github.com/gruntwork-io/terraform-kubernetes-helm/tree/master/modules/k8s-namespace-roles): 70 | Provision a default set of RBAC roles to use in a `Namespace`. 71 | * [k8s-service-account](https://github.com/gruntwork-io/terraform-kubernetes-helm/tree/master/modules/k8s-service-account): 72 | Provision a Kubernetes `ServiceAccount`. 73 | * [k8s-tiller-tls-certs](https://github.com/gruntwork-io/terraform-kubernetes-helm/tree/master/modules/k8s-tiller-tls-certs): 74 | Generate a TLS Certificate Authority (CA) and using that, generate signed TLS certificate key pairs that can be 75 | used for TLS verification of Tiller. The certs are managed on the cluster using Kubernetes `Secrets`. **NOTE**: 76 | This module uses the `tls` provider, which means the generated certificate key pairs are stored in plain text in 77 | the Terraform state file. If you are sensitive to secrets in Terraform state, consider using `kubergrunt` for TLS 78 | management. 79 | * [k8s-helm-client-tls-certs](https://github.com/gruntwork-io/terraform-kubernetes-helm/tree/master/modules/k8s-helm-client-tls-certs): 80 | Generate a signed TLS certificate key pair from a previously generated CA certificate key pair. This TLS key pair 81 | can be used to authenticate a helm client to access a deployed Tiller instance. **NOTE**: This module uses the 82 | `tls` provider, which means the generated certificate key pairs are stored in plain text in the Terraform state 83 | file. If you are sensitive to secrets in Terraform state, consider using `kubergrunt` for TLS management. 84 | 85 | * [examples](https://github.com/gruntwork-io/terraform-kubernetes-helm/tree/master/examples): This folder contains 86 | examples of how to use the Submodules. 87 | * [test](https://github.com/gruntwork-io/terraform-kubernetes-helm/tree/master/test): Automated tests for the Submodules 88 | and examples. 89 | 90 | 91 | ## What is Kubernetes? 
92 | 93 | [Kubernetes](https://kubernetes.io) is an open source container management system for deploying, scaling, and managing 94 | containerized applications. Kubernetes is built by Google based on their internal proprietary container management 95 | systems (Borg and Omega). Kubernetes provides a cloud agnostic platform to deploy your containerized applications with 96 | built in support for common operational tasks such as replication, autoscaling, self-healing, and rolling deployments. 97 | 98 | You can learn more about Kubernetes from [the official documentation](https://kubernetes.io/docs/tutorials/kubernetes-basics/). 99 | 100 | 101 | ## What is Helm? 102 | 103 | [Helm](https://helm.sh/) is a package and module manager for Kubernetes that allows you to define, install, and manage 104 | Kubernetes applications as reusable packages called Charts. Helm provides support for official charts in their 105 | repository that contains various applications such as Jenkins, MySQL, and Consul to name a few. Gruntwork uses Helm 106 | under the hood for the Kubernetes modules in this package. 107 | 108 | For a background on Helm and its security model, check out [our Helm Guide 109 | document](https://github.com/gruntwork-io/kubergrunt/blob/master/HELM_GUIDE.md). 110 | 111 | 112 | 113 | 114 | ## What's a Module? 115 | 116 | A Module is a canonical, reusable, best-practices definition for how to run a single piece of infrastructure, such 117 | as a database or server cluster. Each Module is written using a combination of [Terraform](https://www.terraform.io/) 118 | and scripts (mostly bash) and include automated tests, documentation, and examples. It is maintained both by the open 119 | source community and companies that provide commercial support. 120 | 121 | Instead of figuring out the details of how to run a piece of infrastructure from scratch, you can reuse 122 | existing code that has been proven in production. 
And instead of maintaining all that infrastructure code yourself, 123 | you can leverage the work of the Module community to pick up infrastructure improvements through 124 | a version number bump. 125 | 126 | 127 | ## Who maintains this Module? 128 | 129 | This Module and its Submodules are maintained by [Gruntwork](http://www.gruntwork.io/). If you are looking for help or 130 | commercial support, send an email to 131 | [support@gruntwork.io](mailto:support@gruntwork.io?Subject=Tiller%20Module). 132 | 133 | Gruntwork can help with: 134 | 135 | * Setup, customization, and support for this Module. 136 | * Modules and submodules for other types of infrastructure in major cloud providers, such as VPCs, Docker clusters, 137 | databases, and continuous integration. 138 | * Modules and Submodules that meet compliance requirements, such as HIPAA. 139 | * Consulting & Training on AWS, GCP, Terraform, and DevOps. 140 | 141 | 142 | ## How do I contribute to this Module? 143 | 144 | Contributions are very welcome! Check out the [Contribution 145 | Guidelines](https://github.com/gruntwork-io/terraform-kubernetes-helm/blob/master/CONTRIBUTING.md) for instructions. 146 | 147 | 148 | ## How is this Module versioned? 149 | 150 | This Module follows the principles of [Semantic Versioning](http://semver.org/). You can find each new release, along 151 | with the changelog, in the [Releases Page](https://github.com/gruntwork-io/terraform-kubernetes-helm/releases). 152 | 153 | During initial development, the major version will be 0 (e.g., `0.x.y`), which indicates the code does not yet have a 154 | stable API. Once we hit `1.0.0`, we will make every effort to maintain a backwards compatible API and use the MAJOR, 155 | MINOR, and PATCH versions on each release to indicate any incompatibilities. 156 | 157 | 158 | ## License 159 | 160 | Please see [LICENSE](https://github.com/gruntwork-io/terraform-kubernetes-helm/blob/master/LICENSE) for how the code in 161 | this repo is licensed. 
162 | 163 | Copyright © 2019 Gruntwork, Inc. 164 | -------------------------------------------------------------------------------- /examples/k8s-namespace-with-service-account/README.md: -------------------------------------------------------------------------------- 1 | # K8S Namespace with Service Account 2 | 3 | This folder shows an example of how to create a new namespace using the [`k8s-namespace`](/modules/k8s-namespace) module, and create two new `ServiceAccounts`: 4 | 5 | - One bound to `namespace-access-all` role 6 | - One bound to `namespace-access-read-only` role 7 | 8 | After this example you should have a new namespace with RBAC roles that can be used to grant read-write or read-only 9 | access to the namespace, and two `ServiceAccounts` that are bound to each of the roles. 10 | 11 | 12 | 13 | ## How do you run this example? 14 | 15 | To run this example, apply the Terraform templates: 16 | 17 | 1. Install [Terraform](https://www.terraform.io/), minimum version: `0.12.0`. 18 | 1. Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/). 19 | 1. Open `variables.tf`, set the environment variables specified at the top of the file, and fill in any other variables 20 | that don't have a default. 21 | 1. Run `terraform init`. 22 | 1. Run `terraform apply`. 23 | -------------------------------------------------------------------------------- /examples/k8s-namespace-with-service-account/main.tf: -------------------------------------------------------------------------------- 1 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 2 | # CREATE A NAMESPACE WITH DEFAULT RBAC ROLES AND SERVICE ACCOUNTS BOUND TO THE ROLES 3 | # These templates show an example of how to create a Kubernetes namespace with a set of default RBAC roles, and 4 | # ServiceAccounts that are bound to each default role.
5 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 6 | 7 | terraform { 8 | required_version = ">= 0.12" 9 | } 10 | 11 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 12 | # CONFIGURE OUR KUBERNETES CONNECTIONS 13 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 14 | 15 | provider "kubernetes" { 16 | version = "~> 1.5" 17 | config_context = var.kubectl_config_context_name 18 | config_path = var.kubectl_config_path 19 | } 20 | 21 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 22 | # CREATE THE NAMESPACE WITH RBAC ROLES 23 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 24 | 25 | module "namespace" { 26 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 27 | # to a specific version of the modules, such as the following example: 28 | # source = "git::https://github.com/gruntwork-io/terraform-kubernetes-helm.git//modules/k8s-namespace?ref=v0.0.1" 29 | source = "../../modules/k8s-namespace" 30 | 31 | create_resources = var.create_resources 32 | name = var.name 33 | } 34 | 35 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 36 | # CREATE THE SERVICE ACCOUNTS 37 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 38 | 39 | module "service_account_access_all" { 40 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 41 | # to a specific version of the modules, such as the following example: 42 | # source = 
"git::https://github.com/gruntwork-io/terraform-kubernetes-helm.git//modules/k8s-service-account?ref=v0.0.1" 43 | source = "../../modules/k8s-service-account" 44 | 45 | create_resources = var.create_resources 46 | name = "${var.name}-admin" 47 | namespace = module.namespace.name 48 | num_rbac_roles = 1 49 | 50 | rbac_roles = [ 51 | { 52 | name = module.namespace.rbac_access_all_role 53 | namespace = module.namespace.name 54 | }, 55 | ] 56 | 57 | # How to tag the service account with a label 58 | labels = { 59 | role = "admin" 60 | } 61 | } 62 | 63 | module "service_account_access_read_only" { 64 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 65 | # to a specific version of the modules, such as the following example: 66 | # source = "git::https://github.com/gruntwork-io/terraform-kubernetes-helm.git//modules/k8s-service-account?ref=v0.0.1" 67 | source = "../../modules/k8s-service-account" 68 | 69 | create_resources = var.create_resources 70 | name = "${var.name}-read-only" 71 | namespace = module.namespace.name 72 | num_rbac_roles = 1 73 | 74 | rbac_roles = [ 75 | { 76 | name = module.namespace.rbac_access_read_only_role 77 | namespace = module.namespace.name 78 | }, 79 | ] 80 | 81 | # How to tag the service account with a label 82 | labels = { 83 | role = "monitor" 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /examples/k8s-namespace-with-service-account/outputs.tf: -------------------------------------------------------------------------------- 1 | output "name" { 2 | description = "Name of the created namespace" 3 | value = module.namespace.name 4 | } 5 | 6 | output "rbac_access_all_role" { 7 | description = "The name of the RBAC role that grants admin level permissions on the namespace." 
8 | value = module.namespace.rbac_access_all_role 9 | } 10 | 11 | output "rbac_access_read_only_role" { 12 | description = "The name of the RBAC role that grants read only permissions on the namespace." 13 | value = module.namespace.rbac_access_read_only_role 14 | } 15 | 16 | output "service_account_access_all" { 17 | description = "The name of the ServiceAccount that has admin level permissions." 18 | value = module.service_account_access_all.name 19 | } 20 | 21 | output "service_account_access_read_only" { 22 | description = "The name of the ServiceAccount that has read only level permissions." 23 | value = module.service_account_access_read_only.name 24 | } 25 | -------------------------------------------------------------------------------- /examples/k8s-namespace-with-service-account/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # MODULE PARAMETERS 3 | # These variables are expected to be passed in by the operator 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "name" { 7 | description = "Name of the namespace to be created" 8 | type = string 9 | } 10 | 11 | variable "kubectl_config_context_name" { 12 | description = "The config context to use when authenticating to the Kubernetes cluster. If empty, defaults to the current context specified in the kubeconfig file." 13 | type = string 14 | default = "" 15 | } 16 | 17 | variable "kubectl_config_path" { 18 | description = "The path to the config file to use for kubectl. 
If empty, defaults to $HOME/.kube/config" 19 | type = string 20 | default = "~/.kube/config" 21 | } 22 | 23 | # --------------------------------------------------------------------------------------------------------------------- 24 | # TEST PARAMETERS 25 | # These variables are only used for testing purposes and should not be touched in normal operations. 26 | # --------------------------------------------------------------------------------------------------------------------- 27 | 28 | variable "create_resources" { 29 | description = "Set to false to have this module skip creating resources." 30 | type = bool 31 | default = true 32 | } 33 | -------------------------------------------------------------------------------- /examples/k8s-tiller-kubergrunt-minikube/README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Tiller Deployment With Kubergrunt On Minikube 2 | 3 | This folder shows an example of how to use Terraform to call out to our `kubergrunt` utility for TLS management when 4 | deploying Tiller (the server component of Helm) onto a Kubernetes cluster. Here we will walk through a detailed guide on 5 | how you can setup `minikube` and use the modules in this repo to deploy Tiller onto it. 6 | 7 | 8 | ## Background 9 | 10 | We strongly recommend reading [our guide on Helm](https://github.com/gruntwork-io/kubergrunt/blob/master/HELM_GUIDE.md) 11 | before continuing with this guide for a background on Helm, Tiller, and the security model backing it. 12 | 13 | 14 | ## Overview 15 | 16 | In this guide we will walk through the steps necessary to get up and running with deploying Tiller using this module, 17 | using `minikube` to deploy our target Kubernetes cluster. Here are the steps: 18 | 19 | 1. [Install and setup `minikube`](#setting-up-your-kubernetes-cluster-minikube) 20 | 1. [Install the necessary tools](#installing-necessary-tools) 21 | 1. [Apply the terraform code](#apply-the-terraform-code) 22 | 1. 
[Verify the deployment](#verify-tiller-deployment) 23 | 1. [Granting access to additional roles](#granting-access-to-additional-users) 24 | 1. [Upgrading the deployed Tiller instance](#upgrading-deployed-tiller) 25 | 26 | 27 | ## Setting up your Kubernetes cluster: Minikube 28 | 29 | In this guide, we will use `minikube` as our Kubernetes cluster to deploy Tiller to. 30 | [Minikube](https://kubernetes.io/docs/setup/minikube/) is an official tool maintained by the Kubernetes community to be 31 | able to provision and run Kubernetes locally on your machine. By having a local environment you can have fast iteration 32 | cycles while you develop and play with Kubernetes before deploying to production. 33 | 34 | To setup `minikube`: 35 | 36 | 1. [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) 37 | 1. [Install the minikube utility](https://kubernetes.io/docs/tasks/tools/install-minikube/) 38 | 1. Run `minikube start` to provision a new `minikube` instance on your local machine. 39 | 1. Verify setup with `kubectl`: `kubectl cluster-info` 40 | 41 | **Note**: This module has been tested to work against GKE and EKS as well. You can check out the examples in the 42 | respective repositories for how to deploy Tiller on those platforms. 43 | 44 | 45 | ## Installing necessary tools 46 | 47 | In addition to `terraform`, this guide uses `kubergrunt` to manage TLS certificates for the deployment of Tiller. You 48 | can read more about the decision behind this approach in [the Appendix](#appendix-a-why-kubergrunt) of this guide. 49 | 50 | This means that your system needs to be configured to be able to find `terraform`, `kubergrunt`, and `helm` client 51 | utilities on the system `PATH`. Here are the installation guides for each: 52 | 53 | 1. [`terraform`](https://learn.hashicorp.com/terraform/getting-started/install.html) 54 | 1. [`helm` client](https://docs.helm.sh/using_helm/#installing-helm) 55 | 1.
[`kubergrunt`](https://github.com/gruntwork-io/kubergrunt#installation), minimum version: v0.3.6 56 | 57 | Make sure the binaries are discoverable in your `PATH` variable. See [this stackoverflow 58 | post](https://stackoverflow.com/questions/14637979/how-to-permanently-set-path-on-linux-unix) for instructions on 59 | setting up your `PATH` on Unix, and [this 60 | post](https://stackoverflow.com/questions/1618280/where-can-i-set-path-to-make-exe-on-windows) for instructions on 61 | Windows. 62 | 63 | 64 | ## Apply the Terraform Code 65 | 66 | Now that we have a working Kubernetes cluster, and all the prerequisite tools are installed, we are ready to deploy 67 | Tiller! To deploy Tiller, we will use the example Terraform code in this folder: 68 | 69 | 1. If you haven't already, clone this repo: 70 | - `git clone https://github.com/gruntwork-io/terraform-kubernetes-helm.git` 71 | 1. Make sure you are in the example folder: 72 | - `cd terraform-kubernetes-helm/examples/k8s-tiller-kubergrunt-minikube` 73 | 1. Initialize terraform: 74 | - `terraform init` 75 | 1. Apply the terraform code: 76 | - `terraform apply` 77 | - Fill in the required variables based on your needs. 78 | 79 | The Terraform code creates a few resources before deploying Tiller: 80 | 81 | - A Kubernetes `Namespace` (the `tiller-namespace`) to house the Tiller instance. This namespace is where all the 82 | Kubernetes resources that Tiller needs to function will live. In production, you will want to lock down access to this 83 | namespace as being able to access these resources can compromise all the protections built into Helm. 84 | - A Kubernetes `Namespace` (the `resource-namespace`) to house the resources deployed by Tiller. This namespace is where 85 | all the Helm chart resources will be deployed into. This is the namespace that your devs and users will have access 86 | to. 87 | - A Kubernetes `ServiceAccount` (`tiller-service-account`) that Tiller will use to apply the resources in Helm charts.
88 | Our Terraform code grants enough permissions to the `ServiceAccount` to be able to have full access to both the 89 | `tiller-namespace` and the `resource-namespace`, so that it can: 90 | - Manage its own resources in the `tiller-namespace`, where the Tiller metadata (e.g release tracking information) will live. 91 | - Manage the resources deployed by helm charts in the `resource-namespace`. 92 | - Using `kubergrunt`, generate a TLS CA certificate key pair and a set of signed certificate key pairs for the server 93 | and the client. These will then be uploaded as `Secrets` on the Kubernetes cluster. 94 | 95 | These resources are then passed into the `k8s-tiller` module where the Tiller `Deployment` resources will be created. 96 | Once the resources are applied to the cluster, this will wait for the Tiller `Deployment` to roll out the `Pods` using 97 | `kubergrunt helm wait-for-tiller`. 98 | 99 | Finally, to allow you to use `helm` right away, this code also sets up the local `helm` client. This involves: 100 | 101 | - Using the CA TLS certificate key pair, create a signed TLS certificate key pair to use to identify the client. 102 | - Upload the certificate key pair to the `tiller-namespace`. 103 | - Grant the RBAC entity access to: 104 | - Get the client certificate `Secret` (`kubergrunt helm configure` uses this to install the client certificate 105 | key pair locally) 106 | - Get and List pods in `tiller-namespace` (the `helm` client uses this to find the Tiller pod) 107 | - Create a port forward to the Tiller pod (the `helm` client uses this to make requests to the Tiller pod) 108 | 109 | - Install the client certificate key pair to the helm home directory so the client can use it. 110 | 111 | At the end of the `apply`, you should now have a working Tiller deployment with your `helm` client configured to access 112 | it. So let's verify that in the next step! 
113 | 114 | 115 | ## Verify Tiller Deployment 116 | 117 | To start using `helm` with the configured credentials, you need to specify the following things: 118 | 119 | - enable TLS verification 120 | - use TLS credentials to authenticate 121 | - the namespace where Tiller is deployed 122 | 123 | These are specified through command line arguments. If everything is configured correctly, you should be able to access 124 | the Tiller that was deployed with the following args: 125 | 126 | ``` 127 | helm version --tls --tls-verify --tiller-namespace NAMESPACE_OF_TILLER 128 | ``` 129 | 130 | If you have access to Tiller, this should return you both the client version and the server version of Helm. 131 | 132 | Note that you need to pass the above CLI argument every time you want to use `helm`. This can be cumbersome, so 133 | `kubergrunt` installs an environment file into your helm home directory that you can dot source to set environment 134 | variables that guide `helm` to use those options: 135 | 136 | ``` 137 | . ~/.helm/env 138 | helm version 139 | ``` 140 | 141 | 142 | 143 | 144 | ## Granting Access to Additional Users 145 | 146 | Now that you have deployed Tiller and setup access for your local machine, you are ready to start using `helm`! However, 147 | you might be wondering how do you share the access with your team? To do so, you can rely on `kubergrunt helm grant`. 148 | 149 | In order to allow other users access to the deployed Tiller instance, you need to explicitly grant their RBAC entities 150 | permission to access it. This involves: 151 | 152 | - Granting enough permissions to access the Tiller pod 153 | - Generating and sharing TLS certificate key pairs to identify the client 154 | 155 | `kubergrunt` automates this process in the `grant` and `configure` commands. For example, suppose you wanted to grant 156 | access to the deployed Tiller to a group of users grouped under the RBAC group `dev`. 
You can grant them access using 157 | the following command: 158 | 159 | ``` 160 | kubergrunt helm grant --tiller-namespace NAMESPACE_OF_TILLER --rbac-group dev --tls-common-name dev --tls-org YOUR_ORG 161 | ``` 162 | 163 | This will generate a new certificate key pair for the client and upload it as a `Secret`. Then, it will bind new RBAC 164 | roles to the `dev` RBAC group that grants it permission to access the Tiller pod and the uploaded `Secret`. 165 | 166 | This in turn allows your users to configure their local client using `kubergrunt`: 167 | 168 | ``` 169 | kubergrunt helm configure --tiller-namespace NAMESPACE_OF_TILLER --rbac-group dev 170 | ``` 171 | 172 | At the end of this, your users should have the same helm client setup as above. 173 | 174 | 175 | ## Appendix A: Why kubergrunt? 176 | 177 | This Terraform example is not idiomatic Terraform code in that it relies on an external binary, `kubergrunt` as opposed 178 | to implementing the functionalities using pure Terraform providers. This approach has some noticeable drawbacks: 179 | 180 | - You have to install extra tools to use, so it is not a minimal `terraform init && terraform apply`. 181 | - Portability concerns to setup, as there is no guarantee the tools work cross platform. We make every effort to test 182 | across the major operating systems (Linux, Mac OSX, and Windows), but we can't possibly test every combination and so 183 | there are bound to be portability issues. 184 | - You don't have the declarative Terraform features that you come to love, such as `plan`, updates through `apply`, and 185 | `destroy`. 186 | 187 | That said, we decided to use this approach because of limitations in the existing providers to implement the 188 | functionalities here in pure Terraform code. 189 | 190 | `kubergrunt` fulfills the role of generating and managing TLS certificate key pairs using Kubernetes `Secrets` as a 191 | database. This allows us to deploy Tiller with TLS verification enabled. 
We could instead use the `tls` and `kubernetes` 192 | providers in Terraform, but this has a few drawbacks: 193 | 194 | - The [TLS provider](https://www.terraform.io/docs/providers/tls/index.html) stores the certificate key pairs in plain 195 | text into the Terraform state. 196 | - The Kubernetes Secret resource in the provider [also stores the value in plain text in the Terraform 197 | state](https://www.terraform.io/docs/providers/kubernetes/r/secret.html). 198 | - The grant and configure workflows are better suited as CLI tools than in Terraform. 199 | 200 | `kubergrunt` works around this by generating the TLS certs and storing them in Kubernetes `Secrets` directly. In this 201 | way, the generated TLS certs never leak into the Terraform state as they are referenced by name when deploying Tiller as 202 | opposed to by value. 203 | 204 | Note that we intend to implement a pure Terraform version of this functionality, but we plan to continue to maintain the 205 | `kubergrunt` approach for folks who are wary of leaking secrets into Terraform state. 206 | -------------------------------------------------------------------------------- /examples/k8s-tiller-kubergrunt-minikube/main.tf: -------------------------------------------------------------------------------- 1 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 2 | # DEPLOY TILLER INTO A NEW NAMESPACE 3 | # These templates show an example of how to deploy Tiller following security best practices. 
This entails: 4 | # - Creating a Namespace and ServiceAccount for Tiller 5 | # - Creating a separate Namespace for the resources to go into 6 | # - Using kubergrunt to deploy Tiller with TLS management 7 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 8 | 9 | terraform { 10 | required_version = ">= 0.12" 11 | } 12 | 13 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 14 | # CONFIGURE OUR KUBERNETES CONNECTIONS 15 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 16 | 17 | provider "kubernetes" { 18 | config_context = var.kubectl_config_context_name 19 | config_path = var.kubectl_config_path 20 | } 21 | 22 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 23 | # CREATE THE NAMESPACE WITH RBAC ROLES AND SERVICE ACCOUNT 24 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 25 | 26 | module "tiller_namespace" { 27 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 28 | # to a specific version of the modules, such as the following example: 29 | # source = "git::https://github.com/gruntwork-io/terraform-kubernetes-helm.git//modules/k8s-namespace?ref=v0.3.0" 30 | source = "../../modules/k8s-namespace" 31 | 32 | name = var.tiller_namespace 33 | } 34 | 35 | module "resource_namespace" { 36 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 37 | # to a specific version of the modules, such as the following example: 38 | # source = "git::https://github.com/gruntwork-io/terraform-kubernetes-helm.git//modules/k8s-namespace?ref=v0.3.0" 39 | source = "../../modules/k8s-namespace" 40 | 41 | 
name = var.resource_namespace 42 | } 43 | 44 | module "tiller_service_account" { 45 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 46 | # to a specific version of the modules, such as the following example: 47 | # source = "git::https://github.com/gruntwork-io/terraform-kubernetes-helm.git//modules/k8s-service-account?ref=v0.3.0" 48 | source = "../../modules/k8s-service-account" 49 | 50 | name = var.service_account_name 51 | namespace = module.tiller_namespace.name 52 | num_rbac_roles = 2 53 | 54 | rbac_roles = [ 55 | { 56 | name = module.tiller_namespace.rbac_tiller_metadata_access_role 57 | namespace = module.tiller_namespace.name 58 | }, 59 | { 60 | name = module.resource_namespace.rbac_tiller_resource_access_role 61 | namespace = module.resource_namespace.name 62 | }, 63 | ] 64 | 65 | labels = { 66 | app = "tiller" 67 | } 68 | } 69 | 70 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 71 | # DEPLOY TILLER 72 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 73 | 74 | module "tiller" { 75 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 76 | # to a specific version of the modules, such as the following example: 77 | # source = "git::https://github.com/gruntwork-io/terraform-kubernetes-helm.git//modules/k8s-tiller?ref=v0.3.0" 78 | source = "../../modules/k8s-tiller" 79 | 80 | tiller_service_account_name = module.tiller_service_account.name 81 | tiller_service_account_token_secret_name = module.tiller_service_account.token_secret_name 82 | namespace = module.tiller_namespace.name 83 | tiller_image_version = var.tiller_version 84 | 85 | tiller_tls_gen_method = "kubergrunt" 86 | tiller_tls_subject = var.tls_subject 87 | private_key_algorithm = var.private_key_algorithm 88 | 
private_key_ecdsa_curve = var.private_key_ecdsa_curve 89 | private_key_rsa_bits = var.private_key_rsa_bits 90 | 91 | kubectl_config_context_name = var.kubectl_config_context_name 92 | kubectl_config_path = var.kubectl_config_path 93 | } 94 | 95 | # We use kubergrunt to wait for Tiller to be deployed. Any resources that depend on this can assume Tiller is 96 | # successfully deployed and up at that point. 97 | resource "null_resource" "wait_for_tiller" { 98 | provisioner "local-exec" { 99 | interpreter = local.is_windows ? ["PowerShell", "-Command"] : ["bash", "-c"] 100 | 101 | command = <<-EOF 102 | ${module.require_executables.executables["kubergrunt"]} helm wait-for-tiller ${local.esc_newl} 103 | --tiller-namespace ${module.tiller_namespace.name} ${local.esc_newl} 104 | --tiller-deployment-name ${module.tiller.deployment_name} ${local.esc_newl} 105 | --expected-tiller-version ${var.tiller_version} 106 | EOF 107 | } 108 | } 109 | 110 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 111 | # CONFIGURE OPERATOR HELM CLIENT 112 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 113 | 114 | resource "null_resource" "grant_helm_access" { 115 | count = var.configure_helm ? 1 : 0 116 | depends_on = [null_resource.wait_for_tiller] 117 | 118 | provisioner "local-exec" { 119 | interpreter = local.is_windows ? 
["PowerShell", "-Command"] : ["bash", "-c"] 120 | 121 | command = <<-EOF 122 | ${module.require_executables.executables["kubergrunt"]} helm grant ${local.esc_newl} 123 | --tiller-namespace ${module.tiller_namespace.name} ${local.esc_newl} 124 | ${local.kubectl_config_options} ${local.esc_newl} 125 | --tls-subject-json '${jsonencode(var.client_tls_subject)}' ${local.esc_newl} 126 | ${local.configure_args} 127 | 128 | ${module.require_executables.executables["kubergrunt"]} helm configure ${local.esc_newl} 129 | --helm-home ${local.helm_home_with_default} ${local.esc_newl} 130 | --tiller-namespace ${module.tiller_namespace.name} ${local.esc_newl} 131 | --resource-namespace ${module.resource_namespace.name} ${local.esc_newl} 132 | ${local.kubectl_config_options} ${local.esc_newl} 133 | ${local.configure_args} 134 | EOF 135 | } 136 | } 137 | 138 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 139 | # COMPUTATIONS 140 | # These locals compute various useful information used throughout this Terraform module. 141 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 142 | 143 | locals { 144 | kubectl_config_options = "${var.kubectl_config_context_name != "" ? "--kubectl-context-name ${var.kubectl_config_context_name}" : ""} ${var.kubectl_config_path != "" ? "--kubeconfig ${var.kubectl_config_path}" : ""}" 145 | 146 | helm_home_with_default = var.helm_home == "" ? pathexpand("~/.helm") : var.helm_home 147 | 148 | configure_args = var.helm_client_rbac_user != "" ? "--rbac-user ${var.helm_client_rbac_user}" : var.helm_client_rbac_group != "" ? "--rbac-group ${var.helm_client_rbac_group}" : var.helm_client_rbac_service_account != "" ? "--rbac-service-account ${var.helm_client_rbac_service_account}" : "" 149 | 150 | is_windows = module.os.name == "Windows" 151 | esc_newl = local.is_windows ? 
"`" : "\\" 152 | } 153 | 154 | module "os" { 155 | source = "git::https://github.com/gruntwork-io/package-terraform-utilities.git//modules/operating-system?ref=v0.1.0" 156 | } 157 | 158 | module "require_executables" { 159 | source = "git::https://github.com/gruntwork-io/package-terraform-utilities.git//modules/require-executable?ref=v0.1.0" 160 | 161 | required_executables = ["kubergrunt"] 162 | error_message = "The __EXECUTABLE_NAME__ binary is not available in your PATH. Install the binary by following the instructions at https://github.com/gruntwork-io/terraform-kubernetes-helm/blob/master/examples/k8s-tiller-kubergrunt-minikube/README.md#installing-necessary-tools, or update your PATH variable to search where you installed __EXECUTABLE_NAME__." 163 | } 164 | -------------------------------------------------------------------------------- /examples/k8s-tiller-kubergrunt-minikube/outputs.tf: -------------------------------------------------------------------------------- 1 | output "tiller_namespace" { 2 | description = "The name of the namespace that houses Tiller." 3 | value = module.tiller_namespace.name 4 | } 5 | 6 | output "resource_namespace" { 7 | description = "The name of the namespace where Tiller will deploy resources into." 8 | value = module.resource_namespace.name 9 | } 10 | -------------------------------------------------------------------------------- /examples/k8s-tiller-kubergrunt-minikube/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # MODULE PARAMETERS 3 | # These variables are expected to be passed in by the operator 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "tiller_namespace" { 7 | description = "The namespace to deploy Tiller into." 
8 | type = string 9 | } 10 | 11 | variable "resource_namespace" { 12 | description = "The namespace where the Helm chart resources will be deployed into by Tiller." 13 | type = string 14 | } 15 | 16 | variable "service_account_name" { 17 | description = "The name of the service account to use for Tiller." 18 | type = string 19 | } 20 | 21 | variable "tls_subject" { 22 | description = "The issuer information that contains the identifying information for the Tiller server. Used to generate the TLS certificate keypairs." 23 | type = map(string) 24 | 25 | default = { 26 | common_name = "tiller" 27 | org = "Gruntwork" 28 | } 29 | # Expects the following keys 30 | # - common_name 31 | # - org 32 | # - org_unit 33 | # - city 34 | # - state 35 | # - country 36 | } 37 | 38 | variable "client_tls_subject" { 39 | description = "The issuer information that contains the identifying information for the helm client of the operator. Used to generate the TLS certificate keypairs." 40 | type = map(string) 41 | 42 | default = { 43 | common_name = "admin" 44 | org = "Gruntwork" 45 | } 46 | # Expects the following keys 47 | # - common_name 48 | # - org 49 | # - org_unit 50 | # - city 51 | # - state 52 | # - country 53 | } 54 | 55 | # --------------------------------------------------------------------------------------------------------------------- 56 | # OPTIONAL MODULE PARAMETERS 57 | # These variables have reasonable defaults, but can be overridden. 58 | # --------------------------------------------------------------------------------------------------------------------- 59 | 60 | # Tiller configuration 61 | 62 | variable "tiller_version" { 63 | description = "The version of Tiller to deploy." 64 | type = string 65 | default = "v2.11.0" 66 | } 67 | 68 | # TLS algorithm configuration 69 | 70 | variable "private_key_algorithm" { 71 | description = "The name of the algorithm to use for private keys. Must be one of: RSA or ECDSA." 
72 | type = string 73 | default = "ECDSA" 74 | } 75 | 76 | variable "private_key_ecdsa_curve" { 77 | description = "The name of the elliptic curve to use. Should only be used if var.private_key_algorithm is ECDSA. Must be one of P224, P256, P384 or P521." 78 | type = string 79 | default = "P256" 80 | } 81 | 82 | variable "private_key_rsa_bits" { 83 | description = "The size of the generated RSA key in bits. Should only be used if var.private_key_algorithm is RSA." 84 | type = number 85 | default = 2048 86 | } 87 | 88 | # Kubectl options 89 | 90 | variable "kubectl_config_context_name" { 91 | description = "The config context to use when authenticating to the Kubernetes cluster. If empty, defaults to the current context specified in the kubeconfig file." 92 | type = string 93 | default = "" 94 | } 95 | 96 | variable "kubectl_config_path" { 97 | description = "The path to the config file to use for kubectl. If empty, defaults to $HOME/.kube/config" 98 | type = string 99 | default = "~/.kube/config" 100 | } 101 | 102 | # Helm client config options 103 | 104 | variable "configure_helm" { 105 | description = "Whether or not to configure the local helm client to authenticate to the deployed Tiller instance." 106 | type = bool 107 | default = true 108 | } 109 | 110 | variable "helm_home" { 111 | description = "The path to the home directory for helm that you wish to use for this deployment." 112 | type = string 113 | default = "" 114 | } 115 | 116 | variable "helm_client_rbac_user" { 117 | description = "If set, will setup the local helm client to authenticate using this RBAC user." 118 | type = string 119 | default = "" 120 | } 121 | 122 | variable "helm_client_rbac_group" { 123 | description = "If set, will setup the local helm client to authenticate using this RBAC group." 
124 | type = string 125 | default = "" 126 | } 127 | 128 | variable "helm_client_rbac_service_account" { 129 | description = "If set, will setup the local helm client to authenticate using this ServiceAccount. The ServiceAccount should be encoded as NAMESPACE/NAME." 130 | type = string 131 | default = "" 132 | } 133 | -------------------------------------------------------------------------------- /examples/k8s-tiller-minikube/README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Tiller Deployment On Minikube 2 | 3 | The root folder of this repo shows an example of how to use the Terraform modules in this repository to deploy 4 | Tiller (the server component of Helm) onto a Kubernetes cluster. Here we will walk through a detailed guide on how you 5 | can setup `minikube` and use this module to deploy Tiller onto it. 6 | 7 | **WARNING: The private keys generated in this example will be stored unencrypted in your Terraform state file. If you are 8 | sensitive to storing secrets in your Terraform state file, consider using `kubergrunt` to generate and manage your TLS 9 | certificate. See [the k8s-tiller-kubergrunt-minikube example](/examples/k8s-tiller-kubergrunt-minikube) for how to use 10 | `kubergrunt` for TLS management.** 11 | 12 | 13 | ## Background 14 | 15 | We strongly recommend reading [our guide on Helm](https://github.com/gruntwork-io/kubergrunt/blob/master/HELM_GUIDE.md) 16 | before continuing with this guide for a background on Helm, Tiller, and the security model backing it. 17 | 18 | 19 | ## Overview 20 | 21 | In this guide we will walk through the steps necessary to get up and running with deploying Tiller using this module, 22 | using `minikube` to deploy our target Kubernetes cluster. Here are the steps: 23 | 24 | 1. [Install and setup `minikube`](#setting-up-your-kubernetes-cluster-minikube) 25 | 1. [Install the necessary tools](#installing-necessary-tools) 26 | 1. 
[Apply the terraform code](#apply-the-terraform-code) 27 | 1. [Verify the deployment](#verify-tiller-deployment) 28 | 1. [Granting access to additional roles](#granting-access-to-additional-users) 29 | 1. [Upgrading the deployed Tiller instance](#upgrading-deployed-tiller) 30 | 31 | 32 | ## Setting up your Kubernetes cluster: Minikube 33 | 34 | In this guide, we will use `minikube` as our Kubernetes cluster to deploy Tiller to. 35 | [Minikube](https://kubernetes.io/docs/setup/minikube/) is an official tool maintained by the Kubernetes community to be 36 | able to provision and run Kubernetes locally your machine. By having a local environment you can have fast iteration 37 | cycles while you develop and play with Kubernetes before deploying to production. 38 | 39 | To setup `minikube`: 40 | 41 | 1. [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) 42 | 1. [Install the minikube utility](https://kubernetes.io/docs/tasks/tools/install-minikube/) 43 | 1. Run `minikube start` to provision a new `minikube` instance on your local machine. 44 | 1. Verify setup with `kubectl`: `kubectl cluster-info` 45 | 46 | **Note**: This module has been tested to work against GKE and EKS as well. You can checkout the examples in the 47 | respective repositories for how to deploy Tiller on those platforms. 48 | 49 | 50 | ## Installing necessary tools 51 | 52 | Additionally, this example depends on `terraform` and `helm`. Optionally, you can install `kubergrunt` which automates a 53 | few of the steps. Here are the installation guide for each: 54 | 55 | 1. [`terraform`](https://learn.hashicorp.com/terraform/getting-started/install.html) 56 | 1. [`helm` client](https://docs.helm.sh/using_helm/#installing-helm) 57 | 1. [`kubergrunt`](https://github.com/gruntwork-io/kubergrunt#installation), minimum version: v0.3.6 58 | 59 | Make sure the binaries are discoverble in your `PATH` variable. 
See [this stackoverflow 60 | post](https://stackoverflow.com/questions/14637979/how-to-permanently-set-path-on-linux-unix) for instructions on 61 | setting up your `PATH` on Unix, and [this 62 | post](https://stackoverflow.com/questions/1618280/where-can-i-set-path-to-make-exe-on-windows) for instructions on 63 | Windows. 64 | 65 | 66 | ## Apply the Terraform Code 67 | 68 | Now that we have a working Kubernetes cluster, and all the prerequisite tools are installed, we are ready to deploy 69 | Tiller! To deploy Tiller, we will use the example Terraform code at the root of this repo: 70 | 71 | 1. If you haven't already, clone this repo: 72 | - `git clone https://github.com/gruntwork-io/terraform-kubernetes-helm.git` 73 | 1. Make sure you are at the root of this repo: 74 | - `cd terraform-kubernetes-helm` 75 | 1. Initialize terraform: 76 | - `terraform init` 77 | 1. Apply the terraform code: 78 | - `terraform apply` 79 | - Fill in the required variables based on your needs. 80 | 81 | The Terraform code creates a few resources before deploying Tiller: 82 | 83 | - A Kubernetes `Namespace` (the `tiller-namespace`) to house the Tiller instance. This namespace is where all the 84 | Kubernetes resources that Tiller needs to function will live. In production, you will want to lock down access to this 85 | namespace as being able to access these resources can compromise all the protections built into Helm. 86 | - A Kubernetes `Namespace` (the `resource-namespace`) to house the resources deployed by Tiller. This namespace is where 87 | all the Helm chart resources will be deployed into. This is the namespace that your devs and users will have access 88 | to. 89 | - A Kubernetes `ServiceAccount` (`tiller-service-account`) that Tiller will use to apply the resources in Helm charts. 
90 | Our Terraform code grants enough permissions to the `ServiceAccount` to be able to have full access to both the 91 | `tiller-namespace` and the `resource-namespace`, so that it can: 92 | - Manage its own resources in the `tiller-namespace`, where the Tiller metadata (e.g release tracking information) will live. 93 | - Manage the resources deployed by helm charts in the `resource-namespace`. 94 | - Generate a TLS CA certificate key pair and a set of signed certificate key pairs for the server and the client. These 95 | will then be uploaded as `Secrets` on the Kubernetes cluster. 96 | 97 | These resources are then passed into the `k8s-tiller` module where the Tiller `Deployment` resources will be created. 98 | Once the resources are applied to the cluster, this will wait for the Tiller `Deployment` to roll out the `Pods` using 99 | `kubergrunt helm wait-for-tiller`. 100 | 101 | At the end of the `apply`, you should now have a working Tiller deployment. So let's verify that in the next step! 102 | 103 | 104 | ## Verify Tiller Deployment 105 | 106 | To start using `helm`, we must first configure our client with the generated TLS certificates. This is done by 107 | downloading the client side certificates in to the Helm home folder. The client side TLS certificates are available as 108 | outputs by the terraform code. We can store them in the home directory using the `terraform output` command: 109 | 110 | ```bash 111 | mkdir -p $HOME/.helm 112 | terraform output helm_client_tls_private_key_pem > "$HOME/.helm/client.pem" 113 | terraform output helm_client_tls_public_cert_pem > "$HOME/.helm/client.crt" 114 | terraform output helm_client_tls_ca_cert_pem > "$HOME/.helm/ca.crt" 115 | ``` 116 | 117 | Once the certificate key pairs are stored, we need to setup the default repositories where the helm charts are stored. 
118 | This can be done using the `helm init` command: 119 | 120 | ```bash 121 | helm init --client-only 122 | ``` 123 | 124 | If you have `kubergrunt` installed, the above steps can be automated in a single using the `helm configure` command of 125 | `kubergrunt`: 126 | 127 | ```bash 128 | kubergrunt helm configure \ 129 | --tiller-namespace $(terraform output tiller_namespace) \ 130 | --resource-namespace $(terraform output resource_namespace) \ 131 | --rbac-user minikube 132 | ``` 133 | 134 | Once the certificates are installed and the client is configured, you are ready to use `helm`. However, by default the 135 | `helm` client does not assume a TLS setup. In order for the `helm` client to properly communicate with the deployed 136 | Tiller instance, it needs to be told to use TLS verification. These are specified through command line arguments. If 137 | everything is configured correctly, you should be able to access the Tiller that was deployed with the following args: 138 | 139 | ``` 140 | helm version --tls --tls-verify --tiller-namespace NAMESPACE_OF_TILLER 141 | ``` 142 | 143 | If you have access to Tiller, this should return you both the client version and the server version of Helm. Note that 144 | you need to pass the above CLI argument every time you want to use `helm`. 145 | 146 | If you used `kubergrunt` to configure your helm client, it will install an environment file into your helm home 147 | directory that you can dot source to set environment variables that guide `helm` to use those options: 148 | 149 | ``` 150 | . ~/.helm/env 151 | helm version 152 | ``` 153 | 154 | This can be a convenient way to avoid specifying the TLS parameters for each and every `helm` command you run. 155 | 156 | 157 | 158 | 159 | ## Granting Access to Additional Users 160 | 161 | Now that you have deployed Tiller and setup access for your local machine, you are ready to start using `helm`! However, 162 | you might be wondering how do you share the access with your team? 
163 | 164 | In order to allow other users access to the deployed Tiller instance, you need to explicitly grant their RBAC entities 165 | permission to access it. This involves: 166 | 167 | - Granting enough permissions to access the Tiller pod 168 | - Generating and sharing TLS certificate key pairs to identify the client 169 | 170 | You have two options to do this: 171 | 172 | - [Using the `k8s-helm-client-tls-certs` module](#using-the-k8s-helm-client-tls-certs-module) 173 | - [Using `kubergrunt`](#using-kubergrunt) 174 | 175 | #### Using the k8s-helm-client-tls-certs module 176 | 177 | `k8s-helm-client-tls-certs` is designed to take a CA TLS cert generated using `k8s-tiller-tls-certs` and generate new 178 | signed TLS certs that can be used as verified clients. To use the module for this purpose, you can either call out to 179 | the module in your terraform code (like we do here to generate one for the operator), or use it directly as a temporary 180 | module. 181 | 182 | Follow these steps to use it as a temporary module: 183 | 184 | 1. Copy this module to your computer. 185 | 1. Open `variables.tf` and fill in the variables that do not have a default. 186 | 1. DO NOT configure Terraform remote state storage for this code. You do NOT want to store the state files as they will 187 | contain the private keys for the certificates. 188 | 1. DO NOT configure `store_in_kubernetes_secret` to `true`. You do NOT want to store the certificates in Kubernetes 189 | without the state file. 190 | 1. Run `terraform apply`. 191 | 1. Extract the generated certificates from the output and store to a file. E.g: 192 | 193 | ```bash 194 | terraform output tls_certificate_key_pair_private_key_pem > client.pem 195 | terraform output tls_certificate_key_pair_certificate_pem > client.crt 196 | terraform output ca_tls_certificate_key_pair_certificate_pem > ca.crt 197 | ``` 198 | 199 | 1. Share the extracted files with the user. 200 | 1. 
Delete your local Terraform state: `rm -rf terraform.tfstate*`. The Terraform state will contain the private keys for 201 | the certificates, so it's important to clean it up! 202 | 203 | The user can then install the certs and setup the client in a similar manner to the process described in [Verify Tiller 204 | Deployment](#verify-tiller-deployment) 205 | 206 | #### Using kubergrunt 207 | 208 | `kubergrunt` automates this process in the `grant` and `configure` commands. For example, suppose you wanted to grant 209 | access to the deployed Tiller to a group of users grouped under the RBAC group `dev`. You can grant them access using 210 | the following command: 211 | 212 | ``` 213 | kubergrunt helm grant --tiller-namespace NAMESPACE_OF_TILLER --rbac-group dev --tls-common-name dev --tls-org YOUR_ORG 214 | ``` 215 | 216 | This will generate a new certificate key pair for the client and upload it as a `Secret`. Then, it will bind new RBAC 217 | roles to the `dev` RBAC group that grants it permission to access the Tiller pod and the uploaded `Secret`. 218 | 219 | This in turn allows your users to configure their local client using `kubergrunt`: 220 | 221 | ``` 222 | kubergrunt helm configure --tiller-namespace NAMESPACE_OF_TILLER --rbac-group dev 223 | ``` 224 | 225 | At the end of this, your users should have the same helm client setup as above. 226 | -------------------------------------------------------------------------------- /main.tf: -------------------------------------------------------------------------------- 1 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 2 | # DEPLOY TILLER INTO A NEW NAMESPACE 3 | # These templates show an example of how to deploy Tiller following security best practices. 
This entails: 4 | # - Creating a Namespace and ServiceAccount for Tiller 5 | # - Creating a separate Namespace for the resources to go into 6 | # - Using kubergrunt to deploy Tiller with TLS management 7 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 8 | 9 | terraform { 10 | required_version = ">= 0.12" 11 | } 12 | 13 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 14 | # CONFIGURE OUR KUBERNETES CONNECTIONS 15 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 16 | 17 | provider "kubernetes" { 18 | config_context = var.kubectl_config_context_name 19 | config_path = var.kubectl_config_path 20 | } 21 | 22 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 23 | # CREATE THE NAMESPACE WITH RBAC ROLES AND SERVICE ACCOUNT 24 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 25 | 26 | module "tiller_namespace" { 27 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 28 | # to a specific version of the modules, such as the following example: 29 | # source = "git::https://github.com/gruntwork-io/terraform-kubernetes-helm.git//modules/k8s-namespace?ref=v0.3.0" 30 | source = "./modules/k8s-namespace" 31 | 32 | name = var.tiller_namespace 33 | } 34 | 35 | module "resource_namespace" { 36 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 37 | # to a specific version of the modules, such as the following example: 38 | # source = "git::https://github.com/gruntwork-io/terraform-kubernetes-helm.git//modules/k8s-namespace?ref=v0.3.0" 39 | source = "./modules/k8s-namespace" 40 | 41 | name = 
var.resource_namespace 42 | } 43 | 44 | module "tiller_service_account" { 45 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 46 | # to a specific version of the modules, such as the following example: 47 | # source = "git::https://github.com/gruntwork-io/terraform-kubernetes-helm.git//modules/k8s-service-account?ref=v0.3.0" 48 | source = "./modules/k8s-service-account" 49 | 50 | name = var.service_account_name 51 | namespace = module.tiller_namespace.name 52 | num_rbac_roles = 2 53 | 54 | rbac_roles = [ 55 | { 56 | name = module.tiller_namespace.rbac_tiller_metadata_access_role 57 | namespace = module.tiller_namespace.name 58 | }, 59 | { 60 | name = module.resource_namespace.rbac_tiller_resource_access_role 61 | namespace = module.resource_namespace.name 62 | }, 63 | ] 64 | 65 | labels = { 66 | app = "tiller" 67 | } 68 | } 69 | 70 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 71 | # DEPLOY TILLER 72 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 73 | 74 | module "tiller" { 75 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 76 | # to a specific version of the modules, such as the following example: 77 | # source = "git::https://github.com/gruntwork-io/terraform-kubernetes-helm.git//modules/k8s-tiller?ref=v0.3.0" 78 | source = "./modules/k8s-tiller" 79 | 80 | tiller_service_account_name = module.tiller_service_account.name 81 | tiller_service_account_token_secret_name = module.tiller_service_account.token_secret_name 82 | namespace = module.tiller_namespace.name 83 | tiller_image_version = var.tiller_version 84 | 85 | tiller_tls_gen_method = "provider" 86 | tiller_tls_subject = var.tls_subject 87 | private_key_algorithm = var.private_key_algorithm 88 | private_key_ecdsa_curve = 
var.private_key_ecdsa_curve 89 | private_key_rsa_bits = var.private_key_rsa_bits 90 | } 91 | 92 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 93 | # GENERATE CLIENT TLS CERTIFICATES FOR USE WITH HELM CLIENT 94 | # These certs will be stored in Kubernetes Secrets, in a format compatible with `kubergrunt helm configure` 95 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 96 | 97 | module "helm_client_tls_certs" { 98 | # When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you 99 | # to a specific version of the modules, such as the following example: 100 | # source = "git::https://github.com/gruntwork-io/terraform-kubernetes-helm.git//modules/k8s-helm-client-tls-certs?ref=v0.3.1" 101 | source = "./modules/k8s-helm-client-tls-certs" 102 | 103 | ca_tls_certificate_key_pair_secret_namespace = module.tiller.tiller_ca_tls_certificate_key_pair_secret_namespace 104 | ca_tls_certificate_key_pair_secret_name = module.tiller.tiller_ca_tls_certificate_key_pair_secret_name 105 | 106 | tls_subject = var.client_tls_subject 107 | tls_certificate_key_pair_secret_namespace = module.tiller_namespace.name 108 | 109 | # Kubergrunt expects client cert secrets to be stored under this name format 110 | 111 | tls_certificate_key_pair_secret_name = "tiller-client-${md5(local.rbac_entity_id)}-certs" 112 | tls_certificate_key_pair_secret_labels = { 113 | "gruntwork.io/tiller-namespace" = module.tiller_namespace.name 114 | "gruntwork.io/tiller-credentials" = "true" 115 | "gruntwork.io/tiller-credentials-type" = "client" 116 | } 117 | } 118 | 119 | locals { 120 | rbac_entity_id = var.grant_helm_client_rbac_user != "" ? var.grant_helm_client_rbac_user : var.grant_helm_client_rbac_group != "" ? var.grant_helm_client_rbac_group : var.grant_helm_client_rbac_service_account != "" ? 
var.grant_helm_client_rbac_service_account : "" 121 | } 122 | -------------------------------------------------------------------------------- /modules/k8s-helm-client-tls-certs/README.md: -------------------------------------------------------------------------------- 1 | # K8S Helm Client TLS Certs Module 2 | 3 | 6 | 7 | This Terraform Module can be used to generate a signed TLS certificate key pair that can be used to authenticate the 8 | `helm` client with Tiller. These certs are optionally then stored in a Kubernetes `Secret`, that can then be shared with 9 | the client. Note that the `Secret` is configured such that it is compatible with `kubergrunt helm configure` for setting 10 | up the `helm` client. 11 | 12 | This module assumes the CA certs are stored as Kubernetes `Secrets` on the cluster, either via `kubergrunt` or the 13 | [k8s-tiller-tls-certs module](https://github.com/gruntwork-io/terraform-kubernetes-helm/blob/master/modules/k8s-tiller-tls-certs). 14 | 15 | If you are unfamiliar with how TLS works, checkout [this primer on 16 | TLS/SSL](https://github.com/hashicorp/terraform-aws-vault/tree/master/modules/private-tls-cert#background). 17 | 18 | You can read more about Helm, Tiller, and their security model in our [Helm 19 | guide](https://github.com/gruntwork-io/kubergrunt/blob/master/HELM_GUIDE.md). 20 | 21 | **WARNING: The private keys generated by this module will be stored unencrypted in your Terraform state file. If you are 22 | sensitive to storing secrets in your Terraform state file, consider using `kubergrunt` to generate and manage your TLS 23 | certificate. See [the k8s-tiller-kubergrunt-minikube example](/examples/k8s-tiller-kubergrunt-minikube) for how to use 24 | `kubergrunt` for TLS management.** 25 | 26 | 27 | ## How do you use this module? 28 | 29 | * See the [root README](https://github.com/gruntwork-io/terraform-kubernetes-helm/blob/master/README.md) for 30 | instructions on using Terraform modules. 
31 | * This module uses [the `kubernetes` provider](https://www.terraform.io/docs/providers/kubernetes/index.html). 32 | * See the [examples](https://github.com/gruntwork-io/terraform-kubernetes-helm/blob/master/examples) folder for example 33 | usage. 34 | * See [variables.tf](https://github.com/gruntwork-io/terraform-kubernetes-helm/blob/master/modules/k8s-helm-client-tls-certs/variables.tf) 35 | for all the variables you can set on this module. 36 | * See [outputs.tf](https://github.com/gruntwork-io/terraform-kubernetes-helm/blob/master/modules/k8s-helm-client-tls-certs/outputs.tf) 37 | for all the variables that are outputed by this module. 38 | 39 | 40 | ## How do I configure Helm to use the generated TLS certs? 41 | 42 | To configure the `helm` client to use the generated TLS certs, the certs must first be downloaded on to the machine. 43 | There are two ways to access the generated TLS certs: 44 | 45 | - Directly using the module outputs 46 | - Via the Kubernetes `Secret` (if `var.store_in_kubernetes_secret` is `true`) 47 | 48 | These certs should be shared with the user so that they can install it on their machine. Once the certs are shared, they 49 | need to be stored on the local file system where the `helm` home directory is. By default the `helm` home directory is 50 | `$HOME/.helm`, but this is configurable. In the `helm` home directory, store the certs under the names: 51 | 52 | - `ca.crt`: The CA public certificate file, encoded in PEM format. 53 | - `client.pem`: The private key of the client TLS certificate key pair, encoded in PEM format. 54 | - `client.crt`: The public certificate of the client TLS certificate key pair, encoded in PEM format. 55 | 56 | Once the certificate key pairs are stored, the `helm` client will automatically discover them when connecting to Tiller 57 | with TLS enabled. You need to pass in the CLI args `--tls` and `--tls-verify` to enable TLS verification with the 58 | client. 
For example, to run the `ls` command to list releases: 59 | 60 | ```bash 61 | helm ls --tls --tls-verify 62 | ``` 63 | 64 | Note that the CLI args must come after the subcommand. 65 | -------------------------------------------------------------------------------- /modules/k8s-helm-client-tls-certs/main.tf: -------------------------------------------------------------------------------- 1 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 2 | # CREATE TLS CERTS AND STORE THEM IN KUBERNETES SECRETS 3 | # These templates generates a a signed TLS certificate key pair using CA certs stored in a Kubernetes Secret. These are 4 | # then stored in Kubernetes Secrets so that they can be used to authenticate to Tiller. 5 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 6 | 7 | # --------------------------------------------------------------------------------------------------------------------- 8 | # SET TERRAFORM REQUIREMENTS FOR RUNNING THIS MODULE 9 | # --------------------------------------------------------------------------------------------------------------------- 10 | 11 | terraform { 12 | required_version = ">= 0.12" 13 | } 14 | 15 | # --------------------------------------------------------------------------------------------------------------------- 16 | # CREATE A TLS CERTIFICATE SIGNED USING THE CA CERTIFICATE 17 | # --------------------------------------------------------------------------------------------------------------------- 18 | 19 | resource "tls_private_key" "cert" { 20 | algorithm = var.private_key_algorithm 21 | ecdsa_curve = var.private_key_ecdsa_curve 22 | rsa_bits = var.private_key_rsa_bits 23 | } 24 | 25 | resource "tls_cert_request" "cert" { 26 | key_algorithm = tls_private_key.cert.algorithm 27 | private_key_pem = tls_private_key.cert.private_key_pem 28 | 29 | dns_names = var.tls_certs_dns_names 30 | 
ip_addresses = var.tls_certs_ip_addresses 31 | 32 | subject { 33 | common_name = lookup(var.tls_subject, "common_name", null) 34 | organization = lookup(var.tls_subject, "organization", null) 35 | organizational_unit = lookup(var.tls_subject, "organizational_unit", null) 36 | street_address = local.tls_subject_maybe_street_address != "" ? split("\n", local.tls_subject_maybe_street_address) : [] 37 | locality = lookup(var.tls_subject, "locality", null) 38 | province = lookup(var.tls_subject, "province", null) 39 | country = lookup(var.tls_subject, "country", null) 40 | postal_code = lookup(var.tls_subject, "postal_code", null) 41 | serial_number = lookup(var.tls_subject, "serial_number", null) 42 | } 43 | } 44 | 45 | resource "tls_locally_signed_cert" "cert" { 46 | cert_request_pem = tls_cert_request.cert.cert_request_pem 47 | 48 | ca_key_algorithm = tls_private_key.cert.algorithm 49 | 50 | ca_private_key_pem = data.kubernetes_secret.ca_certs.data["${var.ca_tls_certificate_key_pair_secret_filename_base}.pem"] 51 | 52 | ca_cert_pem = data.kubernetes_secret.ca_certs.data["${var.ca_tls_certificate_key_pair_secret_filename_base}.crt"] 53 | 54 | validity_period_hours = var.validity_period_hours 55 | allowed_uses = var.tls_certs_allowed_uses 56 | } 57 | 58 | locals { 59 | tls_subject_maybe_street_address = lookup(var.tls_subject, "street_address", "") 60 | } 61 | 62 | # --------------------------------------------------------------------------------------------------------------------- 63 | # STORE SIGNED TLS CERTIFICATE IN KUBERNETES SECRET 64 | # --------------------------------------------------------------------------------------------------------------------- 65 | 66 | resource "kubernetes_secret" "signed_tls" { 67 | count = var.store_in_kubernetes_secret ? 
1 : 0 68 | 69 | metadata { 70 | namespace = var.tls_certificate_key_pair_secret_namespace 71 | name = var.tls_certificate_key_pair_secret_name 72 | labels = var.tls_certificate_key_pair_secret_labels 73 | annotations = var.tls_certificate_key_pair_secret_annotations 74 | } 75 | 76 | data = { 77 | "${var.tls_certificate_key_pair_secret_filename_base}.pem" = tls_private_key.cert.private_key_pem 78 | "${var.tls_certificate_key_pair_secret_filename_base}.pub" = tls_private_key.cert.public_key_pem 79 | "${var.tls_certificate_key_pair_secret_filename_base}.crt" = tls_locally_signed_cert.cert.cert_pem 80 | "${var.ca_tls_certificate_key_pair_secret_filename_base}.crt" = data.kubernetes_secret.ca_certs.data["${var.ca_tls_certificate_key_pair_secret_filename_base}.crt"] 81 | } 82 | } 83 | 84 | # --------------------------------------------------------------------------------------------------------------------- 85 | # DATA SOURCES 86 | # --------------------------------------------------------------------------------------------------------------------- 87 | 88 | # Lookup CA certificate info 89 | 90 | data "kubernetes_secret" "ca_certs" { 91 | metadata { 92 | name = var.ca_tls_certificate_key_pair_secret_name 93 | namespace = var.ca_tls_certificate_key_pair_secret_namespace 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /modules/k8s-helm-client-tls-certs/outputs.tf: -------------------------------------------------------------------------------- 1 | output "tls_certificate_key_pair_private_key_pem" { 2 | description = "The private key of the generated TLS certs in PEM format." 3 | value = tls_private_key.cert.private_key_pem 4 | sensitive = true 5 | } 6 | 7 | output "tls_certificate_key_pair_public_key_pem" { 8 | description = "The public key of the generated TLS certs in PEM format." 
9 | value = tls_private_key.cert.public_key_pem 10 | sensitive = true 11 | } 12 | 13 | output "tls_certificate_key_pair_certificate_pem" { 14 | description = "The public certificate of the generated TLS certs in PEM format." 15 | value = tls_locally_signed_cert.cert.cert_pem 16 | sensitive = true 17 | } 18 | 19 | output "ca_tls_certificate_key_pair_certificate_pem" { 20 | description = "The public certificate of the CA TLS certs in PEM format." 21 | 22 | value = data.kubernetes_secret.ca_certs.data["${var.ca_tls_certificate_key_pair_secret_filename_base}.crt"] 23 | 24 | sensitive = true 25 | } 26 | 27 | output "tls_certificate_key_pair_secret_namespace" { 28 | description = "Namespace where the signed TLS certificate key pair is stored." 29 | value = element( 30 | concat(kubernetes_secret.signed_tls.*.metadata.0.namespace, [""]), 31 | 0, 32 | ) 33 | } 34 | 35 | output "tls_certificate_key_pair_secret_name" { 36 | description = "Name of the Secret resource where the signed TLS certificate key pair is stored." 37 | value = element( 38 | concat(kubernetes_secret.signed_tls.*.metadata.0.name, [""]), 39 | 0, 40 | ) 41 | } 42 | -------------------------------------------------------------------------------- /modules/k8s-helm-client-tls-certs/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # MODULE PARAMETERS 3 | # These variables are expected to be passed in by the operator when calling this terraform module. 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | # TLS certificate information 7 | 8 | variable "tls_subject" { 9 | description = "The issuer information that contains the identifying information for the signed certificates. 
See https://www.terraform.io/docs/providers/tls/r/cert_request.html#common_name for a list of expected keys. Note that street_address must be a newline separated string as opposed to a list of strings." 10 | # We use an string type here instead of directly specifying the object, to allow certain keys to be optional. 11 | type = map(string) 12 | } 13 | 14 | # Kubernetes Secret information 15 | 16 | variable "ca_tls_certificate_key_pair_secret_namespace" { 17 | description = "Namespace where the CA certificate key pairs are stored." 18 | type = string 19 | } 20 | 21 | variable "ca_tls_certificate_key_pair_secret_name" { 22 | description = "Name to use for the Secret resource that stores the CA certificate key pairs." 23 | type = string 24 | } 25 | 26 | variable "tls_certificate_key_pair_secret_namespace" { 27 | description = "Namespace where the signed TLS certificate key pairs should be stored." 28 | type = string 29 | } 30 | 31 | variable "tls_certificate_key_pair_secret_name" { 32 | description = "Name to use for the Secret resource that stores the signed TLS certificate key pairs." 33 | type = string 34 | } 35 | 36 | # --------------------------------------------------------------------------------------------------------------------- 37 | # OPTIONAL MODULE PARAMETERS 38 | # These variables have defaults, but may be overridden by the operator. 39 | # --------------------------------------------------------------------------------------------------------------------- 40 | 41 | # TLS certificate information 42 | 43 | variable "private_key_algorithm" { 44 | description = "The name of the algorithm to use for private keys. Must be one of: RSA or ECDSA." 45 | type = string 46 | default = "ECDSA" 47 | } 48 | 49 | variable "private_key_ecdsa_curve" { 50 | description = "The name of the elliptic curve to use. Should only be used if var.private_key_algorithm is ECDSA. Must be one of P224, P256, P384 or P521." 
51 | type = string 52 | default = "P256" 53 | } 54 | 55 | variable "private_key_rsa_bits" { 56 | description = "The size of the generated RSA key in bits. Should only be used if var.private_key_algorithm is RSA." 57 | type = number 58 | default = 2048 59 | } 60 | 61 | variable "tls_certs_allowed_uses" { 62 | description = "List of keywords from RFC5280 describing a use that is permitted for the issued certificate. For more info and the list of keywords, see https://www.terraform.io/docs/providers/tls/r/self_signed_cert.html#allowed_uses." 63 | type = list(string) 64 | 65 | default = [ 66 | "key_encipherment", 67 | "digital_signature", 68 | "client_auth", 69 | ] 70 | } 71 | 72 | variable "tls_certs_dns_names" { 73 | description = "List of DNS names for which the certificate will be valid (e.g. tiller, foo.example.com)." 74 | type = list(string) 75 | default = [] 76 | } 77 | 78 | variable "tls_certs_ip_addresses" { 79 | description = "List of IP addresses for which the certificate will be valid (e.g. 127.0.0.1)." 80 | type = list(string) 81 | default = ["127.0.0.1"] 82 | } 83 | 84 | variable "validity_period_hours" { 85 | description = "The number of hours after initial issuing that the certificate will become invalid." 86 | type = number 87 | 88 | # 10 years 89 | default = 87660 90 | } 91 | 92 | # Kubernetes Secret information 93 | 94 | variable "store_in_kubernetes_secret" { 95 | description = "Whether or not to store the generated TLS certificate key pairs in Kubernetes Secret." 96 | type = bool 97 | default = true 98 | } 99 | 100 | variable "ca_tls_certificate_key_pair_secret_filename_base" { 101 | description = "Basename used for the TLS certificate files stored in the Secret." 102 | type = string 103 | default = "ca" 104 | } 105 | 106 | variable "tls_certificate_key_pair_secret_filename_base" { 107 | description = "Basename to use for the signed TLS certificate files stored in the Secret." 
108 | type = string 109 | default = "client" 110 | } 111 | 112 | variable "tls_certificate_key_pair_secret_labels" { 113 | description = "Labels to apply to the Secret resource that stores the signed TLS certificate key pairs." 114 | type = map(string) 115 | default = {} 116 | } 117 | 118 | variable "tls_certificate_key_pair_secret_annotations" { 119 | description = "Annotations to apply to the Secret resource that stores the signed TLS certificate key pairs." 120 | type = map(string) 121 | default = {} 122 | } 123 | -------------------------------------------------------------------------------- /modules/k8s-namespace-roles/README.md: -------------------------------------------------------------------------------- 1 | # K8S Namespace Roles Module 2 | 3 | 6 | 7 | This Terraform Module defines a set of common Kubernetes 8 | [RBAC `Roles`](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) for a `Namespace`. The following roles 9 | will be provided by this module: 10 | 11 | - `namespace-access-all`: Admin level permissions in the namespace. Ability to read, write, and delete all resources in 12 | the namespace. 13 | - `namespace-access-read-only`: Read only permissions to all resources in the namespace. 14 | - `namespace-tiller-metadata-access`: Minimal permissions for Tiller to manage its metadata in this namespace (if this 15 | namespace is where Tiller is deployed). 16 | - `namespace-tiller-resource-access`: Minimal permissions for Tiller to manage resources in this namespace as Helm 17 | charts. 18 | 19 | 20 | ## How do you use this module? 21 | 22 | * See the [root README](https://github.com/gruntwork-io/terraform-kubernetes-helm/blob/master/README.md) for 23 | instructions on using Terraform modules. 24 | * This module uses [the `kubernetes` provider](https://www.terraform.io/docs/providers/kubernetes/index.html). 25 | * See the [examples](https://github.com/gruntwork-io/terraform-kubernetes-helm/tree/master/examples) folder for example 26 | usage. 
27 | * See [variables.tf](https://github.com/gruntwork-io/terraform-kubernetes-helm/blob/master/modules/k8s-namespace-roles/variables.tf) 28 | for all the variables you can set on this module. 29 | * See [outputs.tf](https://github.com/gruntwork-io/terraform-kubernetes-helm/blob/master/modules/k8s-namespace-roles/outputs.tf) 30 | for all the variables that are outputed by this module. 31 | 32 | 33 | ## What is Kubernetes Role Based Access Control (RBAC)? 34 | 35 | [Role Based Access Control (RBAC)](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) is a method to regulate 36 | access to resources based on the role that individual users assume in an organization. Kubernetes allows you to define 37 | roles in the system that individual users inherit, and explicitly grant permissions to resources within the system to 38 | those roles. The Control Plane will then honor those permissions when accessing the resources on Kubernetes through 39 | clients such as `kubectl`. When combined with namespaces, you can implement sophisticated control schemes that limit the 40 | access of resources across the roles in your organization. 41 | 42 | The RBAC system is managed using `ClusterRole` and `ClusterRoleBinding` resources (or `Role` and `RoleBinding` resources 43 | if restricting to a single namespace). The `ClusterRole` (or `Role`) object defines a role in the Kubernetes system that 44 | has explicit permissions on what it can and cannot do. These roles are then bound to users and groups using the 45 | `ClusterRoleBinding` (or `RoleBinding`) resource. An important thing to note here is that you do not explicitly create 46 | users and groups using RBAC, and instead rely on the authentication system to implicitly create these entities. 47 | 48 | Refer to [the official documentation](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) for more 49 | information. 50 | 51 | 52 | ## How do you bind the Roles? 
53 | 54 | This module will create a set of RBAC roles that can then be bound to user and group entities to explicitly grant 55 | permissions to access that namespace. 56 | 57 | We can then use `kubectl` to bind the roles to the groups: 58 | ``` 59 | --- 60 | kind: RoleBinding 61 | apiVersion: rbac.authorization.k8s.io/v1 62 | metadata: 63 | name: core-role-binding 64 | namespace: core 65 | subjects: 66 | - kind: Group 67 | name: core 68 | apiGroup: rbac.authorization.k8s.io 69 | roleRef: 70 | kind: Role 71 | name: core-access-all 72 | apiGroup: rbac.authorization.k8s.io 73 | --- 74 | kind: RoleBinding 75 | apiVersion: rbac.authorization.k8s.io/v1 76 | metadata: 77 | name: analytics-role-binding 78 | namespace: analytics 79 | subjects: 80 | - kind: Group 81 | name: analytics 82 | apiGroup: rbac.authorization.k8s.io 83 | roleRef: 84 | kind: Role 85 | name: analytics-access-all 86 | apiGroup: rbac.authorization.k8s.io 87 | ``` 88 | 89 | When we apply this config with `kubectl`, users that are associated with the `core` RBAC group can now create and access 90 | resources deployed in the `core` namespace, and the `analytics` group can access resources in the `analytics` namespace. 91 | However, members of the `core` team can not access resources in the `analytics` namespace and vice versa. 92 | 93 | 94 | ## Why is this a Terraform Module and not a Helm Chart? 95 | 96 | This module uses Terraform to manage the `Namespace` and RBAC role resources instead of using Helm to support the use case of 97 | setting up Helm. When setting up the Helm server, you will want to setup a `Namespace` and `ServiceAccount` for the Helm 98 | server to be deployed with. This leads to a chicken and egg problem, where the `Namespace` and `ServiceAccount` needs to 99 | be created before Helm is available for use. As such, we rely on Terraform to set these core resources up. 
100 | -------------------------------------------------------------------------------- /modules/k8s-namespace-roles/main.tf: -------------------------------------------------------------------------------- 1 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 2 | # CREATE DEFAULT RBAC ROLES FOR A KUBERNETES NAMESPACE 3 | # These templates provision a set of default RBAC roles with permissions scoped to the provided namespace. 4 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 5 | 6 | # --------------------------------------------------------------------------------------------------------------------- 7 | # SET TERRAFORM REQUIREMENTS FOR RUNNING THIS MODULE 8 | # --------------------------------------------------------------------------------------------------------------------- 9 | 10 | terraform { 11 | required_version = ">= 0.12" 12 | } 13 | 14 | # --------------------------------------------------------------------------------------------------------------------- 15 | # SET MODULE DEPENDENCY RESOURCE 16 | # This works around a terraform limitation where we can not specify module dependencies natively. 17 | # See https://github.com/hashicorp/terraform/issues/1178 for more discussion. 18 | # By resolving and computing the dependencies list, we are able to make all the resources in this module depend on the 19 | # resources backing the values in the dependencies list. 
20 | # --------------------------------------------------------------------------------------------------------------------- 21 | 22 | resource "null_resource" "dependency_getter" { 23 | triggers = { 24 | instance = join(",", var.dependencies) 25 | } 26 | } 27 | 28 | # --------------------------------------------------------------------------------------------------------------------- 29 | # CREATE THE DEFAULT RBAC ROLES 30 | # This defines four default RBAC roles scoped to the namespace: 31 | # - namespace-access-all : Admin level permissions on all resources in the namespace. 32 | # - namespace-access-read-only: Read only permissions on all resources in the namespace. 33 | # - namespace-tiller-metadata-access: Minimal permissions for Tiller to manage its metadata in this namespace (if this 34 | # namespace is where Tiller is deployed). 35 | # - namespace-tiller-resource-access: Minimal permissions for Tiller to manage resources in this namespace as Helm 36 | # charts. 37 | # --------------------------------------------------------------------------------------------------------------------- 38 | 39 | resource "kubernetes_role" "rbac_role_access_all" { 40 | count = var.create_resources ? 1 : 0 41 | depends_on = [null_resource.dependency_getter] 42 | 43 | metadata { 44 | name = "${var.namespace}-access-all" 45 | namespace = var.namespace 46 | labels = var.labels 47 | annotations = var.annotations 48 | } 49 | 50 | rule { 51 | api_groups = ["*"] 52 | resources = ["*"] 53 | verbs = ["*"] 54 | } 55 | } 56 | 57 | resource "kubernetes_role" "rbac_role_access_read_only" { 58 | count = var.create_resources ? 
1 : 0 59 | depends_on = [null_resource.dependency_getter] 60 | 61 | metadata { 62 | name = "${var.namespace}-access-read-only" 63 | namespace = var.namespace 64 | labels = var.labels 65 | annotations = var.annotations 66 | } 67 | 68 | rule { 69 | api_groups = ["*"] 70 | resources = ["*"] 71 | verbs = ["get", "list", "watch"] 72 | } 73 | } 74 | 75 | # These RBAC role permissions are based on the official example regarding deploying Tiller in a namespace to manage 76 | # resources in another namespace. 77 | # See https://docs.helm.sh/using_helm/#example-deploy-tiller-in-a-namespace-restricted-to-deploying-resources-in-another-namespace 78 | 79 | resource "kubernetes_role" "rbac_tiller_metadata_access" { 80 | count = var.create_resources ? 1 : 0 81 | depends_on = [null_resource.dependency_getter] 82 | 83 | metadata { 84 | name = "${var.namespace}-tiller-metadata-access" 85 | namespace = var.namespace 86 | labels = var.labels 87 | annotations = var.annotations 88 | } 89 | 90 | rule { 91 | api_groups = ["", "extensions", "apps"] 92 | resources = ["secrets"] 93 | verbs = ["*"] 94 | } 95 | } 96 | 97 | resource "kubernetes_role" "rbac_tiller_resource_access" { 98 | count = var.create_resources ? 1 : 0 99 | depends_on = [null_resource.dependency_getter] 100 | 101 | metadata { 102 | name = "${var.namespace}-tiller-resource-access" 103 | namespace = var.namespace 104 | labels = var.labels 105 | annotations = var.annotations 106 | } 107 | 108 | rule { 109 | api_groups = [ 110 | "", 111 | "batch", 112 | "extensions", 113 | "apps", 114 | "rbac.authorization.k8s.io", # We include RBAC here because many helm charts create RBAC roles to minimize pod access. 
115 | ] 116 | 117 | resources = ["*"] 118 | verbs = ["*"] 119 | } 120 | 121 | # We include policy PodDisruptionBudget which is useful for the helm charts to manage 122 | rule { 123 | api_groups = [ 124 | "policy", 125 | ] 126 | 127 | resources = ["poddisruptionbudgets"] 128 | verbs = ["*"] 129 | } 130 | } 131 | -------------------------------------------------------------------------------- /modules/k8s-namespace-roles/outputs.tf: -------------------------------------------------------------------------------- 1 | output "rbac_access_all_role" { 2 | description = "The name of the RBAC role that grants admin level permissions on the namespace." 3 | value = element( 4 | concat(kubernetes_role.rbac_role_access_all.*.metadata.0.name, [""]), 5 | 0, 6 | ) 7 | } 8 | 9 | output "rbac_access_read_only_role" { 10 | description = "The name of the RBAC role that grants read only permissions on the namespace." 11 | value = element( 12 | concat( 13 | kubernetes_role.rbac_role_access_read_only.*.metadata.0.name, 14 | [""], 15 | ), 16 | 0, 17 | ) 18 | } 19 | 20 | output "rbac_tiller_metadata_access_role" { 21 | description = "The name of the RBAC role that grants minimal permissions for Tiller to manage its metadata. Use this role if Tiller will be deployed into this namespace." 22 | value = element( 23 | concat( 24 | kubernetes_role.rbac_tiller_metadata_access.*.metadata.0.name, 25 | [""], 26 | ), 27 | 0, 28 | ) 29 | } 30 | 31 | output "rbac_tiller_resource_access_role" { 32 | description = "The name of the RBAC role that grants minimal permissions for Tiller to manage resources in this namespace." 
33 | value = element( 34 | concat( 35 | kubernetes_role.rbac_tiller_resource_access.*.metadata.0.name, 36 | [""], 37 | ), 38 | 0, 39 | ) 40 | } 41 | -------------------------------------------------------------------------------- /modules/k8s-namespace-roles/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # MODULE PARAMETERS 3 | # These variables are expected to be passed in by the operator when calling this terraform module. 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "namespace" { 7 | description = "The name of the namespace where the roles should be created." 8 | type = string 9 | } 10 | 11 | # --------------------------------------------------------------------------------------------------------------------- 12 | # OPTIONAL MODULE PARAMETERS 13 | # These variables have defaults, but may be overridden by the operator. 14 | # --------------------------------------------------------------------------------------------------------------------- 15 | 16 | variable "labels" { 17 | description = "Map of string key value pairs that can be used to organize and categorize the roles. See the Kubernetes Reference for more info (https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)." 18 | type = map(string) 19 | default = {} 20 | } 21 | 22 | variable "annotations" { 23 | description = "Map of string key default pairs that can be used to store arbitrary metadata on the roles. See the Kubernetes Reference for more info (https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)." 24 | type = map(string) 25 | default = {} 26 | } 27 | 28 | variable "create_resources" { 29 | description = "Set to false to have this module skip creating resources. 
This weird parameter exists solely because Terraform does not support conditional modules. Therefore, this is a hack to allow you to conditionally decide if the Namespace roles should be created or not." 30 | type = bool 31 | default = true 32 | } 33 | 34 | # --------------------------------------------------------------------------------------------------------------------- 35 | # MODULE DEPENDENCIES 36 | # Workaround Terraform limitation where there is no module depends_on. 37 | # See https://github.com/hashicorp/terraform/issues/1178 for more details. 38 | # This can be used to make sure the module resources are created after other bootstrapping resources have been created. 39 | # For example, in GKE, the default permissions are such that you do not have enough authorization to be able to create 40 | # additional Roles in the system. Therefore, you need to first create a ClusterRoleBinding to promote your account 41 | # before you can apply this module. In this use case, you can pass in the ClusterRoleBinding as a dependency into this 42 | # module: 43 | # dependencies = ["${kubernetes_cluster_role_binding.user.metadata.0.name}"] 44 | # --------------------------------------------------------------------------------------------------------------------- 45 | 46 | variable "dependencies" { 47 | description = "Create a dependency between the resources in this module to the interpolated values in this list (and thus the source resources). In other words, the resources in this module will now depend on the resources backing the values in this list such that those resources need to be created before the resources in this module, and the resources in this module need to be destroyed before the resources in the list." 
48 | type = list(string) 49 | default = [] 50 | } 51 | -------------------------------------------------------------------------------- /modules/k8s-namespace/README.md: -------------------------------------------------------------------------------- 1 | # K8S Namespace Module 2 | 3 | 6 | 7 | This Terraform Module manages Kubernetes 8 | [`Namespaces`](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/). In addition to creating 9 | namespaces, this module will create a set of default RBAC roles restricted to that namespace. The following roles will 10 | be provided by this module: 11 | 12 | - `namespace-access-all`: Admin level permissions in the namespace. Ability to read, write, and delete all resources in 13 | the namespace. 14 | - `namespace-access-read-only`: Read only permissions to all resources in the namespace. 15 | - `namespace-tiller-metadata-access`: Minimal permissions for Tiller to manage its metadata in this namespace (if this 16 | namespace is where Tiller is deployed). 17 | - `namespace-tiller-resource-access`: Minimal permissions for Tiller to manage resources in this namespace as Helm 18 | charts. 19 | 20 | 21 | ## How do you use this module? 22 | 23 | * See the [root README](https://github.com/gruntwork-io/terraform-kubernetes-helm/blob/master/README.md) for instructions on using Terraform modules. 24 | * This module uses [the `kubernetes` provider](https://www.terraform.io/docs/providers/kubernetes/index.html). 25 | * See the [examples](https://github.com/gruntwork-io/terraform-kubernetes-helm/tree/master/examples) folder for example 26 | usage. 27 | * See [variables.tf](https://github.com/gruntwork-io/terraform-kubernetes-helm/blob/master/modules/k8s-namespace/variables.tf) 28 | for all the variables you can set on this module. 29 | * See [outputs.tf](https://github.com/gruntwork-io/terraform-kubernetes-helm/blob/master/modules/k8s-namespace/outputs.tf) 30 | for all the variables that are outputed by this module. 
31 | 32 | 33 | ## What is a Namespace? 34 | 35 | A `Namespace` is a Kubernetes resource that can be used to create a virtual environment in your cluster to separate 36 | resources. It allows you to scope resources in your cluster to provide finer grained permission control and resource 37 | quotas. 38 | 39 | For example, suppose that you have two different teams managing separate services independently, such that the team 40 | should not be allowed to update or modify the other teams' services. In such a scenario, you would use namespaces to 41 | separate the resources between each team, and implement RBAC roles that only grant access to the namespace if you reside 42 | in the team that manages it. 43 | 44 | To illustrate this, let's assume that we have a team that manages 45 | the application services related to the core product (named `core`) and another team managing analytics services (named 46 | `analytics`). Let's also assume that we have already created RBAC groups for each team, named `core-group` and 47 | `analytics-group`. 48 | 49 | We create the namespaces using this module: 50 | 51 | ``` 52 | module "core_namespace" { 53 | source = "git::https://github.com/gruntwork-io/terraform-kubernetes-helm.git//modules/k8s-namespace?ref=v0.1.0" 54 | name = "core" 55 | } 56 | 57 | module "analytics_namespace" { 58 | source = "git::https://github.com/gruntwork-io/terraform-kubernetes-helm.git//modules/k8s-namespace?ref=v0.1.0" 59 | name = "analytics" 60 | } 61 | ``` 62 | 63 | In addition to creating namespaces, this will also create a set of RBAC roles that can then be bound to user and group 64 | entities to explicitly grant permissions to access that namespace. 
65 | 66 | We can then use `kubectl` to bind the roles to the groups: 67 | ``` 68 | --- 69 | kind: RoleBinding 70 | apiVersion: rbac.authorization.k8s.io/v1 71 | metadata: 72 | name: core-role-binding 73 | namespace: core 74 | subjects: 75 | - kind: Group 76 | name: core 77 | apiGroup: rbac.authorization.k8s.io 78 | roleRef: 79 | kind: Role 80 | name: core-access-all 81 | apiGroup: rbac.authorization.k8s.io 82 | --- 83 | kind: RoleBinding 84 | apiVersion: rbac.authorization.k8s.io/v1 85 | metadata: 86 | name: analytics-role-binding 87 | namespace: analytics 88 | subjects: 89 | - kind: Group 90 | name: analytics 91 | apiGroup: rbac.authorization.k8s.io 92 | roleRef: 93 | kind: Role 94 | name: analytics-access-all 95 | apiGroup: rbac.authorization.k8s.io 96 | ``` 97 | 98 | When we apply this config with `kubectl`, users that are associated with the `core` RBAC group can now create and access 99 | resources deployed in the `core` namespace, and the `analytics` group can access resources in the `analytics` namespace. 100 | However, members of the `core` team can not access resources in the `analytics` namespace and vice versa. 101 | 102 | To summarize, use namespaces to: 103 | 104 | - Implement finer grained access control over deployed resources. 105 | - Implement [resource quotas](https://kubernetes.io/docs/concepts/policy/resource-quotas/) to restrict how much of the 106 | cluster can be utilized by each team. 107 | 108 | 109 | ## Why is this a Terraform Module and not a Helm Chart? 110 | 111 | This module uses Terraform to manage the `Namespace` and RBAC role resources instead of using Helm to support the use case of 112 | setting up Helm. When setting up the Helm server, you will want to setup a `Namespace` and `ServiceAccount` for the Helm 113 | server to be deployed with. This leads to a chicken and egg problem, where the `Namespace` and `ServiceAccount` needs to 114 | be created before Helm is available for use. 
As such, we rely on Terraform to set these core resources up. 115 | -------------------------------------------------------------------------------- /modules/k8s-namespace/main.tf: -------------------------------------------------------------------------------- 1 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 2 | # CREATE KUBERNETES NAMESPACE WITH DEFAULT RBAC ROLES 3 | # These templates provision a new namespace in the Kubernetes cluster, as well as a set of default RBAC roles with 4 | # permissions scoped to the namespace. 5 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 6 | 7 | # --------------------------------------------------------------------------------------------------------------------- 8 | # SET TERRAFORM REQUIREMENTS FOR RUNNING THIS MODULE 9 | # --------------------------------------------------------------------------------------------------------------------- 10 | 11 | terraform { 12 | required_version = ">= 0.12" 13 | } 14 | 15 | # --------------------------------------------------------------------------------------------------------------------- 16 | # SET MODULE DEPENDENCY RESOURCE 17 | # This works around a terraform limitation where we can not specify module dependencies natively. 18 | # See https://github.com/hashicorp/terraform/issues/1178 for more discussion. 19 | # By resolving and computing the dependencies list, we are able to make all the resources in this module depend on the 20 | # resources backing the values in the dependencies list. 
21 | # --------------------------------------------------------------------------------------------------------------------- 22 | 23 | resource "null_resource" "dependency_getter" { 24 | triggers = { 25 | instance = join(",", var.dependencies) 26 | } 27 | } 28 | 29 | # --------------------------------------------------------------------------------------------------------------------- 30 | # CREATE THE NAMESPACE 31 | # --------------------------------------------------------------------------------------------------------------------- 32 | 33 | resource "kubernetes_namespace" "namespace" { 34 | count = var.create_resources ? 1 : 0 35 | depends_on = [null_resource.dependency_getter] 36 | 37 | metadata { 38 | name = var.name 39 | labels = var.labels 40 | annotations = var.annotations 41 | } 42 | } 43 | 44 | # --------------------------------------------------------------------------------------------------------------------- 45 | # CREATE THE DEFAULT RBAC ROLES 46 | # This uses `k8s-namespace-roles` to define a set of commonly used RBAC roles. 47 | # --------------------------------------------------------------------------------------------------------------------- 48 | 49 | module "namespace_roles" { 50 | source = "../k8s-namespace-roles" 51 | 52 | namespace = var.create_resources ? kubernetes_namespace.namespace[0].id : "" 53 | labels = var.labels 54 | annotations = var.annotations 55 | 56 | create_resources = var.create_resources 57 | dependencies = var.dependencies 58 | } 59 | -------------------------------------------------------------------------------- /modules/k8s-namespace/outputs.tf: -------------------------------------------------------------------------------- 1 | output "name" { 2 | description = "The name of the created namespace." 3 | value = element(concat(kubernetes_namespace.namespace.*.id, [""]), 0) 4 | } 5 | 6 | output "rbac_access_all_role" { 7 | description = "The name of the RBAC role that grants admin level permissions on the namespace." 
8 | value = module.namespace_roles.rbac_access_all_role 9 | } 10 | 11 | output "rbac_access_read_only_role" { 12 | description = "The name of the RBAC role that grants read only permissions on the namespace." 13 | value = module.namespace_roles.rbac_access_read_only_role 14 | } 15 | 16 | output "rbac_tiller_metadata_access_role" { 17 | description = "The name of the RBAC role that grants minimal permissions for Tiller to manage its metadata. Use this role if Tiller will be deployed into this namespace." 18 | value = module.namespace_roles.rbac_tiller_metadata_access_role 19 | } 20 | 21 | output "rbac_tiller_resource_access_role" { 22 | description = "The name of the RBAC role that grants minimal permissions for Tiller to manage resources in this namespace." 23 | value = module.namespace_roles.rbac_tiller_resource_access_role 24 | } 25 | -------------------------------------------------------------------------------- /modules/k8s-namespace/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # MODULE PARAMETERS 3 | # These variables are expected to be passed in by the operator when calling this terraform module. 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "name" { 7 | description = "The name of the namespace to be created." 8 | type = string 9 | } 10 | 11 | # --------------------------------------------------------------------------------------------------------------------- 12 | # OPTIONAL MODULE PARAMETERS 13 | # These variables have defaults, but may be overridden by the operator. 
14 | # --------------------------------------------------------------------------------------------------------------------- 15 | 16 | variable "labels" { 17 | description = "Map of string key value pairs that can be used to organize and categorize the namespace and roles. See the Kubernetes Reference for more info (https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)." 18 | type = map(string) 19 | default = {} 20 | } 21 | 22 | variable "annotations" { 23 | description = "Map of string key default pairs that can be used to store arbitrary metadata on the namespace and roles. See the Kubernetes Reference for more info (https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)." 24 | type = map(string) 25 | default = {} 26 | } 27 | 28 | variable "create_resources" { 29 | description = "Set to false to have this module skip creating resources. This weird parameter exists solely because Terraform does not support conditional modules. Therefore, this is a hack to allow you to conditionally decide if the Namespace should be created or not." 30 | type = bool 31 | default = true 32 | } 33 | 34 | # --------------------------------------------------------------------------------------------------------------------- 35 | # MODULE DEPENDENCIES 36 | # Workaround Terraform limitation where there is no module depends_on. 37 | # See https://github.com/hashicorp/terraform/issues/1178 for more details. 38 | # This can be used to make sure the module resources are created after other bootstrapping resources have been created. 39 | # For example, in GKE, the default permissions are such that you do not have enough authorization to be able to create 40 | # additional Roles in the system. Therefore, you need to first create a ClusterRoleBinding to promote your account 41 | # before you can apply this module. 
In this use case, you can pass in the ClusterRoleBinding as a dependency into this 42 | # module: 43 | # dependencies = ["${kubernetes_cluster_role_binding.user.metadata.0.name}"] 44 | # --------------------------------------------------------------------------------------------------------------------- 45 | 46 | variable "dependencies" { 47 | description = "Create a dependency between the resources in this module to the interpolated values in this list (and thus the source resources). In other words, the resources in this module will now depend on the resources backing the values in this list such that those resources need to be created before the resources in this module, and the resources in this module need to be destroyed before the resources in the list." 48 | type = list(string) 49 | default = [] 50 | } 51 | -------------------------------------------------------------------------------- /modules/k8s-service-account/README.md: -------------------------------------------------------------------------------- 1 | # K8S ServiceAccount Module 2 | 3 | 6 | 7 | This Terraform Module manages Kubernetes 8 | [`ServiceAccounts`](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/). This module 9 | can be used to declaratively create and update `ServiceAccounts` and the bound permissions that it has. 10 | 11 | 12 | ## How do you use this module? 13 | 14 | * See the [root README](https://github.com/gruntwork-io/terraform-kubernetes-helm/blob/master/README.md) for 15 | instructions on using Terraform modules. 16 | * This module uses [the `kubernetes` provider](https://www.terraform.io/docs/providers/kubernetes/index.html). 17 | * See the [examples](https://github.com/gruntwork-io/terraform-kubernetes-helm/tree/master/examples) folder for example 18 | usage. 
19 | * See [variables.tf](https://github.com/gruntwork-io/terraform-kubernetes-helm/blob/master/modules/k8s-service-account/variables.tf) 20 | for all the variables you can set on this module. 21 | * See [outputs.tf](https://github.com/gruntwork-io/terraform-kubernetes-helm/blob/master/modules/k8s-service-account/outputs.tf) 22 | for all the variables that are outputted by this module. 23 | 24 | 25 | ## What is a ServiceAccount? 26 | 27 | `ServiceAccounts` are authenticated entities to the Kubernetes API that map to container processes in a Pod. This is 28 | used to differentiate from User Accounts, which map to actual users consuming the Kubernetes API. 29 | 30 | `ServiceAccounts` are allocated to Pods at creation time, and automatically authenticated when calling out to the 31 | Kubernetes API from within the Pod. This has several advantages: 32 | 33 | - You don't need to share and configure secrets for the Kubernetes API client. 34 | - You can restrict permissions on the service to only those that it needs. 35 | - You can differentiate a service accessing the API and performing actions from users accessing the API. 36 | 37 | Use `ServiceAccounts` whenever you need to grant access to the Kubernetes API to Pods deployed on the cluster. 38 | 39 | 40 | ## Why is this a Terraform Module and not a Helm Chart? 41 | 42 | This module uses Terraform to manage the `ServiceAccount` resource instead of using Helm to support the use case of 43 | setting up Helm. When setting up the Helm server, you will want to setup a `Namespace` and `ServiceAccount` for the Helm 44 | server to be deployed with. This leads to a chicken and egg problem, where the `Namespace` and `ServiceAccount` needs to 45 | be created before Helm is available for use. As such, we rely on Terraform to set these core resources up. 
46 | -------------------------------------------------------------------------------- /modules/k8s-service-account/main.tf: -------------------------------------------------------------------------------- 1 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 2 | # CREATE KUBERNETES SERVICE ACCOUNT AND BIND THE ROLES 3 | # These templates provision a new ServiceAccount in the Kubernetes cluster, as well as a RoleBinding object that will 4 | # bind the provided roles to the ServiceAccount. 5 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 6 | 7 | # --------------------------------------------------------------------------------------------------------------------- 8 | # SET TERRAFORM REQUIREMENTS FOR RUNNING THIS MODULE 9 | # --------------------------------------------------------------------------------------------------------------------- 10 | 11 | terraform { 12 | required_version = ">= 0.12" 13 | } 14 | 15 | # --------------------------------------------------------------------------------------------------------------------- 16 | # SET MODULE DEPENDENCY RESOURCE 17 | # This works around a terraform limitation where we can not specify module dependencies natively. 18 | # See https://github.com/hashicorp/terraform/issues/1178 for more discussion. 19 | # By resolving and computing the dependencies list, we are able to make all the resources in this module depend on the 20 | # resources backing the values in the dependencies list. 
21 | # --------------------------------------------------------------------------------------------------------------------- 22 | 23 | resource "null_resource" "dependency_getter" { 24 | triggers = { 25 | instance = join(",", var.dependencies) 26 | } 27 | } 28 | 29 | # --------------------------------------------------------------------------------------------------------------------- 30 | # CREATE THE SERVICE ACCOUNT 31 | # --------------------------------------------------------------------------------------------------------------------- 32 | 33 | resource "kubernetes_service_account" "service_account" { 34 | count = var.create_resources ? 1 : 0 35 | 36 | metadata { 37 | name = var.name 38 | namespace = var.namespace 39 | labels = var.labels 40 | annotations = var.annotations 41 | } 42 | 43 | dynamic "image_pull_secret" { 44 | for_each = var.secrets_for_pulling_images 45 | content { 46 | name = image_pull_secret.value 47 | } 48 | } 49 | 50 | dynamic "secret" { 51 | for_each = var.secrets_for_pods 52 | content { 53 | name = secret.value 54 | } 55 | } 56 | 57 | automount_service_account_token = var.automount_service_account_token 58 | 59 | depends_on = [null_resource.dependency_getter] 60 | } 61 | 62 | # --------------------------------------------------------------------------------------------------------------------- 63 | # BIND THE PROVIDED ROLES TO THE SERVICE ACCOUNT 64 | # --------------------------------------------------------------------------------------------------------------------- 65 | 66 | resource "kubernetes_role_binding" "service_account_role_binding" { 67 | count = var.create_resources ? 
var.num_rbac_roles : 0 68 | 69 | metadata { 70 | name = "${var.name}-${var.rbac_roles[count.index]["name"]}-role-binding" 71 | namespace = var.rbac_roles[count.index]["namespace"] 72 | labels = var.labels 73 | annotations = var.annotations 74 | } 75 | 76 | role_ref { 77 | api_group = "rbac.authorization.k8s.io" 78 | kind = "Role" 79 | name = var.rbac_roles[count.index]["name"] 80 | } 81 | 82 | subject { 83 | api_group = "" 84 | kind = "ServiceAccount" 85 | name = kubernetes_service_account.service_account[0].metadata[0].name 86 | namespace = var.namespace 87 | } 88 | 89 | depends_on = [null_resource.dependency_getter] 90 | } 91 | -------------------------------------------------------------------------------- /modules/k8s-service-account/outputs.tf: -------------------------------------------------------------------------------- 1 | output "name" { 2 | description = "The name of the created service account" 3 | value = var.create_resources ? kubernetes_service_account.service_account[0].metadata[0].name : "" 4 | 5 | depends_on = [kubernetes_role_binding.service_account_role_binding] 6 | } 7 | 8 | output "token_secret_name" { 9 | description = "The name of the secret that holds the default ServiceAccount token that can be used to authenticate to the Kubernetes API." 10 | value = var.create_resources ? kubernetes_service_account.service_account[0].default_secret_name : "" 11 | 12 | depends_on = [kubernetes_role_binding.service_account_role_binding] 13 | } 14 | 15 | -------------------------------------------------------------------------------- /modules/k8s-service-account/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # MODULE PARAMETERS 3 | # These variables are expected to be passed in by the operator when calling this terraform module. 
4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "name" { 7 | description = "The name of the service account to be created." 8 | type = string 9 | } 10 | 11 | variable "namespace" { 12 | description = "The namespace where the service account is created." 13 | type = string 14 | } 15 | 16 | # --------------------------------------------------------------------------------------------------------------------- 17 | # OPTIONAL MODULE PARAMETERS 18 | # These variables have defaults, but may be overridden by the operator. 19 | # --------------------------------------------------------------------------------------------------------------------- 20 | 21 | # Workaround terraform limitation where resource count can not include interpolated lists. 22 | # See: https://github.com/hashicorp/terraform/issues/17421 23 | variable "num_rbac_roles" { 24 | description = "Number of RBAC roles to bind. This should match the number of items in the list passed to rbac_roles." 25 | type = number 26 | default = 0 27 | } 28 | 29 | variable "rbac_roles" { 30 | description = "List of maps representing RBAC roles that should be bound to the service account. If this list is non-empty, you must also pass in num_rbac_roles specifying the number of roles. This expects a list of maps, each with keys name and namespace." 31 | type = list(map(string)) 32 | 33 | # Example: 34 | # rbac_roles = [{ 35 | # name = "${module.namespace.rbac_access_read_only_role}" 36 | # namespace = "${module.namespace.name}" 37 | # }] 38 | default = [] 39 | } 40 | 41 | variable "labels" { 42 | description = "Map of string key default pairs that can be used to organize and categorize the service account. See the Kubernetes Reference for more info (https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)." 
43 | type = map(string) 44 | default = {} 45 | } 46 | 47 | variable "annotations" { 48 | description = "Map of string key default pairs that can be used to store arbitrary metadata on the service account. See the Kubernetes Reference for more info (https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)." 49 | type = map(string) 50 | default = {} 51 | } 52 | 53 | variable "automount_service_account_token" { 54 | description = "Whether or not to automatically mount the service account token into the container. This defaults to true." 55 | type = bool 56 | default = true 57 | } 58 | 59 | variable "secrets_for_pulling_images" { 60 | description = "A list of references to secrets in the same namespace to use for pulling any images in pods that reference this Service Account." 61 | type = list(string) 62 | default = [] 63 | } 64 | 65 | variable "secrets_for_pods" { 66 | description = "A list of secrets allowed to be used by pods running using this Service Account." 67 | type = list(string) 68 | default = [] 69 | } 70 | 71 | variable "create_resources" { 72 | description = "Set to false to have this module skip creating resources. This weird parameter exists solely because Terraform does not support conditional modules. Therefore, this is a hack to allow you to conditionally decide if the Namespace should be created or not." 73 | type = bool 74 | default = true 75 | } 76 | 77 | # --------------------------------------------------------------------------------------------------------------------- 78 | # MODULE DEPENDENCIES 79 | # Workaround Terraform limitation where there is no module depends_on. 80 | # See https://github.com/hashicorp/terraform/issues/1178 for more details. 81 | # This can be used to make sure the module resources are created after other bootstrapping resources have been created. 
82 | # For example, in GKE, the default permissions are such that you do not have enough authorization to be able to create 83 | # additional Roles in the system. Therefore, you need to first create a ClusterRoleBinding to promote your account 84 | # before you can apply this module. In this use case, you can pass in the ClusterRoleBinding as a dependency into this 85 | # module: 86 | # dependencies = ["${kubernetes_cluster_role_binding.user.metadata.0.name}"] 87 | # --------------------------------------------------------------------------------------------------------------------- 88 | 89 | variable "dependencies" { 90 | description = "Create a dependency between the resources in this module to the interpolated values in this list (and thus the source resources). In other words, the resources in this module will now depend on the resources backing the values in this list such that those resources need to be created before the resources in this module, and the resources in this module need to be destroyed before the resources in the list." 91 | type = list(string) 92 | default = [] 93 | } 94 | -------------------------------------------------------------------------------- /modules/k8s-tiller-tls-certs/README.md: -------------------------------------------------------------------------------- 1 | # K8S Tiller TLS Certs Module 2 | 3 | 6 | 7 | This Terraform Module can be used to generate a Certificate Authority (CA) public key, that is then used to generate a 8 | signed TLS certificate. These certs are then stored in a Kubernetes `Secret` so that they can be used with Tiller and 9 | `kubergrunt` to manage authentication to Tiller. 10 | 11 | If you are unfamiliar with how TLS works, checkout [this primer on 12 | TLS/SSL](https://github.com/hashicorp/terraform-aws-vault/tree/master/modules/private-tls-cert#background). 
13 | 14 | You can read more about Helm, Tiller, and their security model in our [Helm 15 | guide](https://github.com/gruntwork-io/kubergrunt/blob/master/HELM_GUIDE.md). 16 | 17 | **WARNING: The private keys generated by this module will be stored unencrypted in your Terraform state file. If you are 18 | sensitive to storing secrets in your Terraform state file, consider using `kubergrunt` to generate and manage your TLS 19 | certificate. See [the k8s-tiller-kubergrunt-minikube example](/examples/k8s-tiller-kubergrunt-minikube) for how to use 20 | `kubergrunt` for TLS management.** 21 | 22 | 23 | ## How do you use this module? 24 | 25 | * See the [root README](https://github.com/gruntwork-io/terraform-kubernetes-helm/blob/master/README.md) for 26 | instructions on using Terraform modules. 27 | * This module uses [the `kubernetes` provider](https://www.terraform.io/docs/providers/kubernetes/index.html). 28 | * See the [examples](https://github.com/gruntwork-io/terraform-kubernetes-helm/blob/master/examples) folder for example 29 | usage. 30 | * See [variables.tf](https://github.com/gruntwork-io/terraform-kubernetes-helm/blob/master/modules/k8s-tiller-tls-certs/variables.tf) 31 | for all the variables you can set on this module. 32 | * See [outputs.tf](https://github.com/gruntwork-io/terraform-kubernetes-helm/blob/master/modules/k8s-tiller-tls-certs/outputs.tf) 33 | for all the variables that are outputted by this module. 34 | 35 | 36 | ## How do you use the generated TLS certs with Tiller? 37 | 38 | This module will generate TLS certificate key pairs and store them in a Kubernetes `Secret`, outputting the name of the 39 | `Secret`. You can then pass the `Secret` name (output variable `signed_tls_certificate_key_pair_secret_name`) to the 40 | [k8s-tiller module](https://github.com/gruntwork-io/terraform-kubernetes-helm/blob/master/modules/k8s-tiller) as the 41 | input variable `tiller_tls_secret_name`. 
Tiller will then be able to find the generated TLS certificate key pairs and 42 | mount them into the container so that the server can use it. 43 | 44 | 45 | ## How do you use the generated TLS certs with kubergrunt for client side TLS management? 46 | 47 | `kubergrunt` provides TLS management features that can be used for managing client side TLS certs for use with `helm`. 48 | This module is compatible with the `kubergrunt` approach, although it requires a few labels on the created `Secret` 49 | resource so that `kubergrunt` can properly find the CA private key. 50 | 51 | `kubergrunt` model of client side TLS management works by looking for the `Secret` that stores the CA certificate key 52 | pair, which it can use to generate client side TLS certs to authenticate the client. These CA certs need to be the ones 53 | used for generating the server side TLS certs, so that the two way verification works. 54 | 55 | To allow `kubergrunt` to find the TLS certs, the following must be set: 56 | 57 | ```hcl 58 | # kubergrunt looks for CA certs in the kube-system Namespace. 59 | ca_tls_certificate_key_pair_secret_namespace = "kube-system" 60 | # kubergrunt uses the following labels to look for Tiller related certs 61 | ca_tls_certificate_key_pair_secret_labels = { 62 | "gruntwork.io/tiller-namespace" = "{NAME_OF_TILLER_NAMESPACE}" 63 | "gruntwork.io/tiller-credentials" = "true" 64 | "gruntwork.io/tiller-credentials-type" = "ca" 65 | } 66 | # kubergrunt uses the following name to look for the CA certs 67 | ca_tls_certificate_key_pair_secret_name = "{NAME_OF_TILLER_NAMESPACE}-namespace-tiller-ca-certs" 68 | ``` 69 | 70 | With these input variables, `kubergrunt` should be able to locate the generated CA certs and use them to generate client 71 | side certs when you use the `kubergrunt helm grant` command. 72 | 73 | 74 | ## How do you use the generated TLS certs to sign additional certificates? 
75 | 76 | In order to access Tiller, you will typically need to generate additional signed certificates using the generated TLS CA 77 | certs. You have two options for generating the client side TLS certs: 78 | 79 | - [Using the `k8s-helm-client-tls-certs` module](#using-the-k8s-helm-client-tls-certs-module) 80 | - [Using `kubergrunt`](#using-kubergrunt) 81 | 82 | #### Using the k8s-helm-client-tls-certs module 83 | 84 | `k8s-helm-client-tls-certs` is designed to take a CA TLS cert generated using `k8s-tiller-tls-certs` and generate new 85 | signed TLS certs that can be used as verified clients. To use the module for this purpose, you can either call out to 86 | the module in your terraform code (like we do here to generate one for the operator), or use it directly as a temporary 87 | module. 88 | 89 | Follow these steps to use it as a temporary module: 90 | 91 | 1. Copy this module to your computer. 92 | 1. Open `variables.tf` and fill in the variables that do not have a default. 93 | 1. DO NOT configure Terraform remote state storage for this code. You do NOT want to store the state files as they will 94 | contain the private keys for the certificates. 95 | 1. DO NOT configure `store_in_kubernetes_secret` to `true`. You do NOT want to store the certificates in Kubernetes 96 | without the state file. 97 | 1. Run `terraform apply`. 98 | 1. Extract the generated certificates from the output and store to a file. E.g: 99 | 100 | ```bash 101 | terraform output tls_certificate_key_pair_private_key_pem > client.pem 102 | terraform output tls_certificate_key_pair_certificate_pem > client.crt 103 | terraform output ca_tls_certificate_key_pair_certificate_pem > ca.crt 104 | ``` 105 | 106 | 1. Share the extracted files with the user. 107 | 1. Delete your local Terraform state: `rm -rf terraform.tfstate*`. The Terraform state will contain the private keys for 108 | the certificates, so it's important to clean it up! 
109 | 110 | The user can then install the certs and setup the client by installing them into the helm home directory, and then 111 | running `helm init`. For example: 112 | 113 | ```bash 114 | mkdir -p $HOME/.helm 115 | cp client.pem $HOME/.helm 116 | cp client.crt $HOME/.helm 117 | cp ca.crt $HOME/.helm 118 | helm init --client-only 119 | ``` 120 | 121 | Once the certificates are installed and the client is configured, your user is ready to use `helm`. However, by default 122 | the `helm` client does not assume a TLS setup. In order for the `helm` client to properly communicate with the deployed 123 | Tiller instance, it needs to be told to use TLS verification. These are specified through command line arguments. If 124 | everything is configured correctly, you should be able to access the Tiller that was deployed with the following args: 125 | 126 | ``` 127 | helm version --tls --tls-verify --tiller-namespace NAMESPACE_OF_TILLER 128 | ``` 129 | 130 | If you have access to Tiller, this should return you both the client version and the server version of Helm. Note that 131 | you need to pass the above CLI argument every time you want to use `helm`. 132 | 133 | 134 | #### Using kubergrunt 135 | 136 | `kubergrunt` automates this process in the `grant` and `configure` commands. For example, suppose you wanted to grant 137 | access to the deployed Tiller to a group of users grouped under the RBAC group `dev`. You can grant them access using 138 | the following command: 139 | 140 | ``` 141 | kubergrunt helm grant --tiller-namespace NAMESPACE_OF_TILLER --rbac-group dev --tls-common-name dev --tls-org YOUR_ORG 142 | ``` 143 | 144 | This will generate a new certificate key pair for the client and upload it as a `Secret`. Then, it will bind new RBAC 145 | roles to the `dev` RBAC group that grants it permission to access the Tiller pod and the uploaded `Secret`. 
146 | 147 | This in turn allows your users to configure their local client using `kubergrunt`: 148 | 149 | ``` 150 | kubergrunt helm configure --tiller-namespace NAMESPACE_OF_TILLER --rbac-group dev 151 | ``` 152 | 153 | At the end of this, your users is ready to use `helm`. However, like the previous method, you will need to enable a few 154 | flags on the `helm` client to indicate that TLS verification is required. For convenience, `kubergrunt` also installs an 155 | environment file into your helm home directory that sets the same flags using environment variables. You can dot source 156 | this file to use `helm` without passing in the flags: 157 | 158 | ``` 159 | . ~/.helm/env 160 | helm version 161 | ``` 162 | -------------------------------------------------------------------------------- /modules/k8s-tiller-tls-certs/main.tf: -------------------------------------------------------------------------------- 1 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 2 | # CREATE TLS CERTS AND STORE THEM IN KUBERNETES SECRETS 3 | # These templates generates a CA TLS certificate key pairs, and then uses that to generate a signed TLS certificate key 4 | # pair. These are then stored in Kubernetes Secrets so that they can be used with applications that support TLS, like 5 | # Tiller. 
6 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 7 | 8 | # --------------------------------------------------------------------------------------------------------------------- 9 | # SET TERRAFORM REQUIREMENTS FOR RUNNING THIS MODULE 10 | # --------------------------------------------------------------------------------------------------------------------- 11 | 12 | terraform { 13 | required_version = ">= 0.12" 14 | } 15 | 16 | # --------------------------------------------------------------------------------------------------------------------- 17 | # SET MODULE DEPENDENCY RESOURCE 18 | # This works around a terraform limitation where we can not specify module dependencies natively. 19 | # See https://github.com/hashicorp/terraform/issues/1178 for more discussion. 20 | # By resolving and computing the dependencies list, we are able to make all the resources in this module depend on the 21 | # resources backing the values in the dependencies list. 22 | # --------------------------------------------------------------------------------------------------------------------- 23 | 24 | resource "null_resource" "dependency_getter" { 25 | triggers = { 26 | instance = join(",", var.dependencies) 27 | } 28 | } 29 | 30 | # --------------------------------------------------------------------------------------------------------------------- 31 | # CREATE A CA CERTIFICATE 32 | # --------------------------------------------------------------------------------------------------------------------- 33 | 34 | resource "tls_private_key" "ca" { 35 | count = var.create_resources ? 1 : 0 36 | depends_on = [null_resource.dependency_getter] 37 | 38 | algorithm = var.private_key_algorithm 39 | ecdsa_curve = var.private_key_ecdsa_curve 40 | rsa_bits = var.private_key_rsa_bits 41 | } 42 | 43 | resource "tls_self_signed_cert" "ca" { 44 | count = var.create_resources ? 
1 : 0 45 | depends_on = [null_resource.dependency_getter] 46 | 47 | key_algorithm = element(concat(tls_private_key.ca.*.algorithm, [""]), 0) 48 | private_key_pem = element(concat(tls_private_key.ca.*.private_key_pem, [""]), 0) 49 | is_ca_certificate = true 50 | 51 | validity_period_hours = var.validity_period_hours 52 | allowed_uses = var.ca_tls_certs_allowed_uses 53 | 54 | subject { 55 | common_name = lookup(var.ca_tls_subject, "common_name", null) 56 | organization = lookup(var.ca_tls_subject, "organization", null) 57 | organizational_unit = lookup(var.ca_tls_subject, "organizational_unit", null) 58 | street_address = local.ca_tls_subject_maybe_street_address != "" ? split("\n", local.ca_tls_subject_maybe_street_address) : [] 59 | locality = lookup(var.ca_tls_subject, "locality", null) 60 | province = lookup(var.ca_tls_subject, "province", null) 61 | country = lookup(var.ca_tls_subject, "country", null) 62 | postal_code = lookup(var.ca_tls_subject, "postal_code", null) 63 | serial_number = lookup(var.ca_tls_subject, "serial_number", null) 64 | } 65 | } 66 | 67 | locals { 68 | ca_tls_subject_maybe_street_address = lookup(var.ca_tls_subject, "street_address", "") 69 | } 70 | 71 | # --------------------------------------------------------------------------------------------------------------------- 72 | # STORE CA CERTIFICATE IN KUBERNETES SECRET 73 | # --------------------------------------------------------------------------------------------------------------------- 74 | 75 | resource "kubernetes_secret" "ca_secret" { 76 | count = var.create_resources ? 
1 : 0 77 | depends_on = [null_resource.dependency_getter] 78 | 79 | metadata { 80 | namespace = var.ca_tls_certificate_key_pair_secret_namespace 81 | name = var.ca_tls_certificate_key_pair_secret_name 82 | labels = var.ca_tls_certificate_key_pair_secret_labels 83 | annotations = var.ca_tls_certificate_key_pair_secret_annotations 84 | } 85 | 86 | data = { 87 | "${var.ca_tls_certificate_key_pair_secret_filename_base}.pem" = element(concat(tls_private_key.ca.*.private_key_pem, [""]), 0) 88 | "${var.ca_tls_certificate_key_pair_secret_filename_base}.pub" = element(concat(tls_private_key.ca.*.public_key_pem, [""]), 0) 89 | "${var.ca_tls_certificate_key_pair_secret_filename_base}.crt" = element(concat(tls_self_signed_cert.ca.*.cert_pem, [""]), 0) 90 | } 91 | } 92 | 93 | # --------------------------------------------------------------------------------------------------------------------- 94 | # CREATE A TLS CERTIFICATE SIGNED USING THE CA CERTIFICATE 95 | # --------------------------------------------------------------------------------------------------------------------- 96 | 97 | resource "tls_private_key" "cert" { 98 | count = var.create_resources ? 1 : 0 99 | depends_on = [null_resource.dependency_getter] 100 | 101 | algorithm = var.private_key_algorithm 102 | ecdsa_curve = var.private_key_ecdsa_curve 103 | rsa_bits = var.private_key_rsa_bits 104 | } 105 | 106 | resource "tls_cert_request" "cert" { 107 | count = var.create_resources ? 
1 : 0 108 | 109 | key_algorithm = element(concat(tls_private_key.cert.*.algorithm, [""]), 0) 110 | private_key_pem = element(concat(tls_private_key.cert.*.private_key_pem, [""]), 0) 111 | 112 | dns_names = var.signed_tls_certs_dns_names 113 | ip_addresses = var.signed_tls_certs_ip_addresses 114 | 115 | subject { 116 | common_name = lookup(var.signed_tls_subject, "common_name", null) 117 | organization = lookup(var.signed_tls_subject, "organization", null) 118 | organizational_unit = lookup(var.signed_tls_subject, "organizational_unit", null) 119 | street_address = local.signed_tls_subject_maybe_street_address != "" ? split("\n", local.signed_tls_subject_maybe_street_address) : [] 120 | locality = lookup(var.signed_tls_subject, "locality", null) 121 | province = lookup(var.signed_tls_subject, "province", null) 122 | country = lookup(var.signed_tls_subject, "country", null) 123 | postal_code = lookup(var.signed_tls_subject, "postal_code", null) 124 | serial_number = lookup(var.signed_tls_subject, "serial_number", null) 125 | } 126 | } 127 | 128 | resource "tls_locally_signed_cert" "cert" { 129 | count = var.create_resources ? 
1 : 0 130 | depends_on = [null_resource.dependency_getter] 131 | 132 | cert_request_pem = element(concat(tls_cert_request.cert.*.cert_request_pem, [""]), 0) 133 | 134 | ca_key_algorithm = element(concat(tls_private_key.ca.*.algorithm, [""]), 0) 135 | ca_private_key_pem = element(concat(tls_private_key.ca.*.private_key_pem, [""]), 0) 136 | ca_cert_pem = element(concat(tls_self_signed_cert.ca.*.cert_pem, [""]), 0) 137 | 138 | validity_period_hours = var.validity_period_hours 139 | allowed_uses = var.signed_tls_certs_allowed_uses 140 | } 141 | 142 | locals { 143 | signed_tls_subject_maybe_street_address = lookup(var.signed_tls_subject, "street_address", "") 144 | } 145 | 146 | # --------------------------------------------------------------------------------------------------------------------- 147 | # STORE SIGNED TLS CERTIFICATE IN KUBERNETES SECRET 148 | # --------------------------------------------------------------------------------------------------------------------- 149 | 150 | resource "kubernetes_secret" "signed_tls" { 151 | count = var.create_resources ? 
1 : 0 152 | depends_on = [null_resource.dependency_getter] 153 | 154 | metadata { 155 | namespace = var.signed_tls_certificate_key_pair_secret_namespace 156 | name = var.signed_tls_certificate_key_pair_secret_name 157 | labels = var.signed_tls_certificate_key_pair_secret_labels 158 | annotations = var.signed_tls_certificate_key_pair_secret_annotations 159 | } 160 | 161 | data = { 162 | "${var.signed_tls_certificate_key_pair_secret_filename_base}.pem" = element(concat(tls_private_key.cert.*.private_key_pem, [""]), 0) 163 | "${var.signed_tls_certificate_key_pair_secret_filename_base}.pub" = element(concat(tls_private_key.cert.*.public_key_pem, [""]), 0) 164 | "${var.signed_tls_certificate_key_pair_secret_filename_base}.crt" = element(concat(tls_locally_signed_cert.cert.*.cert_pem, [""]), 0) 165 | "${var.ca_tls_certificate_key_pair_secret_filename_base}.crt" = element(concat(tls_self_signed_cert.ca.*.cert_pem, [""]), 0) 166 | } 167 | } 168 | -------------------------------------------------------------------------------- /modules/k8s-tiller-tls-certs/outputs.tf: -------------------------------------------------------------------------------- 1 | output "ca_tls_certificate_key_pair_secret_namespace" { 2 | description = "Namespace where the CA TLS certificate key pair is stored." 3 | value = element( 4 | concat(kubernetes_secret.ca_secret.*.metadata.0.namespace, [""]), 5 | 0, 6 | ) 7 | } 8 | 9 | output "ca_tls_certificate_key_pair_secret_name" { 10 | description = "Name of the Secret resource where the CA TLS certificate key pair is stored." 11 | value = element( 12 | concat(kubernetes_secret.ca_secret.*.metadata.0.name, [""]), 13 | 0, 14 | ) 15 | } 16 | 17 | output "signed_tls_certificate_key_pair_secret_namespace" { 18 | description = "Namespace where the signed TLS certificate key pair is stored." 
19 | value = element( 20 | concat(kubernetes_secret.signed_tls.*.metadata.0.namespace, [""]), 21 | 0, 22 | ) 23 | } 24 | 25 | output "signed_tls_certificate_key_pair_secret_name" { 26 | description = "Name of the Secret resource where the signed TLS certificate key pair is stored." 27 | value = element( 28 | concat(kubernetes_secret.signed_tls.*.metadata.0.name, [""]), 29 | 0, 30 | ) 31 | } 32 | 33 | -------------------------------------------------------------------------------- /modules/k8s-tiller-tls-certs/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # MODULE PARAMETERS 3 | # These variables are expected to be passed in by the operator when calling this terraform module. 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | # TLS certificate information 7 | 8 | variable "ca_tls_subject" { 9 | description = "The issuer information that contains the identifying information for the CA certificates. See https://www.terraform.io/docs/providers/tls/r/cert_request.html#common_name for a list of expected keys. Note that street_address must be a newline separated string as opposed to a list of strings." 10 | # We use an string type here instead of directly specifying the object, to allow certain keys to be optional. 11 | type = map(string) 12 | } 13 | 14 | variable "signed_tls_subject" { 15 | description = "The issuer information that contains the identifying information for the signed certificates. See https://www.terraform.io/docs/providers/tls/r/cert_request.html#common_name for a list of expected keys. Note that street_address must be a newline separated string as opposed to a list of strings." 16 | # We use an string type here instead of directly specifying the object, to allow certain keys to be optional. 
17 | type = map(string) 18 | } 19 | 20 | # Kubernetes Secret information 21 | 22 | variable "ca_tls_certificate_key_pair_secret_namespace" { 23 | description = "Namespace where the CA certificate key pairs should be stored." 24 | type = string 25 | } 26 | 27 | variable "ca_tls_certificate_key_pair_secret_name" { 28 | description = "Name to use for the Secret resource that stores the CA certificate key pairs." 29 | type = string 30 | } 31 | 32 | variable "signed_tls_certificate_key_pair_secret_namespace" { 33 | description = "Namespace where the signed TLS certificate key pairs should be stored." 34 | type = string 35 | } 36 | 37 | variable "signed_tls_certificate_key_pair_secret_name" { 38 | description = "Name to use for the Secret resource that stores the signed TLS certificate key pairs." 39 | type = string 40 | } 41 | 42 | # --------------------------------------------------------------------------------------------------------------------- 43 | # OPTIONAL MODULE PARAMETERS 44 | # These variables have defaults, but may be overridden by the operator. 45 | # --------------------------------------------------------------------------------------------------------------------- 46 | 47 | # TLS certificate information 48 | 49 | variable "private_key_algorithm" { 50 | description = "The name of the algorithm to use for private keys. Must be one of: RSA or ECDSA." 51 | type = string 52 | default = "ECDSA" 53 | } 54 | 55 | variable "private_key_ecdsa_curve" { 56 | description = "The name of the elliptic curve to use. Should only be used if var.private_key_algorithm is ECDSA. Must be one of P224, P256, P384 or P521." 57 | type = string 58 | default = "P256" 59 | } 60 | 61 | variable "private_key_rsa_bits" { 62 | description = "The size of the generated RSA key in bits. Should only be used if var.private_key_algorithm is RSA." 
63 | type = number 64 | default = 2048 65 | } 66 | 67 | variable "ca_tls_certs_allowed_uses" { 68 | description = "List of keywords from RFC5280 describing a use that is permitted for the CA certificate. For more info and the list of keywords, see https://www.terraform.io/docs/providers/tls/r/self_signed_cert.html#allowed_uses." 69 | type = list(string) 70 | 71 | default = [ 72 | "cert_signing", 73 | "key_encipherment", 74 | "digital_signature", 75 | "server_auth", 76 | "client_auth", 77 | ] 78 | } 79 | 80 | variable "signed_tls_certs_allowed_uses" { 81 | description = "List of keywords from RFC5280 describing a use that is permitted for the issued certificate. For more info and the list of keywords, see https://www.terraform.io/docs/providers/tls/r/self_signed_cert.html#allowed_uses." 82 | type = list(string) 83 | 84 | default = [ 85 | "key_encipherment", 86 | "digital_signature", 87 | "server_auth", 88 | ] 89 | } 90 | 91 | variable "signed_tls_certs_dns_names" { 92 | description = "List of DNS names for which the certificate will be valid (e.g. tiller, foo.example.com)." 93 | type = list(string) 94 | default = [] 95 | } 96 | 97 | variable "signed_tls_certs_ip_addresses" { 98 | description = "List of IP addresses for which the certificate will be valid (e.g. 127.0.0.1)." 99 | type = list(string) 100 | default = ["127.0.0.1"] 101 | } 102 | 103 | variable "validity_period_hours" { 104 | description = "The number of hours after initial issuing that the certificate will become invalid." 105 | type = number 106 | 107 | # 10 years 108 | default = 87660 109 | } 110 | 111 | # Kubernetes Secret information 112 | 113 | variable "ca_tls_certificate_key_pair_secret_filename_base" { 114 | description = "Basename to use for the TLS certificate files stored in the Secret." 
115 | type = string 116 | default = "ca" 117 | } 118 | 119 | variable "ca_tls_certificate_key_pair_secret_labels" { 120 | description = "Labels to apply to the Secret resource that stores the CA certificate key pairs." 121 | type = map(string) 122 | default = {} 123 | } 124 | 125 | variable "ca_tls_certificate_key_pair_secret_annotations" { 126 | description = "Annotations to apply to the Secret resource that stores the CA certificate key pairs." 127 | type = map(string) 128 | default = {} 129 | } 130 | 131 | variable "signed_tls_certificate_key_pair_secret_filename_base" { 132 | description = "Basename to use for the signed TLS certificate files stored in the Secret." 133 | type = string 134 | default = "tls" 135 | } 136 | 137 | variable "signed_tls_certificate_key_pair_secret_labels" { 138 | description = "Labels to apply to the Secret resource that stores the signed TLS certificate key pairs." 139 | type = map(string) 140 | default = {} 141 | } 142 | 143 | variable "signed_tls_certificate_key_pair_secret_annotations" { 144 | description = "Annotations to apply to the Secret resource that stores the signed TLS certificate key pairs." 145 | type = map(string) 146 | default = {} 147 | } 148 | 149 | variable "create_resources" { 150 | description = "Set to false to have this module create no resources. This weird parameter exists solely because Terraform does not support conditional modules. Therefore, this is a hack to allow you to conditionally decide if the TLS certs should be created or not." 151 | type = bool 152 | default = true 153 | } 154 | 155 | # --------------------------------------------------------------------------------------------------------------------- 156 | # MODULE DEPENDENCIES 157 | # Workaround Terraform limitation where there is no module depends_on. 158 | # See https://github.com/hashicorp/terraform/issues/1178 for more details. 
159 | # This can be used to make sure the module resources are created after other bootstrapping resources have been created. 160 | # For example, in GKE, the default permissions are such that you do not have enough authorization to be able to create 161 | # additional Roles in the system. Therefore, you need to first create a ClusterRoleBinding to promote your account 162 | # before you can apply this module. In this use case, you can pass in the ClusterRoleBinding as a dependency into this 163 | # module: 164 | # dependencies = ["${kubernetes_cluster_role_binding.user.metadata.0.name}"] 165 | # --------------------------------------------------------------------------------------------------------------------- 166 | 167 | variable "dependencies" { 168 | description = "Create a dependency between the resources in this module to the interpolated values in this list (and thus the source resources). In other words, the resources in this module will now depend on the resources backing the values in this list such that those resources need to be created before the resources in this module, and the resources in this module need to be destroyed before the resources in the list." 169 | type = list(string) 170 | default = [] 171 | } 172 | -------------------------------------------------------------------------------- /modules/k8s-tiller/outputs.tf: -------------------------------------------------------------------------------- 1 | output "deployment_name" { 2 | description = "The name of the Deployment resource that manages the Tiller Pods." 3 | value = kubernetes_deployment.tiller.metadata[0].name 4 | } 5 | 6 | output "service_name" { 7 | description = "The name of the Service resource that fronts the Tiller Pods." 8 | value = kubernetes_service.tiller.metadata[0].name 9 | } 10 | 11 | output "tiller_ca_tls_certificate_key_pair_secret_namespace" { 12 | description = "The Namespace where the Tiller TLS CA certs are stored. 
Set only if var.tiller_tls_gen_method is not \"none\"" 13 | 14 | value = var.tiller_tls_gen_method == "provider" ? module.tiller_tls_certs.ca_tls_certificate_key_pair_secret_namespace : var.tiller_tls_gen_method == "kubergrunt" ? var.tiller_tls_ca_cert_secret_namespace : "" 15 | 16 | depends_on = [null_resource.tiller_tls_ca_certs] 17 | } 18 | 19 | output "tiller_ca_tls_certificate_key_pair_secret_name" { 20 | description = "The name of the Secret resource where the Tiller TLS CA certs are stored. Set only if var.tiller_tls_gen_method is not \"none\"" 21 | 22 | value = var.tiller_tls_gen_method == "provider" ? module.tiller_tls_certs.ca_tls_certificate_key_pair_secret_name : var.tiller_tls_gen_method == "kubergrunt" ? local.tiller_tls_ca_certs_secret_name : "" 23 | 24 | depends_on = [null_resource.tiller_tls_ca_certs] 25 | } 26 | 27 | output "tiller_tls_certificate_key_pair_secret_namespace" { 28 | description = "The Namespace where the Tiller TLS certs are stored. Set only if var.tiller_tls_gen_method is not \"none\"" 29 | 30 | value = var.tiller_tls_gen_method == "provider" ? module.tiller_tls_certs.signed_tls_certificate_key_pair_secret_namespace : var.tiller_tls_gen_method == "kubergrunt" ? var.namespace : "" 31 | 32 | depends_on = [null_resource.tiller_tls_certs] 33 | } 34 | 35 | output "tiller_tls_certificate_key_pair_secret_name" { 36 | description = "The name of the Secret resource where the Tiller TLS certs are stored. Set only if var.tiller_tls_gen_method is not \"none\"" 37 | 38 | value = var.tiller_tls_gen_method == "provider" ? module.tiller_tls_certs.signed_tls_certificate_key_pair_secret_name : var.tiller_tls_gen_method == "kubergrunt" ? 
local.tiller_tls_certs_secret_name : "" 39 | 40 | depends_on = [null_resource.tiller_tls_certs] 41 | } 42 | -------------------------------------------------------------------------------- /modules/k8s-tiller/variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # MODULE PARAMETERS 3 | # These variables are expected to be passed in by the operator when calling this terraform module. 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "namespace" { 7 | description = "The name of the Kubernetes Namespace where Tiller should be deployed into." 8 | type = string 9 | } 10 | 11 | variable "tiller_service_account_name" { 12 | description = "The name of the Kubernetes ServiceAccount that Tiller should use when authenticating to the Kubernetes API." 13 | type = string 14 | } 15 | 16 | variable "tiller_service_account_token_secret_name" { 17 | description = "The name of the Kubernetes Secret that holds the ServiceAccount token." 18 | type = string 19 | } 20 | 21 | variable "tiller_tls_gen_method" { 22 | description = "The method in which the TLS certs for Tiller are generated. Must be one of `provider`, `kubergrunt`, or `none`." 23 | type = string 24 | } 25 | 26 | # --------------------------------------------------------------------------------------------------------------------- 27 | # OPTIONAL MODULE PARAMETERS 28 | # These variables have defaults, but may be overridden by the operator. 29 | # --------------------------------------------------------------------------------------------------------------------- 30 | 31 | variable "deployment_name" { 32 | description = "The name to use for the Kubernetes Deployment resource. 
This should be unique to the Namespace if you plan on having multiple Tiller Deployments in a single Namespace." 33 | type = string 34 | default = "tiller-deploy" 35 | } 36 | 37 | variable "deployment_labels" { 38 | description = "Any labels to attach to the Kubernetes Deployment resource." 39 | type = map(string) 40 | default = {} 41 | } 42 | 43 | variable "deployment_annotations" { 44 | description = "Any annotations to attach to the Kubernetes Deployment resource." 45 | type = map(string) 46 | default = {} 47 | } 48 | 49 | variable "deployment_replicas" { 50 | description = "The number of Pods to use for Tiller. 1 should be sufficient for most use cases." 51 | type = number 52 | default = 1 53 | } 54 | 55 | variable "service_name" { 56 | description = "The name to use for the Kubernetes Service resource. This should be unique to the Namespace if you plan on having multiple Tiller Deployments in a single Namespace." 57 | type = string 58 | default = "tiller-deploy" 59 | } 60 | 61 | variable "service_labels" { 62 | description = "Any labels to attach to the Kubernetes Service resource." 63 | type = map(string) 64 | default = {} 65 | } 66 | 67 | variable "service_annotations" { 68 | description = "Any annotations to attach to the Kubernetes Service resource." 69 | type = map(string) 70 | default = {} 71 | } 72 | 73 | variable "tiller_image" { 74 | description = "The container image to use for the Tiller Pods." 75 | type = string 76 | default = "gcr.io/kubernetes-helm/tiller" 77 | } 78 | 79 | variable "tiller_image_version" { 80 | description = "The version of the container image to use for the Tiller Pods." 81 | type = string 82 | default = "v2.11.0" 83 | } 84 | 85 | variable "tiller_image_pull_policy" { 86 | description = "Policy for pulling the container image used for the Tiller Pods. 
Use `Always` if the image tag is mutable (e.g. latest)" 87 | type = string 88 | default = "IfNotPresent" 89 | } 90 | 91 | variable "tiller_listen_localhost" { 92 | description = "If enabled, Tiller will only listen on localhost within the container." 93 | type = bool 94 | default = true 95 | } 96 | 97 | variable "tiller_history_max" { 98 | description = "The maximum number of revisions saved per release. Use 0 for no limit." 99 | type = number 100 | default = 0 101 | } 102 | 103 | variable "tiller_tls_key_file_name" { 104 | description = "The file name of the private key file for the server's TLS certificate key pair, as it is available in the Kubernetes Secret for the TLS certificates." 105 | type = string 106 | default = "tls.pem" 107 | } 108 | 109 | variable "tiller_tls_cert_file_name" { 110 | description = "The file name of the public certificate file for the server's TLS certificate key pair, as it is available in the Kubernetes Secret for the TLS certificates." 111 | type = string 112 | default = "tls.crt" 113 | } 114 | 115 | variable "tiller_tls_cacert_file_name" { 116 | description = "The file name of the CA certificate file that can be used to validate client side TLS certificates, as it is available in the Kubernetes Secret for the TLS certificates." 117 | type = string 118 | default = "ca.crt" 119 | } 120 | 121 | variable "tiller_tls_secret_name" { 122 | description = "The name of the Kubernetes Secret that holds the TLS certificate key pair to use for Tiller. Needs to provide the TLS private key, public certificate, and CA certificate to use for verifying client TLS certificate key pairs. Used when var.tiller_tls_gen_method = none." 123 | type = string 124 | default = null 125 | } 126 | 127 | variable "tiller_tls_subject" { 128 | description = "The issuer information that contains the identifying information for the Tiller server. Used to generate the TLS certificate keypairs. Used when var.tiller_tls_gen_method is not none. 
See https://www.terraform.io/docs/providers/tls/r/cert_request.html#common_name for a list of expected keys." 129 | type = map(string) 130 | 131 | default = { 132 | common_name = "tiller" 133 | organization = "Gruntwork" 134 | } 135 | } 136 | 137 | variable "private_key_algorithm" { 138 | description = "The name of the algorithm to use for private keys. Must be one of: RSA or ECDSA." 139 | type = string 140 | default = "ECDSA" 141 | } 142 | 143 | variable "private_key_ecdsa_curve" { 144 | description = "The name of the elliptic curve to use. Should only be used if var.private_key_algorithm is ECDSA. Must be one of P224, P256, P384 or P521." 145 | type = string 146 | default = "P256" 147 | } 148 | 149 | variable "private_key_rsa_bits" { 150 | description = "The size of the generated RSA key in bits. Should only be used if var.private_key_algorithm is RSA." 151 | type = number 152 | default = 2048 153 | } 154 | 155 | variable "tiller_tls_ca_cert_secret_namespace" { 156 | description = "The Kubernetes Namespace to use to store the CA certificate key pair." 157 | type = string 158 | default = "kube-system" 159 | } 160 | 161 | # kubergrunt and kubectl Authentication params 162 | 163 | variable "kubectl_config_context_name" { 164 | description = "The config context to use when authenticating to the Kubernetes cluster. If empty, defaults to the current context specified in the kubeconfig file. Used when var.tiller_tls_gen_method is kubergrunt." 165 | type = string 166 | default = "" 167 | } 168 | 169 | variable "kubectl_config_path" { 170 | description = "The path to the config file to use for kubectl. If empty, defaults to $HOME/.kube/config. Used when var.tiller_tls_gen_method is kubergrunt." 171 | type = string 172 | default = "" 173 | } 174 | 175 | variable "kubectl_server_endpoint" { 176 | description = "The endpoint of the Kubernetes API to access when authenticating to the Kubernetes cluster. Use as an alternative to config and config context. 
When set, var.kubectl_ca_b64_data and var.kubectl_token must be provided. Used when var.tiller_tls_gen_method is kubergrunt." 177 | type = string 178 | default = "" 179 | } 180 | 181 | variable "kubectl_ca_b64_data" { 182 | description = "The base64 encoded certificate authority of the Kubernetes API when authenticating to the Kubernetes cluster. Use as an alternative to config and config context. Must be set when var.kubectl_server_endpoint is not empty. Used when var.tiller_tls_gen_method is kubergrunt." 183 | type = string 184 | default = "" 185 | } 186 | 187 | variable "kubectl_token" { 188 | description = "The authentication token to use when authenticating to the Kubernetes cluster. Use as an alternative to config and config context. Must be set when var.kubectl_server_endpoint is not empty. Used when var.tiller_tls_gen_method is kubergrunt." 189 | type = string 190 | default = "" 191 | } 192 | 193 | # --------------------------------------------------------------------------------------------------------------------- 194 | # MODULE DEPENDENCIES 195 | # Workaround Terraform limitation where there is no module depends_on. 196 | # See https://github.com/hashicorp/terraform/issues/1178 for more details. 197 | # This can be used to make sure the module resources are created after other bootstrapping resources have been created. 198 | # For example, in GKE, the default permissions are such that you do not have enough authorization to be able to create 199 | # additional Roles in the system. Therefore, you need to first create a ClusterRoleBinding to promote your account 200 | # before you can apply this module. 
In this use case, you can pass in the ClusterRoleBinding as a dependency into this 201 | # module: 202 | # dependencies = ["${kubernetes_cluster_role_binding.user.metadata.0.name}"] 203 | # --------------------------------------------------------------------------------------------------------------------- 204 | 205 | variable "dependencies" { 206 | description = "Create a dependency between the resources in this module to the interpolated values in this list (and thus the source resources). In other words, the resources in this module will now depend on the resources backing the values in this list such that those resources need to be created before the resources in this module, and the resources in this module need to be destroyed before the resources in the list." 207 | type = list(string) 208 | default = [] 209 | } 210 | -------------------------------------------------------------------------------- /outputs.tf: -------------------------------------------------------------------------------- 1 | output "tiller_namespace" { 2 | description = "The name of the namespace that houses Tiller." 3 | value = module.tiller_namespace.name 4 | } 5 | 6 | output "resource_namespace" { 7 | description = "The name of the namespace where Tiller will deploy resources into." 8 | value = module.resource_namespace.name 9 | } 10 | 11 | output "helm_client_tls_private_key_pem" { 12 | description = "The private key of the TLS certificate key pair to use for the helm client." 13 | sensitive = true 14 | value = module.helm_client_tls_certs.tls_certificate_key_pair_private_key_pem 15 | } 16 | 17 | output "helm_client_tls_public_cert_pem" { 18 | description = "The public certificate of the TLS certificate key pair to use for the helm client." 
19 | sensitive = true 20 | value = module.helm_client_tls_certs.tls_certificate_key_pair_certificate_pem 21 | } 22 | 23 | output "helm_client_tls_ca_cert_pem" { 24 | description = "The CA certificate of the TLS certificate key pair to use for the helm client." 25 | sensitive = true 26 | value = module.helm_client_tls_certs.ca_tls_certificate_key_pair_certificate_pem 27 | } 28 | -------------------------------------------------------------------------------- /test/Gopkg.toml: -------------------------------------------------------------------------------- 1 | # Gopkg.toml example 2 | # 3 | # Refer to https://golang.github.io/dep/docs/Gopkg.toml.html 4 | # for detailed Gopkg.toml documentation. 5 | # 6 | # required = ["github.com/user/thing/cmd/thing"] 7 | # ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] 8 | # 9 | # [[constraint]] 10 | # name = "github.com/user/project" 11 | # version = "1.0.0" 12 | # 13 | # [[constraint]] 14 | # name = "github.com/user/project2" 15 | # branch = "dev" 16 | # source = "github.com/myfork/project2" 17 | # 18 | # [[override]] 19 | # name = "github.com/x/y" 20 | # version = "2.4.0" 21 | # 22 | # [prune] 23 | # non-go = false 24 | # go-tests = true 25 | # unused-packages = true 26 | 27 | 28 | [[constraint]] 29 | name = "github.com/gruntwork-io/terratest" 30 | version = "0.20.1" 31 | 32 | [prune] 33 | go-tests = true 34 | unused-packages = true 35 | -------------------------------------------------------------------------------- /test/README.md: -------------------------------------------------------------------------------- 1 | # Tests 2 | 3 | This folder contains automated tests for this Module. All of the tests are written in [Go](https://golang.org/). 4 | Most of these are "integration tests" that deploy real infrastructure using Terraform and verify that infrastructure 5 | works as expected using a helper library called [Terratest](https://github.com/gruntwork-io/terratest). 
6 | 7 | 8 | 9 | ## WARNING WARNING WARNING 10 | 11 | **Note #1**: Many of these tests create real resources in an AWS account and then try to clean those resources up at 12 | the end of a test run. That means these tests may cost you money to run! When adding tests, please be considerate of 13 | the resources you create and take extra care to clean everything up when you're done! 14 | 15 | **Note #2**: Never forcefully shut the tests down (e.g. by hitting `CTRL + C`) or the cleanup tasks won't run! 16 | 17 | **Note #3**: We set `-timeout 60m` on all tests not because they necessarily take that long, but because Go has a 18 | default test timeout of 10 minutes, after which it forcefully kills the tests with a `SIGQUIT`, preventing the cleanup 19 | tasks from running. Therefore, we set a deliberately long timeout to make sure all tests have enough time to finish and 20 | clean up. 21 | 22 | 23 | 24 | ## Running the tests 25 | 26 | ### Prerequisites 27 | 28 | - Install the latest version of [Go](https://golang.org/). 29 | - Install [dep](https://github.com/golang/dep) for Go dependency management. 30 | - Install [Terraform](https://www.terraform.io/downloads.html). 31 | - Configure your AWS credentials using one of the [options supported by the AWS 32 | SDK](http://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html). Usually, the easiest option is to 33 | set the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables. 
34 | 35 | 36 | ### One-time setup 37 | 38 | Download Go dependencies using dep: 39 | 40 | ``` 41 | cd test 42 | dep ensure 43 | ``` 44 | 45 | ### Run all the tests 46 | 47 | #### Terratest 48 | 49 | ```bash 50 | cd test 51 | go test -v -timeout 60m 52 | ``` 53 | 54 | ### Run a specific test 55 | 56 | #### Terratest 57 | 58 | To run a specific test called `TestFoo`: 59 | 60 | ```bash 61 | cd test 62 | go test -v -timeout 60m -run TestFoo 63 | ``` 64 | -------------------------------------------------------------------------------- /test/k8s_tiller_kubergrunt_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "strings" 8 | "testing" 9 | 10 | "github.com/gruntwork-io/terratest/modules/k8s" 11 | "github.com/gruntwork-io/terratest/modules/logger" 12 | "github.com/gruntwork-io/terratest/modules/random" 13 | "github.com/gruntwork-io/terratest/modules/shell" 14 | "github.com/gruntwork-io/terratest/modules/terraform" 15 | test_structure "github.com/gruntwork-io/terratest/modules/test-structure" 16 | "github.com/stretchr/testify/require" 17 | ) 18 | 19 | func TestK8STillerKubergrunt(t *testing.T) { 20 | t.Parallel() 21 | 22 | // Uncomment any of the following to skip that section during the test 23 | // os.Setenv("SKIP_create_test_copy_of_examples", "true") 24 | // os.Setenv("SKIP_create_test_service_account", "true") 25 | // os.Setenv("SKIP_create_terratest_options", "true") 26 | // os.Setenv("SKIP_terraform_apply", "true") 27 | // os.Setenv("SKIP_validate", "true") 28 | // os.Setenv("SKIP_validate_upgrade", "true") 29 | // os.Setenv("SKIP_cleanup", "true") 30 | 31 | // Create a directory path that won't conflict 32 | workingDir := filepath.Join(".", "stages", t.Name()) 33 | 34 | test_structure.RunTestStage(t, "create_test_copy_of_examples", func() { 35 | k8sTillerTerraformModulePath := test_structure.CopyTerraformFolderToTemp(t, "..", 
"examples/k8s-tiller-kubergrunt-minikube") 36 | logger.Logf(t, "path to test folder %s\n", k8sTillerTerraformModulePath) 37 | helmHome := filepath.Join(k8sTillerTerraformModulePath, ".helm") 38 | // make sure to create the helm home directory 39 | require.NoError(t, os.Mkdir(helmHome, 0700)) 40 | 41 | test_structure.SaveString(t, workingDir, "k8sTillerTerraformModulePath", k8sTillerTerraformModulePath) 42 | test_structure.SaveString(t, workingDir, "helmHome", helmHome) 43 | }) 44 | 45 | // Create a ServiceAccount in its own namespace that we can use to login as for testing purposes. 46 | test_structure.RunTestStage(t, "create_test_service_account", func() { 47 | uniqueID := random.UniqueId() 48 | testServiceAccountName := fmt.Sprintf("%s-test-account", strings.ToLower(uniqueID)) 49 | testServiceAccountNamespace := fmt.Sprintf("%s-test-account-namespace", strings.ToLower(uniqueID)) 50 | tmpConfigPath := k8s.CopyHomeKubeConfigToTemp(t) 51 | kubectlOptions := k8s.NewKubectlOptions("", tmpConfigPath, "") 52 | 53 | k8s.CreateNamespace(t, kubectlOptions, testServiceAccountNamespace) 54 | kubectlOptions.Namespace = testServiceAccountNamespace 55 | k8s.CreateServiceAccount(t, kubectlOptions, testServiceAccountName) 56 | token := k8s.GetServiceAccountAuthToken(t, kubectlOptions, testServiceAccountName) 57 | err := k8s.AddConfigContextForServiceAccountE(t, kubectlOptions, testServiceAccountName, testServiceAccountName, token) 58 | // We do the error check and namespace deletion manually here, because we can't defer it within the test stage. 
59 | if err != nil { 60 | k8s.DeleteNamespace(t, kubectlOptions, testServiceAccountNamespace) 61 | t.Fatal(err) 62 | } 63 | 64 | test_structure.SaveString(t, workingDir, "uniqueID", uniqueID) 65 | test_structure.SaveString(t, workingDir, "tmpKubectlConfigPath", tmpConfigPath) 66 | test_structure.SaveString(t, workingDir, "testServiceAccountName", testServiceAccountName) 67 | test_structure.SaveString(t, workingDir, "testServiceAccountNamespace", testServiceAccountNamespace) 68 | }) 69 | 70 | test_structure.RunTestStage(t, "create_terratest_options", func() { 71 | uniqueID := test_structure.LoadString(t, workingDir, "uniqueID") 72 | helmHome := test_structure.LoadString(t, workingDir, "helmHome") 73 | testServiceAccountName := test_structure.LoadString(t, workingDir, "testServiceAccountName") 74 | testServiceAccountNamespace := test_structure.LoadString(t, workingDir, "testServiceAccountNamespace") 75 | k8sTillerTerraformModulePath := test_structure.LoadString(t, workingDir, "k8sTillerTerraformModulePath") 76 | 77 | k8sTillerTerratestOptions := createExampleK8STillerKubergruntTerraformOptions(t, k8sTillerTerraformModulePath, helmHome, uniqueID, testServiceAccountName, testServiceAccountNamespace) 78 | 79 | test_structure.SaveTerraformOptions(t, workingDir, k8sTillerTerratestOptions) 80 | }) 81 | 82 | defer test_structure.RunTestStage(t, "cleanup", func() { 83 | k8sTillerTerratestOptions := test_structure.LoadTerraformOptions(t, workingDir) 84 | terraform.Destroy(t, k8sTillerTerratestOptions) 85 | 86 | testServiceAccountNamespace := test_structure.LoadString(t, workingDir, "testServiceAccountNamespace") 87 | kubectlOptions := k8s.NewKubectlOptions("", "", "") 88 | k8s.DeleteNamespace(t, kubectlOptions, testServiceAccountNamespace) 89 | }) 90 | 91 | test_structure.RunTestStage(t, "terraform_apply", func() { 92 | k8sTillerTerratestOptions := test_structure.LoadTerraformOptions(t, workingDir) 93 | terraform.InitAndApply(t, k8sTillerTerratestOptions) 94 | }) 95 | 96 | 
test_structure.RunTestStage(t, "validate", func() { 97 | helmHome := test_structure.LoadString(t, workingDir, "helmHome") 98 | k8sTillerTerratestOptions := test_structure.LoadTerraformOptions(t, workingDir) 99 | resourceNamespace := k8sTillerTerratestOptions.Vars["resource_namespace"].(string) 100 | tmpConfigPath := test_structure.LoadString(t, workingDir, "tmpKubectlConfigPath") 101 | testServiceAccountName := test_structure.LoadString(t, workingDir, "testServiceAccountName") 102 | kubectlOptions := k8s.NewKubectlOptions(testServiceAccountName, tmpConfigPath, resourceNamespace) 103 | 104 | runHelm( 105 | t, 106 | kubectlOptions, 107 | helmHome, 108 | "install", 109 | "stable/kubernetes-dashboard", 110 | "--wait", 111 | ) 112 | }) 113 | 114 | test_structure.RunTestStage(t, "validate_upgrade", func() { 115 | // Make sure the upgrade command mentioned in the docs actually works 116 | helmHome := test_structure.LoadString(t, workingDir, "helmHome") 117 | tmpConfigPath := test_structure.LoadString(t, workingDir, "tmpKubectlConfigPath") 118 | kubectlOptions := k8s.NewKubectlOptions("", tmpConfigPath, "") 119 | 120 | runHelm( 121 | t, 122 | kubectlOptions, 123 | helmHome, 124 | "init", 125 | "--upgrade", 126 | "--wait", 127 | ) 128 | }) 129 | } 130 | 131 | func runHelm(t *testing.T, options *k8s.KubectlOptions, helmHome string, args ...string) { 132 | helmArgs := []string{"helm"} 133 | if options.ContextName != "" { 134 | helmArgs = append(helmArgs, "--kube-context", options.ContextName) 135 | } 136 | if options.ConfigPath != "" { 137 | helmArgs = append(helmArgs, "--kubeconfig", options.ConfigPath) 138 | } 139 | if options.Namespace != "" { 140 | helmArgs = append(helmArgs, "--namespace", options.Namespace) 141 | } 142 | helmArgs = append(helmArgs, args...) 
143 | helmCmd := strings.Join(helmArgs, " ") 144 | 145 | // TODO: make this test platform independent 146 | helmEnvPath := filepath.Join(helmHome, "env") 147 | cmd := shell.Command{ 148 | Command: "sh", 149 | Args: []string{ 150 | "-c", 151 | fmt.Sprintf(". %s && %s", helmEnvPath, helmCmd), 152 | }, 153 | } 154 | shell.RunCommand(t, cmd) 155 | } 156 | -------------------------------------------------------------------------------- /test/k8s_tiller_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "os" 5 | "path/filepath" 6 | "testing" 7 | 8 | "github.com/gruntwork-io/terratest/modules/k8s" 9 | "github.com/gruntwork-io/terratest/modules/logger" 10 | "github.com/gruntwork-io/terratest/modules/random" 11 | "github.com/gruntwork-io/terratest/modules/shell" 12 | "github.com/gruntwork-io/terratest/modules/terraform" 13 | "github.com/gruntwork-io/terratest/modules/test-structure" 14 | "github.com/stretchr/testify/require" 15 | ) 16 | 17 | // This test makes sure the root example can run without errors on a machine without kubergrunt 18 | func TestK8STillerNoKubergrunt(t *testing.T) { 19 | t.Parallel() 20 | 21 | if kubergruntInstalled(t) { 22 | t.Skip("This test assumes kubergrunt is not installed.") 23 | } 24 | 25 | // os.Setenv("SKIP_create_test_copy_of_examples", "true") 26 | // os.Setenv("SKIP_create_test_service_account", "true") 27 | // os.Setenv("SKIP_create_terratest_options", "true") 28 | // os.Setenv("SKIP_terraform_apply", "true") 29 | // os.Setenv("SKIP_cleanup", "true") 30 | 31 | // Create a directory path that won't conflict 32 | workingDir := filepath.Join(".", "stages", t.Name()) 33 | 34 | test_structure.RunTestStage(t, "create_test_copy_of_examples", func() { 35 | uniqueID := random.UniqueId() 36 | k8sTillerTerraformModulePath := test_structure.CopyTerraformFolderToTemp(t, "..", ".") 37 | logger.Logf(t, "path to test folder %s\n", k8sTillerTerraformModulePath) 38 | helmHome := 
filepath.Join(k8sTillerTerraformModulePath, ".helm") 39 | // make sure to create the helm home directory 40 | require.NoError(t, os.Mkdir(helmHome, 0700)) 41 | 42 | test_structure.SaveString(t, workingDir, "k8sTillerTerraformModulePath", k8sTillerTerraformModulePath) 43 | test_structure.SaveString(t, workingDir, "helmHome", helmHome) 44 | test_structure.SaveString(t, workingDir, "uniqueID", uniqueID) 45 | }) 46 | 47 | test_structure.RunTestStage(t, "create_terratest_options", func() { 48 | uniqueID := test_structure.LoadString(t, workingDir, "uniqueID") 49 | helmHome := test_structure.LoadString(t, workingDir, "helmHome") 50 | k8sTillerTerraformModulePath := test_structure.LoadString(t, workingDir, "k8sTillerTerraformModulePath") 51 | 52 | k8sTillerTerratestOptions := createExampleK8STillerTerraformOptions(t, k8sTillerTerraformModulePath, helmHome, uniqueID) 53 | 54 | test_structure.SaveTerraformOptions(t, workingDir, k8sTillerTerratestOptions) 55 | }) 56 | 57 | defer test_structure.RunTestStage(t, "cleanup", func() { 58 | k8sTillerTerratestOptions := test_structure.LoadTerraformOptions(t, workingDir) 59 | terraform.Destroy(t, k8sTillerTerratestOptions) 60 | }) 61 | 62 | test_structure.RunTestStage(t, "terraform_apply", func() { 63 | k8sTillerTerratestOptions := test_structure.LoadTerraformOptions(t, workingDir) 64 | terraform.InitAndApply(t, k8sTillerTerratestOptions) 65 | }) 66 | } 67 | 68 | func TestK8STiller(t *testing.T) { 69 | t.Parallel() 70 | 71 | // Uncomment any of the following to skip that section during the test 72 | // os.Setenv("SKIP_create_test_copy_of_examples", "true") 73 | // os.Setenv("SKIP_create_test_service_account", "true") 74 | // os.Setenv("SKIP_create_terratest_options", "true") 75 | // os.Setenv("SKIP_terraform_apply", "true") 76 | // os.Setenv("SKIP_setup_helm_client", "true") 77 | // os.Setenv("SKIP_validate", "true") 78 | // os.Setenv("SKIP_cleanup", "true") 79 | 80 | // Create a directory path that won't conflict 81 | workingDir := 
filepath.Join(".", "stages", t.Name()) 82 | 83 | test_structure.RunTestStage(t, "create_test_copy_of_examples", func() { 84 | uniqueID := random.UniqueId() 85 | k8sTillerTerraformModulePath := test_structure.CopyTerraformFolderToTemp(t, "..", ".") 86 | logger.Logf(t, "path to test folder %s\n", k8sTillerTerraformModulePath) 87 | helmHome := filepath.Join(k8sTillerTerraformModulePath, ".helm") 88 | // make sure to create the helm home directory 89 | require.NoError(t, os.Mkdir(helmHome, 0700)) 90 | 91 | test_structure.SaveString(t, workingDir, "k8sTillerTerraformModulePath", k8sTillerTerraformModulePath) 92 | test_structure.SaveString(t, workingDir, "helmHome", helmHome) 93 | test_structure.SaveString(t, workingDir, "uniqueID", uniqueID) 94 | }) 95 | 96 | test_structure.RunTestStage(t, "create_terratest_options", func() { 97 | uniqueID := test_structure.LoadString(t, workingDir, "uniqueID") 98 | helmHome := test_structure.LoadString(t, workingDir, "helmHome") 99 | k8sTillerTerraformModulePath := test_structure.LoadString(t, workingDir, "k8sTillerTerraformModulePath") 100 | 101 | k8sTillerTerratestOptions := createExampleK8STillerTerraformOptions(t, k8sTillerTerraformModulePath, helmHome, uniqueID) 102 | 103 | test_structure.SaveTerraformOptions(t, workingDir, k8sTillerTerratestOptions) 104 | }) 105 | 106 | defer test_structure.RunTestStage(t, "cleanup", func() { 107 | k8sTillerTerratestOptions := test_structure.LoadTerraformOptions(t, workingDir) 108 | terraform.Destroy(t, k8sTillerTerratestOptions) 109 | }) 110 | 111 | test_structure.RunTestStage(t, "terraform_apply", func() { 112 | k8sTillerTerratestOptions := test_structure.LoadTerraformOptions(t, workingDir) 113 | terraform.InitAndApply(t, k8sTillerTerratestOptions) 114 | }) 115 | 116 | test_structure.RunTestStage(t, "setup_helm_client", func() { 117 | helmHome := test_structure.LoadString(t, workingDir, "helmHome") 118 | kubectlOptions := k8s.NewKubectlOptions("", "", "") 119 | k8sTillerTerratestOptions := 
test_structure.LoadTerraformOptions(t, workingDir) 120 | tillerNamespace := terraform.OutputRequired(t, k8sTillerTerratestOptions, "tiller_namespace") 121 | resourceNamespace := terraform.OutputRequired(t, k8sTillerTerratestOptions, "resource_namespace") 122 | tillerVersion := k8sTillerTerratestOptions.Vars["tiller_version"].(string) 123 | 124 | runKubergruntWait(t, kubectlOptions, tillerNamespace, tillerVersion) 125 | runKubergruntConfigure(t, kubectlOptions, helmHome, tillerNamespace, resourceNamespace) 126 | }) 127 | 128 | test_structure.RunTestStage(t, "validate", func() { 129 | helmHome := test_structure.LoadString(t, workingDir, "helmHome") 130 | k8sTillerTerratestOptions := test_structure.LoadTerraformOptions(t, workingDir) 131 | resourceNamespace := terraform.OutputRequired(t, k8sTillerTerratestOptions, "resource_namespace") 132 | kubectlOptions := k8s.NewKubectlOptions("", "", resourceNamespace) 133 | 134 | runHelm( 135 | t, 136 | kubectlOptions, 137 | helmHome, 138 | "install", 139 | "stable/kubernetes-dashboard", 140 | "--wait", 141 | ) 142 | }) 143 | } 144 | 145 | func runKubergruntConfigure( 146 | t *testing.T, 147 | options *k8s.KubectlOptions, 148 | helmHome string, 149 | tillerNamespace string, 150 | resourceNamespace string, 151 | ) { 152 | kubergruntArgs := []string{ 153 | "helm", 154 | "configure", 155 | "--helm-home", helmHome, 156 | "--tiller-namespace", tillerNamespace, 157 | "--resource-namespace", resourceNamespace, 158 | "--rbac-user", "minikube", 159 | } 160 | if options.ContextName != "" { 161 | kubergruntArgs = append(kubergruntArgs, "--kubectl-context-name", options.ContextName) 162 | } 163 | if options.ConfigPath != "" { 164 | kubergruntArgs = append(kubergruntArgs, "--kubeconfig", options.ConfigPath) 165 | } 166 | 167 | cmd := shell.Command{ 168 | Command: "kubergrunt", 169 | Args: kubergruntArgs, 170 | } 171 | shell.RunCommand(t, cmd) 172 | } 173 | 174 | func runKubergruntWait( 175 | t *testing.T, 176 | options *k8s.KubectlOptions, 
177 | tillerNamespace string, 178 | tillerVersion string, 179 | ) { 180 | kubergruntArgs := []string{ 181 | "helm", 182 | "wait-for-tiller", 183 | "--tiller-namespace", tillerNamespace, 184 | "--expected-tiller-version", tillerVersion, 185 | } 186 | if options.ContextName != "" { 187 | kubergruntArgs = append(kubergruntArgs, "--kubectl-context-name", options.ContextName) 188 | } 189 | if options.ConfigPath != "" { 190 | kubergruntArgs = append(kubergruntArgs, "--kubeconfig", options.ConfigPath) 191 | } 192 | 193 | cmd := shell.Command{ 194 | Command: "kubergrunt", 195 | Args: kubergruntArgs, 196 | } 197 | shell.RunCommand(t, cmd) 198 | } 199 | 200 | func kubergruntInstalled(t *testing.T) bool { 201 | cmd := shell.Command{ 202 | Command: "kubergrunt", 203 | Args: []string{"version"}, 204 | } 205 | err := shell.RunCommandE(t, cmd) 206 | return err == nil 207 | 208 | } 209 | -------------------------------------------------------------------------------- /test/kubefixtures/curl-kubeapi-as-service-account.yml.tpl: -------------------------------------------------------------------------------- 1 | --- 2 | # A Pod that can be used to curl the Kubernetes API as the service account 3 | # This works by having a sidecar container provide a kubectl API proxy that uses the service account token to talk to 4 | # the real Kubernetes API in the cluster, and access it via the main curl container. 5 | # Source: "Kubernetes in Action", section 12.1.4 6 | apiVersion: v1 7 | kind: Pod 8 | metadata: 9 | name: {{ .ServiceAccountName }}-curl 10 | namespace: {{ .Namespace }} 11 | spec: 12 | serviceAccountName: {{ .ServiceAccountName }} 13 | containers: 14 | - name: main 15 | image: tutum/curl 16 | # This is intentional. Because of the way pods work, the container needs to be up and running as a service in order 
Therefore, we use a sleep here to create a pseudo service container that houses the 18 | # curl binary that we can then drop into and use via `kubectl exec`. 19 | command: ["sleep", "9999999"] 20 | - name: ambassador 21 | image: luksa/kubectl-proxy 22 | -------------------------------------------------------------------------------- /test/kubefixtures/namespace-check-create-pod.json.tpl: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "authorization.k8s.io/v1", 3 | "kind": "SelfSubjectAccessReview", 4 | "spec": { 5 | "resourceAttributes": { 6 | "namespace": "{{ .Namespace }}", 7 | "verb": "create", 8 | "group": "core", 9 | "resource": "pod" 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /test/kubefixtures/namespace-check-list-pod.json.tpl: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "authorization.k8s.io/v1", 3 | "kind": "SelfSubjectAccessReview", 4 | "spec": { 5 | "resourceAttributes": { 6 | "namespace": "{{ .Namespace }}", 7 | "verb": "list", 8 | "group": "core", 9 | "resource": "pod" 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /test/terratest_options.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "testing" 7 | 8 | "github.com/gruntwork-io/terratest/modules/terraform" 9 | ) 10 | 11 | func createExampleK8SNamespaceTerraformOptions(t *testing.T, uniqueID string, templatePath string) *terraform.Options { 12 | terraformVars := map[string]interface{}{"name": strings.ToLower(uniqueID)} 13 | terratestOptions := terraform.Options{ 14 | TerraformDir: templatePath, 15 | Vars: terraformVars, 16 | } 17 | return &terratestOptions 18 | } 19 | 20 | func createExampleK8STillerKubergruntTerraformOptions( 21 | t *testing.T, 22 | templatePath 
string, 23 | helmHome string, 24 | uniqueID string, 25 | testServiceAccountName string, 26 | testServiceAccountNamespace string, 27 | ) *terraform.Options { 28 | tillerNamespaceName := fmt.Sprintf("%s-tiller", strings.ToLower(uniqueID)) 29 | resourceNamespaceName := fmt.Sprintf("%s-resources", strings.ToLower(uniqueID)) 30 | tillerServiceAccountName := fmt.Sprintf("%s-tiller-service-account", strings.ToLower(uniqueID)) 31 | encodedTestServiceAccount := fmt.Sprintf("%s/%s", testServiceAccountNamespace, testServiceAccountName) 32 | terraformVars := map[string]interface{}{ 33 | "tiller_version": "v2.12.2", 34 | "tiller_namespace": tillerNamespaceName, 35 | "resource_namespace": resourceNamespaceName, 36 | "service_account_name": tillerServiceAccountName, 37 | "tls_subject": map[string]string{ 38 | "common_name": "tiller", 39 | "org": "Gruntwork", 40 | }, 41 | "client_tls_subject": map[string]string{ 42 | "common_name": encodedTestServiceAccount, 43 | "org": "Gruntwork", 44 | }, 45 | "helm_client_rbac_service_account": encodedTestServiceAccount, 46 | "helm_home": helmHome, 47 | } 48 | terratestOptions := terraform.Options{ 49 | TerraformDir: templatePath, 50 | Vars: terraformVars, 51 | } 52 | return &terratestOptions 53 | } 54 | 55 | func createExampleK8STillerTerraformOptions( 56 | t *testing.T, 57 | templatePath string, 58 | helmHome string, 59 | uniqueID string, 60 | ) *terraform.Options { 61 | tillerNamespaceName := fmt.Sprintf("%s-tiller", strings.ToLower(uniqueID)) 62 | resourceNamespaceName := fmt.Sprintf("%s-resources", strings.ToLower(uniqueID)) 63 | tillerServiceAccountName := fmt.Sprintf("%s-tiller-service-account", strings.ToLower(uniqueID)) 64 | terraformVars := map[string]interface{}{ 65 | "tiller_version": "v2.12.2", 66 | "tiller_namespace": tillerNamespaceName, 67 | "resource_namespace": resourceNamespaceName, 68 | "service_account_name": tillerServiceAccountName, 69 | "tls_subject": map[string]string{ 70 | "common_name": "tiller", 71 | "organization": 
"Gruntwork", 72 | }, 73 | "client_tls_subject": map[string]string{ 74 | "common_name": "minikube", 75 | "organization": "Gruntwork", 76 | }, 77 | "grant_helm_client_rbac_user": "minikube", 78 | } 79 | terratestOptions := terraform.Options{ 80 | TerraformDir: templatePath, 81 | Vars: terraformVars, 82 | } 83 | return &terratestOptions 84 | } 85 | -------------------------------------------------------------------------------- /variables.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # MODULE PARAMETERS 3 | # These variables are expected to be passed in by the operator 4 | # --------------------------------------------------------------------------------------------------------------------- 5 | 6 | variable "tiller_namespace" { 7 | description = "The namespace to deploy Tiller into." 8 | type = string 9 | } 10 | 11 | variable "resource_namespace" { 12 | description = "The namespace where the Helm chart resources will be deployed into by Tiller." 13 | type = string 14 | } 15 | 16 | variable "service_account_name" { 17 | description = "The name of the service account to use for Tiller." 18 | type = string 19 | } 20 | 21 | variable "tls_subject" { 22 | description = "The issuer information that contains the identifying information for the Tiller server. Used to generate the TLS certificate keypairs. See https://www.terraform.io/docs/providers/tls/r/cert_request.html#common_name for a list of expected keys." 23 | type = map(any) 24 | 25 | default = { 26 | common_name = "tiller" 27 | organization = "Gruntwork" 28 | } 29 | } 30 | 31 | variable "client_tls_subject" { 32 | description = "The issuer information that contains the identifying information for the helm client of the operator. Used to generate the TLS certificate keypairs. 
See https://www.terraform.io/docs/providers/tls/r/cert_request.html#common_name for a list of expected keys." 33 | type = map(any) 34 | 35 | default = { 36 | common_name = "admin" 37 | organization = "Gruntwork" 38 | } 39 | } 40 | 41 | # --------------------------------------------------------------------------------------------------------------------- 42 | # OPTIONAL MODULE PARAMETERS 43 | # These variables have reasonable defaults, but can be overridden. 44 | # --------------------------------------------------------------------------------------------------------------------- 45 | 46 | # Tiller configuration 47 | 48 | variable "tiller_version" { 49 | description = "The version of Tiller to deploy." 50 | type = string 51 | default = "v2.11.0" 52 | } 53 | 54 | # TLS algorithm configuration 55 | 56 | variable "private_key_algorithm" { 57 | description = "The name of the algorithm to use for private keys. Must be one of: RSA or ECDSA." 58 | type = string 59 | default = "ECDSA" 60 | } 61 | 62 | variable "private_key_ecdsa_curve" { 63 | description = "The name of the elliptic curve to use. Should only be used if var.private_key_algorithm is ECDSA. Must be one of P224, P256, P384 or P521." 64 | type = string 65 | default = "P256" 66 | } 67 | 68 | variable "private_key_rsa_bits" { 69 | description = "The size of the generated RSA key in bits. Should only be used if var.private_key_algorithm is RSA." 70 | type = number 71 | default = 2048 72 | } 73 | 74 | # Kubectl options 75 | 76 | variable "kubectl_config_context_name" { 77 | description = "The config context to use when authenticating to the Kubernetes cluster. If empty, defaults to the current context specified in the kubeconfig file." 78 | type = string 79 | default = "" 80 | } 81 | 82 | variable "kubectl_config_path" { 83 | description = "The path to the config file to use for kubectl. 
If empty, defaults to $HOME/.kube/config" 84 | type = string 85 | default = "~/.kube/config" 86 | } 87 | 88 | # Helm client config options 89 | 90 | variable "grant_helm_client_rbac_user" { 91 | description = "If set, will generate client side TLS certs for this RBAC user." 92 | type = string 93 | default = "" 94 | } 95 | 96 | variable "grant_helm_client_rbac_group" { 97 | description = "If set, will generate client side TLS certs for this RBAC group." 98 | type = string 99 | default = "" 100 | } 101 | 102 | variable "grant_helm_client_rbac_service_account" { 103 | description = "If set, will generate client side TLS certs for this ServiceAccount. The ServiceAccount should be encoded as NAMESPACE/NAME." 104 | type = string 105 | default = "" 106 | } 107 | --------------------------------------------------------------------------------