├── .circleci └── config.yml ├── .github ├── FUNDING.yml ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── pull_request_template.md ├── .gitignore ├── .gon_amd64.hcl ├── .gon_arm64.hcl ├── CODEOWNERS ├── CONTRIBUTING.md ├── LICENSE ├── NOTICE ├── README.md ├── cmd ├── .gitignore ├── common.go ├── common_test.go ├── eks.go ├── eks │ └── types.go ├── errors.go ├── k8s.go ├── main.go └── tls.go ├── commonerrors └── commonerrors.go ├── eks ├── asg.go ├── asg_test.go ├── cleanup.go ├── cleanup_test.go ├── cluster.go ├── coredns.go ├── deploy.go ├── deploy_state.go ├── deploy_state_test.go ├── drain.go ├── eks.go ├── elb.go ├── errors.go ├── fixture │ ├── aws-k8s-cni.yaml │ └── cleanup-test │ │ ├── attached-ni │ │ ├── main.tf │ │ ├── outputs.tf │ │ └── variables.tf │ │ └── unattached-ni │ │ ├── main.tf │ │ ├── outputs.tf │ │ └── variables.tf ├── instances.go ├── instances_test.go ├── kubectl_configure.go ├── kubectl_configure_test.go ├── oidc.go ├── oidc_test.go ├── sync.go ├── sync_test.go └── tls.go ├── eksawshelper ├── arn.go ├── arn_test.go ├── client.go ├── cluster.go ├── ecr.go ├── ecr_test.go ├── eksawshelper.go └── errors.go ├── go.mod ├── go.sum ├── jsonpatch └── jsonpatch.go ├── kubectl ├── client.go ├── command.go ├── config.go ├── config_test.go ├── errors.go ├── helpers.go ├── ingress.go ├── ingress_test.go ├── kubectl.go ├── node.go ├── node_test.go ├── options.go ├── pod.go ├── pod_test.go ├── role.go ├── role_test.go ├── rolebinding.go ├── secret.go ├── secret_test.go ├── service.go ├── service_test.go ├── test_helpers.go ├── types.go └── validate.go ├── logging └── logging.go └── tls ├── cert_common.go ├── cert_common_test.go ├── ecdsa_cert.go ├── ecdsa_cert_test.go ├── ecdsa_keys.go ├── ecdsa_keys_test.go ├── errors.go ├── gencmd.go ├── gencmd_test.go ├── options.go ├── options_test.go ├── pem.go ├── rsa_cert.go ├── rsa_cert_test.go ├── rsa_keys.go ├── rsa_keys_test.go ├── test_helpers.go ├── testfixtures ├── ca.cert ├── ca.key.pem ├── 
ca.pub ├── tiller.cert ├── tiller.key.pem └── tiller.pub └── tls.go /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | env: &env 2 | environment: 3 | GRUNTWORK_INSTALLER_VERSION: v0.0.38 4 | TERRATEST_LOG_PARSER_VERSION: v0.40.6 5 | MODULE_CI_VERSION: v0.59.10 6 | TERRAFORM_VERSION: 1.2.7 7 | TERRAGRUNT_VERSION: NONE 8 | PACKER_VERSION: NONE 9 | GOLANG_VERSION: 1.21.1 10 | GO111MODULE: auto 11 | KUBECTL_VERSION: v1.24.8 12 | MINIKUBE_VERSION: v1.28.0 13 | CRI_DOCKERD_VERSION: 0.3.0 14 | KUBECONFIG: /home/circleci/.kube/config 15 | defaults: &defaults 16 | machine: 17 | enabled: true 18 | image: ubuntu-2004:202111-02 19 | <<: *env 20 | install_gruntwork_utils: &install_gruntwork_utils 21 | name: install gruntwork utils 22 | command: | 23 | curl -Ls https://raw.githubusercontent.com/gruntwork-io/gruntwork-installer/main/bootstrap-gruntwork-installer.sh | bash /dev/stdin --version "${GRUNTWORK_INSTALLER_VERSION}" 24 | gruntwork-install --module-name "gruntwork-module-circleci-helpers" --repo "https://github.com/gruntwork-io/terraform-aws-ci" --tag "${MODULE_CI_VERSION}" 25 | gruntwork-install --module-name "kubernetes-circleci-helpers" --repo "https://github.com/gruntwork-io/terraform-aws-ci" --tag "${MODULE_CI_VERSION}" 26 | gruntwork-install --binary-name "terratest_log_parser" --repo "https://github.com/gruntwork-io/terratest" --tag "${TERRATEST_LOG_PARSER_VERSION}" 27 | configure-environment-for-gruntwork-module \ 28 | --mise-version NONE \ 29 | --terraform-version ${TERRAFORM_VERSION} \ 30 | --terragrunt-version ${TERRAGRUNT_VERSION} \ 31 | --packer-version ${PACKER_VERSION} \ 32 | --go-version ${GOLANG_VERSION} \ 33 | --kubectl-version NONE # We install kubectl in the minikube step 34 | orbs: 35 | go: circleci/go@1.7.3 36 | version: 2.1 37 | jobs: 38 | kubergrunt_tests: 39 | <<: *defaults 40 | steps: 41 | - checkout 42 | - run: 43 | <<: *install_gruntwork_utils 44 | - run: 45 | command: | 46 | cd 
/home/circleci 47 | sudo apt-get update 48 | sudo DEBIAN_FRONTEND=noninteractive apt-get install -y conntrack wget 49 | setup-minikube --minikube-version "${MINIKUBE_VERSION}" --k8s-version "${KUBECTL_VERSION}" --cri-dockerd-version "${CRI_DOCKERD_VERSION}" 50 | - run: 51 | name: run kubergrunt tests 52 | command: | 53 | mkdir -p /tmp/logs 54 | run-go-tests --path . --timeout 10m | tee /tmp/logs/all.log 55 | no_output_timeout: 900s 56 | - run: 57 | command: terratest_log_parser --testlog /tmp/logs/all.log --outputdir /tmp/logs 58 | when: always 59 | - store_artifacts: 60 | path: /tmp/logs 61 | - store_test_results: 62 | path: /tmp/logs 63 | build: 64 | resource_class: large 65 | <<: *defaults 66 | steps: 67 | - checkout 68 | - run: 69 | <<: *install_gruntwork_utils 70 | - run: build-go-binaries --app-name kubergrunt --src-path ./cmd --dest-path ./bin --ld-flags "-X main.VERSION=$CIRCLE_TAG -extldflags '-static'" 71 | - persist_to_workspace: 72 | root: . 73 | paths: bin 74 | deploy: 75 | <<: *env 76 | macos: 77 | xcode: 14.2.0 78 | resource_class: macos.m1.medium.gen1 79 | steps: 80 | - checkout 81 | - attach_workspace: 82 | at: . 
83 | - go/install: 84 | version: "1.20.5" 85 | - run: 86 | name: Install sign-binary-helpers 87 | command: | 88 | curl -Ls https://raw.githubusercontent.com/gruntwork-io/gruntwork-installer/master/bootstrap-gruntwork-installer.sh | bash /dev/stdin --version "${GRUNTWORK_INSTALLER_VERSION}" 89 | gruntwork-install --module-name "gruntwork-module-circleci-helpers" --repo "https://github.com/gruntwork-io/terraform-aws-ci" --tag "${MODULE_CI_VERSION}" 90 | gruntwork-install --module-name "sign-binary-helpers" --repo "https://github.com/gruntwork-io/terraform-aws-ci" --tag "${MODULE_CI_VERSION}" 91 | - run: 92 | name: Compile and sign the binaries 93 | command: | 94 | export AC_PASSWORD=${MACOS_AC_PASSWORD} 95 | export AC_PROVIDER=${MACOS_AC_PROVIDER} 96 | 97 | sign-binary --install-macos-sign-dependencies --os mac .gon_amd64.hcl 98 | sign-binary --os mac .gon_arm64.hcl 99 | echo "Done signing the binary" 100 | 101 | # Replace the files in bin. These are the same file names generated from .gon_amd64.hcl and .gon_arm64.hcl 102 | unzip kubergrunt_darwin_amd64.zip 103 | mv kubergrunt_darwin_amd64 bin/ 104 | 105 | unzip kubergrunt_darwin_arm64.zip 106 | mv kubergrunt_darwin_arm64 bin/ 107 | - run: 108 | name: Run SHA256SUM 109 | command: | 110 | brew install coreutils 111 | cd bin && sha256sum * > SHA256SUMS 112 | - run: upload-github-release-assets bin/* 113 | workflows: 114 | version: 2 115 | test-and-deploy: 116 | jobs: 117 | - kubergrunt_tests: 118 | filters: 119 | tags: 120 | only: /^v.*/ 121 | context: 122 | - AWS__PHXDEVOPS__circle-ci-test 123 | - GITHUB__PAT__gruntwork-ci 124 | - build: 125 | filters: 126 | tags: 127 | only: /^v.*/ 128 | branches: 129 | ignore: /.*/ 130 | context: 131 | - AWS__PHXDEVOPS__circle-ci-test 132 | - GITHUB__PAT__gruntwork-ci 133 | - deploy: 134 | requires: 135 | - build 136 | filters: 137 | tags: 138 | only: /^v.*/ 139 | branches: 140 | ignore: /.*/ 141 | context: 142 | - AWS__PHXDEVOPS__circle-ci-test 143 | - GITHUB__PAT__gruntwork-ci 144 
| - APPLE__OSX__code-signing 145 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: gruntwork-io 4 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a bug report to help us improve. 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | 14 | 15 | **Describe the bug** 16 | A clear and concise description of what the bug is. 17 | 18 | **To Reproduce** 19 | Steps to reproduce the behavior including the relevant Terraform/Terragrunt/Packer version number and any code snippets and module inputs you used. 20 | 21 | ```hcl 22 | // paste code snippets here 23 | ``` 24 | 25 | **Expected behavior** 26 | A clear and concise description of what you expected to happen. 27 | 28 | **Nice to have** 29 | - [ ] Terminal output 30 | - [ ] Screenshots 31 | 32 | **Additional context** 33 | Add any other context about the problem here. 34 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Submit a feature request for this repo. 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | 14 | 15 | **Describe the solution you'd like** 16 | A clear and concise description of what you want to happen. 17 | 18 | **Describe alternatives you've considered** 19 | A clear and concise description of any alternative solutions or features you've considered. 20 | 21 | **Additional context** 22 | Add any other context or screenshots about the feature request here. 
23 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | 6 | 7 | ## Description 8 | 9 | Fixes #000. 10 | 11 | ### Documentation 12 | 13 | 21 | 22 | 23 | ## TODOs 24 | Read the [Gruntwork contribution guidelines](https://gruntwork.notion.site/Gruntwork-Coding-Methodology-02fdcd6e4b004e818553684760bf691e). 25 | 26 | 27 | 28 | Please ensure all of these TODOs are completed before asking for a review. 29 | 30 | - [ ] Ensure the branch is named correctly with the issue number. e.g: `feature/new-vpc-endpoints-955` or `bug/missing-count-param-434`. 31 | - [ ] Update the docs. 32 | - [ ] Keep the changes backward compatible where possible. 33 | - [ ] Run the pre-commit checks successfully. 34 | - [ ] Run the relevant tests successfully. 35 | - [ ] Ensure any 3rd party code adheres with our [license policy](https://www.notion.so/gruntwork/Gruntwork-licenses-and-open-source-usage-policy-f7dece1f780341c7b69c1763f22b1378) or delete this line if its not applicable. 36 | 37 | ## Release Notes (draft) 38 | 39 | 45 | 46 | 47 | Added / Removed / Updated [X]. 
48 | 49 | ### Migration Guide 50 | 51 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Terraform files 2 | .terraform 3 | terraform.tfstate 4 | terraform.tfvars 5 | *.tfstate* 6 | 7 | # OS X files 8 | .history 9 | .DS_Store 10 | 11 | # lambda zip files 12 | lambda.zip 13 | 14 | # IntelliJ files 15 | .idea_modules 16 | *.iml 17 | *.iws 18 | *.ipr 19 | .idea/ 20 | build/ 21 | */build/ 22 | out/ 23 | 24 | # VS Code 25 | .vscode 26 | 27 | # Go best practices dictate that libraries should not include the vendor directory 28 | vendor 29 | 30 | # Python stuff 31 | dist 32 | aws_auth_configmap_generator.* 33 | .python-version 34 | .tox 35 | __pycache__ 36 | *.pyc 37 | 38 | # Folder used to store temporary test data by Terratest 39 | .test-data 40 | 41 | # Generic temporary files 42 | /tmp 43 | 44 | # Go binary directory 45 | bin 46 | 47 | # goenv 48 | .go-version 49 | -------------------------------------------------------------------------------- /.gon_amd64.hcl: -------------------------------------------------------------------------------- 1 | # See https://github.com/gruntwork-io/terraform-aws-ci/blob/main/modules/sign-binary-helpers/ 2 | # for further instructions on how to sign the binary + submitting for notarization. 3 | 4 | source = ["./bin/kubergrunt_darwin_amd64"] 5 | 6 | bundle_id = "io.gruntwork.app.terragrunt" 7 | 8 | apple_id { 9 | username = "machine.apple@gruntwork.io" 10 | } 11 | 12 | sign { 13 | application_identity = "Developer ID Application: Gruntwork, Inc." 
14 | } 15 | 16 | zip { 17 | output_path = "kubergrunt_darwin_amd64.zip" 18 | } 19 | -------------------------------------------------------------------------------- /.gon_arm64.hcl: -------------------------------------------------------------------------------- 1 | # See https://github.com/gruntwork-io/terraform-aws-ci/blob/main/modules/sign-binary-helpers/ 2 | # for further instructions on how to sign the binary + submitting for notarization. 3 | 4 | source = ["./bin/kubergrunt_darwin_arm64"] 5 | 6 | bundle_id = "io.gruntwork.app.terragrunt" 7 | 8 | apple_id { 9 | username = "machine.apple@gruntwork.io" 10 | } 11 | 12 | sign { 13 | application_identity = "Developer ID Application: Gruntwork, Inc." 14 | } 15 | 16 | zip { 17 | output_path = "kubergrunt_darwin_arm64.zip" 18 | } 19 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @mateimicu @ryehowell 2 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contribution Guidelines 2 | 3 | Contributions to this project are very welcome! We follow a fairly standard [pull request process]( 4 | https://help.github.com/articles/about-pull-requests/) for contributions, subject to the following guidelines: 5 | 6 | 1. [File a GitHub issue](#file-a-github-issue) 7 | 1. [Update the documentation](#update-the-documentation) 8 | 1. [Update the tests](#update-the-tests) 9 | 1. [Update the code](#update-the-code) 10 | 1. [Create a pull request](#create-a-pull-request) 11 | 1. [Merge and release](#merge-and-release) 12 | 13 | ## File a GitHub issue 14 | 15 | Before starting any work, we recommend filing a GitHub issue in this repo. 
This is your chance to ask questions and 16 | get feedback from the maintainers and the community before you sink a lot of time into writing (possibly the wrong) 17 | code. If there is anything you're unsure about, just ask! 18 | 19 | ## Update the documentation 20 | 21 | We recommend updating the documentation *before* updating any code (see [Readme Driven 22 | Development](http://tom.preston-werner.com/2010/08/23/readme-driven-development.html)). This ensures the documentation 23 | stays up to date and allows you to think through the problem at a high level before you get lost in the weeds of 24 | coding. 25 | 26 | ## Update the tests 27 | 28 | We also recommend updating the automated tests *before* updating any code (see [Test Driven 29 | Development](https://en.wikipedia.org/wiki/Test-driven_development)). That means you add or update a test case, 30 | verify that it's failing with a clear error message, and *then* make the code changes to get that test to pass. This 31 | ensures the tests stay up to date and verify all the functionality in this project, including whatever new 32 | functionality you're adding in your contribution. 33 | 34 | ## Update the code 35 | 36 | At this point, make your code changes and use your new test case to verify that everything is working. As you work, 37 | please make every effort to avoid unnecessary backwards incompatible changes. If a backwards incompatible change cannot 38 | be avoided, please make sure to call that out when you submit a pull request, explaining why the change is absolutely 39 | necessary. 40 | 41 | ## Create a pull request 42 | 43 | [Create a pull request](https://help.github.com/articles/creating-a-pull-request/) with your changes. Please make sure 44 | to include the following: 45 | 46 | 1. A description of the change, including a link to your GitHub issue. 47 | 1. The output of your automated test run, preferably in a [GitHub Gist](https://gist.github.com/). 
We cannot run 48 | automated tests for pull requests automatically due to [security 49 | concerns](https://circleci.com/docs/fork-pr-builds/#security-implications), so we need you to manually provide this 50 | test output so we can verify that everything is working. 51 | 1. Any notes on backwards incompatibility or downtime. 52 | 53 | ## Merge and release 54 | 55 | The maintainers for this repo will review your code and provide feedback. If everything looks good, they will merge the 56 | code and release a new version, which you'll be able to find in the [releases page](../../releases). 57 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | kubergrunt 2 | Copyright 2019 Gruntwork, Inc. 3 | 4 | This product includes software developed at Gruntwork (https://www.gruntwork.io/). 5 | -------------------------------------------------------------------------------- /cmd/.gitignore: -------------------------------------------------------------------------------- 1 | kubergrunt 2 | -------------------------------------------------------------------------------- /cmd/common_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestParseTLSSubjectInfoJsonOrgOrgUnit(t *testing.T) { 10 | t.Parallel() 11 | subjectInfo, err := parseOrCreateTLSSubjectInfo(`{"org": "Gruntwork", "org_unit": "Eng"}`) 12 | assert.NoError(t, err) 13 | assert.Equal(t, subjectInfo.Org, "Gruntwork") 14 | assert.Equal(t, subjectInfo.OrgUnit, "Eng") 15 | } 16 | 17 | func TestParseTLSSubjectInfoJsonOrganizationOrganizationalUnit(t *testing.T) { 18 | t.Parallel() 19 | subjectInfo, err := parseOrCreateTLSSubjectInfo(`{"organization": "Gruntwork", "organizational_unit": "Eng"}`) 20 | assert.NoError(t, err) 21 | assert.Equal(t, subjectInfo.Org, 
"Gruntwork") 22 | assert.Equal(t, subjectInfo.OrgUnit, "Eng") 23 | } 24 | -------------------------------------------------------------------------------- /cmd/eks/types.go: -------------------------------------------------------------------------------- 1 | package eks 2 | 3 | type CorednsAnnotation string 4 | 5 | const ( 6 | Fargate CorednsAnnotation = "fargate" 7 | EC2 CorednsAnnotation = "ec2" 8 | ) 9 | -------------------------------------------------------------------------------- /cmd/errors.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "fmt" 4 | 5 | // MutuallyExclusiveFlagError is returned when there is a violation of a mutually exclusive flag set. 6 | type MutuallyExclusiveFlagError struct { 7 | Message string 8 | } 9 | 10 | func (err MutuallyExclusiveFlagError) Error() string { 11 | return err.Message 12 | } 13 | 14 | // ExactlyOneASGErr is returned if a user does not provide exactly one ASG. 15 | type ExactlyOneASGErr struct { 16 | flagName string 17 | } 18 | 19 | func (err ExactlyOneASGErr) Error() string { 20 | return fmt.Sprintf("You must provide exactly one ASG using %s to this command.", err.flagName) 21 | } 22 | -------------------------------------------------------------------------------- /cmd/k8s.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/gruntwork-io/go-commons/entrypoint" 7 | "github.com/urfave/cli" 8 | 9 | "github.com/gruntwork-io/kubergrunt/kubectl" 10 | ) 11 | 12 | var ( 13 | ingressNameFlag = cli.StringFlag{ 14 | Name: "ingress-name", 15 | Usage: "(Required) The name of the Ingress resource to wait for.", 16 | } 17 | namespaceFlag = cli.StringFlag{ 18 | Name: "namespace", 19 | Usage: "(Required) The namespace where the Ingress resource to wait for is deployed to.", 20 | } 21 | 22 | maxRetriesFlag = cli.IntFlag{ 23 | Name: "max-retries", 24 | Value: 60, 25 | Usage: 
"The maximum number of times to retry checks.", 26 | } 27 | sleepBetweenRetriesFlag = cli.DurationFlag{ 28 | Name: "sleep-between-retries", 29 | Value: 5 * time.Second, 30 | Usage: "The amount of time to sleep inbetween each check attempt. Accepted as a duration (5s, 10m, 1h).", 31 | } 32 | ) 33 | 34 | func SetupK8SCommand() cli.Command { 35 | const helpText = "Helper scripts for managing Kubernetes resources directly." 36 | return cli.Command{ 37 | Name: "k8s", 38 | Usage: helpText, 39 | Description: helpText, 40 | Subcommands: cli.Commands{ 41 | cli.Command{ 42 | Name: "wait-for-ingress", 43 | Usage: "Wait for the Ingress endpoint to be provisioned.", 44 | Description: `Waits for the Ingress endpoint to be provisioned. This will monitor the Ingress resource, continuously checking until the endpoint is allocated to the Ingress resource or times out. By default, this will try for 5 minutes (max retries 60 and time betweeen sleep of 5 seconds). 45 | 46 | You can configure the timeout settings using the --max-retries and --sleep-between-retries CLI args. This will check for --max-retries times, sleeping for --sleep-between-retries inbetween tries.`, 47 | Action: waitForIngressEndpoint, 48 | Flags: []cli.Flag{ 49 | ingressNameFlag, 50 | namespaceFlag, 51 | 52 | maxRetriesFlag, 53 | sleepBetweenRetriesFlag, 54 | 55 | // Kubernetes auth flags 56 | genericKubectlContextNameFlag, 57 | genericKubeconfigFlag, 58 | genericKubectlServerFlag, 59 | genericKubectlCAFlag, 60 | genericKubectlTokenFlag, 61 | genericKubectlEKSClusterArnFlag, 62 | }, 63 | }, 64 | cli.Command{ 65 | Name: "kubectl", 66 | Usage: "Thin wrapper around kubectl to rely on kubergrunt for temporarily authenticating to the cluster.", 67 | Description: `This command will call out to kubectl with a temporary file that acts as the kubeconfig, set up with the parameters --kubectl-server-endpoint, --kubectl-certificate-authority, --kubectl-token. 
Unlike using kubectl directly, this command allows you to pass in the base64 encoded certificate authority data directly as opposed to as a file. 68 | 69 | To forward args to kubectl, pass all the args you wish to forward after a "--". For example, the following command runs "kubectl get pods -n kube-system": 70 | 71 | kubergrunt k8s kubectl \ 72 | --kubectl-server-endpoint $SERVER_ENDPOINT \ 73 | --kubectl-certificate-authority $SERVER_CA \ 74 | --kubectl-token $TOKEN \ 75 | -- get pods -n kube-system`, 76 | Action: kubectlWrapper, 77 | Flags: []cli.Flag{ 78 | // Kubernetes auth flags 79 | genericKubectlContextNameFlag, 80 | genericKubeconfigFlag, 81 | genericKubectlServerFlag, 82 | genericKubectlCAFlag, 83 | genericKubectlTokenFlag, 84 | genericKubectlEKSClusterArnFlag, 85 | }, 86 | }, 87 | }, 88 | } 89 | } 90 | 91 | // waitForIngressEndpoint is the action function for k8s wait-for-ingress command. 92 | func waitForIngressEndpoint(cliContext *cli.Context) error { 93 | // Extract Kubernetes auth information 94 | kubectlOptions, err := parseKubectlOptions(cliContext) 95 | if err != nil { 96 | return err 97 | } 98 | 99 | // Retrieve required arguments 100 | ingressName, err := entrypoint.StringFlagRequiredE(cliContext, ingressNameFlag.Name) 101 | if err != nil { 102 | return err 103 | } 104 | namespace, err := entrypoint.StringFlagRequiredE(cliContext, namespaceFlag.Name) 105 | if err != nil { 106 | return err 107 | } 108 | 109 | // Retrieve the timeout configuration args 110 | maxRetries := cliContext.Int(maxRetriesFlag.Name) 111 | sleepBetweenRetries := cliContext.Duration(sleepBetweenRetriesFlag.Name) 112 | 113 | // Now call waiting logic for the ingress endpoint 114 | return kubectl.WaitUntilIngressEndpointProvisioned(kubectlOptions, namespace, ingressName, maxRetries, sleepBetweenRetries) 115 | } 116 | 117 | // kubectlWrapper is the action function for k8s kubectl command. 
118 | func kubectlWrapper(cliContext *cli.Context) error { 119 | // Extract Kubernetes auth information 120 | kubectlOptions, err := parseKubectlOptions(cliContext) 121 | if err != nil { 122 | return err 123 | } 124 | return kubectl.RunKubectl(kubectlOptions, parseKubectlWrapperArgs(cliContext.Args())...) 125 | } 126 | 127 | func parseKubectlWrapperArgs(args cli.Args) []string { 128 | if args.Get(0) == "--" { 129 | return args[1:] 130 | } 131 | return args 132 | } 133 | -------------------------------------------------------------------------------- /cmd/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/gruntwork-io/go-commons/entrypoint" 5 | "github.com/gruntwork-io/go-commons/errors" 6 | "github.com/gruntwork-io/go-commons/logging" 7 | "github.com/sirupsen/logrus" 8 | "github.com/urfave/cli" 9 | ) 10 | 11 | // This variable is set at build time using -ldflags parameters. For example, we typically set this flag in circle.yml 12 | // to the latest Git tag when building our Go apps: 13 | // 14 | // build-go-binaries --app-name my-app --dest-path bin --ld-flags "-X main.VERSION=$CIRCLE_TAG" 15 | // 16 | // For more info, see: http://stackoverflow.com/a/11355611/483528 17 | var VERSION string 18 | 19 | var ( 20 | logLevelFlag = cli.StringFlag{ 21 | Name: "loglevel", 22 | Value: logrus.InfoLevel.String(), 23 | } 24 | ) 25 | 26 | // initCli initializes the CLI app before any command is actually executed. This function will handle all the setup 27 | // code, such as setting up the logger with the appropriate log level. 
28 | func initCli(cliContext *cli.Context) error { 29 | // Set logging level 30 | logLevel := cliContext.String(logLevelFlag.Name) 31 | level, err := logrus.ParseLevel(logLevel) 32 | if err != nil { 33 | return errors.WithStackTrace(err) 34 | } 35 | logging.SetGlobalLogLevel(level) 36 | return nil 37 | } 38 | 39 | // main should only setup the CLI flags and help texts. 40 | func main() { 41 | app := entrypoint.NewApp() 42 | entrypoint.HelpTextLineWidth = 120 43 | 44 | // Override the CLI FlagEnvHinter so it only returns the Usage text of the Flag and doesn't append the envVar text. Original func https://github.com/urfave/cli/blob/3df9a3c/flag.go#L60 45 | cli.FlagEnvHinter = func(envVar, str string) string { 46 | return str 47 | } 48 | 49 | app.Name = "kubergrunt" 50 | app.Author = "Gruntwork " 51 | app.Description = "A CLI tool to help setup and manage a Kubernetes cluster." 52 | app.EnableBashCompletion = true 53 | // Set the version number from your app from the VERSION variable that is passed in at build time 54 | app.Version = VERSION 55 | 56 | app.Before = initCli 57 | 58 | app.Flags = []cli.Flag{ 59 | logLevelFlag, 60 | } 61 | app.Commands = []cli.Command{ 62 | SetupEksCommand(), 63 | SetupK8SCommand(), 64 | SetupTLSCommand(), 65 | } 66 | entrypoint.RunApp(app) 67 | } 68 | -------------------------------------------------------------------------------- /commonerrors/commonerrors.go: -------------------------------------------------------------------------------- 1 | // Package commonerrors contains error types that are common across the project. 2 | package commonerrors 3 | 4 | import "fmt" 5 | 6 | // ImpossibleErr is returned for impossible conditions that should never happen in the code. This error should only be 7 | // returned if there is no user remedy and represents a bug in the code. 
8 | type ImpossibleErr string 9 | 10 | func (err ImpossibleErr) Error() string { 11 | return fmt.Sprintf( 12 | "You reached a point in kubergrunt that should not happen and is almost certainly a bug. Please open a GitHub issue on https://github.com/gruntwork-io/kubergrunt/issues with the contents of this error message. Code: %s", 13 | string(err), 14 | ) 15 | } 16 | -------------------------------------------------------------------------------- /eks/cleanup_test.go: -------------------------------------------------------------------------------- 1 | package eks 2 | 3 | import ( 4 | "io/ioutil" 5 | "testing" 6 | 7 | awsgo "github.com/aws/aws-sdk-go/aws" 8 | "github.com/aws/aws-sdk-go/aws/awserr" 9 | "github.com/aws/aws-sdk-go/service/ec2" 10 | "github.com/gruntwork-io/kubergrunt/eksawshelper" 11 | "github.com/gruntwork-io/terratest/modules/aws" 12 | "github.com/gruntwork-io/terratest/modules/random" 13 | "github.com/gruntwork-io/terratest/modules/terraform" 14 | test_structure "github.com/gruntwork-io/terratest/modules/test-structure" 15 | "github.com/stretchr/testify/require" 16 | ) 17 | 18 | const cleanupTestCasesFolder = "./fixture/cleanup-test" 19 | 20 | func TestDeleteSecurityGroupDependencies(t *testing.T) { 21 | t.Parallel() 22 | 23 | dirs, err := ioutil.ReadDir(cleanupTestCasesFolder) 24 | require.NoError(t, err) 25 | 26 | for _, dir := range dirs { 27 | dirName := dir.Name() 28 | t.Run(dirName, func(t *testing.T) { 29 | t.Parallel() 30 | testSGDependencyCleanup(t, dirName) 31 | }) 32 | } 33 | 34 | } 35 | 36 | func testSGDependencyCleanup(t *testing.T, exampleName string) { 37 | exampleFolder := test_structure.CopyTerraformFolderToTemp(t, cleanupTestCasesFolder, exampleName) 38 | awsRegion := aws.GetRandomStableRegion(t, nil, nil) 39 | opts := &terraform.Options{ 40 | TerraformDir: exampleFolder, 41 | Vars: map[string]interface{}{ 42 | "prefix": random.UniqueId(), 43 | }, 44 | EnvVars: map[string]string{ 45 | "AWS_DEFAULT_REGION": awsRegion, 46 | }, 47 | } 
48 | 49 | // We use the E flavor to ignore errors, since the resources will already be destroyed in the successful case, which 50 | // may interfere with the destroy call. Note that we still want to call destroy in case the cleanup routine is not 51 | // functioning. 52 | defer terraform.DestroyE(t, opts) 53 | terraform.InitAndApply(t, opts) 54 | 55 | securityGroupId := terraform.OutputRequired(t, opts, "security_group_id") 56 | sess, err := eksawshelper.NewAuthenticatedSession(awsRegion) 57 | require.NoError(t, err) 58 | ec2Svc := ec2.New(sess) 59 | require.NoError(t, deleteDependencies(ec2Svc, securityGroupId)) 60 | 61 | networkInterfaceId := terraform.OutputRequired(t, opts, "eni_id") 62 | describeNetworkInterfacesInput := &ec2.DescribeNetworkInterfacesInput{ 63 | NetworkInterfaceIds: awsgo.StringSlice([]string{networkInterfaceId}), 64 | } 65 | _, err = ec2Svc.DescribeNetworkInterfaces(describeNetworkInterfacesInput) 66 | require.Error(t, err) 67 | 68 | // Make sure it is the not found error 69 | awsErr, isAwsErr := err.(awserr.Error) 70 | require.True(t, isAwsErr) 71 | require.Equal(t, awsErr.Code(), "InvalidNetworkInterfaceID.NotFound") 72 | 73 | } 74 | -------------------------------------------------------------------------------- /eks/cluster.go: -------------------------------------------------------------------------------- 1 | package eks 2 | 3 | import ( 4 | "io/ioutil" 5 | "math" 6 | "net/http" 7 | "time" 8 | 9 | "github.com/aws/aws-sdk-go/aws" 10 | "github.com/aws/aws-sdk-go/service/eks" 11 | "github.com/gruntwork-io/go-commons/errors" 12 | 13 | "github.com/gruntwork-io/kubergrunt/eksawshelper" 14 | "github.com/gruntwork-io/kubergrunt/logging" 15 | ) 16 | 17 | // VerifyCluster verifies that the cluster exists, and that the Kubernetes api server is up and accepting traffic. 18 | // If waitForCluster is true, this command will wait for each stage to reach the true state. 
19 | func VerifyCluster( 20 | eksClusterArn string, 21 | waitForCluster bool, 22 | waitMaxRetries int, 23 | waitSleepBetweenRetries time.Duration, 24 | ) error { 25 | logger := logging.GetProjectLogger() 26 | logger.Infof("Checking if EKS cluster %s exists", eksClusterArn) 27 | 28 | if waitForCluster && waitMaxRetries == 0 { 29 | // Default is 5 minutes / duration 30 | waitMaxRetries = int(math.Trunc(300 / waitSleepBetweenRetries.Seconds())) 31 | } 32 | 33 | clusterInfo, err := eksawshelper.GetClusterByArn(eksClusterArn) 34 | if err == nil && !clusterIsActive(clusterInfo) { 35 | err = EKSClusterNotReady{eksClusterArn} 36 | } 37 | if err != nil { 38 | logger.Errorf("EKS cluster %s is not active yet", eksClusterArn) 39 | if !waitForCluster { 40 | logger.Errorf("Did not specify wait. Aborting...") 41 | return err 42 | } 43 | err = waitForClusterActive(eksClusterArn, waitMaxRetries, waitSleepBetweenRetries) 44 | if err != nil { 45 | return err 46 | } 47 | } 48 | 49 | logger.Infof("Verified EKS cluster %s is in active state.", eksClusterArn) 50 | 51 | logger.Infof("Checking EKS cluster %s Kubernetes API endpoint.", eksClusterArn) 52 | available := checkKubernetesApiServer(eksClusterArn) 53 | if !available && !waitForCluster { 54 | logger.Errorf("Kubernetes api server is not ready yet") 55 | logger.Errorf("Did not specify wait. Aborting...") 56 | return errors.WithStackTrace(EKSClusterNotReady{eksClusterArn}) 57 | } 58 | if !available { 59 | err = waitForKubernetesApiServer(eksClusterArn, waitMaxRetries, waitSleepBetweenRetries) 60 | if err != nil { 61 | return err 62 | } 63 | } 64 | logger.Infof("Verified EKS cluster %s Kubernetes API endpoint is available.", eksClusterArn) 65 | 66 | return nil 67 | } 68 | 69 | func clusterIsActive(clusterInfo *eks.Cluster) bool { 70 | return clusterInfo != nil && aws.StringValue(clusterInfo.Status) == "ACTIVE" 71 | } 72 | 73 | // waitForClusterActive continuously queries the AWS API until the cluster reaches the ACTIVE state. 
74 | func waitForClusterActive(eksClusterArn string, maxRetries int, sleepBetweenRetries time.Duration) error { 75 | logger := logging.GetProjectLogger() 76 | logger.Infof("Waiting for cluster %s to reach active state.", eksClusterArn) 77 | for i := 0; i < maxRetries; i++ { 78 | logger.Info("Checking EKS cluster info") 79 | clusterInfo, err := eksawshelper.GetClusterByArn(eksClusterArn) 80 | // We do nothing with the error other than log, because it could mean the cluster hasn't been created yet. 81 | if err != nil { 82 | logger.Warnf("Error retrieving cluster info %s", err) 83 | } 84 | if clusterIsActive(clusterInfo) { 85 | logger.Infof("EKS cluster %s is active", eksClusterArn) 86 | return nil 87 | } 88 | logger.Warnf("EKS cluster %s is not active yet", eksClusterArn) 89 | logger.Infof("Waiting for %s...", sleepBetweenRetries) 90 | time.Sleep(sleepBetweenRetries) 91 | } 92 | return errors.WithStackTrace(EKSClusterReadyTimeoutError{eksClusterArn}) 93 | } 94 | 95 | // checkKubernetesApiServer checks if the api server is up and accepting traffic. 
96 | func checkKubernetesApiServer(eksClusterArn string) bool { 97 | logger := logging.GetProjectLogger() 98 | logger.Info("Checking EKS cluster info") 99 | clusterInfo, err := eksawshelper.GetClusterByArn(eksClusterArn) 100 | if err != nil { 101 | logger.Warnf("Error retrieving cluster info %s", err) 102 | logger.Warnf("Marking api server as not ready") 103 | return false 104 | } 105 | endpoint := aws.StringValue(clusterInfo.Endpoint) 106 | if endpoint == "" { 107 | logger.Warnf("Api server endpoint not available") 108 | logger.Warnf("Marking api server as not ready") 109 | return false 110 | } 111 | 112 | certificate := aws.StringValue(clusterInfo.CertificateAuthority.Data) 113 | client, err := loadHttpClientWithCA(certificate) 114 | if err != nil { 115 | logger.Errorf("Error loading certificate for EKS cluster %s endpoint: %s", eksClusterArn, err) 116 | logger.Warnf("Marking api server as not ready") 117 | return false 118 | } 119 | resp, err := client.Head(endpoint) 120 | if err != nil { 121 | logger.Warnf("Error retrieiving info from endpoint: %s", err) 122 | logger.Warnf("Marking api server as not ready") 123 | return false 124 | } 125 | // We look for 200 or 403 response. Both indicate the API server is up. 126 | // A 403 response will be returned from EKS in most situations because we are not going through the auth workflow 127 | // here to access the API (to keep things simple), and anonymous access is disabled on the cluster (for security 128 | // reasons). 
129 | if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusForbidden && resp.StatusCode != http.StatusUnauthorized { 130 | bodyBytes, err := ioutil.ReadAll(resp.Body) 131 | if err != nil { 132 | logger.Errorf("Error reading response body: %s", err) 133 | return false 134 | } 135 | bodyString := string(bodyBytes) 136 | logger.Warnf( 137 | "Received unexpected status code from endpoint: status code - %d; body - %s", 138 | resp.StatusCode, 139 | bodyString, 140 | ) 141 | logger.Warnf("Marking api server as not ready") 142 | return false 143 | } 144 | 145 | return true 146 | } 147 | 148 | // waitForKubernetesApiServer continuously checks if the api server is up until timing out. 149 | func waitForKubernetesApiServer(eksClusterArn string, maxRetries int, sleepBetweenRetries time.Duration) error { 150 | logger := logging.GetProjectLogger() 151 | logger.Infof("Waiting for cluster %s Kubernetes api server to accept traffic.", eksClusterArn) 152 | for i := 0; i < maxRetries; i++ { 153 | logger.Info("Checking EKS cluster info") 154 | available := checkKubernetesApiServer(eksClusterArn) 155 | if available { 156 | logger.Infof("EKS cluster %s Kubernetes api server is active", eksClusterArn) 157 | return nil 158 | } 159 | logger.Warnf("EKS cluster %s Kubernetes api server is not active yet", eksClusterArn) 160 | logger.Infof("Waiting for %s...", sleepBetweenRetries) 161 | time.Sleep(sleepBetweenRetries) 162 | } 163 | return errors.WithStackTrace(EKSClusterReadyTimeoutError{eksClusterArn}) 164 | } 165 | -------------------------------------------------------------------------------- /eks/coredns.go: -------------------------------------------------------------------------------- 1 | package eks 2 | 3 | import ( 4 | "strings" 5 | 6 | "github.com/gruntwork-io/go-commons/errors" 7 | 8 | "github.com/gruntwork-io/kubergrunt/eksawshelper" 9 | "github.com/gruntwork-io/kubergrunt/kubectl" 10 | "github.com/gruntwork-io/kubergrunt/logging" 11 | ) 12 | 13 | type 
CorednsAnnotation string 14 | 15 | const ( 16 | Fargate CorednsAnnotation = "fargate" 17 | EC2 CorednsAnnotation = "ec2" 18 | ) 19 | 20 | // ScheduleCoredns adds or removes the compute-type annotation from the coredns deployment resource. 21 | // When adding, it is set to ec2, when removing, it enables coredns for fargate nodes. 22 | func ScheduleCoredns( 23 | kubectlOptions *kubectl.KubectlOptions, 24 | clusterName string, 25 | fargateProfileArn string, 26 | corednsAnnotation CorednsAnnotation, 27 | ) error { 28 | logger := logging.GetProjectLogger() 29 | 30 | region, err := eksawshelper.GetRegionFromArn(fargateProfileArn) 31 | if err != nil { 32 | return err 33 | } 34 | logger.Infof("Got region %s", region) 35 | 36 | eksClusterArn, err := eksawshelper.GetClusterArnByNameAndRegion(clusterName, region) 37 | if err != nil { 38 | return err 39 | } 40 | logger.Infof("Got cluster arn %s", eksClusterArn) 41 | 42 | kubectlOptions.EKSClusterArn = eksClusterArn 43 | 44 | switch corednsAnnotation { 45 | case Fargate: 46 | logger.Info("Doing fargate annotation") 47 | 48 | // CoreDNS may or may not have the compute-type annotation by default. Check for existence. 49 | out, err := kubectl.RunKubectlWithOutput( 50 | kubectlOptions, 51 | "get", "deployment", "coredns", 52 | "-n", "kube-system", 53 | "-o", `jsonpath='{.spec.template.metadata}'`, 54 | ) 55 | if err != nil { 56 | return errors.WithStackTrace(err) 57 | } 58 | 59 | // Only attempt to patch coredns deployment if the compute-type annotation is present. 60 | // Else trigger a update by executing a rollout. This is necessary for coredns to schedule. 
61 | if strings.Contains(out, "compute-type") { 62 | err = kubectl.RunKubectl( 63 | kubectlOptions, 64 | "patch", "deployment", "coredns", 65 | "-n", "kube-system", 66 | "--type", "json", 67 | "--patch", `[{"op": "remove","path": "/spec/template/metadata/annotations/eks.amazonaws.com~1compute-type"}]`, 68 | ) 69 | 70 | if err != nil { 71 | return errors.WithStackTrace(err) 72 | } 73 | } else { 74 | err = kubectl.RunKubectl( 75 | kubectlOptions, 76 | "rollout", "restart", "deployment", "coredns", "-n", "kube-system", 77 | ) 78 | 79 | if err != nil { 80 | return errors.WithStackTrace(err) 81 | } 82 | } 83 | 84 | case EC2: 85 | logger.Info("Doing ec2 annotation") 86 | 87 | err = kubectl.RunKubectl( 88 | kubectlOptions, 89 | "patch", "deployment", "coredns", 90 | "-n", "kube-system", 91 | "--type", "json", 92 | "--patch", `[{"op": "add","path": "/spec/template/metadata/annotations","value": {"eks.amazonaws.com/compute-type": "ec2"}}]`, 93 | ) 94 | 95 | if err != nil { 96 | return errors.WithStackTrace(err) 97 | } 98 | } 99 | 100 | logger.Infof("Patched") 101 | return nil 102 | } 103 | -------------------------------------------------------------------------------- /eks/deploy.go: -------------------------------------------------------------------------------- 1 | package eks 2 | 3 | import ( 4 | "math" 5 | "time" 6 | 7 | "github.com/aws/aws-sdk-go/service/autoscaling" 8 | "github.com/aws/aws-sdk-go/service/ec2" 9 | "github.com/aws/aws-sdk-go/service/elb" 10 | "github.com/aws/aws-sdk-go/service/elbv2" 11 | "github.com/gruntwork-io/go-commons/errors" 12 | 13 | "github.com/gruntwork-io/kubergrunt/eksawshelper" 14 | "github.com/gruntwork-io/kubergrunt/kubectl" 15 | "github.com/gruntwork-io/kubergrunt/logging" 16 | ) 17 | 18 | // RollOutDeployment will perform a zero downtime roll out of the current launch configuration associated with the 19 | // provided ASG in the provided EKS cluster. This is accomplished by: 20 | // 1. 
Double the desired capacity of the Auto Scaling Group that powers the EKS Cluster. This will launch new EKS 21 | // workers with the new launch configuration. 22 | // 2. Wait for the new nodes to be ready for Pod scheduling in Kubernetes. 23 | // 3. Cordon the old nodes so that no new Pods will be scheduled there. 24 | // 4. Drain the pods scheduled on the old EKS workers (using the equivalent of "kubectl drain"), so that they will be 25 | // rescheduled on the new EKS workers. 26 | // 5. Wait for all the pods to migrate off of the old EKS workers. 27 | // 6. Set the desired capacity down to the original value and remove the old EKS workers from the ASG. 28 | // The process is broken up into stages/checkpoints, state is stored along the way so that command can pick up 29 | // from a stage if something bad happens. 30 | func RollOutDeployment( 31 | region string, 32 | eksAsgName string, 33 | kubectlOptions *kubectl.KubectlOptions, 34 | drainTimeout time.Duration, 35 | deleteEmptyDirData bool, 36 | maxRetries int, 37 | sleepBetweenRetries time.Duration, 38 | ignoreRecoveryFile bool, 39 | ) (returnErr error) { 40 | logger := logging.GetProjectLogger() 41 | logger.Infof("Beginning roll out for EKS cluster worker group %s in %s", eksAsgName, region) 42 | 43 | // Construct clients for AWS 44 | sess, err := eksawshelper.NewAuthenticatedSession(region) 45 | if err != nil { 46 | return errors.WithStackTrace(err) 47 | } 48 | asgSvc := autoscaling.New(sess) 49 | ec2Svc := ec2.New(sess) 50 | elbSvc := elb.New(sess) 51 | elbv2Svc := elbv2.New(sess) 52 | logger.Infof("Successfully authenticated with AWS") 53 | 54 | stateFile := defaultStateFile 55 | 56 | // Retrieve state if one exists or construct a new one 57 | state, err := initDeployState(stateFile, ignoreRecoveryFile, maxRetries, sleepBetweenRetries) 58 | if err != nil { 59 | return err 60 | } 61 | 62 | err = state.gatherASGInfo(asgSvc, []string{eksAsgName}) 63 | if err != nil { 64 | return err 65 | } 66 | 67 | err = 
state.setMaxCapacity(asgSvc) 68 | if err != nil { 69 | return err 70 | } 71 | 72 | err = state.scaleUp(asgSvc) 73 | if err != nil { 74 | return err 75 | } 76 | 77 | err = state.waitForNodes(ec2Svc, elbSvc, elbv2Svc, kubectlOptions) 78 | if err != nil { 79 | return err 80 | } 81 | 82 | err = state.cordonNodes(ec2Svc, kubectlOptions) 83 | if err != nil { 84 | return err 85 | } 86 | 87 | err = state.drainNodes(ec2Svc, kubectlOptions, drainTimeout, deleteEmptyDirData) 88 | if err != nil { 89 | return err 90 | } 91 | 92 | err = state.detachInstances(asgSvc) 93 | if err != nil { 94 | return err 95 | } 96 | 97 | err = state.terminateInstances(ec2Svc) 98 | if err != nil { 99 | return err 100 | } 101 | 102 | err = state.restoreCapacity(asgSvc) 103 | if err != nil { 104 | return err 105 | } 106 | 107 | err = state.delete() 108 | if err != nil { 109 | logger.Warnf("Error deleting state file %s: %s", stateFile, err.Error()) 110 | logger.Warn("Remove the file manually") 111 | } 112 | logger.Infof("Successfully finished roll out for EKS cluster worker group %s in %s", eksAsgName, region) 113 | return nil 114 | } 115 | 116 | // Calculates the default max retries based on a heuristic of 5 minutes per wave. 
This assumes that the ASG scales up in 117 | // waves of 10 instances, so the number of retries is: 118 | // ceil(scaleUpCount / 10) * 5 minutes / sleepBetweenRetries 119 | func getDefaultMaxRetries(scaleUpCount int64, sleepBetweenRetries time.Duration) int { 120 | logger := logging.GetProjectLogger() 121 | 122 | numWaves := int(math.Ceil(float64(scaleUpCount) / float64(10))) 123 | logger.Debugf("Calculated number of waves as %d (scaleUpCount %d)", numWaves, scaleUpCount) 124 | 125 | sleepBetweenRetriesSeconds := int(math.Trunc(sleepBetweenRetries.Seconds())) 126 | defaultMaxRetries := numWaves * 600 / sleepBetweenRetriesSeconds 127 | logger.Debugf( 128 | "Calculated default max retries as %d (scaleUpCount %d, num waves %d, duration (s) %d)", 129 | defaultMaxRetries, 130 | scaleUpCount, 131 | numWaves, 132 | sleepBetweenRetriesSeconds, 133 | ) 134 | 135 | return defaultMaxRetries 136 | } 137 | -------------------------------------------------------------------------------- /eks/deploy_state_test.go: -------------------------------------------------------------------------------- 1 | package eks 2 | 3 | import ( 4 | "github.com/gruntwork-io/kubergrunt/logging" 5 | "github.com/stretchr/testify/assert" 6 | "io/ioutil" 7 | "net/url" 8 | "os" 9 | "testing" 10 | "time" 11 | 12 | "github.com/stretchr/testify/require" 13 | ) 14 | 15 | func TestParseNonExistingDeployState(t *testing.T) { 16 | t.Parallel() 17 | fileName := "./.na" 18 | state, err := initDeployState(fileName, false, 3, 30*time.Second) 19 | require.NoError(t, err) 20 | defer os.Remove(fileName) 21 | 22 | assert.Equal(t, fileName, state.Path) 23 | assert.Equal(t, 3, state.maxRetries) 24 | assert.Equal(t, 30*time.Second, state.sleepBetweenRetries) 25 | 26 | assert.False(t, state.SetMaxCapacityDone) 27 | assert.False(t, state.TerminateInstancesDone) 28 | assert.False(t, state.GatherASGInfoDone) 29 | assert.False(t, state.RestoreCapacityDone) 30 | assert.False(t, state.DrainNodesDone) 31 | assert.False(t, 
state.CordonNodesDone) 32 | assert.False(t, state.DetachInstancesDone) 33 | assert.False(t, state.WaitForNodesDone) 34 | assert.False(t, state.ScaleUpDone) 35 | } 36 | 37 | func TestParseExistingDeployState(t *testing.T) { 38 | t.Parallel() 39 | 40 | stateFile := generateTempStateFile(t) 41 | state, err := initDeployState(stateFile, false, 3, 30*time.Second) 42 | require.NoError(t, err) 43 | defer os.Remove(stateFile) 44 | 45 | assert.True(t, state.GatherASGInfoDone) 46 | assert.False(t, state.SetMaxCapacityDone) 47 | assert.Equal(t, 1, len(state.ASGs)) 48 | assert.Equal(t, 3, state.maxRetries) 49 | assert.Equal(t, 30*time.Second, state.sleepBetweenRetries) 50 | 51 | asg := state.ASGs[0] 52 | 53 | assert.Equal(t, "my-test-asg", asg.Name) 54 | assert.Equal(t, int64(2), asg.OriginalCapacity) 55 | assert.Equal(t, int64(4), asg.OriginalMaxCapacity) 56 | assert.Equal(t, 2, len(asg.OriginalInstances)) 57 | assert.Equal(t, 1, len(asg.NewInstances)) 58 | } 59 | 60 | func TestParseExistingDeployStateIgnoreCurrent(t *testing.T) { 61 | t.Parallel() 62 | 63 | stateFile := generateTempStateFile(t) 64 | state, err := initDeployState(stateFile, true, 3, 30*time.Second) 65 | require.NoError(t, err) 66 | defer os.Remove(stateFile) 67 | 68 | assert.False(t, state.GatherASGInfoDone) 69 | assert.Equal(t, 0, len(state.ASGs)) 70 | } 71 | 72 | func generateTempStateFile(t *testing.T) string { 73 | escapedTestName := url.PathEscape(t.Name()) 74 | tmpfile, err := ioutil.TempFile("", escapedTestName) 75 | require.NoError(t, err) 76 | defer tmpfile.Close() 77 | 78 | asg := ASG{ 79 | Name: "my-test-asg", 80 | OriginalCapacity: 2, 81 | OriginalMaxCapacity: 4, 82 | OriginalInstances: []string{ 83 | "instance-1", 84 | "instance-2", 85 | }, 86 | NewInstances: []string{ 87 | "instance-3", 88 | }, 89 | } 90 | 91 | state := &DeployState{ 92 | logger: logging.GetProjectLogger(), 93 | GatherASGInfoDone: true, 94 | Path: tmpfile.Name(), 95 | ASGs: []ASG{asg}, 96 | } 97 | 98 | state.persist() 99 | 
return tmpfile.Name() 100 | } 101 | -------------------------------------------------------------------------------- /eks/drain.go: -------------------------------------------------------------------------------- 1 | package eks 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/aws/aws-sdk-go/service/autoscaling" 7 | "github.com/aws/aws-sdk-go/service/ec2" 8 | "github.com/gruntwork-io/go-commons/errors" 9 | "github.com/gruntwork-io/kubergrunt/eksawshelper" 10 | "github.com/gruntwork-io/kubergrunt/kubectl" 11 | "github.com/gruntwork-io/kubergrunt/logging" 12 | ) 13 | 14 | // DrainASG will cordon and drain all the instances associated with the given ASGs at the time of running. 15 | func DrainASG( 16 | region string, 17 | asgNames []string, 18 | kubectlOptions *kubectl.KubectlOptions, 19 | drainTimeout time.Duration, 20 | deleteEmptyDirData bool, 21 | ) error { 22 | logger := logging.GetProjectLogger() 23 | logger.Infof("All instances in the following worker groups will be drained:") 24 | for _, asgName := range asgNames { 25 | logger.Infof("\t- %s", asgName) 26 | } 27 | 28 | // Construct clients for AWS 29 | sess, err := eksawshelper.NewAuthenticatedSession(region) 30 | if err != nil { 31 | return errors.WithStackTrace(err) 32 | } 33 | asgSvc := autoscaling.New(sess) 34 | ec2Svc := ec2.New(sess) 35 | logger.Infof("Successfully authenticated with AWS") 36 | 37 | // Retrieve instance IDs for each ASG requested. 38 | allInstanceIDs := []string{} 39 | for _, asgName := range asgNames { 40 | asgInfo, err := getAsgInfo(asgSvc, asgName) 41 | if err != nil { 42 | return err 43 | } 44 | allInstanceIDs = append(allInstanceIDs, asgInfo.OriginalInstances...) 45 | } 46 | logger.Infof("Found %d instances across all requested ASGs.", len(allInstanceIDs)) 47 | 48 | // Cordon instances in the ASG to avoid scheduling evicted workloads on the instances being drained. 
49 | logger.Info("Cordoning instances in requested ASGs.") 50 | if err := cordonNodesInAsg(ec2Svc, kubectlOptions, allInstanceIDs); err != nil { 51 | return err 52 | } 53 | logger.Info("Successfully cordoned all instances in requested ASGs.") 54 | 55 | // Now drain the pods from all the instances. 56 | logger.Info("Draining Pods scheduled on instances in requested ASGs.") 57 | if err := drainNodesInAsg(ec2Svc, kubectlOptions, allInstanceIDs, drainTimeout, deleteEmptyDirData); err != nil { 58 | return err 59 | } 60 | logger.Info("Successfully drained pods from all instances in requested ASGs.") 61 | 62 | return nil 63 | } 64 | -------------------------------------------------------------------------------- /eks/eks.go: -------------------------------------------------------------------------------- 1 | // Package eks contains utility functions for interacting with EKS 2 | package eks 3 | -------------------------------------------------------------------------------- /eks/elb.go: -------------------------------------------------------------------------------- 1 | package eks 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | "time" 7 | 8 | "github.com/aws/aws-sdk-go/aws" 9 | "github.com/aws/aws-sdk-go/service/elb" 10 | "github.com/aws/aws-sdk-go/service/elbv2" 11 | "github.com/gruntwork-io/go-commons/collections" 12 | "github.com/gruntwork-io/go-commons/errors" 13 | "github.com/gruntwork-io/go-commons/retry" 14 | "github.com/hashicorp/go-multierror" 15 | "github.com/sirupsen/logrus" 16 | 17 | "github.com/gruntwork-io/kubergrunt/commonerrors" 18 | ) 19 | 20 | // waitForAnyInstancesRegisteredToALBOrNLB implements the logic to wait for instance registration to Application and 21 | // Network Load Balancers. Refer to function docs for waitForAnyInstancesRegisteredToELB for more info. 22 | // NOTE: this assumes the ELB is using the instance target type. 
23 | func waitForAnyInstancesRegisteredToALBOrNLB(logger *logrus.Entry, elbv2Svc *elbv2.ELBV2, lbName string, instanceIDsToWaitFor []string) error { 24 | targetGroups, err := getELBTargetGroups(elbv2Svc, lbName) 25 | if err != nil { 26 | return err 27 | } 28 | 29 | // Asynchronously wait for instances to be registered to each target group, collecting each goroutine error in 30 | // channels. 31 | wg := new(sync.WaitGroup) 32 | wg.Add(len(targetGroups)) 33 | errChans := make(map[string]chan error, len(targetGroups)) 34 | for _, targetGroup := range targetGroups { 35 | errChan := make(chan error, 1) 36 | errChans[aws.StringValue(targetGroup.TargetGroupName)] = errChan 37 | go asyncWaitForAnyInstancesRegisteredToTargetGroup(wg, errChan, logger, elbv2Svc, lbName, targetGroup, instanceIDsToWaitFor) 38 | } 39 | wg.Wait() 40 | 41 | // Collect all the errors from the async wait calls into a single error struct. 42 | var allErrs *multierror.Error 43 | for targetGroupName, errChan := range errChans { 44 | if err := <-errChan; err != nil { 45 | allErrs = multierror.Append(allErrs, err) 46 | logger.Errorf("Error waiting for instance to register to target group %s: %s", targetGroupName, err) 47 | } 48 | } 49 | finalErr := allErrs.ErrorOrNil() 50 | return errors.WithStackTrace(finalErr) 51 | } 52 | 53 | // asyncWaitForAnyInstancesRegisteredToTargetGroup waits for any instance to register to a single TargetGroup with 54 | // retry. This function is intended to be run in a goroutine. 
55 | func asyncWaitForAnyInstancesRegisteredToTargetGroup( 56 | wg *sync.WaitGroup, 57 | errChan chan error, 58 | logger *logrus.Entry, 59 | elbv2Svc *elbv2.ELBV2, 60 | lbName string, 61 | targetGroup *elbv2.TargetGroup, 62 | instanceIDsToWaitFor []string, 63 | ) { 64 | defer wg.Done() 65 | 66 | // Retry up to 10 minutes with 15 second retry sleep 67 | waitErr := retry.DoWithRetry( 68 | logger.Logger, 69 | fmt.Sprintf( 70 | "wait for expected targets to be registered to target group %s of load balancer %s", 71 | aws.StringValue(targetGroup.TargetGroupName), 72 | lbName, 73 | ), 74 | 40, 15*time.Second, 75 | func() error { 76 | targetsResp, err := elbv2Svc.DescribeTargetHealth(&elbv2.DescribeTargetHealthInput{TargetGroupArn: targetGroup.TargetGroupArn}) 77 | if err != nil { 78 | return retry.FatalError{Underlying: err} 79 | } 80 | 81 | // Check each target to see if it is one of the instances we are waiting for, and return without error to 82 | // stop the retry loop if that is the case since condition is met. 83 | for _, targetHealth := range targetsResp.TargetHealthDescriptions { 84 | if targetHealth.Target == nil || targetHealth.Target.Id == nil { 85 | continue 86 | } 87 | instanceID := *targetHealth.Target.Id 88 | if collections.ListContainsElement(instanceIDsToWaitFor, instanceID) { 89 | return nil 90 | } 91 | } 92 | return fmt.Errorf("No expected instances registered yet") 93 | }, 94 | ) 95 | if fatalWaitErr, isFatalErr := waitErr.(retry.FatalError); isFatalErr { 96 | errChan <- fatalWaitErr.Underlying 97 | } 98 | errChan <- waitErr 99 | } 100 | 101 | // waitForAnyInstancesRegisteredToCLB implements the logic to wait for instance registration to Classic Load Balancers. 102 | // Refer to function docs for waitForAnyInstancesRegisteredToELB for more info. 
103 | func waitForAnyInstancesRegisteredToCLB(logger *logrus.Entry, elbSvc *elb.ELB, lbName string, instanceIds []string) error { 104 | instances := []*elb.Instance{} 105 | for _, instanceID := range instanceIds { 106 | instances = append(instances, &elb.Instance{InstanceId: aws.String(instanceID)}) 107 | } 108 | 109 | logger.Infof("Waiting for at least one instance to be in service for elb %s", lbName) 110 | params := &elb.DescribeInstanceHealthInput{ 111 | LoadBalancerName: aws.String(lbName), 112 | Instances: instances, 113 | } 114 | err := elbSvc.WaitUntilAnyInstanceInService(params) 115 | if err != nil { 116 | logger.Errorf("error waiting for any instance to be in service for elb %s", lbName) 117 | return err 118 | } 119 | logger.Infof("At least one instance in service for elb %s", lbName) 120 | return nil 121 | } 122 | 123 | // getELBTargetGroups looks up the associated TargetGroup of the given ELB. Note that this assumes lbName refers to a v2 124 | // ELB (ALB or NLB). 125 | // NOTE: You can have multiple target groups on a given ELB if the service or ingress has multiple ports to listen on. 126 | func getELBTargetGroups(elbv2Svc *elbv2.ELBV2, lbName string) ([]*elbv2.TargetGroup, error) { 127 | resp, err := elbv2Svc.DescribeLoadBalancers(&elbv2.DescribeLoadBalancersInput{Names: aws.StringSlice([]string{lbName})}) 128 | if err != nil { 129 | return nil, errors.WithStackTrace(err) 130 | } 131 | 132 | if len(resp.LoadBalancers) == 0 { 133 | return nil, errors.WithStackTrace(CouldNotFindLoadBalancerErr{name: lbName}) 134 | } else if len(resp.LoadBalancers) > 1 { 135 | // This condition is impossible because we are querying a single LB name and names are unique within regions. 
136 | return nil, errors.WithStackTrace(commonerrors.ImpossibleErr("MORE_THAN_ONE_ELB_IN_LOOKUP")) 137 | } else if resp.LoadBalancers[0] == nil { 138 | return nil, errors.WithStackTrace(commonerrors.ImpossibleErr("ELB_IS_NULL_FROM_API")) 139 | } 140 | elb := resp.LoadBalancers[0] 141 | 142 | targetGroupsResp, err := elbv2Svc.DescribeTargetGroups(&elbv2.DescribeTargetGroupsInput{LoadBalancerArn: elb.LoadBalancerArn}) 143 | if err != nil { 144 | return nil, errors.WithStackTrace(err) 145 | } 146 | 147 | if len(targetGroupsResp.TargetGroups) == 0 { 148 | // This is an impossible condition because the load balancer controllers always creates at least 1 target group. 149 | return nil, errors.WithStackTrace(commonerrors.ImpossibleErr("ELB_HAS_UNEXPECTED_NUMBER_OF_TARGET_GROUPS")) 150 | } 151 | return targetGroupsResp.TargetGroups, nil 152 | } 153 | -------------------------------------------------------------------------------- /eks/errors.go: -------------------------------------------------------------------------------- 1 | package eks 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | ) 7 | 8 | // EKSClusterNotReady is returned when the EKS cluster is detected to not be in the ready state 9 | type EKSClusterNotReady struct { 10 | eksClusterArn string 11 | } 12 | 13 | func (err EKSClusterNotReady) Error() string { 14 | return fmt.Sprintf("EKS cluster %s is not ready", err.eksClusterArn) 15 | } 16 | 17 | // EKSClusterReadyTimeoutError is returned when we time out waiting for an EKS cluster to be ready. 
18 | type EKSClusterReadyTimeoutError struct { 19 | eksClusterArn string 20 | } 21 | 22 | func (err EKSClusterReadyTimeoutError) Error() string { 23 | return fmt.Sprintf( 24 | "Timed out waiting for EKS cluster %s to reach ready state.", 25 | err.eksClusterArn, 26 | ) 27 | } 28 | 29 | // CouldNotMeetASGCapacityError represents an error related to waiting for ASG to reach desired capacity 30 | type CouldNotMeetASGCapacityError struct { 31 | asgName string 32 | message string 33 | } 34 | 35 | func (err CouldNotMeetASGCapacityError) Error() string { 36 | return fmt.Sprintf( 37 | "Could not reach desired capacity of ASG %s: %s", 38 | err.asgName, 39 | err.message, 40 | ) 41 | } 42 | 43 | func NewCouldNotMeetASGCapacityError(asgName string, message string) CouldNotMeetASGCapacityError { 44 | return CouldNotMeetASGCapacityError{asgName, message} 45 | } 46 | 47 | // MultipleTerminateInstanceErrors represents multiple errors found while terminating instances 48 | type MultipleTerminateInstanceErrors struct { 49 | errors []error 50 | } 51 | 52 | func (err MultipleTerminateInstanceErrors) Error() string { 53 | messages := []string{ 54 | fmt.Sprintf("%d errors found while terminating instances:", len(err.errors)), 55 | } 56 | 57 | for _, individualErr := range err.errors { 58 | messages = append(messages, individualErr.Error()) 59 | } 60 | return strings.Join(messages, "\n") 61 | } 62 | 63 | func (err MultipleTerminateInstanceErrors) AddError(newErr error) { 64 | err.errors = append(err.errors, newErr) 65 | } 66 | 67 | func (err MultipleTerminateInstanceErrors) IsEmpty() bool { 68 | return len(err.errors) == 0 69 | } 70 | 71 | func NewMultipleTerminateInstanceErrors() MultipleTerminateInstanceErrors { 72 | return MultipleTerminateInstanceErrors{[]error{}} 73 | } 74 | 75 | // MultipleLookupErrors represents multiple errors found while looking up a resource 76 | type MultipleLookupErrors struct { 77 | errors []error 78 | } 79 | 80 | func (err MultipleLookupErrors) Error() 
string { 81 | messages := []string{ 82 | fmt.Sprintf("%d errors found during lookup:", len(err.errors)), 83 | } 84 | 85 | for _, individualErr := range err.errors { 86 | messages = append(messages, individualErr.Error()) 87 | } 88 | return strings.Join(messages, "\n") 89 | } 90 | 91 | func (err MultipleLookupErrors) AddError(newErr error) { 92 | err.errors = append(err.errors, newErr) 93 | } 94 | 95 | func (err MultipleLookupErrors) IsEmpty() bool { 96 | return len(err.errors) == 0 97 | } 98 | 99 | func NewMultipleLookupErrors() MultipleLookupErrors { 100 | return MultipleLookupErrors{[]error{}} 101 | } 102 | 103 | // LookupError represents an error related to looking up data on an object. 104 | type LookupError struct { 105 | objectProperty string 106 | objectType string 107 | objectId string 108 | } 109 | 110 | func (err LookupError) Error() string { 111 | return fmt.Sprintf("Failed to look up %s for %s with id %s.", err.objectProperty, err.objectType, err.objectId) 112 | } 113 | 114 | // NewLookupError constructs a new LookupError object that can be used to return an error related to a look up error. 115 | func NewLookupError(objectType string, objectId string, objectProperty string) LookupError { 116 | return LookupError{objectProperty: objectProperty, objectType: objectType, objectId: objectId} 117 | } 118 | 119 | // NoPeerCertificatesError is returned when we couldn't find any TLS peer certificates for the provided URL. 120 | type NoPeerCertificatesError struct { 121 | URL string 122 | } 123 | 124 | func (err NoPeerCertificatesError) Error() string { 125 | return fmt.Sprintf("Could not find any peer certificates for URL %s", err.URL) 126 | } 127 | 128 | // UnsupportedEKSVersion is returned when the Kubernetes version of the EKS cluster is not supported. 
type UnsupportedEKSVersion struct {
	version string // the Kubernetes version string reported by the cluster
}

func (err UnsupportedEKSVersion) Error() string {
	return fmt.Sprintf("%s is not a supported version for kubergrunt eks upgrade. Please contact support@gruntwork.io for more info.", err.version)
}

// CoreComponentUnexpectedConfigurationErr error is returned when the EKS core components are in an unexpected
// configuration, such as a different number of containers.
type CoreComponentUnexpectedConfigurationErr struct {
	component string // which core component (e.g. coredns, kube-proxy) was inspected
	reason    string // human readable description of the unexpected configuration
}

func (err CoreComponentUnexpectedConfigurationErr) Error() string {
	return fmt.Sprintf("Core component %s is in unexpected configuration: %s", err.component, err.reason)
}

// NetworkInterfaceDetachedTimeoutError is returned when we time out waiting for a network interface to be detached.
type NetworkInterfaceDetachedTimeoutError struct {
	networkInterfaceId string
}

func (err NetworkInterfaceDetachedTimeoutError) Error() string {
	return fmt.Sprintf("Timed out waiting for network interface %s to reach detached state.", err.networkInterfaceId)
}

// NetworkInterfaceDeletedTimeoutError is returned when we time out waiting for a network interface to be deleted.
type NetworkInterfaceDeletedTimeoutError struct {
	networkInterfaceId string
}

func (err NetworkInterfaceDeletedTimeoutError) Error() string {
	return fmt.Sprintf("Timed out waiting for network interface %s to reach deleted state.", err.networkInterfaceId)
}

// CouldNotFindLoadBalancerErr is returned when the given ELB can not be found.
173 | type CouldNotFindLoadBalancerErr struct { 174 | name string 175 | } 176 | 177 | func (err CouldNotFindLoadBalancerErr) Error() string { 178 | return fmt.Sprintf( 179 | "Could not find ELB with name %s.", 180 | err.name, 181 | ) 182 | } 183 | -------------------------------------------------------------------------------- /eks/fixture/aws-k8s-cni.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: aws-node 6 | rules: 7 | - apiGroups: 8 | - crd.k8s.amazonaws.com 9 | resources: 10 | - "*" 11 | verbs: 12 | - "*" 13 | - apiGroups: [""] 14 | resources: 15 | - pods 16 | - nodes 17 | - namespaces 18 | verbs: ["list", "watch", "get"] 19 | - apiGroups: ["extensions"] 20 | resources: 21 | - daemonsets 22 | verbs: ["list", "watch"] 23 | 24 | --- 25 | apiVersion: v1 26 | kind: ServiceAccount 27 | metadata: 28 | name: aws-node 29 | namespace: kube-system 30 | 31 | --- 32 | apiVersion: rbac.authorization.k8s.io/v1 33 | kind: ClusterRoleBinding 34 | metadata: 35 | name: aws-node 36 | roleRef: 37 | apiGroup: rbac.authorization.k8s.io 38 | kind: ClusterRole 39 | name: aws-node 40 | subjects: 41 | - kind: ServiceAccount 42 | name: aws-node 43 | namespace: kube-system 44 | 45 | --- 46 | kind: DaemonSet 47 | apiVersion: apps/v1 48 | metadata: 49 | name: aws-node 50 | namespace: kube-system 51 | labels: 52 | k8s-app: aws-node 53 | spec: 54 | updateStrategy: 55 | type: RollingUpdate 56 | rollingUpdate: 57 | maxUnavailable: "10%" 58 | selector: 59 | matchLabels: 60 | k8s-app: aws-node 61 | template: 62 | metadata: 63 | labels: 64 | k8s-app: aws-node 65 | spec: 66 | priorityClassName: system-node-critical 67 | affinity: 68 | nodeAffinity: 69 | requiredDuringSchedulingIgnoredDuringExecution: 70 | nodeSelectorTerms: 71 | - matchExpressions: 72 | - key: "beta.kubernetes.io/os" 73 | operator: In 74 | values: 75 | - linux 76 | - key: 
"beta.kubernetes.io/arch" 77 | operator: In 78 | values: 79 | - amd64 80 | - key: eks.amazonaws.com/compute-type 81 | operator: NotIn 82 | values: 83 | - fargate 84 | serviceAccountName: aws-node 85 | hostNetwork: true 86 | tolerations: 87 | - operator: Exists 88 | containers: 89 | - image: 602401143452.dkr.ecr.ap-northeast-1.amazonaws.com/amazon-k8s-cni:v1.5.7 90 | imagePullPolicy: Always 91 | ports: 92 | - containerPort: 61678 93 | name: metrics 94 | name: aws-node 95 | env: 96 | - name: AWS_VPC_K8S_CNI_LOGLEVEL 97 | value: DEBUG 98 | - name: MY_NODE_NAME 99 | valueFrom: 100 | fieldRef: 101 | fieldPath: spec.nodeName 102 | resources: 103 | requests: 104 | cpu: 10m 105 | securityContext: 106 | privileged: true 107 | volumeMounts: 108 | - mountPath: /host/opt/cni/bin 109 | name: cni-bin-dir 110 | - mountPath: /host/etc/cni/net.d 111 | name: cni-net-dir 112 | - mountPath: /host/var/log 113 | name: log-dir 114 | - mountPath: /var/run/docker.sock 115 | name: dockersock 116 | - mountPath: /var/run/dockershim.sock 117 | name: dockershim 118 | volumes: 119 | - name: cni-bin-dir 120 | hostPath: 121 | path: /opt/cni/bin 122 | - name: cni-net-dir 123 | hostPath: 124 | path: /etc/cni/net.d 125 | - name: log-dir 126 | hostPath: 127 | path: /var/log 128 | - name: dockersock 129 | hostPath: 130 | path: /var/run/docker.sock 131 | - name: dockershim 132 | hostPath: 133 | path: /var/run/dockershim.sock 134 | 135 | --- 136 | apiVersion: apiextensions.k8s.io/v1beta1 137 | kind: CustomResourceDefinition 138 | metadata: 139 | name: eniconfigs.crd.k8s.amazonaws.com 140 | spec: 141 | scope: Cluster 142 | group: crd.k8s.amazonaws.com 143 | versions: 144 | - name: v1alpha1 145 | served: true 146 | storage: true 147 | names: 148 | plural: eniconfigs 149 | singular: eniconfig 150 | kind: ENIConfig 151 | -------------------------------------------------------------------------------- /eks/fixture/cleanup-test/attached-ni/main.tf: 
-------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # PIN TERRAFORM VERSION TO >= 0.12 3 | # --------------------------------------------------------------------------------------------------------------------- 4 | 5 | terraform { 6 | # This module is now only being tested with Terraform 0.13.x. However, to make upgrading easier, we are setting 7 | # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it 8 | # forwards compatible with 0.13.x code. 9 | required_version = ">= 0.12.26" 10 | } 11 | 12 | # --------------------------------------------------------------------------------------------------------------------- 13 | # CREATE A FLOATING ENI WITH SECURITY GROUP AND ATTACH TO INSTANCE 14 | # --------------------------------------------------------------------------------------------------------------------- 15 | 16 | resource "aws_instance" "test" { 17 | ami = data.aws_ami.ubuntu.id 18 | instance_type = module.instance_types.recommended_instance_type 19 | subnet_id = module.floating_eni.subnet_id 20 | 21 | tags = { 22 | Name = "${var.prefix}-kubergrunt-test" 23 | } 24 | } 25 | 26 | resource "aws_network_interface_attachment" "test" { 27 | instance_id = aws_instance.test.id 28 | network_interface_id = module.floating_eni.eni_id 29 | device_index = 1 30 | } 31 | 32 | module "floating_eni" { 33 | source = "../unattached-ni" 34 | prefix = var.prefix 35 | } 36 | 37 | module "instance_types" { 38 | source = "git::git@github.com:gruntwork-io/terraform-aws-utilities.git//modules/instance-type?ref=v0.4.0" 39 | 40 | instance_types = ["t2.micro", "t3.micro"] 41 | } 42 | 43 | 44 | # --------------------------------------------------------------------------------------------------------------------- 45 | # LOOK UP THE LATEST UBUNTU AMI 46 | # 
--------------------------------------------------------------------------------------------------------------------- 47 | 48 | data "aws_ami" "ubuntu" { 49 | most_recent = true 50 | owners = ["099720109477"] # Canonical 51 | 52 | filter { 53 | name = "virtualization-type" 54 | values = ["hvm"] 55 | } 56 | 57 | filter { 58 | name = "architecture" 59 | values = ["x86_64"] 60 | } 61 | 62 | filter { 63 | name = "image-type" 64 | values = ["machine"] 65 | } 66 | 67 | filter { 68 | name = "name" 69 | values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"] 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /eks/fixture/cleanup-test/attached-ni/outputs.tf: -------------------------------------------------------------------------------- 1 | output "security_group_id" { 2 | value = module.floating_eni.security_group_id 3 | } 4 | 5 | output "eni_id" { 6 | value = module.floating_eni.eni_id 7 | } 8 | -------------------------------------------------------------------------------- /eks/fixture/cleanup-test/attached-ni/variables.tf: -------------------------------------------------------------------------------- 1 | variable "prefix" { 2 | description = "Unique prefix to apply to all resources." 3 | type = string 4 | default = "test" 5 | } 6 | -------------------------------------------------------------------------------- /eks/fixture/cleanup-test/unattached-ni/main.tf: -------------------------------------------------------------------------------- 1 | # --------------------------------------------------------------------------------------------------------------------- 2 | # PIN TERRAFORM VERSION TO >= 0.12 3 | # --------------------------------------------------------------------------------------------------------------------- 4 | 5 | terraform { 6 | # This module is now only being tested with Terraform 0.13.x. 
However, to make upgrading easier, we are setting 7 | # 0.12.26 as the minimum version, as that version added support for required_providers with source URLs, making it 8 | # forwards compatible with 0.13.x code. 9 | required_version = ">= 0.12.26" 10 | } 11 | 12 | # --------------------------------------------------------------------------------------------------------------------- 13 | # CREATE A FLOATING ENI WITH SECURITY GROUP 14 | # --------------------------------------------------------------------------------------------------------------------- 15 | 16 | resource "aws_security_group" "allow_tls" { 17 | name = "${var.prefix}-kubergrunt-test-sg" 18 | description = "Allow TLS inbound traffic" 19 | vpc_id = data.aws_vpc.default.id 20 | 21 | ingress { 22 | description = "TLS from VPC" 23 | from_port = 443 24 | to_port = 443 25 | protocol = "tcp" 26 | cidr_blocks = [data.aws_vpc.default.cidr_block] 27 | } 28 | 29 | egress { 30 | from_port = 0 31 | to_port = 0 32 | protocol = "-1" 33 | cidr_blocks = ["0.0.0.0/0"] 34 | } 35 | 36 | tags = { 37 | Name = "${var.prefix}-kubergrunt-test-sg" 38 | } 39 | } 40 | 41 | resource "aws_network_interface" "allow_tls" { 42 | description = "Test network interface that should not be attached" 43 | subnet_id = local.first_subnet 44 | security_groups = [aws_security_group.allow_tls.id] 45 | 46 | tags = { 47 | Name = "${var.prefix}-kubergrunt-test-network-interface" 48 | } 49 | } 50 | 51 | locals { 52 | first_subnet = sort(tolist(data.aws_subnets.default.ids))[0] 53 | } 54 | 55 | 56 | # --------------------------------------------------------------------------------------------------------------------- 57 | # DATA SOURCES 58 | # --------------------------------------------------------------------------------------------------------------------- 59 | 60 | data "aws_vpc" "default" { 61 | default = true 62 | } 63 | 64 | data "aws_subnets" "default" { 65 | filter { 66 | name = "vpc-id" 67 | values = [data.aws_vpc.default.id] 68 | } 69 | 
} 70 | -------------------------------------------------------------------------------- /eks/fixture/cleanup-test/unattached-ni/outputs.tf: -------------------------------------------------------------------------------- 1 | output "security_group_id" { 2 | value = aws_security_group.allow_tls.id 3 | } 4 | 5 | output "eni_id" { 6 | value = aws_network_interface.allow_tls.id 7 | } 8 | 9 | output "subnet_id" { 10 | value = local.first_subnet 11 | } 12 | -------------------------------------------------------------------------------- /eks/fixture/cleanup-test/unattached-ni/variables.tf: -------------------------------------------------------------------------------- 1 | variable "prefix" { 2 | description = "Unique prefix to apply to all resources." 3 | type = string 4 | default = "test" 5 | } 6 | -------------------------------------------------------------------------------- /eks/instances.go: -------------------------------------------------------------------------------- 1 | package eks 2 | 3 | import ( 4 | "strings" 5 | 6 | "github.com/aws/aws-sdk-go/aws" 7 | "github.com/aws/aws-sdk-go/service/ec2" 8 | "github.com/gruntwork-io/go-commons/collections" 9 | "github.com/gruntwork-io/go-commons/errors" 10 | 11 | "github.com/gruntwork-io/kubergrunt/logging" 12 | ) 13 | 14 | // Given a list of instance IDs, fetch the instance details from AWS. 15 | func instanceDetailsFromIds(svc *ec2.EC2, idList []string) ([]*ec2.Instance, error) { 16 | input := ec2.DescribeInstancesInput{InstanceIds: aws.StringSlice(idList)} 17 | instances := []*ec2.Instance{} 18 | // Handle pagination by repeatedly making the API call while there is a next token set. 19 | for { 20 | response, err := svc.DescribeInstances(&input) 21 | if err != nil { 22 | return nil, errors.WithStackTrace(err) 23 | } 24 | for _, reservation := range response.Reservations { 25 | instances = append(instances, reservation.Instances...) 
26 | } 27 | if response.NextToken == nil { 28 | break 29 | } 30 | input.NextToken = response.NextToken 31 | } 32 | return instances, nil 33 | } 34 | 35 | // Currently EKS defaults to using the private DNS name for the node names. 36 | // TODO: The property used should be configurable for deployments where the node names are custom. 37 | func kubeNodeNamesFromInstances(instances []*ec2.Instance) []string { 38 | nodeNames := []string{} 39 | for _, inst := range instances { 40 | nodeNames = append(nodeNames, *inst.PrivateDnsName) 41 | } 42 | return nodeNames 43 | } 44 | 45 | // terminateInstances will make a call to EC2 API to terminate the instances provided in the list. 46 | func terminateInstances(ec2Svc *ec2.EC2, idList []string) error { 47 | logger := logging.GetProjectLogger() 48 | logger.Infof("Terminating %d instances, in groups of up to 1000 instances", len(idList)) 49 | 50 | // Batch the requests up to the limit of 1000 instances 51 | errList := NewMultipleTerminateInstanceErrors() 52 | for batchIdx, batchedInstanceIdList := range collections.BatchListIntoGroupsOf(idList, 1000) { 53 | instanceIds := aws.StringSlice(batchedInstanceIdList) 54 | input := &ec2.TerminateInstancesInput{ 55 | InstanceIds: instanceIds, 56 | } 57 | _, err := ec2Svc.TerminateInstances(input) 58 | if err != nil { 59 | errList.AddError(err) 60 | logger.Errorf("Encountered error terminating instances in batch %d: %s", batchIdx, err) 61 | logger.Errorf("Instance ids: %s", strings.Join(batchedInstanceIdList, ",")) 62 | continue 63 | } 64 | 65 | logger.Infof("Terminated %d instances from batch %d", len(batchedInstanceIdList), batchIdx) 66 | 67 | logger.Infof("Waiting for %d instances to shut down from batch %d", len(batchedInstanceIdList), batchIdx) 68 | err = ec2Svc.WaitUntilInstanceTerminated(&ec2.DescribeInstancesInput{InstanceIds: instanceIds}) 69 | if err != nil { 70 | errList.AddError(err) 71 | logger.Errorf("Encountered error waiting for instances to shutdown in batch %d: %s", 
batchIdx, err) 72 | logger.Errorf("Instance ids: %s", strings.Join(batchedInstanceIdList, ",")) 73 | continue 74 | } 75 | logger.Infof("Successfully shutdown %d instances from batch %d", len(batchedInstanceIdList), batchIdx) 76 | } 77 | if !errList.IsEmpty() { 78 | return errors.WithStackTrace(errList) 79 | } 80 | logger.Infof("Successfully shutdown all %d instances", len(idList)) 81 | return nil 82 | } 83 | -------------------------------------------------------------------------------- /eks/instances_test.go: -------------------------------------------------------------------------------- 1 | package eks 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/gruntwork-io/terratest/modules/aws" 8 | "github.com/gruntwork-io/terratest/modules/random" 9 | "github.com/stretchr/testify/require" 10 | ) 11 | 12 | func TestTerminateInstances(t *testing.T) { 13 | t.Parallel() 14 | 15 | uniqueID := random.UniqueId() 16 | name := fmt.Sprintf("%s-%s", t.Name(), uniqueID) 17 | region := getRandomRegion(t) 18 | ec2Svc := aws.NewEc2Client(t, region) 19 | instance := createTestEC2Instance(t, region, name) 20 | terminateInstances(ec2Svc, []string{*instance.InstanceId}) 21 | instanceIds := aws.GetEc2InstanceIdsByTag(t, region, "Name", name) 22 | instances, err := instanceDetailsFromIds(ec2Svc, instanceIds) 23 | require.NoError(t, err) 24 | 25 | // We want either no instances, or the instance is in terminated state 26 | if len(instances) > 0 { 27 | instance := instances[0] 28 | require.Equal(t, *instance.State.Name, "terminated") 29 | } 30 | } 31 | 32 | func TestInstanceDetailsFromIds(t *testing.T) { 33 | t.Parallel() 34 | 35 | uniqueID := random.UniqueId() 36 | name := fmt.Sprintf("%s-%s", t.Name(), uniqueID) 37 | region := getRandomRegion(t) 38 | ec2Svc := aws.NewEc2Client(t, region) 39 | instance := createTestEC2Instance(t, region, name) 40 | defer aws.TerminateInstance(t, region, *instance.InstanceId) 41 | 42 | instances, err := instanceDetailsFromIds(ec2Svc, 
[]string{*instance.InstanceId}) 43 | require.NoError(t, err) 44 | require.Equal(t, len(instances), 1) 45 | require.Equal(t, *instances[0].Tags[0].Value, name) 46 | } 47 | -------------------------------------------------------------------------------- /eks/kubectl_configure.go: -------------------------------------------------------------------------------- 1 | package eks 2 | 3 | import ( 4 | "github.com/aws/aws-sdk-go/service/eks" 5 | "github.com/gruntwork-io/go-commons/files" 6 | "k8s.io/client-go/tools/clientcmd" 7 | 8 | "github.com/gruntwork-io/kubergrunt/kubectl" 9 | "github.com/gruntwork-io/kubergrunt/logging" 10 | ) 11 | 12 | // ConfigureKubectlForEks adds a new context to the kubeconfig located at the given path that can authenticate with the 13 | // EKS cluster referenced by the given ARN. 14 | func ConfigureKubectlForEks( 15 | eksCluster *eks.Cluster, 16 | kubectlOptions *kubectl.KubectlOptions, 17 | ) error { 18 | logger := logging.GetProjectLogger() 19 | 20 | // Load config from disk and then get actual data structure containing the parsed config information 21 | // Create a blank file if it does not exist already 22 | if !files.FileExists(kubectlOptions.ConfigPath) { 23 | if err := kubectl.CreateInitialConfig(kubectlOptions.ConfigPath); err != nil { 24 | return err 25 | } 26 | } 27 | logger.Infof("Loading kubectl config %s.", kubectlOptions.ConfigPath) 28 | kubeconfig := kubectl.LoadConfigFromPath(kubectlOptions.ConfigPath) 29 | rawConfig, err := kubeconfig.RawConfig() 30 | if err != nil { 31 | return err 32 | } 33 | logger.Infof("Successfully loaded and parsed kubectl config.") 34 | 35 | // Update the config data structure with the EKS cluster info 36 | err = kubectl.AddEksConfigContext( 37 | &rawConfig, 38 | kubectlOptions.ContextName, 39 | *eksCluster.Arn, 40 | *eksCluster.Name, 41 | *eksCluster.Endpoint, 42 | *eksCluster.CertificateAuthority.Data, 43 | ) 44 | if err != nil { 45 | return err 46 | } 47 | 48 | // Update the current context to the 
newly created context 49 | logger.Infof("Setting current kubectl config context to %s.", kubectlOptions.ContextName) 50 | rawConfig.CurrentContext = kubectlOptions.ContextName 51 | logger.Info("Updated current kubectl config context.") 52 | 53 | // Finally, save the config to disk 54 | logger.Infof("Saving kubectl config updates to %s.", kubectlOptions.ConfigPath) 55 | err = clientcmd.ModifyConfig(kubeconfig.ConfigAccess(), rawConfig, false) 56 | if err != nil { 57 | return err 58 | } 59 | logger.Infof("Successfully saved kubectl config updates.") 60 | 61 | return nil 62 | } 63 | -------------------------------------------------------------------------------- /eks/kubectl_configure_test.go: -------------------------------------------------------------------------------- 1 | package eks 2 | 3 | import ( 4 | "encoding/base64" 5 | "io/ioutil" 6 | "net/url" 7 | "os" 8 | "testing" 9 | 10 | "github.com/aws/aws-sdk-go/aws" 11 | "github.com/aws/aws-sdk-go/service/eks" 12 | "github.com/gruntwork-io/terratest/modules/k8s" 13 | "github.com/gruntwork-io/terratest/modules/random" 14 | "github.com/stretchr/testify/require" 15 | 16 | "github.com/gruntwork-io/kubergrunt/kubectl" 17 | ) 18 | 19 | func TestEksKubectlConfigureHonorsKubeConfigPath(t *testing.T) { 20 | t.Parallel() 21 | 22 | kubeconfigPath := generateTempConfig(t) 23 | defer os.Remove(kubeconfigPath) 24 | 25 | originalKubeconfig := k8s.LoadConfigFromPath(kubeconfigPath) 26 | originalRawConfig, err := originalKubeconfig.RawConfig() 27 | require.NoError(t, err) 28 | 29 | uniqueID := random.UniqueId() 30 | anotherUniqueID := random.UniqueId() 31 | b64CertificateAuthorityData := base64.StdEncoding.EncodeToString([]byte(anotherUniqueID)) 32 | mockCluster := &eks.Cluster{ 33 | Arn: aws.String("arn:aws:eks:us-east-2:111111111111:cluster/" + uniqueID), 34 | Name: aws.String(uniqueID), 35 | Endpoint: aws.String("gruntwork.io"), 36 | CertificateAuthority: &eks.Certificate{Data: aws.String(b64CertificateAuthorityData)}, 37 | } 38 
| options := &kubectl.KubectlOptions{ContextName: t.Name(), ConfigPath: kubeconfigPath} 39 | err = ConfigureKubectlForEks(mockCluster, options) 40 | require.NoError(t, err) 41 | 42 | // Verify config was updated 43 | kubeconfig := k8s.LoadConfigFromPath(kubeconfigPath) 44 | rawConfig, err := kubeconfig.RawConfig() 45 | require.NoError(t, err) 46 | require.NotEqual(t, rawConfig, originalRawConfig) 47 | } 48 | 49 | func generateTempConfig(t *testing.T) string { 50 | escapedTestName := url.PathEscape(t.Name()) 51 | tmpfile, err := ioutil.TempFile("", escapedTestName) 52 | require.NoError(t, err) 53 | defer tmpfile.Close() 54 | 55 | _, err = tmpfile.WriteString(BASIC_CONFIG) 56 | require.NoError(t, err) 57 | return tmpfile.Name() 58 | } 59 | 60 | // Various example configs used in testing the config manipulation functions 61 | 62 | const BASIC_CONFIG = `apiVersion: v1 63 | clusters: 64 | - cluster: 65 | certificate-authority: /home/terratest/.minikube/ca.crt 66 | server: https://172.17.0.48:8443 67 | name: minikube 68 | contexts: 69 | - context: 70 | cluster: minikube 71 | user: minikube 72 | name: minikube 73 | current-context: minikube 74 | kind: Config 75 | preferences: {} 76 | users: 77 | - name: minikube 78 | user: 79 | client-certificate: /home/terratest/.minikube/client.crt 80 | client-key: /home/terratest/.minikube/client.key 81 | ` 82 | -------------------------------------------------------------------------------- /eks/oidc.go: -------------------------------------------------------------------------------- 1 | package eks 2 | 3 | import ( 4 | "crypto/sha1" 5 | "encoding/hex" 6 | "encoding/json" 7 | "io/ioutil" 8 | "net" 9 | "net/http" 10 | "net/url" 11 | "path" 12 | 13 | "github.com/gruntwork-io/go-commons/errors" 14 | 15 | "github.com/gruntwork-io/kubergrunt/logging" 16 | ) 17 | 18 | type Thumbprint struct { 19 | Thumbprint string `json:"thumbprint"` 20 | } 21 | 22 | type PartialOIDCConfig struct { 23 | JwksURI string `json:"jwks_uri"` 24 | } 25 | 26 | // 
GetOIDCThumbprint will retrieve the thumbprint of the root CA for the OIDC Provider identified by the issuer URL. 27 | // This is done by first looking up the domain where the keys are provided, and then looking up the TLS certificate 28 | // chain for that domain. 29 | func GetOIDCThumbprint(issuerURL string) (*Thumbprint, error) { 30 | logger := logging.GetProjectLogger() 31 | logger.Infof("Retrieving OIDC Issuer (%s) CA Thumbprint", issuerURL) 32 | 33 | openidConfigURL, err := getOIDCConfigURL(issuerURL) 34 | if err != nil { 35 | logger.Errorf("Error parsing OIDC Issuer URL: %s is not a valid URL", issuerURL) 36 | return nil, err 37 | } 38 | 39 | jwksURL, err := getJwksURL(openidConfigURL) 40 | if err != nil { 41 | logger.Errorf("Error retrieving JWKS URI from Issuer Config URL %s", openidConfigURL) 42 | return nil, err 43 | } 44 | 45 | thumbprint, err := getThumbprint(jwksURL) 46 | if err != nil { 47 | logger.Errorf("Error retrieving root CA Thumbprint for JWKS URL %s", jwksURL) 48 | return nil, err 49 | } 50 | logger.Infof("Retrieved OIDC Issuer (%s) CA Thumbprint: %s", issuerURL, thumbprint) 51 | return &Thumbprint{Thumbprint: thumbprint}, nil 52 | } 53 | 54 | // getOIDCConfigURL constructs the URL where you can retrieve the OIDC Config information for a given OIDC provider. 55 | func getOIDCConfigURL(issuerURL string) (string, error) { 56 | parsedURL, err := url.Parse(issuerURL) 57 | if err != nil { 58 | return "", errors.WithStackTrace(err) 59 | } 60 | 61 | parsedURL.Path = path.Join(parsedURL.Path, ".well-known", "openid-configuration") 62 | openidConfigURL := parsedURL.String() 63 | return openidConfigURL, nil 64 | } 65 | 66 | // getJwksURL returns the configured URL where the JWKS keys can be retrieved from the provider. 
67 | func getJwksURL(openidConfigURL string) (string, error) { 68 | resp, err := http.Get(openidConfigURL) 69 | if err != nil { 70 | return "", errors.WithStackTrace(err) 71 | } 72 | 73 | defer resp.Body.Close() 74 | body, err := ioutil.ReadAll(resp.Body) 75 | if err != nil { 76 | return "", errors.WithStackTrace(err) 77 | } 78 | 79 | var partialOIDCConfig PartialOIDCConfig 80 | if err := json.Unmarshal(body, &partialOIDCConfig); err != nil { 81 | return "", errors.WithStackTrace(err) 82 | } 83 | 84 | return partialOIDCConfig.JwksURI, nil 85 | } 86 | 87 | // getThumbprint will get the root CA from TLS certificate chain for the FQDN of the JWKS URL. 88 | func getThumbprint(jwksURL string) (string, error) { 89 | parsedURL, err := url.Parse(jwksURL) 90 | if err != nil { 91 | return "", errors.WithStackTrace(err) 92 | } 93 | hostname := parsedURL.Host 94 | if parsedURL.Port() == "" { 95 | hostname = net.JoinHostPort(hostname, "443") 96 | } 97 | 98 | resp, err := http.Get("https://" + hostname) 99 | if err != nil { 100 | return "", errors.WithStackTrace(err) 101 | } 102 | defer resp.Body.Close() 103 | 104 | peerCerts := resp.TLS.PeerCertificates 105 | numCerts := len(peerCerts) 106 | if numCerts == 0 { 107 | return "", errors.WithStackTrace(NoPeerCertificatesError{jwksURL}) 108 | } 109 | 110 | // root CA certificate is the last one in the list 111 | root := peerCerts[numCerts-1] 112 | return sha1Hash(root.Raw), nil 113 | } 114 | 115 | // sha1Hash computes the SHA1 of the byte array and returns the hex encoding as a string. 
116 | func sha1Hash(data []byte) string { 117 | hasher := sha1.New() 118 | hasher.Write(data) 119 | hashed := hasher.Sum(nil) 120 | return hex.EncodeToString(hashed) 121 | } 122 | -------------------------------------------------------------------------------- /eks/oidc_test.go: -------------------------------------------------------------------------------- 1 | package eks 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestGetOIDCConfigURL(t *testing.T) { 10 | t.Parallel() 11 | 12 | testCases := []struct { 13 | name string 14 | issuerURL string 15 | expected string 16 | }{ 17 | { 18 | "base", 19 | "https://accounts.google.com", 20 | "https://accounts.google.com/.well-known/openid-configuration", 21 | }, 22 | { 23 | "trailing-slash", 24 | "https://accounts.google.com/", 25 | "https://accounts.google.com/.well-known/openid-configuration", 26 | }, 27 | { 28 | "include-path", 29 | "https://accounts.google.com/id/1234", 30 | "https://accounts.google.com/id/1234/.well-known/openid-configuration", 31 | }, 32 | } 33 | 34 | for _, testCase := range testCases { 35 | // Capture range variable to bring in scope within for loop to avoid it changing 36 | testCase := testCase 37 | 38 | t.Run(testCase.name, func(t *testing.T) { 39 | t.Parallel() 40 | 41 | configURL, err := getOIDCConfigURL(testCase.issuerURL) 42 | assert.NoError(t, err) 43 | assert.Equal(t, configURL, testCase.expected) 44 | }) 45 | } 46 | } 47 | 48 | func TestGetJwksURL(t *testing.T) { 49 | const configURL = "https://accounts.google.com/.well-known/openid-configuration" 50 | const expected = "https://www.googleapis.com/oauth2/v3/certs" 51 | jwksURL, err := getJwksURL(configURL) 52 | assert.NoError(t, err) 53 | assert.Equal(t, jwksURL, expected) 54 | } 55 | 56 | func TestGetThumbprint(t *testing.T) { 57 | const jwksURL = "https://www.googleapis.com/oauth2/v3/certs" 58 | const expected = "08745487e891c19e3078c1f2a07e452950ef36f6" 59 | thumbprint, err := 
getThumbprint(jwksURL) 60 | assert.NoError(t, err) 61 | assert.Equal(t, expected, thumbprint) 62 | } 63 | -------------------------------------------------------------------------------- /eks/tls.go: -------------------------------------------------------------------------------- 1 | package eks 2 | 3 | import ( 4 | "crypto/tls" 5 | "crypto/x509" 6 | "encoding/base64" 7 | "github.com/gruntwork-io/go-commons/errors" 8 | "net/http" 9 | ) 10 | 11 | // loadHttpCA takes base64 encoded certificate authority data and and loads a certificate pool that includes the 12 | // provided CA data. 13 | func loadHttpCA(b64CAData string) (*x509.CertPool, error) { 14 | caCert, err := base64.StdEncoding.DecodeString(b64CAData) 15 | if err != nil { 16 | return nil, errors.WithStackTrace(err) 17 | } 18 | caCertPool := x509.NewCertPool() 19 | caCertPool.AppendCertsFromPEM(caCert) 20 | return caCertPool, nil 21 | } 22 | 23 | // loadHttpClientWithCA takes base64 enconded certificate authority data and loads it into an HTTP client that can 24 | // verify TLS endpoints with the CA data. 25 | func loadHttpClientWithCA(b64CAData string) (*http.Client, error) { 26 | caCertPool, err := loadHttpCA(b64CAData) 27 | if err != nil { 28 | return nil, err 29 | } 30 | 31 | client := &http.Client{ 32 | Transport: &http.Transport{ 33 | TLSClientConfig: &tls.Config{ 34 | RootCAs: caCertPool, 35 | }, 36 | }, 37 | } 38 | return client, nil 39 | } 40 | -------------------------------------------------------------------------------- /eksawshelper/arn.go: -------------------------------------------------------------------------------- 1 | package eksawshelper 2 | 3 | import ( 4 | "strings" 5 | 6 | "github.com/aws/aws-sdk-go/aws" 7 | "github.com/aws/aws-sdk-go/aws/arn" 8 | "github.com/aws/aws-sdk-go/service/eks" 9 | ) 10 | 11 | // GetClusterNameFromArn extracts the EKS cluster name given the ARN for the cluster. 
12 | func GetClusterNameFromArn(eksClusterArnString string) (string, error) { 13 | eksClusterArn, err := arn.Parse(eksClusterArnString) 14 | if err != nil { 15 | return "", err 16 | } 17 | 18 | // EKS Cluster ARN resource section is cluster/CLUSTER_NAME, so we extract out the cluster name by droping the first 19 | // path. 20 | return strings.Join(strings.Split(eksClusterArn.Resource, "/")[1:], "/"), nil 21 | } 22 | 23 | // GetRegionFromArn extracts the AWS region that the EKS cluster is in from the ARN of the EKS cluster. 24 | func GetRegionFromArn(eksClusterArnString string) (string, error) { 25 | eksClusterArn, err := arn.Parse(eksClusterArnString) 26 | if err != nil { 27 | return "", err 28 | } 29 | return eksClusterArn.Region, nil 30 | } 31 | 32 | // GetClusterArnByNameAndRegion looks up the EKS Cluster ARN using the region and EKS Cluster Name. 33 | // For instances where we don't have the EKS Cluster ARN, such as within the Fargate Profile resource. 34 | func GetClusterArnByNameAndRegion(eksClusterName string, region string) (string, error) { 35 | sess, err := NewAuthenticatedSession(region) 36 | if err != nil { 37 | return "", err 38 | } 39 | 40 | svc := eks.New(sess) 41 | input := &eks.DescribeClusterInput{ 42 | Name: aws.String(eksClusterName), 43 | } 44 | 45 | output, err := svc.DescribeCluster(input) 46 | if err != nil { 47 | return "", err 48 | } 49 | return *output.Cluster.Arn, nil 50 | } 51 | -------------------------------------------------------------------------------- /eksawshelper/arn_test.go: -------------------------------------------------------------------------------- 1 | package eksawshelper 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestGetClusterNameFromArn(t *testing.T) { 10 | t.Parallel() 11 | 12 | var testCases = []struct { 13 | in string 14 | out string 15 | }{ 16 | {"arn:aws:eks:us-east-2:111111111111:cluster/eks-cluster-srlBd2", "eks-cluster-srlBd2"}, 17 | 
{"arn:aws:eks:us-east-2:111111111111:cluster/eks-cluster/srlBd2", "eks-cluster/srlBd2"}, 18 | } 19 | for _, testcase := range testCases { 20 | testcase := testcase 21 | t.Run(testcase.out, func(t *testing.T) { 22 | t.Parallel() 23 | 24 | name, err := GetClusterNameFromArn(testcase.in) 25 | assert.NoError(t, err) 26 | assert.Equal(t, name, testcase.out) 27 | }) 28 | } 29 | } 30 | 31 | func TestGetClusterNameFromArnErrorCases(t *testing.T) { 32 | t.Parallel() 33 | 34 | var testCases = []string{ 35 | "eks-cluster-srlBd2", 36 | "", 37 | "aws:eks:us-east-2:111111111111:cluster/eks-cluster/srlBd2", 38 | } 39 | for _, testcase := range testCases { 40 | testcase := testcase 41 | t.Run(testcase, func(t *testing.T) { 42 | t.Parallel() 43 | 44 | name, err := GetClusterNameFromArn(testcase) 45 | assert.Error(t, err) 46 | assert.Equal(t, name, "") 47 | }) 48 | } 49 | } 50 | 51 | func TestGetRegionFromArn(t *testing.T) { 52 | t.Parallel() 53 | 54 | var testCases = []struct { 55 | in string 56 | out string 57 | }{ 58 | {"arn:aws:eks:us-east-2:111111111111:cluster/eks-cluster-srlBd2", "us-east-2"}, 59 | {"arn:aws:eks:eu-west-1:111111111111:cluster/eks-cluster/srlBd2", "eu-west-1"}, 60 | {"arn:aws:eks::111111111111:cluster/eks-cluster/srlBd2", ""}, 61 | } 62 | for _, testcase := range testCases { 63 | testcase := testcase 64 | t.Run(testcase.out, func(t *testing.T) { 65 | t.Parallel() 66 | 67 | region, err := GetRegionFromArn(testcase.in) 68 | assert.NoError(t, err) 69 | assert.Equal(t, region, testcase.out) 70 | }) 71 | } 72 | } 73 | 74 | func TestGetRegionFromArnErrorCases(t *testing.T) { 75 | t.Parallel() 76 | 77 | var testCases = []string{ 78 | "eks-cluster-srlBd2", 79 | "", 80 | "aws:eks:us-east-2:111111111111:cluster/eks-cluster/srlBd2", 81 | } 82 | for _, testcase := range testCases { 83 | testcase := testcase 84 | t.Run(testcase, func(t *testing.T) { 85 | t.Parallel() 86 | 87 | name, err := GetClusterNameFromArn(testcase) 88 | assert.Error(t, err) 89 | assert.Equal(t, name, 
"") 90 | }) 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /eksawshelper/client.go: -------------------------------------------------------------------------------- 1 | package eksawshelper 2 | 3 | import ( 4 | "github.com/aws/aws-sdk-go/aws" 5 | "github.com/aws/aws-sdk-go/aws/session" 6 | ) 7 | 8 | // NewAuthenticatedSession gets an AWS Session, checking that the user has credentials properly configured in their environment. 9 | func NewAuthenticatedSession(region string) (*session.Session, error) { 10 | opts := session.Options{ 11 | Config: *(aws.NewConfig().WithRegion(region)), 12 | SharedConfigState: session.SharedConfigEnable, 13 | } 14 | sess, err := session.NewSessionWithOptions(opts) 15 | if err != nil { 16 | return nil, err 17 | } 18 | 19 | if _, err = sess.Config.Credentials.Get(); err != nil { 20 | return nil, CredentialsError{UnderlyingErr: err} 21 | } 22 | 23 | return sess, nil 24 | } 25 | -------------------------------------------------------------------------------- /eksawshelper/cluster.go: -------------------------------------------------------------------------------- 1 | package eksawshelper 2 | 3 | import ( 4 | "github.com/aws/aws-sdk-go/aws" 5 | "github.com/aws/aws-sdk-go/service/eks" 6 | "github.com/gruntwork-io/go-commons/errors" 7 | "sigs.k8s.io/aws-iam-authenticator/pkg/token" 8 | 9 | "github.com/gruntwork-io/kubergrunt/logging" 10 | ) 11 | 12 | // GetClusterByArn returns the EKS Cluster object that corresponds to the given ARN. 
13 | func GetClusterByArn(eksClusterArn string) (*eks.Cluster, error) { 14 | logger := logging.GetProjectLogger() 15 | logger.Infof("Retrieving details for EKS cluster %s", eksClusterArn) 16 | 17 | region, err := GetRegionFromArn(eksClusterArn) 18 | if err != nil { 19 | return nil, errors.WithStackTrace(err) 20 | } 21 | logger.Infof("Detected cluster deployed in region %s", region) 22 | 23 | client, err := NewEksClient(region) 24 | if err != nil { 25 | return nil, errors.WithStackTrace(err) 26 | } 27 | 28 | eksClusterName, err := GetClusterNameFromArn(eksClusterArn) 29 | if err != nil { 30 | return nil, errors.WithStackTrace(err) 31 | } 32 | 33 | describeClusterOutput, err := client.DescribeCluster(&eks.DescribeClusterInput{Name: aws.String(eksClusterName)}) 34 | if err != nil { 35 | return nil, errors.WithStackTrace(err) 36 | } 37 | 38 | logger.Infof("Successfully retrieved EKS cluster details") 39 | 40 | return describeClusterOutput.Cluster, nil 41 | } 42 | 43 | func GetKubernetesTokenForCluster(clusterID string) (*token.Token, string, error) { 44 | gen, err := token.NewGenerator(false, false) 45 | if err != nil { 46 | return nil, "", errors.WithStackTrace(err) 47 | } 48 | tok, err := gen.Get(clusterID) 49 | return &tok, gen.FormatJSON(tok), errors.WithStackTrace(err) 50 | } 51 | 52 | // NewEksClient creates an EKS client. 
53 | func NewEksClient(region string) (*eks.EKS, error) { 54 | sess, err := NewAuthenticatedSession(region) 55 | if err != nil { 56 | return nil, err 57 | } 58 | return eks.New(sess), nil 59 | } 60 | -------------------------------------------------------------------------------- /eksawshelper/ecr.go: -------------------------------------------------------------------------------- 1 | package eksawshelper 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "net/http" 7 | 8 | "github.com/aws/aws-sdk-go/aws" 9 | "github.com/aws/aws-sdk-go/service/ecr" 10 | "github.com/gruntwork-io/go-commons/errors" 11 | "github.com/gruntwork-io/kubergrunt/commonerrors" 12 | "github.com/hashicorp/go-cleanhttp" 13 | ) 14 | 15 | // TagExistsInRepo queries the ECR repository docker API to see if the given tag exists for the given ECR repository. 16 | func TagExistsInRepo(token, repoDomain, repoPath, tag string) (bool, error) { 17 | manifestURL := fmt.Sprintf("https://%s/v2/%s/manifests/%s", repoDomain, repoPath, tag) 18 | req, err := http.NewRequest("GET", manifestURL, nil) 19 | if err != nil { 20 | return false, errors.WithStackTrace(err) 21 | } 22 | req.Header.Set("Authorization", "Basic "+token) 23 | 24 | httpClient := cleanhttp.DefaultClient() 25 | resp, err := httpClient.Do(req) 26 | if err != nil { 27 | return false, errors.WithStackTrace(err) 28 | } 29 | 30 | switch resp.StatusCode { 31 | case 200: 32 | return true, nil 33 | case 404: 34 | return false, nil 35 | } 36 | 37 | // All other status codes should be consider API errors. 38 | defer resp.Body.Close() 39 | body, err := ioutil.ReadAll(resp.Body) 40 | if err != nil { 41 | return false, errors.WithStackTrace(err) 42 | } 43 | return false, errors.WithStackTrace(ECRManifestFetchError{ 44 | manifestURL: manifestURL, 45 | statusCode: resp.StatusCode, 46 | body: string(body), 47 | }) 48 | } 49 | 50 | // GetDockerLoginToken retrieves an authorization token that can be used to access ECR via the docker APIs. 
The 51 | // return token can directly be used as a HTTP authorization header for basic auth. 52 | func GetDockerLoginToken(region string) (string, error) { 53 | client, err := NewECRClient(region) 54 | if err != nil { 55 | return "", errors.WithStackTrace(err) 56 | } 57 | 58 | resp, err := client.GetAuthorizationToken(&ecr.GetAuthorizationTokenInput{}) 59 | if err != nil { 60 | return "", errors.WithStackTrace(err) 61 | } 62 | 63 | if len(resp.AuthorizationData) != 1 { 64 | // AWS docs mention that there is always one token returned on a successful response. 65 | return "", errors.WithStackTrace(commonerrors.ImpossibleErr("AWS_DID_NOT_RETURN_DOCKER_TOKEN")) 66 | } 67 | return aws.StringValue(resp.AuthorizationData[0].AuthorizationToken), nil 68 | } 69 | 70 | // NewECRClient creates an AWS SDK client to access ECR API. 71 | func NewECRClient(region string) (*ecr.ECR, error) { 72 | sess, err := NewAuthenticatedSession(region) 73 | if err != nil { 74 | return nil, err 75 | } 76 | return ecr.New(sess), nil 77 | } 78 | -------------------------------------------------------------------------------- /eksawshelper/ecr_test.go: -------------------------------------------------------------------------------- 1 | package eksawshelper 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func TestTagExistsInRepo(t *testing.T) { 12 | t.Parallel() 13 | 14 | region := "us-west-2" 15 | existingTag := "v1.18.8-eksbuild.1" 16 | nonExistingTag := "v1.18.8-eksbuild.10" 17 | 18 | token, err := GetDockerLoginToken(region) 19 | require.NoError(t, err) 20 | repoDomain := fmt.Sprintf("602401143452.dkr.ecr.%s.amazonaws.com", region) 21 | tagExists1, err := TagExistsInRepo(token, repoDomain, "eks/kube-proxy", existingTag) 22 | require.NoError(t, err) 23 | assert.True(t, tagExists1) 24 | 25 | tagExists2, err := TagExistsInRepo(token, repoDomain, "eks/kube-proxy", nonExistingTag) 26 | require.NoError(t, 
err) 27 | assert.False(t, tagExists2) 28 | } 29 | -------------------------------------------------------------------------------- /eksawshelper/eksawshelper.go: -------------------------------------------------------------------------------- 1 | // eksawshelper contains helper functions for accessing the AWS EKS API directly. This package exists to break cyclic 2 | // dependencies between eks and kubectl. 3 | package eksawshelper 4 | -------------------------------------------------------------------------------- /eksawshelper/errors.go: -------------------------------------------------------------------------------- 1 | package eksawshelper 2 | 3 | import "fmt" 4 | 5 | // CredentialsError is an error that occurs because AWS credentials can't be found. 6 | type CredentialsError struct { 7 | UnderlyingErr error 8 | } 9 | 10 | func (err CredentialsError) Error() string { 11 | return fmt.Sprintf("Error finding AWS credentials. Did you set the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables or configure an AWS profile? Underlying error: %v", err.UnderlyingErr) 12 | } 13 | 14 | // ECRManifestFetchError is an error that occurs when retrieving information about a given tag in an ECR repository. 
15 | type ECRManifestFetchError struct { 16 | manifestURL string 17 | statusCode int 18 | body string 19 | } 20 | 21 | func (err ECRManifestFetchError) Error() string { 22 | return fmt.Sprintf("Error querying ECR repo URL %s (status code %d) (response body %s)", err.manifestURL, err.statusCode, err.body) 23 | } 24 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/gruntwork-io/kubergrunt 2 | 3 | go 1.23.0 4 | 5 | toolchain go1.23.7 6 | 7 | require ( 8 | github.com/aws/aws-sdk-go v1.44.145 9 | github.com/blang/semver/v4 v4.0.0 10 | github.com/gruntwork-io/go-commons v0.8.2 11 | github.com/gruntwork-io/terratest v0.46.11 12 | github.com/hashicorp/go-cleanhttp v0.5.2 13 | github.com/hashicorp/go-multierror v1.1.0 14 | github.com/mitchellh/go-homedir v1.1.0 15 | github.com/sirupsen/logrus v1.8.1 16 | github.com/stretchr/testify v1.8.4 17 | github.com/urfave/cli v1.22.4 18 | k8s.io/api v0.28.4 19 | k8s.io/apimachinery v0.28.4 20 | k8s.io/client-go v0.28.4 21 | sigs.k8s.io/aws-iam-authenticator v0.6.1 22 | ) 23 | 24 | require ( 25 | cloud.google.com/go v0.110.0 // indirect 26 | cloud.google.com/go/compute v1.19.1 // indirect 27 | cloud.google.com/go/compute/metadata v0.2.3 // indirect 28 | cloud.google.com/go/iam v0.13.0 // indirect 29 | cloud.google.com/go/storage v1.28.1 // indirect 30 | github.com/agext/levenshtein v1.2.3 // indirect 31 | github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect 32 | github.com/beorn7/perks v1.0.1 // indirect 33 | github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect 34 | github.com/bgentry/speakeasy v0.1.0 // indirect 35 | github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect 36 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 37 | github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect 38 | github.com/davecgh/go-spew v1.1.1 // indirect 39 | 
github.com/emicklei/go-restful/v3 v3.9.0 // indirect 40 | github.com/fatih/color v1.9.0 // indirect 41 | github.com/go-errors/errors v1.0.2-0.20180813162953-d98b870cc4e0 // indirect 42 | github.com/go-logr/logr v1.2.4 // indirect 43 | github.com/go-openapi/jsonpointer v0.19.6 // indirect 44 | github.com/go-openapi/jsonreference v0.20.2 // indirect 45 | github.com/go-openapi/swag v0.22.3 // indirect 46 | github.com/go-sql-driver/mysql v1.5.0 // indirect 47 | github.com/gofrs/flock v0.7.0 // indirect 48 | github.com/gogo/protobuf v1.3.2 // indirect 49 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect 50 | github.com/golang/protobuf v1.5.3 // indirect 51 | github.com/google/gnostic-models v0.6.8 // indirect 52 | github.com/google/go-cmp v0.5.9 // indirect 53 | github.com/google/gofuzz v1.2.0 // indirect 54 | github.com/google/uuid v1.3.0 // indirect 55 | github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect 56 | github.com/googleapis/gax-go/v2 v2.7.1 // indirect 57 | github.com/hashicorp/errwrap v1.0.0 // indirect 58 | github.com/hashicorp/go-getter v1.7.5 // indirect 59 | github.com/hashicorp/go-safetemp v1.0.0 // indirect 60 | github.com/hashicorp/go-version v1.6.0 // indirect 61 | github.com/hashicorp/hcl/v2 v2.9.1 // indirect 62 | github.com/hashicorp/terraform-json v0.13.0 // indirect 63 | github.com/imdario/mergo v0.3.11 // indirect 64 | github.com/jinzhu/copier v0.0.0-20190924061706-b57f9002281a // indirect 65 | github.com/jmespath/go-jmespath v0.4.0 // indirect 66 | github.com/josharian/intern v1.0.0 // indirect 67 | github.com/json-iterator/go v1.1.12 // indirect 68 | github.com/klauspost/compress v1.15.11 // indirect 69 | github.com/mailru/easyjson v0.7.7 // indirect 70 | github.com/mattn/go-colorable v0.1.4 // indirect 71 | github.com/mattn/go-isatty v0.0.19 // indirect 72 | github.com/mattn/go-zglob v0.0.2-0.20190814121620-e3c945676326 // indirect 73 | github.com/matttproud/golang_protobuf_extensions 
v1.0.2-0.20181231171920-c182affec369 // indirect 74 | github.com/mitchellh/go-testing-interface v1.14.1 // indirect 75 | github.com/mitchellh/go-wordwrap v1.0.1 // indirect 76 | github.com/moby/spdystream v0.2.0 // indirect 77 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 78 | github.com/modern-go/reflect2 v1.0.2 // indirect 79 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 80 | github.com/pmezard/go-difflib v1.0.0 // indirect 81 | github.com/pquerna/otp v1.2.0 // indirect 82 | github.com/prometheus/client_golang v1.11.1 // indirect 83 | github.com/prometheus/client_model v0.2.0 // indirect 84 | github.com/prometheus/common v0.26.0 // indirect 85 | github.com/prometheus/procfs v0.6.0 // indirect 86 | github.com/russross/blackfriday/v2 v2.1.0 // indirect 87 | github.com/spf13/pflag v1.0.5 // indirect 88 | github.com/tmccombs/hcl2json v0.3.3 // indirect 89 | github.com/ulikunitz/xz v0.5.10 // indirect 90 | github.com/zclconf/go-cty v1.9.1 // indirect 91 | go.opencensus.io v0.24.0 // indirect 92 | golang.org/x/crypto v0.35.0 // indirect 93 | golang.org/x/net v0.36.0 // indirect 94 | golang.org/x/oauth2 v0.8.0 // indirect 95 | golang.org/x/sys v0.30.0 // indirect 96 | golang.org/x/term v0.29.0 // indirect 97 | golang.org/x/text v0.22.0 // indirect 98 | golang.org/x/time v0.3.0 // indirect 99 | golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect 100 | google.golang.org/api v0.114.0 // indirect 101 | google.golang.org/appengine v1.6.7 // indirect 102 | google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect 103 | google.golang.org/grpc v1.56.3 // indirect 104 | google.golang.org/protobuf v1.33.0 // indirect 105 | gopkg.in/inf.v0 v0.9.1 // indirect 106 | gopkg.in/yaml.v2 v2.4.0 // indirect 107 | gopkg.in/yaml.v3 v3.0.1 // indirect 108 | k8s.io/klog/v2 v2.100.1 // indirect 109 | k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect 110 | k8s.io/utils 
// Package that defines useful structs for constructing JSON Patch operations.
package jsonpatch

// Operation is the JSON Patch operation verb, serialized as the "op" field of
// a patch entry (see RFC 6902 for the operation semantics).
type Operation string

// The six operation verbs defined by JSON Patch.
const (
	AddOp Operation = "add"
	RemoveOp Operation = "remove"
	ReplaceOp Operation = "replace"
	MoveOp Operation = "move"
	CopyOp Operation = "copy"
	TestOp Operation = "test"
)

// PatchString is a single JSON Patch operation whose value is a string. It
// marshals to the {"op": ..., "path": ..., "value": ...} wire format.
type PatchString struct {
	Op Operation `json:"op"`
	Path string `json:"path"`
	Value string `json:"value"`
}
// GetKubernetesClientFromOptions returns a Kubernetes API client given a KubectlOptions object. Constructs the client
// based on the information in the struct:
// - If Server is set, assume direct auth methods and use Server, Base64PEMCertificateAuthority, and BearerToken to
//   construct authenticated client.
// - Else, use ConfigPath and ContextName to load the config from disk and setup the client to use the auth method
//   provided in the context.
func GetKubernetesClientFromOptions(kubectlOptions *KubectlOptions) (*kubernetes.Clientset, error) {
	logger := logging.GetProjectLogger()
	logger.Infof("Loading Kubernetes Client")

	// Resolve the options into a REST client config (direct auth vs.
	// kubeconfig-on-disk, per the rules documented above).
	config, err := LoadApiClientConfigFromOptions(kubectlOptions)
	if err != nil {
		return nil, err
	}
	return kubernetes.NewForConfig(config)
}
35 | return err 36 | } 37 | 38 | func RunKubectlWithOutput(options *KubectlOptions, args ...string) (string, error) { 39 | shellOptions := shell.NewShellOptions() 40 | cmdArgs := []string{} 41 | scheme := options.AuthScheme() 42 | switch scheme { 43 | case ConfigBased: 44 | if options.ContextName != "" { 45 | cmdArgs = append(cmdArgs, "--context", options.ContextName) 46 | } 47 | if options.ConfigPath != "" { 48 | cmdArgs = append(cmdArgs, "--kubeconfig", options.ConfigPath) 49 | } 50 | default: 51 | tmpfile, err := options.TempConfigFromAuthInfo() 52 | if tmpfile != "" { 53 | // Make sure to delete the tmp file at the end 54 | defer os.Remove(tmpfile) 55 | } 56 | if err != nil { 57 | return "ERROR", err 58 | } 59 | cmdArgs = append(cmdArgs, "--kubeconfig", tmpfile) 60 | } 61 | cmdArgs = append(cmdArgs, args...) 62 | out, err := shell.RunShellCommandAndGetAndStreamOutput(shellOptions, "kubectl", cmdArgs...) 63 | return out, err 64 | } 65 | -------------------------------------------------------------------------------- /kubectl/config_test.go: -------------------------------------------------------------------------------- 1 | package kubectl 2 | 3 | import ( 4 | "encoding/base64" 5 | "os" 6 | "path/filepath" 7 | "testing" 8 | 9 | "github.com/gruntwork-io/go-commons/errors" 10 | "github.com/gruntwork-io/go-commons/files" 11 | "github.com/gruntwork-io/terratest/modules/k8s" 12 | "github.com/gruntwork-io/terratest/modules/random" 13 | "github.com/stretchr/testify/assert" 14 | "github.com/stretchr/testify/require" 15 | "k8s.io/client-go/tools/clientcmd/api" 16 | ) 17 | 18 | type MockEksConfigContextData struct { 19 | Config *api.Config 20 | Name string 21 | EksArn string 22 | EksName string 23 | EksEndpoint string 24 | EksCAData string 25 | } 26 | 27 | func TestCreateInitialConfigCreatesDir(t *testing.T) { 28 | // Make sure this is not a relative dir 29 | dirName := random.UniqueId() 30 | require.NotEqual(t, dirName, "") 31 | require.NotEqual(t, dirName, ".") 32 | 
require.NotEqual(t, dirName, "..") 33 | currentDir, err := os.Getwd() 34 | require.NoError(t, err) 35 | configPath := filepath.Join(currentDir, dirName, "config") 36 | defer os.RemoveAll(filepath.Join(currentDir, dirName)) 37 | err = CreateInitialConfig(configPath) 38 | require.NoError(t, err) 39 | require.True(t, files.FileExists(configPath)) 40 | kubeconfig := k8s.LoadConfigFromPath(configPath) 41 | require.NotNil(t, kubeconfig) 42 | } 43 | 44 | func TestAddContextToConfig(t *testing.T) { 45 | mockConfig := api.NewConfig() 46 | contextName := random.UniqueId() 47 | clusterName := random.UniqueId() 48 | authInfoName := random.UniqueId() 49 | 50 | err := AddContextToConfig(mockConfig, contextName, clusterName, authInfoName) 51 | require.NoError(t, err) 52 | 53 | context, ok := mockConfig.Contexts[contextName] 54 | require.True(t, ok) 55 | assert.Equal(t, context.Cluster, clusterName) 56 | assert.Equal(t, context.AuthInfo, authInfoName) 57 | } 58 | 59 | func TestAddEksConfigContextHonorsContextName(t *testing.T) { 60 | mockData, err := basicAddCall(t) 61 | require.NoError(t, err) 62 | 63 | context, ok := mockData.Config.Contexts[mockData.Name] 64 | require.True(t, ok) 65 | assert.Equal(t, context.Cluster, mockData.EksArn) 66 | assert.Equal(t, context.AuthInfo, mockData.EksArn) 67 | } 68 | 69 | func TestAddEksConfigContextFailsOnAddingExistingContext(t *testing.T) { 70 | mockConfig := api.NewConfig() 71 | mockConfig.Contexts[t.Name()] = api.NewContext() 72 | err := AddEksConfigContext( 73 | mockConfig, 74 | t.Name(), 75 | "", 76 | "", 77 | "", 78 | "", 79 | ) 80 | err = errors.Unwrap(err) 81 | require.IsType(t, ContextAlreadyExistsError{}, err, err.Error()) 82 | } 83 | 84 | func TestAddClusterToConfigAppendsCorrectClusterInfo(t *testing.T) { 85 | mockConfig := api.NewConfig() 86 | clusterName := "devops" 87 | clusterEndpoint := "dev" 88 | b64CertificateAuthorityData := base64.StdEncoding.EncodeToString([]byte("ops")) 89 | 90 | err := AddClusterToConfig(mockConfig, 
clusterName, clusterEndpoint, b64CertificateAuthorityData) 91 | require.NoError(t, err) 92 | 93 | caData, err := base64.StdEncoding.DecodeString(b64CertificateAuthorityData) 94 | require.NoError(t, err) 95 | 96 | cluster, ok := mockConfig.Clusters[clusterName] 97 | require.True(t, ok) 98 | assert.Equal(t, cluster.Server, clusterEndpoint) 99 | assert.Equal(t, cluster.CertificateAuthorityData, caData) 100 | assert.False(t, cluster.InsecureSkipTLSVerify) 101 | } 102 | 103 | func TestAddEksConfigContextAppendsCorrectClusterInfo(t *testing.T) { 104 | mockData, err := basicAddCall(t) 105 | require.NoError(t, err) 106 | caData, err := base64.StdEncoding.DecodeString(mockData.EksCAData) 107 | require.NoError(t, err) 108 | 109 | cluster, ok := mockData.Config.Clusters[mockData.EksArn] 110 | require.True(t, ok) 111 | assert.Equal(t, cluster.Server, mockData.EksEndpoint) 112 | assert.Equal(t, cluster.CertificateAuthorityData, caData) 113 | assert.False(t, cluster.InsecureSkipTLSVerify) 114 | } 115 | 116 | func TestAddEksConfigContextAppendsCorrectAuthInfo(t *testing.T) { 117 | mockData, err := basicAddCall(t) 118 | require.NoError(t, err) 119 | 120 | authInfo, ok := mockData.Config.AuthInfos[mockData.EksArn] 121 | require.True(t, ok) 122 | 123 | execInfo := authInfo.Exec 124 | assert.Contains(t, execInfo.Args, mockData.EksName) 125 | 126 | // Verify none of the other authentication styles are set 127 | assert.Equal(t, authInfo.ClientCertificate, "") 128 | assert.Equal(t, authInfo.ClientCertificateData, []byte(nil)) 129 | assert.Equal(t, authInfo.ClientKey, "") 130 | assert.Equal(t, authInfo.ClientKeyData, []byte(nil)) 131 | assert.Equal(t, authInfo.Token, "") 132 | assert.Equal(t, authInfo.TokenFile, "") 133 | assert.Equal(t, authInfo.Impersonate, "") 134 | assert.Equal(t, authInfo.ImpersonateGroups, []string(nil)) 135 | assert.Equal(t, authInfo.ImpersonateUserExtra, map[string][]string{}) 136 | assert.Equal(t, authInfo.Username, "") 137 | assert.Equal(t, authInfo.Password, 
// TestAddEksAuthInfoToConfigAppendsCorrectAuthInfo verifies that AddEksAuthInfoToConfig registers an exec-plugin
// auth info entry keyed by the cluster ARN, referencing the cluster name in its args, and that no other
// authentication mechanism (certs, tokens, impersonation, basic auth) is populated.
func TestAddEksAuthInfoToConfigAppendsCorrectAuthInfo(t *testing.T) {
	mockConfig := api.NewConfig()
	name := t.Name()
	arn := "arn:aws:eks:us-east-2:111111111111:cluster/" + t.Name()

	err := AddEksAuthInfoToConfig(mockConfig, arn, name)
	require.NoError(t, err)

	// The auth info entry should be registered under the full cluster ARN.
	authInfo, ok := mockConfig.AuthInfos[arn]
	require.True(t, ok)

	// The exec plugin invocation should reference the cluster name.
	execInfo := authInfo.Exec
	assert.Contains(t, execInfo.Args, name)

	// Verify none of the other authentication styles are set
	assert.Equal(t, authInfo.ClientCertificate, "")
	assert.Equal(t, authInfo.ClientCertificateData, []byte(nil))
	assert.Equal(t, authInfo.ClientKey, "")
	assert.Equal(t, authInfo.ClientKeyData, []byte(nil))
	assert.Equal(t, authInfo.Token, "")
	assert.Equal(t, authInfo.TokenFile, "")
	assert.Equal(t, authInfo.Impersonate, "")
	assert.Equal(t, authInfo.ImpersonateGroups, []string(nil))
	assert.Equal(t, authInfo.ImpersonateUserExtra, map[string][]string{})
	assert.Equal(t, authInfo.Username, "")
	assert.Equal(t, authInfo.Password, "")
}
170 | func basicAddCall(t *testing.T) (MockEksConfigContextData, error) { 171 | uniqueID := random.UniqueId() 172 | arn := "arn:aws:eks:us-east-2:111111111111:cluster/" + t.Name() 173 | name := t.Name() 174 | endpoint := "gruntwork.io" 175 | 176 | anotherUniqueID := random.UniqueId() 177 | b64CertificateAuthorityData := base64.StdEncoding.EncodeToString([]byte(anotherUniqueID)) 178 | 179 | mockConfig := api.NewConfig() 180 | err := AddEksConfigContext( 181 | mockConfig, 182 | uniqueID, 183 | arn, 184 | name, 185 | endpoint, 186 | b64CertificateAuthorityData, 187 | ) 188 | mockData := MockEksConfigContextData{ 189 | Config: mockConfig, 190 | Name: uniqueID, 191 | EksArn: arn, 192 | EksName: name, 193 | EksEndpoint: endpoint, 194 | EksCAData: b64CertificateAuthorityData, 195 | } 196 | return mockData, err 197 | } 198 | -------------------------------------------------------------------------------- /kubectl/errors.go: -------------------------------------------------------------------------------- 1 | package kubectl 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | // KubeContextNotFound error is returned when the specified Kubernetes context is unabailable in the specified 8 | // kubeconfig. 
9 | type KubeContextNotFound struct { 10 | Options *KubectlOptions 11 | } 12 | 13 | func (err KubeContextNotFound) Error() string { 14 | return fmt.Sprintf("Context %s does not exist in config %s", err.Options.ContextName, err.Options.ConfigPath) 15 | } 16 | 17 | // ContextAlreadyExistsError is returned when trying to create a new context with a name that is already in the config 18 | type ContextAlreadyExistsError struct { 19 | contextName string 20 | } 21 | 22 | func (err ContextAlreadyExistsError) Error() string { 23 | return fmt.Sprintf("kubeconfig context %s already exists", err.contextName) 24 | } 25 | 26 | func NewContextAlreadyExistsError(contextName string) ContextAlreadyExistsError { 27 | return ContextAlreadyExistsError{contextName} 28 | } 29 | 30 | // AuthSchemeNotSupported is returned when the specified auth scheme in KubectlOptions is not supported. 31 | type AuthSchemeNotSupported struct { 32 | scheme AuthScheme 33 | } 34 | 35 | func (err AuthSchemeNotSupported) Error() string { 36 | return fmt.Sprintf("The auth scheme %s is not supported", authSchemeToString(err.scheme)) 37 | } 38 | 39 | // NodeReadyTimeoutError is returned when we timeout waiting for nodes to reach ready state 40 | type NodeReadyTimeoutError struct { 41 | numNodes int 42 | } 43 | 44 | func (err NodeReadyTimeoutError) Error() string { 45 | return fmt.Sprintf("Timed out wiating for %d nodes to reach ready state", err.numNodes) 46 | } 47 | 48 | func NewNodeReadyTimeoutError(numNodes int) NodeReadyTimeoutError { 49 | return NodeReadyTimeoutError{numNodes} 50 | } 51 | 52 | // NodeDrainError is returned when there is an error draining a node. 53 | type NodeDrainError struct { 54 | Error error 55 | NodeID string 56 | } 57 | 58 | // NodeCordonError is returned when there is an error cordoning a node. 59 | type NodeCordonError struct { 60 | Error error 61 | NodeID string 62 | } 63 | 64 | // LoadBalancerNotReadyError is returned when the LoadBalancer Service is unexpectedly not ready. 
65 | type LoadBalancerNotReadyError struct { 66 | serviceName string 67 | } 68 | 69 | func (err LoadBalancerNotReadyError) Error() string { 70 | return fmt.Sprintf("LoadBalancer is not ready on service %s", err.serviceName) 71 | } 72 | 73 | func NewLoadBalancerNotReadyError(serviceName string) LoadBalancerNotReadyError { 74 | return LoadBalancerNotReadyError{serviceName} 75 | } 76 | 77 | // LoadBalancerNameFormatError is returned when the hostname of the load balancer is in an unexpected format 78 | type LoadBalancerNameFormatError struct { 79 | hostname string 80 | } 81 | 82 | func (err LoadBalancerNameFormatError) Error() string { 83 | return fmt.Sprintf("LoadBalancer hostname is in an unexpected format: %s", err.hostname) 84 | } 85 | 86 | func NewLoadBalancerNameFormatError(hostname string) LoadBalancerNameFormatError { 87 | return LoadBalancerNameFormatError{hostname} 88 | } 89 | 90 | // ProvisionIngressEndpointTimeoutError is returned when we time out waiting for the endpoint to be provisioned. 91 | type ProvisionIngressEndpointTimeoutError struct { 92 | ingressName string 93 | namespace string 94 | } 95 | 96 | func (err ProvisionIngressEndpointTimeoutError) Error() string { 97 | return fmt.Sprintf( 98 | "Timed out waiting for Ingress %s (Namespace: %s) to provision endpoint.", 99 | err.ingressName, 100 | err.namespace, 101 | ) 102 | } 103 | 104 | // UnknownAWSLoadBalancerTypeErr is returned when we encounter a load balancer type that we don't expect/support. 
105 | type UnknownAWSLoadBalancerTypeErr struct { 106 | typeKey string 107 | typeStr string 108 | } 109 | 110 | func (err UnknownAWSLoadBalancerTypeErr) Error() string { 111 | return fmt.Sprintf( 112 | "Unknown value for annotation %s (value: %s)", 113 | err.typeKey, 114 | err.typeStr, 115 | ) 116 | } 117 | -------------------------------------------------------------------------------- /kubectl/helpers.go: -------------------------------------------------------------------------------- 1 | package kubectl 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | ) 9 | 10 | // LabelsToListOptions takes a map of label keys/values and returns ListOptions with LabelSelector 11 | func LabelsToListOptions(labels map[string]string) metav1.ListOptions { 12 | var selectors []string 13 | for k, v := range labels { 14 | selectors = append(selectors, fmt.Sprintf("%s=%s", k, v)) 15 | } 16 | return metav1.ListOptions{ 17 | LabelSelector: strings.Join(selectors, ","), 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /kubectl/ingress.go: -------------------------------------------------------------------------------- 1 | package kubectl 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/gruntwork-io/go-commons/errors" 8 | networkingv1 "k8s.io/api/networking/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | 11 | "github.com/gruntwork-io/kubergrunt/logging" 12 | ) 13 | 14 | // GetIngress returns a Kubernetes Ingress resource in the provided namespace with the given name. 
15 | func GetIngress(options *KubectlOptions, namespace string, ingressName string) (*networkingv1.Ingress, error) { 16 | client, err := GetKubernetesClientFromOptions(options) 17 | if err != nil { 18 | return nil, err 19 | } 20 | 21 | return client.NetworkingV1().Ingresses(namespace).Get(context.Background(), ingressName, metav1.GetOptions{}) 22 | } 23 | 24 | // IsIngressAvailable returns true if the Ingress endpoint is provisioned and available. 25 | func IsIngressAvailable(ingress *networkingv1.Ingress) bool { 26 | // Ingress is ready if it has at least one endpoint 27 | endpoints := ingress.Status.LoadBalancer.Ingress 28 | return len(endpoints) > 0 29 | } 30 | 31 | // GetIngressEndpoints returns all the available ingress endpoints (preferring hostnames, and if unavailable, returning 32 | // IPs). Note that if no endpoints are available, returns empty list. 33 | func GetIngressEndpoints(ingress *networkingv1.Ingress) []string { 34 | endpointStatuses := ingress.Status.LoadBalancer.Ingress 35 | endpoints := []string{} 36 | for _, endpointStatus := range endpointStatuses { 37 | endpoint := endpointStatus.Hostname 38 | if endpoint == "" { 39 | endpoint = endpointStatus.IP 40 | } 41 | endpoints = append(endpoints, endpoint) 42 | } 43 | return endpoints 44 | } 45 | 46 | // WaitUntilIngressEndpointProvisioned continuously checks the Ingress resource until the endpoint is provisioned or if 47 | // it times out. 
48 | func WaitUntilIngressEndpointProvisioned( 49 | options *KubectlOptions, 50 | namespace string, 51 | ingressName string, 52 | maxRetries int, 53 | sleepBetweenRetries time.Duration, 54 | ) error { 55 | logger := logging.GetProjectLogger() 56 | logger.Infof("Waiting for Ingress %s (Namespace: %s) endpoint to be provisioned.", ingressName, namespace) 57 | 58 | for i := 0; i < maxRetries; i++ { 59 | logger.Info("Retrieving Ingress and checking if the endpoint is provisioned.") 60 | 61 | ingress, err := GetIngress(options, namespace, ingressName) 62 | if err == nil && IsIngressAvailable(ingress) { 63 | endpoints := GetIngressEndpoints(ingress) 64 | logger.Infof("Endpoint for Ingress %s (Namespace: %s): %v", ingressName, namespace, endpoints) 65 | return nil 66 | } 67 | 68 | logger.Warnf("Endpoint for Ingress %s (Namespace: %s) is not provisioned yet", ingressName, namespace) 69 | logger.Infof("Waiting for %s...", sleepBetweenRetries) 70 | time.Sleep(sleepBetweenRetries) 71 | } 72 | return errors.WithStackTrace(ProvisionIngressEndpointTimeoutError{ingressName: ingressName, namespace: namespace}) 73 | } 74 | -------------------------------------------------------------------------------- /kubectl/ingress_test.go: -------------------------------------------------------------------------------- 1 | package kubectl 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "testing" 7 | "time" 8 | 9 | "github.com/gruntwork-io/terratest/modules/k8s" 10 | "github.com/gruntwork-io/terratest/modules/random" 11 | "github.com/stretchr/testify/require" 12 | ) 13 | 14 | const ExampleIngressName = "nginx-service-ingress" 15 | 16 | func TestGetIngressReturnsErrorForNonExistantIngress(t *testing.T) { 17 | t.Parallel() 18 | 19 | kubeConfigPath, err := k8s.GetKubeConfigPathE(t) 20 | require.NoError(t, err) 21 | kubectlOptions := &KubectlOptions{ConfigPath: kubeConfigPath} 22 | _, err = GetIngress(kubectlOptions, "kube-system", "i-dont-exist") 23 | require.Error(t, err) 24 | } 25 | 26 | func 
TestGetIngressEReturnsCorrectIngressInCorrectNamespace(t *testing.T) { 27 | t.Parallel() 28 | 29 | uniqueID := strings.ToLower(random.UniqueId()) 30 | ttKubectlOptions := k8s.NewKubectlOptions("", "", uniqueID) 31 | configData := fmt.Sprintf( 32 | exampleIngressDeploymentYAMLTemplate, 33 | uniqueID, uniqueID, uniqueID, uniqueID, uniqueID, 34 | ) 35 | defer k8s.KubectlDeleteFromString(t, ttKubectlOptions, configData) 36 | k8s.KubectlApplyFromString(t, ttKubectlOptions, configData) 37 | 38 | kubeConfigPath, err := k8s.GetKubeConfigPathE(t) 39 | require.NoError(t, err) 40 | kubectlOptions := &KubectlOptions{ConfigPath: kubeConfigPath} 41 | 42 | ingress, err := GetIngress(kubectlOptions, uniqueID, "nginx-service-ingress") 43 | require.NoError(t, err) 44 | require.Equal(t, ingress.Name, "nginx-service-ingress") 45 | require.Equal(t, ingress.Namespace, uniqueID) 46 | } 47 | 48 | func TestWaitUntilIngressAvailableReturnsSuccessfully(t *testing.T) { 49 | t.Parallel() 50 | 51 | uniqueID := strings.ToLower(random.UniqueId()) 52 | ttKubectlOptions := k8s.NewKubectlOptions("", "", uniqueID) 53 | configData := fmt.Sprintf( 54 | exampleIngressDeploymentYAMLTemplate, 55 | uniqueID, uniqueID, uniqueID, uniqueID, uniqueID, 56 | ) 57 | defer k8s.KubectlDeleteFromString(t, ttKubectlOptions, configData) 58 | k8s.KubectlApplyFromString(t, ttKubectlOptions, configData) 59 | 60 | kubeConfigPath, err := k8s.GetKubeConfigPathE(t) 61 | require.NoError(t, err) 62 | kubectlOptions := &KubectlOptions{ConfigPath: kubeConfigPath} 63 | 64 | err = WaitUntilIngressEndpointProvisioned(kubectlOptions, uniqueID, ExampleIngressName, 60, 5*time.Second) 65 | require.NoError(t, err) 66 | } 67 | 68 | const exampleIngressDeploymentYAMLTemplate = `--- 69 | apiVersion: v1 70 | kind: Namespace 71 | metadata: 72 | name: %s 73 | --- 74 | apiVersion: apps/v1 75 | kind: Deployment 76 | metadata: 77 | name: nginx-deployment 78 | namespace: %s 79 | spec: 80 | selector: 81 | matchLabels: 82 | app: nginx 83 | 
replicas: 1 84 | template: 85 | metadata: 86 | labels: 87 | app: nginx 88 | spec: 89 | containers: 90 | - name: nginx 91 | image: nginx:1.15.7 92 | ports: 93 | - containerPort: 80 94 | --- 95 | kind: Service 96 | apiVersion: v1 97 | metadata: 98 | name: nginx-service 99 | namespace: %s 100 | spec: 101 | selector: 102 | app: nginx 103 | ports: 104 | - protocol: TCP 105 | targetPort: 80 106 | port: 80 107 | type: NodePort 108 | --- 109 | kind: Ingress 110 | apiVersion: networking.k8s.io/v1 111 | metadata: 112 | name: nginx-service-ingress 113 | namespace: %s 114 | spec: 115 | rules: 116 | - http: 117 | paths: 118 | - path: /app%s 119 | pathType: Prefix 120 | backend: 121 | service: 122 | name: nginx-service 123 | port: 124 | number: 80 125 | ` 126 | -------------------------------------------------------------------------------- /kubectl/kubectl.go: -------------------------------------------------------------------------------- 1 | // Package kubectl contains functions to work with kubectl command line. 2 | package kubectl 3 | -------------------------------------------------------------------------------- /kubectl/node.go: -------------------------------------------------------------------------------- 1 | package kubectl 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | "time" 7 | 8 | "github.com/gruntwork-io/go-commons/collections" 9 | "github.com/gruntwork-io/go-commons/errors" 10 | "github.com/hashicorp/go-multierror" 11 | corev1 "k8s.io/api/core/v1" 12 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 13 | "k8s.io/client-go/kubernetes" 14 | 15 | "github.com/gruntwork-io/kubergrunt/logging" 16 | ) 17 | 18 | // WaitForNodesReady will continuously watch the nodes until they reach the ready state. 
19 | func WaitForNodesReady( 20 | kubectlOptions *KubectlOptions, 21 | nodeIds []string, 22 | maxRetries int, 23 | sleepBetweenRetries time.Duration, 24 | ) error { 25 | logger := logging.GetProjectLogger() 26 | logger.Infof("Waiting for %d nodes in Kubernetes to reach ready state", len(nodeIds)) 27 | 28 | client, err := GetKubernetesClientFromOptions(kubectlOptions) 29 | if err != nil { 30 | return errors.WithStackTrace(err) 31 | } 32 | for i := 0; i < maxRetries; i++ { 33 | logger.Infof("Checking if nodes ready") 34 | nodes, err := GetNodes(client, metav1.ListOptions{}) 35 | if err != nil { 36 | return errors.WithStackTrace(err) 37 | } 38 | newNodes := filterNodesByID(nodes, nodeIds) 39 | logger.Debugf("Received %d nodes. Expecting %d nodes.", len(newNodes), len(nodeIds)) 40 | allNewNodesRegistered := len(newNodes) == len(nodeIds) 41 | allNewNodesReady := allNodesReady(newNodes) 42 | if allNewNodesRegistered && allNewNodesReady { 43 | return nil 44 | } 45 | if !allNewNodesRegistered { 46 | logger.Infof("Not all nodes are registered yet") 47 | } 48 | if !allNewNodesReady { 49 | logger.Infof("Not all nodes are ready yet") 50 | } 51 | logger.Infof("Waiting for %s...", sleepBetweenRetries) 52 | time.Sleep(sleepBetweenRetries) 53 | } 54 | // Time out 55 | logger.Errorf("Timedout waiting for nodes to reach ready state") 56 | if err := reportAllNotReadyNodes(client, nodeIds); err != nil { 57 | return err 58 | } 59 | return errors.WithStackTrace(NewNodeReadyTimeoutError(len(nodeIds))) 60 | } 61 | 62 | // reportAllNotReadyNodes will log error messages for each node that is not ready 63 | func reportAllNotReadyNodes(client *kubernetes.Clientset, nodeIds []string) error { 64 | logger := logging.GetProjectLogger() 65 | nodes, err := GetNodes(client, metav1.ListOptions{}) 66 | if err != nil { 67 | return errors.WithStackTrace(err) 68 | } 69 | filteredNodes := filterNodesByID(nodes, nodeIds) 70 | for _, node := range filteredNodes { 71 | if !IsNodeReady(node) { 72 | 
logger.Errorf("Node %s is not ready", node.Name) 73 | } 74 | } 75 | return nil 76 | } 77 | 78 | // allNodesReady will return true if all the nodes in the list are ready, and false when any node is not. 79 | func allNodesReady(nodes []corev1.Node) bool { 80 | logger := logging.GetProjectLogger() 81 | for _, node := range nodes { 82 | if !IsNodeReady(node) { 83 | logger.Debugf("Node %s is not ready", node.Name) 84 | return false 85 | } 86 | logger.Debugf("Node %s is ready", node.Name) 87 | } 88 | return true 89 | } 90 | 91 | // filterNodesByID will return the list of nodes that correspond to the given node id 92 | func filterNodesByID(nodes []corev1.Node, nodeIds []string) []corev1.Node { 93 | filteredNodes := []corev1.Node{} 94 | for _, node := range nodes { 95 | if collections.ListContainsElement(nodeIds, node.Name) { 96 | filteredNodes = append(filteredNodes, node) 97 | } 98 | } 99 | return filteredNodes 100 | } 101 | 102 | // DrainNodes calls `kubectl drain` on each node provided. Draining a node consists of: 103 | // - Taint the nodes so that new pods are not scheduled 104 | // - Evict all the pods gracefully 105 | // See 106 | // https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/#use-kubectl-drain-to-remove-a-node-from-service 107 | // for more information. 108 | func DrainNodes(kubectlOptions *KubectlOptions, nodeIds []string, timeout time.Duration, deleteEmptyDirData bool) error { 109 | // Concurrently trigger drain events for all requested nodes. 
110 | var wg sync.WaitGroup // So that we can wait for all the drain calls 111 | errChans := []chan NodeDrainError{} 112 | for _, nodeID := range nodeIds { 113 | wg.Add(1) 114 | errChannel := make(chan NodeDrainError, 1) 115 | go drainNode(&wg, errChannel, kubectlOptions, nodeID, timeout, deleteEmptyDirData) 116 | errChans = append(errChans, errChannel) 117 | } 118 | wg.Wait() 119 | 120 | var drainErrs *multierror.Error 121 | for _, errChan := range errChans { 122 | err := <-errChan 123 | if err.Error != nil { 124 | drainErrs = multierror.Append(drainErrs, err.Error) 125 | } 126 | } 127 | return errors.WithStackTrace(drainErrs.ErrorOrNil()) 128 | } 129 | 130 | func drainNode( 131 | wg *sync.WaitGroup, 132 | errChannel chan<- NodeDrainError, 133 | kubectlOptions *KubectlOptions, 134 | nodeID string, 135 | timeout time.Duration, 136 | deleteEmptyDirData bool, 137 | ) { 138 | defer wg.Done() 139 | defer close(errChannel) 140 | 141 | args := []string{"drain", nodeID, "--ignore-daemonsets", "--timeout", timeout.String()} 142 | 143 | if deleteEmptyDirData { 144 | args = append(args, "--delete-emptydir-data") 145 | } 146 | 147 | err := RunKubectl(kubectlOptions, args...) 148 | errChannel <- NodeDrainError{NodeID: nodeID, Error: err} 149 | } 150 | 151 | // CordonNodes calls `kubectl cordon` on each node provided. Cordoning a node makes it unschedulable, preventing new 152 | // Pods from being scheduled on the node. Note that cordoning a node does not evict the running Pods. To evict existing 153 | // Pods, use DrainNodes. 154 | func CordonNodes(kubectlOptions *KubectlOptions, nodeIds []string) error { 155 | // Concurrently trigger cordon events for all requested nodes. 
156 | var wg sync.WaitGroup // So that we can wait for all the cordon calls 157 | errChans := []chan NodeCordonError{} 158 | for _, nodeID := range nodeIds { 159 | wg.Add(1) 160 | errChannel := make(chan NodeCordonError, 1) // Collect all errors from each command 161 | go cordonNode(&wg, errChannel, kubectlOptions, nodeID) 162 | errChans = append(errChans, errChannel) 163 | } 164 | wg.Wait() 165 | 166 | var cordonErrs *multierror.Error 167 | for _, errChan := range errChans { 168 | err := <-errChan 169 | if err.Error != nil { 170 | cordonErrs = multierror.Append(cordonErrs, err.Error) 171 | } 172 | } 173 | return errors.WithStackTrace(cordonErrs.ErrorOrNil()) 174 | } 175 | 176 | func cordonNode( 177 | wg *sync.WaitGroup, 178 | errChannel chan<- NodeCordonError, 179 | kubectlOptions *KubectlOptions, 180 | nodeID string, 181 | ) { 182 | defer wg.Done() 183 | defer close(errChannel) 184 | err := RunKubectl(kubectlOptions, "cordon", nodeID) 185 | errChannel <- NodeCordonError{NodeID: nodeID, Error: err} 186 | } 187 | 188 | func waitForAllCordons(wg *sync.WaitGroup) { 189 | wg.Wait() 190 | } 191 | 192 | // GetNodes queries Kubernetes for information about the worker nodes registered to the cluster, given a 193 | // clientset. 194 | func GetNodes(clientset *kubernetes.Clientset, options metav1.ListOptions) ([]corev1.Node, error) { 195 | nodes, err := clientset.CoreV1().Nodes().List(context.Background(), options) 196 | if err != nil { 197 | return nil, err 198 | } 199 | return nodes.Items, err 200 | } 201 | 202 | // IsNodeReady takes a Kubernetes Node information object and checks if the Node is in the ready state. 
203 | func IsNodeReady(node corev1.Node) bool { 204 | for _, condition := range node.Status.Conditions { 205 | if condition.Type == corev1.NodeReady { 206 | return condition.Status == corev1.ConditionTrue 207 | } 208 | } 209 | return false 210 | } 211 | -------------------------------------------------------------------------------- /kubectl/node_test.go: -------------------------------------------------------------------------------- 1 | package kubectl 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/gruntwork-io/terratest/modules/k8s" 8 | "github.com/stretchr/testify/require" 9 | corev1 "k8s.io/api/core/v1" 10 | ) 11 | 12 | func TestWaitForNodesReady(t *testing.T) { 13 | t.Parallel() 14 | 15 | kubeConfigPath, err := k8s.GetKubeConfigPathE(t) 16 | require.NoError(t, err) 17 | ttKubectlOptions := k8s.NewKubectlOptions("", kubeConfigPath, "") 18 | 19 | node := getNodes(t, ttKubectlOptions)[0] 20 | nodeID := node.Name 21 | require.NoError(t, WaitForNodesReady(&KubectlOptions{ConfigPath: kubeConfigPath}, []string{nodeID}, 40, 15*time.Second)) 22 | } 23 | 24 | func TestFilterNodesById(t *testing.T) { 25 | t.Parallel() 26 | 27 | kubeConfigPath, err := k8s.GetKubeConfigPathE(t) 28 | require.NoError(t, err) 29 | ttKubectlOptions := k8s.NewKubectlOptions("", kubeConfigPath, "") 30 | 31 | nodes := getNodes(t, ttKubectlOptions) 32 | require.Equal(t, len(filterNodesByID(nodes, []string{})), 0) 33 | require.Equal(t, len(filterNodesByID(nodes, []string{nodes[0].Name})), 1) 34 | } 35 | 36 | func getNodes(t *testing.T, options *k8s.KubectlOptions) []corev1.Node { 37 | nodes := k8s.GetNodes(t, options) 38 | // Assumes local kubernetes (minikube or docker-for-desktop kube), where there is only one node 39 | require.Equal(t, len(nodes), 1) 40 | return nodes 41 | } 42 | -------------------------------------------------------------------------------- /kubectl/options.go: -------------------------------------------------------------------------------- 1 | package kubectl 
2 | 3 | import ( 4 | "encoding/base64" 5 | "io/ioutil" 6 | "os" 7 | 8 | "github.com/aws/aws-sdk-go/aws" 9 | "github.com/gruntwork-io/go-commons/errors" 10 | "github.com/sirupsen/logrus" 11 | "k8s.io/client-go/tools/clientcmd" 12 | "k8s.io/client-go/tools/clientcmd/api" 13 | 14 | "github.com/gruntwork-io/kubergrunt/eksawshelper" 15 | "github.com/gruntwork-io/kubergrunt/logging" 16 | ) 17 | 18 | // AuthScheme is an enum that indicates how to authenticate to the Kubernetes cluster. 19 | type AuthScheme int 20 | 21 | const ( 22 | ConfigBased AuthScheme = iota 23 | DirectAuth 24 | EKSClusterBased 25 | ) 26 | 27 | // Represents common options necessary to specify for all Kubectl calls 28 | type KubectlOptions struct { 29 | // Config based authentication scheme 30 | ContextName string 31 | ConfigPath string 32 | 33 | // Direct authentication scheme. Has precedence over config based scheme. All 3 values must be set. 34 | Server string 35 | Base64PEMCertificateAuthority string 36 | BearerToken string 37 | 38 | // EKS based authentication scheme. Has precedence over direct or config based scheme. 39 | EKSClusterArn string 40 | } 41 | 42 | type serverInfo struct { 43 | Server string 44 | Base64PEMCertificateAuthority string 45 | BearerToken string 46 | } 47 | 48 | // TempConfigFromAuthInfo will create a temporary kubeconfig file that can be used with commands that don't support 49 | // directly configuring auth info (e.g helm). 
50 | func (options *KubectlOptions) TempConfigFromAuthInfo() (string, error) { 51 | logger := logging.GetProjectLogger() 52 | logger.Infof("Creating temporary file to act as kubeconfig with auth info") 53 | 54 | tmpfile, err := ioutil.TempFile("", "") 55 | if err != nil { 56 | return "", errors.WithStackTrace(err) 57 | } 58 | err = tmpfile.Close() 59 | if err != nil { 60 | return tmpfile.Name(), errors.WithStackTrace(err) 61 | } 62 | logger.Infof("Created %s to act as temporary kubeconfig file.", tmpfile.Name()) 63 | 64 | scheme := options.AuthScheme() 65 | switch scheme { 66 | case DirectAuth: 67 | err = tempConfigFromDirectAuthInfo( 68 | logger, 69 | tmpfile, 70 | serverInfo{ 71 | Server: options.Server, 72 | Base64PEMCertificateAuthority: options.Base64PEMCertificateAuthority, 73 | BearerToken: options.BearerToken, 74 | }, 75 | ) 76 | case EKSClusterBased: 77 | err = tempConfigFromEKSClusterInfo(logger, tmpfile, options.EKSClusterArn) 78 | default: 79 | return "", errors.WithStackTrace(AuthSchemeNotSupported{scheme}) 80 | } 81 | 82 | return tmpfile.Name(), err 83 | } 84 | 85 | func tempConfigFromDirectAuthInfo(logger *logrus.Entry, tmpfile *os.File, serverInfo serverInfo) error { 86 | config := api.NewConfig() 87 | err := AddClusterToConfig( 88 | config, 89 | "default", 90 | serverInfo.Server, 91 | serverInfo.Base64PEMCertificateAuthority, 92 | ) 93 | if err != nil { 94 | return err 95 | } 96 | 97 | logger.Infof("Adding auth info to config") 98 | authInfo := api.NewAuthInfo() 99 | authInfo.Token = serverInfo.BearerToken 100 | config.AuthInfos["default"] = authInfo 101 | logger.Infof("Done adding auth info to config") 102 | 103 | err = AddContextToConfig( 104 | config, 105 | "default", 106 | "default", 107 | "default", 108 | ) 109 | if err != nil { 110 | return err 111 | } 112 | config.CurrentContext = "default" 113 | 114 | if err := saveConfigToFile(config, tmpfile); err != nil { 115 | return errors.WithStackTrace(err) 116 | } 117 | return nil 118 | } 119 | 120 
| func tempConfigFromEKSClusterInfo(logger *logrus.Entry, tmpfile *os.File, eksClusterArn string) error { 121 | info, err := getKubeCredentialsFromEKSCluster(eksClusterArn) 122 | if err != nil { 123 | return err 124 | } 125 | return tempConfigFromDirectAuthInfo(logger, tmpfile, *info) 126 | } 127 | 128 | func saveConfigToFile(config *api.Config, tmpfile *os.File) error { 129 | // In order to be able to render and save the config, we need to have the config file be in a format that k8s 130 | // understands, so we first initialize the empty file with a minimal config structure. 131 | if err := CreateInitialConfig(tmpfile.Name()); err != nil { 132 | return err 133 | } 134 | 135 | // Then load the contents into a struct that can be saved, and save the generated config data. 136 | kubeconfig := LoadConfigFromPath(tmpfile.Name()) 137 | if err := clientcmd.ModifyConfig(kubeconfig.ConfigAccess(), *config, false); err != nil { 138 | return errors.WithStackTrace(err) 139 | } 140 | return nil 141 | } 142 | 143 | func getKubeCredentialsFromEKSCluster(eksClusterArn string) (*serverInfo, error) { 144 | cluster, err := eksawshelper.GetClusterByArn(eksClusterArn) 145 | if err != nil { 146 | return nil, err 147 | } 148 | 149 | server := aws.StringValue(cluster.Endpoint) 150 | b64PEMCA := aws.StringValue(cluster.CertificateAuthority.Data) 151 | 152 | clusterName, err := eksawshelper.GetClusterNameFromArn(eksClusterArn) 153 | if err != nil { 154 | return nil, err 155 | } 156 | token, _, err := eksawshelper.GetKubernetesTokenForCluster(clusterName) 157 | if err != nil { 158 | return nil, err 159 | } 160 | 161 | info := serverInfo{ 162 | Server: server, 163 | Base64PEMCertificateAuthority: b64PEMCA, 164 | BearerToken: token.Token, 165 | } 166 | return &info, nil 167 | } 168 | 169 | // TempCAFile creates a temporary file to hold the Certificate Authority data so that it can be passed on to kubectl. 
170 | func (options *KubectlOptions) TempCAFile() (string, error) { 171 | logger := logging.GetProjectLogger() 172 | logger.Infof("Creating temporary file to hold certificate authority data") 173 | 174 | tmpfile, err := ioutil.TempFile("", "") 175 | if err != nil { 176 | return "", errors.WithStackTrace(err) 177 | } 178 | defer tmpfile.Close() 179 | logger.Infof("Created %s to hold certificate authority data.", tmpfile.Name()) 180 | 181 | caData, err := base64.StdEncoding.DecodeString(options.Base64PEMCertificateAuthority) 182 | if err != nil { 183 | return tmpfile.Name(), errors.WithStackTrace(err) 184 | } 185 | _, err = tmpfile.Write(caData) 186 | return tmpfile.Name(), errors.WithStackTrace(err) 187 | } 188 | 189 | func (options *KubectlOptions) AuthScheme() AuthScheme { 190 | if options.EKSClusterArn != "" { 191 | return EKSClusterBased 192 | } else if options.Server != "" { 193 | return DirectAuth 194 | } 195 | return ConfigBased 196 | } 197 | 198 | func authSchemeToString(scheme AuthScheme) string { 199 | switch scheme { 200 | case ConfigBased: 201 | return "config-based" 202 | case DirectAuth: 203 | return "direct" 204 | case EKSClusterBased: 205 | return "eks-cluster-based" 206 | } 207 | // This should not happen 208 | return "unspecified" 209 | } 210 | -------------------------------------------------------------------------------- /kubectl/pod.go: -------------------------------------------------------------------------------- 1 | package kubectl 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/gruntwork-io/go-commons/errors" 7 | corev1 "k8s.io/api/core/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | ) 10 | 11 | // ListPods will look for pods in the given namespace and return them. 
12 | func ListPods(options *KubectlOptions, namespace string, filters metav1.ListOptions) ([]corev1.Pod, error) { 13 | client, err := GetKubernetesClientFromOptions(options) 14 | if err != nil { 15 | return nil, err 16 | } 17 | 18 | resp, err := client.CoreV1().Pods(namespace).List(context.Background(), filters) 19 | if err != nil { 20 | return nil, errors.WithStackTrace(err) 21 | } 22 | return resp.Items, nil 23 | } 24 | 25 | // IsPodReady returns True when a Pod is in the Ready status. 26 | func IsPodReady(pod corev1.Pod) bool { 27 | for _, condition := range pod.Status.Conditions { 28 | if condition.Type == corev1.PodReady { 29 | return condition.Status == corev1.ConditionTrue 30 | } 31 | } 32 | return false 33 | } 34 | -------------------------------------------------------------------------------- /kubectl/pod_test.go: -------------------------------------------------------------------------------- 1 | package kubectl 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/gruntwork-io/terratest/modules/k8s" 7 | "github.com/stretchr/testify/require" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | ) 10 | 11 | func TestListPodsReturnsPods(t *testing.T) { 12 | t.Parallel() 13 | 14 | kubeConfigPath, err := k8s.GetKubeConfigPathE(t) 15 | require.NoError(t, err) 16 | kubectlOptions := &KubectlOptions{ConfigPath: kubeConfigPath} 17 | 18 | // There are always Pods in the kube-system namespace in any kubernetes cluster 19 | pods, err := ListPods(kubectlOptions, "kube-system", metav1.ListOptions{}) 20 | require.NoError(t, err) 21 | require.True(t, len(pods) > 0) 22 | } 23 | -------------------------------------------------------------------------------- /kubectl/role.go: -------------------------------------------------------------------------------- 1 | package kubectl 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/gruntwork-io/go-commons/errors" 7 | rbacv1 "k8s.io/api/rbac/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | ) 10 | 11 | // PrepareTillerRole 
will construct a new Role struct with the provided 12 | // metadata. The role can later be used to add rules. 13 | func PrepareRole( 14 | namespace string, 15 | name string, 16 | labels map[string]string, 17 | annotations map[string]string, 18 | rules []rbacv1.PolicyRule, 19 | ) *rbacv1.Role { 20 | // Cannot use a struct literal due to promoted fields from the ObjectMeta 21 | newRole := rbacv1.Role{} 22 | newRole.Name = name 23 | newRole.Namespace = namespace 24 | newRole.Labels = labels 25 | newRole.Annotations = annotations 26 | newRole.Rules = rules 27 | return &newRole 28 | } 29 | 30 | // CreateRole will create the provided role on the Kubernetes cluster. 31 | func CreateRole(options *KubectlOptions, newRole *rbacv1.Role) error { 32 | client, err := GetKubernetesClientFromOptions(options) 33 | if err != nil { 34 | return err 35 | } 36 | 37 | _, err = client.RbacV1().Roles(newRole.Namespace).Create(context.Background(), newRole, metav1.CreateOptions{}) 38 | if err != nil { 39 | return errors.WithStackTrace(err) 40 | } 41 | return nil 42 | } 43 | 44 | // GetRole will get an RBAC role by name in the provided namespace 45 | func GetRole(options *KubectlOptions, namespace string, name string) (*rbacv1.Role, error) { 46 | client, err := GetKubernetesClientFromOptions(options) 47 | if err != nil { 48 | return nil, err 49 | } 50 | 51 | role, err := client.RbacV1().Roles(namespace).Get(context.Background(), name, metav1.GetOptions{}) 52 | if err != nil { 53 | return nil, errors.WithStackTrace(err) 54 | } 55 | return role, nil 56 | } 57 | 58 | // ListRole will list all roles that match the provided filters in the provided namespace 59 | func ListRoles(options *KubectlOptions, namespace string, filters metav1.ListOptions) ([]rbacv1.Role, error) { 60 | client, err := GetKubernetesClientFromOptions(options) 61 | if err != nil { 62 | return nil, err 63 | } 64 | 65 | resp, err := client.RbacV1().Roles(namespace).List(context.Background(), filters) 66 | if err != nil { 67 | 
return nil, errors.WithStackTrace(err) 68 | } 69 | return resp.Items, nil 70 | } 71 | 72 | // DeleteRole will delete the role in the provided namespace that has the provided name. 73 | func DeleteRole(options *KubectlOptions, namespace string, name string) error { 74 | client, err := GetKubernetesClientFromOptions(options) 75 | if err != nil { 76 | return err 77 | } 78 | 79 | err = client.RbacV1().Roles(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) 80 | if err != nil { 81 | return errors.WithStackTrace(err) 82 | } 83 | return nil 84 | } 85 | -------------------------------------------------------------------------------- /kubectl/rolebinding.go: -------------------------------------------------------------------------------- 1 | package kubectl 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/gruntwork-io/go-commons/errors" 7 | rbacv1 "k8s.io/api/rbac/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | ) 10 | 11 | // PrepareTillerRoleBinding will construct a new RoleBinding struct with the provided metadata. The role can later 12 | // be used to add rules. 13 | func PrepareRoleBinding( 14 | namespace string, 15 | name string, 16 | labels map[string]string, 17 | annotations map[string]string, 18 | subjects []rbacv1.Subject, 19 | roleRef rbacv1.RoleRef, 20 | ) *rbacv1.RoleBinding { 21 | newRoleBinding := rbacv1.RoleBinding{} 22 | newRoleBinding.Name = name 23 | newRoleBinding.Namespace = namespace 24 | newRoleBinding.Labels = labels 25 | newRoleBinding.Annotations = annotations 26 | newRoleBinding.Subjects = subjects 27 | newRoleBinding.RoleRef = roleRef 28 | return &newRoleBinding 29 | } 30 | 31 | // CreateRoleBinding will create the provided role binding on the Kubernetes cluster. 
32 | func CreateRoleBinding(options *KubectlOptions, newRoleBinding *rbacv1.RoleBinding) error { 33 | client, err := GetKubernetesClientFromOptions(options) 34 | if err != nil { 35 | return err 36 | } 37 | 38 | _, err = client.RbacV1().RoleBindings(newRoleBinding.Namespace).Create(context.Background(), newRoleBinding, metav1.CreateOptions{}) 39 | if err != nil { 40 | return errors.WithStackTrace(err) 41 | } 42 | return nil 43 | } 44 | 45 | // GetRoleBinding will get an RBAC role binding by name in the provided namespace 46 | func GetRoleBinding(options *KubectlOptions, namespace string, name string) (*rbacv1.RoleBinding, error) { 47 | client, err := GetKubernetesClientFromOptions(options) 48 | if err != nil { 49 | return nil, err 50 | } 51 | 52 | roleBinding, err := client.RbacV1().RoleBindings(namespace).Get(context.Background(), name, metav1.GetOptions{}) 53 | if err != nil { 54 | return nil, errors.WithStackTrace(err) 55 | } 56 | return roleBinding, nil 57 | } 58 | 59 | // ListRoleBindings will list all role bindings that match the provided filters in the provided namespace 60 | func ListRoleBindings(options *KubectlOptions, namespace string, filters metav1.ListOptions) ([]rbacv1.RoleBinding, error) { 61 | client, err := GetKubernetesClientFromOptions(options) 62 | if err != nil { 63 | return nil, err 64 | } 65 | 66 | resp, err := client.RbacV1().RoleBindings(namespace).List(context.Background(), filters) 67 | if err != nil { 68 | return nil, errors.WithStackTrace(err) 69 | } 70 | return resp.Items, nil 71 | } 72 | 73 | // DeleteRole will delete the role in the provided namespace that has the provided name. 
74 | func DeleteRoleBinding(options *KubectlOptions, namespace string, name string) error { 75 | client, err := GetKubernetesClientFromOptions(options) 76 | if err != nil { 77 | return err 78 | } 79 | 80 | err = client.RbacV1().RoleBindings(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) 81 | if err != nil { 82 | return errors.WithStackTrace(err) 83 | } 84 | return nil 85 | } 86 | -------------------------------------------------------------------------------- /kubectl/secret.go: -------------------------------------------------------------------------------- 1 | package kubectl 2 | 3 | import ( 4 | "context" 5 | "io/ioutil" 6 | 7 | "github.com/gruntwork-io/go-commons/errors" 8 | corev1 "k8s.io/api/core/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | ) 11 | 12 | // PrepareSecret will construct a new Secret struct with the provided metadata. This can then be used to append data to 13 | // it, either from a file (using AddToSecretFromFile) or raw data (using AddToSecretFromData). 14 | func PrepareSecret( 15 | namespace string, 16 | name string, 17 | labels map[string]string, 18 | annotations map[string]string, 19 | ) *corev1.Secret { 20 | newSecret := corev1.Secret{} 21 | newSecret.Name = name 22 | newSecret.Namespace = namespace 23 | newSecret.Labels = labels 24 | newSecret.Annotations = annotations 25 | newSecret.Data = map[string][]byte{} 26 | return &newSecret 27 | } 28 | 29 | // AddToSecretFromFile will add data to the secret from a file, attached using the provided key. 30 | func AddToSecretFromFile(secret *corev1.Secret, key string, path string) error { 31 | data, err := ioutil.ReadFile(path) 32 | if err != nil { 33 | return errors.WithStackTrace(err) 34 | } 35 | secret.Data[key] = data 36 | return nil 37 | } 38 | 39 | // AddToSecretFromData will add data to the secret at the provided key. 
40 | func AddToSecretFromData(secret *corev1.Secret, key string, rawData []byte) { 41 | secret.Data[key] = rawData 42 | } 43 | 44 | // CreateSecret will create the provided secret on the Kubernetes cluster. 45 | func CreateSecret(options *KubectlOptions, newSecret *corev1.Secret) error { 46 | client, err := GetKubernetesClientFromOptions(options) 47 | if err != nil { 48 | return err 49 | } 50 | 51 | _, err = client.CoreV1().Secrets(newSecret.Namespace).Create(context.Background(), newSecret, metav1.CreateOptions{}) 52 | if err != nil { 53 | return errors.WithStackTrace(err) 54 | } 55 | return nil 56 | } 57 | 58 | // GetSecret will get a Kubernetes secret by name in the provided namespace. 59 | func GetSecret(options *KubectlOptions, namespace string, name string) (*corev1.Secret, error) { 60 | client, err := GetKubernetesClientFromOptions(options) 61 | if err != nil { 62 | return nil, err 63 | } 64 | 65 | secret, err := client.CoreV1().Secrets(namespace).Get(context.Background(), name, metav1.GetOptions{}) 66 | if err != nil { 67 | return nil, errors.WithStackTrace(err) 68 | } 69 | return secret, nil 70 | } 71 | 72 | // ListSecrets will list all the secrets that match the provided filters in the provided namespace. 73 | func ListSecrets(options *KubectlOptions, namespace string, filters metav1.ListOptions) ([]corev1.Secret, error) { 74 | client, err := GetKubernetesClientFromOptions(options) 75 | if err != nil { 76 | return nil, err 77 | } 78 | 79 | resp, err := client.CoreV1().Secrets(namespace).List(context.Background(), filters) 80 | if err != nil { 81 | return nil, errors.WithStackTrace(err) 82 | } 83 | return resp.Items, nil 84 | } 85 | 86 | // DeleteSecret will delete the secret in the provided namespace that has the provided name. 
87 | func DeleteSecret(options *KubectlOptions, namespace string, secretName string) error { 88 | client, err := GetKubernetesClientFromOptions(options) 89 | if err != nil { 90 | return err 91 | } 92 | 93 | err = client.CoreV1().Secrets(namespace).Delete(context.Background(), secretName, metav1.DeleteOptions{}) 94 | if err != nil { 95 | return errors.WithStackTrace(err) 96 | } 97 | return nil 98 | } 99 | -------------------------------------------------------------------------------- /kubectl/service.go: -------------------------------------------------------------------------------- 1 | package kubectl 2 | 3 | import ( 4 | "context" 5 | "strings" 6 | 7 | "github.com/gruntwork-io/go-commons/errors" 8 | corev1 "k8s.io/api/core/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | "k8s.io/client-go/kubernetes" 11 | 12 | "github.com/gruntwork-io/kubergrunt/logging" 13 | ) 14 | 15 | const ( 16 | lbTypeAnnotationKey = "service.beta.kubernetes.io/aws-load-balancer-type" 17 | lbTargetAnnotationKey = "service.beta.kubernetes.io/aws-load-balancer-nlb-target-type" 18 | 19 | lbTypeAnnotationNLB = "nlb" 20 | lbTypeAnnotationExternal = "external" 21 | lbTargetAnnotationIP = "ip" 22 | lbTargetAnnotationNLBIP = "nlb-ip" 23 | lbTargetAnnotationInstance = "instance" 24 | ) 25 | 26 | // GetAllServices queries Kubernetes for information on all deployed Service resources in the current cluster that the 27 | // provided client can access. 
28 | func GetAllServices(clientset *kubernetes.Clientset) ([]corev1.Service, error) { 29 | // We use the empty string for the namespace to indicate all namespaces 30 | namespace := "" 31 | servicesApi := clientset.CoreV1().Services(namespace) 32 | 33 | services := []corev1.Service{} 34 | params := metav1.ListOptions{} 35 | for { 36 | resp, err := servicesApi.List(context.Background(), params) 37 | if err != nil { 38 | return nil, errors.WithStackTrace(err) 39 | } 40 | for _, service := range resp.Items { 41 | services = append(services, service) 42 | } 43 | if resp.Continue == "" { 44 | break 45 | } 46 | params.Continue = resp.Continue 47 | } 48 | return services, nil 49 | } 50 | 51 | // GetAWSLoadBalancers will query Kubernetes for all services, filter for LoadBalancer services, and then parse out the 52 | // following information: 53 | // - Type of LB (NLB or Classic LB) 54 | // - Instance target or IP target 55 | // TODO: support ALBs with Ingress as well 56 | func GetAWSLoadBalancers(kubectlOptions *KubectlOptions) ([]AWSLoadBalancer, error) { 57 | logger := logging.GetProjectLogger() 58 | logger.Infof("Getting all LoadBalancers from services in kubernetes") 59 | 60 | client, err := GetKubernetesClientFromOptions(kubectlOptions) 61 | if err != nil { 62 | return nil, errors.WithStackTrace(err) 63 | } 64 | services, err := GetAllServices(client) 65 | if err != nil { 66 | return nil, errors.WithStackTrace(err) 67 | } 68 | loadBalancerServices := filterLoadBalancerServices(services) 69 | logger.Infof("Found %d LoadBalancer services of %d services in kubernetes.", len(loadBalancerServices), len(services)) 70 | 71 | lbs := []AWSLoadBalancer{} 72 | for _, service := range loadBalancerServices { 73 | lbName, err := GetLoadBalancerNameFromService(service) 74 | if err != nil { 75 | return nil, errors.WithStackTrace(err) 76 | } 77 | lbType, lbTargetType, err := GetLoadBalancerTypeFromService(service) 78 | if err != nil { 79 | return nil, err 80 | } 81 | lbs = append( 82 | 
lbs, 83 | AWSLoadBalancer{ 84 | Name: lbName, 85 | Type: lbType, 86 | TargetType: lbTargetType, 87 | }, 88 | ) 89 | } 90 | logger.Infof("Successfully extracted AWS Load Balancers") 91 | return lbs, nil 92 | } 93 | 94 | // filterLoadBalancerServices will return services that are of type LoadBalancer from the provided list of services. 95 | func filterLoadBalancerServices(services []corev1.Service) []corev1.Service { 96 | out := []corev1.Service{} 97 | for _, service := range services { 98 | if service.Spec.Type == corev1.ServiceTypeLoadBalancer { 99 | out = append(out, service) 100 | } 101 | } 102 | return out 103 | } 104 | 105 | // GetLoadBalancerNameFromService will return the name of the LoadBalancer given a Kubernetes service object 106 | func GetLoadBalancerNameFromService(service corev1.Service) (string, error) { 107 | loadbalancerInfo := service.Status.LoadBalancer.Ingress 108 | if len(loadbalancerInfo) == 0 { 109 | return "", NewLoadBalancerNotReadyError(service.Name) 110 | } 111 | loadbalancerHostname := loadbalancerInfo[0].Hostname 112 | 113 | // TODO: When expanding to GCP, update this logic 114 | return getAWSLoadBalancerNameFromHostname(loadbalancerHostname) 115 | } 116 | 117 | // getAWSLoadBalancerNameFromHostname will return the AWS LoadBalancer name given the assigned hostname. For ELB (both 118 | // v1 and v2), the subdomain will be one of NAME-TIME or internal-NAME-TIME. Note that we need to use strings.Join here 119 | // to account for LB names that contain '-'. 
120 | func getAWSLoadBalancerNameFromHostname(hostname string) (string, error) { 121 | loadbalancerHostnameSubDomain := strings.Split(hostname, ".")[0] 122 | loadbalancerHostnameSubDomainParts := strings.Split(loadbalancerHostnameSubDomain, "-") 123 | numParts := len(loadbalancerHostnameSubDomainParts) 124 | if numParts < 2 { 125 | return "", NewLoadBalancerNameFormatError(hostname) 126 | } else if loadbalancerHostnameSubDomainParts[0] == "internal" { 127 | return strings.Join(loadbalancerHostnameSubDomainParts[1:numParts-1], "-"), nil 128 | } else { 129 | return strings.Join(loadbalancerHostnameSubDomainParts[:numParts-1], "-"), nil 130 | } 131 | } 132 | 133 | // GetLoadBalancerTypeFromService will return the ELB type and target type of the given LoadBalancer Service. This uses 134 | // the following heuristic: 135 | // - A LoadBalancer Service with no type annotations will default to Classic Load Balancer (from the in-tree 136 | // controller). 137 | // - If service.beta.kubernetes.io/aws-load-balancer-type is set to nlb or external, then the ELB will be NLB. (When 138 | // external, we assume the LB controller handles it) 139 | // - For LB services handled by the LB controller, also check for 140 | // service.beta.kubernetes.io/aws-load-balancer-nlb-target-type which determines the target type. Otherwise, it is 141 | // always instance target type. 
142 | func GetLoadBalancerTypeFromService(service corev1.Service) (ELBType, ELBTargetType, error) { 143 | annotations := service.ObjectMeta.Annotations 144 | lbTypeString, hasLBTypeAnnotation := annotations[lbTypeAnnotationKey] 145 | if !hasLBTypeAnnotation { 146 | // No annotation base case 147 | return CLB, InstanceTarget, nil 148 | } 149 | 150 | if lbTypeString == lbTypeAnnotationNLB { 151 | // in-tree controller based NLB provisioning only supports instance targets 152 | return NLB, InstanceTarget, nil 153 | } else if lbTypeString != lbTypeAnnotationExternal { 154 | // Unsupported load balancer type 155 | return UnknownELB, UnknownELBTarget, errors.WithStackTrace(UnknownAWSLoadBalancerTypeErr{typeKey: lbTypeAnnotationKey, typeStr: lbTypeString}) 156 | } 157 | 158 | // lbTypeString is external at this point, which means we are using the AWS LB controller. This means we need to 159 | // take into account the target type. 160 | lbTargetTypeString, hasLBTargetAnnotation := annotations[lbTargetAnnotationKey] 161 | if !hasLBTargetAnnotation { 162 | // Default is instance target type 163 | return NLB, InstanceTarget, nil 164 | } 165 | switch lbTargetTypeString { 166 | case lbTargetAnnotationInstance: 167 | return NLB, InstanceTarget, nil 168 | case lbTargetAnnotationIP, lbTargetAnnotationNLBIP: 169 | return NLB, IPTarget, nil 170 | default: 171 | return NLB, UnknownELBTarget, errors.WithStackTrace(UnknownAWSLoadBalancerTypeErr{typeKey: lbTargetAnnotationKey, typeStr: lbTargetTypeString}) 172 | } 173 | } 174 | -------------------------------------------------------------------------------- /kubectl/service_test.go: -------------------------------------------------------------------------------- 1 | package kubectl 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | func TestGetAWSLoadBalancerNameFromHostname(t *testing.T) { 11 | t.Parallel() 12 | 13 | testCases := []struct { 14 | hostname 
string 15 | expectedLBName string 16 | expectErr bool 17 | }{ 18 | {"foo.bar.com", "", true}, 19 | { 20 | "k8s-kubesyst-agtvv3pn-73e284744f-1915683655.ap-northeast-1.elb.amazonaws.com", 21 | "k8s-kubesyst-agtvv3pn-73e284744f", 22 | false, 23 | }, 24 | { 25 | "73e284744f-1915683655.ap-northeast-1.elb.amazonaws.com", 26 | "73e284744f", 27 | false, 28 | }, 29 | { 30 | "internal-73e284744f-1915683655.ap-northeast-1.elb.amazonaws.com", 31 | "73e284744f", 32 | false, 33 | }, 34 | { 35 | "internal-k8s-kubesyst-agtvv3pn-73e284744f-1915683655.ap-northeast-1.elb.amazonaws.com", 36 | "k8s-kubesyst-agtvv3pn-73e284744f", 37 | false, 38 | }, 39 | } 40 | 41 | for _, tc := range testCases { 42 | t.Run(tc.hostname, func(t *testing.T) { 43 | lbName, err := getAWSLoadBalancerNameFromHostname(tc.hostname) 44 | if tc.expectErr { 45 | require.Error(t, err) 46 | } else { 47 | require.NoError(t, err) 48 | assert.Equal(t, tc.expectedLBName, lbName) 49 | } 50 | }) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /kubectl/test_helpers.go: -------------------------------------------------------------------------------- 1 | package kubectl 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/gruntwork-io/terratest/modules/k8s" 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | func GetTestKubectlOptions(t *testing.T) *KubectlOptions { 11 | kubeConfigPath, err := k8s.GetKubeConfigPathE(t) 12 | require.NoError(t, err) 13 | return &KubectlOptions{ConfigPath: kubeConfigPath} 14 | } 15 | 16 | func GetKubectlOptions(t *testing.T) (*k8s.KubectlOptions, *KubectlOptions) { 17 | ttKubectlOptions := k8s.NewKubectlOptions("", "", "") 18 | configPath, err := k8s.KubeConfigPathFromHomeDirE() 19 | require.NoError(t, err) 20 | kubectlOptions := &KubectlOptions{ConfigPath: configPath} 21 | return ttKubectlOptions, kubectlOptions 22 | } 23 | -------------------------------------------------------------------------------- /kubectl/types.go: 
-------------------------------------------------------------------------------- 1 | package kubectl 2 | 3 | // AWSLoadBalancer is a struct that represents an AWS ELB that is associated with Kubernetes resources (Service or 4 | // Ingress). 5 | type AWSLoadBalancer struct { 6 | Name string 7 | Type ELBType 8 | TargetType ELBTargetType 9 | } 10 | 11 | // ELBType represents the underlying type of the load balancer (classic, network, or application) 12 | type ELBType int 13 | 14 | const ( 15 | ALB ELBType = iota 16 | NLB 17 | CLB 18 | UnknownELB 19 | ) 20 | 21 | // ELBTargetType represents the different ways the AWS ELB routes to the services. 22 | type ELBTargetType int 23 | 24 | const ( 25 | InstanceTarget ELBTargetType = iota 26 | IPTarget 27 | UnknownELBTarget 28 | ) 29 | -------------------------------------------------------------------------------- /kubectl/validate.go: -------------------------------------------------------------------------------- 1 | package kubectl 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/gruntwork-io/go-commons/errors" 7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | ) 9 | 10 | // ValidateNamespaceExists will return an error if the provided namespace does not exist on the Kubernetes cluster. 11 | func ValidateNamespaceExists(kubectlOptions *KubectlOptions, namespace string) error { 12 | client, err := GetKubernetesClientFromOptions(kubectlOptions) 13 | if err != nil { 14 | return errors.WithStackTrace(err) 15 | } 16 | 17 | _, err = client.CoreV1().Namespaces().Get(context.Background(), namespace, metav1.GetOptions{}) 18 | if err != nil { 19 | return errors.WithStackTrace(err) 20 | } 21 | return nil 22 | } 23 | 24 | // ValidateServiceAccountExists will return an error if the provided service account does not exist on the provided 25 | // namespace in the Kubernetes cluster. 
26 | func ValidateServiceAccountExists(kubectlOptions *KubectlOptions, namespace string, serviceAccount string) error { 27 | client, err := GetKubernetesClientFromOptions(kubectlOptions) 28 | if err != nil { 29 | return errors.WithStackTrace(err) 30 | } 31 | 32 | _, err = client.CoreV1().ServiceAccounts(namespace).Get(context.Background(), serviceAccount, metav1.GetOptions{}) 33 | if err != nil { 34 | return errors.WithStackTrace(err) 35 | } 36 | return nil 37 | } 38 | -------------------------------------------------------------------------------- /logging/logging.go: -------------------------------------------------------------------------------- 1 | // Logging package includes code for managing the logger for kubergrunt 2 | package logging 3 | 4 | import ( 5 | "github.com/gruntwork-io/go-commons/logging" 6 | "github.com/sirupsen/logrus" 7 | ) 8 | 9 | func GetProjectLogger() *logrus.Entry { 10 | logger := logging.GetLogger("") 11 | return logger.WithField("name", "kubergrunt") 12 | } 13 | -------------------------------------------------------------------------------- /tls/cert_common.go: -------------------------------------------------------------------------------- 1 | package tls 2 | 3 | import ( 4 | "crypto/rand" 5 | "crypto/x509" 6 | "crypto/x509/pkix" 7 | "encoding/pem" 8 | "fmt" 9 | "io/ioutil" 10 | "math/big" 11 | "net" 12 | "time" 13 | 14 | "github.com/gruntwork-io/go-commons/errors" 15 | 16 | "github.com/gruntwork-io/kubergrunt/kubectl" 17 | ) 18 | 19 | // CertificateKeyPairPath represents the path where the certificate key pair resides. 20 | type CertificateKeyPairPath struct { 21 | CertificatePath string 22 | PrivateKeyPath string 23 | PublicKeyPath string 24 | } 25 | 26 | // StoreCertificate will take the provided certificate, encode it to pem, and store it on disk at the specified path. 
27 | func StoreCertificate(certificate *x509.Certificate, path string) error { 28 | pemBlock := EncodeCertificateToPEM(certificate) 29 | return errors.WithStackTrace(StorePEM(pemBlock, path)) 30 | } 31 | 32 | // CreateCertificateFromKeys will take the provided key pair and generate the associated TLS certificate. You can 33 | // customize the distinguished name on the certificate, the validity time span, whether or not it is a CA certificate, 34 | // and sign the certificate with a given CA using the available parameters. 35 | // Note: The passed in private key should be the private key of the SIGNER (certificate signing), while the public key 36 | // should be the public key of the SIGNEE (certificate being signed). 37 | // Code based on generate_cert command in crypto/tls: https://golang.org/src/crypto/tls/generate_cert.go 38 | func CreateCertificateFromKeys( 39 | validityTimeSpan time.Duration, 40 | distinguishedName pkix.Name, 41 | signedBy *x509.Certificate, 42 | isCA bool, 43 | dnsNames []string, 44 | pubKey interface{}, // This has to be able to accept the key in any format, like the underlying go func 45 | privKey interface{}, // This has to be able to accept the key in any format, like the underlying go func 46 | ) ([]byte, error) { 47 | serialNumber, err := generateSerialNumber() 48 | if err != nil { 49 | return nil, errors.WithStackTrace(err) 50 | } 51 | 52 | template := createCertificateTemplate(serialNumber, distinguishedName, validityTimeSpan, isCA, dnsNames) 53 | // If signedBy is nil, we will set it to the template so that the generated certificate is self signed 54 | if signedBy == nil { 55 | signedBy = &template 56 | } 57 | certificateBytes, err := x509.CreateCertificate(rand.Reader, &template, signedBy, pubKey, privKey) 58 | if err != nil { 59 | return nil, errors.WithStackTrace(err) 60 | } 61 | return certificateBytes, nil 62 | } 63 | 64 | // generateSerialNumber will generate a random serial number to use for generating a new certificate 65 | 
func generateSerialNumber() (*big.Int, error) { 66 | serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) 67 | return rand.Int(rand.Reader, serialNumberLimit) 68 | } 69 | 70 | // createCertificateTemplate will generate the Certificate struct with the metadata. The actual certificate data still 71 | // needs to be appended to the struct. 72 | func createCertificateTemplate( 73 | serialNumber *big.Int, 74 | distinguishedName pkix.Name, 75 | validityTimeSpan time.Duration, 76 | isCA bool, 77 | dnsNames []string, 78 | ) x509.Certificate { 79 | validFrom := time.Now() 80 | template := x509.Certificate{ 81 | SerialNumber: serialNumber, 82 | Subject: distinguishedName, 83 | 84 | NotBefore: validFrom, 85 | NotAfter: validFrom.Add(validityTimeSpan), 86 | 87 | KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, 88 | ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, 89 | BasicConstraintsValid: true, 90 | 91 | DNSNames: dnsNames, 92 | } 93 | if isCA { 94 | template.IsCA = true 95 | template.KeyUsage |= x509.KeyUsageCertSign 96 | } 97 | 98 | // TODO: make generic so can be used for generating other kinds of certs 99 | // Add localhost, because the helm client will open a port forwarder via the Kubernetes API to access Tiller. 100 | // Because of that, helm requires a certificate that allows localhost. 101 | template.IPAddresses = append(template.IPAddresses, net.ParseIP("127.0.0.1")) 102 | 103 | return template 104 | } 105 | 106 | // LoadCertificate will load a Certificate object from the provided path, assuming it holds a certificate encoded in PEM. 
107 | func LoadCertificate(path string) (*x509.Certificate, error) { 108 | rawData, err := ioutil.ReadFile(path) 109 | if err != nil { 110 | return nil, errors.WithStackTrace(err) 111 | } 112 | certificatePemBlock, _ := pem.Decode(rawData) 113 | certificate, err := x509.ParseCertificate(certificatePemBlock.Bytes) 114 | if err != nil { 115 | return nil, errors.WithStackTrace(err) 116 | } 117 | return certificate, nil 118 | } 119 | 120 | // StoreCertificateKeyPairAsKubernetesSecret will store the provided certificate key pair (which is available in the 121 | // local file system) in the Kubernetes cluster as a secret. 122 | func StoreCertificateKeyPairAsKubernetesSecret( 123 | kubectlOptions *kubectl.KubectlOptions, 124 | secretName string, 125 | secretNamespace string, 126 | labels map[string]string, 127 | annotations map[string]string, 128 | nameBase string, 129 | certificateKeyPairPath CertificateKeyPairPath, 130 | caCertPath string, 131 | ) error { 132 | secret := kubectl.PrepareSecret(secretNamespace, secretName, labels, annotations) 133 | err := kubectl.AddToSecretFromFile(secret, fmt.Sprintf("%s.crt", nameBase), certificateKeyPairPath.CertificatePath) 134 | if err != nil { 135 | return err 136 | } 137 | err = kubectl.AddToSecretFromFile(secret, fmt.Sprintf("%s.pem", nameBase), certificateKeyPairPath.PrivateKeyPath) 138 | if err != nil { 139 | return err 140 | } 141 | err = kubectl.AddToSecretFromFile(secret, fmt.Sprintf("%s.pub", nameBase), certificateKeyPairPath.PublicKeyPath) 142 | if err != nil { 143 | return err 144 | } 145 | 146 | // If we also want to store the CA certificate that can be used to validate server or client 147 | if caCertPath != "" { 148 | err = kubectl.AddToSecretFromFile(secret, "ca.crt", caCertPath) 149 | if err != nil { 150 | return err 151 | } 152 | } 153 | 154 | return kubectl.CreateSecret(kubectlOptions, secret) 155 | } 156 | -------------------------------------------------------------------------------- /tls/ecdsa_cert.go: 
-------------------------------------------------------------------------------- 1 | package tls 2 | 3 | import ( 4 | "crypto/ecdsa" 5 | "crypto/x509" 6 | "crypto/x509/pkix" 7 | "time" 8 | 9 | "github.com/gruntwork-io/go-commons/errors" 10 | ) 11 | 12 | // TLSECDSACertificateKeyPair represents the certificate key pair generated using the ECDSA algorithm. 13 | type TLSECDSACertificateKeyPair struct { 14 | CertificateBytes []byte 15 | PrivateKey *ecdsa.PrivateKey 16 | PublicKey *ecdsa.PublicKey 17 | } 18 | 19 | // Certificate will return the Certificate struct represented by the raw bytes stored on the key pair struct. 20 | func (certificateKeyPair *TLSECDSACertificateKeyPair) Certificate() (*x509.Certificate, error) { 21 | return x509.ParseCertificate(certificateKeyPair.CertificateBytes) 22 | } 23 | 24 | // CreateECDSACertificateKeyPair will generate a new certificate key pair using the ECDSA algorithm. You can 25 | // customize the distinguished name on the certificate, the validity time span, whether or not it is a CA certificate, 26 | // and sign the certificate with a given CA using the available parameters. 27 | // The elliptic curve is configurable, and it must be one of P224, P256, P384, P521. 
28 | func CreateECDSACertificateKeyPair( 29 | validityTimeSpan time.Duration, 30 | distinguishedName pkix.Name, 31 | signedBy *x509.Certificate, 32 | signedByKey interface{}, // We don't know what format the signing key is in, so we will accept any type 33 | isCA bool, 34 | dnsNames []string, 35 | ecdsaCurve string, 36 | ) (TLSECDSACertificateKeyPair, error) { 37 | privateKey, publicKey, err := CreateECDSAKeyPair(ecdsaCurve) 38 | if err != nil { 39 | return TLSECDSACertificateKeyPair{}, errors.WithStackTrace(err) 40 | } 41 | 42 | var signingKey interface{} 43 | signingKey = privateKey 44 | if signedBy != nil { 45 | signingKey = signedByKey 46 | } 47 | certificateBytes, err := CreateCertificateFromKeys( 48 | validityTimeSpan, 49 | distinguishedName, 50 | signedBy, 51 | isCA, 52 | dnsNames, 53 | publicKey, 54 | signingKey, 55 | ) 56 | if err != nil { 57 | return TLSECDSACertificateKeyPair{}, err 58 | } 59 | 60 | certificateKeyPair := TLSECDSACertificateKeyPair{ 61 | CertificateBytes: certificateBytes, 62 | PrivateKey: privateKey, 63 | PublicKey: publicKey, 64 | } 65 | return certificateKeyPair, nil 66 | } 67 | -------------------------------------------------------------------------------- /tls/ecdsa_cert_test.go: -------------------------------------------------------------------------------- 1 | package tls 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | "time" 7 | 8 | "github.com/gruntwork-io/terratest/modules/shell" 9 | "github.com/stretchr/testify/require" 10 | ) 11 | 12 | func TestCreateECDSACertificateKeyPairSupportsSigningCerts(t *testing.T) { 13 | t.Parallel() 14 | 15 | distinguishedName := CreateSampleDistinguishedName(t) 16 | caKeyPair, err := CreateECDSACertificateKeyPair(1*time.Hour, distinguishedName, nil, nil, true, nil, "P256") 17 | require.NoError(t, err) 18 | caCert, err := caKeyPair.Certificate() 19 | require.NoError(t, err) 20 | 21 | signedKeyPair, err := CreateECDSACertificateKeyPair(1*time.Hour, distinguishedName, caCert, caKeyPair.PrivateKey, 
false, nil, "P256") 22 | require.NoError(t, err) 23 | signedCert, err := signedKeyPair.Certificate() 24 | require.NoError(t, err) 25 | 26 | caCertTmpPath := StoreCertToTempFile(t, caCert) 27 | defer os.Remove(caCertTmpPath) 28 | signedCertTmpPath := StoreCertToTempFile(t, signedCert) 29 | defer os.Remove(signedCertTmpPath) 30 | 31 | // Verify the signed certificate is indeed signed by the CA certificate 32 | verifyCmd := shell.Command{ 33 | Command: "openssl", 34 | Args: []string{"verify", "-CAfile", caCertTmpPath, signedCertTmpPath}, 35 | } 36 | shell.RunCommand(t, verifyCmd) 37 | } 38 | 39 | func TestCreateECDSACertificateKeyPairSupportsSigningByRSACerts(t *testing.T) { 40 | t.Parallel() 41 | 42 | distinguishedName := CreateSampleDistinguishedName(t) 43 | caKeyPair, err := CreateRSACertificateKeyPair(1*time.Hour, distinguishedName, nil, nil, true, nil, 2048) 44 | require.NoError(t, err) 45 | caCert, err := caKeyPair.Certificate() 46 | require.NoError(t, err) 47 | 48 | signedKeyPair, err := CreateECDSACertificateKeyPair(1*time.Hour, distinguishedName, caCert, caKeyPair.PrivateKey, false, nil, "P256") 49 | require.NoError(t, err) 50 | signedCert, err := signedKeyPair.Certificate() 51 | require.NoError(t, err) 52 | 53 | caCertTmpPath := StoreCertToTempFile(t, caCert) 54 | defer os.Remove(caCertTmpPath) 55 | signedCertTmpPath := StoreCertToTempFile(t, signedCert) 56 | defer os.Remove(signedCertTmpPath) 57 | 58 | // Verify the signed certificate is indeed signed by the CA certificate 59 | verifyCmd := shell.Command{ 60 | Command: "openssl", 61 | Args: []string{"verify", "-CAfile", caCertTmpPath, signedCertTmpPath}, 62 | } 63 | shell.RunCommand(t, verifyCmd) 64 | } 65 | -------------------------------------------------------------------------------- /tls/ecdsa_keys.go: -------------------------------------------------------------------------------- 1 | package tls 2 | 3 | import ( 4 | "crypto/ecdsa" 5 | "crypto/elliptic" 6 | "crypto/rand" 7 | "crypto/x509" 8 | 
"encoding/pem" 9 | "io/ioutil" 10 | 11 | "github.com/gruntwork-io/go-commons/errors" 12 | ) 13 | 14 | // StoreECDSAPrivateKey takes the given ECDSA private key, encode it to pem, and store it on disk at the specified path. 15 | // You can optionally provide a password to encrypt the key on disk (passing in "" will store it unencrypted). 16 | func StoreECDSAPrivateKey(privateKey *ecdsa.PrivateKey, password string, path string) error { 17 | pemBlock, err := EncodeECDSAPrivateKeyToPEM(privateKey, password) 18 | if err != nil { 19 | return errors.WithStackTrace(err) 20 | } 21 | return errors.WithStackTrace(StorePEM(pemBlock, path)) 22 | } 23 | 24 | // StoreECDSAPublicKey takes the given ECDSA public key, encode it to pem, and store it on disk at the specified path. 25 | func StoreECDSAPublicKey(publicKey *ecdsa.PublicKey, path string) error { 26 | pemBlock, err := EncodePublicKeyToPEM(publicKey) 27 | if err != nil { 28 | return errors.WithStackTrace(err) 29 | } 30 | return errors.WithStackTrace(StorePEM(pemBlock, path)) 31 | } 32 | 33 | // CreateECDSAKeyPair generates a new private public key pair using the ECDSA algorithm. The elliptic curve is 34 | // configurable, and it must be one of P224, P256, P384, P521. 
35 | func CreateECDSAKeyPair(ecdsaCurve string) (*ecdsa.PrivateKey, *ecdsa.PublicKey, error) { 36 | var curve elliptic.Curve 37 | switch ecdsaCurve { 38 | case P224Curve: 39 | curve = elliptic.P224() 40 | case P256Curve: 41 | curve = elliptic.P256() 42 | case P384Curve: 43 | curve = elliptic.P384() 44 | case P521Curve: 45 | curve = elliptic.P521() 46 | default: 47 | err := UnknownECDSACurveError{ecdsaCurve} 48 | return nil, nil, errors.WithStackTrace(err) 49 | } 50 | 51 | privateKey, err := ecdsa.GenerateKey(curve, rand.Reader) 52 | if err != nil { 53 | return nil, nil, errors.WithStackTrace(err) 54 | } 55 | return privateKey, &privateKey.PublicKey, nil 56 | } 57 | 58 | // LoadECDSAPrivateKey will load a private key object from the provided path, assuming it holds a certificate encoded in 59 | // PEM. 60 | func LoadECDSAPrivateKey(path string) (*ecdsa.PrivateKey, error) { 61 | rawData, err := ioutil.ReadFile(path) 62 | if err != nil { 63 | return nil, errors.WithStackTrace(err) 64 | } 65 | privateKeyPemBlock, _ := pem.Decode(rawData) 66 | privateKey, err := x509.ParseECPrivateKey(privateKeyPemBlock.Bytes) 67 | if err != nil { 68 | return nil, errors.WithStackTrace(err) 69 | } 70 | return privateKey, nil 71 | } 72 | -------------------------------------------------------------------------------- /tls/ecdsa_keys_test.go: -------------------------------------------------------------------------------- 1 | package tls 2 | 3 | import ( 4 | "crypto/ecdsa" 5 | "fmt" 6 | "io/ioutil" 7 | "net/url" 8 | "os" 9 | "strings" 10 | "testing" 11 | 12 | "github.com/gruntwork-io/go-commons/errors" 13 | "github.com/gruntwork-io/terratest/modules/logger" 14 | "github.com/gruntwork-io/terratest/modules/random" 15 | "github.com/gruntwork-io/terratest/modules/shell" 16 | "github.com/stretchr/testify/assert" 17 | "github.com/stretchr/testify/require" 18 | ) 19 | 20 | func TestCreateECDSAKeyPairErrorsOnUnknownCurve(t *testing.T) { 21 | t.Parallel() 22 | 23 | _, _, err := 
CreateECDSAKeyPair("unknown") 24 | assert.Error(t, err) 25 | switch errors.Unwrap(err).(type) { 26 | case UnknownECDSACurveError: 27 | default: 28 | logger.Log(t, "Wrong error type for CreateECDSAKeyPair using unknown elliptic curve") 29 | t.Fail() 30 | } 31 | } 32 | 33 | func TestCreateECDSAKeyPairSupportsAllKnownCurves(t *testing.T) { 34 | t.Parallel() 35 | 36 | for _, curve := range KnownCurves { 37 | // Capture range variable because it changes (due to for loop) before executing the test function 38 | curve := curve 39 | t.Run(curve, func(t *testing.T) { 40 | t.Parallel() 41 | privKey, pubKey, err := CreateECDSAKeyPair(curve) 42 | assert.NoError(t, err) 43 | assert.NotNil(t, privKey) 44 | assert.NotNil(t, pubKey) 45 | }) 46 | } 47 | } 48 | 49 | func TestCreateECDSAKeyPairReturnsCompatibleKeys(t *testing.T) { 50 | t.Parallel() 51 | 52 | privKey, pubKey, err := CreateECDSAKeyPair("P256") 53 | assert.NoError(t, err) 54 | privKeyTmpPath := StoreECDSAKeyToTempFile(t, privKey, "") 55 | defer os.Remove(privKeyTmpPath) 56 | pubKeyTmpPath := StoreECDSAPublicKeyToTempFile(t, pubKey) 57 | defer os.Remove(pubKeyTmpPath) 58 | 59 | // Verify the public key matches the private key by regenerating the public key from the private key and verifying 60 | // it is the same as what we have. 
61 | keyPubFromPrivCmd := shell.Command{ 62 | Command: "openssl", 63 | Args: []string{"pkey", "-pubout", "-inform", "PEM", "-in", privKeyTmpPath, "-outform", "PEM"}, 64 | } 65 | keyPubFromPriv := shell.RunCommandAndGetOutput(t, keyPubFromPrivCmd) 66 | pubKeyBytes, err := ioutil.ReadFile(pubKeyTmpPath) 67 | assert.NoError(t, err) 68 | assert.Equal(t, strings.TrimSpace(string(pubKeyBytes)), strings.TrimSpace(keyPubFromPriv)) 69 | } 70 | 71 | func TestStoreECDSAPrivateKeyStoresInPEMFormat(t *testing.T) { 72 | t.Parallel() 73 | 74 | privKey, _, err := CreateECDSAKeyPair("P256") 75 | require.NoError(t, err) 76 | tmpPath := StoreECDSAKeyToTempFile(t, privKey, "") 77 | defer os.Remove(tmpPath) 78 | 79 | // Verify the format, and that key is unencrypted. We use openssl binary to read in the file and if it doesn't 80 | // error, then we know the key is formatted correctly. 81 | // See: https://stackoverflow.com/questions/26259432/how-to-check-a-public-rsa-key-file/26260514#26260514 82 | cmd := shell.Command{ 83 | Command: "openssl", 84 | Args: []string{"ec", "-inform", "PEM", "-in", tmpPath, "-noout"}, 85 | } 86 | shell.RunCommand(t, cmd) 87 | } 88 | 89 | func TestStoreECDSAPrivateKeyEncryption(t *testing.T) { 90 | t.Parallel() 91 | 92 | uniqueId := random.UniqueId() 93 | privKey, _, err := CreateECDSAKeyPair("P256") 94 | require.NoError(t, err) 95 | tmpPath := StoreECDSAKeyToTempFile(t, privKey, uniqueId) 96 | defer os.Remove(tmpPath) 97 | 98 | // Verify the format, and that key is encrypted. We use openssl binary to read in the file and if it doesn't 99 | // error, then we know the key is formatted correctly. 
100 | // See: https://stackoverflow.com/questions/26259432/how-to-check-a-public-rsa-key-file/26260514#26260514 101 | cmd := shell.Command{ 102 | Command: "openssl", 103 | Args: []string{"ec", "-inform", "PEM", "-in", tmpPath, "-passin", fmt.Sprintf("pass:%s", uniqueId), "-noout"}, 104 | } 105 | shell.RunCommand(t, cmd) 106 | } 107 | 108 | // StoreECDSAKeyToTempFile will create a new temporary file and store the provided private key, encrypting it with the 109 | // provided password. 110 | func StoreECDSAKeyToTempFile(t *testing.T, privKey *ecdsa.PrivateKey, password string) string { 111 | escapedTestName := url.PathEscape(t.Name()) 112 | tmpfile, err := ioutil.TempFile("", escapedTestName) 113 | require.NoError(t, err) 114 | defer tmpfile.Close() 115 | require.NoError(t, StoreECDSAPrivateKey(privKey, password, tmpfile.Name())) 116 | return tmpfile.Name() 117 | } 118 | 119 | // StoreECDSAPublicKeyToTempFile will create a new temporary file and store the provided public key. 120 | func StoreECDSAPublicKeyToTempFile(t *testing.T, pubKey *ecdsa.PublicKey) string { 121 | escapedTestName := url.PathEscape(t.Name()) 122 | tmpfile, err := ioutil.TempFile("", escapedTestName) 123 | require.NoError(t, err) 124 | defer tmpfile.Close() 125 | require.NoError(t, StoreECDSAPublicKey(pubKey, tmpfile.Name())) 126 | return tmpfile.Name() 127 | } 128 | -------------------------------------------------------------------------------- /tls/errors.go: -------------------------------------------------------------------------------- 1 | package tls 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | // UnknownPrivateKeyAlgorithm is returned when the provided algorithm is unknown or unsupported. 
8 | type UnknownPrivateKeyAlgorithm struct { 9 | Algorithm string 10 | } 11 | 12 | func (err UnknownPrivateKeyAlgorithm) Error() string { 13 | return fmt.Sprintf("Unrecognized private key algorithm %s", err.Algorithm) 14 | } 15 | 16 | // UnknownECDSACurveError is returned when an unknown ecdsa curve is requested. 17 | type UnknownECDSACurveError struct { 18 | Curve string 19 | } 20 | 21 | func (err UnknownECDSACurveError) Error() string { 22 | return fmt.Sprintf("Unrecognized elliptic curve %s when generating ECDSA key pair.", err.Curve) 23 | } 24 | 25 | // RSABitsTooLow is returned when the requested RSA key length is too low. 26 | type RSABitsTooLow struct { 27 | RSABits int 28 | } 29 | 30 | func (err RSABitsTooLow) Error() string { 31 | return fmt.Sprintf("RSA Key length of %d is too low. Choose at least 2048.", err.RSABits) 32 | } 33 | -------------------------------------------------------------------------------- /tls/options.go: -------------------------------------------------------------------------------- 1 | package tls 2 | 3 | import ( 4 | "crypto/x509" 5 | "crypto/x509/pkix" 6 | "fmt" 7 | "path/filepath" 8 | "time" 9 | 10 | "github.com/gruntwork-io/go-commons/collections" 11 | "github.com/gruntwork-io/go-commons/errors" 12 | ) 13 | 14 | const ( 15 | // Private key algorithms 16 | ECDSAAlgorithm = "ECDSA" 17 | RSAAlgorithm = "RSA" 18 | 19 | // Elliptic curves 20 | P224Curve = "P224" 21 | P256Curve = "P256" 22 | P384Curve = "P384" 23 | P521Curve = "P521" 24 | 25 | // We force users to use at least 2048 bits for RSA, as anything less is cryptographically insecure (since they have 26 | // been cracked). 
27 | // See https://en.wikipedia.org/wiki/Key_size for more commentary 28 | MinimumRSABits = 2048 29 | ) 30 | 31 | var ( 32 | // Valid private key algorithms we support in this library 33 | PrivateKeyAlgorithms = []string{ 34 | ECDSAAlgorithm, 35 | RSAAlgorithm, 36 | } 37 | 38 | // List of known curves we support for ECDSA private key algorithm 39 | KnownCurves = []string{ 40 | P224Curve, 41 | P256Curve, 42 | P384Curve, 43 | P521Curve, 44 | } 45 | ) 46 | 47 | // TLSOptions is a convenient struct to capture all the options needed for generating a TLS certificate key pair. 48 | type TLSOptions struct { 49 | DistinguishedName pkix.Name 50 | ValidityTimeSpan time.Duration 51 | PrivateKeyAlgorithm string 52 | RSABits int 53 | ECDSACurve string 54 | } 55 | 56 | // Validate will validate the provided TLSOptions struct is valid. 57 | func (options *TLSOptions) Validate() error { 58 | switch options.PrivateKeyAlgorithm { 59 | case ECDSAAlgorithm: 60 | if !collections.ListContainsElement(KnownCurves, options.ECDSACurve) { 61 | return errors.WithStackTrace(UnknownECDSACurveError{options.ECDSACurve}) 62 | } 63 | case RSAAlgorithm: 64 | if options.RSABits < MinimumRSABits { 65 | return errors.WithStackTrace(RSABitsTooLow{options.RSABits}) 66 | } 67 | default: 68 | return errors.WithStackTrace(UnknownPrivateKeyAlgorithm{options.PrivateKeyAlgorithm}) 69 | } 70 | return nil 71 | } 72 | 73 | // GenerateAndStoreTLSCertificateKeyPair is a convenience method that will select the right underlying functions to use 74 | // to generate the certificate key pairs and store them to disk at the provided root path. The following files will be 75 | // created: 76 | // - name.crt : The x509 certificate file in PEM format. 77 | // - name.pem : The private key file in PEM format. 78 | // - name.pub : The public key file in PEM format. 
79 | func (options *TLSOptions) GenerateAndStoreTLSCertificateKeyPair( 80 | name string, 81 | rootPath string, 82 | keyPassword string, 83 | isCA bool, 84 | dnsNames []string, 85 | signedBy *x509.Certificate, 86 | signedByKey interface{}, // We don't know what format the signing key is in, so we will accept any type 87 | ) (CertificateKeyPairPath, error) { 88 | var err error 89 | path := CertificateKeyPairPath{ 90 | CertificatePath: filepath.Join(rootPath, fmt.Sprintf("%s.crt", name)), 91 | PrivateKeyPath: filepath.Join(rootPath, fmt.Sprintf("%s.pem", name)), 92 | PublicKeyPath: filepath.Join(rootPath, fmt.Sprintf("%s.pub", name)), 93 | } 94 | switch options.PrivateKeyAlgorithm { 95 | case ECDSAAlgorithm: 96 | err = options.generateECDSATLSCertificateKeyPair(path, keyPassword, isCA, dnsNames, signedBy, signedByKey) 97 | case RSAAlgorithm: 98 | err = options.generateRSATLSCertificateKeyPair(path, keyPassword, isCA, dnsNames, signedBy, signedByKey) 99 | default: 100 | err = errors.WithStackTrace(UnknownPrivateKeyAlgorithm{options.PrivateKeyAlgorithm}) 101 | } 102 | return path, err 103 | } 104 | 105 | func (options *TLSOptions) generateECDSATLSCertificateKeyPair( 106 | certificateKeyPairPath CertificateKeyPairPath, 107 | keyPassword string, 108 | isCA bool, 109 | dnsNames []string, 110 | signedBy *x509.Certificate, 111 | signedByKey interface{}, // We don't know what format the signing key is in, so we will accept any type 112 | ) error { 113 | keypair, err := CreateECDSACertificateKeyPair(options.ValidityTimeSpan, options.DistinguishedName, signedBy, signedByKey, isCA, dnsNames, options.ECDSACurve) 114 | if err != nil { 115 | return errors.WithStackTrace(err) 116 | } 117 | cert, err := keypair.Certificate() 118 | if err != nil { 119 | return err 120 | } 121 | err = StoreCertificate(cert, certificateKeyPairPath.CertificatePath) 122 | if err != nil { 123 | return err 124 | } 125 | err = StoreECDSAPrivateKey(keypair.PrivateKey, keyPassword, 
certificateKeyPairPath.PrivateKeyPath) 126 | if err != nil { 127 | return err 128 | } 129 | return StoreECDSAPublicKey(keypair.PublicKey, certificateKeyPairPath.PublicKeyPath) 130 | } 131 | 132 | func (options *TLSOptions) generateRSATLSCertificateKeyPair( 133 | certificateKeyPairPath CertificateKeyPairPath, 134 | keyPassword string, 135 | isCA bool, 136 | dnsNames []string, 137 | signedBy *x509.Certificate, 138 | signedByKey interface{}, // We don't know what format the signing key is in, so we will accept any type 139 | ) error { 140 | keypair, err := CreateRSACertificateKeyPair(options.ValidityTimeSpan, options.DistinguishedName, signedBy, signedByKey, isCA, dnsNames, options.RSABits) 141 | if err != nil { 142 | return errors.WithStackTrace(err) 143 | } 144 | cert, err := keypair.Certificate() 145 | if err != nil { 146 | return err 147 | } 148 | err = StoreCertificate(cert, certificateKeyPairPath.CertificatePath) 149 | if err != nil { 150 | return err 151 | } 152 | err = StoreRSAPrivateKey(keypair.PrivateKey, keyPassword, certificateKeyPairPath.PrivateKeyPath) 153 | if err != nil { 154 | return err 155 | } 156 | return StoreRSAPublicKey(keypair.PublicKey, certificateKeyPairPath.PublicKeyPath) 157 | } 158 | -------------------------------------------------------------------------------- /tls/options_test.go: -------------------------------------------------------------------------------- 1 | package tls 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/gruntwork-io/go-commons/errors" 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestTLSOptionsValidateAcceptsAllKnownAlgorithms(t *testing.T) { 11 | for _, algorithm := range PrivateKeyAlgorithms { 12 | options := TLSOptions{ 13 | PrivateKeyAlgorithm: algorithm, 14 | ECDSACurve: P224Curve, 15 | RSABits: MinimumRSABits, 16 | } 17 | assert.NoError(t, options.Validate()) 18 | } 19 | } 20 | 21 | func TestTLSOptionsValidateAcceptsAllKnownCurves(t *testing.T) { 22 | for _, curve := range KnownCurves { 23 | 
options := TLSOptions{ 24 | PrivateKeyAlgorithm: ECDSAAlgorithm, 25 | ECDSACurve: curve, 26 | RSABits: MinimumRSABits, 27 | } 28 | assert.NoError(t, options.Validate()) 29 | } 30 | } 31 | 32 | func TestTLSOptionsValidateAcceptsUnknownCurveWhenAlgorithmIsRSA(t *testing.T) { 33 | options := TLSOptions{ 34 | PrivateKeyAlgorithm: RSAAlgorithm, 35 | ECDSACurve: "P0", 36 | RSABits: MinimumRSABits, 37 | } 38 | assert.NoError(t, options.Validate()) 39 | } 40 | 41 | func TestTLSOptionsValidateAcceptsRSABitsAboveMinimum(t *testing.T) { 42 | options := TLSOptions{ 43 | PrivateKeyAlgorithm: RSAAlgorithm, 44 | ECDSACurve: P224Curve, 45 | RSABits: 4096, 46 | } 47 | assert.NoError(t, options.Validate()) 48 | } 49 | 50 | func TestTLSOptionsValidateAcceptsRSABitsBelowMinimumWhenAlgorithmIsECDSA(t *testing.T) { 51 | options := TLSOptions{ 52 | PrivateKeyAlgorithm: ECDSAAlgorithm, 53 | ECDSACurve: P224Curve, 54 | RSABits: 1, 55 | } 56 | assert.NoError(t, options.Validate()) 57 | 58 | } 59 | 60 | func TestTLSOptionsValidateRejectsUnknownAlgorithms(t *testing.T) { 61 | options := TLSOptions{ 62 | PrivateKeyAlgorithm: "UNKNOWN", 63 | ECDSACurve: P224Curve, 64 | RSABits: MinimumRSABits, 65 | } 66 | err := options.Validate() 67 | assert.Error(t, err) 68 | err = errors.Unwrap(err) 69 | switch err.(type) { 70 | case UnknownPrivateKeyAlgorithm: 71 | default: 72 | t.Fatalf("Wrong validation error type: %s", err) 73 | } 74 | } 75 | 76 | func TestTLSOptionsValidateRejectsUnknownCurves(t *testing.T) { 77 | options := TLSOptions{ 78 | PrivateKeyAlgorithm: ECDSAAlgorithm, 79 | ECDSACurve: "UNKNOWN", 80 | RSABits: MinimumRSABits, 81 | } 82 | err := options.Validate() 83 | assert.Error(t, err) 84 | err = errors.Unwrap(err) 85 | switch err.(type) { 86 | case UnknownECDSACurveError: 87 | default: 88 | t.Fatalf("Wrong validation error type: %s", err) 89 | } 90 | } 91 | 92 | func TestTLSOptionsValidateRejectsRSABitsBelowMinimum(t *testing.T) { 93 | options := TLSOptions{ 94 | PrivateKeyAlgorithm: 
RSAAlgorithm, 95 | ECDSACurve: P224Curve, 96 | RSABits: 2047, 97 | } 98 | err := options.Validate() 99 | assert.Error(t, err) 100 | err = errors.Unwrap(err) 101 | switch err.(type) { 102 | case RSABitsTooLow: 103 | default: 104 | t.Fatalf("Wrong validation error type: %s", err) 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /tls/pem.go: -------------------------------------------------------------------------------- 1 | package tls 2 | 3 | import ( 4 | "crypto/ecdsa" 5 | "crypto/rand" 6 | "crypto/rsa" 7 | "crypto/x509" 8 | "encoding/pem" 9 | "os" 10 | "strings" 11 | 12 | "github.com/gruntwork-io/go-commons/errors" 13 | ) 14 | 15 | // EncodeCertificateToPEM will take the raw x509 Certificate and encode it to a pem Block struct. 16 | func EncodeCertificateToPEM(certificate *x509.Certificate) pem.Block { 17 | return pem.Block{ 18 | Type: "CERTIFICATE", 19 | Bytes: certificate.Raw, 20 | } 21 | } 22 | 23 | // EncodeRSAPrivateKeyToPEM will take the provided RSA private key and encode it to a pem Block struct. You can 24 | // optionally encrypt the private key by providing a password (passing in "" will keep it unencrypted). 25 | func EncodeRSAPrivateKeyToPEM(privateKey *rsa.PrivateKey, password string) (pem.Block, error) { 26 | // TODO: make encoding type (PKCS) configurable 27 | return NewPrivateKeyPEMBlock("RSA PRIVATE KEY", x509.MarshalPKCS1PrivateKey(privateKey), password) 28 | } 29 | 30 | // EncodeECDSAPrivateKeyToPEM will take the provided ECDSA private key and encode it to a pem Block struct. You can 31 | // optionally encrypt the private key by providing a password (passing in "" will keep it unencrypted). 
32 | func EncodeECDSAPrivateKeyToPEM(privateKey *ecdsa.PrivateKey, password string) (pem.Block, error) { 33 | blockBytes, err := x509.MarshalECPrivateKey(privateKey) 34 | if err != nil { 35 | return pem.Block{}, errors.WithStackTrace(err) 36 | } 37 | return NewPrivateKeyPEMBlock("EC PRIVATE KEY", blockBytes, password) 38 | } 39 | 40 | // EncodePublicKeyToPEM will take the provided public key and encode it to a pem Block struct. 41 | func EncodePublicKeyToPEM(publicKey interface{}) (pem.Block, error) { 42 | blockBytes, err := x509.MarshalPKIXPublicKey(publicKey) 43 | if err != nil { 44 | return pem.Block{}, errors.WithStackTrace(err) 45 | } 46 | return pem.Block{ 47 | Type: "PUBLIC KEY", 48 | Bytes: blockBytes, 49 | }, nil 50 | } 51 | 52 | // NewPrivateKeyPEMBlock will create the pem Block struct with the provided data. You can optionally encrypt the 53 | // private key by providing a password (passing in "" will keep it unencrypted). 54 | func NewPrivateKeyPEMBlock(pemType string, pemData []byte, password string) (pem.Block, error) { 55 | block := pem.Block{ 56 | Type: pemType, 57 | Bytes: pemData, 58 | } 59 | 60 | // Encrypt the pem 61 | if password != "" { 62 | blockPtr, err := x509.EncryptPEMBlock(rand.Reader, block.Type, block.Bytes, []byte(password), x509.PEMCipherAES256) 63 | if err != nil { 64 | return pem.Block{}, errors.WithStackTrace(err) 65 | } 66 | block = *blockPtr 67 | } 68 | return block, nil 69 | } 70 | 71 | // StorePEM will take the pem block and store it to disk. 
72 | func StorePEM(pemBlock pem.Block, path string) error { 73 | var filePermissions os.FileMode 74 | if strings.HasSuffix(pemBlock.Type, "PRIVATE KEY") || pemBlock.Type == "CERTIFICATE" { 75 | filePermissions = 0600 76 | } else { 77 | filePermissions = 0644 78 | } 79 | 80 | outFile, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, filePermissions) 81 | if err != nil { 82 | return err 83 | } 84 | return pem.Encode(outFile, &pemBlock) 85 | } 86 | -------------------------------------------------------------------------------- /tls/rsa_cert.go: -------------------------------------------------------------------------------- 1 | package tls 2 | 3 | import ( 4 | "crypto/rsa" 5 | "crypto/x509" 6 | "crypto/x509/pkix" 7 | "time" 8 | 9 | "github.com/gruntwork-io/go-commons/errors" 10 | ) 11 | 12 | // TLSRSACertificateKeyPair represents the certificate key pair generated using the RSA algorithm. 13 | type TLSRSACertificateKeyPair struct { 14 | CertificateBytes []byte 15 | PrivateKey *rsa.PrivateKey 16 | PublicKey *rsa.PublicKey 17 | } 18 | 19 | // Certificate will return the Certificate struct represented by the raw bytes stored on the key pair struct. 20 | func (certificateKeyPair *TLSRSACertificateKeyPair) Certificate() (*x509.Certificate, error) { 21 | return x509.ParseCertificate(certificateKeyPair.CertificateBytes) 22 | } 23 | 24 | // CreateRSACertificateKeyPair will generate a new certificate key pair using the RSA algorithm. You can 25 | // customize the distinguished name on the certificate, the validity time span, whether or not it is a CA certificate, 26 | // and sign the certificate with a given CA using the available parameters. 27 | // The size of the RSA key in bits is configurable. Choosing at least 2048 bits is recommended. 
28 | func CreateRSACertificateKeyPair( 29 | validityTimeSpan time.Duration, 30 | distinguishedName pkix.Name, 31 | signedBy *x509.Certificate, 32 | signedByKey interface{}, // We don't know what format the signing key is in, so we will accept any type 33 | isCA bool, 34 | dnsNames []string, 35 | rsaBits int, 36 | ) (TLSRSACertificateKeyPair, error) { 37 | privateKey, publicKey, err := CreateRSAKeyPair(rsaBits) 38 | if err != nil { 39 | return TLSRSACertificateKeyPair{}, errors.WithStackTrace(err) 40 | } 41 | 42 | var signingKey interface{} 43 | signingKey = privateKey 44 | if signedBy != nil { 45 | signingKey = signedByKey 46 | } 47 | certificateBytes, err := CreateCertificateFromKeys( 48 | validityTimeSpan, 49 | distinguishedName, 50 | signedBy, 51 | isCA, 52 | dnsNames, 53 | publicKey, 54 | signingKey, 55 | ) 56 | if err != nil { 57 | return TLSRSACertificateKeyPair{}, err 58 | } 59 | 60 | certificateKeyPair := TLSRSACertificateKeyPair{ 61 | CertificateBytes: certificateBytes, 62 | PrivateKey: privateKey, 63 | PublicKey: publicKey, 64 | } 65 | return certificateKeyPair, nil 66 | } 67 | -------------------------------------------------------------------------------- /tls/rsa_cert_test.go: -------------------------------------------------------------------------------- 1 | package tls 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | "time" 7 | 8 | "github.com/gruntwork-io/terratest/modules/shell" 9 | "github.com/stretchr/testify/require" 10 | ) 11 | 12 | func TestCreateRSACertificateKeyPairSupportsSigningCerts(t *testing.T) { 13 | t.Parallel() 14 | 15 | distinguishedName := CreateSampleDistinguishedName(t) 16 | caKeyPair, err := CreateRSACertificateKeyPair(1*time.Hour, distinguishedName, nil, nil, true, nil, 2048) 17 | require.NoError(t, err) 18 | caCert, err := caKeyPair.Certificate() 19 | require.NoError(t, err) 20 | 21 | signedKeyPair, err := CreateRSACertificateKeyPair(1*time.Hour, distinguishedName, caCert, caKeyPair.PrivateKey, false, nil, 2048) 22 | 
require.NoError(t, err) 23 | signedCert, err := signedKeyPair.Certificate() 24 | require.NoError(t, err) 25 | 26 | caCertTmpPath := StoreCertToTempFile(t, caCert) 27 | defer os.Remove(caCertTmpPath) 28 | signedCertTmpPath := StoreCertToTempFile(t, signedCert) 29 | defer os.Remove(signedCertTmpPath) 30 | 31 | // Verify the signed certificate is indeed signed by the CA certificate 32 | verifyCmd := shell.Command{ 33 | Command: "openssl", 34 | Args: []string{"verify", "-CAfile", caCertTmpPath, signedCertTmpPath}, 35 | } 36 | shell.RunCommand(t, verifyCmd) 37 | } 38 | 39 | func TestCreateRSACertificateKeyPairSupportsSigningByECDSACerts(t *testing.T) { 40 | t.Parallel() 41 | 42 | distinguishedName := CreateSampleDistinguishedName(t) 43 | caKeyPair, err := CreateECDSACertificateKeyPair(1*time.Hour, distinguishedName, nil, nil, true, nil, "P256") 44 | require.NoError(t, err) 45 | caCert, err := caKeyPair.Certificate() 46 | require.NoError(t, err) 47 | 48 | signedKeyPair, err := CreateRSACertificateKeyPair(1*time.Hour, distinguishedName, caCert, caKeyPair.PrivateKey, false, nil, 2048) 49 | require.NoError(t, err) 50 | signedCert, err := signedKeyPair.Certificate() 51 | require.NoError(t, err) 52 | 53 | caCertTmpPath := StoreCertToTempFile(t, caCert) 54 | defer os.Remove(caCertTmpPath) 55 | signedCertTmpPath := StoreCertToTempFile(t, signedCert) 56 | defer os.Remove(signedCertTmpPath) 57 | 58 | // Verify the signed certificate is indeed signed by the CA certificate 59 | verifyCmd := shell.Command{ 60 | Command: "openssl", 61 | Args: []string{"verify", "-CAfile", caCertTmpPath, signedCertTmpPath}, 62 | } 63 | shell.RunCommand(t, verifyCmd) 64 | } 65 | -------------------------------------------------------------------------------- /tls/rsa_keys.go: -------------------------------------------------------------------------------- 1 | package tls 2 | 3 | import ( 4 | "crypto/rand" 5 | "crypto/rsa" 6 | "crypto/x509" 7 | "encoding/pem" 8 | "io/ioutil" 9 | 10 | 
"github.com/gruntwork-io/go-commons/errors" 11 | ) 12 | 13 | // StoreRSAPrivateKey takes the given RSA private key, encode it to pem, and store it on disk at the specified path. You 14 | // can optionally provide a password to encrypt the key on disk (passing in "" will store it unencrypted). 15 | func StoreRSAPrivateKey(privateKey *rsa.PrivateKey, password string, path string) error { 16 | pemBlock, err := EncodeRSAPrivateKeyToPEM(privateKey, password) 17 | if err != nil { 18 | return errors.WithStackTrace(err) 19 | } 20 | return errors.WithStackTrace(StorePEM(pemBlock, path)) 21 | } 22 | 23 | // StoreRSAPublicKey takes the given RSA public key, encode it to pem, and store it on disk at the specified path. You 24 | func StoreRSAPublicKey(publicKey *rsa.PublicKey, path string) error { 25 | pemBlock, err := EncodePublicKeyToPEM(publicKey) 26 | if err != nil { 27 | return errors.WithStackTrace(err) 28 | } 29 | return errors.WithStackTrace(StorePEM(pemBlock, path)) 30 | } 31 | 32 | // CreateRSAKeyPair generates a new private public key pair using the RSA algorithm. The size of the RSA key in bits is 33 | // configurable. We force users to use at least 2048 bits, as anything less is cryptographically insecure (since they 34 | // have been cracked). 35 | // See https://en.wikipedia.org/wiki/Key_size for more commentary 36 | func CreateRSAKeyPair(rsaBits int) (*rsa.PrivateKey, *rsa.PublicKey, error) { 37 | if rsaBits < MinimumRSABits { 38 | err := RSABitsTooLow{rsaBits} 39 | return nil, nil, errors.WithStackTrace(err) 40 | } 41 | 42 | privateKey, err := rsa.GenerateKey(rand.Reader, rsaBits) 43 | if err != nil { 44 | return nil, nil, errors.WithStackTrace(err) 45 | } 46 | return privateKey, &privateKey.PublicKey, nil 47 | } 48 | 49 | // LoadRSAPrivateKey will load a private key object from the provided path, assuming it holds a certificate encoded in 50 | // PEM. 
51 | func LoadRSAPrivateKey(path string) (*rsa.PrivateKey, error) { 52 | rawData, err := ioutil.ReadFile(path) 53 | if err != nil { 54 | return nil, errors.WithStackTrace(err) 55 | } 56 | privateKeyPemBlock, _ := pem.Decode(rawData) 57 | privateKey, err := x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes) 58 | if err != nil { 59 | return nil, errors.WithStackTrace(err) 60 | } 61 | return privateKey, nil 62 | } 63 | -------------------------------------------------------------------------------- /tls/rsa_keys_test.go: -------------------------------------------------------------------------------- 1 | package tls 2 | 3 | import ( 4 | "crypto/rsa" 5 | "fmt" 6 | "io/ioutil" 7 | "net/url" 8 | "os" 9 | "strings" 10 | "testing" 11 | 12 | "github.com/gruntwork-io/go-commons/errors" 13 | "github.com/gruntwork-io/terratest/modules/logger" 14 | "github.com/gruntwork-io/terratest/modules/random" 15 | "github.com/gruntwork-io/terratest/modules/shell" 16 | "github.com/stretchr/testify/assert" 17 | "github.com/stretchr/testify/require" 18 | ) 19 | 20 | func TestCreateRSAKeyPairErrorsOnTooLowBits(t *testing.T) { 21 | t.Parallel() 22 | 23 | _, _, err := CreateRSAKeyPair(1024) 24 | assert.Error(t, err) 25 | switch errors.Unwrap(err).(type) { 26 | case RSABitsTooLow: 27 | default: 28 | logger.Log(t, "Wrong error type for CreateRSAKeyPair using small key length") 29 | t.Fail() 30 | } 31 | } 32 | 33 | func TestCreateRSAKeyPairAllows2048KeyLength(t *testing.T) { 34 | t.Parallel() 35 | 36 | privKey, pubKey, err := CreateRSAKeyPair(2048) 37 | assert.NoError(t, err) 38 | assert.NotNil(t, privKey) 39 | assert.NotNil(t, pubKey) 40 | } 41 | 42 | func TestCreateRSAKeyPairReturnsCompatibleKeys(t *testing.T) { 43 | t.Parallel() 44 | 45 | privKey, pubKey, err := CreateRSAKeyPair(2048) 46 | assert.NoError(t, err) 47 | privKeyTmpPath := StoreRSAKeyToTempFile(t, privKey, "") 48 | defer os.Remove(privKeyTmpPath) 49 | pubKeyTmpPath := StoreRSAPublicKeyToTempFile(t, pubKey) 50 | defer 
os.Remove(pubKeyTmpPath) 51 | 52 | // Verify the public key matches the private key by regenerating the public key from the private key and verifying 53 | // it is the same as what we have. 54 | keyPubFromPrivCmd := shell.Command{ 55 | Command: "openssl", 56 | Args: []string{"pkey", "-pubout", "-inform", "PEM", "-in", privKeyTmpPath, "-outform", "PEM"}, 57 | } 58 | keyPubFromPriv := shell.RunCommandAndGetOutput(t, keyPubFromPrivCmd) 59 | pubKeyBytes, err := ioutil.ReadFile(pubKeyTmpPath) 60 | assert.NoError(t, err) 61 | assert.Equal(t, strings.TrimSpace(string(pubKeyBytes)), strings.TrimSpace(keyPubFromPriv)) 62 | } 63 | 64 | func TestStoreRSAPrivateKeyStoresInPEMFormat(t *testing.T) { 65 | t.Parallel() 66 | 67 | privKey, _, err := CreateRSAKeyPair(2048) 68 | require.NoError(t, err) 69 | tmpPath := StoreRSAKeyToTempFile(t, privKey, "") 70 | defer os.Remove(tmpPath) 71 | 72 | // Verify the format, and that key is unencrypted. We use openssl binary to read in the file and if it doesn't 73 | // error, then we know the key is formatted correctly. 74 | // See: https://stackoverflow.com/questions/26259432/how-to-check-a-public-rsa-key-file/26260514#26260514 75 | cmd := shell.Command{ 76 | Command: "openssl", 77 | Args: []string{"rsa", "-inform", "PEM", "-in", tmpPath, "-noout"}, 78 | } 79 | shell.RunCommand(t, cmd) 80 | } 81 | 82 | func TestStoreRSAPrivateKeyEncryption(t *testing.T) { 83 | t.Parallel() 84 | 85 | uniqueId := random.UniqueId() 86 | privKey, _, err := CreateRSAKeyPair(2048) 87 | require.NoError(t, err) 88 | tmpPath := StoreRSAKeyToTempFile(t, privKey, uniqueId) 89 | defer os.Remove(tmpPath) 90 | 91 | // Verify the format, and that key is encrypted. We use openssl binary to read in the file and if it doesn't 92 | // error, then we know the key is formatted correctly. 
93 | // See: https://stackoverflow.com/questions/26259432/how-to-check-a-public-rsa-key-file/26260514#26260514 94 | cmd := shell.Command{ 95 | Command: "openssl", 96 | Args: []string{"rsa", "-inform", "PEM", "-in", tmpPath, "-passin", fmt.Sprintf("pass:%s", uniqueId), "-noout"}, 97 | } 98 | shell.RunCommand(t, cmd) 99 | } 100 | 101 | // GetTempFilePath returns a temporary file path that can be used as scratch space. 102 | func GetTempFilePath(t *testing.T) string { 103 | return WriteStringToTempFile(t, "") 104 | } 105 | 106 | // WriteStringToTempFile creates a new temporary file and stores the provided string into it. Returns the path to the 107 | // temporary file. 108 | func WriteStringToTempFile(t *testing.T, data string) string { 109 | escapedTestName := url.PathEscape(t.Name()) 110 | tmpfile, err := ioutil.TempFile("", escapedTestName) 111 | require.NoError(t, err) 112 | defer tmpfile.Close() 113 | if data != "" { 114 | _, err := tmpfile.WriteString(data) 115 | require.NoError(t, err) 116 | } 117 | return tmpfile.Name() 118 | } 119 | 120 | // StoreRSAKeyToTempFile will create a new temporary file and store the provided private key, encrypting it with the 121 | // provided password. 122 | func StoreRSAKeyToTempFile(t *testing.T, privKey *rsa.PrivateKey, password string) string { 123 | escapedTestName := url.PathEscape(t.Name()) 124 | tmpfile, err := ioutil.TempFile("", escapedTestName) 125 | require.NoError(t, err) 126 | defer tmpfile.Close() 127 | require.NoError(t, StoreRSAPrivateKey(privKey, password, tmpfile.Name())) 128 | return tmpfile.Name() 129 | } 130 | 131 | // StoreRSAPublicKeyToTempFile will create a new temporary file and store the provided public key. 
132 | func StoreRSAPublicKeyToTempFile(t *testing.T, pubKey *rsa.PublicKey) string { 133 | escapedTestName := url.PathEscape(t.Name()) 134 | tmpfile, err := ioutil.TempFile("", escapedTestName) 135 | require.NoError(t, err) 136 | defer tmpfile.Close() 137 | require.NoError(t, StoreRSAPublicKey(pubKey, tmpfile.Name())) 138 | return tmpfile.Name() 139 | } 140 | -------------------------------------------------------------------------------- /tls/test_helpers.go: -------------------------------------------------------------------------------- 1 | package tls 2 | 3 | import ( 4 | "crypto/x509/pkix" 5 | "time" 6 | ) 7 | 8 | func SampleTlsOptions(algorithm string) TLSOptions { 9 | options := TLSOptions{ 10 | DistinguishedName: pkix.Name{ 11 | CommonName: "gruntwork.io", 12 | Organization: []string{"Gruntwork"}, 13 | OrganizationalUnit: []string{"IT"}, 14 | Locality: []string{"Phoenix"}, 15 | Province: []string{"AZ"}, 16 | Country: []string{"US"}, 17 | }, 18 | ValidityTimeSpan: 1 * time.Hour, 19 | PrivateKeyAlgorithm: algorithm, 20 | RSABits: 2048, 21 | ECDSACurve: P256Curve, 22 | } 23 | return options 24 | } 25 | -------------------------------------------------------------------------------- /tls/testfixtures/ca.cert: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIEpDCCAowCCQDZ6hhOGQBcZzANBgkqhkiG9w0BAQsFADAUMRIwEAYDVQQKDAlH 3 | cnVudHdvcmswHhcNMTkwMTI5MTQxOTQwWhcNMzkwMTI0MTQxOTQwWjAUMRIwEAYD 4 | VQQKDAlHcnVudHdvcmswggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC6 5 | ty+BTT6eLnrf+sCKEzCRrpfGYlXJ5OByWRBt8Sya26ObGKLvKghn5wjg+tqC/IyT 6 | x4JpBOXeZU1L43FSE4K/irLle/Vv7eEJOupqGdManG3AmoSLlkhLR5LPPBrTOBaS 7 | kDyQx1NbghbZAOmibv2aCQdlFOH6nayoVG8PUO4qgwBkYvoAAsOhXaZkoHss5Cxi 8 | qTlC3cPt2byS12IDctyC+Sk5RV9pnEl9lW0NUx3hbKl3UMGTROmWRobDyRUekxmE 9 | +J+uuBEgrI0Ff2YnJ6dOs5CwRE6Rwy5iSz9nCemMX0esR+IDU+mgSok8798j++JR 10 | i56mVa9hqJ5MIy3MgAthWoNoJp3pbSV9VzlDZvoB6BzeO1m1wXxi3h5C+QAAbfCu 11 | 
NFMBjD0MjhjdeS8FOqdyF9HWmJ3ynTy++EiZ5YJNpbxsmu8THe0wyePz8GWaa3ue 12 | 07QDasvglt5bGaMtxb9rIldBMaMUmxtxR8YGjNs44hvxDobo2KNI6nxaNJbSuGV1 13 | c2dAzyorNiGa+v+vSRALOcfsB3HAMeObnElTSe3kmUzLxVa21MiuBUsMKZo/zS8P 14 | QzaJ8qYmf89vdXbrSUst0oXG5Hrh24A+veY7AydTsSqz/SdYHN56FX4JZlCM0a4Y 15 | 8TZ6TdLsxa/JntCPeELxxT5ls6t4HObObxFs7JsixwIDAQABMA0GCSqGSIb3DQEB 16 | CwUAA4ICAQBNysBRRLRkWFQGdhANetM673mS4rC6KaC9vw9u2ic6TVJDY1/FRoug 17 | IyFF5oqryCCTc6xqmexqqVxNRx3C2eRUFPAs3xe+99DGcX2LHR/Ww0v4eQcX/RHZ 18 | O3J3WAcUzRz+cR2061iQOMJDqzKPRiSe0jS22RoUsZmxOUV0sRroKHb7vcUxJWSw 19 | 6ehLTBd9nuA7hCeIZerm8e+zFsXSaqnnvloxt2AodjZMZ+wjIzhbty3nG+O0BdZ2 20 | uu4kJr0eSg46mdaXmUkN58A/nJdH4VZnPagooOt1nR84VkwWPFr6anZ7eQWFsasg 21 | 6rkGvBvuQw0UfOS9q8maljGs8HMNP3wmKWcZUgEAyGHsUBA+QFjhaRh4BO3DJSb5 22 | mEzK18ccd8D3bdEouaWFnCD6Y0dkju9QTo8eH/kAdKwapA86jNQhDR9o/cFTr8e2 23 | QbQ8vRJLJ46XK5dxi6zsa+4/JMVSNy71lLC15Eftp9s3S7uEbrHVMG/n5A6mzKBC 24 | 03+QSyFxccLatqyh2cBEnw4qlAqZIq2akOqZOdYnwiZ0DqcgXP9O5mVwh0k5J5no 25 | 0PEGo1lDgxr8rryZSHrRLV3zxw4kqC3MLRniUPvjCf+nBMcWBKmzNafqJWc+po2T 26 | tQC84XtEoMan5aWMkJT0Jd5odq8vR/KPJsojmfQzSRIZxBZ9FLFBbw== 27 | -----END CERTIFICATE----- 28 | -------------------------------------------------------------------------------- /tls/testfixtures/ca.key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIJKAIBAAKCAgEAurcvgU0+ni563/rAihMwka6XxmJVyeTgclkQbfEsmtujmxii 3 | 7yoIZ+cI4PragvyMk8eCaQTl3mVNS+NxUhOCv4qy5Xv1b+3hCTrqahnTGpxtwJqE 4 | i5ZIS0eSzzwa0zgWkpA8kMdTW4IW2QDpom79mgkHZRTh+p2sqFRvD1DuKoMAZGL6 5 | AALDoV2mZKB7LOQsYqk5Qt3D7dm8ktdiA3LcgvkpOUVfaZxJfZVtDVMd4Wypd1DB 6 | k0TplkaGw8kVHpMZhPifrrgRIKyNBX9mJyenTrOQsEROkcMuYks/ZwnpjF9HrEfi 7 | A1PpoEqJPO/fI/viUYueplWvYaieTCMtzIALYVqDaCad6W0lfVc5Q2b6Aegc3jtZ 8 | tcF8Yt4eQvkAAG3wrjRTAYw9DI4Y3XkvBTqnchfR1pid8p08vvhImeWCTaW8bJrv 9 | Ex3tMMnj8/Blmmt7ntO0A2rL4JbeWxmjLcW/ayJXQTGjFJsbcUfGBozbOOIb8Q6G 10 | 6NijSOp8WjSW0rhldXNnQM8qKzYhmvr/r0kQCznH7AdxwDHjm5xJU0nt5JlMy8VW 11 | 
ttTIrgVLDCmaP80vD0M2ifKmJn/Pb3V260lLLdKFxuR64duAPr3mOwMnU7Eqs/0n 12 | WBzeehV+CWZQjNGuGPE2ek3S7MWvyZ7Qj3hC8cU+ZbOreBzmzm8RbOybIscCAwEA 13 | AQKCAgAwtaNd/xHL4GJL1jWuj60HBF4x8ethfSUUj6malOVHDO+UgYvYCsYgnQQS 14 | 9T5WF55w2wZpcU6qgxD0MESa3d4pK/zZo0n3Rj6D0uvefBg3hAGZ0nWrmKwJf4WM 15 | LKju214AJEK4nldfswNU30kUCBJ+wcY9F4KQh9vIGibS/xDy5DQQ/EUuBYqVqUHE 16 | VJ1tq6TspCr9B6+aeEgaore+IJPoUynu9+sZOR1u9rF5XWqZKWjYaKfnTH4HpyR3 17 | JRkuXb+1kcrOoNRPCtTLT14N7ulVvcCMS3WXzyxlli3XKp3T8fNXvXT320dCes9s 18 | p0M2NvrmvR7JU+cp+fw57p9/DxRogkD3HsUwOY1xRk4A8MG7mmZrsZtMyFT04txZ 19 | Jh/nldFCkVNxrWxyi/ldwxSbDp9bRA7u2JVlUQr90c74Ejz4c+e2g8bdG0Gy7ke9 20 | 7zuili8rQQeNlKtfKmgwtaBDZwZZIX2tA+Dai4mcsBxhQsY0bO4y01uNqcnioWpv 21 | V+83+FDS/Ut9EFdjGze7AchrE6XXvdzz+lL3STi/2hTWB/TKXnLW/XyPv0iGHMLJ 22 | YEAUGAYv5GDxi55wogxQ6cOqt8uO9gOU5Z4EwJNRctxHw7oVLkPL7yl/OsG+U15l 23 | YG0xma7XvI1PmEdX18+9R3ckuFFXgGZxkT8oiUeI8XNlSI8AAQKCAQEA4Gpl8FK7 24 | 1/rz0dZGbM1ZVN7+m99DLhGMDE86uv7KWxHOqFN8bpWVli7xrjAhUKaNCQX+Kro2 25 | iIIDT6Zco02tk1xwqAPwhwL3liGIC68rbvMP5bJGM7QMrljfqvwLWqDpkR3RiaFY 26 | xo6rl3CjwKCus2xvlhHAJNoSnVGXgvFD/dtiv3x+x4qE+PU9yIXouQd6iPVN15s6 27 | dwhM4hIKPGPoU8wr0dcn5KTCkRXFwl8wnNETu/fn9i+Z00WTorW4CcTpug45tJi0 28 | eNRTFtrsiji78Y7ul3JcN6dNwbRjg+Hl/8tr48NmI95K/plOUlIDkJCMv8wgSZpL 29 | ouNKXLfJpDMixwKCAQEA1P54XvyZCRwuKD9/YlG6Kroi4gzSUjokdZWIxEUx2vot 30 | zdIqYQwpQ+PYh834XUMvAVE2/lG7DmQdMsgIrxdm4BOsavwDCe2t8zL5I0NOTWDf 31 | TctgaCWH6ByfLSy2K+e/AWhqQe9mYuJyUjW2Lqggg12etIpJP9LomZruBXlq3AIJ 32 | BzjHZI6DW1yx3S5P1nCFBBB5sUx5PUJl8E30ESpM9aGax8w0IHSTUPosSgLE612U 33 | WmqzWM3e2wgZkoKarWj3e7AApNOxRvCWjltrdfRftn1ybizqORT1LbrA1tlpV0qs 34 | wxsfdcB2fMBO9aRw/bEcD1CNM42XQqBIr/wiDFgAAQKCAQEAoDe2IZafn3n9+tDV 35 | JLaS0YIHGreR2eETMHKcRobDZ7NV7Fz2WGkZC8jmKOsZzd+qj0elB1I8vsa7oHDw 36 | cHJzuuJjV8DAR1O5+Yhk/ZqJEsQblGzrq9wetZaj0Qny/TgmqME2KOF1ZQTUwuDb 37 | OBo/v79qByUTHfxHD+QBnK2tUh7/6B/0z3c4qhlEi9X2qasKM5+WfJjJa9QUN86T 38 | Wjwl0TXSDxVh2N48PqLhtnyJU1v9j9RU2jFvzCM/XQoJo4ffKkMftazUI/PKgmW4 39 | 
PJJ2s+XMt3khF2atn96usahksXPEyi5lzLPWrOA8x47gFqTsp74PSx0KZPeKzpk0 40 | L7OGXwKCAQAWH2QzQXtHtfSZW5cIiEKU44ZyAjDqN33H9MMi16kI/Ik7YoPiVpgG 41 | pCi+WkdcsHi2Y1AZ9DVAlmdt+dblEMIvrftvlUNfiZ9Srk2wOvJsGQ64jtIxnwxR 42 | sH9b+tPlFYoCRG0c8hepV6nWHhwRFodh/ZxZVi+rgGWGZtAoI2dU+KUlaeFT8XV1 43 | TNhwh7nlJQS72lY5cx74nOfnoMEj80fRD3r7G5uib1lJxOJKwaTgaXTaO1DrfV6T 44 | U3EMhaI5NrjV0P8QH6i/HIgWuyobM0mps7fp96JQUXvxxv72kdheJ7RwA2YuHdJw 45 | vKy22OCfiQDIGm7AFwnJ8sn59W835wABAoIBACW3BMSr1+cRlWIvgq/evCUQBa2o 46 | jv228aPKIG4eMLuI29RDE5KYDCSfCec9e+a2jdujb9pqKGzt3FysxGRYrM9E9Rwi 47 | Q4C/zReG6RgtXmaCnD5iZFy+6Qqs5ZNkjVUhK0eJ1gIlgq2LClU/NHDDvpeRcKfB 48 | ain9FaOBlYr7JSWYiMKe+954cILB4UrqyNTjgIW8RKLyHkO88bhvgmFbIt6xL6KC 49 | 0Okluzq2gPexCXwlf1VC3n4SMdnsLQv22cTK27swZ4MA9rWvhzq8D5B+7i3a0Nr1 50 | E8HgC0QaYBK6dDWu7uW7ojFZzJahxM6asSbPzBwNu1RT57JTsGXekWygHq0= 51 | -----END RSA PRIVATE KEY----- 52 | -------------------------------------------------------------------------------- /tls/testfixtures/ca.pub: -------------------------------------------------------------------------------- 1 | -----BEGIN PUBLIC KEY----- 2 | MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAurcvgU0+ni563/rAihMw 3 | ka6XxmJVyeTgclkQbfEsmtujmxii7yoIZ+cI4PragvyMk8eCaQTl3mVNS+NxUhOC 4 | v4qy5Xv1b+3hCTrqahnTGpxtwJqEi5ZIS0eSzzwa0zgWkpA8kMdTW4IW2QDpom79 5 | mgkHZRTh+p2sqFRvD1DuKoMAZGL6AALDoV2mZKB7LOQsYqk5Qt3D7dm8ktdiA3Lc 6 | gvkpOUVfaZxJfZVtDVMd4Wypd1DBk0TplkaGw8kVHpMZhPifrrgRIKyNBX9mJyen 7 | TrOQsEROkcMuYks/ZwnpjF9HrEfiA1PpoEqJPO/fI/viUYueplWvYaieTCMtzIAL 8 | YVqDaCad6W0lfVc5Q2b6Aegc3jtZtcF8Yt4eQvkAAG3wrjRTAYw9DI4Y3XkvBTqn 9 | chfR1pid8p08vvhImeWCTaW8bJrvEx3tMMnj8/Blmmt7ntO0A2rL4JbeWxmjLcW/ 10 | ayJXQTGjFJsbcUfGBozbOOIb8Q6G6NijSOp8WjSW0rhldXNnQM8qKzYhmvr/r0kQ 11 | CznH7AdxwDHjm5xJU0nt5JlMy8VWttTIrgVLDCmaP80vD0M2ifKmJn/Pb3V260lL 12 | LdKFxuR64duAPr3mOwMnU7Eqs/0nWBzeehV+CWZQjNGuGPE2ek3S7MWvyZ7Qj3hC 13 | 8cU+ZbOreBzmzm8RbOybIscCAwEAAQ== 14 | -----END PUBLIC KEY----- 15 | 
-------------------------------------------------------------------------------- /tls/testfixtures/tiller.cert: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIEsjCCApoCCQDWC0wlBohLuzANBgkqhkiG9w0BAQUFADAUMRIwEAYDVQQKDAlH 3 | cnVudHdvcmswHhcNMTkwMTI5MTQzNTQxWhcNMjAwMTI5MTQzNTQxWjAiMRIwEAYD 4 | VQQKDAlHcnVudHdvcmsxDDAKBgNVBAMMA0ZvbzCCAiIwDQYJKoZIhvcNAQEBBQAD 5 | ggIPADCCAgoCggIBAK3vM8Sjd7khUl77KX6CNi4KXCppG8WGkBez0h8p1Z7b03/8 6 | GfnlETQzgkcEl2KOwRdZnvg3wUuIFHdfSaJ9tNqX0UZDqL5EGbW6aQ5tbQj3+OIP 7 | FLCBa+QrSzeiPu3GIKDE8gUhXO/P73sLrq++UQxCu7NkZpQBhvw5g16/18Wgr1rg 8 | ZeBznJGh5OpB7dGXGO5UNVnyQOPfSMd4vgOVJkVXYdCQRTFyjbyoJ2o0slx8XBzp 9 | BeaeTIGeoBEjFxoHl+sFKSfFdvuUsERYqoirRbRE8vxyIdwH1s/xUwmbwwsK69vZ 10 | VrzkHnd+aw+LUThhcb2G6CRQ5r7O1ThSAYlsKFwYZtjS/Ka7URJcQznL0urZFdq9 11 | Cv0z35nOZCbLmIq9sh4J9FS7He+ZmT+X/FQwp5ZhHmW14bjHG6/gFlxnTZzWPnEd 12 | 2NTI3X4SSWM4lcw/JXgz7Mz96KRXVxFUxXhhzZh0SoXO5AHca8BpUf7FP7QkJvfd 13 | x3NW6YCynrXxW489zp1l9JjSpULXcdXjDIjYpIcRh4HoFXiq9uhfn4mXBw6UDalT 14 | PzJkMLDgZXGkgeh9svdx1rJpvX/FHG4gksAj4ZzBluL9KgxG2iK0joF9ezTKqh7b 15 | E92pQjmKdreyZwuhHoaZv3rxxadYeQuVtmDAYAIsjIKRmsjWxluc6u8GMDuJAgMB 16 | AAEwDQYJKoZIhvcNAQEFBQADggIBAJ2O8UhO6vBYSzAWZRF3gpVG31pd3tTk3aCX 17 | 2erZ3vz95LIKVcYtsXxwOIbzkIGpQMtUicuKP/I4e29iflSiR8u1+aL526qXVFfx 18 | o+SGkn3a+RX43SfBlHywIRp9WpsSwTr4hfXlQ5wegxVRkVzoX+TOwNl7PjjK8pvc 19 | IbEKb6irqODRLMEbzdSR2llSo5++n3Dw4H5o3twyszrBBjj8ju0yejvcWcvOiL7p 20 | 1I5NBSHz3ZujDPH8oLA9QCddwHQgmtrShdzC8QhJnWWfoZ3m/9sajZp5nUBiECUJ 21 | SaW+ylPcOz9+3TfuY02eOzkGQ37pQHkJtZhV1FSw1P6C9zDLNY+cd7PludKjyFzR 22 | XGiYolqG95HQkQHuz/msNYGlXkCnbmYzTQxQaFp14eJIfsV8ZOdNRIXztR1U73pA 23 | PHxGRa81lItNTFclubOres7thPvqAPhl1hmbmjgcV3hw5ZEgiRLY+WKbgUK3EdBO 24 | zEhMtj1X4AqYsMA96EaE7tyeQDC4HfHQHHz9ICwiUbEUIjDx13omu513eVsfor2i 25 | 6y3JsoV0Jx6OlWlZQd3PrXplGL7JImK3UjN7Rwty54maRY4av4wvzTx2G1KnqQ1E 26 | EoZEZ+L7krjqczwow6HepCrENub98SvVqo5hTjfrMXlNkAVb7KbjBm9ECiTkuW5R 27 | XiWAKX6d 28 | -----END 
CERTIFICATE----- 29 | -------------------------------------------------------------------------------- /tls/testfixtures/tiller.key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIJKQIBAAKCAgEAre8zxKN3uSFSXvspfoI2LgpcKmkbxYaQF7PSHynVntvTf/wZ 3 | +eURNDOCRwSXYo7BF1me+DfBS4gUd19Jon202pfRRkOovkQZtbppDm1tCPf44g8U 4 | sIFr5CtLN6I+7cYgoMTyBSFc78/vewuur75RDEK7s2RmlAGG/DmDXr/XxaCvWuBl 5 | 4HOckaHk6kHt0ZcY7lQ1WfJA499Ix3i+A5UmRVdh0JBFMXKNvKgnajSyXHxcHOkF 6 | 5p5MgZ6gESMXGgeX6wUpJ8V2+5SwRFiqiKtFtETy/HIh3AfWz/FTCZvDCwrr29lW 7 | vOQed35rD4tROGFxvYboJFDmvs7VOFIBiWwoXBhm2NL8prtRElxDOcvS6tkV2r0K 8 | /TPfmc5kJsuYir2yHgn0VLsd75mZP5f8VDCnlmEeZbXhuMcbr+AWXGdNnNY+cR3Y 9 | 1MjdfhJJYziVzD8leDPszP3opFdXEVTFeGHNmHRKhc7kAdxrwGlR/sU/tCQm993H 10 | c1bpgLKetfFbjz3OnWX0mNKlQtdx1eMMiNikhxGHgegVeKr26F+fiZcHDpQNqVM/ 11 | MmQwsOBlcaSB6H2y93HWsmm9f8UcbiCSwCPhnMGW4v0qDEbaIrSOgX17NMqqHtsT 12 | 3alCOYp2t7JnC6Eehpm/evHFp1h5C5W2YMBgAiyMgpGayNbGW5zq7wYwO4kCAwEA 13 | AQKCAgEAlEJ8NxY7/CMqKmoe4YYulqogC8qScJmWA441AnlPbNzmeIa0waz/MCbj 14 | 2pNQUyNp9fRfrCcibbat817WzlomqdmU+ja8BIrZ+2zNFg4yTd3JWTAPq7B1/gTl 15 | VaPHzjf/IesefT8GECiHcrPvkEBCp5fOlUh0o551qFOB5mbSjtO8BVOXo24IvJ5q 16 | GUBfbjNEfgpre6MiqtpuGBGUrksZVyv35abERDToGjKffkky6+DZR49dh0nR3v8O 17 | Y+0sSPTGCv/YJ92K4UHJykhl/J85iELpeEP1Fq/b7F5BMMSDycriZwDojZRJYAPz 18 | UEmpzsxI/oubHzGSrnxxhVNpmfEGqKjuVBvFsMc6wcFC4xKyzV4XL0xKUExZfL8o 19 | uXA3GF5X8rh+qCcuZSSBAM7nqX+XRWxBThYAqGYe2h09Z9kHCPmrQDsPK4g6C7Ir 20 | lyk0hN/aGrW5HUK5hFtEdrCBGfo8IBY+Jc7pD/BN4zEkaSJnif6yC2D6n6np9Q9V 21 | uCj5jzMtXYkIy75kI9sACVRqsO/cxc2MFpEEk3V0Qmfr3Gy9BHovIN7ICsoZiMcZ 22 | 5VO3hQ5G0blXYftrJVU2f6Sj7eV23p2CR/JhU7kB5hhPf3GuZeKBj/BWUAYb5z3g 23 | +/gPN4hPCfrzdPMKtJSPAUmR7RAyXyE1V0a4jPVHbo1uXre0dt0CggEBAOVB0XAr 24 | enzK+yhLQUv/m5V8/UqJlr6G5RF4JbaUZfBZeKxzke6sG6zB+Nu+4Nx1ikXZFmOP 25 | JZIQDkrIEIjXdXAFjEFOvcwCshhhXEhDbCy/1CNp5y7GWOdF4wuzv8E0vPnSEwrr 26 | 0Ygcpn3BV7N2QbyNJrqFqpwUlzIsGjcyRABYce29ICGWnwv+Nfy8HkUzI7DmsI87 27 
| /yKf2maRQ8oqIaua+Zl+fIdBXlJzIMGaRmm7eySo0i/MUzoF1WdE7zGJLR7Dk0lC 28 | JLK/BWXqcmesNcoBpN0/9+TWExdBH9eAdse2NG7eHqvxCtww7i9OVxqYLNswcWfK 29 | AUs+mm39JXpXq9MCggEBAMI5T7oit+e2X5cdnZTmzJ45t+5+wge9axGvUEeQmVdb 30 | CIEI979GtVKgEVyaT68X3K2qFvZ+AegOUfNFPsiX/fpqI7YBud8JkUtkvyg/8YvK 31 | j9CJ2YFmUH5Pj1UGutWp4V/j3qZVity+f5TRR8K12rIBIKyFUMWtCxPRNcYmmTgw 32 | YKUMENIW4gM5DpUDb32W8vRlWlg3aX5CcUhNV4lyHL2cfTBMAZvcIYWbjjMCk1PR 33 | NhqXSMaM9kPPh5fifi6w017xeg1vhwChD1tPI5+uvtdquTIcIviYnSs86ryLYS6i 34 | AFcF22buuGWp540m0bmd6hQ/e59TCt+8CL3qryU1LbMCggEAYRnliWsA8EZQ5pop 35 | R/U1grVz2tQ34u7jquiGoLRuEMT0laZFkx+WgS+PJhPMzdODA16VWwnQUfM2+nmO 36 | d0c62H2hEDA3t2cU47qpCXp1IOcHBRDf+DHOifZR2i0B9OVPfUh7BE8paksoQW9Y 37 | pAxcf24Rj0L6Ji2adIe1etdxcewEJTvLli5jowLC/w72izzDyzOKaMByqcmnCXZR 38 | jhkWNcboHRkzyV3og2d74kRjunjMCZTvoYsbXSIHZzwNS5/cWoFfqtG3FXZO5QHN 39 | /xczmSHlIEjtyCDb6SeOQwqIRMJppuOxfgYUIXsgBy1ZO0xlA1mbTuiCb8pOQnPM 40 | IetsPQKCAQBsGvZThouSf6JJPIS/oo916m6EYy2gnUABQfGPQ972Dod3OwSbBMJc 41 | URgY0RNVRBTi3O+XF5/8CCx5GavOfzswrO9fmSpaCLtEmbkEsELEKmVOD8xvCAH5 42 | rFL9YpeP4dS+SNle//Ig2C+vJ6WnrQ0jUxlarYG95Ylq6GHj/wv9iH/ingqmxXHb 43 | lWCWdKkT4HcurVNSEtW3H0hqlBILQObaa0YLIeMrW6WU9nZ50g8q8iIJPveU/FT+ 44 | fX7dfYX6exXLnODwpYkhQoSHsE8FzIAQ6yhVye91K40fyfJmK7ZV/Hnv9iSe8HTW 45 | sXFe0aKejFUEHCm1DsXYrpK3Eu+V6DmdAoIBAQDj2gZgo+R8MHjTYYfp0eYWE2G4 46 | jv5yOxR2M9tHcFIcBQcl9+BfOjCqTA8ZUE+MiGARL2sZxiA4PB9f84cIwq+8/k0A 47 | l3kS7udEUJ690V991iT7dgg37UtdAmAsIr5oHu25ncR4pr8W8j6xdUD9GiZA6oP3 48 | i9GO34m64e6MJcuOtva6vUovnhbOxt53EJlVsBuc4C12b18vB2JiCwCzPhfnDZEr 49 | Q5/jhMUed4f13MGi83IRgTCFsV+9Whv1GktK47rKSjrdlFvMKKml+ly9hDaqdwsP 50 | fGIXjmOdWeZyHH7ubFuwf8wZR1W0/rCW9C+huGz2P9Q1qeOvjz9ojW76F0X1 51 | -----END RSA PRIVATE KEY----- 52 | -------------------------------------------------------------------------------- /tls/testfixtures/tiller.pub: -------------------------------------------------------------------------------- 1 | -----BEGIN PUBLIC KEY----- 2 | 
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAre8zxKN3uSFSXvspfoI2 3 | LgpcKmkbxYaQF7PSHynVntvTf/wZ+eURNDOCRwSXYo7BF1me+DfBS4gUd19Jon20 4 | 2pfRRkOovkQZtbppDm1tCPf44g8UsIFr5CtLN6I+7cYgoMTyBSFc78/vewuur75R 5 | DEK7s2RmlAGG/DmDXr/XxaCvWuBl4HOckaHk6kHt0ZcY7lQ1WfJA499Ix3i+A5Um 6 | RVdh0JBFMXKNvKgnajSyXHxcHOkF5p5MgZ6gESMXGgeX6wUpJ8V2+5SwRFiqiKtF 7 | tETy/HIh3AfWz/FTCZvDCwrr29lWvOQed35rD4tROGFxvYboJFDmvs7VOFIBiWwo 8 | XBhm2NL8prtRElxDOcvS6tkV2r0K/TPfmc5kJsuYir2yHgn0VLsd75mZP5f8VDCn 9 | lmEeZbXhuMcbr+AWXGdNnNY+cR3Y1MjdfhJJYziVzD8leDPszP3opFdXEVTFeGHN 10 | mHRKhc7kAdxrwGlR/sU/tCQm993Hc1bpgLKetfFbjz3OnWX0mNKlQtdx1eMMiNik 11 | hxGHgegVeKr26F+fiZcHDpQNqVM/MmQwsOBlcaSB6H2y93HWsmm9f8UcbiCSwCPh 12 | nMGW4v0qDEbaIrSOgX17NMqqHtsT3alCOYp2t7JnC6Eehpm/evHFp1h5C5W2YMBg 13 | AiyMgpGayNbGW5zq7wYwO4kCAwEAAQ== 14 | -----END PUBLIC KEY----- 15 | -------------------------------------------------------------------------------- /tls/tls.go: -------------------------------------------------------------------------------- 1 | // Package tls contains functions for creating and managing x509 certificates and key pairs for the purposes of TLS 2 | package tls 3 | --------------------------------------------------------------------------------