├── .gitignore ├── resources ├── halyard │ └── deck.yml ├── kubernetes │ ├── spinnaker-k8s-role.yaml │ └── lb-services.yaml └── cloudformation │ ├── spinnaker-managed.yaml │ ├── codebuild-projects.yaml │ ├── spinnaker-eks-ec2.yaml │ └── spinnaker-eks-nodegroup.yaml ├── NOTICE ├── .github └── PULL_REQUEST_TEMPLATE.md ├── CODE_OF_CONDUCT.md ├── scripts ├── start_hal_daemon.sh ├── create_spinnaker_managed.sh ├── cleanup.sh ├── deploy_spinnaker.sh └── create_eks_cluster.sh ├── templates ├── aws-auth-cm.tmpl.yaml ├── kubeconfig.tmpl.yaml └── kubeconfig-with-role.tmpl.yaml ├── LICENSE ├── CONTRIBUTING.md └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | .idea -------------------------------------------------------------------------------- /resources/halyard/deck.yml: -------------------------------------------------------------------------------- 1 | artifactId: gcr.io/spinnaker-marketplace/deck:2.5.0-20180927135911 -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Spinnaker Halyard Deployment on AWS 2 | Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | *Issue #, if available:* 2 | 3 | *Description of changes:* 4 | 5 | 6 | By submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice. 
7 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 5 | -------------------------------------------------------------------------------- /scripts/start_hal_daemon.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -ex 2 | ## Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | ## SPDX-License-Identifier: MIT-0 4 | 5 | docker run -p 8084:8084 -p 9000:9000 \ 6 | --name halyard --rm \ 7 | -v $(pwd)/.hal:/home/spinnaker/.hal \ 8 | -v ~/.kube:/home/spinnaker/.kube \ 9 | -v ~/.aws:/home/spinnaker/.aws \ 10 | -it \ 11 | gcr.io/spinnaker-marketplace/halyard:stable & 12 | -------------------------------------------------------------------------------- /templates/aws-auth-cm.tmpl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: aws-auth 5 | namespace: kube-system 6 | data: 7 | mapRoles: | 8 | - rolearn: %%EKS_ADMIN_ARN%% 9 | username: kubernetes-admin 10 | groups: 11 | - system:masters 12 | - rolearn: %%EKS_NODE_INSTANCE_ROLE_ARN%% 13 | username: system:node:{{EC2PrivateDNSName}} 14 | groups: 15 | - system:bootstrappers 16 | - system:nodes 17 | -------------------------------------------------------------------------------- /templates/kubeconfig.tmpl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | clusters: 3 | - cluster: 4 | server: "%%K8S_ENDPOINT%%" 5 | certificate-authority-data: 
"%%CA_DATA%%" 6 | name: kubernetes 7 | contexts: 8 | - context: 9 | cluster: kubernetes 10 | user: aws 11 | name: aws 12 | current-context: aws 13 | kind: Config 14 | preferences: {} 15 | users: 16 | - name: aws 17 | user: 18 | exec: 19 | apiVersion: client.authentication.k8s.io/v1alpha1 20 | command: heptio-authenticator-aws 21 | args: 22 | - "token" 23 | - "-i" 24 | - "%%K8S_NAME%%" -------------------------------------------------------------------------------- /resources/kubernetes/spinnaker-k8s-role.yaml: -------------------------------------------------------------------------------- 1 | ## Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | ## SPDX-License-Identifier: MIT-0 3 | 4 | apiVersion: v1 5 | kind: ServiceAccount 6 | metadata: 7 | name: spinnaker-service-account 8 | namespace: spinnaker 9 | 10 | --- 11 | 12 | apiVersion: rbac.authorization.k8s.io/v1 13 | kind: ClusterRoleBinding 14 | metadata: 15 | name: spinnaker-admin 16 | roleRef: 17 | apiGroup: rbac.authorization.k8s.io 18 | kind: ClusterRole 19 | name: cluster-admin 20 | subjects: 21 | - kind: ServiceAccount 22 | name: spinnaker-service-account 23 | namespace: spinnaker -------------------------------------------------------------------------------- /templates/kubeconfig-with-role.tmpl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | clusters: 3 | - cluster: 4 | server: "%%K8S_ENDPOINT%%" 5 | certificate-authority-data: "%%CA_DATA%%" 6 | name: kubernetes 7 | contexts: 8 | - context: 9 | cluster: kubernetes 10 | user: aws 11 | name: aws 12 | current-context: aws 13 | kind: Config 14 | preferences: {} 15 | users: 16 | - name: aws 17 | user: 18 | exec: 19 | apiVersion: client.authentication.k8s.io/v1alpha1 20 | command: heptio-authenticator-aws 21 | args: 22 | - "token" 23 | - "-i" 24 | - "%%K8S_NAME%%" 25 | - "-r" 26 | - "%%EKS_ADMIN_ARN%%" 
-------------------------------------------------------------------------------- /resources/kubernetes/lb-services.yaml: -------------------------------------------------------------------------------- 1 | ## Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | ## SPDX-License-Identifier: MIT-0 3 | 4 | --- 5 | kind: Service 6 | apiVersion: v1 7 | metadata: 8 | name: deck-lb 9 | labels: 10 | app: spin 11 | cluster: spin-deck 12 | spec: 13 | ports: 14 | - port: 80 15 | targetPort: 9000 16 | selector: 17 | app: spin 18 | cluster: spin-deck 19 | type: LoadBalancer 20 | --- 21 | kind: Service 22 | apiVersion: v1 23 | metadata: 24 | name: gate-lb 25 | labels: 26 | app: spin 27 | cluster: spin-gate 28 | spec: 29 | ports: 30 | - port: 80 31 | targetPort: 8084 32 | selector: 33 | app: spin 34 | cluster: spin-gate 35 | type: LoadBalancer -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT No Attribution 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this 4 | software and associated documentation files (the "Software"), to deal in the Software 5 | without restriction, including without limitation the rights to use, copy, modify, 6 | merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 7 | permit persons to whom the Software is furnished to do so. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 10 | INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 11 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 12 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 13 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 14 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
15 | -------------------------------------------------------------------------------- /resources/cloudformation/spinnaker-managed.yaml: -------------------------------------------------------------------------------- 1 | ## Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | ## SPDX-License-Identifier: MIT-0 3 | 4 | AWSTemplateFormatVersion: '2010-09-09' 5 | Description: Setup AWS CloudProvider for Spinnaker 6 | Parameters: 7 | AuthArn: 8 | Description: ARN which Spinnaker is using. It should be the ARN either of the IAM user or the EC2 Instance Role, which is used by Spinnaker in Managing Account 9 | Type: String 10 | ManagingAccountId: 11 | Description: AWS Account number, in which Spinnaker is running 12 | Type: String 13 | 14 | Resources: 15 | 16 | SpinnakerManagedRole: 17 | Type: AWS::IAM::Role 18 | Properties: 19 | RoleName: SpinnakerManaged 20 | AssumeRolePolicyDocument: 21 | Statement: 22 | - Action: 23 | - sts:AssumeRole 24 | Effect: Allow 25 | Principal: 26 | AWS: !Ref AuthArn 27 | Version: '2012-10-17' 28 | ManagedPolicyArns: 29 | - arn:aws:iam::aws:policy/PowerUserAccess 30 | 31 | SpinnakerManagedPolicy: 32 | Type: AWS::IAM::Policy 33 | Properties: 34 | Roles: 35 | - !Ref SpinnakerManagedRole 36 | PolicyDocument: 37 | Version: '2012-10-17' 38 | Statement: 39 | - Action: iam:PassRole 40 | Effect: Allow 41 | Resource: "*" # You should restrict this only to certain set of roles, if required 42 | PolicyName: SpinnakerPassRole 43 | 44 | SpinnakerBaseIAMRole: 45 | Type: AWS::IAM::Role 46 | Properties: 47 | RoleName: BaseIAMRole 48 | AssumeRolePolicyDocument: 49 | Version: '2012-10-17' 50 | Statement: 51 | - Effect: Allow 52 | Principal: 53 | Service: 54 | - ec2.amazonaws.com 55 | Action: 56 | - sts:AssumeRole 57 | Path: "/" 58 | 59 | SpinnakerBaseInstanceProfile: 60 | Type: AWS::IAM::InstanceProfile 61 | Properties: 62 | Path: "/" 63 | Roles: 64 | - !Ref SpinnakerBaseIAMRole 65 | InstanceProfileName: BaseIAMRole 66 | 67 | Outputs: 68 | 
69 | SpinnakerManagedRole: 70 | Description: Spinnaker Managed Role 71 | Value: !Ref SpinnakerManagedRole 72 | 73 | SpinnakerManagedPolicy: 74 | Description: Spinnaker Managed Policy 75 | Value: !Ref SpinnakerManagedPolicy -------------------------------------------------------------------------------- /scripts/create_spinnaker_managed.sh: -------------------------------------------------------------------------------- 1 | ## Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | ## SPDX-License-Identifier: MIT-0 3 | #!/bin/bash -e 4 | 5 | usage() { 6 | cat """ 7 | Usage: $0 8 | -p Managed profile, profile that will be managed by Spinnaker 9 | -m Managing profile, the profile that has Spinnaker residing in it and that will manage other accounts 10 | -a Authentication ARN, this the ARN that will assume the spinnakerManaged role in the managed accounts 11 | -h Help, print this help message 12 | """ 1>&2; exit 1; 13 | } 14 | 15 | while getopts ":p:m:a:" o; do 16 | case "${o}" in 17 | p) 18 | MANAGED_PROFILE=${OPTARG} 19 | ;; 20 | m) 21 | MANAGING_PROFILE=${OPTARG} 22 | ;; 23 | a) 24 | AUTH_ARN=${OPTARG} 25 | ;; 26 | *) 27 | usage 28 | ;; 29 | esac 30 | done 31 | shift $((OPTIND-1)) 32 | 33 | if [ -z "${AUTH_ARN}" ]; then 34 | usage 35 | exit 1 36 | fi 37 | 38 | mkdir -p resources 39 | if [ -z "${MANAGING_PROFILE}" ]; then 40 | MANAGING_ID=$(aws sts get-caller-identity --query Account --output text) 41 | else 42 | MANAGING_ID=$(aws --profile ${MANAGING_PROFILE} sts get-caller-identity --query Account --output text) 43 | fi 44 | 45 | PARAM_STRING="ParameterKey=AuthArn,ParameterValue=${AUTH_ARN} ParameterKey=ManagingAccountId,ParameterValue=${MANAGING_ID}" 46 | 47 | 48 | if [ -z "${MANAGED_PROFILE}" ]; then 49 | if aws cloudformation describe-stacks --stack-name spinnaker-managed-${MANAGING_ID}; then 50 | echo "Managed role already created" 51 | else 52 | aws cloudformation create-stack --stack-name spinnaker-managed-${MANAGING_ID} --template-body 
"$(cat resources/cloudformation/spinnaker-managed.yaml)" \ 53 | --parameters ${PARAM_STRING} \ 54 | --capabilities CAPABILITY_NAMED_IAM 55 | fi 56 | else 57 | if aws --profile ${MANAGED_PROFILE} cloudformation describe-stacks --stack-name spinnaker-managed-${MANAGING_ID}; then 58 | echo "Managed role already created" 59 | else 60 | aws --profile ${MANAGED_PROFILE} cloudformation create-stack --stack-name spinnaker-managed-${MANAGING_ID} --template-body "$(cat resources/cloudformation/spinnaker-managed.yaml)" \ 61 | --parameters ${PARAM_STRING} \ 62 | --capabilities CAPABILITY_NAMED_IAM 63 | fi 64 | fi -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check [existing open](https://github.com/aws-samples/aws-deploy-spinnaker-halyard/issues), or [recently closed](https://github.com/aws-samples/aws-deploy-spinnaker-halyard/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. 
Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *master* branch. 27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 38 | 39 | GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/aws-samples/aws-deploy-spinnaker-halyard/labels/help%20wanted) issues is a great place to start. 
45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 51 | 52 | 53 | ## Security issue notifications 54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. 55 | 56 | 57 | ## Licensing 58 | 59 | See the [LICENSE](https://github.com/aws-samples/aws-deploy-spinnaker-halyard/blob/master/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 60 | 61 | We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. 62 | -------------------------------------------------------------------------------- /scripts/cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -ex 2 | ## Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
3 | ## SPDX-License-Identifier: MIT-0 4 | 5 | usage() { 6 | cat """ 7 | Usage: $0 8 | -k (Required) Kubernetes cluster name, the name of the Kubernetes cluster to be created 9 | -f (Required) Security group ID to be cleaned up, this is the one you made manually 10 | -d (Boolean) Set to true to delete the spinnaker data bucket contents 11 | -h Help, print this help message 12 | """ 1>&2; exit 1; 13 | } 14 | 15 | while getopts "k:f:d:" o; do 16 | case "${o}" in 17 | k) 18 | K8S_NAME=${OPTARG} 19 | ;; 20 | f) 21 | SG_ID=${OPTARG} 22 | ;; 23 | d) 24 | FORCE_DELETE=${OPTARG} 25 | ;; 26 | h) 27 | usage 28 | ;; 29 | *) 30 | usage 31 | ;; 32 | 33 | esac 34 | done 35 | 36 | if [ -z "${K8S_NAME}" ]; then 37 | K8S_NAME=spinnaker-infra 38 | fi 39 | 40 | GATE_ADDRESS=$(kubectl describe svc gate-lb -n spinnaker | grep LoadBalancer\ Ingress | awk '{print $3}') 41 | DECK_ADDRESS=$(kubectl describe svc deck-lb -n spinnaker | grep LoadBalancer\ Ingress | awk '{print $3}') 42 | GATE_LB=$(echo ${GATE_ADDRESS} | cut -d "-" -f1) 43 | DECK_LB=$(echo ${DECK_ADDRESS} | cut -d "-" -f1) 44 | if [ ! -z "${GATE_LB}" ] && [ ! 
-z "${DECK_LB}" ]; then 45 | for LB in "${GATE_LB}" "${DECK_LB}"; do 46 | aws elb apply-security-groups-to-load-balancer --load-balancer-name ${LB} --security-groups "" 47 | done 48 | fi 49 | VPC_ID=$(aws cloudformation describe-stacks --stack-name spin-eks-ec2-vpc --query 'Stacks[0].Outputs[?OutputKey==`EKSVpcId`].OutputValue' --output text) 50 | 51 | kubectl delete svc gate-lb -n spinnaker || echo "gate-lb already gone" 52 | kubectl delete svc deck-lb -n spinnaker || echo "deck-lb already gone" 53 | 54 | sleep 30 55 | 56 | ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) 57 | aws cloudformation delete-stack --stack-name spinnaker-managed-${ACCOUNT_ID} 58 | aws cloudformation wait stack-delete-complete --stack-name spinnaker-managed-${ACCOUNT_ID} 59 | aws cloudformation delete-stack --stack-name spinnaker-infra-eks-nodes 60 | aws cloudformation wait stack-delete-complete --stack-name spinnaker-infra-eks-nodes 61 | aws ec2 delete-security-group --group-id ${SG_ID} || echo "security group already gone" 62 | sleep 5 63 | DEFAULT=$(aws ec2 describe-security-groups --filter Name="vpc-id",Values="${VPC_ID}" --query "SecurityGroups[?GroupName=='default']|[].GroupId" --output text) 64 | EKS_SG=$(aws cloudformation describe-stacks --stack-name spin-eks-ec2-vpc --query 'Stacks[0].Outputs[?OutputKey==`EKSSecurityGroups`].OutputValue' --output text) 65 | if [ "$(uname)" = "Darwin" ]; then 66 | aws ec2 describe-security-groups --filters Name="vpc-id",Values="${VPC_ID}" --query SecurityGroups[].GroupId --output text | \ 67 | tr "\t" "\n" | grep -v ${DEFAULT} | grep -v ${EKS_SG} | xargs -I {} sh -c "aws ec2 delete-security-group --group-id {} && sleep 5" 68 | else 69 | aws ec2 describe-security-groups --filters Name="vpc-id",Values="${VPC_ID}" --query SecurityGroups[].GroupId --output text | \ 70 | tr "\t" "\n" | grep -v ${DEFAULT} | grep -v ${EKS_SG} | xargs -I {} sh -c "aws ec2 delete-security-group --group-id {} && sleep 5" 71 | fi 72 | 73 | if [ 
"${FORCE_DELETE}" = "true" ]; then 74 | echo "Deleting contents of spinnaker data bucket" 75 | BUCKET=$(aws cloudformation describe-stacks --stack-name spin-eks-ec2-vpc --query 'Stacks[0].Outputs[?OutputKey==`SpinnakerDataBucket`].OutputValue' --output text) 76 | aws s3 rm s3://${BUCKET} --recursive 77 | aws cloudformation delete-stack --stack-name spin-eks-ec2-vpc 78 | aws cloudformation wait stack-delete-complete --stack-name spin-eks-ec2-vpc 79 | exit 0 80 | else 81 | echo "You did not specify force deleting the s3 bucket, exiting" 82 | exit 0 83 | fi 84 | 85 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Halyard deploy 2 | 3 | This repo is intended to: 4 | 5 | 1. Create an EKS cluster for Spinnaker to be deployed to 6 | 1. Deploy Spinnaker using halyard 7 | 8 | This is mostly for demo environment purposes, and there are some overly permissive IAM roles in places. If you wish to run this in production, you should modify the permissive roles to be more restrictive. This is intended to run as-is in a brand new AWS account. 9 | 10 | # Pre-requisites 11 | 12 | This repository assumes you have a new AWS account and wish to test Spinnaker out, you will need: 13 | 14 | 1. AWS CLI credentials setup for a user with at least Administrator access to create resources 15 | 1. Access to create EC2 security groups 16 | 17 | # Quick Start 18 | 19 | 1. Fork this repository on GitHub (or CodeCommit) 20 | 2. Run the following from a terminal with aws cli access to your account (change GITHUB to CODECOMMIT if code is uploaded there). Ensure you run this in us-west-2 as the AMI is only available there. 
21 | 22 | ``` 23 | ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) 24 | aws cloudformation create-stack --stack-name codebuild-projects \ 25 | --template-body "$(cat resources/cloudformation/codebuild-projects.yaml)" \ 26 | --parameters ParameterKey=CodeBuildArtifactsBucketName,ParameterValue=codebuild-artifacts-${ACCOUNT_ID} \ 27 | ParameterKey=SourceLocation,ParameterValue=https://github.com/aws-samples/aws-deploy-spinnaker-halyard \ 28 | ParameterKey=SourceType,ParameterValue=GITHUB \ 29 | --capabilities CAPABILITY_NAMED_IAM 30 | aws ec2 create-key-pair --key-name spinnaker-eks-keypair 31 | ``` 32 | 3. Navigate to CodeBuild 33 | 4. Start the create-eks CodeBuild project 34 | 5. Create a security group in the EKS-VPC to lock down the Spinnaker load balancers; take note of the security group id. 35 | 6. Start the deploy-spinnaker CodeBuild project, fill in the environment variable "SECURITY_GROUP_ID" with the security group id from the previous step (replacing the "false" default) 36 | 37 | Spinnaker will be available at the UI/Deck address emitted at the end of the deploy-spinnaker CodeBuild job. 38 | 39 | # Cleaning up 40 | 41 | The CodeBuild project "cleanup-infrastructure" will delete all objects associated with all the cloudformation stacks in this project except the CodeBuild projects stack. For the stack to delete *everything* you must specify the FORCE_DELETE parameter to true; this will empty the Spinnaker infra bucket of data before deleting the CloudFormation stack that defines the Spinnaker data bucket. This is, at the moment, a best effort; there might be resources created by Spinnaker or other processes that will need to be manually deleted before the Spinnaker CloudFormation can be deleted. 
42 | 43 | # Accessing EKS 44 | 45 | You will need to add your user ARN to the EKS-Admin role; once this is done you can download the EKS kubeconfig with the following command 46 | 47 | ```bash 48 | ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) 49 | aws s3 cp s3://codebuild-artifacts-${ACCOUNT_ID}/create-eks/files/resources/kubernetes/kubeconfig.yaml /tmp/kube/config 50 | export KUBECONFIG=/tmp/kube/config 51 | kubectl get pods -n spinnaker 52 | ``` 53 | 54 | Once it is downloaded you can run kubectl commands as normal to read and output logs and see pod status. 55 | 56 | # Exposing Services 57 | 58 | There are two methods in this repository that can expose the Spinnaker services on load balancers, one uses a user-provided security group that is locked down. These are controlled via environment variables in the deploy-spinnaker CodeBuild project. The second method is using SSM to store security information that can be used to lock down the Spinnaker installation even further. See details in the deploy_spinnaker.sh script. 59 | 60 | # Modifying the Spinnaker installation 61 | 62 | If you need to tweak the halyard settings that are applied to the Spinnaker installation this can be accomplished by modifying the `deploy_spinnaker.sh` script. Once modified you can upload your changes to the source control, and then rerun the deploy-spinnaker CodeBuild job to apply the changes. 63 | 64 | # Updating Spinnaker Release Version 65 | 66 | The default version deployed by this repository will be updated periodically, if you wish to try out a newer version than this repository defaults to, the deploy-spinnaker CodeBuild job takes a Spinnaker release version as a parameter. This can either be a SemVer version number or `master-latest-unvalidated` 67 | 68 | # Known Issues 69 | 70 | Occasionally we will fill in known issues with the chosen version of Spinnaker that this repository deploys. 
Issues that have been fixed can be found in the Spinnaker changelogs here: 71 | 72 | https://www.spinnaker.io/community/releases/versions/ 73 | 74 | # Feedback 75 | 76 | This repository is meant to be an easy method of deploying Spinnaker to a brand new AWS account for demo purposes. Not all use cases are meant to be covered, but if new use cases can be added without making the repository difficult to use, then they are more than welcome. You can submit changes or fixes to this repository by submitting a pull request on this repository. We will review and provide feedback, we might need further follow up from pull request authors to make changes. 77 | 78 | 79 | 80 | 81 | 82 | 83 | -------------------------------------------------------------------------------- /resources/cloudformation/codebuild-projects.yaml: -------------------------------------------------------------------------------- 1 | ## Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | ## SPDX-License-Identifier: MIT-0 3 | 4 | --- 5 | AWSTemplateFormatVersion: '2010-09-09' 6 | Description: 'CodeBuild Projects for EKS+Spinnaker' 7 | 8 | Parameters: 9 | 10 | CodeBuildArtifactsBucketName: 11 | Type: String 12 | Default: codebuild-artifacts 13 | Description: Name for the bucket where CodeBuild will save output artifacts 14 | 15 | SourceLocation: 16 | Type: String 17 | Default: https://git-codecommit.us-west-2.amazonaws.com/v1/repos/myrepowithcode 18 | Description: Location of source code for the create-eks and deploy spinnaker codebuild projects 19 | 20 | SourceType: 21 | Type: String 22 | Default: GITHUB 23 | AllowedValues: 24 | - GITHUB 25 | - CODECOMMIT 26 | 27 | Resources: 28 | 29 | CreateEKSSpinnakerRole: 30 | Type: AWS::IAM::Role 31 | Properties: 32 | RoleName: create-eks-spinnaker 33 | AssumeRolePolicyDocument: 34 | Statement: 35 | - Action: 36 | - sts:AssumeRole 37 | Effect: Allow 38 | Principal: 39 | Service: codebuild.amazonaws.com 40 | Version: '2012-10-17' 41 | 
ManagedPolicyArns: 42 | - arn:aws:iam::aws:policy/PowerUserAccess 43 | - arn:aws:iam::aws:policy/IAMFullAccess 44 | 45 | CodeBuildArtifactsBucket: 46 | Type: "AWS::S3::Bucket" 47 | Properties: 48 | BucketName: !Ref CodeBuildArtifactsBucketName 49 | Tags: 50 | - 51 | Key: cloudformation-stack 52 | Value: !Ref AWS::StackId 53 | 54 | CreateEKSProject: 55 | Type: AWS::CodeBuild::Project 56 | Properties: 57 | Artifacts: 58 | Name: files 59 | Location: !Ref CodeBuildArtifactsBucket 60 | OverrideArtifactName: true 61 | NamespaceType: NONE 62 | Packaging: NONE 63 | Path: create-eks 64 | Type: S3 65 | BadgeEnabled: true 66 | Cache: 67 | Type: NO_CACHE 68 | Description: Creates and EKS cluster and supporting infrastructure/roles/policies for managing a Spinnaker instance 69 | EncryptionKey: 70 | Fn::Join: [ "", [ "arn:aws:kms:", { Ref: "AWS::Region" }, ":", { Ref: "AWS::AccountId" }, ":alias/aws/s3"]] 71 | Environment: 72 | ComputeType: BUILD_GENERAL1_SMALL 73 | Image: infrastructureascode/aws-cli 74 | PrivilegedMode: false 75 | Type: LINUX_CONTAINER 76 | EnvironmentVariables: 77 | - 78 | Name: K8S_NAME 79 | Type: PLAINTEXT 80 | Value: "spinnaker-infra" 81 | - 82 | Name: K8S_KEYPAIR 83 | Type: PLAINTEXT 84 | Value: "spinnaker-eks-keypair" 85 | - 86 | Name: BUCKET_NAME 87 | Type: PLAINTEXT 88 | Value: "spinnaker-infra" 89 | Name: create-eks 90 | ServiceRole: !Ref CreateEKSSpinnakerRole 91 | Source: 92 | BuildSpec: buildspec-infra.yaml 93 | GitCloneDepth: 1 94 | InsecureSsl: false 95 | Location: !Ref SourceLocation 96 | Type: !Ref SourceType 97 | Tags: 98 | - 99 | Key: cloudformation-stack 100 | Value: !Ref AWS::StackId 101 | TimeoutInMinutes: 45 102 | 103 | DeploySpinnakerProject: 104 | Type: AWS::CodeBuild::Project 105 | Properties: 106 | Artifacts: 107 | Type: NO_ARTIFACTS 108 | BadgeEnabled: true 109 | Cache: 110 | Type: NO_CACHE 111 | Description: Deploys Spinnaker via Halyard 112 | EncryptionKey: 113 | Fn::Join: [ "", [ "arn:aws:kms:", { Ref: "AWS::Region" }, ":", { Ref: 
"AWS::AccountId" }, ":alias/aws/s3"]] 114 | Environment: 115 | ComputeType: BUILD_GENERAL1_SMALL 116 | Image: gcr.io/spinnaker-marketplace/halyard:stable 117 | PrivilegedMode: false 118 | Type: LINUX_CONTAINER 119 | EnvironmentVariables: 120 | - 121 | Name: SECURITY_GROUP_ID 122 | Type: PLAINTEXT 123 | Value: false 124 | - 125 | Name: USE_SSM 126 | Type: PLAINTEXT 127 | Value: false 128 | - 129 | Name: SPINNAKER_VERSION 130 | Type: PLAINTEXT 131 | Value: 1.10.2 132 | Name: deploy-spinnaker 133 | ServiceRole: !Ref CreateEKSSpinnakerRole 134 | Source: 135 | BuildSpec: buildspec-deploy.yaml 136 | GitCloneDepth: 1 137 | InsecureSsl: false 138 | Location: !Ref SourceLocation 139 | Type: !Ref SourceType 140 | Tags: 141 | - 142 | Key: cloudformation-stack 143 | Value: !Ref AWS::StackId 144 | TimeoutInMinutes: 45 145 | 146 | CleanUpProject: 147 | Type: AWS::CodeBuild::Project 148 | Properties: 149 | Artifacts: 150 | Type: NO_ARTIFACTS 151 | BadgeEnabled: true 152 | Cache: 153 | Type: NO_CACHE 154 | Description: Cleanup infrastructure deployed by aws-deploy-spinnaker-halyard 155 | EncryptionKey: 156 | Fn::Join: [ "", [ "arn:aws:kms:", { Ref: "AWS::Region" }, ":", { Ref: "AWS::AccountId" }, ":alias/aws/s3"]] 157 | Environment: 158 | ComputeType: BUILD_GENERAL1_SMALL 159 | Image: infrastructureascode/aws-cli 160 | PrivilegedMode: false 161 | Type: LINUX_CONTAINER 162 | EnvironmentVariables: 163 | - 164 | Name: SECURITY_GROUP_ID 165 | Type: PLAINTEXT 166 | Value: "" 167 | - 168 | Name: FORCE_DELETE 169 | Type: PLAINTEXT 170 | Value: "false" 171 | Name: cleanup-infrastructure 172 | ServiceRole: !Ref CreateEKSSpinnakerRole 173 | Source: 174 | BuildSpec: buildspec-cleanup.yaml 175 | GitCloneDepth: 1 176 | InsecureSsl: false 177 | Location: !Ref SourceLocation 178 | Type: !Ref SourceType 179 | Tags: 180 | - 181 | Key: cloudformation-stack 182 | Value: !Ref AWS::StackId 183 | TimeoutInMinutes: 45 184 | 185 | Outputs: 186 | 187 | CodeBuildArtifactsBucket: 188 | Description: Bucket 
#!/bin/bash -ex
## Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
## SPDX-License-Identifier: MIT-0
IFS=" "

# Start the Halyard daemon in the background; the `hal` commands below talk to it.
/opt/halyard/bin/halyard > /dev/null 2>&1 &

usage() {
    # FIX: the original used `cat """…"""`, which hands the usage text to cat
    # as a (nonexistent) file NAME, so the help message never printed.
    # A heredoc to stderr prints it as intended.
    cat >&2 <<USAGE
Usage: $0
    -S SSM Secrets, the script will attempt to pull secrets from SSM to fill in authentication and other settings from SSM
    -g Github Organization, if set and other AUTHN and AUTHZ secrets set this is the org to be used for AUTHN and AUTHZ in Spinnaker
    -r AWS Region, where the S3 bucket for Spinnaker is located
    -f Load balancer security group, places the security group provided on the AWS load balancers to lock them down.
    -s Spinnaker Version number
    -h Help, print this help message
USAGE
    exit 1
}

# FIX: "h" was missing from the getopts spec, so -h only reached usage() via
# the catch-all branch (after getopts had already complained about an
# illegal option).
while getopts "S:g:r:f:s:h" o; do
    case "${o}" in
        S)
            USE_SSM_FOR_SECRETS=${OPTARG}
            ;;
        g)
            GITHUB_ORG=${OPTARG}
            ;;
        r)
            REGION=${OPTARG}
            ;;
        f)
            LB_SG=${OPTARG}
            ;;
        s)
            SPINNAKER_VERSION=${OPTARG}
            ;;
        h)
            usage
            ;;
        *)
            usage
            ;;
    esac
done

EKS_EC2_VPC_STACK_NAME="spin-eks-ec2-vpc"

if [ -z "${REGION}" ]; then
    REGION="us-west-2"
fi

# When -S is given, authentication material comes from SSM Parameter Store and
# any -f security group is ignored (the SSM prefix list is used instead).
if [ "${USE_SSM_FOR_SECRETS}" == true ]; then
    LB_SG=""
    AUTHN_CLIENT_ID=$(aws ssm get-parameters --names github-authn-client-id --with-decryption --query Parameters[0].Value --output text)
    AUTHN_CLIENT_SECRET=$(aws ssm get-parameters --names github-authn-client-secret --with-decryption --query Parameters[0].Value --output text)
    AUTHZ_ACCESS_TOKEN=$(aws ssm get-parameters --names github-authz-token --with-decryption --query Parameters[0].Value --output text)
    GITHUB_ORG=$(aws ssm get-parameters --names github-org --with-decryption --query Parameters[0].Value --output text)
    PREFIX_LIST=$(aws ssm get-parameters --names sg-prefix-list --with-decryption --query Parameters[0].Value --output text)
    # `aws ssm get-parameters` prints the literal string "None" for a missing
    # parameter rather than failing, hence the explicit checks.
    if [ "${AUTHN_CLIENT_ID}" = "None" ] || [ "${AUTHN_CLIENT_SECRET}" = "None" ] || [ "${AUTHZ_ACCESS_TOKEN}" = "None" ] || [ "${PREFIX_LIST}" = "None" ]; then
        echo "One of github-authn-client-id, github-authn-client-secret, github-authz-token, or sg-prefix-list is not in SSM"
        exit 1
    fi
fi

# The default VPC (172.31.0.0/16) is used as the Rosco/packer baking VPC.
BAKING_VPC=$(aws ec2 describe-vpcs --filters Name=cidr,Values=172.31.0.0/16 --query Vpcs[0].VpcId --output text)
ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
# The stack output is the bucket ARN; the 6th ":"-field of the ARN is the name.
SPINNAKER_BUCKET=$(aws cloudformation describe-stacks --stack-name ${EKS_EC2_VPC_STACK_NAME} --query 'Stacks[0].Outputs[?OutputKey==`SpinnakerDataBucket`].OutputValue' --output text | cut -d ":" -f6)
SPINNAKER_MANAGED_ROLE="role/SpinnakerManaged"

echo "Creating some kubernetes resources before running halyard"
kubectl apply -f resources/kubernetes/lb-services.yaml -n spinnaker
kubectl apply -f resources/kubernetes/spinnaker-k8s-role.yaml

# Poll until both ELBs created for the deck/gate services have hostnames.
GATE_ADDRESS=$(kubectl describe svc gate-lb -n spinnaker | grep LoadBalancer\ Ingress | awk '{print $3}')
DECK_ADDRESS=$(kubectl describe svc deck-lb -n spinnaker | grep LoadBalancer\ Ingress | awk '{print $3}')
until [ "${GATE_ADDRESS}" != "" ]; do
    GATE_ADDRESS=$(kubectl describe svc gate-lb -n spinnaker | grep LoadBalancer\ Ingress | awk '{print $3}')
    sleep 30
done
until [ "${DECK_ADDRESS}" != "" ]; do
    DECK_ADDRESS=$(kubectl describe svc deck-lb -n spinnaker | grep LoadBalancer\ Ingress | awk '{print $3}')
    # FIX: this loop had no sleep, so it hammered the API server in a busy
    # loop while waiting for the deck load balancer to appear.
    sleep 30
done

# ELB names are the leading "-"-delimited token of the generated hostname.
GATE_LB=$(echo ${GATE_ADDRESS} | cut -d "-" -f1)
DECK_LB=$(echo ${DECK_ADDRESS} | cut -d "-" -f1)
GATE_SG=$(aws elb describe-load-balancers --load-balancer-names ${GATE_LB} --query LoadBalancerDescriptions[0].SecurityGroups[0] --output text)
DECK_SG=$(aws elb describe-load-balancers --load-balancer-names ${DECK_LB} --query LoadBalancerDescriptions[0].SecurityGroups[0] --output text)

if [ ! -z "${PREFIX_LIST}" ]; then
    # Lock the gate/deck security groups down from 0.0.0.0/0 to the managed
    # prefix list retrieved from SSM.
    for SG in "${GATE_SG}" "${DECK_SG}"; do
        aws ec2 revoke-security-group-ingress --group-id ${SG} --protocol tcp --port 80 --cidr 0.0.0.0/0 || true
        # FIX: the prefix list id was hardcoded as "pl-f8a64391" even though
        # the value is fetched from SSM above; use ${PREFIX_LIST}.
        aws ec2 describe-security-groups --group-ids ${SG} | grep ${PREFIX_LIST} && echo "Found prefix list, skipping adding exception" || \
            aws ec2 authorize-security-group-ingress --group-id ${SG} --ip-permissions "[{\"FromPort\":80,\"IpProtocol\":\"tcp\",\"PrefixListIds\":[{\"Description\":\"prefix-list-restriction\",\"PrefixListId\":\"${PREFIX_LIST}\"}],\"ToPort\":80}]"
    done
elif [ ! -z "${LB_SG}" ]; then
    # Replace the auto-generated ingress rules by attaching the operator-supplied
    # security group to each load balancer and revoking the open rule on the
    # previously attached groups.
    for LB in "${GATE_LB}" "${DECK_LB}"; do
        PREV_GROUPS=$(aws elb describe-load-balancers --load-balancer-names ${LB} --query LoadBalancerDescriptions[0].SecurityGroups[*] --output text | tr "\t" " ")
        NEW_GROUPS=""
        for GRP in ${PREV_GROUPS}; do
            if [ "${GRP}" = "${LB_SG}" ]; then
                echo "Do nothing"
            else
                NEW_GROUPS="${NEW_GROUPS} ${GRP}"
            fi
        done
        NEW_GROUPS=$(echo ${NEW_GROUPS} | sed -e 's/^[ \t]*//')
        NEW_GROUPS="${NEW_GROUPS} ${LB_SG}"
        aws elb apply-security-groups-to-load-balancer --load-balancer-name ${LB} --security-groups ${NEW_GROUPS}
        for PREV_GROUP in ${PREV_GROUPS}; do
            if [ "${PREV_GROUP}" != "${LB_SG}" ]; then
                aws ec2 revoke-security-group-ingress --group-id ${PREV_GROUP} --protocol tcp --port 80 --cidr 0.0.0.0/0 || true
            fi
        done
    done
fi

sleep 30

echo "Executing Halyard commands to create a Halyard configuration file"
hal --color false config provider aws account add my-aws-account \
    --account-id ${ACCOUNT_ID} \
    --assume-role ${SPINNAKER_MANAGED_ROLE} \
    --regions us-west-2

hal --color false config provider aws bakery edit --aws-vpc-id ${BAKING_VPC}
hal --color false config provider aws enable

hal --color false config provider kubernetes account add my-k8s-account --provider-version v2 --context spinnaker-context --namespaces default,spinnaker
hal --color false config features edit --artifacts true
hal --color false config provider kubernetes enable

hal --color false config provider ecs account add my-ecs-account --aws-account my-aws-account
hal --color false config provider ecs enable

hal --color false config storage s3 edit \
    --bucket ${SPINNAKER_BUCKET} \
    --region ${REGION}

hal --color false config storage edit --type s3

hal --color false config security ui edit --override-base-url http://${DECK_ADDRESS}
hal --color false config security api edit --override-base-url http://${GATE_ADDRESS}

if [ ! -z "${AUTHN_CLIENT_ID}" ] && [ ! -z "${AUTHN_CLIENT_SECRET}" ] && [ ! -z "${AUTHZ_ACCESS_TOKEN}" ] && [ ! -z "${GITHUB_ORG}" ]; then
    hal --color false config security authn oauth2 edit \
        --client-id ${AUTHN_CLIENT_ID} \
        --client-secret ${AUTHN_CLIENT_SECRET} \
        --provider github
    hal --color false config security authn oauth2 enable
    ## Once this https://github.com/spinnaker/spinnaker/issues/3154 is fixed we can use just run the commands
    # sed -ie "s|roleProviderType:\ GITHUB|roleProviderType:\ GITHUB\n      baseUrl: https://api.github.com\n      accessToken: ${AUTHZ_ACCESS_TOKEN}\n      organization: ${GITHUB_ORG}|g" /home/spinnaker/.hal/config
    # hal --color false config security authz enable
fi

hal --color false config deploy edit --type distributed --account-name my-k8s-account

hal --color false config version edit --version ${SPINNAKER_VERSION}

#mkdir -p /home/spinnaker/.hal/default/service-settings
#cp resources/halyard/deck.yml /home/spinnaker/.hal/default/service-settings/deck.yml

hal --color false deploy apply

set +x
echo "The Spinnaker UI (deck) should be accessible at the following address: ${DECK_ADDRESS}"
echo "The Spinnaker API server (gate) should be at the following address: ${GATE_ADDRESS}"
#!/bin/bash -ex
## Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
## SPDX-License-Identifier: MIT-0

usage() {
    # FIX: the original used `cat """…"""`, which hands the usage text to cat
    # as a (nonexistent) file NAME, so the help message never printed.
    cat >&2 <<USAGE
Usage: $0
    -m Managing profile, the AWS cli profile where the EKS cluster will be deployed
    -k (Required) Kubernetes cluster name, the name of the Kubernetes cluster to be created
    -s (Required) Kubernetes worker keypair, the keypair for the EC2 worker nodes
    -b (Required) Spinnaker bucket name, this must be globally unique
    -h Help, print this help message
USAGE
    exit 1
}

while getopts ":m:k:s:hb:" o; do
    case "${o}" in
        m)
            # FIX: the original exported AWS_PROFILE here but the guard below
            # tested the never-assigned MANAGING_PROFILE, so AWS_PROFILE was
            # always unset afterwards and -m was silently ignored.
            MANAGING_PROFILE=${OPTARG}
            export AWS_PROFILE=${MANAGING_PROFILE}
            ;;
        k)
            K8S_NAME=${OPTARG}
            ;;
        s)
            K8S_KEYPAIR=${OPTARG}
            ;;
        b)
            SPINNAKER_BUCKET=${OPTARG}
            ;;
        h)
            usage
            ;;
        *)
            usage
            ;;
    esac
done

if [ -z "${MANAGING_PROFILE}" ]; then
    echo "Missing managing profile: -m, assuming none"
    unset AWS_PROFILE
fi

if [ -z "${K8S_NAME}" ]; then
    echo "Missing Kubernetes cluster name: -k"
    usage
fi

if [ -z "${K8S_KEYPAIR}" ]; then
    echo "Missing Kubernetes worker keypair: -s"
    usage
fi

if [ -z "${SPINNAKER_BUCKET}" ]; then
    echo "Missing Spinnaker bucket name: -b"
    usage
fi

ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
# NOTE(review): AMI and instance type are pinned; the AMI is region-specific —
# confirm it matches the region being deployed to.
WORKER_AMI="ami-0e7ee8863c8536cce"
WORKER_TYPE="t2.large"
EKS_EC2_VPC_STACK_NAME="spin-eks-ec2-vpc"
EKS_WORKER_STACK_NAME="spinnaker-infra-eks-nodes"
CODEBUILD_STACK_NAME="codebuild-projects"

# Create the VPC/EKS/S3 stack if it does not already exist, then wait for it.
function createEKS {
    STACK_NAME=${1}
    SPINNAKER_BUCKET=${2}
    ACCOUNT_ID=${3}
    echo "Checking for and creating ${STACK_NAME}"
    aws cloudformation describe-stacks --stack-name ${STACK_NAME} && echo "${STACK_NAME} already exists" || \
        aws cloudformation create-stack \
            --stack-name ${STACK_NAME} \
            --template-body "$(cat resources/cloudformation/spinnaker-eks-ec2.yaml)" \
            --parameters "ParameterKey=SpinnakerBucketName,ParameterValue=${SPINNAKER_BUCKET}-${ACCOUNT_ID}" \
            --capabilities CAPABILITY_NAMED_IAM
    echo "Waiting for stack creation complete"
    aws cloudformation wait stack-create-complete --stack-name ${STACK_NAME}
    echo "Stack creation is now complete"
    unset STACK_NAME
}

# Create the EKS worker-node stack (ASG of EC2 nodes) if it does not exist.
function createEKSWorkers {
    EKS_WORKER_STACK_NAME=${1}
    K8S_NAME=${2}
    K8S_KEYPAIR=${3}
    WORKER_AMI=${4}
    WORKER_TYPE=${5}
    ACCOUNT_ID=${6}
    NETWORK_STACK_NAME=${7}
    echo "Creating EKS worker nodes"
    aws cloudformation describe-stacks --stack-name ${EKS_WORKER_STACK_NAME} && echo "stack ${EKS_WORKER_STACK_NAME} already exists" || \
        aws cloudformation deploy --stack-name ${EKS_WORKER_STACK_NAME} \
            --template-file resources/cloudformation/spinnaker-eks-nodegroup.yaml \
            --parameter-overrides ClusterName=${K8S_NAME} KeyName=${K8S_KEYPAIR} SpinnakerBucketName=${SPINNAKER_BUCKET}-${ACCOUNT_ID} \
                NodeGroupName=spinnaker-eks NodeImageId=${WORKER_AMI} NodeInstanceType=${WORKER_TYPE} NetworkStackName=${NETWORK_STACK_NAME} \
            --capabilities CAPABILITY_NAMED_IAM
    aws cloudformation wait stack-create-complete --stack-name ${EKS_WORKER_STACK_NAME}
}

# Render a kubeconfig from the templates. Without a 4th argument the plain
# template is used; with an EKS admin role ARN the role-assuming template is
# used and the previous kubeconfig is preserved as kubeconfig-no-role.yaml.
function renderKubeConfig {
    K8S_ENDPOINT=${1}
    CA_DATA=${2}
    K8S_NAME=${3}
    EKS_ADMIN_ARN=${4}
    mkdir -p resources/kubernetes/
    if [ -z "${EKS_ADMIN_ARN}" ]; then
        echo "Rendering without role iam access"
        sed -e "s|%%K8S_ENDPOINT%%|${K8S_ENDPOINT}|g;s|%%CA_DATA%%|${CA_DATA}|g;s|%%K8S_NAME%%|${K8S_NAME}|g" < templates/kubeconfig.tmpl.yaml > resources/kubernetes/kubeconfig.yaml
    else
        echo "Rendering with role iam access"
        mv resources/kubernetes/kubeconfig.yaml resources/kubernetes/kubeconfig-no-role.yaml
        sed -e "s|%%K8S_ENDPOINT%%|${K8S_ENDPOINT}|g;s|%%CA_DATA%%|${CA_DATA}|g;s|%%K8S_NAME%%|${K8S_NAME}|g;s|%%EKS_ADMIN_ARN%%|${EKS_ADMIN_ARN}|g" < templates/kubeconfig-with-role.tmpl.yaml > resources/kubernetes/kubeconfig.yaml
    fi
}

# Apply the aws-auth ConfigMap so the admin role, codebuild role, and worker
# node role can authenticate to the cluster. NOTE: this exports KUBECONFIG,
# which persists for the remainder of the script.
function updateKubeRoles {
    export KUBECONFIG=${1}
    EKS_ADMIN_ARN=${2}
    EKS_NODE_INSTANCE_ROLE_ARN=${3}
    CODEBUILD_ROLE_ARN=${4}
    if kubectl get svc; then
        echo "Have connectivity to kubernetes, updating with EKS admins role access and worker nodes"
        sed -e "s|%%EKS_ADMIN_ARN%%|${EKS_ADMIN_ARN}|g;s|%%CODEBUILD_ROLE_ARN%%|${CODEBUILD_ROLE_ARN}|g;s|%%EKS_NODE_INSTANCE_ROLE_ARN%%|${EKS_NODE_INSTANCE_ROLE_ARN}|g" < templates/aws-auth-cm.tmpl.yaml > resources/kubernetes/aws-auth-cm.yaml
        kubectl apply -f resources/kubernetes/aws-auth-cm.yaml
    fi
}

function main {
    createEKS ${EKS_EC2_VPC_STACK_NAME} ${SPINNAKER_BUCKET} ${ACCOUNT_ID}
    createEKSWorkers ${EKS_WORKER_STACK_NAME} ${K8S_NAME} ${K8S_KEYPAIR} ${WORKER_AMI} ${WORKER_TYPE} ${ACCOUNT_ID} ${EKS_EC2_VPC_STACK_NAME}
    EKS_NODE_INSTANCE_ROLE_ARN=$(aws cloudformation describe-stacks --stack-name ${EKS_WORKER_STACK_NAME} --query 'Stacks[0].Outputs[?OutputKey==`NodeInstanceRole`].OutputValue' --output text)
    K8S_ENDPOINT=$(aws eks describe-cluster --name ${K8S_NAME} --query 'cluster.endpoint' --output text)
    CA_DATA=$(aws eks describe-cluster --name ${K8S_NAME} --query 'cluster.certificateAuthority.data' --output text)
    # First render: plain kubeconfig used to bootstrap the aws-auth ConfigMap.
    renderKubeConfig ${K8S_ENDPOINT} ${CA_DATA} ${K8S_NAME}
    EKS_ADMIN_ROLE=$(aws cloudformation describe-stacks --stack-name ${EKS_EC2_VPC_STACK_NAME} --query 'Stacks[0].Outputs[?OutputKey==`EKSAdminRole`].OutputValue' --output text)
    EKS_ADMIN_ARN="arn:aws:iam::${ACCOUNT_ID}:role/${EKS_ADMIN_ROLE}"
    CODEBUILD_PROJECT_ROLE=$(aws cloudformation describe-stacks --stack-name ${CODEBUILD_STACK_NAME} --query 'Stacks[0].Outputs[?OutputKey==`CreateEKSSpinnakerRole`].OutputValue' --output text)
    CODEBUILD_ROLE_ARN="arn:aws:iam::${ACCOUNT_ID}:role/${CODEBUILD_PROJECT_ROLE}"
    updateKubeRoles resources/kubernetes/kubeconfig.yaml ${EKS_ADMIN_ARN} ${EKS_NODE_INSTANCE_ROLE_ARN} ${CODEBUILD_ROLE_ARN}
    # Second render: role-assuming kubeconfig, now that aws-auth trusts the role.
    renderKubeConfig ${K8S_ENDPOINT} ${CA_DATA} ${K8S_NAME} ${EKS_ADMIN_ARN}
    # NOTE(review): /sys/hypervisor/uuid only exists on Xen-based instances, so
    # this EC2 detection misses Nitro-based instance types — confirm the host
    # generation this is expected to run on.
    if [ -f /sys/hypervisor/uuid ] && [ $(head -c 3 /sys/hypervisor/uuid) == ec2 ]; then
        if KUBECONFIG=resources/kubernetes/kubeconfig-no-role.yaml kubectl get nodes; then
            echo "If you see nodes here, congrats"
        else
            echo "Something went horribly wrong"
        fi
        CONTEXT="spinnaker"
        export KUBECONFIG=resources/kubernetes/kubeconfig-no-role.yaml
        kubectl describe namespace spinnaker && echo "Namespace already exists" || kubectl create namespace spinnaker
        kubectl apply -f resources/kubernetes/spinnaker-k8s-role.yaml
        # Extract the service-account bearer token Spinnaker will authenticate with.
        TOKEN=$(kubectl get secret \
            $(kubectl get serviceaccount spinnaker-service-account \
                -n spinnaker \
                -o jsonpath='{.secrets[0].name}') \
            -n spinnaker \
            -o jsonpath='{.data.token}' | base64 -d)
        # FIX: this set-credentials call was duplicated verbatim in the original.
        kubectl config set-credentials ${CONTEXT}-token-user --token ${TOKEN}
        kubectl config set-context spinnaker-context --cluster=kubernetes --user=${CONTEXT}-token-user
    else
        if KUBECONFIG=resources/kubernetes/kubeconfig.yaml kubectl get nodes; then
            echo "Running with role, if you see nodes here, congrats"
        else
            echo "Something went horrible wrong with a role"
        fi
    fi
    ./scripts/create_spinnaker_managed.sh -a ${EKS_NODE_INSTANCE_ROLE_ARN}
    # We need to create a load balancer and delete it so we can make EKS lbs
    # (forces creation of the AWSServiceRoleForElasticLoadBalancing role).
    SUBNET_IDS=$(aws cloudformation describe-stacks --stack-name ${EKS_EC2_VPC_STACK_NAME} --query 'Stacks[0].Outputs[?OutputKey==`EKSSubnetIds`].OutputValue' --output text)
    SUBNET_ID=$(echo "${SUBNET_IDS}" | cut -d "," -f 1)
    aws elb create-load-balancer --load-balancer-name temp-lb-${ACCOUNT_ID} --listeners "Protocol=HTTP,LoadBalancerPort=80,InstanceProtocol=HTTP,InstancePort=80" --subnets "${SUBNET_ID}"
    aws elb delete-load-balancer --load-balancer-name temp-lb-${ACCOUNT_ID}
}

main
## Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
## SPDX-License-Identifier: MIT-0

## Creates two public VPCs (one for the EKS cluster, one for EC2 deployments),
## the EKS cluster itself, its IAM roles/instance profiles, and the S3 bucket
## Spinnaker uses for persistent storage.

---
AWSTemplateFormatVersion: '2010-09-09'
Description: 'Amazon EKS + Spinnaker Sample VPC'

Parameters:

  KubeVpcBlock:
    Type: String
    Default: 192.168.0.0/16
    Description: The CIDR range for the EKS VPC. This should be a valid private (RFC 1918) CIDR range.

  KubeSubnet01Block:
    Type: String
    Default: 192.168.64.0/18
    Description: CidrBlock for subnet 01 within the EKS VPC

  KubeSubnet02Block:
    Type: String
    Default: 192.168.128.0/18
    Description: CidrBlock for subnet 02 within the EKS VPC

  KubeSubnet03Block:
    Type: String
    Default: 192.168.192.0/18
    Description: CidrBlock for subnet 03 within the EKS VPC

  Ec2VpcBlock:
    Type: String
    Default: 10.0.0.0/16
    # FIX: typo "vaid" -> "valid" in the description below.
    Description: The CIDR range for the EC2 VPC. This should be a valid private (RFC 1918) CIDR range.

  Ec2Subnet01Block:
    Type: String
    Default: 10.0.1.0/24
    Description: CidrBlock for subnet 01 within the EC2 VPC

  Ec2Subnet02Block:
    Type: String
    Default: 10.0.2.0/24
    Description: CidrBlock for subnet 02 within the EC2 VPC

  Ec2Subnet03Block:
    Type: String
    Default: 10.0.3.0/24
    Description: CidrBlock for subnet 03 within the EC2 VPC

  SpinnakerBucketName:
    Type: String
    Default: spinnaker-data
    Description: Name for the bucket where Spinnaker will save persistent data

  EKSClusterName:
    Type: String
    Default: spinnaker-infra
    Description: Name for the EKS cluster where Spinnaker will reside

Metadata:
  AWS::CloudFormation::Interface:
    ParameterGroups:
      -
        Label:
          default: "Worker Network Configuration"
        Parameters:
          - KubeVpcBlock
          - KubeSubnet01Block
          - KubeSubnet02Block
          - KubeSubnet03Block
      -
        Label:
          default: "EC2 Network Configuration"
        Parameters:
          - Ec2VpcBlock
          - Ec2Subnet01Block
          - Ec2Subnet02Block
          - Ec2Subnet03Block

Resources:
  EKSVPC:
    Type: AWS::EC2::VPC
    Properties:
      CidrBlock: !Ref KubeVpcBlock
      EnableDnsSupport: true
      EnableDnsHostnames: true
      Tags:
        -
          Key: Name
          Value: !Sub 'EKS-VPC'
        -
          Key: cloudformation-stack
          Value: !Ref AWS::StackId

  EC2VPC:
    Type: AWS::EC2::VPC
    Properties:
      CidrBlock: !Ref Ec2VpcBlock
      EnableDnsSupport: true
      EnableDnsHostnames: true
      Tags:
        -
          Key: Name
          Value: EC2-VPC
        -
          Key: cloudformation-stack
          Value: !Ref AWS::StackId

  EKSInternetGateway:
    Type: "AWS::EC2::InternetGateway"
    Properties:
      Tags:
        - Key: Name
          Value: EKS-Internet-Gateway

  EC2InternetGateway:
    Type: "AWS::EC2::InternetGateway"
    Properties:
      Tags:
        - Key: Name
          Value: EC2-Internet-Gateway

  EKSVPCGatewayAttachment:
    Type: "AWS::EC2::VPCGatewayAttachment"
    Properties:
      InternetGatewayId: !Ref EKSInternetGateway
      VpcId: !Ref EKSVPC

  EC2VPCGatewayAttachment:
    Type: "AWS::EC2::VPCGatewayAttachment"
    Properties:
      InternetGatewayId: !Ref EC2InternetGateway
      VpcId: !Ref EC2VPC

  EKSRouteTable:
    Type: AWS::EC2::RouteTable
    Properties:
      VpcId: !Ref EKSVPC
      Tags:
        - Key: Name
          Value: Public Subnets
        - Key: Network
          Value: Public

  EC2RouteTable:
    Type: AWS::EC2::RouteTable
    Properties:
      VpcId: !Ref EC2VPC
      Tags:
        - Key: Name
          Value: Public Subnets
        - Key: Network
          Value: Public

  EKSRoute:
    DependsOn: EKSVPCGatewayAttachment
    Type: AWS::EC2::Route
    Properties:
      RouteTableId: !Ref EKSRouteTable
      DestinationCidrBlock: 0.0.0.0/0
      GatewayId: !Ref EKSInternetGateway

  EC2Route:
    DependsOn: EC2VPCGatewayAttachment
    Type: AWS::EC2::Route
    Properties:
      RouteTableId: !Ref EC2RouteTable
      DestinationCidrBlock: 0.0.0.0/0
      GatewayId: !Ref EC2InternetGateway

  EKSSubnet01:
    Type: AWS::EC2::Subnet
    Metadata:
      Comment: EKS Subnet 01
    Properties:
      AvailabilityZone:
        Fn::Select:
          - '0'
          - Fn::GetAZs:
              Ref: AWS::Region
      CidrBlock:
        Ref: KubeSubnet01Block
      VpcId:
        Ref: EKSVPC
      Tags:
        - Key: Name
          Value: !Join ["", ["eks-vpc.internal.", !Select ['0', !GetAZs {Ref: "AWS::Region"}]]]

  EKSSubnet02:
    Type: AWS::EC2::Subnet
    Metadata:
      Comment: EKS Subnet 02
    Properties:
      AvailabilityZone:
        Fn::Select:
          - '1'
          - Fn::GetAZs:
              Ref: AWS::Region
      CidrBlock:
        Ref: KubeSubnet02Block
      VpcId:
        Ref: EKSVPC
      Tags:
        - Key: Name
          Value: !Join ["", ["eks-vpc.internal.", !Select ['1', !GetAZs {Ref: "AWS::Region"}]]]

  EKSSubnet03:
    Type: AWS::EC2::Subnet
    Metadata:
      Comment: EKS Subnet 03
    Properties:
      AvailabilityZone:
        Fn::Select:
          - '2'
          - Fn::GetAZs:
              Ref: AWS::Region
      CidrBlock:
        Ref: KubeSubnet03Block
      VpcId:
        Ref: EKSVPC
      Tags:
        - Key: Name
          Value: !Join ["", ["eks-vpc.internal.", !Select ['2', !GetAZs {Ref: "AWS::Region"}]]]

  EC2Subnet01:
    Type: AWS::EC2::Subnet
    Metadata:
      Comment: EC2 Subnet 01
    Properties:
      AvailabilityZone:
        Fn::Select:
          - '0'
          - Fn::GetAZs:
              Ref: AWS::Region
      CidrBlock:
        Ref: Ec2Subnet01Block
      VpcId:
        Ref: EC2VPC
      Tags:
        -
          Key: Name
          Value: !Join ["", ["ec2-vpc.internal.", !Select ['0', !GetAZs {Ref: "AWS::Region"}]]]
        -
          Key: cloudformation-stack
          Value: !Ref AWS::StackId

  EC2Subnet02:
    Type: AWS::EC2::Subnet
    Metadata:
      Comment: EC2 Subnet 02
    Properties:
      AvailabilityZone:
        Fn::Select:
          - '1'
          - Fn::GetAZs:
              Ref: AWS::Region
      CidrBlock:
        Ref: Ec2Subnet02Block
      VpcId:
        Ref: EC2VPC
      Tags:
        -
          Key: Name
          Value: !Join ["", ["ec2-vpc.internal.", !Select ['1', !GetAZs {Ref: "AWS::Region"}]]]
        -
          Key: cloudformation-stack
          Value: !Ref AWS::StackId

  EC2Subnet03:
    Type: AWS::EC2::Subnet
    Metadata:
      Comment: EC2 Subnet 03
    Properties:
      AvailabilityZone:
        Fn::Select:
          - '2'
          - Fn::GetAZs:
              Ref: AWS::Region
      CidrBlock:
        Ref: Ec2Subnet03Block
      VpcId:
        Ref: EC2VPC
      Tags:
        -
          Key: Name
          Value: !Join ["", ["ec2-vpc.internal.", !Select ['2', !GetAZs {Ref: "AWS::Region"}]]]
        -
          Key: cloudformation-stack
          Value: !Ref AWS::StackId

  EKSSubnet01RouteTableAssociation:
    Type: AWS::EC2::SubnetRouteTableAssociation
    Properties:
      SubnetId: !Ref EKSSubnet01
      RouteTableId: !Ref EKSRouteTable

  EKSSubnet02RouteTableAssociation:
    Type: AWS::EC2::SubnetRouteTableAssociation
    Properties:
      SubnetId: !Ref EKSSubnet02
      RouteTableId: !Ref EKSRouteTable

  EKSSubnet03RouteTableAssociation:
    Type: AWS::EC2::SubnetRouteTableAssociation
    Properties:
      SubnetId: !Ref EKSSubnet03
      RouteTableId: !Ref EKSRouteTable

  EC2Subnet01RouteTableAssociation:
    Type: AWS::EC2::SubnetRouteTableAssociation
    Properties:
      SubnetId: !Ref EC2Subnet01
      RouteTableId: !Ref EC2RouteTable

  EC2Subnet02RouteTableAssociation:
    Type: AWS::EC2::SubnetRouteTableAssociation
    Properties:
      SubnetId: !Ref EC2Subnet02
      RouteTableId: !Ref EC2RouteTable

  EC2Subnet03RouteTableAssociation:
    Type: AWS::EC2::SubnetRouteTableAssociation
    Properties:
      SubnetId: !Ref EC2Subnet03
      RouteTableId: !Ref EC2RouteTable

  EKSControlPlaneSecurityGroup:
    Type: AWS::EC2::SecurityGroup
    Properties:
      GroupDescription: Cluster communication with worker nodes
      VpcId: !Ref EKSVPC
      Tags:
        -
          Key: Name
          Value: eks-control-plane-sg

  # Role assumable by EC2 instances that administer the EKS cluster; it is
  # mapped to system:masters via the aws-auth ConfigMap by the scripts.
  EKSAdmin:
    Type: "AWS::IAM::Role"
    Properties:
      RoleName: EKSAdminRole
      AssumeRolePolicyDocument:
        Version: "2012-10-17"
        Statement:
          -
            Effect: "Allow"
            Principal:
              Service:
                - "ec2.amazonaws.com"
            Action:
              - "sts:AssumeRole"
      ManagedPolicyArns:
        - arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess
      Path: "/"

  EKSAdminInstanceProfile:
    Type: "AWS::IAM::InstanceProfile"
    Properties:
      InstanceProfileName: EKSAdmin
      Path: "/"
      Roles:
        -
          Ref: "EKSAdmin"

  # Service role the EKS control plane itself runs under.
  EKSClusterRole:
    Type: "AWS::IAM::Role"
    Properties:
      RoleName: EKSCluster
      AssumeRolePolicyDocument:
        Version: "2012-10-17"
        Statement:
          -
            Effect: "Allow"
            Principal:
              Service:
                - "eks.amazonaws.com"
            Action:
              - "sts:AssumeRole"
      ManagedPolicyArns:
        - arn:aws:iam::aws:policy/AmazonEKSServicePolicy
        - arn:aws:iam::aws:policy/AmazonEKSClusterPolicy
      Path: "/"

  EKSClusterInstanceProfile:
    Type: "AWS::IAM::InstanceProfile"
    Properties:
      InstanceProfileName: EKSCluster
      Path: "/"
      Roles:
        - !Ref EKSClusterRole

  SpinnakerDataBucket:
    Type: "AWS::S3::Bucket"
    Properties:
      BucketName: !Ref SpinnakerBucketName

  SpinnakerEKSCluster:
    Type: "AWS::EKS::Cluster"
    Properties:
      Name: !Ref EKSClusterName
      ResourcesVpcConfig:
        SecurityGroupIds:
          - !Ref EKSControlPlaneSecurityGroup
        SubnetIds:
          - !Ref EKSSubnet01
          - !Ref EKSSubnet02
          - !Ref EKSSubnet03
      RoleArn: !GetAtt EKSClusterRole.Arn

Outputs:

  EKSSubnetIds:
    Description: All subnets in the EKS VPC
    Value: !Join [ ",", [ !Ref EKSSubnet01, !Ref EKSSubnet02, !Ref EKSSubnet03 ] ]
    Export:
      Name: !Join [ "-", [ !Ref "AWS::StackName", "eks-subnet-ids" ] ]

  EC2SubnetIds:
    Description: All subnets in the EC2 VPC
    Value: !Join [ ",", [ !Ref EC2Subnet01, !Ref EC2Subnet02, !Ref EC2Subnet03]]
    Export:
      Name: !Join [ "-", [ !Ref "AWS::StackName", "ec2-subnet-ids" ] ]

  EKSSecurityGroups:
    Description: Security group for the EKS cluster control plane communication with worker nodes
    Value: !Join [ ",", [ !Ref EKSControlPlaneSecurityGroup ] ]
    Export:
      Name: !Join [ "-", [ !Ref "AWS::StackName", "eks-security-groups" ] ]

  EKSVpcId:
    Description: The EKS VPC Id
    Value: !Ref EKSVPC
    Export:
      Name: !Join [ "-", [ !Ref "AWS::StackName", "eks-vpc-id" ] ]

  EC2VpcId:
    Description: The Ec2 VPC Id
    Value: !Ref EC2VPC
    Export:
      Name: !Join [ "-", [ !Ref "AWS::StackName", "ec2-vpc-id" ] ]

  EKSAdminRole:
    Description: EKS Admin Role
    Value: !Ref EKSAdmin
    Export:
      Name: !Join [ "-", [ !Ref "AWS::StackName", "eks-admin-role" ] ]

  EKSAdminRoleArn:
    Description: EKS Admin Role Arn
    Value: !GetAtt EKSAdmin.Arn
    Export:
      Name: !Join [ "-", [ !Ref "AWS::StackName", "eks-admin-role-arn"]]

  EKSAdminInstanceProfile:
    Description: EKS Admin Instance Profile
    Value: !Ref EKSAdminInstanceProfile
    Export:
      Name: !Join [ "-", [ !Ref "AWS::StackName", "eks-admin-profile" ] ]

  EKSClusterRole:
    Description: EKS Cluster Role
    Value: !Ref EKSClusterRole
    Export:
      Name: !Join [ "-", [ !Ref "AWS::StackName", "eks-cluster-role" ] ]

  EKSClusterInstanceProfile:
    Description: EKS Cluster Instance Profile
    Value: !Ref EKSClusterInstanceProfile
    Export:
      Name: !Join [ "-", [ !Ref "AWS::StackName", "eks-cluster-instance-profile" ] ]

  SpinnakerDataBucket:
    # FIX: the description claimed this was the bucket *name*, but the value
    # is the bucket ARN. Consumers (deploy_spinnaker.sh) extract the name from
    # the ARN, so the value itself is intentionally left unchanged.
    Description: ARN of the bucket for Spinnaker data
    Value: !GetAtt SpinnakerDataBucket.Arn
    Export:
      Name: !Join [ "-", [ !Ref "AWS::StackName", "spinnaker-data-bucket" ] ]
## SPDX-License-Identifier: MIT-0

---
AWSTemplateFormatVersion: '2010-09-09'
Description: 'Amazon EKS - Node Group'

Parameters:

  KeyName:
    Description: The EC2 Key Pair to allow SSH access to the instances
    Type: AWS::EC2::KeyPair::KeyName

  NodeImageId:
    Type: AWS::EC2::Image::Id
    Description: AMI id for the node instances.

  NodeInstanceType:
    Description: EC2 instance type for the node instances
    Type: String
    Default: t2.medium
    AllowedValues:
      - t2.small
      - t2.medium
      - t2.large
      - t2.xlarge
      - t2.2xlarge
      - m3.medium
      - m3.large
      - m3.xlarge
      - m3.2xlarge
      - m4.large
      - m4.xlarge
      - m4.2xlarge
      - m4.4xlarge
      - m4.10xlarge
      - m5.large
      - m5.xlarge
      - m5.2xlarge
      - m5.4xlarge
      - m5.12xlarge
      - m5.24xlarge
      - c4.large
      - c4.xlarge
      - c4.2xlarge
      - c4.4xlarge
      - c4.8xlarge
      - c5.large
      - c5.xlarge
      - c5.2xlarge
      - c5.4xlarge
      - c5.9xlarge
      - c5.18xlarge
      - i3.large
      - i3.xlarge
      - i3.2xlarge
      - i3.4xlarge
      - i3.8xlarge
      - i3.16xlarge
      - r3.xlarge
      - r3.2xlarge
      - r3.4xlarge
      - r3.8xlarge
      - r4.large
      - r4.xlarge
      - r4.2xlarge
      - r4.4xlarge
      - r4.8xlarge
      - r4.16xlarge
      - x1.16xlarge
      - x1.32xlarge
      - p2.xlarge
      - p2.8xlarge
      - p2.16xlarge
      - p3.2xlarge
      - p3.8xlarge
      - p3.16xlarge
    ConstraintDescription: Must be a valid EC2 instance type

  NodeAutoScalingGroupMinSize:
    Type: Number
    Description: Minimum size of Node Group ASG.
    Default: 1

  NodeAutoScalingGroupMaxSize:
    Type: Number
    Description: Maximum size of Node Group ASG.
    Default: 3

  NodeVolumeSize:
    Type: Number
    Description: Node volume size
    Default: 20

  ClusterName:
    Description: The cluster name provided when the cluster was created. If it is incorrect, nodes will not be able to join the cluster.
    Type: String

  BootstrapArguments:
    Description: Arguments to pass to the bootstrap script. See files/bootstrap.sh in https://github.com/awslabs/amazon-eks-ami
    Default: ""
    Type: String

  NodeGroupName:
    Description: Unique identifier for the Node Group.
    Type: String

  NetworkStackName:
    Description: The name of the eks and ec2 network stack that contains the export values
    Type: String

Mappings:
  # NOTE(review): this mapping is not referenced anywhere in the template
  # (the EKS-optimized AMI's bootstrap.sh derives max pods itself). Kept for
  # parity with the upstream amazon-eks-nodegroup template; safe to use with
  # Fn::FindInMap if --use-max-pods is ever passed via BootstrapArguments.
  MaxPodsPerNode:
    c4.large:
      MaxPods: 29
    c4.xlarge:
      MaxPods: 58
    c4.2xlarge:
      MaxPods: 58
    c4.4xlarge:
      MaxPods: 234
    c4.8xlarge:
      MaxPods: 234
    c5.large:
      MaxPods: 29
    c5.xlarge:
      MaxPods: 58
    c5.2xlarge:
      MaxPods: 58
    c5.4xlarge:
      MaxPods: 234
    c5.9xlarge:
      MaxPods: 234
    c5.18xlarge:
      MaxPods: 737
    i3.large:
      MaxPods: 29
    i3.xlarge:
      MaxPods: 58
    i3.2xlarge:
      MaxPods: 58
    i3.4xlarge:
      MaxPods: 234
    i3.8xlarge:
      MaxPods: 234
    i3.16xlarge:
      MaxPods: 737
    m3.medium:
      MaxPods: 12
    m3.large:
      MaxPods: 29
    m3.xlarge:
      MaxPods: 58
    m3.2xlarge:
      MaxPods: 118
    m4.large:
      MaxPods: 20
    m4.xlarge:
      MaxPods: 58
    m4.2xlarge:
      MaxPods: 58
    m4.4xlarge:
      MaxPods: 234
    m4.10xlarge:
      MaxPods: 234
    m5.large:
      MaxPods: 29
    m5.xlarge:
      MaxPods: 58
    m5.2xlarge:
      MaxPods: 58
    m5.4xlarge:
      MaxPods: 234
    m5.12xlarge:
      MaxPods: 234
    m5.24xlarge:
      MaxPods: 737
    p2.xlarge:
      MaxPods: 58
    p2.8xlarge:
      MaxPods: 234
    p2.16xlarge:
      MaxPods: 234
    p3.2xlarge:
      MaxPods: 58
    p3.8xlarge:
      MaxPods: 234
    p3.16xlarge:
      MaxPods: 234
    r3.xlarge:
      MaxPods: 58
    r3.2xlarge:
      MaxPods: 58
    r3.4xlarge:
      MaxPods: 234
    r3.8xlarge:
      MaxPods: 234
    r4.large:
      MaxPods: 29
    r4.xlarge:
      MaxPods: 58
    r4.2xlarge:
      MaxPods: 58
    r4.4xlarge:
      MaxPods: 234
    r4.8xlarge:
      MaxPods: 234
    r4.16xlarge:
      MaxPods: 737
    t2.small:
      MaxPods: 8
    t2.medium:
      MaxPods: 17
    t2.large:
      MaxPods: 35
    t2.xlarge:
      MaxPods: 44
    t2.2xlarge:
      MaxPods: 44
    x1.16xlarge:
      MaxPods: 234
    x1.32xlarge:
      MaxPods: 234

Metadata:
  AWS::CloudFormation::Interface:
    ParameterGroups:
      -
        Label:
          default: "EKS Cluster"
        Parameters:
          - ClusterName
      -
        Label:
          default: "Worker Node Configuration"
        Parameters:
          - NodeGroupName
          - NodeAutoScalingGroupMinSize
          - NodeAutoScalingGroupMaxSize
          - NodeInstanceType
          - NodeImageId
          - NodeVolumeSize
          - KeyName
          - BootstrapArguments

Resources:

  NodeInstanceProfile:
    Type: AWS::IAM::InstanceProfile
    Properties:
      # NOTE(review): hard-coded name means only one copy of this stack can
      # exist per account/region; a second deployment fails on a name clash.
      InstanceProfileName: EKSNode
      Path: "/"
      Roles:
        - !Ref NodeInstanceRole

  # IAM role assumed by the worker node EC2 instances. Grants the standard
  # EKS worker policies plus access to the Spinnaker data bucket exported by
  # the network stack, and permission to assume the cluster admin role.
  NodeInstanceRole:
    Type: AWS::IAM::Role
    Properties:
      AssumeRolePolicyDocument:
        Version: '2012-10-17'
        Statement:
          - Effect: Allow
            Principal:
              Service:
                - ec2.amazonaws.com
            Action:
              - sts:AssumeRole
      Path: "/"
      ManagedPolicyArns:
        - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy
        - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy
        - arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly
        # NOTE(review): EC2FullAccess on every worker node is very broad;
        # consider scoping to the APIs Spinnaker's clouddriver actually needs.
        - arn:aws:iam::aws:policy/AmazonEC2FullAccess
      Policies:
        -
          PolicyName: "spinnaker-bucket-read"
          PolicyDocument:
            Version: "2012-10-17"
            Statement:
              -
                Effect: "Allow"
                Resource:
                  Fn::ImportValue: !Sub "${NetworkStackName}-eks-admin-role-arn"
                Action:
                  - "sts:AssumeRole"
              -
                Effect: "Allow"
                Action:
                  - "s3:PutAnalyticsConfiguration"
                  - "s3:GetObjectVersionTagging"
                  - "s3:CreateBucket"
                  - "s3:ReplicateObject"
                  - "s3:GetObjectAcl"
                  - "s3:DeleteBucketWebsite"
                  - "s3:PutLifecycleConfiguration"
                  - "s3:GetObjectVersionAcl"
                  - "s3:PutObjectTagging"
                  - "s3:DeleteObject"
                  - "s3:GetIpConfiguration"
                  - "s3:DeleteObjectTagging"
                  - "s3:GetBucketWebsite"
                  - "s3:PutReplicationConfiguration"
                  - "s3:DeleteObjectVersionTagging"
                  - "s3:GetBucketNotification"
                  - "s3:PutBucketCORS"
                  - "s3:GetReplicationConfiguration"
                  - "s3:ListMultipartUploadParts"
                  - "s3:PutObject"
                  - "s3:GetObject"
                  - "s3:PutBucketNotification"
                  - "s3:PutBucketLogging"
                  - "s3:PutObjectVersionAcl"
                  - "s3:GetAnalyticsConfiguration"
                  - "s3:GetObjectVersionForReplication"
                  - "s3:GetLifecycleConfiguration"
                  - "s3:ListBucketByTags"
                  - "s3:GetInventoryConfiguration"
                  - "s3:GetBucketTagging"
                  - "s3:PutAccelerateConfiguration"
                  - "s3:DeleteObjectVersion"
                  - "s3:GetBucketLogging"
                  - "s3:ListBucketVersions"
                  - "s3:ReplicateTags"
                  - "s3:RestoreObject"
                  - "s3:ListBucket"
                  - "s3:GetAccelerateConfiguration"
                  - "s3:GetBucketPolicy"
                  - "s3:PutEncryptionConfiguration"
                  - "s3:GetEncryptionConfiguration"
                  - "s3:GetObjectVersionTorrent"
                  - "s3:AbortMultipartUpload"
                  - "s3:PutBucketTagging"
                  - "s3:GetBucketRequestPayment"
                  - "s3:GetObjectTagging"
                  - "s3:GetMetricsConfiguration"
                  - "s3:DeleteBucket"
                  - "s3:PutBucketVersioning"
                  - "s3:PutObjectAcl"
                  - "s3:ListBucketMultipartUploads"
                  - "s3:PutMetricsConfiguration"
                  - "s3:PutObjectVersionTagging"
                  - "s3:GetBucketVersioning"
                  - "s3:GetBucketAcl"
                  - "s3:PutInventoryConfiguration"
                  - "s3:PutIpConfiguration"
                  - "s3:GetObjectTorrent"
                  - "s3:PutBucketWebsite"
                  - "s3:PutBucketRequestPayment"
                  - "s3:GetBucketCORS"
                  - "s3:GetBucketLocation"
                  - "s3:ReplicateDelete"
                  - "s3:GetObjectVersion"
                # Bucket-level and object-level access to the Spinnaker data
                # bucket exported by the network stack.
                Resource:
                  - Fn::Sub:
                      - "${BucketName}/*"
                      - BucketName:
                          Fn::ImportValue: !Sub "${NetworkStackName}-spinnaker-data-bucket"
                  - Fn::ImportValue: !Sub "${NetworkStackName}-spinnaker-data-bucket"

  NodeSecurityGroup:
    Type: AWS::EC2::SecurityGroup
    Properties:
      GroupDescription: Security group for all nodes in the cluster
      VpcId:
        Fn::ImportValue: !Sub "${NetworkStackName}-eks-vpc-id"
      Tags:
        -
          Key: Name
          Value: eks-node-sg
        # Required by EKS so the cluster can discover/manage node resources.
        -
          Key: !Sub "kubernetes.io/cluster/${ClusterName}"
          Value: 'owned'

  NodeSecurityGroupIngress:
    Type: AWS::EC2::SecurityGroupIngress
    DependsOn: NodeSecurityGroup
    Properties:
      Description: Allow node to communicate with each other
      GroupId: !Ref NodeSecurityGroup
      SourceSecurityGroupId: !Ref NodeSecurityGroup
      IpProtocol: '-1'
      FromPort: 0
      ToPort: 65535

  NodeSecurityGroupFromControlPlaneIngress:
    Type: AWS::EC2::SecurityGroupIngress
    DependsOn: NodeSecurityGroup
    Properties:
      Description: Allow worker Kubelets and pods to receive communication from the cluster control plane
      GroupId: !Ref NodeSecurityGroup
      SourceSecurityGroupId:
        Fn::ImportValue: !Sub "${NetworkStackName}-eks-security-groups"
      IpProtocol: tcp
      FromPort: 1025
      ToPort: 65535

  ControlPlaneEgressToNodeSecurityGroup:
    Type: AWS::EC2::SecurityGroupEgress
    DependsOn: NodeSecurityGroup
    Properties:
      Description: Allow the cluster control plane to communicate with worker Kubelet and pods
      GroupId:
        Fn::ImportValue: !Sub "${NetworkStackName}-eks-security-groups"
      DestinationSecurityGroupId: !Ref NodeSecurityGroup
      IpProtocol: tcp
      FromPort: 1025
      ToPort: 65535

  ClusterControlPlaneSecurityGroupIngress:
    Type: AWS::EC2::SecurityGroupIngress
    DependsOn: NodeSecurityGroup
    Properties:
      Description: Allow pods to communicate with the cluster API Server
      GroupId:
        Fn::ImportValue: !Sub "${NetworkStackName}-eks-security-groups"
      SourceSecurityGroupId: !Ref NodeSecurityGroup
      IpProtocol: tcp
      ToPort: 443
      FromPort: 443

  NodeSecurityGroupFromControlPlaneOn443Ingress:
    Type: AWS::EC2::SecurityGroupIngress
    DependsOn: NodeSecurityGroup
    Properties:
      Description: Allow pods running extension API servers on port 443 to receive communication from cluster control plane
      GroupId: !Ref NodeSecurityGroup
      SourceSecurityGroupId:
        Fn::ImportValue: !Sub "${NetworkStackName}-eks-security-groups"
      IpProtocol: tcp
      FromPort: 443
      ToPort: 443

  ControlPlaneEgressToNodeSecurityGroupOn443:
    Type: AWS::EC2::SecurityGroupEgress
    DependsOn: NodeSecurityGroup
    Properties:
      Description: Allow the cluster control plane to communicate with pods running extension API servers on port 443
      GroupId:
        Fn::ImportValue: !Sub "${NetworkStackName}-eks-security-groups"
      DestinationSecurityGroupId: !Ref NodeSecurityGroup
      IpProtocol: tcp
      FromPort: 443
      ToPort: 443

  # FIX: a second, byte-identical "ClusterControlPlaneSecurityGroupIngress"
  # resource used to be defined here. Duplicate mapping keys are invalid YAML
  # (parsers either reject the template or silently drop one copy), so the
  # duplicate has been removed; the single definition above is authoritative.

  NodeGroup:
    Type: AWS::AutoScaling::AutoScalingGroup
    Properties:
      # Starts at the maximum size, matching the upstream EKS nodegroup
      # template; the rolling-update policy below keeps one node in service.
      DesiredCapacity: !Ref NodeAutoScalingGroupMaxSize
      LaunchConfigurationName: !Ref NodeLaunchConfig
      MinSize: !Ref NodeAutoScalingGroupMinSize
      MaxSize: !Ref NodeAutoScalingGroupMaxSize
      VPCZoneIdentifier:
        Fn::Split:
          - ","
          - Fn::ImportValue: !Sub "${NetworkStackName}-eks-subnet-ids"
      Tags:
        - Key: Name
          Value: !Sub "${ClusterName}-${NodeGroupName}-Node"
          PropagateAtLaunch: 'true'
        - Key: !Sub 'kubernetes.io/cluster/${ClusterName}'
          Value: 'owned'
          PropagateAtLaunch: 'true'
    UpdatePolicy:
      AutoScalingRollingUpdate:
        MinInstancesInService: '1'
        MaxBatchSize: '1'

  NodeLaunchConfig:
    Type: AWS::AutoScaling::LaunchConfiguration
    Properties:
      AssociatePublicIpAddress: 'true'
      IamInstanceProfile: !Ref NodeInstanceProfile
      ImageId: !Ref NodeImageId
      InstanceType: !Ref NodeInstanceType
      KeyName: !Ref KeyName
      SecurityGroups:
        - !Ref NodeSecurityGroup
      BlockDeviceMappings:
        - DeviceName: /dev/xvda
          Ebs:
            VolumeSize: !Ref NodeVolumeSize
            VolumeType: gp2
            DeleteOnTermination: true
      UserData:
        Fn::Base64:
          !Sub |
            #!/bin/bash
            set -o xtrace
            /etc/eks/bootstrap.sh ${ClusterName} ${BootstrapArguments}
            /opt/aws/bin/cfn-signal --exit-code $? \
                     --stack  ${AWS::StackName} \
                     --resource NodeGroup  \
                     --region ${AWS::Region}

Outputs:
  NodeInstanceRole:
    Description: The node instance role
    Value: !GetAtt NodeInstanceRole.Arn