├── modules ├── codebuild │ ├── network.tf │ ├── versions.tf │ ├── labels.tf │ ├── outputs.tf │ ├── defaults.tf │ ├── scripts │ │ └── start-build.sh │ ├── variables.tf │ ├── tests │ │ └── defaults │ │ │ └── main.tf │ ├── README.md │ └── main.tf ├── spinnaker-managed-gcp │ ├── versions.tf │ ├── label.tf │ ├── variables.tf │ ├── outputs.tf │ ├── README.md │ └── main.tf ├── spinnaker-managed-aws │ ├── versions.tf │ ├── outputs.tf │ ├── defaults.tf │ ├── labels.tf │ ├── variables.tf │ ├── main.tf │ └── README.md ├── spinnaker-managed-eks │ ├── versions.tf │ ├── labels.tf │ ├── main.tf │ ├── outputs.tf │ ├── tests │ │ └── defaults │ │ │ └── main.tf │ ├── variables.tf │ └── README.md ├── spinnaker-managed-ecs │ ├── versions.tf │ ├── labels.tf │ ├── outputs.tf │ ├── tests │ │ ├── defaults │ │ │ └── main.tf │ │ └── fargate │ │ │ └── main.tf │ ├── variables.tf │ ├── README.md │ └── main.tf ├── frigga │ ├── outputs.tf │ ├── versions.tf │ ├── main.tf │ ├── variables.tf │ └── README.md ├── spinnaker-aware-aws-vpc │ ├── tests │ │ └── defaults │ │ │ └── main.tf │ ├── labels.tf │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ ├── README.md │ └── defaults.tf └── aws-partitions │ ├── outputs.tf │ └── README.md ├── examples ├── blueprint │ ├── default.auto.tfvars │ ├── outputs.tf │ ├── chaosmonkey.toml │ ├── awscb.tf │ ├── main.tf │ └── variables.tf └── aws-modernization-with-spinnaker │ ├── foundation │ ├── default.tf │ ├── outputs.tf │ ├── charts │ │ └── cluster-autoscaler │ │ │ ├── templates │ │ │ ├── serviceaccount.yaml │ │ │ ├── ingress.yaml │ │ │ ├── _helpers.tpl │ │ │ ├── rbac.yaml │ │ │ └── deployment.yaml │ │ │ ├── .helmignore │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ ├── policy.cluster-autoscaler.json │ ├── variables.tf │ ├── main.tf │ └── policy.aws-loadbalancer-controller.json │ ├── platform │ ├── default.tf │ ├── templates │ │ ├── preuninstall.tpl │ │ ├── terminate-eks-nodes.tpl │ │ └── halconfig.tpl │ ├── variables.tf │ ├── outputs.tf │ └── main.tf │ ├── application │ └── yelbv2 │ │ ├── Gemfile │ │ ├── modules │ │ ├── hostname.rb │ │ ├── restaurant.rb │ │ ├── getstats.rb │ │ ├── getvotes.rb │ │ ├── restaurantsdbread.rb │ │ ├── pageviews.rb │ │ └── restaurantsdbupdate.rb │ │ ├── startup.sh │ │ ├── README.md │ │ ├── buildspec.yml │ │ ├── Dockerfile │ │ ├── manifests │ │ ├── 3.weighted-route.yaml │ │ ├── 2.app-v2.yaml │ │ ├── 4.high-availability.yaml │ │ └── 1.app-v1.yaml │ │ └── yelb-appserver.rb │ ├── backend │ ├── provider.tf │ ├── default.auto.tfvars │ ├── outputs.tf │ ├── main.tf │ └── variables.tf │ ├── provider.tf │ ├── default.auto.tfvars │ ├── main.tf │ └── variables.tf ├── images ├── aws-cw-alarms.png ├── spin-first-look.png ├── aws-vpc-ngw-per-az.png ├── aws-xray-timeline.png ├── aws-xray-topology.png ├── cluster-management.png ├── spin-yelb-app-ing.png ├── spin-yelb-app-logs.png ├── spin-yelb-app-pod.png ├── spin-yelb-new-app.png ├── aws-ec2-shutting-down.png ├── aws-fis-yelb-broken.png ├── chaosmonkey-enabling.png ├── spin-yelb-pipe-app-ha.png ├── spin-yelb-pipe-app-ns.png ├── spin-yelb-pipe-app-v1.png ├── spin-yelb-pipe-app-v2.png ├── spin-yelb-pipe-app-wr.png ├── chaosmonkey-termination.png ├── spin-yelb-pipe-judge-v1.png ├── spin-yelb-pipe-judge-v2.png ├── aws-cw-delete-log-groups.png ├── aws-cw-metrics-dashboard.png ├── aws-fis-yelb-steady-state.png ├── aws-s3-artifact-manifest.png ├── aws-vpc-isolated-subnets.png ├── aws-vpc-single-shared-ngw.png ├── spin-yelb-pipe-build-proj.png ├── spin-yelb-pipe-build-stage.png ├── spin-yelb-pod-terminated.png ├── 
spin-yelb-new-pipe-chaos-eng.png ├── spin-yelb-new-pipe-svc-mesh.png ├── spin-yelb-pipe-deploy-stage.png ├── spin-yelb-pipe-exec-complete.png ├── aws-fis-eks-pod-anti-affinity.png ├── spin-yelb-pipe-judgment-stage.png └── aws-modernization-with-spinnaker-architecture.png ├── versions.tf ├── charts ├── spinnaker │ ├── Chart.lock │ ├── Chart.yaml │ ├── .helmignore │ ├── templates │ │ ├── secret.yaml │ │ ├── _helpers.tpl │ │ ├── role.yaml │ │ ├── ingress.yaml │ │ ├── configmap.yaml │ │ └── statefulset.yaml │ └── values.yaml └── .helmignore ├── CODEOWNER ├── .gitignore ├── labels.tf ├── .github └── workflows │ └── stale.yaml ├── variables.tf ├── LICENSE.md ├── outputs.tf ├── scripts ├── halconfig.sh └── update-x509ca.sh ├── defaults.tf ├── README.md └── main.tf /modules/codebuild/network.tf: -------------------------------------------------------------------------------- 1 | ## network 2 | -------------------------------------------------------------------------------- /examples/blueprint/default.auto.tfvars: -------------------------------------------------------------------------------- 1 | tags = { example = "spinnaker_blueprint" } 2 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-gcp/versions.tf: -------------------------------------------------------------------------------- 1 | 2 | terraform { 3 | required_version = ">= 0.12" 4 | } 5 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/foundation/default.tf: -------------------------------------------------------------------------------- 1 | # default variables 2 | 3 | locals {} 4 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/platform/default.tf: -------------------------------------------------------------------------------- 1 | # default variables 2 | 3 | locals {} 4 | -------------------------------------------------------------------------------- /modules/codebuild/versions.tf: -------------------------------------------------------------------------------- 1 | ## requirements 2 | 3 | terraform { 4 | required_version = ">= 0.12" 5 | } 6 | -------------------------------------------------------------------------------- /images/aws-cw-alarms.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/aws-cw-alarms.png -------------------------------------------------------------------------------- /images/spin-first-look.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/spin-first-look.png -------------------------------------------------------------------------------- /modules/spinnaker-managed-aws/versions.tf: -------------------------------------------------------------------------------- 1 | ## requirements 2 | 3 | terraform { 4 | required_version = ">= 0.13" 5 | } 6 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-eks/versions.tf: -------------------------------------------------------------------------------- 1 | ## requirements 2 | 3 | terraform { 4 | required_version = ">= 0.13" 5 | } 6 | -------------------------------------------------------------------------------- /images/aws-vpc-ngw-per-az.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/aws-vpc-ngw-per-az.png -------------------------------------------------------------------------------- /images/aws-xray-timeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/aws-xray-timeline.png -------------------------------------------------------------------------------- /images/aws-xray-topology.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/aws-xray-topology.png -------------------------------------------------------------------------------- /images/cluster-management.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/cluster-management.png -------------------------------------------------------------------------------- /images/spin-yelb-app-ing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/spin-yelb-app-ing.png -------------------------------------------------------------------------------- /images/spin-yelb-app-logs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/spin-yelb-app-logs.png -------------------------------------------------------------------------------- /images/spin-yelb-app-pod.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/spin-yelb-app-pod.png -------------------------------------------------------------------------------- /images/spin-yelb-new-app.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/spin-yelb-new-app.png -------------------------------------------------------------------------------- /modules/spinnaker-managed-ecs/versions.tf: -------------------------------------------------------------------------------- 1 | ### requirements 2 | 3 | terraform { 4 | required_version = ">= 0.13" 5 | } 6 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/application/yelbv2/Gemfile: -------------------------------------------------------------------------------- 1 | source 'https://rubygems.org' 2 | gem 'pg' 3 | gem 'redis' 4 | -------------------------------------------------------------------------------- /images/aws-ec2-shutting-down.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/aws-ec2-shutting-down.png -------------------------------------------------------------------------------- /images/aws-fis-yelb-broken.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/aws-fis-yelb-broken.png -------------------------------------------------------------------------------- 
/images/chaosmonkey-enabling.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/chaosmonkey-enabling.png -------------------------------------------------------------------------------- /images/spin-yelb-pipe-app-ha.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/spin-yelb-pipe-app-ha.png -------------------------------------------------------------------------------- /images/spin-yelb-pipe-app-ns.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/spin-yelb-pipe-app-ns.png -------------------------------------------------------------------------------- /images/spin-yelb-pipe-app-v1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/spin-yelb-pipe-app-v1.png -------------------------------------------------------------------------------- /images/spin-yelb-pipe-app-v2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/spin-yelb-pipe-app-v2.png -------------------------------------------------------------------------------- /images/spin-yelb-pipe-app-wr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/spin-yelb-pipe-app-wr.png -------------------------------------------------------------------------------- /images/chaosmonkey-termination.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/chaosmonkey-termination.png -------------------------------------------------------------------------------- /images/spin-yelb-pipe-judge-v1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/spin-yelb-pipe-judge-v1.png -------------------------------------------------------------------------------- /images/spin-yelb-pipe-judge-v2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/spin-yelb-pipe-judge-v2.png -------------------------------------------------------------------------------- /images/aws-cw-delete-log-groups.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/aws-cw-delete-log-groups.png -------------------------------------------------------------------------------- /images/aws-cw-metrics-dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/aws-cw-metrics-dashboard.png -------------------------------------------------------------------------------- /images/aws-fis-yelb-steady-state.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/aws-fis-yelb-steady-state.png -------------------------------------------------------------------------------- /images/aws-s3-artifact-manifest.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/aws-s3-artifact-manifest.png -------------------------------------------------------------------------------- /images/aws-vpc-isolated-subnets.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/aws-vpc-isolated-subnets.png -------------------------------------------------------------------------------- /images/aws-vpc-single-shared-ngw.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/aws-vpc-single-shared-ngw.png -------------------------------------------------------------------------------- /images/spin-yelb-pipe-build-proj.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/spin-yelb-pipe-build-proj.png -------------------------------------------------------------------------------- /images/spin-yelb-pipe-build-stage.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/spin-yelb-pipe-build-stage.png -------------------------------------------------------------------------------- /images/spin-yelb-pod-terminated.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/spin-yelb-pod-terminated.png -------------------------------------------------------------------------------- /images/spin-yelb-new-pipe-chaos-eng.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/spin-yelb-new-pipe-chaos-eng.png -------------------------------------------------------------------------------- /images/spin-yelb-new-pipe-svc-mesh.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/spin-yelb-new-pipe-svc-mesh.png -------------------------------------------------------------------------------- /images/spin-yelb-pipe-deploy-stage.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/spin-yelb-pipe-deploy-stage.png -------------------------------------------------------------------------------- /images/spin-yelb-pipe-exec-complete.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/spin-yelb-pipe-exec-complete.png -------------------------------------------------------------------------------- /images/aws-fis-eks-pod-anti-affinity.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/aws-fis-eks-pod-anti-affinity.png 
-------------------------------------------------------------------------------- /images/spin-yelb-pipe-judgment-stage.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/spin-yelb-pipe-judgment-stage.png -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/backend/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.0" 3 | } 4 | 5 | provider "aws" { 6 | region = var.aws_region 7 | } 8 | -------------------------------------------------------------------------------- /images/aws-modernization-with-spinnaker-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Young-ook/terraform-aws-spinnaker/HEAD/images/aws-modernization-with-spinnaker-architecture.png -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/application/yelbv2/modules/hostname.rb: -------------------------------------------------------------------------------- 1 | require 'socket' 2 | 3 | def hostname() 4 | hostnamedata = 'ApplicationVersion2' 5 | return hostnamedata 6 | end 7 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/backend/default.auto.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-2" 2 | name = "hello" 3 | tags = { 4 | owner = "yourid" 5 | team = "devops" 6 | billing = "prod" 7 | } 8 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-gcp/label.tf: -------------------------------------------------------------------------------- 1 | # frigga naming rule 2 | locals { 3 | name = join("-", compact(["spinnaker", "managed", var.desc])) 4 | credential_json = join("-", [local.name, "credential.json"]) 5 | } 6 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/backend/outputs.tf: -------------------------------------------------------------------------------- 1 | resource "local_file" "backend" { 2 | content = module.tfstate.backend 3 | filename = "${path.cwd}/backend.tf" 4 | file_permission = "0600" 5 | } 6 | -------------------------------------------------------------------------------- /versions.tf: -------------------------------------------------------------------------------- 1 | ## requirements 2 | 3 | terraform { 4 | required_version = ">= 0.13" 5 | required_providers { 6 | aws = { 7 | source = "hashicorp/aws" 8 | version = ">= 4.0" 9 | } 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /charts/spinnaker/Chart.lock: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - name: minio 3 | repository: https://charts.min.io/ 4 | version: 5.0.9 5 | digest: sha256:17a2e934e6298c2b53748835edb66186c919691b45cba5abaff02d9cabc612e8 6 | generated: "2023-05-22T10:48:42.167189+09:00" 7 | -------------------------------------------------------------------------------- /examples/blueprint/outputs.tf: -------------------------------------------------------------------------------- 1 | output "spinnaker" { 2 | description = "Attributes of 
generated spinnaker" 3 | value = { 4 | artifact_bucket = module.artifact.bucket.id 5 | halconfig = module.spinnaker.halconfig 6 | irsaconfig = module.spinnaker.irsaconfig 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/backend/main.tf: -------------------------------------------------------------------------------- 1 | ### terraform remote state backend 2 | 3 | module "tfstate" { 4 | source = "Young-ook/tfstate/aws" 5 | version = "2.0.0" 6 | name = var.name 7 | tags = var.tags 8 | force_destroy = true 9 | } 10 | -------------------------------------------------------------------------------- /modules/frigga/outputs.tf: -------------------------------------------------------------------------------- 1 | output "name" { 2 | value = local.frigga-name 3 | description = "The generated name for your AWS resource" 4 | } 5 | 6 | output "nametag" { 7 | value = local.name-tag 8 | description = "The map of name-tag to attach to your AWS resource" 9 | } 10 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.0" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = ">= 4.0" 7 | } 8 | } 9 | } 10 | 11 | provider "aws" { 12 | region = var.aws_region 13 | } 14 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/foundation/outputs.tf: -------------------------------------------------------------------------------- 1 | output "vpc" { 2 | value = module.vpc.vpc 3 | } 4 | 5 | output "subnets" { 6 | value = module.vpc.subnets 7 | } 8 | 9 | output "eks" { 10 | value = { 11 | cluster = module.eks.cluster 12 | script = module.eks.kubeconfig 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-gcp/variables.tf: -------------------------------------------------------------------------------- 1 | # variables.tf 2 | 3 | variable "project" { 4 | description = "The project id that you want to put under spinnaker's control" 5 | } 6 | 7 | # description 8 | 9 | variable "desc" { 10 | description = "The extra description of module instance" 11 | default = "" 12 | } 13 | -------------------------------------------------------------------------------- /CODEOWNER: -------------------------------------------------------------------------------- 1 | # This is a comment. 2 | # Each line is a file pattern followed by one or more owners. 3 | 4 | # These owners will be the default owners for everything in 5 | # the repo. Unless a later match takes precedence, 6 | # @global-owner1 and @global-owner2 will be requested for 7 | # review when someone opens a pull request. 
8 | * @Young-ook 9 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/application/yelbv2/modules/restaurant.rb: -------------------------------------------------------------------------------- 1 | require_relative 'restaurantsdbread' 2 | require_relative 'restaurantsdbupdate' 3 | 4 | def restaurantsupdate(restaurant) 5 | restaurantsdbupdate(restaurant) 6 | restaurantcount = restaurantsdbread(restaurant) 7 | return restaurantcount 8 | end 9 | -------------------------------------------------------------------------------- /examples/blueprint/chaosmonkey.toml: -------------------------------------------------------------------------------- 1 | [chaosmonkey] 2 | enabled = true 3 | schedule_enabled = false 4 | leashed = false 5 | accounts = ["default"] 6 | 7 | [database] 8 | host = {rds_endpoint} 9 | name = "chaosmonkey" 10 | user = "chaosmonkey_service" 11 | encrypted_password = {password} 12 | 13 | [spinnaker] 14 | endpoint = "http://spin-gate:8084" 15 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-aws/outputs.tf: -------------------------------------------------------------------------------- 1 | output "role_id" { 2 | value = aws_iam_role.spinnaker-managed.id 3 | description = "The generated id(name) of spinnaker managed role" 4 | } 5 | 6 | output "role_arn" { 7 | value = aws_iam_role.spinnaker-managed.arn 8 | description = "The generated arn of spinnaker managed role" 9 | } 10 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/application/yelbv2/modules/getstats.rb: -------------------------------------------------------------------------------- 1 | require_relative 'hostname' 2 | require_relative 'pageviews' 3 | 4 | def getstats() 5 | hostname = hostname() 6 | pageviews = pageviews() 7 | stats = '{"hostname": "' + hostname + '"' + ", " + '"pageviews":' + pageviews + "}" 8 | return stats 9 | end 10 | -------------------------------------------------------------------------------- /charts/spinnaker/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: spinnaker 3 | description: Open source, multi-cloud continuous delivery platform for releasing software changes with high velocity and confidence. 
4 | type: application 5 | version: 1.0.0 6 | dependencies: 7 | - name: minio 8 | version: 5.0.9 9 | repository: https://charts.min.io/ 10 | condition: minio.enabled 11 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/default.auto.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "ap-northeast-2" 2 | azs = ["ap-northeast-2a", "ap-northeast-2b", "ap-northeast-2c"] 3 | kubernetes_version = "1.24" 4 | spinnaker_version = "1.24.5" 5 | name = "hello" 6 | tags = { 7 | owner = "yourid" 8 | team = "devops" 9 | billing = "prod" 10 | } 11 | -------------------------------------------------------------------------------- /modules/codebuild/labels.tf: -------------------------------------------------------------------------------- 1 | # frigga naming 2 | module "frigga" { 3 | source = "../frigga" 4 | name = var.name 5 | stack = var.stack 6 | detail = var.detail 7 | } 8 | 9 | locals { 10 | name = module.frigga.name 11 | name-tag = { "Name" = local.name } 12 | default-tags = merge( 13 | { "terraform.io" = "managed" }, 14 | local.name-tag 15 | ) 16 | } 17 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Local .terraform directories 2 | **/.terraform/* 3 | 4 | # .tfstate files 5 | *.tfstate 6 | *.tfstate.* 7 | 8 | # terraform cache 9 | *.terraform.lock.hcl 10 | 11 | # local terraform vars 12 | local.*.tfvars 13 | 14 | # User kubernetes cluster configs 15 | kubeconfig 16 | 17 | # Local .DS_Store 18 | **/.DS_Store 19 | 20 | # local helm chart cache 21 | **/charts/*.tgz 22 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-aws/defaults.tf: -------------------------------------------------------------------------------- 1 | ### default values 2 | 3 | ### aws partitions 4 | module "aws" { 5 | source = "Young-ook/spinnaker/aws//modules/aws-partitions" 6 | } 7 | 8 | locals { 9 | aws = { 10 | dns = module.aws.partition.dns_suffix 11 | partition = module.aws.partition.partition 12 | region = module.aws.region.name 13 | id = module.aws.caller.account_id 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /modules/frigga/versions.tf: -------------------------------------------------------------------------------- 1 | ## requirements 2 | 3 | terraform { 4 | required_version = ">= 0.12" 5 | required_providers { 6 | null = { 7 | source = "hashicorp/null" 8 | version = ">= 3.0" 9 | } 10 | local = { 11 | source = "hashicorp/local" 12 | version = ">= 2.0" 13 | } 14 | random = { 15 | source = "hashicorp/random" 16 | version = ">= 3.0" 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /charts/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /labels.tf: -------------------------------------------------------------------------------- 1 | ### frigga name 2 | module "frigga" { 3 | source = "Young-ook/spinnaker/aws//modules/frigga" 4 | version = "2.3.5" 5 | name = var.name == null || var.name == "" ? "spinnaker" : var.name 6 | petname = var.name == null || var.name == "" ? true : false 7 | } 8 | 9 | locals { 10 | name = module.frigga.name 11 | default-tags = merge( 12 | { "terraform.io" = "managed" }, 13 | { "Name" = local.name }, 14 | ) 15 | } 16 | -------------------------------------------------------------------------------- /charts/spinnaker/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-aws/labels.tf: -------------------------------------------------------------------------------- 1 | ### frigga name 2 | module "frigga" { 3 | source = "Young-ook/spinnaker/aws//modules/frigga" 4 | version = "3.0.0" 5 | name = var.name 6 | stack = var.stack 7 | detail = var.detail 8 | } 9 | 10 | locals { 11 | name = module.frigga.name 12 | default-tags = merge( 13 | { "terraform.io" = "managed" }, 14 | { "spinnaker.io" = "managed" }, 15 | { "Name" = local.name }, 16 | ) 17 | } 18 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-eks/labels.tf: -------------------------------------------------------------------------------- 1 | ### frigga name 2 | module "frigga" { 3 | source = "Young-ook/spinnaker/aws//modules/frigga" 4 | version = "2.3.5" 5 | name = var.name 6 | stack = var.stack 7 | detail = var.detail 8 | } 9 | 10 | locals { 11 | name = module.frigga.name 12 | default-tags = merge( 13 | { "terraform.io" = "managed" }, 14 | { "spinnaker.io" = "managed" }, 15 | { "Name" = local.name }, 16 | ) 17 | } 18 | -------------------------------------------------------------------------------- /.github/workflows/stale.yaml: -------------------------------------------------------------------------------- 1 | name: 'Close stale issues and PRs' 2 | on: 3 | schedule: 4 | - cron: '30 1 * * *' 5 | 6 | jobs: 7 | stale: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/stale@v4 11 | with: 12 | stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 5 days.' 
13 | days-before-stale: 30 14 | days-before-close: 5 15 | -------------------------------------------------------------------------------- /charts/spinnaker/templates/secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if not .Values.dockerRegistryAccountSecret }} 2 | --- 3 | apiVersion: v1 4 | kind: Secret 5 | metadata: 6 | name: {{ .Release.Name }}-registry 7 | labels: 8 | {{ include "spinnaker.standard-labels" . | indent 4 }} 9 | component: clouddriver 10 | type: Opaque 11 | data: 12 | {{- range $index, $account := .Values.dockerRegistries }} 13 | {{ $account.name }}: {{ default "" $account.password | b64enc | quote }} 14 | {{- end }} 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/foundation/charts/cluster-autoscaler/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "cluster-autoscaler.serviceAccountName" . }} 6 | labels: 7 | {{- include "cluster-autoscaler.labels" . | nindent 4 }} 8 | {{- with .Values.serviceAccount.annotations }} 9 | annotations: 10 | {{- toYaml . | nindent 4 }} 11 | {{- end }} 12 | {{- end }} 13 | -------------------------------------------------------------------------------- /modules/codebuild/outputs.tf: -------------------------------------------------------------------------------- 1 | # output variables 2 | 3 | output "project" { 4 | description = "The CodeBuild project attributes" 5 | value = aws_codebuild_project.cb 6 | } 7 | 8 | output "build" { 9 | description = "Bash script to start a build project" 10 | value = join(" ", [ 11 | "bash -e", 12 | format("%s/scripts/start-build.sh", path.module), 13 | format("-r %s", module.aws.region.name), 14 | format("-n %s", aws_codebuild_project.cb.id), 15 | ]) 16 | } 17 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/backend/variables.tf: -------------------------------------------------------------------------------- 1 | ### network 2 | variable "aws_region" { 3 | description = "The aws region to deploy the service into" 4 | type = string 5 | } 6 | 7 | ### description 8 | variable "name" { 9 | description = "The logical name of the module instance" 10 | type = string 11 | } 12 | 13 | ### tags 14 | variable "tags" { 15 | description = "The key-value maps for tagging" 16 | type = map(string) 17 | default = {} 18 | } 19 | -------------------------------------------------------------------------------- /modules/spinnaker-aware-aws-vpc/tests/defaults/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | test = { 4 | source = "terraform.io/builtin/test" 5 | } 6 | } 7 | } 8 | 9 | module "main" { 10 | source = "../../" 11 | cidr = "10.0.0.0/16" 12 | name = "service" 13 | stack = "stack" 14 | detail = "vpc" 15 | tags = { test = "spinnaker-aware-vpc-default" } 16 | } 17 | 18 | output "vpc" { 19 | description = "Attributes of spinnaker aware vpc" 20 | value = module.main.vpc 21 | } 22 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/foundation/charts/cluster-autoscaler/.helmignore: -------------------------------------------------------------------------------- 1 | # 
Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /modules/spinnaker-aware-aws-vpc/labels.tf: -------------------------------------------------------------------------------- 1 | # frigga naming 2 | module "frigga" { 3 | source = "Young-ook/spinnaker/aws//modules/frigga" 4 | version = "2.3.5" 5 | name = var.name 6 | stack = var.stack 7 | detail = var.detail 8 | petname = false 9 | } 10 | 11 | locals { 12 | name = module.frigga.name 13 | default-tags = merge( 14 | { "terraform.io" = "managed" }, 15 | local.vpc-tag 16 | ) 17 | } 18 | 19 | # vpc tags 20 | locals { 21 | vpc-tag = merge( 22 | { "vpc:name" = local.name }, 23 | ) 24 | } 25 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-gcp/outputs.tf: -------------------------------------------------------------------------------- 1 | 2 | output "name" { 3 | description = "The service account name of spinnaker managed role" 4 | value = google_service_account.spinnaker-managed.name 5 | } 6 | 7 | output "email" { 8 | description = "The service account email address of spinnaker managed role" 9 | value = google_service_account.spinnaker-managed.email 10 | } 11 | 12 | output "id" { 13 | description = "The service account resource identity of spinnaker managed role" 14 | value = google_service_account.spinnaker-managed.unique_id 15 | } 16 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/application/yelbv2/startup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # when the variable is populated a search domain entry is added to resolv.conf at startup 4 | # this is needed for the ECS service discovery given the app works by calling host names and not FQDNs 5 | # a search domain can't be added to the container when using the awsvpc mode 6 | # and the awsvpc mode is needed for A records (bridge only supports SRV records) 7 | if [ $SEARCH_DOMAIN ]; then echo "search ${SEARCH_DOMAIN}" >> /etc/resolv.conf; fi 8 | 9 | ruby /app/yelb-appserver.rb -o 0.0.0.0 10 | -------------------------------------------------------------------------------- /modules/aws-partitions/outputs.tf: -------------------------------------------------------------------------------- 1 | data "aws_partition" "current" {} 2 | 3 | output "partition" { 4 | description = "The attribute map of current AWS partition" 5 | value = data.aws_partition.current 6 | } 7 | 8 | data "aws_region" "current" {} 9 | 10 | output "region" { 11 | description = "The attribute map of current AWS region" 12 | value = data.aws_region.current 13 | } 14 | 15 | data "aws_caller_identity" "current" {} 16 | 17 | output "caller" { 18 | description = "The attribute map of current AWS API caller" 19 | value = data.aws_caller_identity.current 20 | } 21 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-ecs/labels.tf: 
-------------------------------------------------------------------------------- 1 | ### frigga name 2 | module "frigga" { 3 | source = "Young-ook/spinnaker/aws//modules/frigga" 4 | version = "2.3.5" 5 | name = var.name 6 | stack = var.stack 7 | detail = var.detail 8 | } 9 | 10 | locals { 11 | name = module.frigga.name 12 | default-tags = merge( 13 | { "terraform.io" = "managed" }, 14 | { "spinnaker.io" = "managed" }, 15 | local.ecs-tag 16 | ) 17 | } 18 | 19 | # ecs tags 20 | locals { 21 | ecs-tag = merge( 22 | { "ecs:cluster-name" = local.name }, 23 | { "AmazonECSManaged" = "true" }, 24 | ) 25 | } 26 | -------------------------------------------------------------------------------- /modules/spinnaker-aware-aws-vpc/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | subnet_type = (!var.enable_igw && !var.enable_ngw) ? "isolated" : (var.enable_igw && !var.enable_ngw) ? "public" : "private" 3 | } 4 | 5 | module "vpc" { 6 | source = "Young-ook/vpc/aws" 7 | version = "1.0.3" 8 | name = local.name 9 | tags = var.tags 10 | vpc_config = { 11 | azs = var.azs 12 | cidr = var.cidr 13 | single_ngw = var.single_ngw 14 | subnet_type = local.subnet_type 15 | } 16 | vpce_config = var.vpc_endpoint_config 17 | vgw_config = { 18 | enable_vgw = var.enable_vgw 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /modules/frigga/main.tf: -------------------------------------------------------------------------------- 1 | # name and description 2 | # frigga naming rule 3 | resource "random_string" "suffix" { 4 | length = 5 5 | upper = false 6 | lower = true 7 | numeric = false 8 | special = false 9 | } 10 | 11 | resource "random_pet" "name" { 12 | length = 1 13 | separator = "-" 14 | } 15 | 16 | locals { 17 | suffix = var.petname ? random_string.suffix.result : "" 18 | frigga-name = substr(join("-", compact([(var.name == null || var.name == "" ? 
random_pet.name.id : var.name), var.stack, var.detail, local.suffix])), 0, var.max_length) 19 | name-tag = { "Name" = local.frigga-name } 20 | } 21 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-eks/main.tf: -------------------------------------------------------------------------------- 1 | module "eks" { 2 | source = "Young-ook/eks/aws" 3 | version = "2.0.3" 4 | name = local.name 5 | tags = merge(local.default-tags, var.tags) 6 | subnets = var.subnets 7 | kubernetes_version = var.kubernetes_version 8 | enabled_cluster_log_types = var.enabled_cluster_log_types 9 | node_groups = var.node_groups 10 | managed_node_groups = var.managed_node_groups 11 | policy_arns = var.policy_arns 12 | enable_ssm = var.enable_ssm 13 | } 14 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/application/yelbv2/modules/getvotes.rb: -------------------------------------------------------------------------------- 1 | require_relative 'restaurantsdbread' 2 | require_relative 'restaurantsdbupdate' 3 | 4 | def getvotes() 5 | outback = restaurantsdbread("outback") 6 | ihop = restaurantsdbread("ihop") 7 | bucadibeppo = restaurantsdbread("bucadibeppo") 8 | chipotle = restaurantsdbread("chipotle") 9 | votes = '[{"name": "outback", "value": ' + outback + '},' + '{"name": "bucadibeppo", "value": ' + bucadibeppo + '},' + '{"name": "ihop", "value": ' + ihop + '}, ' + '{"name": "chipotle", "value": ' + chipotle + '}]' 10 | return votes 11 | end 12 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-gcp/README.md: -------------------------------------------------------------------------------- 1 | # Spinnaker managed Cloud IAM role 2 | [Spinnaker](https://spinnaker.io/) is an open-source, multi-cloud continuous delivery platform for releasing software changes with high velocity and confidence. This terraform module creates a GCP service account so that the project can be managed by spinnaker. 
3 | 4 | ## Quickstart 5 | ### Setup 6 | ```hcl 7 | module "spinnaker-managed-role" { 8 | source = "Young-ook/spinnaker/aws//modules/spinnaker-managed-gcp" 9 | version = "~> 2.0" 10 | 11 | desc = "dev" 12 | project = "yourproj" 13 | } 14 | ``` 15 | Run terraform: 16 | ``` 17 | terraform init 18 | terraform apply 19 | ``` 20 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/platform/templates/preuninstall.tpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | 3 | export KUBECONFIG=kubeconfig 4 | 5 | ${eks_update_kubeconfig} 6 | kubectl delete ns ${eks_kubeconfig_context} 7 | 8 | aws appmesh delete-mesh --mesh-name yelb-mesh --region ${aws_region} 9 | 10 | rm kubeconfig 11 | 12 | volumes=$(aws ec2 describe-volumes \ 13 | --filters Name=tag:kubernetes.io/created-for/pvc/namespace,Values=spinnaker \ 14 | --query "Volumes[*].{ID:VolumeId}" \ 15 | --region ${aws_region} \ 16 | --output text) 17 | 18 | for volume in $volumes 19 | do 20 | aws ec2 delete-volume --volume-id $volume --region ${aws_region} 21 | done 22 | 23 | unset KUBECONFIG 24 | -------------------------------------------------------------------------------- /modules/spinnaker-aware-aws-vpc/outputs.tf: -------------------------------------------------------------------------------- 1 | ### output variables 2 | 3 | output "vpc" { 4 | description = "The attributes of the secure vpc" 5 | value = module.vpc.vpc 6 | } 7 | 8 | output "vpce" { 9 | description = "The attributes of VPC endpoints" 10 | value = module.vpc.vpce 11 | } 12 | 13 | output "subnets" { 14 | description = "The map of subnet IDs" 15 | value = module.vpc.subnets 16 | } 17 | 18 | output "route_tables" { 19 | description = "The map of route table IDs" 20 | value = module.vpc.route_tables 21 | } 22 | 23 | output "vgw" { 24 | description = "The attributes of Virtual Private Gateway" 25 | value = module.vpc.vgw 26 | } 27 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/foundation/policy.cluster-autoscaler.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Action": [ 7 | "autoscaling:DescribeAutoScalingGroups", 8 | "autoscaling:DescribeAutoScalingInstances", 9 | "autoscaling:DescribeLaunchConfigurations", 10 | "autoscaling:DescribeTags", 11 | "autoscaling:SetDesiredCapacity", 12 | "autoscaling:TerminateInstanceInAutoScalingGroup", 13 | "ec2:DescribeLaunchTemplateVersions" 14 | ], 15 | "Resource": "*" 16 | } 17 | ] 18 | } 19 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/application/yelbv2/README.md: -------------------------------------------------------------------------------- 1 | This is the application server. It's a Ruby/Sinatra application that exposes a number of APIs that are consumed by the UI (or via curl if you fancy that). 2 | 3 | Originally this application was contained in a single file (`yelb-appserver.rb`). It has since been refactored by extracting the individual API definitions into separate adapters and modules. This made the transition to Lambda/Serverless easier (one lambda per API definition). 
This hasn't changed the other deployment models (containers and instances) because those models still launch the `yelb-appserver.rb` main application which imports the modules instead of having everything in a single file (as it was conceived originally). 4 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-ecs/outputs.tf: -------------------------------------------------------------------------------- 1 | ### output variables 2 | 3 | output "cluster" { 4 | description = "The ECS cluster attributes" 5 | value = { 6 | control_plane = aws_ecs_cluster.cp 7 | data_plane = aws_ecs_capacity_provider.ng 8 | } 9 | } 10 | 11 | output "role_arn" { 12 | description = "The generated role ARN of the ECS node group" 13 | value = (local.node_groups_enabled ? zipmap( 14 | ["name", "arn"], 15 | [aws_iam_role.ng.0.name, aws_iam_role.ng.0.arn] 16 | ) : null) 17 | } 18 | 19 | output "features" { 20 | description = "Features configurations for the ECS" 21 | value = { 22 | "node_groups_enabled" = local.node_groups_enabled 23 | "fargate_enabled" = !local.node_groups_enabled 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/application/yelbv2/buildspec.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | phases: 3 | pre_build: 4 | commands: 5 | - $(aws ecr get-login --no-include-email) 6 | - TAG="latest" 7 | - IMAGE_URI="${REPOSITORY_URI}:${TAG}" 8 | build: 9 | commands: 10 | - echo Change the source directory to ${APP_NAME} 11 | - cd ${APP_NAME} 12 | - echo Build a container image 13 | - docker build --tag "$IMAGE_URI" . 14 | post_build: 15 | commands: 16 | - docker push "$IMAGE_URI" 17 | - sed -i "s@@$IMAGE_URI@g" manifests/2.app-v2.yaml 18 | - echo Uploading to ${ARTIFACT_BUCKET} 19 | - aws s3 cp --recursive manifests/ s3://${ARTIFACT_BUCKET}/ 20 | artifacts: 21 | files: 3-meshed-app-v2.yaml 22 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/foundation/variables.tf: -------------------------------------------------------------------------------- 1 | ### network 2 | variable "azs" { 3 | description = "A list of availability zones for the vpc" 4 | type = list(any) 5 | } 6 | 7 | variable "cidr" { 8 | description = "The vpc CIDR (e.g. 
10.0.0.0/16)" 9 | type = string 10 | default = "10.0.0.0/16" 11 | } 12 | 13 | ### kubernetes 14 | variable "kubernetes_version" { 15 | description = "The target version of kubernetes" 16 | type = string 17 | } 18 | 19 | ### description 20 | variable "name" { 21 | description = "The logical name of the module instance" 22 | type = string 23 | } 24 | 25 | ### tags 26 | variable "tags" { 27 | description = "The key-value maps for tagging" 28 | type = map(string) 29 | default = {} 30 | } 31 | -------------------------------------------------------------------------------- /variables.tf: -------------------------------------------------------------------------------- 1 | ### input variables 2 | 3 | ### features 4 | variable "features" { 5 | description = "Feature toggles for spinnaker configuration" 6 | type = any 7 | default = { 8 | aurora = { 9 | enabled = false 10 | } 11 | eks = { 12 | version = "1.24" 13 | ssm_enabled = false 14 | cluster_logs = [] 15 | role_arns = [] 16 | } 17 | s3 = { 18 | enabled = false 19 | force_destroy = false 20 | versioning = false 21 | } 22 | vpc = { 23 | id = null 24 | cidrs = [] 25 | subnets = [] 26 | } 27 | } 28 | } 29 | 30 | ### description 31 | variable "name" { 32 | description = "The logical name of the module instance" 33 | type = string 34 | default = null 35 | } 36 | 37 | ### tags 38 | variable "tags" { 39 | description = "The key-value maps for tagging" 40 | type = map(string) 41 | default = {} 42 | } 43 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/main.tf: -------------------------------------------------------------------------------- 1 | ### frigga naming 2 | module "frigga" { 3 | source = "Young-ook/spinnaker/aws//modules/frigga" 4 | version = "2.3.5" 5 | name = var.name 6 | stack = var.stack 7 | detail = var.detail 8 | } 9 | 10 | ### foundation 11 | module "foundation" { 12 | source = "./foundation" 13 | name = module.frigga.name 14 | azs = var.azs 15 | tags = var.tags 16 | kubernetes_version = var.kubernetes_version 17 | } 18 | 19 | ### platform 20 | module "platform" { 21 | source = "./platform" 22 | name = module.frigga.name 23 | tags = var.tags 24 | aws_region = var.aws_region 25 | azs = var.azs 26 | vpc = module.foundation.vpc 27 | eks = module.foundation.eks 28 | spinnaker_version = var.spinnaker_version 29 | kubernetes_version = var.kubernetes_version 30 | } 31 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-eks/outputs.tf: -------------------------------------------------------------------------------- 1 | # output variables 2 | 3 | output "cluster" { 4 | value = module.eks.cluster 5 | description = "The EKS cluster attributes" 6 | } 7 | 8 | output "role" { 9 | value = module.eks.role 10 | description = "The generated role of the EKS node group" 11 | } 12 | 13 | output "oidc" { 14 | value = module.eks.oidc 15 | description = "The OIDC provider attributes for IAM Role for ServiceAccount" 16 | } 17 | 18 | output "tags" { 19 | value = module.eks.tags 20 | description = "The generated tags for EKS integration" 21 | } 22 | 23 | output "kubeconfig" { 24 | value = join(" ", [ 25 | module.eks.kubeconfig, 26 | "-s true", 27 | ]) 28 | description = "Bash script to update kubeconfig file" 29 | } 30 | 31 | output "kubeauth" { 32 | description = "The kubernetes cluster authentication information for Kubernetes/Helm providers" 33 | sensitive = true 34 | value = module.eks.kubeauth 35 | } 36 | 
-------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/application/yelbv2/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM bitnami/ruby:2.5 2 | 3 | ################## BEGIN INSTALLATION ###################### 4 | 5 | # Set the working directory to /app 6 | WORKDIR /app 7 | 8 | COPY yelb-appserver.rb yelb-appserver.rb 9 | COPY Gemfile Gemfile 10 | COPY modules modules 11 | 12 | ENV LANG=en_us.UTF-8 13 | ENV LC_ALL=C.UTF-8 14 | ENV RACK_ENV=production 15 | 16 | ### hack to allow the setup of the pg gem (which would fail otherwise) 17 | RUN apt-get update 18 | RUN apt-get install libpq-dev postgresql -y 19 | ### end of hack (this would require additional research and optimization) 20 | ### this installs the AWS SDK for DynamoDB (so that appserver can talk to DDB Vs the default Postgres/Redis) 21 | RUN gem install aws-sdk-dynamodb sinatra redis pg --no-document 22 | # Set the working directory to / 23 | WORKDIR / 24 | ADD startup.sh startup.sh 25 | 26 | ##################### INSTALLATION END ##################### 27 | 28 | CMD ["./startup.sh"] 29 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/application/yelbv2/manifests/3.weighted-route.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: appmesh.k8s.aws/v1beta2 2 | kind: VirtualRouter 3 | metadata: 4 | name: yelb-appserver 5 | spec: 6 | awsName: yelb-appserver-virtual-router 7 | listeners: 8 | - portMapping: 9 | port: 4567 10 | protocol: http 11 | routes: 12 | - name: route-to-yelb-appserver 13 | httpRoute: 14 | match: 15 | prefix: / 16 | action: 17 | weightedTargets: 18 | - virtualNodeRef: 19 | name: yelb-appserver 20 | weight: 1 21 | - virtualNodeRef: 22 | name: yelb-appserver-v2 23 | weight: 1 24 | retryPolicy: 25 | maxRetries: 2 26 | perRetryTimeout: 27 | unit: ms 28 | value: 2000 29 | httpRetryEvents: 30 | - server-error 31 | - client-error 32 | - gateway-error 33 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-eks/tests/defaults/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | test = { 4 | source = "terraform.io/builtin/test" 5 | } 6 | } 7 | } 8 | 9 | module "vpc" { 10 | source = "Young-ook/vpc/aws" 11 | version = "1.0.3" 12 | } 13 | 14 | module "main" { 15 | source = "../.." 
16 | name = "service" 17 | stack = "stack" 18 | detail = "eks-ec2" 19 | tags = { test = "spinnaker-managed-eks-default" } 20 | subnets = values(module.vpc.subnets["public"]) 21 | kubernetes_version = "1.24" 22 | enabled_cluster_log_types = ["api", "audit"] 23 | enable_ssm = true 24 | managed_node_groups = [ 25 | { 26 | name = "default" 27 | desired_size = 1 28 | instance_type = "t3.large" 29 | } 30 | ] 31 | } 32 | 33 | output "cluster" { 34 | description = "The generated AWS EKS cluster" 35 | value = module.main.cluster 36 | } 37 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-aws/variables.tf: -------------------------------------------------------------------------------- 1 | ### feature 2 | variable "base_role_enabled" { 3 | description = "A boolean variable to indicate whether to create a BaseIAMRole for EC2 deployment" 4 | type = bool 5 | default = false 6 | } 7 | 8 | ### security/trusted-roles 9 | variable "trusted_role_arn" { 10 | description = "A list of full arn of iam roles of spinnaker cluster" 11 | type = list(string) 12 | default = [] 13 | } 14 | 15 | ### description 16 | variable "name" { 17 | description = "The logical name of the module instance" 18 | type = string 19 | default = "spinnaker-managed" 20 | } 21 | 22 | variable "stack" { 23 | description = "Text used to identify stack of infrastructure components" 24 | type = string 25 | default = "" 26 | } 27 | 28 | variable "detail" { 29 | description = "The purpose of your aws account" 30 | type = string 31 | default = "" 32 | } 33 | 34 | ### tags 35 | variable "tags" { 36 | description = "The key-value maps for tagging" 37 | type = map(string) 38 | default = {} 39 | } 40 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/platform/templates/terminate-eks-nodes.tpl: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Terminate EKS nodes", 3 | "targets": { 4 | "eks-nodes": { 5 | "resourceType": "aws:eks:nodegroup", 6 | "resourceArns": [ 7 | "${nodegroup}" 8 | ], 9 | "selectionMode": "ALL" 10 | } 11 | }, 12 | "actions": { 13 | "TerminateInstances": { 14 | "actionId": "aws:eks:terminate-nodegroup-instances", 15 | "description": "terminate the node instances", 16 | "parameters": { 17 | "instanceTerminationPercentage": "40" 18 | }, 19 | "targets": { 20 | "Nodegroups": "eks-nodes" 21 | } 22 | } 23 | }, 24 | "stopConditions": ${alarm}, 25 | "roleArn": "${role}", 26 | "logConfiguration": { 27 | "logSchemaVersion": 1, 28 | "cloudWatchLogsConfiguration": { 29 | "logGroupArn": "${logs}" 30 | } 31 | }, 32 | "tags": { 33 | "Name": "TerminateEKSNodes" 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Young-ook 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or 
substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /outputs.tf: -------------------------------------------------------------------------------- 1 | ### output variables 2 | 3 | output "role" { 4 | description = "IAM role for Spinnaker" 5 | value = module.irsa.arn 6 | } 7 | 8 | locals { 9 | helm_chart_name = nonsensitive(module.helm.addons.chart["spin"].chart) 10 | helm_release_name = nonsensitive(module.helm.addons.chart["spin"].name) 11 | halyard_pod = join("-", [local.helm_release_name, "halyard-0"]) 12 | } 13 | 14 | output "halconfig" { 15 | description = "Bash command to access halyard in interactive mode" 16 | value = join(" ", [ 17 | "bash -e", 18 | format("%s/scripts/halconfig.sh", path.module), 19 | format("-r %s", module.aws.region.name), 20 | format("-n %s", module.eks.cluster.name), 21 | format("-p %s", local.halyard_pod), 22 | "-k kubeconfig", 23 | ]) 24 | } 25 | 26 | output "irsaconfig" { 27 | description = "Bash command to apply irsa annotations" 28 | value = module.irsa.kubecli 29 | } 30 | 31 | output "features" { 32 | description = "Feature configurations of spinnaker" 33 | value = { 34 | "aurora_enabled" = local.aurora_enabled 35 | "s3_enabled" = local.s3_enabled 36 | "ssm_enabled" = local.ssm_enabled 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/application/yelbv2/modules/restaurantsdbread.rb: -------------------------------------------------------------------------------- 1 | require 'pg' 2 | require 'pg_ext' 3 | require 'aws-sdk-dynamodb' 4 | 5 | def restaurantsdbread(restaurant) 6 | if ($yelbddbrestaurants != nil && $yelbddbrestaurants != "") then 7 | dynamodb = Aws::DynamoDB::Client.new(region: $awsregion) 8 | params = { 9 | table_name: $yelbddbrestaurants, 10 | key: { 11 | name: restaurant 12 | } 13 | } 14 | restaurantrecord = dynamodb.get_item(params) 15 | restaurantcount = restaurantrecord.item['restaurantcount'] 16 | else 17 | con = PG.connect :host => $yelbdbhost, 18 | :port => $yelbdbport, 19 | :dbname => 'yelbdatabase', 20 | :user => 'postgres', 21 | :password => 'postgres_password' 22 | con.prepare('statement1', 'SELECT count FROM restaurants WHERE name = $1') 23 | res = con.exec_prepared('statement1', [ restaurant ]) 24 | restaurantcount = res.getvalue(0,0) 25 | con.close 26 | end 27 | return restaurantcount.to_s 28 | end 29 | -------------------------------------------------------------------------------- /modules/codebuild/defaults.tf: -------------------------------------------------------------------------------- 1 | ### default values 2 | 3 | ### aws partitions 4 | module "aws" { 5 | source = "Young-ook/spinnaker/aws//modules/aws-partitions" 6 | } 7 | 8 | locals { 9 | aws = { 10 | dns = module.aws.partition.dns_suffix 11 | id = module.aws.caller.account_id 12 | partition = module.aws.partition.partition 13 | region = module.aws.region.name 14 | } 15 | } 16 | 17 | locals { 18 | default_source = { 
19 | type = "NO_SOURCE" 20 | location = null 21 | version = null 22 | buildspec = yamlencode({ 23 | version = "0.2" 24 | phases = { 25 | build = { 26 | commands = [] 27 | } 28 | } 29 | }) 30 | } 31 | default_environment = { 32 | type = "LINUX_CONTAINER" 33 | image = "aws/codebuild/standard:2.0" 34 | compute_type = "BUILD_GENERAL1_SMALL" 35 | image_pull_credentials_type = "CODEBUILD" 36 | privileged_mode = "false" 37 | } 38 | default_artifact = { 39 | type = "NO_ARTIFACTS" 40 | location = null 41 | encryption_disabled = false 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /modules/codebuild/scripts/start-build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | CURDIR=`dirname $0` 3 | PROJNAME=codebuild 4 | STATUS="IN_PROGRESS" 5 | 6 | export AWS_REGION=us-east-1 7 | 8 | function print_usage() { 9 | echo "Usage: $0 -n(name) -r(region) " 10 | } 11 | 12 | function process_args() { 13 | if [[ $# < 1 ]]; then 14 | print_usage 15 | exit -1 16 | fi 17 | 18 | while getopts ":n:a:r:k:s:" opt; do 19 | case $opt in 20 | n) PROJNAME="$OPTARG" 21 | ;; 22 | r) AWS_REGION="$OPTARG" 23 | ;; 24 | \?) 25 | >&2 echo "Unrecognized argument '$OPTARG'" 26 | ;; 27 | esac 28 | done 29 | } 30 | 31 | function build () { 32 | ID=$(aws codebuild start-build --region ${AWS_REGION} --output text --query 'build.id' --project-name ${PROJNAME}) 33 | echo "Build ID: ${ID}" 34 | 35 | while [ ${STATUS} == "IN_PROGRESS" ] 36 | do 37 | STATUS=$(aws codebuild batch-get-builds --region ${AWS_REGION} --output text --query 'builds[*].buildStatus' --ids ${ID}) 38 | echo "Build STATUS: ${STATUS}" 39 | sleep 30 40 | done 41 | } 42 | 43 | # main 44 | process_args "$@" 45 | build 46 | 47 | unset AWS_REGION 48 | exit 0 49 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/variables.tf: -------------------------------------------------------------------------------- 1 | ### network 2 | variable "aws_region" { 3 | description = "The aws region to deploy the service into" 4 | type = string 5 | } 6 | 7 | variable "azs" { 8 | description = "The aws availability zones to deploy" 9 | type = list(any) 10 | } 11 | 12 | ### kubernetes 13 | variable "kubernetes_version" { 14 | description = "Kubernetes version" 15 | type = string 16 | } 17 | 18 | ### spinnaker 19 | variable "spinnaker_version" { 20 | description = "Spinnaker version" 21 | type = string 22 | } 23 | 24 | ### description 25 | variable "name" { 26 | description = "The logical name of the module instance" 27 | type = string 28 | } 29 | 30 | variable "stack" { 31 | description = "Text used to identify stack of infrastructure components" 32 | type = string 33 | default = "" 34 | } 35 | 36 | variable "detail" { 37 | description = "The extra description of module instance" 38 | type = string 39 | default = "" 40 | } 41 | 42 | ### tags 43 | variable "tags" { 44 | description = "The key-value maps for tagging" 45 | type = map(string) 46 | default = {} 47 | } 48 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-ecs/tests/defaults/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | test = { 4 | source = "terraform.io/builtin/test" 5 | } 6 | } 7 | } 8 | 9 | module "vpc" { 10 | source = "Young-ook/vpc/aws" 11 | version = "1.0.3" 12 | } 13 | 14 | module "main" { 15 | source = 
"../.." 16 | name = "service" 17 | stack = "stack" 18 | detail = "ecs-ec2" 19 | tags = { test = "spinnaker-managed-ecs-default" } 20 | subnets = values(module.vpc.subnets["public"]) 21 | container_insights_enabled = true 22 | termination_protection = false 23 | node_groups = [ 24 | { 25 | name = "default" 26 | desired_size = 1 27 | min_size = 1 28 | max_size = 3 29 | instance_type = "m6g.large" 30 | ami_type = "AL2_ARM_64" 31 | } 32 | ] 33 | } 34 | 35 | output "cluster" { 36 | description = "The generated AWS ECS cluster" 37 | value = module.main.cluster 38 | } 39 | 40 | output "features" { 41 | description = "Features configurations of the AWS ECS cluster" 42 | value = module.main.features 43 | } 44 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-ecs/tests/fargate/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | test = { 4 | source = "terraform.io/builtin/test" 5 | } 6 | } 7 | } 8 | 9 | provider "aws" { 10 | region = "ap-northeast-2" 11 | } 12 | 13 | module "vpc" { 14 | source = "Young-ook/vpc/aws" 15 | version = "1.0.3" 16 | vpc_config = { 17 | azs = ["ap-northeast-2a", "ap-northeast-2c", "ap-northeast-2d"] 18 | cidr = "10.10.0.0/16" 19 | subnet_type = "private" 20 | single_ngw = true 21 | } 22 | } 23 | 24 | module "main" { 25 | source = "../.." 26 | name = "service" 27 | stack = "stack" 28 | detail = "ecs-fargate" 29 | tags = { test = "spinnaker-managed-ecs-fargate" } 30 | subnets = slice(values(module.vpc.subnets["private"]), 0, 3) 31 | container_insights_enabled = true 32 | } 33 | 34 | output "cluster" { 35 | description = "The generated AWS ECS cluster" 36 | value = module.main.cluster 37 | } 38 | 39 | output "features" { 40 | description = "Features configurations of the AWS ECS cluster" 41 | value = module.main.features 42 | } 43 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/foundation/charts/cluster-autoscaler/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: cluster-autoscaler 3 | description: A Helm chart for Kubernetes 4 | 5 | # A chart can be either an 'application' or a 'library' chart. 6 | # 7 | # Application charts are a collection of templates that can be packaged into versioned archives 8 | # to be deployed. 9 | # 10 | # Library charts provide useful utilities or functions for the chart developer. They're included as 11 | # a dependency of application charts to inject those utilities and functions into the rendering 12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed. 13 | type: application 14 | 15 | # This is the chart version. This version number should be incremented each time you make changes 16 | # to the chart and its templates, including the app version. 17 | # Versions are expected to follow Semantic Versioning (https://semver.org/) 18 | version: 0.1.0 19 | 20 | # This is the version number of the application being deployed. This version number should be 21 | # incremented each time you make changes to the application. Versions are not expected to 22 | # follow Semantic Versioning. They should reflect the version the application is using. 
23 | appVersion: "v1.23.0" 24 | -------------------------------------------------------------------------------- /scripts/halconfig.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # halyard interactive mode 3 | set -e 4 | 5 | CURDIR=`dirname $0` 6 | EKS_NAME=eks 7 | 8 | export AWS_REGION=us-east-1 9 | export KUBECONFIG=$CURDIR/kubeconfig 10 | 11 | function print_usage() { 12 | echo "Usage: $0 -k -n(name) -r(region) -p(pod) " 13 | } 14 | 15 | function process_args() { 16 | if [[ $# < 1 ]]; then 17 | print_usage 18 | exit -1 19 | fi 20 | 21 | while getopts ":n:a:r:k:p:" opt; do 22 | case $opt in 23 | n) EKS_NAME="$OPTARG" 24 | ;; 25 | r) AWS_REGION="$OPTARG" 26 | ;; 27 | k) KUBECONFIG="$OPTARG" 28 | ;; 29 | p) POD_NAME="$OPTARG" 30 | ;; 31 | \?) 32 | >&2 echo "Unrecognized argument '$OPTARG'" 33 | ;; 34 | esac 35 | done 36 | } 37 | 38 | function init() { 39 | if [ -e $KUBECONFIG ]; then 40 | rm $KUBECONFIG 41 | fi 42 | 43 | # update kubeconfig 44 | aws eks update-kubeconfig --name $EKS_NAME --region $AWS_REGION 45 | 46 | # restrict access 47 | chmod 600 $KUBECONFIG 48 | } 49 | 50 | function prompt() { 51 | kubectl -n spinnaker exec -it $POD_NAME -- bash 52 | } 53 | 54 | # main 55 | process_args "$@" 56 | init 57 | prompt 58 | 59 | unset AWS_REGION 60 | unset KUBECONFIG 61 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/platform/variables.tf: -------------------------------------------------------------------------------- 1 | ### network 2 | variable "aws_region" { 3 | description = "The aws region to deploy spinnaker" 4 | type = string 5 | } 6 | 7 | variable "azs" { 8 | description = "A list of availability zones for the vpc" 9 | type = list(any) 10 | } 11 | 12 | variable "cidr" { 13 | description = "The vpc CIDR (e.g. 10.0.0.0/16)" 14 | type = string 15 | default = "172.16.0.0/16" 16 | } 17 | 18 | variable "vpc" { 19 | description = "Property map of the application VPC" 20 | type = any 21 | } 22 | 23 | ### kubernetes 24 | variable "eks" { 25 | description = "Attributes of eks kubeconfig for spinnaker integration (halconfig)" 26 | type = any 27 | } 28 | 29 | variable "kubernetes_version" { 30 | description = "The target version of kubernetes" 31 | type = string 32 | } 33 | 34 | variable "spinnaker_version" { 35 | description = "The spinnaker version to deploy" 36 | type = string 37 | } 38 | 39 | ### description 40 | variable "name" { 41 | description = "The logical name of the module instance" 42 | type = string 43 | } 44 | 45 | ### tags 46 | variable "tags" { 47 | description = "The key-value maps for tagging" 48 | type = map(string) 49 | default = {} 50 | } 51 | -------------------------------------------------------------------------------- /modules/frigga/variables.tf: -------------------------------------------------------------------------------- 1 | ### description 2 | variable "name" { 3 | description = "The logical name of the application/service instance" 4 | type = string 5 | default = null 6 | validation { 7 | condition = var.name == null ? true : var.name != null && length(var.name) > 0 8 | error_message = "Length of name is longer than 0." 9 | } 10 | } 11 | 12 | variable "stack" { 13 | description = "Text used to identify stack of infrastructure components (e.g., dev, prod)" 14 | type = string 15 | default = "" 16 | validation { 17 | condition = var.stack != null 18 | error_message = "Stak must not be null." 
19 | } 20 | } 21 | 22 | variable "detail" { 23 | description = "The purpose or extra description of your application/service instance" 24 | type = string 25 | default = "" 26 | validation { 27 | condition = var.detail != null 28 | error_message = "Detail must not be null." 29 | } 30 | } 31 | 32 | variable "petname" { 33 | description = "An indicator whether to append a random identifier to the end of the name to avoid duplication" 34 | type = bool 35 | default = true 36 | } 37 | 38 | variable "max_length" { 39 | description = "The maximum length of generated logical name" 40 | type = number 41 | default = 64 42 | } 43 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/foundation/charts/cluster-autoscaler/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled -}} 2 | {{- $fullName := include "cluster-autoscaler.fullname" . -}} 3 | {{- $svcPort := .Values.service.port -}} 4 | {{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} 5 | apiVersion: networking.k8s.io/v1beta1 6 | {{- else -}} 7 | apiVersion: extensions/v1beta1 8 | {{- end }} 9 | kind: Ingress 10 | metadata: 11 | name: {{ $fullName }} 12 | labels: 13 | {{- include "cluster-autoscaler.labels" . | nindent 4 }} 14 | {{- with .Values.ingress.annotations }} 15 | annotations: 16 | {{- toYaml . | nindent 4 }} 17 | {{- end }} 18 | spec: 19 | {{- if .Values.ingress.tls }} 20 | tls: 21 | {{- range .Values.ingress.tls }} 22 | - hosts: 23 | {{- range .hosts }} 24 | - {{ . | quote }} 25 | {{- end }} 26 | secretName: {{ .secretName }} 27 | {{- end }} 28 | {{- end }} 29 | rules: 30 | {{- range .Values.ingress.hosts }} 31 | - host: {{ .host | quote }} 32 | http: 33 | paths: 34 | {{- range .paths }} 35 | - path: {{ . 
}} 36 | backend: 37 | serviceName: {{ $fullName }} 38 | servicePort: {{ $svcPort }} 39 | {{- end }} 40 | {{- end }} 41 | {{- end }} 42 | -------------------------------------------------------------------------------- /examples/blueprint/awscb.tf: -------------------------------------------------------------------------------- 1 | ### pipeline/registry 2 | module "chaosmonkey-repo" { 3 | source = "Young-ook/eks/aws//modules/ecr" 4 | version = "2.0.1" 5 | name = var.name 6 | tags = var.tags 7 | } 8 | 9 | ### pipeline/registry 10 | module "artifact" { 11 | source = "Young-ook/sagemaker/aws//modules/s3" 12 | version = "0.3.4" 13 | name = "artifact-preprod" 14 | tags = var.tags 15 | force_destroy = true 16 | } 17 | 18 | ### pipeline/build 19 | module "chaosmonkey-build" { 20 | source = "Young-ook/spinnaker/aws//modules/codebuild" 21 | version = "2.3.6" 22 | name = var.name 23 | stack = var.stack 24 | tags = var.tags 25 | project = { 26 | source_config = { 27 | type = "GITHUB" 28 | location = "https://github.com/Young-ook/chaosmonkey.git" 29 | buildspec = join("/", ["buildspec.yml"]) 30 | version = "dockerbuild" 31 | } 32 | environment_config = { 33 | image = "aws/codebuild/amazonlinux2-x86_64-standard:3.0" 34 | privileged_mode = true 35 | environment_variables = { 36 | REPOSITORY_URI = module.chaosmonkey-repo.url 37 | } 38 | } 39 | } 40 | policy_arns = [ 41 | module.chaosmonkey-repo.policy_arns["read"], 42 | module.chaosmonkey-repo.policy_arns["write"], 43 | ] 44 | } 45 | -------------------------------------------------------------------------------- /modules/aws-partitions/README.md: -------------------------------------------------------------------------------- 1 | # AWS Partition 2 | A Partition is a group of AWS Region and Service objects. You can use a partition to determine what services are available in a region, or what regions a service is available in. 3 | This module provides the attributes of current IAM identity of API caller, the current AWS Partition and Region information where you are running terraform. 4 | 5 | ## Quickstart 6 | ### Setup 7 | ```hcl 8 | module "aws-partitions" { 9 | source = "Young-ook/spinnaker/aws//modules/aws-partitions" 10 | version = ">= 2.0" 11 | } 12 | ``` 13 | Run terraform: 14 | ``` 15 | terraform init 16 | terraform apply 17 | ``` 18 | 19 | And you will see the outputs. 20 | ``` 21 | Apply complete! Resources: 0 added, 0 changed, 0 destroyed. 22 | 23 | Outputs: 24 | 25 | caller = { 26 | "account_id" = "111122223333" 27 | "arn" = "arn:aws:sts::111122223333:assumed-role/admin/your-iam-user" 28 | "id" = "111122223333" 29 | "user_id" = "AXXXXXXXXXXXXXXXXAAAAXXXXYYYY:your-iam-user" 30 | } 31 | partition = { 32 | "dns_suffix" = "amazonaws.com" 33 | "id" = "aws" 34 | "partition" = "aws" 35 | "reverse_dns_prefix" = "com.amazonaws" 36 | } 37 | region = { 38 | "description" = "US East (N. 
Virginia)" 39 | "endpoint" = "ec2.us-east-1.amazonaws.com" 40 | "id" = "us-east-1" 41 | "name" = "us-east-1" 42 | } 43 | ``` 44 | -------------------------------------------------------------------------------- /examples/blueprint/main.tf: -------------------------------------------------------------------------------- 1 | ### Spinnaker Blueprint 2 | 3 | provider "aws" { 4 | region = var.aws_region 5 | } 6 | 7 | ### remote terraform state 8 | module "tfstate" { 9 | source = "Young-ook/tfstate/aws" 10 | version = "2.0.1" 11 | name = var.name 12 | tags = var.tags 13 | force_destroy = true 14 | generate_config_file = true 15 | } 16 | 17 | ### network 18 | module "spinnaker-aware-aws-vpc" { 19 | source = "Young-ook/spinnaker/aws//modules/spinnaker-aware-aws-vpc" 20 | version = "3.0.0" 21 | name = var.name 22 | stack = "preprod" 23 | tags = var.tags 24 | azs = var.azs 25 | cidr = var.cidr 26 | enable_igw = true 27 | enable_ngw = true 28 | single_ngw = true 29 | } 30 | 31 | ### platform/spinnaker 32 | module "spinnaker" { 33 | source = "Young-ook/spinnaker/aws" 34 | version = "3.0.0" 35 | name = var.name 36 | tags = var.tags 37 | features = { 38 | # aurora = { enabled = true } 39 | # s3 = { enabled = true } 40 | eks = { 41 | ssm_enabled = true 42 | version = var.kubernetes_version 43 | } 44 | vpc = { 45 | id = module.spinnaker-aware-aws-vpc.vpc.id 46 | subnets = values(module.spinnaker-aware-aws-vpc.subnets["private"]) 47 | cidrs = [module.spinnaker-aware-aws-vpc.vpc.cidr_block] 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /modules/codebuild/variables.tf: -------------------------------------------------------------------------------- 1 | ### network 2 | variable "vpc" { 3 | description = "VPC configuration" 4 | type = any 5 | default = null 6 | } 7 | 8 | ### build project 9 | variable "project" { 10 | description = "Build project configuration" 11 | type = any 12 | default = { 13 | source = {} 14 | environment = { 15 | environment_vars = [] 16 | } 17 | artifact = {} 18 | } 19 | } 20 | 21 | ### log 22 | variable "log" { 23 | description = "Log configuration" 24 | type = map(any) 25 | default = null 26 | } 27 | 28 | ### security 29 | variable "policy_arns" { 30 | description = "A list of additional policy ARNs to attach the service role for CodeBuild" 31 | type = list(string) 32 | default = [] 33 | } 34 | 35 | ### description 36 | variable "name" { 37 | description = "The logical name of the module instance" 38 | type = string 39 | default = "codebuild" 40 | } 41 | 42 | variable "stack" { 43 | description = "Text used to identify stack of infrastructure components" 44 | type = string 45 | default = "" 46 | } 47 | 48 | variable "detail" { 49 | description = "The extra description of module instance" 50 | type = string 51 | default = "" 52 | } 53 | 54 | ### tags 55 | variable "tags" { 56 | description = "The key-value maps for tagging" 57 | type = map(string) 58 | default = {} 59 | } 60 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-ecs/variables.tf: -------------------------------------------------------------------------------- 1 | ### network 2 | variable "subnets" { 3 | description = "The list of subnet IDs to deploy your ECS cluster" 4 | type = list(string) 5 | validation { 6 | error_message = "Subnet list must not be null." 
7 | condition = var.subnets != null 8 | } 9 | } 10 | 11 | ### ecs cluster 12 | variable "node_groups" { 13 | description = "Node groups definition" 14 | default = [] 15 | } 16 | 17 | ### feature 18 | variable "container_insights_enabled" { 19 | description = "A boolean variable indicating to enable ContainerInsights" 20 | type = bool 21 | default = false 22 | } 23 | 24 | variable "termination_protection" { 25 | description = "A boolean variable indicating to enable Termination Protection of autoscaling group" 26 | type = bool 27 | default = true 28 | } 29 | 30 | ### description 31 | variable "name" { 32 | description = "The logical name of the module instance" 33 | type = string 34 | default = "ecs" 35 | } 36 | 37 | variable "stack" { 38 | description = "Text used to identify stack of infrastructure components" 39 | type = string 40 | default = "" 41 | } 42 | 43 | variable "detail" { 44 | description = "The extra description of module instance" 45 | type = string 46 | default = "" 47 | } 48 | 49 | ### tags 50 | variable "tags" { 51 | description = "The key-value maps for tagging" 52 | type = map(string) 53 | default = {} 54 | } 55 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/application/yelbv2/modules/pageviews.rb: -------------------------------------------------------------------------------- 1 | require 'redis' 2 | require 'aws-sdk-dynamodb' 3 | 4 | def pageviews() 5 | if ($yelbddbcache != nil && $yelbddbcache != "") then 6 | dynamodb = Aws::DynamoDB::Client.new(region: $awsregion) 7 | params = { 8 | table_name: $yelbddbcache, 9 | key: { 10 | counter: 'pageviews' 11 | } 12 | } 13 | pageviewsrecord = dynamodb.get_item(params) 14 | pageviewscount = pageviewsrecord.item['pageviewscount'] 15 | pageviewscount += 1 16 | params = { 17 | table_name: $yelbddbcache, 18 | key: { 19 | counter: 'pageviews' 20 | }, 21 | update_expression: 'set pageviewscount = :c', 22 | expression_attribute_values: {':c' => pageviewscount}, 23 | return_values: 'UPDATED_NEW' 24 | } 25 | pageviewrecord = dynamodb.update_item(params) 26 | else 27 | redis = Redis.new 28 | redis = Redis.new(:host => $redishost, :port => 6379) 29 | redis.incr("pageviews") 30 | pageviewscount = redis.get("pageviews") 31 | redis.quit() 32 | end 33 | return pageviewscount.to_s 34 | end 35 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/platform/templates/halconfig.tpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash -ex 2 | 3 | export KUBECONFIG=spinnaker_kubeconfig 4 | 5 | ${spinnaker_update_kubeconfig} 6 | mv kubeconfig spinnaker_kubeconfig 7 | 8 | ${halyard_kubectl_exec} hal config version edit --version ${spinnaker_version} 9 | 10 | ${halyard_kubectl_exec} hal config ci codebuild account add platform \ 11 | --account-id ${aws_account_id} \ 12 | --assume-role ${spinnaker_managed_aws_role} \ 13 | --region ${aws_region} 14 | ${halyard_kubectl_exec} hal config ci codebuild enable 15 | 16 | ${halyard_kubectl_exec} hal config artifact s3 account add platform \ 17 | --region ${aws_region} 18 | ${halyard_kubectl_exec} hal config artifact s3 enable 19 | 20 | ${eks_update_kubeconfig} 21 | kubectl -n spinnaker cp kubeconfig cd-spinnaker-halyard-0:/home/spinnaker/.kube/ 22 | rm kubeconfig 23 | 24 | ${halyard_kubectl_exec} hal config provider kubernetes account add eks \ 25 | --kubeconfig-file '/home/spinnaker/.kube/kubeconfig' \ 26 | --context 
${eks_kubeconfig_context} \ 27 | --environment dev 28 | ${halyard_kubectl_exec} hal config provider kubernetes enable 29 | 30 | ${halyard_kubectl_exec} hal config provider aws account add ec2 \ 31 | --regions ap-northeast-2 us-east-1 us-west-2 eu-west-1 eu-central-1 \ 32 | --account-id ${aws_account_id} \ 33 | --assume-role ${spinnaker_managed_aws_role} \ 34 | --environment dev 35 | ${halyard_kubectl_exec} hal config provider aws enable 36 | 37 | ${halyard_kubectl_exec} hal deploy apply 38 | 39 | unset KUBECONFIG 40 | -------------------------------------------------------------------------------- /defaults.tf: -------------------------------------------------------------------------------- 1 | ### default values 2 | 3 | ### aws partitions 4 | module "aws" { 5 | source = "Young-ook/spinnaker/aws//modules/aws-partitions" 6 | } 7 | 8 | locals { 9 | default_helm = { 10 | name = "spin" 11 | repository = join("/", [path.module, "charts"]) 12 | chart_name = "spinnaker" 13 | chart_version = null 14 | namespace = "spinnaker" 15 | timeout = "500" 16 | cleanup_on_fail = true 17 | dependency_update = true 18 | } 19 | default_eks_cluster = { 20 | version = "1.24" 21 | ssm_enabled = false 22 | cluster_logs = [] 23 | } 24 | default_eks_node_group = { 25 | name = "cd" 26 | instance_type = "m5.xlarge" 27 | min_size = "1" 28 | max_size = "3" 29 | desired_size = "1" 30 | ### A list of ARNs of spinnaker-managed IAM role 31 | ### This is an example: (arn:aws:iam::12345678987:role/spinnakerManaged) 32 | role_arns = [] 33 | } 34 | default_s3_bucket = { 35 | force_destroy = false 36 | versioning = "Enabled" 37 | } 38 | default_aurora_cluster = { 39 | engine = "aurora-mysql" 40 | family = "aurora-mysql8.0" 41 | version = "8.0.mysql_aurora.3.01.0" 42 | port = "3306" 43 | apply_immediately = "true" 44 | cluster_parameters = { 45 | character_set_server = "utf8" 46 | character_set_client = "utf8" 47 | } 48 | } 49 | default_aurora_instance = { 50 | instance_type = "db.r6g.large" 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/application/yelbv2/modules/restaurantsdbupdate.rb: -------------------------------------------------------------------------------- 1 | require 'pg' 2 | require 'pg_ext' 3 | require 'aws-sdk-dynamodb' 4 | 5 | def restaurantsdbupdate(restaurant) 6 | if ($yelbddbrestaurants != nil && $yelbddbrestaurants != "") then 7 | dynamodb = Aws::DynamoDB::Client.new(region: $awsregion) 8 | params = { 9 | table_name: $yelbddbrestaurants, 10 | key: { 11 | name: restaurant 12 | } 13 | } 14 | restaurantrecord = dynamodb.get_item(params) 15 | restaurantcount = restaurantrecord.item['restaurantcount'] 16 | restaurantcount += 1 17 | params = { 18 | table_name: $yelbddbrestaurants, 19 | key: { 20 | name: restaurant 21 | }, 22 | update_expression: 'set restaurantcount = :c', 23 | expression_attribute_values: {':c' => restaurantcount}, 24 | return_values: 'UPDATED_NEW' 25 | } 26 | restaurantrecord = dynamodb.update_item(params) 27 | else 28 | con = PG.connect :host => $yelbdbhost, 29 | :port => $yelbdbport, 30 | :dbname => 'yelbdatabase', 31 | :user => 'postgres', 32 | :password => 'postgres_password' 33 | con.prepare('statement1', 'UPDATE restaurants SET count = count +1 WHERE name = $1') 34 | res = con.exec_prepared('statement1', [ restaurant ]) 35 | con.close 36 | end 37 | end 38 | -------------------------------------------------------------------------------- 
/examples/aws-modernization-with-spinnaker/application/yelbv2/manifests/2.app-v2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: appmesh.k8s.aws/v1beta2 2 | kind: VirtualNode 3 | metadata: 4 | name: yelb-appserver-v2 5 | spec: 6 | awsName: yelb-appserver-virtual-node-v2 7 | podSelector: 8 | matchLabels: 9 | app: yelb-appserver-v2 10 | listeners: 11 | - portMapping: 12 | port: 4567 13 | protocol: http 14 | serviceDiscovery: 15 | dns: 16 | hostname: >- 17 | yelb-appserver-v2.${#currentStage()['context']['namespaceOverride']}.svc.cluster.local 18 | backends: 19 | - virtualService: 20 | virtualServiceRef: 21 | name: yelb-db 22 | - virtualService: 23 | virtualServiceRef: 24 | name: redis-server 25 | --- 26 | apiVersion: v1 27 | kind: Service 28 | metadata: 29 | name: yelb-appserver-v2 30 | labels: 31 | app: yelb-appserver-v2 32 | tier: middletier 33 | spec: 34 | type: ClusterIP 35 | ports: 36 | - port: 4567 37 | selector: 38 | app: yelb-appserver-v2 39 | tier: middletier 40 | --- 41 | apiVersion: apps/v1 42 | kind: Deployment 43 | metadata: 44 | name: yelb-appserver-v2 45 | spec: 46 | replicas: 1 47 | selector: 48 | matchLabels: 49 | app: yelb-appserver-v2 50 | tier: middletier 51 | template: 52 | metadata: 53 | labels: 54 | app: yelb-appserver-v2 55 | tier: middletier 56 | spec: 57 | containers: 58 | - name: yelb-appserver-v2 59 | image: 60 | ports: 61 | - containerPort: 4567 62 | -------------------------------------------------------------------------------- /modules/frigga/README.md: -------------------------------------------------------------------------------- 1 | # Frigga naming convention 2 | [Netflix Frigga](https://github.com/Netflix/frigga) is a standalone Java library containing the logic Netflix's Asgard project uses for generating and parsing AWS object names. This is a Terraform module that generates a resource name following the Frigga naming rule to avoid resource name duplication and to make your resources recognizable to Spinnaker. 3 | 4 | In Norse mythology, Frigga is the wife of Odin and queen of Asgard. 5 | 6 | ## Assumptions 7 | * You have an AWS account that you want Spinnaker to manage. This module will create a name similar to the following, `---`. 8 | 9 | ## Quickstart 10 | ### Setup 11 | ```hcl 12 | module "frigga" { 13 | source = "Young-ook/spinnaker/aws//modules/frigga" 14 | version = ">= 2.0" 15 | name = "app" 16 | stack = "prod" 17 | detail = "additional-desc" 18 | } 19 | ``` 20 | Run terraform: 21 | ``` 22 | terraform init 23 | terraform apply 24 | ``` 25 | 26 | And you will see the outputs. 27 | ``` 28 | Apply complete! Resources: 1 added, 0 changed, 0 destroyed. 29 | 30 | Outputs: 31 | 32 | name = app-prod-additional-desc-bjmqc 33 | nametag = { 34 | "Name" = "app-prod-additional-desc-bjmqc" 35 | } 36 | ``` 37 | 38 | If you don't want a random identifier appended to the end of the generated Frigga name, set the `petname` variable to `false` when applying the module.
39 | ```hcl 40 | module "frigga" { 41 | source = "Young-ook/spinnaker/aws//modules/frigga" 42 | version = ">= 2.0" 43 | name = "app" 44 | stack = "prod" 45 | detail = "additional-desc" 46 | petname = false 47 | } 48 | ``` 49 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-ecs/README.md: -------------------------------------------------------------------------------- 1 | # Amazon ECS (Elastic Container Service) 2 | [Amazon ECS](https://aws.amazon.com/ecs/) is a fully managed container orchestration service. Customers such as Duolingo, Samsung, GE, and Cookpad use ECS to run their most sensitive and mission-critical applications because of its security, reliability, and scalability. This module will create an ECS cluster and capacity providers. 3 | 4 | ## Quickstart 5 | ### Setup 6 | ```hcl 7 | module "ecs" { 8 | source = "Young-ook/spinnaker/aws//modules/spinnaker-managed-ecs" 9 | version = ">= 2.0" 10 | name = "example" 11 | } 12 | ``` 13 | Run terraform: 14 | ``` 15 | terraform init 16 | terraform apply 17 | ``` 18 | 19 | ## Enabling an AWS ECS account in Spinnaker 20 | The following example enables an AWS ECS account in Spinnaker. In this example, `ecs-test` is the name of the Amazon ECS account in Spinnaker, and `aws-test` is the name of a previously added, valid AWS account. Note that the ECS account uses the credentials of the corresponding AWS account, so you don't need to configure an additional assumable role for the ECS account. 21 | ``` 22 | kubectl -n spinnaker exec -it cd-spinnaker-halyard-0 -- bash 23 | bash $ hal config provider ecs account add ecs-test --aws-account aws-test 24 | bash $ hal config provider ecs enable 25 | bash $ hal deploy apply 26 | ``` 27 | For more information, please refer to [the Spinnaker AWS ECS provider documentation](https://spinnaker.io/setup/install/providers/aws/aws-ecs/). 28 | 29 | ## Additional Resources 30 | - [Amazon ECS Workshop](https://ecsworkshop.com/) 31 | - [Amazon ECS Scalability Best Practices](https://nathanpeck.com/amazon-ecs-scaling-best-practices/) 32 | -------------------------------------------------------------------------------- /charts/spinnaker/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | 6 | {{- define "spinnaker.name" -}} 7 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 8 | {{- end }} 9 | 10 | {{/* 11 | Create a default fully qualified app name. 12 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 13 | If release name contains chart name it will be used as a full name. 14 | */}} 15 | {{- define "spinnaker.fullname" -}} 16 | {{- if .Values.fullnameOverride }} 17 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 18 | {{- else }} 19 | {{- $name := default .Chart.Name .Values.nameOverride }} 20 | {{- if contains $name .Release.Name }} 21 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 22 | {{- else }} 23 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 24 | {{- end }} 25 | {{- end }} 26 | {{- end }} 27 | 28 | {{/* 29 | Common labels for metadata. 30 | */}} 31 | {{- define "spinnaker.standard-labels-base" -}} 32 | app: {{ include "spinnaker.fullname" .
| quote }} 33 | heritage: {{ .Release.Service | quote }} 34 | release: {{ .Release.Name | quote }} 35 | {{- end -}} 36 | {{- define "spinnaker.standard-labels" -}} 37 | {{ include "spinnaker.standard-labels-base" . }} 38 | chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" 39 | {{- end -}} 40 | 41 | {{/* 42 | A set of common selector labels for resources. 43 | */}} 44 | {{- define "spinnaker.standard-selector-labels" -}} 45 | app: {{ include "spinnaker.fullname" . | quote }} 46 | release: {{ .Release.Name | quote }} 47 | {{- end -}} 48 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/platform/outputs.tf: -------------------------------------------------------------------------------- 1 | module "aws-partitions" { 2 | source = "Young-ook/spinnaker/aws//modules/aws-partitions" 3 | } 4 | 5 | ## spinnaker configuration 6 | resource "local_file" "halconfig" { 7 | content = templatefile("${path.module}/templates/halconfig.tpl", { 8 | aws_account_id = module.aws-partitions.caller.account_id 9 | aws_region = module.aws-partitions.region.name 10 | spinnaker_version = var.spinnaker_version 11 | spinnaker_managed_aws_role = module.spinnaker-managed.role_arn 12 | spinnaker_update_kubeconfig = module.spinnaker.kubeconfig 13 | eks_update_kubeconfig = var.eks["script"] 14 | eks_kubeconfig_context = var.eks["cluster"].name 15 | halyard_kubectl_exec = "kubectl -n spinnaker exec -it cd-spinnaker-halyard-0 --" 16 | }) 17 | filename = "${path.cwd}/halconfig.sh" 18 | file_permission = "0700" 19 | } 20 | 21 | resource "local_file" "tunnel" { 22 | content = join("\n", [ 23 | "#!/bin/bash -ex", 24 | "export KUBECONFIG=spinnaker_kubeconfig", 25 | "kubectl -n spinnaker port-forward svc/spin-deck 8080:9000", 26 | ] 27 | ) 28 | filename = "${path.cwd}/tunnel.sh" 29 | file_permission = "0700" 30 | } 31 | 32 | resource "local_file" "preuninstall" { 33 | content = templatefile("${path.module}/templates/preuninstall.tpl", { 34 | aws_region = module.aws-partitions.region.name 35 | eks_update_kubeconfig = var.eks["script"] 36 | eks_kubeconfig_context = var.eks["cluster"].name 37 | }) 38 | filename = "${path.cwd}/preuninstall.sh" 39 | file_permission = "0700" 40 | } 41 | -------------------------------------------------------------------------------- /modules/codebuild/tests/defaults/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | test = { 4 | source = "terraform.io/builtin/test" 5 | } 6 | } 7 | } 8 | 9 | module "vpc" { 10 | source = "Young-ook/vpc/aws" 11 | version = "1.0.3" 12 | } 13 | 14 | # security/firewall 15 | resource "aws_security_group" "ci" { 16 | vpc_id = module.vpc.vpc.id 17 | 18 | ingress { 19 | from_port = 443 20 | to_port = 443 21 | protocol = "tcp" 22 | cidr_blocks = ["0.0.0.0/0"] 23 | } 24 | } 25 | 26 | # cloudwatch logs 27 | module "logs" { 28 | source = "Young-ook/eventbridge/aws//modules/logs" 29 | version = "0.0.7" 30 | name = "codebuild-logs" 31 | log_group = { 32 | namespace = "/aws/codebuild" 33 | retension_days = 3 34 | } 35 | } 36 | 37 | module "main" { 38 | source = "../.." 
39 | name = "service" 40 | stack = "stack" 41 | detail = "ci" 42 | tags = { test = "spinnaker-managed-eks-default" } 43 | project = { 44 | source = { 45 | type = "GITHUB" 46 | location = "https://github.com/aws-samples/aws-codebuild-samples.git" 47 | buildspec = "buildspec.yml" 48 | version = "master" 49 | } 50 | environment = { 51 | image = "aws/codebuild/amazonlinux2-x86_64-standard:3.0" 52 | privileged_mode = true 53 | } 54 | } 55 | log = { 56 | cloudwatch_logs = { 57 | group_name = module.logs.log_group.name 58 | } 59 | } 60 | vpc = { 61 | vpc = module.vpc.vpc.id 62 | subnets = values(module.vpc.subnets["public"]) 63 | security_groups = [aws_security_group.ci.id] 64 | } 65 | } 66 | 67 | output "build" { 68 | description = "AWS CLI command to start build project" 69 | value = module.main.build 70 | } 71 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/application/yelbv2/manifests/4.high-availability.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: yelb-ui 5 | spec: 6 | replicas: 6 7 | selector: 8 | matchLabels: 9 | app: yelb-ui 10 | tier: frontend 11 | template: 12 | metadata: 13 | labels: 14 | app: yelb-ui 15 | tier: frontend 16 | spec: 17 | containers: 18 | - name: yelb-ui 19 | image: mreferre/yelb-ui:0.7 20 | ports: 21 | - containerPort: 80 22 | affinity: 23 | podAntiAffinity: 24 | requiredDuringSchedulingIgnoredDuringExecution: 25 | - labelSelector: 26 | matchExpressions: 27 | - key: "name" 28 | operator: In 29 | values: 30 | - yelb-ui 31 | topologyKey: "kubernetes.io/hostname" 32 | --- 33 | apiVersion: apps/v1 34 | kind: Deployment 35 | metadata: 36 | name: yelb-appserver 37 | spec: 38 | replicas: 6 39 | selector: 40 | matchLabels: 41 | app: yelb-appserver 42 | tier: middletier 43 | template: 44 | metadata: 45 | labels: 46 | app: yelb-appserver 47 | tier: middletier 48 | spec: 49 | containers: 50 | - name: yelb-appserver 51 | image: mreferre/yelb-appserver:0.5 52 | ports: 53 | - containerPort: 4567 54 | affinity: 55 | podAntiAffinity: 56 | requiredDuringSchedulingIgnoredDuringExecution: 57 | - labelSelector: 58 | matchExpressions: 59 | - key: "name" 60 | operator: In 61 | values: 62 | - yelb-appserver 63 | topologyKey: "kubernetes.io/hostname" 64 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-eks/variables.tf: -------------------------------------------------------------------------------- 1 | ### network 2 | variable "subnets" { 3 | description = "The list of subnet IDs to deploy your EKS cluster" 4 | type = list(string) 5 | default = null 6 | } 7 | 8 | ### kubernetes cluster 9 | variable "kubernetes_version" { 10 | description = "The target version of kubernetes" 11 | type = string 12 | default = "1.20" 13 | } 14 | 15 | variable "node_groups" { 16 | description = "Node groups definition" 17 | default = [] 18 | } 19 | 20 | variable "managed_node_groups" { 21 | description = "Amazon managed node groups definition" 22 | default = [] 23 | } 24 | 25 | variable "fargate_profiles" { 26 | description = "Amazon Fargate for EKS profiles" 27 | default = [] 28 | } 29 | 30 | ### feature 31 | variable "enabled_cluster_log_types" { 32 | description = "A list of the desired control plane logging to enable" 33 | type = list(string) 34 | default = [] 35 | } 36 | 37 | variable "enable_ssm" { 38 | description = "Allow ssh access using session manager" 39 | 
type = bool 40 | default = false 41 | } 42 | 43 | ### security 44 | variable "policy_arns" { 45 | description = "A list of policy ARNs to attach the node groups role" 46 | type = list(string) 47 | default = [] 48 | } 49 | 50 | ### description 51 | variable "name" { 52 | description = "The logical name of the module instance" 53 | type = string 54 | default = "eks" 55 | } 56 | 57 | variable "stack" { 58 | description = "Text used to identify stack of infrastructure components" 59 | type = string 60 | default = "" 61 | } 62 | 63 | variable "detail" { 64 | description = "The extra description of module instance" 65 | type = string 66 | default = "" 67 | } 68 | 69 | ### tags 70 | variable "tags" { 71 | description = "The key-value maps for tagging" 72 | type = map(string) 73 | default = {} 74 | } 75 | -------------------------------------------------------------------------------- /charts/spinnaker/templates/role.yaml: -------------------------------------------------------------------------------- 1 | ### Currently, the halyard does not support to deploy spinnaker microservices 2 | ### with custom kubernetes service account. 3 | {{- if .Values.serviceAccount.create }} 4 | --- 5 | apiVersion: v1 6 | kind: ServiceAccount 7 | metadata: 8 | {{- if .Values.serviceAccount.name }} 9 | name: {{ .Values.serviceAccount.name }} 10 | {{- else }} 11 | name: {{ template "spinnaker.fullname" . }} 12 | {{- end }} 13 | namespace: {{ .Release.Namespace }} 14 | labels: 15 | {{ include "spinnaker.standard-labels" . | indent 4 }} 16 | {{- if .Values.serviceAccount.annotations }} 17 | annotations: 18 | {{ toYaml .Values.serviceAccount.annotations | indent 4 }} 19 | {{- end }} 20 | {{- end }} 21 | 22 | ### In the case of a local cluster Spinnaker needs 23 | ### to be able to deploy to all namespaces in the cluster. 24 | --- 25 | apiVersion: rbac.authorization.k8s.io/v1 26 | kind: ClusterRoleBinding 27 | metadata: 28 | {{- if .Values.serviceAccount.name }} 29 | name: {{ .Values.serviceAccount.name }} 30 | {{- else }} 31 | name: {{ template "spinnaker.fullname" . }} 32 | {{- end }} 33 | labels: 34 | {{ include "spinnaker.standard-labels" . | indent 4 }} 35 | roleRef: 36 | apiGroup: rbac.authorization.k8s.io 37 | kind: ClusterRole 38 | name: cluster-admin 39 | subjects: 40 | - namespace: {{ .Release.Namespace }} 41 | kind: ServiceAccount 42 | name: default ### Clouddriver does not currently allow config of its service account. 43 | 44 | ### 45 | ### Halyard 46 | ### 47 | --- 48 | apiVersion: rbac.authorization.k8s.io/v1 49 | kind: RoleBinding 50 | metadata: 51 | name: halyard 52 | labels: 53 | {{ include "spinnaker.standard-labels" . | indent 4 }} 54 | roleRef: 55 | apiGroup: rbac.authorization.k8s.io 56 | kind: ClusterRole 57 | name: edit 58 | subjects: 59 | - kind: ServiceAccount 60 | namespace: {{ .Release.Namespace }} 61 | name: default ### Halyard also needs the same IRSA for applying configuration changes. 
62 | -------------------------------------------------------------------------------- /examples/blueprint/variables.tf: -------------------------------------------------------------------------------- 1 | # Variables for providing to module fixture codes 2 | 3 | ### network 4 | variable "aws_region" { 5 | description = "The aws region to deploy" 6 | type = string 7 | default = "ap-northeast-2" 8 | } 9 | 10 | variable "azs" { 11 | description = "A list of availability zones for the vpc to deploy resources" 12 | type = list(string) 13 | default = ["ap-northeast-2a", "ap-northeast-2b", "ap-northeast-2c"] 14 | } 15 | 16 | variable "cidr" { 17 | description = "The list of CIDR blocks to allow ingress traffic for db access" 18 | type = string 19 | default = "10.0.0.0/16" 20 | } 21 | 22 | ### kubernetes cluster 23 | variable "kubernetes_version" { 24 | description = "The target version of kubernetes" 25 | type = string 26 | default = "1.28" 27 | } 28 | 29 | variable "kubernetes_node_groups" { 30 | description = "EKS managed node groups definition" 31 | default = null 32 | } 33 | 34 | ### rdb cluster 35 | variable "aurora_cluster" { 36 | description = "RDS Aurora for mysql cluster definition" 37 | default = {} 38 | } 39 | 40 | variable "aurora_instances" { 41 | description = "RDS Aurora for mysql instances definition" 42 | default = [] 43 | } 44 | 45 | ### s3 bucket 46 | variable "s3_bucket" { 47 | description = "S3 bucket configuration" 48 | default = {} 49 | } 50 | 51 | ### description 52 | variable "name" { 53 | description = "The logical name of the module instance" 54 | type = string 55 | default = "spinnaker" 56 | } 57 | 58 | variable "stack" { 59 | description = "Text used to identify stack of infrastructure components" 60 | type = string 61 | default = "" 62 | } 63 | 64 | variable "detail" { 65 | description = "The extra description of module instance" 66 | type = string 67 | default = "" 68 | } 69 | 70 | ### tags 71 | variable "tags" { 72 | description = "The key-value maps for tagging" 73 | type = map(string) 74 | default = {} 75 | } 76 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-gcp/main.tf: -------------------------------------------------------------------------------- 1 | 2 | ### 3 | # Be careful! You can accidentally lock yourself out of your project using this resource. 4 | # 5 | # PROCEED with CAUTION. 6 | # 7 | # It's not recommended to use google_project_iam_policy with your provider project 8 | # to avoid locking yourself out, and it should generally only be used with projects 9 | # fully managed by Terraform. 
10 | ### 11 | 12 | # spinnaker managed 13 | resource "google_service_account" "spinnaker-managed" { 14 | account_id = local.name 15 | display_name = "spinnaker service account" 16 | } 17 | 18 | resource "google_service_account_key" "spinnaker-managed" { 19 | service_account_id = google_service_account.spinnaker-managed.name 20 | } 21 | 22 | data "google_service_account_key" "spinnaker-managed" { 23 | name = google_service_account_key.spinnaker-managed.name 24 | public_key_type = "TYPE_X509_PEM_FILE" 25 | } 26 | 27 | resource "local_file" "credential" { 28 | content = base64decode(google_service_account_key.spinnaker-managed.private_key) 29 | filename = format("%s/%s", path.cwd, local.credential_json) 30 | } 31 | 32 | resource "null_resource" "chmod" { 33 | depends_on = [local_file.credential] 34 | provisioner "local-exec" { 35 | command = format("chmod 600 %s", local.credential_json) 36 | working_dir = path.cwd 37 | interpreter = ["bash", "-c"] 38 | } 39 | } 40 | 41 | # list of roles which spinnaker service account must has 42 | variable "spinnaker-managed-roles" { 43 | default = [ 44 | "roles/compute.instanceAdmin.v1", 45 | "roles/compute.networkAdmin", 46 | "roles/compute.securityAdmin", 47 | "roles/compute.storageAdmin", 48 | "roles/iam.serviceAccountUser", 49 | ] 50 | } 51 | 52 | # roles and account mapping 53 | resource "google_project_iam_member" "spinnaker-managed" { 54 | count = length(var.spinnaker-managed-roles) 55 | project = var.project 56 | role = var.spinnaker-managed-roles[count.index] 57 | member = format("serviceAccount:%s", google_service_account.spinnaker-managed.email) 58 | } 59 | -------------------------------------------------------------------------------- /charts/spinnaker/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled }} 2 | apiVersion: networking.k8s.io/v1 3 | kind: Ingress 4 | metadata: 5 | {{- if .Values.ingress.deck.annotations }} 6 | annotations: 7 | {{ toYaml .Values.ingress.deck.annotations | indent 4 }} 8 | {{- end }} 9 | name: spin-deck 10 | labels: 11 | {{ include "spinnaker.standard-labels" . | indent 4 }} 12 | spec: 13 | rules: 14 | - host: {{ .Values.ingress.deck.host | quote }} 15 | http: 16 | paths: 17 | {{- if index $.Values.ingress.deck "annotations" }} 18 | {{- if eq (index $.Values.ingress.deck.annotations "kubernetes.io/ingress.class" | default "") "alb" }} 19 | - path: /* 20 | {{- else }}{{/* Has annotations but ingress class is not "gce" nor "alb" */}} 21 | - path: / 22 | {{- end }} 23 | {{- else}}{{/* Has no annotations */}} 24 | - path: / 25 | {{- end }} 26 | backend: 27 | serviceName: spin-deck 28 | servicePort: 9000 29 | {{- if .Values.ingress.deck.tls }} 30 | tls: 31 | {{ toYaml .Values.ingress.deck.tls | indent 4 }} 32 | {{- end -}} 33 | --- 34 | apiVersion: networking.k8s.io/v1 35 | kind: Ingress 36 | metadata: 37 | {{- if .Values.ingress.gate.annotations }} 38 | annotations: 39 | {{ toYaml .Values.ingress.gate.annotations | indent 4 }} 40 | {{- end }} 41 | name: spin-gate 42 | labels: 43 | {{ include "spinnaker.standard-labels" . 
| indent 4 }} 44 | spec: 45 | rules: 46 | - host: {{ .Values.ingress.gate.host | quote }} 47 | http: 48 | paths: 49 | {{- if index $.Values.ingress.gate "annotations" }} 50 | {{- if eq (index $.Values.ingress.gate.annotations "kubernetes.io/ingress.class" | default "") "alb" }} 51 | - path: /* 52 | {{- else }}{{/* Has annotations but ingress class is not "gce" nor "alb" */}} 53 | - path: / 54 | {{- end }} 55 | {{- else}}{{/* Has no annotations */}} 56 | - path: / 57 | {{- end }} 58 | backend: 59 | serviceName: spin-gate 60 | servicePort: 8084 61 | {{- if .Values.ingress.gate.tls }} 62 | tls: 63 | {{ toYaml .Values.ingress.gate.tls | indent 4 }} 64 | {{- end }} 65 | {{- end }} 66 | -------------------------------------------------------------------------------- /modules/spinnaker-aware-aws-vpc/variables.tf: -------------------------------------------------------------------------------- 1 | ### input variables 2 | 3 | ### network 4 | variable "cidr" { 5 | description = "The vpc CIDR (e.g. 10.0.0.0/16)" 6 | type = string 7 | default = "10.0.0.0/16" 8 | } 9 | 10 | variable "azs" { 11 | description = "A list of availability zones for the vpc" 12 | type = list(string) 13 | default = ["us-east-1a", "us-east-1b", "us-east-1c"] 14 | } 15 | 16 | variable "vpc_endpoint_config" { 17 | description = "A list of vpc endpoint configurations" 18 | type = list(any) 19 | default = null 20 | } 21 | 22 | variable "amazon_side_asn" { 23 | description = "The Autonomous System Number (ASN) for the Amazon side of the gateway." 24 | type = string 25 | default = "64512" 26 | } 27 | 28 | ### feature 29 | variable "enable_igw" { 30 | description = "Should be true if you want to provision Internet Gateway for internet facing communication" 31 | type = bool 32 | default = true 33 | } 34 | 35 | variable "enable_ngw" { 36 | description = "Should be true if you want to provision NAT Gateway(s) across all of private networks" 37 | type = bool 38 | default = false 39 | } 40 | 41 | variable "single_ngw" { 42 | description = "Should be true if you want to provision a single shared NAT Gateway across all of private networks" 43 | type = bool 44 | default = false 45 | } 46 | 47 | variable "enable_vgw" { 48 | description = "Should be true if you want to create a new Virtual Private Gateway resource and attach it to the VPC" 49 | type = bool 50 | default = false 51 | } 52 | 53 | ### description 54 | variable "name" { 55 | description = "The logical name of the module instance" 56 | type = string 57 | default = "vpc" 58 | } 59 | 60 | variable "stack" { 61 | description = "Text used to identify stack of infrastructure components" 62 | type = string 63 | default = "" 64 | } 65 | 66 | variable "detail" { 67 | description = "The extra description of module instance" 68 | type = string 69 | default = "" 70 | } 71 | 72 | ### tags 73 | variable "tags" { 74 | description = "The key-value maps for tagging" 75 | type = map(string) 76 | default = {} 77 | } 78 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/foundation/charts/cluster-autoscaler/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "cluster-autoscaler.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 7 | {{- end }} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 
11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "cluster-autoscaler.fullname" -}} 15 | {{- if .Values.fullnameOverride }} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 17 | {{- else }} 18 | {{- $name := default .Chart.Name .Values.nameOverride }} 19 | {{- if contains $name .Release.Name }} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 21 | {{- else }} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 23 | {{- end }} 24 | {{- end }} 25 | {{- end }} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "cluster-autoscaler.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 32 | {{- end }} 33 | 34 | {{/* 35 | Common labels 36 | */}} 37 | {{- define "cluster-autoscaler.labels" -}} 38 | helm.sh/chart: {{ include "cluster-autoscaler.chart" . }} 39 | {{ include "cluster-autoscaler.selectorLabels" . }} 40 | {{- if .Chart.AppVersion }} 41 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 42 | {{- end }} 43 | app.kubernetes.io/managed-by: {{ .Release.Service }} 44 | {{- end }} 45 | 46 | {{/* 47 | Selector labels 48 | */}} 49 | {{- define "cluster-autoscaler.selectorLabels" -}} 50 | app.kubernetes.io/name: {{ include "cluster-autoscaler.name" . }} 51 | app.kubernetes.io/instance: {{ .Release.Name }} 52 | {{- end }} 53 | 54 | {{/* 55 | Create the name of the service account to use 56 | */}} 57 | {{- define "cluster-autoscaler.serviceAccountName" -}} 58 | {{- if .Values.serviceAccount.create }} 59 | {{- default (include "cluster-autoscaler.fullname" .) 
.Values.serviceAccount.name }} 60 | {{- else }} 61 | {{- default "default" .Values.serviceAccount.name }} 62 | {{- end }} 63 | {{- end }} 64 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-aws/main.tf: -------------------------------------------------------------------------------- 1 | ### spinnaker managed AWS 2 | 3 | ### security/policy 4 | resource "aws_iam_role" "spinnaker-managed" { 5 | name = local.name 6 | path = "/" 7 | tags = merge(local.default-tags, var.tags) 8 | assume_role_policy = jsonencode({ 9 | Version = "2012-10-17" 10 | Statement = [{ 11 | Action = "sts:AssumeRole" 12 | Effect = "Allow" 13 | Principal = { 14 | Service = [ 15 | format("ecs.%s", local.aws.dns), 16 | format("ecs-tasks.%s", local.aws.dns), 17 | format("application-autoscaling.%s", local.aws.dns) 18 | ], 19 | AWS = flatten([ 20 | local.aws.id, 21 | var.trusted_role_arn, 22 | ]) 23 | } 24 | }] 25 | }) 26 | } 27 | 28 | resource "aws_iam_policy" "poweruser" { 29 | name = format("%s-poweruser", local.name) 30 | description = "Poweruser Access permission for Spinnaker-Managed-Role" 31 | path = "/" 32 | policy = jsonencode({ 33 | Version = "2012-10-17" 34 | Statement = [ 35 | { 36 | "Effect" : "Allow", 37 | "NotAction" : [ 38 | "iam:*", 39 | "organizations:*", 40 | "account:*" 41 | ], 42 | "Resource" : "*" 43 | }, 44 | { 45 | "Effect" : "Allow", 46 | "Action" : [ 47 | "iam:CreateServiceLinkedRole", 48 | "iam:DeleteServiceLinkedRole", 49 | "iam:ListRoles", 50 | "iam:PassRole", 51 | "organizations:DescribeOrganization", 52 | "account:ListRegions" 53 | ], 54 | "Resource" : "*" 55 | } 56 | ] 57 | }) 58 | } 59 | 60 | resource "aws_iam_role_policy_attachment" "poweruser" { 61 | policy_arn = aws_iam_policy.poweruser.arn 62 | role = aws_iam_role.spinnaker-managed.id 63 | } 64 | 65 | # BaseIAMRole 66 | resource "aws_iam_role" "base-iam" { 67 | count = var.base_role_enabled ? 1 : 0 68 | name = "BaseIAMRole" 69 | path = "/" 70 | tags = merge(local.default-tags, var.tags) 71 | assume_role_policy = jsonencode({ 72 | Version = "2012-10-17" 73 | Statement = [{ 74 | Action = "sts:AssumeRole" 75 | Effect = "Allow" 76 | Principal = { 77 | Service = [ 78 | format("ec2.%s", module.aws.partition.dns_suffix), 79 | format("ecs-tasks.%s", module.aws.partition.dns_suffix) 80 | ] 81 | } 82 | }] 83 | }) 84 | } 85 | 86 | resource "aws_iam_instance_profile" "base-iam" { 87 | count = var.base_role_enabled ? 1 : 0 88 | name = "BaseIAMRole" 89 | role = aws_iam_role.base-iam[0].name 90 | } 91 | -------------------------------------------------------------------------------- /modules/spinnaker-aware-aws-vpc/README.md: -------------------------------------------------------------------------------- 1 | # Amazon VPC 2 | [Amazon Virtual Private Cloud(Amazon VPC)](https://aws.amazon.com/vpc/) is a service that lets you launch AWS resources in a logically isolated virtual network that you define. You have complete control over your virtual networking environment, including selection of your own IP address range, creation of subnets, and configuration of route tables and network gateways. You can use both IPv4 and IPv6 for most resources in your virtual private cloud, helping to ensure secure and easy access to resources and applications. 
3 | 4 | ## Quickstart 5 | ### Setup 6 | ```hcl 7 | module "vpc" { 8 | source = "Young-ook/spinnaker/aws//modules/spinnaker-aware-aws-vpc" 9 | version = ">= 2.0" 10 | name = "example" 11 | } 12 | ``` 13 | Run terraform: 14 | ``` 15 | terraform init 16 | terraform apply 17 | ``` 18 | 19 | ### Network Architecture 20 | #### Subnets 21 | A VPC created by this module consists of three different subnet types that instances can be placed into. The three subnet types are described below: 22 | 23 | *Isolated* - isolated subnets do not route from or to the Internet, and as such do not require [NAT gateways](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html). They can only connect to or be connected to from other instances in the same VPC. 24 | ![aws-vpc-isolated-subnets](../../images/aws-vpc-isolated-subnets.png) 25 | 26 | *Private* - instances in private subnets are not directly routable from the Internet, and connect out to the Internet via a NAT gateway. Be aware that you will be charged for NAT gateways. 27 | 28 | *Public* - public subnets connect directly to the Internet using an Internet Gateway. If you want your instances to have a public IP address and be directly reachable from the Internet, you must place them in a public subnet. 29 | 30 | #### NAT (Network Address Translation) Gateway 31 | The following diagram shows how to deploy a VPC with a single NAT gateway shared across the availability zones. This is a cost-effective method, but communication breaks whenever something goes wrong in the availability zone where the NAT gateway is located, so it is a good choice for development environments. 32 | ![aws-vpc-single-shared-ngw](../../images/aws-vpc-single-shared-ngw.png) 33 | 34 | For highly available communication between resources (instances) inside and outside your VPC, you should deploy at least one NAT gateway per Availability Zone. This configuration is recommended for production environments. By default, a NAT gateway is created in every public subnet for maximum availability.
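The examples in this repository (see `examples/aws-modernization-with-spinnaker/foundation/main.tf`) drive this trade-off through the underlying `Young-ook/vpc/aws` module. The sketch below is illustrative only: the `single_ngw` flag and the values shown mirror that example usage, and the per-AZ behavior when the flag is false is an assumption rather than a documented interface of this submodule.
```hcl
# Illustrative sketch (not the definitive interface of this submodule):
# the Young-ook/vpc/aws module used by this repository's examples exposes a
# `single_ngw` flag; true keeps a single shared NAT gateway (cheap, but one AZ
# dependency), while false is assumed to place a NAT gateway per AZ.
module "vpc" {
  source  = "Young-ook/vpc/aws"
  version = "1.0.3"
  name    = "example"
  vpc_config = {
    azs         = ["ap-northeast-2a", "ap-northeast-2b", "ap-northeast-2c"] # placeholder AZs
    cidr        = "10.0.0.0/16"                                             # placeholder CIDR
    subnet_type = "private"
    single_ngw  = true
  }
}
```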
35 | ![aws-vpc-ngw-per-az](../../images/aws-vpc-ngw-per-az.png) 36 | -------------------------------------------------------------------------------- /modules/spinnaker-aware-aws-vpc/defaults.tf: -------------------------------------------------------------------------------- 1 | ### default values 2 | 3 | locals { 4 | default_vpc_endpoint_config = [ 5 | { 6 | service = "s3" 7 | type = "Interface" 8 | private_dns_enabled = false 9 | }, 10 | { 11 | service = "ecr.api" 12 | type = "Interface" 13 | private_dns_enabled = true 14 | }, 15 | { 16 | service = "ecr.dkr" 17 | type = "Interface" 18 | private_dns_enabled = true 19 | }, 20 | { 21 | service = "ecs" 22 | type = "Interface" 23 | private_dns_enabled = true 24 | }, 25 | { 26 | service = "ec2" 27 | type = "Interface" 28 | private_dns_enabled = true 29 | }, 30 | { 31 | service = "ec2messages" 32 | type = "Interface" 33 | private_dns_enabled = true 34 | }, 35 | { 36 | service = "autoscaling" 37 | type = "Interface" 38 | private_dns_enabled = true 39 | }, 40 | { 41 | service = "application-autoscaling" 42 | type = "Interface" 43 | private_dns_enabled = true 44 | }, 45 | { 46 | service = "kinesis-streams" 47 | type = "Interface" 48 | private_dns_enabled = true 49 | }, 50 | { 51 | service = "kinesis-firehose" 52 | type = "Interface" 53 | private_dns_enabled = true 54 | }, 55 | { 56 | service = "logs" 57 | type = "Interface" 58 | private_dns_enabled = true 59 | }, 60 | { 61 | service = "monitoring" 62 | type = "Interface" 63 | private_dns_enabled = true 64 | }, 65 | { 66 | service = "sts" 67 | type = "Interface" 68 | private_dns_enabled = true 69 | }, 70 | { 71 | service = "sagemaker.api" 72 | type = "Interface" 73 | private_dns_enabled = true 74 | }, 75 | { 76 | service = "sagemaker.runtime" 77 | type = "Interface" 78 | private_dns_enabled = true 79 | }, 80 | { 81 | service = "notebook" 82 | type = "Interface" 83 | private_dns_enabled = true 84 | }, 85 | { 86 | service = "ssm" 87 | type = "Interface" 88 | private_dns_enabled = true 89 | }, 90 | { 91 | service = "ssmmessages" 92 | type = "Interface" 93 | private_dns_enabled = true 94 | }, 95 | ] 96 | } 97 | -------------------------------------------------------------------------------- /modules/codebuild/README.md: -------------------------------------------------------------------------------- 1 | # AWS CodeBuild 2 | [AWS CodeBuild](https://aws.amazon.com/codebuild/) AWS CodeBuild is a fully managed continuous integration service that compiles source code, runs tests, and produces software packages that are ready to deploy. With CodeBuild, you don’t need to provision, manage, and scale your own build servers. This module will create a CodeBuild project for continuous integration stage in the spinnaker pipeline. 3 | 4 | ## Quickstart 5 | ### Setup 6 | ``` 7 | module "codebuild" { 8 | source = "Young-ook/spinnaker/aws//modules/codebuild" 9 | version = ">= 2.0" 10 | name = "example" 11 | } 12 | ``` 13 | Run terraform: 14 | ``` 15 | terraform init 16 | terraform apply 17 | ``` 18 | 19 | ## Enabling AWS CodeBuild account in spinnaker 20 | After applying this module, you will see CodeBuild project on your AWS environment. And then you can add your CodeBuild project to the spinnaker using Halyard. Setting up AWS CodeBuild as a Continuous Integration (CI) system within Spinnaker allows you to: 21 | - trigger pipelines when an AWS CodeBuild build changes its phase or state 22 | - add an AWS CodeBuild stage to your pipeline 23 | The AWS Codebuild stage requires Spinnaker 1.19 or later. 
24 | 25 | This is example code to enable an AWS CodeBuild account in the spinnaker. 26 | ``` 27 | hal config ci codebuild account add aws-ci \ 28 | --account-id '0123456879031' \ 29 | --assume-role role/spinnaker-test-xgsj \ 30 | --region ap-northeast-2 31 | hal config ci codebuild enable 32 | hal deploy apply 33 | ``` 34 | **[Important]** Keep in mind that only one region is allowed in the current CodeBuild (CI) configuration. 35 | 36 | For more information, please refer to [this](https://spinnaker.io/setup/ci/codebuild/). 37 | 38 | ## Build project examples 39 | 40 | ``` 41 | module "codebuild" { 42 | source = "Young-ook/spinnaker/aws//modules/codebuild" 43 | version = ">= 2.0" 44 | name = "example" 45 | project = { 46 | source = { 47 | type = "GITHUB" 48 | location = "https://github.com/aws-samples/aws-codebuild-samples.git" 49 | buildspec = "buildspec.yml" 50 | version = "master" 51 | } 52 | environment = { 53 | image = "aws/codebuild/amazonlinux2-x86_64-standard:3.0" 54 | privileged_mode = true 55 | } 56 | artifact = { 57 | type = "CODEPIPELINE" 58 | } 59 | } 60 | } 61 | ``` 62 | 63 | ``` 64 | module "codebuild" { 65 | source = "Young-ook/spinnaker/aws//modules/codebuild" 66 | version = ">= 2.0" 67 | name = "example" 68 | project = { 69 | source = { 70 | type = "CODEPIPELINE" 71 | } 72 | environment = { 73 | environment_variables = { 74 | HELLO = "WORLD" 75 | } 76 | } 77 | artifact = { 78 | type = "S3" 79 | location = "s3-bucket-name" 80 | encryption_disabled = true 81 | } 82 | } 83 | } 84 | ``` 85 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-aws/README.md: -------------------------------------------------------------------------------- 1 | # Spinnaker Managed AWS 2 | [Spinnaker](https://spinnaker.io/) is an open-source, multi-cloud continuous delivery platform for releasing software changes with high velocity and confidence. This is a terraform module that creates an IAM role allowing Spinnaker to control your AWS account. In other words, it helps you convert your AWS account into one managed by the spinnaker. This module will create an IAM role on your AWS account and the role name will be similar to the following, `---spinnaker-managed`. At the end, you can integrate this role with the Spinnaker. For more information, please follow the instructions below. 3 | 4 | ## Quickstart 5 | ### Setup 6 | ```hcl 7 | module "spinnaker-managed" { 8 | source = "Young-ook/spinnaker/aws//modules/spinnaker-managed-aws" 9 | version = ">= 2.0" 10 | name = "example" 11 | trusted_role_arn = ["arn:aws:iam::1234567890321:role/spinnaker-test-xgsj"] 12 | } 13 | ``` 14 | Run terraform: 15 | ``` 16 | terraform init 17 | terraform apply 18 | ``` 19 | 20 | ## Update the trusted relationship 21 | ### Create a spinnaker managed role 22 | After you have completed the previous step to launch a spinnaker, you will see the generated IAM roles and policies. You can then integrate an AWS account as `spinnaker managed` so that it is managed by your spinnaker. After applying the `spinnaker-managed` module on your target AWS account, go back to the terraform configuration file and add the ARN generated by the `spinnaker-managed` module to the `assume_role_arn` variable of spinnaker. For more details, please refer to the **Update the spinnaker role** section below. 23 | 24 | ### Update the spinnaker role 25 | After applying the `spinnaker-managed-aws` submodule, you will get the ARN of an IAM role from the output variable.
It may look like `arn:aws:iam::012345678912:role/example-spinnaker-managed`. Don't forget you have to add the spinnaker managed role to `assume_role_arn` list of spinnaker terraform module, because spinnaker application needs a permission to access the target AWS accounts via assume role API using AWS SDK. Here is an example to show how to link the spinnaker managed roles to spinnaker role. 26 | ```hcl 27 | module "spinnaker" { 28 | source = "Young-ook/spinnaker/aws" 29 | version = "~> 2.0" 30 | ... 31 | 32 | assume_role_arn = [module.spinnaker-managed.role_arn] 33 | } 34 | ``` 35 | 36 | ## Enabling AWS account in spinnaker 37 | To enable AWS account in the spinnaker, you have to access the halyard pod using `kubectl` command. 38 | ``` 39 | kubectl -n spinnaker exec -it cd-spinnaker-halyard-0 -- bash 40 | bash $ hal config provider aws account add aws-test \ 41 | --account-id '0123456879031' \ 42 | --assume-role role/spinnaker-test-xgsj \ 43 | --regions us-east-1, us-west-2 44 | bash $ hal config provider aws enable 45 | bash $ hal deploy apply 46 | ``` 47 | After you configure the Spinnaker AWS provider you can manage AWS resources depending on what you included in the AWS policy. You would be able to deploy EC2 resources with Spinnaker. 48 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/foundation/charts/cluster-autoscaler/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for cluster-autoscaler. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | replicaCount: 1 6 | 7 | image: 8 | repository: k8s.gcr.io/autoscaling/cluster-autoscaler 9 | pullPolicy: IfNotPresent 10 | # Overrides the image tag whose default is the chart appVersion. 11 | tag: "" 12 | 13 | imagePullSecrets: [] 14 | nameOverride: "" 15 | fullnameOverride: "" 16 | 17 | extraVolumes: 18 | - name: ssl-certs 19 | hostPath: 20 | path: /etc/ssl/certs/ca-bundle.crt 21 | 22 | extraVolumeMounts: 23 | - name: ssl-certs 24 | mountPath: /etc/ssl/certs/ca-certificates.crt 25 | readOnly: true 26 | 27 | serviceAccount: 28 | # Specifies whether a service account should be created 29 | create: true 30 | # Annotations to add to the service account 31 | annotations: {} 32 | # The name of the service account to use. 
33 | # If not set and create is true, a name is generated using the fullname template 34 | name: "" 35 | 36 | labels: 37 | k8s-addon: cluster-autoscaler.addons.k8s.io 38 | k8s-app: cluster-autoscaler 39 | 40 | podAnnotations: 41 | prometheus.io/scrape: 'true' 42 | prometheus.io/port: '8085' 43 | 44 | podSecurityContext: {} 45 | # fsGroup: 2000 46 | 47 | securityContext: {} 48 | # capabilities: 49 | # drop: 50 | # - ALL 51 | # readOnlyRootFilesystem: true 52 | # runAsNonRoot: true 53 | # runAsUser: 1000 54 | 55 | service: 56 | type: ClusterIP 57 | port: 80 58 | 59 | ingress: 60 | enabled: false 61 | annotations: {} 62 | # kubernetes.io/ingress.class: nginx 63 | # kubernetes.io/tls-acme: "true" 64 | hosts: 65 | - host: chart-example.local 66 | paths: [] 67 | tls: [] 68 | # - secretName: chart-example-tls 69 | # hosts: 70 | # - chart-example.local 71 | 72 | resources: 73 | limits: 74 | cpu: 100m 75 | memory: 500Mi 76 | requests: 77 | cpu: 100m 78 | memory: 500Mi 79 | 80 | autoscaling: 81 | enabled: false 82 | minReplicas: 1 83 | maxReplicas: 100 84 | targetCPUUtilizationPercentage: 80 85 | # targetMemoryUtilizationPercentage: 80 86 | 87 | nodeSelector: {} 88 | 89 | tolerations: [] 90 | 91 | affinity: {} 92 | 93 | # cloudProvider -- The cloud provider where the autoscaler runs. 94 | cloudProvider: aws 95 | 96 | autoDiscovery: 97 | # autoDiscovery.clusterName -- Enable autodiscovery for `cloudProvider=aws`, for groups matching `autoDiscovery.tags`. 98 | clusterName: # cluster.local 99 | 100 | # AWS: Set tags as described in https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md#auto-discovery-setup 101 | # autoDiscovery.tags -- ASG tags to match, run through `tpl`. 102 | tags: 103 | - k8s.io/cluster-autoscaler/enabled 104 | - k8s.io/cluster-autoscaler/{{ .Values.autoDiscovery.clusterName }} 105 | # - kubernetes.io/cluster/{{ .Values.autoDiscovery.clusterName }} 106 | 107 | autoscalingGroups: [] 108 | # For AWS, Azure AKS or Magnum. At least one element is required if not using `autoDiscovery`. For example: 109 | # - name: asg1 110 | # maxSize: 2 111 | # minSize: 1 112 | # - name: asg2 113 | # maxSize: 2 114 | # minSize: 1 115 | 116 | # Environment variables for AWS provider 117 | awsRegion: "" 118 | awsAccessKeyID: "" 119 | awsSecretAccessKey: "" 120 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-eks/README.md: -------------------------------------------------------------------------------- 1 | # Amazon EKS (Elastic Kubernetes Service) 2 | [Amazon EKS](https://aws.amazon.com/eks/) is a fully managed Kubernetes service. Customers trust EKS to run their most sensitive and mission critical applications because of its security, reliability, and scalability. This module will create a spinnaker-managed EKS cluster including the control plane and data plane. It also gives you a utility bash script to configure RBAC on the EKS cluster, and users can configure an IAM Role for a Kubernetes Service Account using a terraform module. This is an important way to strengthen security by minimizing the access permissions of Kubernetes Pods. For more information about mapping a Kubernetes service account to an IAM role, please check out the [IRSA (IAM Role for Service Account)](https://github.com/Young-ook/terraform-aws-eks/blob/main/modules/iam-role-for-serviceaccount/README.md).
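Independent of the helper module linked above, a minimal hand-rolled sketch may clarify what IRSA establishes: an IAM role that can only be assumed through the cluster's OIDC provider by one specific service account. The account ID, OIDC provider ID, namespace, and service account name below are placeholders, not values produced by this module.
```hcl
# Sketch of an IRSA trust policy; all identifiers are placeholders.
locals {
  oidc_url = "oidc.eks.ap-northeast-2.amazonaws.com/id/EXAMPLE" # cluster OIDC issuer (no scheme)
  oidc_arn = "arn:aws:iam::111122223333:oidc-provider/oidc.eks.ap-northeast-2.amazonaws.com/id/EXAMPLE"
}

resource "aws_iam_role" "irsa" {
  name = "example-irsa"
  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Effect    = "Allow"
      Action    = "sts:AssumeRoleWithWebIdentity"
      Principal = { Federated = local.oidc_arn }
      # Only pods running as this Kubernetes service account can assume the role.
      Condition = {
        StringEquals = { "${local.oidc_url}:sub" = "system:serviceaccount:spinnaker:example" }
      }
    }]
  })
}
```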
3 | 4 | ## Quickstart 5 | ### Setup 6 | ```hcl 7 | module "eks" { 8 | source = "Young-ook/spinnaker/aws//modules/spinnaker-managed-eks" 9 | version = ">= 2.0" 10 | name = "example" 11 | } 12 | ``` 13 | Run terraform: 14 | ``` 15 | terraform init 16 | terraform apply 17 | ``` 18 | After that, you will see the created EKS cluster and node groups. 19 | 20 | ## Generate kubernetes config 21 | This terraform module provides users a shell script that extracts the kubeconfig file of the EKS cluster. For more details, please visit the [terraform eks module]( 22 | https://github.com/Young-ook/terraform-aws-eks/blob/main/README.md#generate-kubernetes-config). 23 | Prepare the kubeconfig file with credentials to access the EKS cluster using the script described above. This is important when activating your Kubernetes account in the next step. 24 | 25 | ## Store kubernetes config 26 | Upload the kubeconfig file generated by the script described in the previous step to the S3 bucket that is created by this terraform module. It may look like the example below. 27 | ``` 28 | aws s3 cp kubeconfig s3://spinnaker-dev-tc1-xyzbc/ 29 | ``` 30 | 31 | ## Using S3 as persistent storage 32 | ``` 33 | kubectl -n spinnaker exec -it cd-spinnaker-halyard-0 -- bash 34 | bash $ hal config storage s3 edit --region ap-northeast-2 --bucket spinnaker-dev-tc1-xyzbc 35 | bash $ hal config storage edit --type s3 36 | bash $ hal deploy apply 37 | ``` 38 | 39 | ## Enabling Kubernetes account in spinnaker 40 | This is example code to enable a Kubernetes account in the spinnaker. In this example, `eks-test` is the name of the Kubernetes account in spinnaker. Please note that the Kubernetes account uses the credentials from a Kubernetes config file. Don't forget to replace the context and kubeconfig-file parameters with yours. 41 | ``` 42 | kubectl -n spinnaker exec -it cd-spinnaker-halyard-0 -- bash 43 | bash $ hal config provider kubernetes account add eks-test \ 44 | --kubeconfig-file 'encryptedFile:s3!r:ap-northeast-2!b:spinnaker-dev-tc1-xyzbc!f:kubeconfig' \ 45 | --context eks-test \ 46 | --environment dev 47 | bash $ hal config provider kubernetes enable 48 | bash $ hal deploy apply 49 | ``` 50 | For more information, please refer to [this](https://spinnaker.io/setup/install/providers/kubernetes-v2/). 51 | 52 | ## More information 53 | - [Configuration S3 Storage](https://spinnaker.io/setup/install/storage/s3/) 54 | - [Secrets Management in Halyard](https://spinnaker.io/reference/halyard/secrets/) 55 | - [Halyard Command for Kubernetes Account Management](https://spinnaker.io/reference/halyard/commands/#hal-config-provider-kubernetes-account-add) 56 | -------------------------------------------------------------------------------- /charts/spinnaker/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: {{ .Release.Name }}-halyard 6 | labels: 7 | {{ include "spinnaker.standard-labels" .
| indent 4 }} 8 | data: 9 | init.sh: | 10 | #!/bin/bash 11 | 12 | # Override Halyard daemon's listen address 13 | cp /opt/halyard/config/* /tmp/config 14 | printf 'server.address: 0.0.0.0\n' > /tmp/config/halyard-local.yml 15 | 16 | # Use Redis deployed via the dependent Helm chart 17 | rm -rf /tmp/spinnaker/.hal/default/service-settings 18 | mkdir -p /tmp/spinnaker/.hal/default/service-settings 19 | cp /tmp/service-settings/* /tmp/spinnaker/.hal/default/service-settings/ 20 | 21 | rm -rf /tmp/spinnaker/.hal/default/profiles 22 | mkdir -p /tmp/spinnaker/.hal/default/profiles 23 | 24 | {{- if .Values.serviceConfigs }} 25 | for filename in /tmp/service-configs/*; do 26 | basename=$(basename -- "$filename") 27 | fname="${basename#*_}" 28 | servicename="${basename%%_*}" 29 | 30 | mkdir -p "/tmp/spinnaker/.hal/.boms/$servicename" 31 | cp "$filename" "/tmp/spinnaker/.hal/.boms/$servicename/$fname" 32 | done 33 | {{- end }} 34 | --- 35 | apiVersion: v1 36 | kind: ConfigMap 37 | metadata: 38 | name: {{ .Release.Name }}-halyard-service-settings 39 | labels: 40 | {{ include "spinnaker.standard-labels" . | indent 4 }} 41 | 42 | {{/* 43 | Render settings for each service by merging predefined defaults with values passed by 44 | .Values.serviceSettings 45 | */}} 46 | {{- $settings := dict -}} 47 | 48 | {{/* Defaults: gate service */}} 49 | {{- if .Values.ingress.enabled -}} 50 | {{- $gateDefaults := dict -}} 51 | {{- $_ := set $gateDefaults "kubernetes" (dict "useExecHealthCheck" false "serviceType" "NodePort") -}} 52 | {{- $_ := set $settings "gate.yml" $gateDefaults -}} 53 | {{- end -}} 54 | 55 | {{/* Defaults: deck service */}} 56 | {{- $deckDefaults := dict -}} 57 | {{- $_ := set $deckDefaults "env" (dict "API_HOST" "http://spin-gate:8084") -}} 58 | {{- if .Values.ingress.enabled -}} 59 | {{- $_ := set $deckDefaults "kubernetes" (dict "useExecHealthCheck" false "serviceType" "NodePort") -}} 60 | {{- end -}} 61 | {{- $_ := set $settings "deck.yml" $deckDefaults -}} 62 | 63 | {{- /* Merge dictionaries with passed values */}} 64 | {{- if .Values.serviceSettings -}} 65 | {{- $_ := mergeOverwrite $settings .Values.serviceSettings -}} 66 | {{- end -}} 67 | 68 | {{- /* Convert the content of settings key to YAML string */}} 69 | {{- range $filename, $content := $settings -}} 70 | {{- if not (typeIs "string" $content) -}} 71 | {{- $_ := set $settings $filename ($content | toYaml) -}} 72 | {{- end -}} 73 | {{- end -}} 74 | 75 | data: 76 | {{ $settings | toYaml | indent 2 }} 77 | 78 | {{ if .Values.serviceConfigs -}} 79 | --- 80 | apiVersion: v1 81 | kind: ConfigMap 82 | metadata: 83 | name: {{ .Release.Name }}-halyard-service-configs 84 | labels: 85 | {{ include "spinnaker.standard-labels" . 
| indent 4 }} 86 | 87 | {{/* 88 | Render local configuration for each service with values passed by 89 | .Values.serviceConfigs 90 | */}} 91 | {{- $settings := dict -}} 92 | 93 | {{- if .Values.serviceConfigs -}} 94 | {{- $_ := mergeOverwrite $settings .Values.serviceConfigs -}} 95 | {{- end -}} 96 | 97 | {{- /* Convert the content of settings key to YAML string */}} 98 | {{- range $filename, $content := $settings -}} 99 | {{- if not (typeIs "string" $content) -}} 100 | {{- $_ := set $settings $filename ($content | toYaml) -}} 101 | {{- end -}} 102 | {{- end -}} 103 | 104 | data: 105 | {{ $settings | toYaml | indent 2 }} 106 | {{- end -}} 107 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/foundation/charts/cluster-autoscaler/templates/rbac.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create }} 2 | --- 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRole 5 | metadata: 6 | name: {{ include "cluster-autoscaler.fullname" . }} 7 | labels: {{- include "cluster-autoscaler.labels" . | nindent 4 }} 8 | rules: 9 | - apiGroups: [""] 10 | resources: ["events", "endpoints"] 11 | verbs: ["create", "patch"] 12 | - apiGroups: [""] 13 | resources: ["pods/eviction"] 14 | verbs: ["create"] 15 | - apiGroups: [""] 16 | resources: ["pods/status"] 17 | verbs: ["update"] 18 | - apiGroups: [""] 19 | resources: ["endpoints"] 20 | resourceNames: ["cluster-autoscaler"] 21 | verbs: ["get", "update"] 22 | - apiGroups: [""] 23 | resources: ["nodes"] 24 | verbs: ["watch", "list", "get", "update"] 25 | - apiGroups: [""] 26 | resources: 27 | - "pods" 28 | - "services" 29 | - "replicationcontrollers" 30 | - "persistentvolumeclaims" 31 | - "persistentvolumes" 32 | verbs: ["watch", "list", "get"] 33 | - apiGroups: ["extensions"] 34 | resources: ["replicasets", "daemonsets"] 35 | verbs: ["watch", "list", "get"] 36 | - apiGroups: ["policy"] 37 | resources: ["poddisruptionbudgets"] 38 | verbs: ["watch", "list"] 39 | - apiGroups: ["apps"] 40 | resources: ["statefulsets", "replicasets", "daemonsets"] 41 | verbs: ["watch", "list", "get"] 42 | - apiGroups: ["storage.k8s.io"] 43 | resources: ["storageclasses", "csinodes"] 44 | verbs: ["watch", "list", "get"] 45 | - apiGroups: ["batch", "extensions"] 46 | resources: ["jobs"] 47 | verbs: ["get", "list", "watch", "patch"] 48 | - apiGroups: ["coordination.k8s.io"] 49 | resources: ["leases"] 50 | verbs: ["create"] 51 | - apiGroups: ["coordination.k8s.io"] 52 | resourceNames: ["cluster-autoscaler"] 53 | resources: ["leases"] 54 | verbs: ["get", "update"] 55 | 56 | --- 57 | apiVersion: rbac.authorization.k8s.io/v1 58 | kind: Role 59 | metadata: 60 | namespace: {{ .Release.Namespace }} 61 | name: {{ include "cluster-autoscaler.fullname" . }} 62 | labels: {{- include "cluster-autoscaler.labels" . | nindent 4 }} 63 | rules: 64 | - apiGroups: [""] 65 | resources: ["configmaps"] 66 | verbs: ["create","list","watch"] 67 | - apiGroups: [""] 68 | resources: ["configmaps"] 69 | resourceNames: ["cluster-autoscaler-status", "cluster-autoscaler-priority-expander"] 70 | verbs: ["delete", "get", "update", "watch"] 71 | 72 | --- 73 | apiVersion: rbac.authorization.k8s.io/v1 74 | kind: ClusterRoleBinding 75 | metadata: 76 | namespace: {{ .Release.Namespace }} 77 | name: {{ include "cluster-autoscaler.fullname" . }} 78 | labels: {{- include "cluster-autoscaler.labels" . 
| nindent 4 }} 79 | subjects: 80 | - kind: ServiceAccount 81 | namespace: {{ .Release.Namespace }} 82 | name: {{ include "cluster-autoscaler.serviceAccountName" . }} 83 | roleRef: 84 | apiGroup: rbac.authorization.k8s.io 85 | kind: ClusterRole 86 | name: {{ include "cluster-autoscaler.fullname" . }} 87 | 88 | --- 89 | apiVersion: rbac.authorization.k8s.io/v1 90 | kind: RoleBinding 91 | metadata: 92 | namespace: {{ .Release.Namespace }} 93 | name: {{ include "cluster-autoscaler.fullname" . }} 94 | labels: {{- include "cluster-autoscaler.labels" . | nindent 4 }} 95 | subjects: 96 | - kind: ServiceAccount 97 | namespace: {{ .Release.Namespace }} 98 | name: {{ include "cluster-autoscaler.serviceAccountName" . }} 99 | roleRef: 100 | apiGroup: rbac.authorization.k8s.io 101 | kind: Role 102 | name: {{ include "cluster-autoscaler.fullname" . }} 103 | {{- end }} 104 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/foundation/charts/cluster-autoscaler/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create -}} 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: {{ include "cluster-autoscaler.fullname" . }} 6 | labels: 7 | {{- include "cluster-autoscaler.labels" . | nindent 4 }} 8 | spec: 9 | {{- if not .Values.autoscaling.enabled }} 10 | replicas: {{ .Values.replicaCount }} 11 | {{- end }} 12 | selector: 13 | matchLabels: 14 | {{- include "cluster-autoscaler.selectorLabels" . | nindent 6 }} 15 | template: 16 | metadata: 17 | {{- with .Values.podAnnotations }} 18 | annotations: 19 | {{- toYaml . | nindent 8 }} 20 | {{- end }} 21 | labels: 22 | {{- include "cluster-autoscaler.selectorLabels" . | nindent 8 }} 23 | spec: 24 | {{- with .Values.imagePullSecrets }} 25 | imagePullSecrets: 26 | {{- toYaml . | nindent 8 }} 27 | {{- end }} 28 | serviceAccountName: {{ include "cluster-autoscaler.serviceAccountName" . }} 29 | securityContext: 30 | {{- toYaml .Values.podSecurityContext | nindent 8 }} 31 | containers: 32 | - name: cluster-autoscaler 33 | securityContext: 34 | {{- toYaml .Values.securityContext | nindent 12 }} 35 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" 36 | imagePullPolicy: {{ .Values.image.pullPolicy }} 37 | command: 38 | - ./cluster-autoscaler 39 | - --cloud-provider={{ .Values.cloudProvider }} 40 | - --namespace={{ .Release.Namespace }} 41 | - --skip-nodes-with-local-storage=false 42 | - --expander=least-waste 43 | {{- if .Values.autoscalingGroups }} 44 | {{- range .Values.autoscalingGroups }} 45 | - --nodes={{ .minSize }}:{{ .maxSize }}:{{ .name }} 46 | {{- end }} 47 | {{- end }} 48 | {{- if eq .Values.cloudProvider "aws" }} 49 | {{- if .Values.autoDiscovery.clusterName }} 50 | - --node-group-auto-discovery=asg:tag={{ tpl (join "," .Values.autoDiscovery.tags) . }} 51 | {{- end }} 52 | {{- range $key, $value := .Values.extraArgs }} 53 | - --{{ $key }}={{ $value }} 54 | {{- end }} 55 | {{- end }} 56 | env: 57 | {{- if and (eq .Values.cloudProvider "aws") (ne .Values.awsRegion "") }} 58 | - name: AWS_REGION 59 | value: "{{ .Values.awsRegion }}" 60 | {{- if .Values.awsAccessKeyID }} 61 | - name: AWS_ACCESS_KEY_ID 62 | valueFrom: 63 | secretKeyRef: 64 | key: AwsAccessKeyId 65 | name: {{ template "cluster-autoscaler.fullname" . 
}} 66 | {{- end }} 67 | {{- if .Values.awsSecretAccessKey }} 68 | - name: AWS_SECRET_ACCESS_KEY 69 | valueFrom: 70 | secretKeyRef: 71 | key: AwsSecretAccessKey 72 | name: {{ template "cluster-autoscaler.fullname" . }} 73 | {{- end }} 74 | {{- end }} 75 | {{- if .Values.extraVolumeMounts }} 76 | volumeMounts: 77 | {{ toYaml .Values.extraVolumeMounts | nindent 12 }} 78 | {{- end }} 79 | resources: 80 | {{- toYaml .Values.resources | nindent 12 }} 81 | {{- if .Values.extraVolumes }} 82 | volumes: 83 | {{- toYaml .Values.extraVolumes | nindent 10 }} 84 | {{- end }} 85 | {{- with .Values.nodeSelector }} 86 | nodeSelector: 87 | {{- toYaml . | nindent 8 }} 88 | {{- end }} 89 | {{- with .Values.affinity }} 90 | affinity: 91 | {{- toYaml . | nindent 8 }} 92 | {{- end }} 93 | {{- with .Values.tolerations }} 94 | tolerations: 95 | {{- toYaml . | nindent 8 }} 96 | {{- end }} 97 | {{- end }} 98 | -------------------------------------------------------------------------------- /charts/spinnaker/values.yaml: -------------------------------------------------------------------------------- 1 | image: 2 | repository: us-docker.pkg.dev/spinnaker-community/docker/halyard 3 | tag: stable 4 | pullSecrets: [] 5 | # Set to false to disable persistence data volume for halyard 6 | persistence: 7 | enabled: false 8 | # Uncomment to add storage class for the persistence data volume 9 | # storageClass: 10 | # Provide additional parameters to halyard deploy apply command 11 | 12 | # Define annotations you want to add on halyard pod 13 | annotations: {} 14 | 15 | ## Uncomment the following resources definitions to control the cpu and memory 16 | # resources allocated for the halyard pod 17 | resources: {} 18 | # requests: 19 | # memory: "1Gi" 20 | # cpu: "100m" 21 | # limits: 22 | # memory: "2Gi" 23 | # cpu: "200m" 24 | 25 | # Node labels for pod assignment 26 | # Ref: https://kubernetes.io/docs/user-guide/node-selection/ 27 | # nodeSelector to provide to each of the Spinnaker components 28 | nodeSelector: {} 29 | 30 | # Node tolerations 31 | # Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ 32 | tolerations: [] 33 | 34 | serviceAccount: 35 | create: false 36 | name: default 37 | annotations: {} 38 | 39 | securityContext: 40 | # Specifies permissions to write for user/group 41 | runAsUser: 1000 42 | fsGroup: 1000 43 | 44 | ## Define custom settings for Spinnaker services. Read more for details: 45 | ## https://www.spinnaker.io/reference/halyard/custom/#custom-service-settings 46 | ## You can use it to add annotations for pods, override the image, etc. 47 | serviceSettings: 48 | # deck.yml: 49 | # artifactId: gcr.io/spinnaker-marketplace/deck:2.9.0-20190412012808 50 | # kubernetes: 51 | # podAnnotations: 52 | # iam.amazonaws.com/role: 53 | #clouddriver.yml: 54 | # kubernetes: 55 | # serviceAccountName: spinnaker 56 | 57 | ## Define local configuration for Spinnaker services. 58 | ## The contents of these files would be copies of the configuration normally retrieved from 59 | ## `gs://halconfig/`, but instead need to be available locally on the halyard pod to facilitate 60 | ## offline installation. 61 | ## Read more for details: 62 | ## https://www.spinnaker.io/guides/operator/custom-boms/#boms-and-configuration-on-your-filesystem 63 | ## The key for each entry must be the name of the service and a file name separated by the '_' character. 64 | serviceConfigs: {} 65 | # clouddriver_clouddriver-ro.yml: |- 66 | # ... 67 | # clouddriver_clouddriver-rw.yml: |- 68 | # ... 
69 | # clouddriver_clouddriver.yml: |- 70 | # ... 71 | # deck_settings.json: |- 72 | # ... 73 | # echo_echo.yml: |- 74 | # ... 75 | 76 | ## Change this if you'd like to expose Spinnaker outside the cluster 77 | ingress: 78 | enabled: false 79 | #deck: 80 | # host: spinnaker.example.org 81 | # annotations: 82 | # ingress.kubernetes.io/ssl-redirect: 'true' 83 | # kubernetes.io/ingress.class: nginx 84 | # kubernetes.io/tls-acme: "true" 85 | # tls: 86 | # - secretName: -tls 87 | # hosts: 88 | # - domain.com 89 | 90 | #gate: 91 | # host: gate.spinnaker.example.org 92 | # annotations: 93 | # ingress.kubernetes.io/ssl-redirect: 'true' 94 | # kubernetes.io/ingress.class: nginx 95 | # kubernetes.io/tls-acme: "true" 96 | # tls: 97 | # - secretName: -tls 98 | # hosts: 99 | # - domain.com 100 | 101 | ## Minio access/secret keys for the in-cluster S3 usage 102 | ## Minio is not exposed publicly 103 | minio: 104 | enabled: true 105 | resources: 106 | requests: 107 | memory: 512Mi 108 | accessKey: spinnakeradmin 109 | secretKey: spinnakeradmin 110 | defaultBucket: 111 | enabled: true 112 | name: "spinnaker" 113 | nodeSelector: {} 114 | # Use a single replica for development or test 115 | replicas: 1 116 | mode: standalone 117 | persistence: 118 | # Uncomment if you don't want to create a PVC for minio 119 | enabled: false 120 | -------------------------------------------------------------------------------- /charts/spinnaker/templates/statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ .Release.Name }}-halyard 5 | labels: 6 | {{ include "spinnaker.standard-labels" . | indent 4 }} 7 | component: halyard 8 | spec: 9 | ports: 10 | - port: 8064 11 | name: daemon 12 | clusterIP: None 13 | selector: 14 | app: halyard 15 | component: halyard 16 | --- 17 | apiVersion: apps/v1 18 | kind: StatefulSet 19 | metadata: 20 | name: {{ .Release.Name }}-halyard 21 | labels: 22 | {{ include "spinnaker.standard-labels" . | indent 4 }} 23 | spec: 24 | serviceName: {{ .Release.Name }}-halyard 25 | replicas: 1 26 | selector: 27 | matchLabels: 28 | {{ include "spinnaker.standard-selector-labels" . | indent 6 }} 29 | component: halyard 30 | template: 31 | metadata: 32 | annotations: 33 | {{- if .Values.annotations }} 34 | {{ toYaml .Values.annotations | indent 8 }} 35 | {{- end }} 36 | labels: 37 | {{ include "spinnaker.standard-labels" .
| indent 8 }} 38 | component: halyard 39 | spec: 40 | securityContext: 41 | runAsUser: {{ .Values.securityContext.runAsUser }} 42 | fsGroup: {{ .Values.securityContext.fsGroup }} 43 | {{- if .Values.nodeSelector }} 44 | nodeSelector: 45 | {{ toYaml .Values.nodeSelector | indent 8 }} 46 | {{- end }} 47 | {{- if .Values.tolerations }} 48 | tolerations: 49 | {{ toYaml .Values.tolerations | indent 8 }} 50 | {{- end }} 51 | initContainers: 52 | - name: create-halyard-local 53 | image: {{ .Values.image.repository }}:{{ .Values.image.tag }} 54 | command: 55 | - bash 56 | - /tmp/initscript/init.sh 57 | volumeMounts: 58 | - name: halyard-initscript 59 | mountPath: /tmp/initscript 60 | - name: halyard-home 61 | mountPath: /tmp/spinnaker 62 | - name: service-settings 63 | mountPath: /tmp/service-settings 64 | {{- if .Values.serviceConfigs }} 65 | - name: service-configs 66 | mountPath: /tmp/service-configs 67 | {{- end }} 68 | {{- if .Values.env }} 69 | env: 70 | {{ toYaml .Values.env | indent 8 }} 71 | {{- end }} 72 | containers: 73 | - name: halyard 74 | image: {{ .Values.image.repository }}:{{ .Values.image.tag }} 75 | ports: 76 | - containerPort: 8064 77 | name: daemon 78 | {{- if .Values.resources }} 79 | resources: 80 | {{ toYaml .Values.resources | indent 10 }} 81 | {{- end }} 82 | volumeMounts: 83 | - name: halyard-home 84 | mountPath: /home/spinnaker 85 | {{- if .Values.env }} 86 | env: 87 | {{ toYaml .Values.env | indent 8 }} 88 | {{- end }} 89 | volumes: 90 | {{- if not .Values.persistence.enabled }} 91 | - name: halyard-home 92 | emptyDir: {} 93 | {{- end }} 94 | - name: service-settings 95 | configMap: 96 | name: {{ .Release.Name }}-halyard-service-settings 97 | {{- if .Values.serviceConfigs }} 98 | - name: service-configs 99 | configMap: 100 | name: {{ .Release.Name }}-halyard-service-configs 101 | {{- end }} 102 | - name: halyard-initscript 103 | configMap: 104 | name: {{ .Release.Name }}-halyard 105 | {{- if .Values.image.pullSecrets }} 106 | imagePullSecrets: 107 | {{- range .Values.image.pullSecrets }} 108 | - name: {{ . }} 109 | {{- end}} 110 | {{- end}} 111 | {{- if .Values.persistence.enabled }} 112 | volumeClaimTemplates: 113 | - metadata: 114 | name: halyard-home 115 | labels: 116 | {{ include "spinnaker.standard-labels-base" . | indent 8 }} 117 | spec: 118 | accessModes: [ "ReadWriteOnce" ] 119 | {{- with .Values.persistence.storageClass }} 120 | storageClassName: {{ . }} 121 | {{- end }} 122 | resources: 123 | requests: 124 | storage: 10Gi 125 | {{- end }} 126 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Spinnaker 2 | [Spinnaker](https://spinnaker.io/) is an open-source, multi-cloud continuous delivery platform for releasing software changes with high velocity and confidence. This is the terraform module to build and install spinnaker on AWS. This module will create Amazon EKS, Amazon Aurora, Amazon S3 resources for spinnaker and utilise Helm chart to install spinnaker application on kubernetes. And it will also create a VPC to place an EKS and an Aurora cluster for the spinnaker. If you want to know how to use this module, please check below examples for more details. 
3 | 4 | ## Examples 5 | - [Spinnaker Blueprint](https://github.com/Young-ook/terraform-aws-spinnaker/blob/main/examples/blueprint) 6 | - [AWS Modernization with Spinnaker](https://github.com/Young-ook/terraform-aws-spinnaker/blob/main/examples/aws-modernization-with-spinnaker) 7 | 8 | ## Getting started 9 | ### AWS CLI 10 | Follow the official guide to install and configure profiles. 11 | - [AWS CLI Installation](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html) 12 | - [AWS CLI Configuration](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html) 13 | 14 | After the installation is complete, you can check the aws cli version: 15 | ``` 16 | aws --version 17 | aws-cli/2.5.8 Python/3.9.11 Darwin/21.4.0 exe/x86_64 prompt/off 18 | ``` 19 | 20 | ### Terraform 21 | Terraform is an open-source infrastructure as code software tool that enables you to safely and predictably create, change, and improve infrastructure. 22 | 23 | #### Install 24 | This is the official guide for installing the terraform binary. Please visit the [Install Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) website and follow the instructions. 25 | 26 | Or, you can manually get a specific version of the terraform binary from the website. Go to the [Downloads](https://www.terraform.io/downloads.html) page and look for the appropriate package for your system. Download the selected zip archive, unzip it, and install terraform by moving the binary to a directory included in your system's `PATH`. 27 | 28 | Or, you can use the [tfenv](https://github.com/tfutils/tfenv) utility. It is a convenient and easy way to install and switch between multiple versions of the terraform CLI. 29 | 30 | First, install tfenv using brew. 31 | ``` 32 | brew install tfenv 33 | ``` 34 | Then, you can use tfenv in your workspace like below. 35 | ``` 36 | tfenv install 37 | tfenv use 38 | ``` 39 | This tool is also helpful when upgrading to terraform v0.12, a major release focused on configuration language improvements that includes changes you'll need to consider when upgrading. Because versions 0.11 and 0.12 are very different, a tool that can switch quickly between versions is useful when some code is written for the older version and some for 0.12. 40 | ``` 41 | tfenv list 42 | tfenv install latest 43 | tfenv use 44 | ``` 45 | 46 | ### Kubernetes CLI 47 | Here is a simple way to install the kubernetes command line tool on macOS. 48 | ``` 49 | brew install kubernetes-cli 50 | ``` 51 | 52 | For more information about kubernetes tools, please visit this [page](https://kubernetes.io/docs/tasks/tools/) and follow the **kubectl** instructions.
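Before moving on to the Setup step, it can help to record which Terraform version a workspace expects: tfenv reads a `.terraform-version` file in the working directory, and Terraform itself honors a `required_version` constraint, failing fast during `terraform init` on a mismatch. The constraint below is only an illustration; align it with the release you actually target.
```hcl
# Illustrative version pin; adjust the constraint to the release you target.
terraform {
  required_version = ">= 0.13"
}
```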
53 | 54 | ### Setup 55 | ```hcl 56 | module "spinnaker" { 57 | source = "Young-ook/spinnaker/aws" 58 | version = "3.0.0" 59 | name = "spinnaker" 60 | stack = "test" 61 | tags = { env = "test" } 62 | } 63 | ``` 64 | Run terraform: 65 | ``` 66 | terraform init 67 | terraform apply 68 | ``` 69 | 70 | # Additional Resources 71 | ## Case Study 72 | - [Netflix](https://cd.foundation/case-studies/spinnaker-case-studies/spinnaker-case-study-netflix/) 73 | - [How Netflix Built Spinnaker, a High Velocity Continuous Delivery Platform](https://thenewstack.io/netflix-built-spinnaker-high-velocity-continuous-delivery-platform/) 74 | - [Simplifying delivery as code with Spinnaker and Kubernetes](https://aws.amazon.com/solutions/case-studies/netflix-kubernetes-reinvent2020-video/) 75 | - [Google Waze SRE](https://sre.google/workbook/organizational-change/) 76 | - [AWS re:Invent 2022 - Reimagining multi-account deployments for security and speed (NFX305)](https://youtu.be/MKc9r6xOTpk) 77 | 78 | ## Netflix Projects 79 | - [Netflix OSS](https://netflix.github.io/) 80 | -------------------------------------------------------------------------------- /scripts/update-x509ca.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | # create new self-signed certificate authority (CA) 3 | 4 | CURDIR=`dirname $0` 5 | CERT_HOME=$CURDIR/cert 6 | 7 | # Variable 8 | CNTRY="KR" 9 | STAT="ICN" 10 | LOC="ICN" 11 | ORG="ORG" 12 | CN="your@email.com" 13 | GROUP="spinnaker-team1\nspinnaker-team2" 14 | 15 | # Conditions 16 | CLT=false 17 | SVR=false 18 | 19 | function print_usage() { 20 | echo "Usage: $0 -a(all) | -c(client only)" 21 | } 22 | 23 | function process_args() { 24 | if [[ $# < 1 ]]; then 25 | print_usage 26 | exit -1 27 | fi 28 | 29 | while [[ $# > 0 ]]; do 30 | local key="$1" 31 | shift 32 | case $key in 33 | -a) 34 | CLT=true 35 | SVR=true 36 | ;; 37 | -c) 38 | CLT=true 39 | SVR=false 40 | ;; 41 | *) 42 | >&2 echo "Unrecognized argument '$key'" 43 | exit -1 44 | esac 45 | done 46 | } 47 | 48 | function clean() { 49 | if [ -e $CERT_HOME ]; then 50 | if $CLT; then 51 | find "$CERT_HOME/" -name "client.*" -type f -delete 52 | find "$CERT_HOME/" -name "openssl.conf" -type f -delete 53 | fi 54 | 55 | if $CLT && $SVR; then 56 | rm -r "$CERT_HOME" 57 | mkdir -p "$CERT_HOME" 58 | fi 59 | else 60 | mkdir -p "$CERT_HOME" 61 | fi 62 | } 63 | 64 | function gen_ca() { 65 | openssl genrsa -out $CERT_HOME/ca.key 4096 66 | openssl req -new -x509 -days 365 -key $CERT_HOME/ca.key -out $CERT_HOME/ca.crt \ 67 | -subj "/C=$CNTRY/ST=$STAT/L=$LOC/O=$ORG/OU=$ORG/CN=$CN" 68 | } 69 | 70 | function gen_server_crt() { 71 | openssl genrsa -out $CERT_HOME/server.key 4096 72 | openssl req -new -key $CERT_HOME/server.key -out $CERT_HOME/server.csr \ 73 | -subj "/C=$CNTRY/ST=$STAT/L=$LOC/O=$ORG/OU=$ORG/CN=$CN" 74 | 75 | openssl x509 -req -days 365 -in $CERT_HOME/server.csr -CA $CERT_HOME/ca.crt -CAkey $CERT_HOME/ca.key \ 76 | -CAcreateserial -out $CERT_HOME/server.crt 77 | 78 | echo "---------------------------------------------------------------------------" 79 | echo "Automatically generated key will be stored in $CERT_HOME/server.secret file" 80 | echo "---------------------------------------------------------------------------" 81 | # password auto-generation 82 | local PASSWD=$(pwgen 20 1) 83 | echo $PASSWD > $CERT_HOME/server.secret | chmod 600 $CERT_HOME/server.secret 84 | 85 | openssl pkcs12 -export -clcerts -in $CERT_HOME/server.crt \ 86 | -inkey $CERT_HOME/server.key -out 
$CERT_HOME/server.p12 \ 87 | -name spinnaker -password pass:$PASSWD 88 | 89 | keytool -keystore $CERT_HOME/keystore.jks -import -trustcacerts -alias ca \ 90 | -file $CERT_HOME/ca.crt -storepass $PASSWD 91 | 92 | keytool -importkeystore \ 93 | -srcalias spinnaker -srckeystore $CERT_HOME/server.p12 -srcstoretype pkcs12 \ 94 | -srcstorepass $PASSWD \ 95 | -destalias server -destkeystore $CERT_HOME/keystore.jks -deststoretype jks \ 96 | -deststorepass $PASSWD -destkeypass $PASSWD 97 | } 98 | 99 | function gen_client_crt() { 100 | # x509 config file 101 | cat << EOF > $CERT_HOME/openssl.conf 102 | [ req ] 103 | #default_bits = 2048 104 | #default_md = sha256 105 | #default_keyfile = privkey.pem 106 | distinguished_name = req_distinguished_name 107 | req_extensions = v3_req 108 | x509_extensions = v3_req 109 | 110 | [ req_distinguished_name ] 111 | countryName = $CNTRY 112 | countryName_min = 2 113 | countryName_max = 2 114 | stateOrProvinceName = $STAT 115 | localityName = $LOC 116 | 0.organizationName = $ORG 117 | organizationalUnitName = $ORG 118 | commonName = $CN 119 | commonName_max = 64 120 | emailAddress = Email Address 121 | emailAddress_max = 64 122 | 123 | [ v3_req ] 124 | keyUsage = nonRepudiation, digitalSignature, keyEncipherment 125 | 1.2.840.10070.8.1 = ASN1:UTF8String:$GROUP 126 | EOF 127 | 128 | openssl req -nodes -newkey rsa:2048 -keyout $CERT_HOME/client.key -out $CERT_HOME/client.csr \ 129 | -subj "/C=$CNTRY/ST=$STAT/L=$LOC/O=$ORG/OU=$ORG/CN=$CN" -config $CERT_HOME/openssl.conf 130 | 131 | # create x509 certificates chain 132 | openssl x509 -req -days 365 -in $CERT_HOME/client.csr -out $CERT_HOME/client.crt \ 133 | -CA $CERT_HOME/ca.crt -CAkey $CERT_HOME/ca.key -CAcreateserial \ 134 | -extfile $CERT_HOME/openssl.conf -extensions v3_req 135 | } 136 | 137 | # main 138 | process_args "$@" 139 | clean 140 | 141 | if $SVR && $CLT; then 142 | gen_ca 143 | gen_server_crt 144 | fi 145 | 146 | if $CLT; then 147 | gen_client_crt 148 | fi 149 | -------------------------------------------------------------------------------- /modules/codebuild/main.tf: -------------------------------------------------------------------------------- 1 | ## managed continuous integration service 2 | 3 | locals { 4 | artifact = lookup(var.project, "artifact", local.default_artifact) 5 | environment = lookup(var.project, "environment", local.default_environment) 6 | source = lookup(var.project, "source", local.default_source) 7 | } 8 | 9 | resource "aws_codebuild_project" "cb" { 10 | name = local.name 11 | tags = merge(local.default-tags, var.tags) 12 | description = "CodeBuild project" 13 | build_timeout = "5" 14 | service_role = aws_iam_role.cb.arn 15 | 16 | artifacts { 17 | type = lookup(local.artifact, "type", local.default_artifact.type) 18 | location = lookup(local.artifact, "location", local.default_artifact.location) 19 | encryption_disabled = lookup(local.artifact, "encryption_disabled", local.default_artifact.encryption_disabled) 20 | } 21 | 22 | environment { 23 | type = lookup(local.environment, "type", local.default_environment["type"]) 24 | image = lookup(local.environment, "image", local.default_environment["image"]) 25 | compute_type = lookup(local.environment, "compute_type", local.default_environment["compute_type"]) 26 | image_pull_credentials_type = lookup(local.environment, "image_pull_credentials_type", local.default_environment["image_pull_credentials_type"]) 27 | privileged_mode = lookup(local.environment, "privileged_mode", local.default_environment["privileged_mode"]) 28 | 29 | 
dynamic "environment_variable" { 30 | for_each = lookup(local.environment, "environment_variables", {}) 31 | content { 32 | name = environment_variable.key 33 | value = environment_variable.value 34 | } 35 | } 36 | } 37 | 38 | source { 39 | type = lookup(local.source, "type", local.default_source["type"]) 40 | location = lookup(local.source, "location", local.default_source["location"]) 41 | buildspec = lookup(local.source, "buildspec", local.default_source["buildspec"]) 42 | git_clone_depth = lookup(local.source, "git_clone_depth", 1) 43 | } 44 | source_version = lookup(local.source, "version", local.default_source["version"]) 45 | 46 | dynamic "logs_config" { 47 | for_each = var.log != null ? var.log : {} 48 | content { 49 | dynamic "cloudwatch_logs" { 50 | for_each = logs_config.key == "cloudwatch_logs" ? var.log : {} 51 | content { 52 | status = lookup(cloudwatch_logs.value, "status", null) 53 | group_name = lookup(cloudwatch_logs.value, "group_name", null) 54 | stream_name = lookup(cloudwatch_logs.value, "stream_name", null) 55 | } 56 | } 57 | 58 | dynamic "s3_logs" { 59 | for_each = logs_config.key == "s3_logs" ? var.log : {} 60 | content { 61 | status = lookup(s3_logs.value, "status", null) 62 | location = lookup(s3_logs.value, "location", null) 63 | encryption_disabled = lookup(s3_logs.value, "encryption_disabled", null) 64 | } 65 | } 66 | } 67 | } 68 | 69 | dynamic "vpc_config" { 70 | for_each = toset(var.vpc != null ? ["vpc"] : []) 71 | content { 72 | vpc_id = lookup(var.vpc, "vpc", null) 73 | subnets = lookup(var.vpc, "subnets", null) 74 | security_group_ids = lookup(var.vpc, "security_groups", null) 75 | } 76 | } 77 | } 78 | 79 | # security/policy 80 | resource "aws_iam_role" "cb" { 81 | name = format("%s-codebuild", local.name) 82 | tags = merge(local.default-tags, var.tags) 83 | assume_role_policy = jsonencode({ 84 | Statement = [{ 85 | Action = "sts:AssumeRole" 86 | Effect = "Allow" 87 | Principal = { 88 | Service = [format("codebuild.%s", module.aws.partition.dns_suffix)] 89 | } 90 | }] 91 | Version = "2012-10-17" 92 | }) 93 | } 94 | 95 | resource "aws_iam_policy" "cb" { 96 | name = join("-", [local.name, "codebuild"]) 97 | description = format("Allow access to ECR and S3 for build process") 98 | policy = jsonencode({ 99 | Version = "2012-10-17" 100 | Statement = [ 101 | { 102 | Action = [ 103 | "logs:CreateLogGroup", 104 | "logs:CreateLogStream", 105 | "logs:PutLogEvents", 106 | ] 107 | Effect = "Allow" 108 | Resource = [format("arn:%s:logs:*:*:*", module.aws.partition.partition)] 109 | }, 110 | { 111 | Action = [ 112 | "ecr:GetAuthorizationToken", 113 | "ssm:GetParameters", 114 | ] 115 | Effect = "Allow" 116 | Resource = ["*"] 117 | }, 118 | { 119 | "Action" = [ 120 | "ec2:CreateNetworkInterface", 121 | "ec2:DescribeDhcpOptions", 122 | "ec2:DescribeNetworkInterfaces", 123 | "ec2:DeleteNetworkInterface", 124 | "ec2:DescribeSubnets", 125 | "ec2:DescribeSecurityGroups", 126 | "ec2:DescribeVpcs" 127 | ], 128 | "Effect" = "Allow", 129 | "Resource" = ["*"] 130 | }, 131 | { 132 | "Action" = [ 133 | "ec2:CreateNetworkInterfacePermission" 134 | ], 135 | "Effect" = "Allow", 136 | Resource = [format("arn:%s:ec2:%s:%s:network-interface/*", 137 | module.aws.partition.partition, 138 | module.aws.region.name, 139 | module.aws.caller.account_id, 140 | )] 141 | "Condition" = { 142 | "StringEquals" = { 143 | "ec2:AuthorizedService" = "codebuild.amazonaws.com" 144 | } 145 | } 146 | } 147 | ] 148 | }) 149 | } 150 | 151 | resource "aws_iam_role_policy_attachment" "cb" { 152 | policy_arn = 
aws_iam_policy.cb.arn 153 | role = aws_iam_role.cb.name 154 | } 155 | 156 | resource "aws_iam_role_policy_attachment" "extra" { 157 | for_each = { for key, val in var.policy_arns : key => val } 158 | policy_arn = each.value 159 | role = aws_iam_role.cb.name 160 | } 161 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/foundation/main.tf: -------------------------------------------------------------------------------- 1 | ### aws partitions 2 | module "aws" { 3 | source = "Young-ook/spinnaker/aws//modules/aws-partitions" 4 | } 5 | 6 | ### foundation/network 7 | module "vpc" { 8 | source = "Young-ook/vpc/aws" 9 | version = "1.0.3" 10 | name = var.name 11 | tags = merge(var.tags, (module.eks.tags.shared == null ? {} : module.eks.tags.shared)) 12 | vpc_config = { 13 | azs = var.azs 14 | cidr = var.cidr 15 | single_ngw = true 16 | subnet_type = "private" 17 | } 18 | } 19 | 20 | ### foundation/kubernetes 21 | module "eks" { 22 | source = "Young-ook/eks/aws" 23 | version = "2.0.3" 24 | name = var.name 25 | tags = merge(var.tags, { release = "canary" }) 26 | subnets = values(module.vpc.subnets["private"]) 27 | enable_ssm = true 28 | kubernetes_version = var.kubernetes_version 29 | managed_node_groups = [ 30 | { 31 | name = "default" 32 | min_size = 1 33 | max_size = 9 34 | desired_size = 3 35 | instance_type = "t3.small" 36 | } 37 | ] 38 | policy_arns = [ 39 | format("arn:%s:iam::aws:policy/AWSXRayDaemonWriteAccess", module.aws.partition.partition), 40 | format("arn:%s:iam::aws:policy/AWSAppMeshEnvoyAccess", module.aws.partition.partition), 41 | ] 42 | } 43 | 44 | provider "helm" { 45 | kubernetes { 46 | host = module.eks.kubeauth.host 47 | token = module.eks.kubeauth.token 48 | cluster_ca_certificate = module.eks.kubeauth.ca 49 | } 50 | } 51 | 52 | ### kubernetes-addons 53 | module "base" { 54 | depends_on = [module.eks] 55 | source = "Young-ook/eks/aws//modules/helm-addons" 56 | version = "2.0.4" 57 | tags = var.tags 58 | addons = [ 59 | { 60 | ### for more details, https://cert-manager.io/docs/installation/helm/ 61 | repository = "https://charts.jetstack.io" 62 | name = "cert-manager" 63 | chart_name = "cert-manager" 64 | chart_version = "v1.11.2" 65 | namespace = "cert-manager" 66 | create_namespace = true 67 | values = { 68 | "installCRDs" = "true" 69 | } 70 | }, 71 | ] 72 | } 73 | 74 | module "awsctl" { 75 | depends_on = [module.base] 76 | source = "Young-ook/eks/aws//modules/helm-addons" 77 | version = "2.0.4" 78 | tags = var.tags 79 | addons = [ 80 | { 81 | repository = "https://aws.github.io/eks-charts" 82 | name = "aws-load-balancer-controller" 83 | chart_name = "aws-load-balancer-controller" 84 | namespace = "kube-system" 85 | serviceaccount = "aws-load-balancer-controller" 86 | values = { 87 | "clusterName" = module.eks.cluster.name 88 | "enableServiceMutatorWebhook" = "false" 89 | } 90 | oidc = module.eks.oidc 91 | policy_arns = [aws_iam_policy.lbc.arn] 92 | }, 93 | { 94 | repository = "https://aws.github.io/eks-charts" 95 | name = "aws-cloudwatch-metrics" 96 | chart_name = "aws-cloudwatch-metrics" 97 | namespace = "kube-system" 98 | serviceaccount = "aws-cloudwatch-metrics" 99 | values = { 100 | "clusterName" = module.eks.cluster.name 101 | } 102 | oidc = module.eks.oidc 103 | policy_arns = [ 104 | format("arn:%s:iam::aws:policy/CloudWatchAgentServerPolicy", module.aws.partition.partition) 105 | ] 106 | }, 107 | { 108 | repository = "https://aws.github.io/eks-charts" 109 | name = "aws-for-fluent-bit" 110 | 
chart_name = "aws-for-fluent-bit" 111 | namespace = "kube-system" 112 | serviceaccount = "aws-for-fluent-bit" 113 | values = { 114 | "cloudWatch.enabled" = true 115 | "cloudWatch.region" = module.aws.region.name 116 | "cloudWatch.logGroupName" = format("/aws/containerinsights/%s/application", module.eks.cluster.name) 117 | "firehose.enabled" = false 118 | "kinesis.enabled" = false 119 | "elasticsearch.enabled" = false 120 | } 121 | oidc = module.eks.oidc 122 | policy_arns = [ 123 | format("arn:%s:iam::aws:policy/CloudWatchAgentServerPolicy", module.aws.partition.partition) 124 | ] 125 | }, 126 | { 127 | repository = "${path.module}/charts/" 128 | name = "cluster-autoscaler" 129 | chart_name = "cluster-autoscaler" 130 | namespace = "kube-system" 131 | serviceaccount = "cluster-autoscaler" 132 | values = { 133 | "awsRegion" = module.aws.region.name 134 | "autoDiscovery.clusterName" = module.eks.cluster.name 135 | } 136 | oidc = module.eks.oidc 137 | policy_arns = [aws_iam_policy.cas.arn] 138 | }, 139 | { 140 | repository = "https://aws.github.io/eks-charts" 141 | name = "appmesh-controller" 142 | chart_name = "appmesh-controller" 143 | namespace = "kube-system" 144 | serviceaccount = "appmesh-controller" 145 | values = { 146 | "region" = module.aws.region.name 147 | "tracing.enabled" = true 148 | "tracing.provider" = "x-ray" 149 | } 150 | oidc = module.eks.oidc 151 | policy_arns = [ 152 | format("arn:%s:iam::aws:policy/AWSAppMeshEnvoyAccess", module.aws.partition.partition), 153 | format("arn:%s:iam::aws:policy/AWSCloudMapFullAccess", module.aws.partition.partition), 154 | format("arn:%s:iam::aws:policy/AWSXRayDaemonWriteAccess", module.aws.partition.partition), 155 | ] 156 | }, 157 | ] 158 | } 159 | 160 | resource "aws_iam_policy" "lbc" { 161 | name = "aws-loadbalancer-controller" 162 | tags = merge({ "terraform.io" = "managed" }, var.tags) 163 | description = format("Allow aws-load-balancer-controller to manage AWS resources") 164 | policy = file("${path.module}/policy.aws-loadbalancer-controller.json") 165 | } 166 | 167 | resource "aws_iam_policy" "cas" { 168 | name = "cluster-autoscaler" 169 | tags = merge({ "terraform.io" = "managed" }, var.tags) 170 | description = format("Allow cluster-autoscaler to manage AWS resources") 171 | policy = file("${path.module}/policy.cluster-autoscaler.json") 172 | } 173 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/platform/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | services = ["yelbv2", ] 3 | buildpath = "examples/aws-modernization-with-spinnaker/application" 4 | } 5 | 6 | ### platform/ecr 7 | module "ecr" { 8 | for_each = toset(local.services) 9 | source = "Young-ook/eks/aws//modules/ecr" 10 | version = "2.0.3" 11 | name = each.key 12 | scan_on_push = false 13 | } 14 | 15 | ### platform/ci 16 | module "ci" { 17 | for_each = toset(local.services) 18 | source = "Young-ook/spinnaker/aws//modules/codebuild" 19 | version = "2.3.1" 20 | name = join("-", [each.key, var.name]) 21 | tags = var.tags 22 | project = { 23 | source = { 24 | type = "GITHUB" 25 | location = "https://github.com/Young-ook/terraform-aws-spinnaker.git" 26 | buildspec = join("/", [local.buildpath, each.key, "buildspec.yml"]) 27 | version = "main" 28 | } 29 | environment = { 30 | image = "aws/codebuild/standard:4.0" 31 | privileged_mode = true 32 | environment_variables = { 33 | ARTIFACT_BUCKET = module.artifact.bucket.id 34 | REPOSITORY_URI = 
module.ecr[each.key].url 35 | APP_NAME = join("/", [local.buildpath, each.key]) 36 | } 37 | } 38 | } 39 | policy_arns = [ 40 | module.ecr[each.key].policy_arns["read"], 41 | module.ecr[each.key].policy_arns["write"], 42 | module.artifact.policy_arns["write"], 43 | ] 44 | log = { 45 | cloudwatch_logs = { 46 | group_name = module.logs["codebuild"].log_group.name 47 | } 48 | } 49 | } 50 | 51 | # artifact bucket 52 | module "artifact" { 53 | source = "Young-ook/sagemaker/aws//modules/s3" 54 | version = "0.3.4" 55 | name = join("-", ["artifact", var.name]) 56 | tags = var.tags 57 | force_destroy = true 58 | } 59 | 60 | ### platform/spinnaker 61 | module "spinnaker" { 62 | source = "Young-ook/spinnaker/aws" 63 | version = "2.2.3" 64 | name = "spinnaker" 65 | tags = var.tags 66 | region = var.aws_region 67 | azs = var.azs 68 | cidr = var.cidr 69 | kubernetes_version = var.kubernetes_version 70 | kubernetes_enable_ssm = true 71 | kubernetes_node_groups = [ 72 | { 73 | name = "default" 74 | min_size = 1 75 | max_size = 2 76 | desired_size = 1 77 | disk_size = "500" 78 | instance_type = "m5.xlarge" 79 | } 80 | ] 81 | kubernetes_policy_arns = [ 82 | module.artifact.policy_arns["read"], 83 | ] 84 | aurora_cluster = {} 85 | s3_bucket = { 86 | force_destroy = true 87 | } 88 | helm = { 89 | values = { 90 | "halyard.image.tag" = "1.44.0" 91 | } 92 | } 93 | assume_role_arn = [ 94 | module.spinnaker-managed.role_arn, 95 | ] 96 | } 97 | 98 | module "spinnaker-managed" { 99 | source = "Young-ook/spinnaker/aws//modules/spinnaker-managed-aws" 100 | version = "2.2.3" 101 | name = var.name 102 | trusted_role_arn = [module.spinnaker.role.arn] 103 | } 104 | 105 | ### platform/fis 106 | resource "aws_cloudwatch_metric_alarm" "svc" { 107 | alarm_name = join("-", [var.name, "svc", "alarm"]) 108 | alarm_description = "This metric monitors healthy backend pods of a service" 109 | tags = merge(var.tags) 110 | metric_name = "service_number_of_running_pods" 111 | comparison_operator = "LessThanThreshold" 112 | datapoints_to_alarm = 1 113 | evaluation_periods = 1 114 | namespace = "ContainerInsights" 115 | period = 30 116 | threshold = 1 117 | statistic = "Average" 118 | insufficient_data_actions = [] 119 | dimensions = { 120 | ClusterName = var.eks["cluster"].name 121 | Namespace = var.eks["cluster"].name 122 | Service = "yelb-ui" 123 | } 124 | } 125 | 126 | resource "aws_cloudwatch_metric_alarm" "cpu" { 127 | alarm_name = join("-", [var.name, "cpu", "alarm"]) 128 | alarm_description = "This metric monitors EC2 CPU utilization" 129 | tags = merge(var.tags) 130 | metric_name = "node_cpu_utilization" 131 | comparison_operator = "GreaterThanOrEqualToThreshold" 132 | datapoints_to_alarm = 1 133 | evaluation_periods = 1 134 | namespace = "ContainerInsights" 135 | period = 30 136 | threshold = 60 137 | statistic = "Average" 138 | insufficient_data_actions = [] 139 | dimensions = { 140 | ClusterName = var.eks["cluster"].name 141 | } 142 | } 143 | 144 | module "logs" { 145 | source = "Young-ook/eventbridge/aws//modules/logs" 146 | version = "0.0.9" 147 | for_each = { for l in [ 148 | { 149 | type = "codebuild" 150 | log_group = { 151 | namespace = "/aws/codebuild" 152 | retension_days = 3 153 | } 154 | }, 155 | { 156 | type = "fis" 157 | log_group = { 158 | namespace = "/aws/fis" 159 | retension_days = 3 160 | } 161 | }, 162 | ] : l.type => l } 163 | name = join("-", [var.name, each.key]) 164 | log_group = each.value.log_group 165 | } 166 | 167 | # drawing lots for choosing a subnet 168 | resource "random_integer" "az" { 169 | min =
0 170 | max = length(var.azs) - 1 171 | } 172 | 173 | module "awsfis" { 174 | source = "Young-ook/fis/aws" 175 | version = "1.0.1" 176 | name = var.name 177 | tags = var.tags 178 | experiments = [ 179 | { 180 | name = "terminate-eks-nodes" 181 | template = "${path.module}/templates/terminate-eks-nodes.tpl" 182 | params = { 183 | az = var.azs[random_integer.az.result] 184 | vpc = var.vpc.id 185 | nodegroup = var.eks["cluster"].data_plane.managed_node_groups.default.arn 186 | role = module.awsfis.role["fis"].arn 187 | logs = format("%s:*", module.logs["fis"].log_group.arn) 188 | alarm = jsonencode([ 189 | { 190 | source = "aws:cloudwatch:alarm" 191 | value = aws_cloudwatch_metric_alarm.svc.arn 192 | }, 193 | { 194 | source = "aws:cloudwatch:alarm" 195 | value = aws_cloudwatch_metric_alarm.cpu.arn 196 | }, 197 | ]) 198 | } 199 | }, 200 | ] 201 | } 202 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/application/yelbv2/yelb-appserver.rb: -------------------------------------------------------------------------------- 1 | ################################################################################# 2 | #### Massimo Re Ferre' #### 3 | #### www.it20.info #### 4 | #### Yelb, a simple web application #### 5 | ################################################################################# 6 | 7 | ################################################################################# 8 | #### yelb-appserver.rb is the app (ruby based) component of the Yelb app #### 9 | #### Yelb connects to a backend database for persistency #### 10 | ################################################################################# 11 | 12 | require 'sinatra' 13 | require 'aws-sdk-dynamodb' 14 | require_relative 'modules/pageviews' 15 | require_relative 'modules/getvotes' 16 | require_relative 'modules/restaurant' 17 | require_relative 'modules/hostname' 18 | require_relative 'modules/getstats' 19 | require_relative 'modules/restaurantsdbupdate' 20 | require_relative 'modules/restaurantsdbread' 21 | 22 | # the disabled protection is required when running in production behind an nginx reverse proxy 23 | # without this option, the angular application will spit a `forbidden` error message 24 | disable :protection 25 | 26 | # the system variable RACK_ENV controls which environment you are enabling 27 | # if you choose 'custom' with RACK_ENV, all system variables in this section need to be set before launching the yelb-appserver application 28 | # the DDB/Region variables in test/development are there for convenience (there is no logic to avoid exceptions when reading these variables) 29 | # there is no expectation of being able to use DDB for test/dev 30 | 31 | configure :production do 32 | set :redishost, "redis-server" 33 | set :port, 4567 34 | set :yelbdbhost => "yelb-db" 35 | set :yelbdbport => 5432 36 | set :yelbddbrestaurants => ENV['YELB_DDB_RESTAURANTS'] 37 | set :yelbddbcache => ENV['YELB_DDB_CACHE'] 38 | set :awsregion => ENV['AWS_REGION'] 39 | end 40 | configure :test do 41 | set :redishost, "redis-server" 42 | set :port, 4567 43 | set :yelbdbhost => "yelb-db" 44 | set :yelbdbport => 5432 45 | set :yelbddbrestaurants => ENV['YELB_DDB_RESTAURANTS'] 46 | set :yelbddbcache => ENV['YELB_DDB_CACHE'] 47 | set :awsregion => ENV['AWS_REGION'] 48 | end 49 | configure :development do 50 | set :redishost, "localhost" 51 | set :port, 4567 52 | set :yelbdbhost => "localhost" 53 | set :yelbdbport => 5432 54 | set :yelbddbrestaurants =>
ENV['YELB_DDB_RESTAURANTS'] 55 | set :yelbddbcache => ENV['YELB_DDB_CACHE'] 56 | set :awsregion => ENV['AWS_REGION'] 57 | end 58 | configure :custom do 59 | set :redishost, ENV['REDIS_SERVER_ENDPOINT'] 60 | set :port, 4567 61 | set :yelbdbhost => ENV['YELB_DB_SERVER_ENDPOINT'] 62 | set :yelbdbport => 5432 63 | set :yelbddbrestaurants => ENV['YELB_DDB_RESTAURANTS'] 64 | set :yelbddbcache => ENV['YELB_DDB_CACHE'] 65 | set :awsregion => ENV['AWS_REGION'] 66 | 67 | end 68 | 69 | options "*" do 70 | response.headers["Allow"] = "HEAD,GET,PUT,DELETE,OPTIONS" 71 | 72 | # Needed for AngularJS 73 | response.headers["Access-Control-Allow-Headers"] = "X-Requested-With, X-HTTP-Method-Override, Content-Type, Cache-Control, Accept" 74 | 75 | halt HTTP_STATUS_OK 76 | end 77 | 78 | $yelbdbhost = settings.yelbdbhost 79 | $redishost = settings.redishost 80 | # the yelbddbcache, yelbdbrestaurants and the awsregion variables are only intended to use in the serverless scenario (DDB) 81 | if (settings.yelbddbcache != nil) then $yelbddbcache = settings.yelbddbcache end 82 | if (settings.yelbddbrestaurants != nil) then $yelbddbrestaurants = settings.yelbddbrestaurants end 83 | if (settings.awsregion != nil) then $awsregion = settings.awsregion end 84 | 85 | get '/api/pageviews' do 86 | headers 'Access-Control-Allow-Origin' => '*' 87 | headers 'Access-Control-Allow-Headers' => 'Authorization,Accepts,Content-Type,X-CSRF-Token,X-Requested-With' 88 | headers 'Access-Control-Allow-Methods' => 'GET,POST,PUT,DELETE,OPTIONS' 89 | content_type 'application/json' 90 | @pageviews = pageviews() 91 | end #get /api/pageviews 92 | 93 | get '/api/hostname' do 94 | headers 'Access-Control-Allow-Origin' => '*' 95 | headers 'Access-Control-Allow-Headers' => 'Authorization,Accepts,Content-Type,X-CSRF-Token,X-Requested-With' 96 | headers 'Access-Control-Allow-Methods' => 'GET,POST,PUT,DELETE,OPTIONS' 97 | content_type 'application/json' 98 | @hostname = hostname() 99 | end #get /api/hostname 100 | 101 | get '/api/getstats' do 102 | headers 'Access-Control-Allow-Origin' => '*' 103 | headers 'Access-Control-Allow-Headers' => 'Authorization,Accepts,Content-Type,X-CSRF-Token,X-Requested-With' 104 | headers 'Access-Control-Allow-Methods' => 'GET,POST,PUT,DELETE,OPTIONS' 105 | content_type 'application/json' 106 | @stats = getstats() 107 | end #get /api/getstats 108 | 109 | get '/api/getvotes' do 110 | headers 'Access-Control-Allow-Origin' => '*' 111 | headers 'Access-Control-Allow-Headers' => 'Authorization,Accepts,Content-Type,X-CSRF-Token,X-Requested-With' 112 | headers 'Access-Control-Allow-Methods' => 'GET,POST,PUT,DELETE,OPTIONS' 113 | content_type 'application/json' 114 | @votes = getvotes() 115 | end #get /api/getvotes 116 | 117 | get '/api/ihop' do 118 | headers 'Access-Control-Allow-Origin' => '*' 119 | headers 'Access-Control-Allow-Headers' => 'Authorization,Accepts,Content-Type,X-CSRF-Token,X-Requested-With' 120 | headers 'Access-Control-Allow-Methods' => 'GET,POST,PUT,DELETE,OPTIONS' 121 | @ihop = restaurantsupdate("ihop") 122 | end #get /api/ihop 123 | 124 | get '/api/chipotle' do 125 | headers 'Access-Control-Allow-Origin' => '*' 126 | headers 'Access-Control-Allow-Headers' => 'Authorization,Accepts,Content-Type,X-CSRF-Token,X-Requested-With' 127 | headers 'Access-Control-Allow-Methods' => 'GET,POST,PUT,DELETE,OPTIONS' 128 | @chipotle = restaurantsupdate("chipotle") 129 | end #get /api/chipotle 130 | 131 | get '/api/outback' do 132 | headers 'Access-Control-Allow-Origin' => '*' 133 | headers 'Access-Control-Allow-Headers' => 
'Authorization,Accepts,Content-Type,X-CSRF-Token,X-Requested-With' 134 | headers 'Access-Control-Allow-Methods' => 'GET,POST,PUT,DELETE,OPTIONS' 135 | @outback = restaurantsupdate("outback") 136 | end #get /api/outback 137 | 138 | get '/api/bucadibeppo' do 139 | headers 'Access-Control-Allow-Origin' => '*' 140 | headers 'Access-Control-Allow-Headers' => 'Authorization,Accepts,Content-Type,X-CSRF-Token,X-Requested-With' 141 | headers 'Access-Control-Allow-Methods' => 'GET,POST,PUT,DELETE,OPTIONS' 142 | @bucadibeppo = restaurantsupdate("bucadibeppo") 143 | end #get /api/bucadibeppo 144 | -------------------------------------------------------------------------------- /main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | aws_enabled = can(var.features.eks.role_arns) ? ((length(var.features.eks.role_arns) > 0) ? true : false) : false 3 | aurora_enabled = try(var.features.aurora.enabled, false) ? true : false 4 | s3_enabled = try(var.features.s3.enabled, false) ? true : false 5 | ssm_enabled = try(var.features.eks.ssm_enabled, false) ? true : false 6 | spinnaker_storage = local.s3_enabled ? { 7 | "minio.enabled" = "false" 8 | "s3.enabled" = "true" 9 | "s3.bucket" = module.s3["enabled"].bucket.id 10 | "s3.region" = module.aws.region.name 11 | } : {} 12 | } 13 | 14 | ### security/policy 15 | resource "aws_iam_policy" "bake-ami" { 16 | name = join("-", [local.name, "bake-ami"]) 17 | policy = jsonencode({ 18 | Version = "2012-10-17" 19 | Statement = [{ 20 | Action = [ 21 | "iam:PassRole", 22 | "ec2:AttachVolume", 23 | "ec2:AuthorizeSecurityGroupIngress", 24 | "ec2:CopyImage", 25 | "ec2:CreateImage", 26 | "ec2:CreateKeypair", 27 | "ec2:CreateSecurityGroup", 28 | "ec2:CreateSnapshot", 29 | "ec2:CreateTags", 30 | "ec2:CreateVolume", 31 | "ec2:DeleteKeyPair", 32 | "ec2:DeleteSecurityGroup", 33 | "ec2:DeleteSnapshot", 34 | "ec2:DeleteVolume", 35 | "ec2:DeregisterImage", 36 | "ec2:Describe*", 37 | "ec2:DetachVolume", 38 | "ec2:GetPasswordData", 39 | "ec2:ModifyImageAttribute", 40 | "ec2:ModifyInstanceAttribute", 41 | "ec2:ModifySnapshotAttribute", 42 | "ec2:RegisterImage", 43 | "ec2:RunInstances", 44 | "ec2:StopInstances", 45 | "ec2:TerminateInstances", 46 | "ec2:RequestSpotInstances", 47 | "ec2:CancelSpotInstanceRequests", 48 | "ec2:DescribeSpotInstanceRequests", 49 | "ec2:DescribeSpotPriceHistory", 50 | ] 51 | Effect = "Allow" 52 | Resource = ["*"] 53 | }] 54 | }) 55 | } 56 | 57 | ### security/policy 58 | ### Allow spinnaker to assume cross AWS account iam roles 59 | resource "aws_iam_policy" "assume-roles" { 60 | for_each = local.aws_enabled ? toset(["enabled"]) : [] 61 | name = join("-", [local.name, "assume"]) 62 | policy = jsonencode({ 63 | Version = "2012-10-17" 64 | Statement = [{ 65 | Action = "sts:AssumeRole" 66 | Effect = "Allow" 67 | Resource = flatten([try(var.features.eks.role_arns, [])]) 68 | }] 69 | }) 70 | } 71 | 72 | ### security/policy 73 | module "irsa" { 74 | source = "Young-ook/eks/aws//modules/irsa" 75 | version = "2.0.4" 76 | tags = merge(local.default-tags, var.tags) 77 | name = "spinnaker" 78 | namespace = "spinnaker" 79 | serviceaccount = "default" 80 | oidc_url = module.eks.oidc.url 81 | oidc_arn = module.eks.oidc.arn 82 | policy_arns = flatten(concat([ 83 | aws_iam_policy.bake-ami.arn, 84 | ], 85 | local.aws_enabled ? [ 86 | aws_iam_policy.assume-roles["enabled"].arn, 87 | ] : [], 88 | local.s3_enabled ? 
[ 89 | module.s3["enabled"].policy_arns.read, 90 | module.s3["enabled"].policy_arns.write, 91 | ] : [], 92 | )) 93 | } 94 | 95 | ### application/kubernetes 96 | module "eks" { 97 | source = "Young-ook/eks/aws" 98 | version = "2.0.4" 99 | name = local.name 100 | tags = merge(local.default-tags, var.tags) 101 | subnets = try(var.features.vpc.subnets, []) 102 | enable_ssm = try(var.features.eks.ssm_enabled, local.default_eks_cluster["ssm_enabled"]) 103 | enabled_cluster_log_types = try(var.features.eks.cluster_logs, local.default_eks_cluster["cluster_logs"]) 104 | kubernetes_version = try(var.features.eks.version, local.default_eks_cluster["version"]) 105 | managed_node_groups = [local.default_eks_node_group] 106 | } 107 | 108 | ### database/aurora 109 | module "rds" { 110 | for_each = local.aurora_enabled ? toset(["enabled"]) : [] 111 | source = "Young-ook/aurora/aws" 112 | version = "2.0.0" 113 | name = local.name 114 | vpc = try(var.features.vpc.id, null) 115 | subnets = try(var.features.vpc.subnets, []) 116 | cidrs = try(var.features.vpc.cidrs, []) 117 | aurora_cluster = local.default_aurora_cluster 118 | aurora_instances = [local.default_aurora_instance] 119 | } 120 | 121 | ### storage/s3 122 | module "s3" { 123 | for_each = local.s3_enabled ? toset(["enabled"]) : [] 124 | source = "Young-ook/sagemaker/aws//modules/s3" 125 | version = "0.3.4" 126 | name = local.name 127 | tags = var.tags 128 | force_destroy = try(var.features.s3.force_destroy, local.default_s3_bucket["force_destroy"]) 129 | versioning = try(var.features.s3.versioning, local.default_s3_bucket["versioning"]) 130 | } 131 | 132 | ### kubernetes-addons 133 | provider "helm" { 134 | alias = "spinnaker" 135 | kubernetes { 136 | host = module.eks.kubeauth.host 137 | token = module.eks.kubeauth.token 138 | cluster_ca_certificate = module.eks.kubeauth.ca 139 | } 140 | } 141 | 142 | module "ctl" { 143 | depends_on = [module.eks] 144 | source = "Young-ook/eks/aws//modules/eks-addons" 145 | version = "2.0.4" 146 | tags = merge(local.default-tags, var.tags) 147 | addons = [ 148 | { 149 | name = "aws-ebs-csi-driver" 150 | namespace = "kube-system" 151 | serviceaccount = "ebs-csi-controller-sa" 152 | eks_name = module.eks.cluster.name 153 | oidc = module.eks.oidc 154 | policy_arns = [ 155 | format("arn:%s:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy", module.aws.partition.partition), 156 | ] 157 | }, 158 | ] 159 | } 160 | 161 | module "helm" { 162 | depends_on = [module.ctl] 163 | providers = { helm = helm.spinnaker } 164 | source = "Young-ook/eks/aws//modules/helm-addons" 165 | version = "2.0.6" 166 | tags = merge(local.default-tags, var.tags) 167 | addons = [ 168 | { 169 | repository = "https://kubernetes-sigs.github.io/metrics-server/" 170 | name = "metrics-server" 171 | chart_name = "metrics-server" 172 | namespace = "kube-system" 173 | serviceaccount = "metrics-server" 174 | values = { 175 | "args[0]" = "--kubelet-preferred-address-types=InternalIP" 176 | } 177 | }, 178 | { 179 | repository = "https://prometheus-community.github.io/helm-charts" 180 | name = "prometheus" 181 | chart_name = "prometheus" 182 | namespace = "prometheus" 183 | serviceaccount = "prometheus" 184 | values = { 185 | "alertmanager.persistentVolume.storageClass" = "gp2" 186 | "server.persistentVolume.storageClass" = "gp2" 187 | } 188 | }, 189 | { 190 | repository = local.default_helm["repository"] 191 | name = local.default_helm["name"] 192 | chart_name = local.default_helm["chart_name"] 193 | chart_version = local.default_helm["chart_version"] 194 |
namespace = local.default_helm["namespace"] 195 | timeout = local.default_helm["timeout"] 196 | dependency_update = local.default_helm["dependency_update"] 197 | cleanup_on_fail = local.default_helm["cleanup_on_fail"] 198 | create_namespace = true 199 | values = merge( 200 | local.spinnaker_storage, 201 | { 202 | "minio.enabled" = local.s3_enabled ? "false" : "true" 203 | "minio.rootUser" = "spinnakeradmin" 204 | "minio.rootPassword" = "spinnakeradmin" 205 | }) 206 | }, 207 | ] 208 | } 209 | -------------------------------------------------------------------------------- /modules/spinnaker-managed-ecs/main.tf: -------------------------------------------------------------------------------- 1 | ### aws partitions 2 | module "aws" { 3 | source = "Young-ook/spinnaker/aws//modules/aws-partitions" 4 | } 5 | 6 | ## features 7 | locals { 8 | node_groups_enabled = (var.node_groups != null ? ((length(var.node_groups) > 0) ? true : false) : false) 9 | } 10 | 11 | ### cluster 12 | resource "aws_ecs_cluster" "cp" { 13 | name = local.name 14 | tags = merge(local.default-tags, var.tags) 15 | 16 | dynamic "setting" { 17 | for_each = { 18 | containerInsights = var.container_insights_enabled ? "enabled" : "disabled" 19 | } 20 | content { 21 | name = setting.key 22 | value = setting.value 23 | } 24 | } 25 | 26 | depends_on = [ 27 | aws_ecs_capacity_provider.ng, 28 | ] 29 | } 30 | 31 | ### node groups (ng) 32 | ### security/policy 33 | resource "aws_iam_role" "ng" { 34 | count = local.node_groups_enabled ? 1 : 0 35 | name = format("%s-ng", local.name) 36 | tags = merge(local.default-tags, var.tags) 37 | assume_role_policy = jsonencode({ 38 | Statement = [{ 39 | Action = "sts:AssumeRole" 40 | Effect = "Allow" 41 | Principal = { 42 | Service = [format("ec2.%s", module.aws.partition.dns_suffix)] 43 | } 44 | }] 45 | Version = "2012-10-17" 46 | }) 47 | } 48 | 49 | resource "aws_iam_instance_profile" "ng" { 50 | count = local.node_groups_enabled ? 1 : 0 51 | name = format("%s-ng", local.name) 52 | role = aws_iam_role.ng.0.name 53 | } 54 | 55 | resource "aws_iam_role_policy_attachment" "ecs-ng" { 56 | count = local.node_groups_enabled ? 1 : 0 57 | policy_arn = format("arn:%s:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role", module.aws.partition.partition) 58 | role = aws_iam_role.ng.0.name 59 | } 60 | 61 | resource "aws_iam_role_policy_attachment" "ecr-read" { 62 | count = local.node_groups_enabled ? 1 : 0 63 | policy_arn = format("arn:%s:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", module.aws.partition.partition) 64 | role = aws_iam_role.ng.0.name 65 | } 66 | 67 | ### ecs-optimized linux 68 | data "aws_ami" "ecs" { 69 | for_each = { for ng in var.node_groups : ng.name => ng if local.node_groups_enabled } 70 | owners = ["amazon"] 71 | most_recent = true 72 | 73 | filter { 74 | name = "name" 75 | values = ["amzn2-ami-ecs-hvm-*"] 76 | } 77 | filter { 78 | name = "architecture" 79 | values = [length(regexall("ARM", lookup(each.value, "ami_type", "AL2_x86_64"))) > 0 ? 
"arm64" : "x86_64"] 80 | } 81 | } 82 | 83 | data "template_cloudinit_config" "boot" { 84 | for_each = { for ng in var.node_groups : ng.name => ng if local.node_groups_enabled } 85 | base64_encode = true 86 | gzip = false 87 | 88 | part { 89 | content_type = "text/x-shellscript" 90 | content = <<-EOT 91 | #!/bin/bash -v 92 | echo ECS_CLUSTER=${local.name} >> /etc/ecs/ecs.config 93 | start ecs 94 | EOT 95 | } 96 | } 97 | 98 | resource "aws_launch_template" "ng" { 99 | for_each = { for ng in var.node_groups : ng.name => ng if local.node_groups_enabled } 100 | name = format("ecs-%s", uuid()) 101 | tags = merge(local.default-tags, var.tags) 102 | image_id = data.aws_ami.ecs[each.key].id 103 | user_data = data.template_cloudinit_config.boot[each.key].rendered 104 | instance_type = lookup(each.value, "instance_type", "t3.medium") 105 | 106 | iam_instance_profile { 107 | arn = aws_iam_instance_profile.ng.0.arn 108 | } 109 | 110 | block_device_mappings { 111 | device_name = "/dev/xvda" 112 | ebs { 113 | volume_size = lookup(each.value, "disk_size", "30") 114 | volume_type = "gp2" 115 | delete_on_termination = true 116 | } 117 | } 118 | 119 | tag_specifications { 120 | resource_type = "instance" 121 | tags = merge(local.default-tags, var.tags) 122 | } 123 | 124 | lifecycle { 125 | create_before_destroy = true 126 | ignore_changes = [name] 127 | } 128 | } 129 | 130 | resource "aws_autoscaling_group" "ng" { 131 | for_each = { for ng in var.node_groups : ng.name => ng if local.node_groups_enabled } 132 | name = format("ecs-%s", uuid()) 133 | vpc_zone_identifier = var.subnets 134 | max_size = lookup(each.value, "max_size", 3) 135 | min_size = lookup(each.value, "min_size", 1) 136 | desired_capacity = lookup(each.value, "desired_size", 1) 137 | force_delete = true 138 | protect_from_scale_in = var.termination_protection 139 | termination_policies = ["Default"] 140 | enabled_metrics = [ 141 | "GroupMinSize", 142 | "GroupMaxSize", 143 | "GroupDesiredCapacity", 144 | "GroupInServiceInstances", 145 | "GroupPendingInstances", 146 | "GroupStandbyInstances", 147 | "GroupTerminatingInstances", 148 | "GroupTotalInstances", 149 | ] 150 | 151 | mixed_instances_policy { 152 | launch_template { 153 | launch_template_specification { 154 | launch_template_id = aws_launch_template.ng[each.key].id 155 | version = aws_launch_template.ng[each.key].latest_version 156 | } 157 | 158 | dynamic "override" { 159 | for_each = lookup(each.value, "launch_override", []) 160 | content { 161 | instance_type = lookup(override.value, "instance_type", null) 162 | weighted_capacity = lookup(override.value, "weighted_capacity", null) 163 | } 164 | } 165 | } 166 | 167 | dynamic "instances_distribution" { 168 | for_each = { for key, val in each.value : key => val if key == "instances_distribution" } 169 | content { 170 | on_demand_allocation_strategy = lookup(instances_distribution.value, "on_demand_allocation_strategy", null) 171 | on_demand_base_capacity = lookup(instances_distribution.value, "on_demand_base_capacity", null) 172 | on_demand_percentage_above_base_capacity = lookup(instances_distribution.value, "on_demand_percentage_above_base_capacity", null) 173 | spot_allocation_strategy = lookup(instances_distribution.value, "spot_allocation_strategy", null) 174 | spot_instance_pools = lookup(instances_distribution.value, "spot_instance_pools", null) 175 | spot_max_price = lookup(instances_distribution.value, "spot_max_price", null) 176 | } 177 | } 178 | } 179 | 180 | dynamic "tag" { 181 | for_each = local.ecs-tag 182 | content { 183 | 
key = tag.key 184 | value = tag.value 185 | propagate_at_launch = true 186 | } 187 | } 188 | 189 | lifecycle { 190 | create_before_destroy = true 191 | ignore_changes = [desired_capacity, name] 192 | } 193 | 194 | depends_on = [ 195 | aws_iam_role.ng, 196 | aws_iam_role_policy_attachment.ecs-ng, 197 | aws_iam_role_policy_attachment.ecr-read, 198 | aws_launch_template.ng, 199 | ] 200 | } 201 | 202 | #### cluster/capacity 203 | resource "aws_ecs_capacity_provider" "ng" { 204 | for_each = { for ng in var.node_groups : ng.name => ng if local.node_groups_enabled } 205 | name = each.key 206 | tags = merge(local.default-tags, var.tags) 207 | 208 | auto_scaling_group_provider { 209 | auto_scaling_group_arn = aws_autoscaling_group.ng[each.key].arn 210 | managed_termination_protection = var.termination_protection ? "ENABLED" : "DISABLED" 211 | 212 | managed_scaling { 213 | maximum_scaling_step_size = lookup(each.value, "max_scaling_step_size", null) 214 | minimum_scaling_step_size = lookup(each.value, "min_scaling_step_size", null) 215 | status = "ENABLED" 216 | target_capacity = lookup(each.value, "target_capacity", 100) 217 | } 218 | } 219 | } 220 | 221 | resource "aws_ecs_cluster_capacity_providers" "ng" { 222 | cluster_name = aws_ecs_cluster.cp.name 223 | capacity_providers = local.node_groups_enabled ? keys(aws_ecs_capacity_provider.ng) : ["FARGATE"] 224 | } 225 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/foundation/policy.aws-loadbalancer-controller.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Action": [ 7 | "iam:CreateServiceLinkedRole", 8 | "ec2:DescribeAccountAttributes", 9 | "ec2:DescribeAddresses", 10 | "ec2:DescribeAvailabilityZones", 11 | "ec2:DescribeInternetGateways", 12 | "ec2:DescribeVpcs", 13 | "ec2:DescribeSubnets", 14 | "ec2:DescribeSecurityGroups", 15 | "ec2:DescribeInstances", 16 | "ec2:DescribeNetworkInterfaces", 17 | "ec2:DescribeTags", 18 | "ec2:GetCoipPoolUsage", 19 | "ec2:DescribeCoipPools", 20 | "elasticloadbalancing:DescribeLoadBalancers", 21 | "elasticloadbalancing:DescribeLoadBalancerAttributes", 22 | "elasticloadbalancing:DescribeListeners", 23 | "elasticloadbalancing:DescribeListenerCertificates", 24 | "elasticloadbalancing:DescribeSSLPolicies", 25 | "elasticloadbalancing:DescribeRules", 26 | "elasticloadbalancing:DescribeTargetGroups", 27 | "elasticloadbalancing:DescribeTargetGroupAttributes", 28 | "elasticloadbalancing:DescribeTargetHealth", 29 | "elasticloadbalancing:DescribeTags" 30 | ], 31 | "Resource": "*" 32 | }, 33 | { 34 | "Effect": "Allow", 35 | "Action": [ 36 | "cognito-idp:DescribeUserPoolClient", 37 | "acm:ListCertificates", 38 | "acm:DescribeCertificate", 39 | "iam:ListServerCertificates", 40 | "iam:GetServerCertificate", 41 | "waf-regional:GetWebACL", 42 | "waf-regional:GetWebACLForResource", 43 | "waf-regional:AssociateWebACL", 44 | "waf-regional:DisassociateWebACL", 45 | "wafv2:GetWebACL", 46 | "wafv2:GetWebACLForResource", 47 | "wafv2:AssociateWebACL", 48 | "wafv2:DisassociateWebACL", 49 | "shield:GetSubscriptionState", 50 | "shield:DescribeProtection", 51 | "shield:CreateProtection", 52 | "shield:DeleteProtection" 53 | ], 54 | "Resource": "*" 55 | }, 56 | { 57 | "Effect": "Allow", 58 | "Action": [ 59 | "ec2:AuthorizeSecurityGroupIngress", 60 | "ec2:RevokeSecurityGroupIngress" 61 | ], 62 | "Resource": "*" 63 | }, 64 | { 65 | "Effect": 
"Allow", 66 | "Action": [ 67 | "ec2:CreateSecurityGroup" 68 | ], 69 | "Resource": "*" 70 | }, 71 | { 72 | "Effect": "Allow", 73 | "Action": [ 74 | "ec2:CreateTags" 75 | ], 76 | "Resource": "arn:aws:ec2:*:*:security-group/*", 77 | "Condition": { 78 | "StringEquals": { 79 | "ec2:CreateAction": "CreateSecurityGroup" 80 | }, 81 | "Null": { 82 | "aws:RequestTag/elbv2.k8s.aws/cluster": "false" 83 | } 84 | } 85 | }, 86 | { 87 | "Effect": "Allow", 88 | "Action": [ 89 | "ec2:CreateTags", 90 | "ec2:DeleteTags" 91 | ], 92 | "Resource": "arn:aws:ec2:*:*:security-group/*", 93 | "Condition": { 94 | "Null": { 95 | "aws:RequestTag/elbv2.k8s.aws/cluster": "true", 96 | "aws:ResourceTag/elbv2.k8s.aws/cluster": "false" 97 | } 98 | } 99 | }, 100 | { 101 | "Effect": "Allow", 102 | "Action": [ 103 | "ec2:AuthorizeSecurityGroupIngress", 104 | "ec2:RevokeSecurityGroupIngress", 105 | "ec2:DeleteSecurityGroup" 106 | ], 107 | "Resource": "*", 108 | "Condition": { 109 | "Null": { 110 | "aws:ResourceTag/elbv2.k8s.aws/cluster": "false" 111 | } 112 | } 113 | }, 114 | { 115 | "Effect": "Allow", 116 | "Action": [ 117 | "elasticloadbalancing:CreateLoadBalancer", 118 | "elasticloadbalancing:CreateTargetGroup" 119 | ], 120 | "Resource": "*", 121 | "Condition": { 122 | "Null": { 123 | "aws:RequestTag/elbv2.k8s.aws/cluster": "false" 124 | } 125 | } 126 | }, 127 | { 128 | "Effect": "Allow", 129 | "Action": [ 130 | "elasticloadbalancing:CreateListener", 131 | "elasticloadbalancing:DeleteListener", 132 | "elasticloadbalancing:CreateRule", 133 | "elasticloadbalancing:DeleteRule" 134 | ], 135 | "Resource": "*" 136 | }, 137 | { 138 | "Effect": "Allow", 139 | "Action": [ 140 | "elasticloadbalancing:AddTags", 141 | "elasticloadbalancing:RemoveTags" 142 | ], 143 | "Resource": [ 144 | "arn:aws:elasticloadbalancing:*:*:targetgroup/*/*", 145 | "arn:aws:elasticloadbalancing:*:*:loadbalancer/net/*/*", 146 | "arn:aws:elasticloadbalancing:*:*:loadbalancer/app/*/*" 147 | ], 148 | "Condition": { 149 | "Null": { 150 | "aws:RequestTag/elbv2.k8s.aws/cluster": "true", 151 | "aws:ResourceTag/elbv2.k8s.aws/cluster": "false" 152 | } 153 | } 154 | }, 155 | { 156 | "Effect": "Allow", 157 | "Action": [ 158 | "elasticloadbalancing:AddTags", 159 | "elasticloadbalancing:RemoveTags" 160 | ], 161 | "Resource": [ 162 | "arn:aws:elasticloadbalancing:*:*:listener/net/*/*/*", 163 | "arn:aws:elasticloadbalancing:*:*:listener/app/*/*/*", 164 | "arn:aws:elasticloadbalancing:*:*:listener-rule/net/*/*/*", 165 | "arn:aws:elasticloadbalancing:*:*:listener-rule/app/*/*/*" 166 | ] 167 | }, 168 | { 169 | "Effect": "Allow", 170 | "Action": [ 171 | "elasticloadbalancing:ModifyLoadBalancerAttributes", 172 | "elasticloadbalancing:SetIpAddressType", 173 | "elasticloadbalancing:SetSecurityGroups", 174 | "elasticloadbalancing:SetSubnets", 175 | "elasticloadbalancing:DeleteLoadBalancer", 176 | "elasticloadbalancing:ModifyTargetGroup", 177 | "elasticloadbalancing:ModifyTargetGroupAttributes", 178 | "elasticloadbalancing:DeleteTargetGroup" 179 | ], 180 | "Resource": "*", 181 | "Condition": { 182 | "Null": { 183 | "aws:ResourceTag/elbv2.k8s.aws/cluster": "false" 184 | } 185 | } 186 | }, 187 | { 188 | "Effect": "Allow", 189 | "Action": [ 190 | "elasticloadbalancing:RegisterTargets", 191 | "elasticloadbalancing:DeregisterTargets" 192 | ], 193 | "Resource": "arn:aws:elasticloadbalancing:*:*:targetgroup/*/*" 194 | }, 195 | { 196 | "Effect": "Allow", 197 | "Action": [ 198 | "elasticloadbalancing:SetWebAcl", 199 | "elasticloadbalancing:ModifyListener", 200 | 
"elasticloadbalancing:AddListenerCertificates", 201 | "elasticloadbalancing:RemoveListenerCertificates", 202 | "elasticloadbalancing:ModifyRule" 203 | ], 204 | "Resource": "*" 205 | } 206 | ] 207 | } 208 | -------------------------------------------------------------------------------- /examples/aws-modernization-with-spinnaker/application/yelbv2/manifests/1.app-v1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: '${#currentStage()[''context''][''namespaceOverride'']}' 5 | labels: 6 | mesh: yelb-mesh 7 | appmesh.k8s.aws/sidecarInjectorWebhook: enabled 8 | --- 9 | apiVersion: v1 10 | kind: Service 11 | metadata: 12 | name: redis-server 13 | labels: 14 | app: redis-server 15 | tier: cache 16 | spec: 17 | type: ClusterIP 18 | ports: 19 | - port: 6379 20 | selector: 21 | app: redis-server 22 | tier: cache 23 | --- 24 | apiVersion: v1 25 | kind: Service 26 | metadata: 27 | name: yelb-db 28 | labels: 29 | app: yelb-db 30 | tier: backenddb 31 | spec: 32 | type: ClusterIP 33 | ports: 34 | - port: 5432 35 | selector: 36 | app: yelb-db 37 | tier: backenddb 38 | --- 39 | apiVersion: v1 40 | kind: Service 41 | metadata: 42 | name: yelb-appserver 43 | labels: 44 | app: yelb-appserver 45 | tier: middletier 46 | spec: 47 | type: ClusterIP 48 | ports: 49 | - port: 4567 50 | selector: 51 | app: yelb-appserver 52 | tier: middletier 53 | --- 54 | apiVersion: v1 55 | kind: Service 56 | metadata: 57 | name: yelb-ui 58 | labels: 59 | app: yelb-ui 60 | tier: frontend 61 | spec: 62 | type: NodePort 63 | ports: 64 | - port: 80 65 | protocol: TCP 66 | selector: 67 | app: yelb-ui 68 | tier: frontend 69 | --- 70 | apiVersion: networking.k8s.io/v1 71 | kind: Ingress 72 | metadata: 73 | name: yelb-ingress 74 | annotations: 75 | kubernetes.io/ingress.class: alb 76 | alb.ingress.kubernetes.io/scheme: internet-facing 77 | alb.ingress.kubernetes.io/target-type: ip 78 | spec: 79 | rules: 80 | - http: 81 | paths: 82 | - path: / 83 | pathType: Prefix 84 | backend: 85 | service: 86 | name: yelb-ui 87 | port: 88 | number: 80 89 | --- 90 | apiVersion: apps/v1 91 | kind: Deployment 92 | metadata: 93 | name: yelb-ui 94 | spec: 95 | replicas: 1 96 | selector: 97 | matchLabels: 98 | app: yelb-ui 99 | tier: frontend 100 | template: 101 | metadata: 102 | labels: 103 | app: yelb-ui 104 | tier: frontend 105 | spec: 106 | containers: 107 | - name: yelb-ui 108 | image: mreferre/yelb-ui:0.7 109 | ports: 110 | - containerPort: 80 111 | --- 112 | apiVersion: apps/v1 113 | kind: Deployment 114 | metadata: 115 | name: redis-server 116 | spec: 117 | selector: 118 | matchLabels: 119 | app: redis-server 120 | tier: cache 121 | replicas: 1 122 | template: 123 | metadata: 124 | labels: 125 | app: redis-server 126 | tier: cache 127 | spec: 128 | containers: 129 | - name: redis-server 130 | image: redis:4.0.2 131 | ports: 132 | - containerPort: 6379 133 | --- 134 | apiVersion: apps/v1 135 | kind: Deployment 136 | metadata: 137 | name: yelb-db 138 | spec: 139 | replicas: 1 140 | selector: 141 | matchLabels: 142 | app: yelb-db 143 | tier: backenddb 144 | template: 145 | metadata: 146 | labels: 147 | app: yelb-db 148 | tier: backenddb 149 | spec: 150 | containers: 151 | - name: yelb-db 152 | image: mreferre/yelb-db:0.5 153 | ports: 154 | - containerPort: 5432 155 | --- 156 | apiVersion: apps/v1 157 | kind: Deployment 158 | metadata: 159 | name: yelb-appserver 160 | spec: 161 | replicas: 1 162 | selector: 163 | matchLabels: 164 | app: yelb-appserver 165 | 
tier: middletier 166 | template: 167 | metadata: 168 | labels: 169 | app: yelb-appserver 170 | tier: middletier 171 | spec: 172 | containers: 173 | - name: yelb-appserver 174 | image: mreferre/yelb-appserver:0.5 175 | ports: 176 | - containerPort: 4567 177 | --- 178 | apiVersion: appmesh.k8s.aws/v1beta2 179 | kind: Mesh 180 | metadata: 181 | name: yelb-mesh 182 | spec: 183 | namespaceSelector: 184 | matchLabels: 185 | mesh: yelb-mesh 186 | --- 187 | apiVersion: appmesh.k8s.aws/v1beta2 188 | kind: VirtualNode 189 | metadata: 190 | name: yelb-db 191 | spec: 192 | awsName: yelb-db-virtual-node 193 | podSelector: 194 | matchLabels: 195 | app: yelb-db 196 | listeners: 197 | - portMapping: 198 | port: 5432 199 | protocol: tcp 200 | serviceDiscovery: 201 | dns: 202 | hostname: >- 203 | yelb-db.${#currentStage()['context']['namespaceOverride']}.svc.cluster.local 204 | --- 205 | apiVersion: appmesh.k8s.aws/v1beta2 206 | kind: VirtualService 207 | metadata: 208 | name: yelb-db 209 | spec: 210 | awsName: yelb-db 211 | provider: 212 | virtualNode: 213 | virtualNodeRef: 214 | name: yelb-db 215 | --- 216 | apiVersion: appmesh.k8s.aws/v1beta2 217 | kind: VirtualNode 218 | metadata: 219 | name: redis-server 220 | spec: 221 | awsName: redis-server-virtual-node 222 | podSelector: 223 | matchLabels: 224 | app: redis-server 225 | listeners: 226 | - portMapping: 227 | port: 6379 228 | protocol: tcp 229 | serviceDiscovery: 230 | dns: 231 | hostname: >- 232 | redis-server.${#currentStage()['context']['namespaceOverride']}.svc.cluster.local 233 | --- 234 | apiVersion: appmesh.k8s.aws/v1beta2 235 | kind: VirtualService 236 | metadata: 237 | name: redis-server 238 | spec: 239 | awsName: redis-server 240 | provider: 241 | virtualNode: 242 | virtualNodeRef: 243 | name: redis-server 244 | --- 245 | apiVersion: appmesh.k8s.aws/v1beta2 246 | kind: VirtualNode 247 | metadata: 248 | name: yelb-ui 249 | spec: 250 | awsName: yelb-ui-virtual-node 251 | podSelector: 252 | matchLabels: 253 | app: yelb-ui 254 | listeners: 255 | - portMapping: 256 | port: 80 257 | protocol: http 258 | serviceDiscovery: 259 | dns: 260 | hostname: >- 261 | yelb-ui.${#currentStage()['context']['namespaceOverride']}.svc.cluster.local 262 | backends: 263 | - virtualService: 264 | virtualServiceRef: 265 | name: yelb-appserver 266 | --- 267 | apiVersion: appmesh.k8s.aws/v1beta2 268 | kind: VirtualService 269 | metadata: 270 | name: yelb-ui 271 | spec: 272 | awsName: yelb-ui 273 | provider: 274 | virtualNode: 275 | virtualNodeRef: 276 | name: yelb-ui 277 | --- 278 | apiVersion: appmesh.k8s.aws/v1beta2 279 | kind: VirtualNode 280 | metadata: 281 | name: yelb-appserver 282 | spec: 283 | awsName: yelb-appserver-virtual-node 284 | podSelector: 285 | matchLabels: 286 | app: yelb-appserver 287 | listeners: 288 | - portMapping: 289 | port: 4567 290 | protocol: http 291 | serviceDiscovery: 292 | dns: 293 | hostname: >- 294 | yelb-appserver.${#currentStage()['context']['namespaceOverride']}.svc.cluster.local 295 | backends: 296 | - virtualService: 297 | virtualServiceRef: 298 | name: yelb-db 299 | - virtualService: 300 | virtualServiceRef: 301 | name: redis-server 302 | --- 303 | apiVersion: appmesh.k8s.aws/v1beta2 304 | kind: VirtualRouter 305 | metadata: 306 | name: yelb-appserver 307 | spec: 308 | awsName: yelb-appserver-virtual-router 309 | listeners: 310 | - portMapping: 311 | port: 4567 312 | protocol: http 313 | routes: 314 | - name: route-to-yelb-appserver 315 | httpRoute: 316 | match: 317 | prefix: / 318 | action: 319 | weightedTargets: 320 | - 
virtualNodeRef: 321 | name: yelb-appserver 322 | weight: 1 323 | retryPolicy: 324 | maxRetries: 2 325 | perRetryTimeout: 326 | unit: ms 327 | value: 2000 328 | httpRetryEvents: 329 | - server-error 330 | - client-error 331 | - gateway-error 332 | --- 333 | apiVersion: appmesh.k8s.aws/v1beta2 334 | kind: VirtualService 335 | metadata: 336 | name: yelb-appserver 337 | spec: 338 | awsName: yelb-appserver 339 | provider: 340 | virtualRouter: 341 | virtualRouterRef: 342 | name: yelb-appserver 343 | --------------------------------------------------------------------------------