├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── assets └── Demostack_overview.webp ├── backend.hcl.example ├── backend.tf ├── configure_eks.sh ├── main.tf ├── modules ├── ad.tf.bkp ├── aws-iam.tf.bkp ├── aws.tf ├── boundary-lb.tf ├── consul-lb.tf ├── dns.tf ├── ebs.tf ├── eks.tf ├── f5.tf.bkp ├── f5m.tf.bkp ├── fabio-lb.tf ├── helper.sh.bkp ├── nomad-lb.tf ├── outputs.tf ├── server.tf ├── servers-ag.tf.bkp ├── templates │ ├── cloudtrail.tf.bkp │ ├── cts │ │ └── f5.tpl │ ├── policies │ │ └── assume-role.json │ ├── server │ │ ├── consul.sh │ │ ├── nomad.sh │ │ └── vault.sh │ ├── shared │ │ ├── base.sh │ │ └── docker.sh │ ├── windows │ │ └── init.ps1 │ └── workers │ │ ├── consul.sh │ │ ├── ebs_volumes.sh │ │ ├── nomad.sh │ │ └── user.sh ├── tfc.tf ├── tls.tf ├── traefik.tf ├── variables.tf ├── vault-lb.tf ├── windows.tf └── workers.tf ├── outputs.tf ├── terraform.tfvars.example └── variables.tf /.gitignore: -------------------------------------------------------------------------------- 1 | # Local .terraform directories 2 | .terraform/ 3 | **/.terraform/* 4 | 5 | *.terraform.lock.hcl 6 | 7 | # .tfstate files 8 | *.tfstate 9 | *.tfstate.* 10 | 11 | # .tfvars files 12 | *.tfvars 13 | 14 | backend.hcl 15 | 16 | */.vscode/* -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 
14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | all: login init demostack apply 2 | .PHONY: all doormat_creds doormat_aws deploy destroy console 3 | TFC_ORG = emea-se-playground-2019 4 | WORKSPACE_DEMOSTACK = GUY-HCP-Demostack-AWS 5 | DOORMAT_AWS_ACCOUNT = aws_guy_test 6 | VARIABLE_SET_ID = varset-BDhuaxrwsjowYcFX 7 | login: 8 | doormat login 9 | init: 10 | terraform init 11 | demostack: 12 | doormat aws --account $(DOORMAT_AWS_ACCOUNT) tf-push --local 13 | varset: 14 | doormat aws tf-push variable-set --account $(DOORMAT_AWS_ACCOUNT) --id $(VARIABLE_SET_ID) 15 | apply: 16 | terraform init 17 | terraform plan 18 | terraform apply 19 | destroy: 20 | terraform destroy -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # terraform-aws-demostack 2 | Meant as a reference (and a demo environment) and not for production use, 3 | this project configures Nomad, Vault, and Consul
(all from Hashicorp) on a variable number of servers and workers. 4 | 5 | Nomad jobs, Vault configurations, and Consul queries are also set up. 6 | 7 | ## Solution Diagram 8 | ![Solution Diagram](./assets/Demostack_overview.webp) 9 | 10 | ## Dependencies 11 | 12 | 13 | ### TLS 14 | 15 | 16 | 17 | ## Consul 18 | 19 | 20 | 21 | ## Vault 22 | 23 | 24 | 25 | ## Nomad 26 | 27 | 28 | 29 | ## Troubleshooting 30 | To begin debugging, check the cloud-init output: 31 | 32 | ```shell 33 | $ sudo tail -f /var/log/cloud-init-output.log 34 | ``` 35 | -------------------------------------------------------------------------------- /assets/Demostack_overview.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/GuyBarros/terraform-aws-demostack/a3960f3432d34263aeba2651d4a690827e957920/assets/Demostack_overview.webp -------------------------------------------------------------------------------- /backend.hcl.example: -------------------------------------------------------------------------------- 1 | hostname = "app.terraform.io" 2 | organization = "" 3 | 4 | workspaces { 5 | name = "" 6 | } -------------------------------------------------------------------------------- /backend.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "remote" { 3 | hostname = "app.terraform.io" 4 | organization = "emea-se-playground-2019" 5 | workspaces { 6 | name = "Guy-AWS-Demostack" 7 | } 8 | } 9 | } -------------------------------------------------------------------------------- /configure_eks.sh: -------------------------------------------------------------------------------- 1 | aws eks --region eu-west-2 update-kubeconfig --name guystack1-eks 2 | # arn:aws:eks:eu-west-2:958215610051:cluster/guystack1-eks 3 | 4 | kubectl config get-contexts -------------------------------------------------------------------------------- /main.tf:
-------------------------------------------------------------------------------- 1 | 2 | 3 | provider "aws" { 4 | # region = var.primary_region 5 | # alias = "primary" 6 | # default_tags { 7 | # tags = local.common_tags 8 | # } 9 | } 10 | 11 | 12 | 13 | module "cluster" { 14 | source = "./modules" 15 | for_each = var.clusters 16 | owner = var.owner 17 | region = each.value.region 18 | namespace = each.value.namespace 19 | public_key = var.public_key 20 | servers = var.servers 21 | workers = var.workers 22 | vaultlicense = var.vaultlicense 23 | consullicense = var.consullicense 24 | nomadlicense = var.nomadlicense 25 | enterprise = var.enterprise 26 | fabio_url = var.fabio_url 27 | cni_version = var.cni_version 28 | created-by = var.created-by 29 | sleep-at-night = var.sleep-at-night 30 | TTL = var.TTL 31 | vpc_cidr_block = var.vpc_cidr_block 32 | cidr_blocks = var.cidr_blocks 33 | instance_type_server = var.instance_type_server 34 | instance_type_worker = var.instance_type_worker 35 | zone_id = var.zone_id 36 | run_nomad_jobs = var.run_nomad_jobs 37 | host_access_ip = var.host_access_ip 38 | primary_datacenter = each.value.namespace 39 | 40 | # EMEA-SE-PLAYGROUND 41 | consul_join_tag_value = "${each.value.namespace}-${random_id.consul_join_tag_value.hex}" 42 | consul_gossip_key = random_id.consul_gossip_key.hex 43 | #consul_master_token = data.terraform_remote_state.tls.outputs.consul_master_token 44 | #consul_master_token = "5fder467-5gf5-8ju7-1q2w-y6gj78kl9gfd" 45 | consul_master_token = uuid() 46 | nomad_gossip_key = random_id.nomad_gossip_key.hex 47 | 48 | #F5 Creds 49 | f5_username = var.f5_username 50 | f5_password = var.f5_password 51 | } 52 | 53 | # Consul gossip encryption key 54 | resource "random_id" "consul_gossip_key" { 55 | byte_length = 16 56 | } 57 | 58 | # Consul master token 59 | resource "random_id" "consul_master_token" { 60 | byte_length = 16 61 | } 62 | 63 | # Consul join key 64 | resource "random_id" "consul_join_tag_value" { 65 | byte_length 
= 16 66 | } 67 | 68 | # Nomad gossip encryption key 69 | resource "random_id" "nomad_gossip_key" { 70 | byte_length = 16 71 | } -------------------------------------------------------------------------------- /modules/ad.tf.bkp: -------------------------------------------------------------------------------- 1 | resource "aws_directory_service_directory" "demostack" { 2 | name = "hashidemos.io" 3 | password = "SuperSecretPassw0rd" 4 | edition = "Standard" 5 | type = "MicrosoftAD" 6 | 7 | vpc_settings { 8 | vpc_id = aws_vpc.demostack.id 9 | subnet_ids = aws_subnet.demostack.*.id 10 | } 11 | 12 | tags = merge(local.common_tags ,{ 13 | ConsulJoin = "${var.consul_join_tag_value}" , 14 | Purpose = "demostack" , 15 | function = "ActiveDirectory" 16 | Name = "${var.namespace}-AD" , 17 | } 18 | ) 19 | 20 | } -------------------------------------------------------------------------------- /modules/aws-iam.tf.bkp: -------------------------------------------------------------------------------- 1 | ############################## Boundary ########################## 2 | 3 | data "aws_caller_identity" "current" {} 4 | 5 | data "aws_region" "current" {} 6 | 7 | data "aws_iam_policy" "demo_user_permissions_boundary" { 8 | name = "DemoUser" 9 | } 10 | 11 | locals { 12 | my_email = split("/", data.aws_caller_identity.current.arn)[2] 13 | } 14 | 15 | # Create the user to be used in Boundary for dynamic host discovery. Then attach the policy to the user. 16 | resource "aws_iam_user" "boundary_dynamic_host_catalog" { 17 | name = "${var.namespace}-bdhc" 18 | permissions_boundary = data.aws_iam_policy.demo_user_permissions_boundary.arn 19 | force_destroy = true 20 | } 21 | 22 | resource "aws_iam_user_policy" "boundary_dynamic_host_catalog" { 23 | user = aws_iam_user.boundary_dynamic_host_catalog.name 24 | policy = data.aws_iam_policy.demo_user_permissions_boundary.policy 25 | name = "DemoUserInlinePolicy" 26 | } 27 | 28 | # Generate some secrets to pass in to the Boundary configuration. 
29 | # WARNING: These secrets are not encrypted in the state file. Ensure that you do not commit your state file! 30 | resource "aws_iam_access_key" "boundary_dynamic_host_catalog" { 31 | user = aws_iam_user.boundary_dynamic_host_catalog.name 32 | } 33 | 34 | ############################## Vault ########################## 35 | 36 | 37 | resource "aws_iam_user" "vault_mount_user" { 38 | name = "demo-${var.namespace}-vault-user" 39 | permissions_boundary = data.aws_iam_policy.demo_user_permissions_boundary.arn 40 | force_destroy = true 41 | } 42 | 43 | resource "aws_iam_user_policy" "vault_mount_user" { 44 | user = aws_iam_user.vault_mount_user.name 45 | policy = data.aws_iam_policy.demo_user_permissions_boundary.policy 46 | name = "DemoUserInlinePolicy" 47 | } 48 | 49 | resource "aws_iam_access_key" "vault_mount_user" { 50 | user = aws_iam_user.vault_mount_user.name 51 | } 52 | 53 | # Vault Mount AWS Role Setup 54 | 55 | data "aws_iam_policy_document" "vault_dynamic_iam_user_policy" { 56 | statement { 57 | sid = "VaultDemoUserDescribeEC2Regions" 58 | actions = ["ec2:DescribeRegions"] 59 | resources = ["*"] 60 | } 61 | } 62 | 63 | data "aws_iam_role" "vault_target_iam_role" { 64 | name = "vault-assumed-role-credentials-demo" 65 | } 66 | 67 | ############################## Terraform Platform ########################## 68 | 69 | -------------------------------------------------------------------------------- /modules/aws.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.11.0" 3 | } 4 | 5 | 6 | //Getting the Domaing name 7 | data "aws_route53_zone" "fdqn" { 8 | zone_id = var.zone_id 9 | } 10 | 11 | 12 | data "aws_ami" "ubuntu" { 13 | most_recent = true 14 | filter { 15 | name = "name" 16 | # values = ["ubuntu/images/*ubuntu-jammy-22.04-amd64-server-*"] 17 | values = ["ubuntu/images/*ubuntu-jammy-22.04-arm64-server-*"] 18 | } 19 | 20 | filter { 21 | name = "virtualization-type" 22 | values = 
["hvm"] 23 | } 24 | 25 | owners = ["099720109477"] # Canonical 26 | } 27 | 28 | resource "aws_vpc" "demostack" { 29 | cidr_block = var.vpc_cidr_block 30 | enable_dns_hostnames = true 31 | enable_dns_support = true 32 | 33 | tags = local.common_tags 34 | 35 | } 36 | 37 | resource "aws_internet_gateway" "demostack" { 38 | vpc_id = aws_vpc.demostack.id 39 | 40 | tags = local.common_tags 41 | } 42 | 43 | resource "aws_route" "internet_access" { 44 | route_table_id = aws_vpc.demostack.main_route_table_id 45 | destination_cidr_block = "0.0.0.0/0" 46 | gateway_id = aws_internet_gateway.demostack.id 47 | 48 | } 49 | 50 | data "aws_availability_zones" "available" {} 51 | 52 | resource "aws_subnet" "demostack" { 53 | count = length(var.cidr_blocks) 54 | vpc_id = aws_vpc.demostack.id 55 | availability_zone = data.aws_availability_zones.available.names[count.index] 56 | cidr_block = var.cidr_blocks[count.index] 57 | map_public_ip_on_launch = true 58 | 59 | tags = local.common_tags 60 | } 61 | 62 | 63 | 64 | resource "aws_security_group" "demostack" { 65 | name_prefix = var.namespace 66 | vpc_id = aws_vpc.demostack.id 67 | 68 | tags = local.common_tags 69 | #Allow internal communication between nodes 70 | ingress { 71 | from_port = -1 72 | to_port = -1 73 | protocol = -1 74 | } 75 | 76 | ingress { 77 | from_port = 4000 78 | to_port = 32000 79 | protocol = "tcp" 80 | cidr_blocks = ["10.1.0.0/18"] 81 | } 82 | 83 | ingress { 84 | from_port = 4000 85 | to_port = 32000 86 | protocol = "udp" 87 | cidr_blocks = ["10.1.0.0/18"] 88 | } 89 | 90 | 91 | ingress { 92 | from_port = 4000 93 | to_port = 32000 94 | protocol = "tcp" 95 | cidr_blocks = ["10.2.0.0/18"] 96 | } 97 | 98 | ingress { 99 | from_port = 4000 100 | to_port = 32000 101 | protocol = "udp" 102 | cidr_blocks = ["10.2.0.0/18"] 103 | } 104 | 105 | # SSH access if host_access_ip has CIDR blocks 106 | dynamic "ingress" { 107 | for_each = var.host_access_ip 108 | content { 109 | from_port = 22 110 | to_port = 22 111 | protocol = 
"tcp" 112 | cidr_blocks = [ingress.value] 113 | } 114 | } 115 | 116 | # RDP access if host_access_ip has CIDR blocks 117 | dynamic "ingress" { 118 | for_each = var.host_access_ip 119 | content { 120 | from_port = 3389 121 | to_port = 3389 122 | protocol = "tcp" 123 | cidr_blocks = [ingress.value] 124 | } 125 | } 126 | 127 | 128 | #HTTP 129 | dynamic "ingress" { 130 | for_each = var.host_access_ip 131 | content { 132 | from_port = 80 133 | to_port = 80 134 | protocol = "tcp" 135 | cidr_blocks = [ingress.value] 136 | } 137 | } 138 | 139 | #Demostack LDAP 140 | dynamic "ingress" { 141 | for_each = var.host_access_ip 142 | content { 143 | from_port = 389 144 | to_port = 389 145 | protocol = "tcp" 146 | cidr_blocks = [ingress.value] 147 | } 148 | } 149 | 150 | 151 | #Demostack HTTPS 152 | dynamic "ingress" { 153 | for_each = var.host_access_ip 154 | content { 155 | from_port = 443 156 | to_port = 443 157 | protocol = "tcp" 158 | cidr_blocks = [ingress.value] 159 | } 160 | } 161 | 162 | #Grafana 163 | dynamic "ingress" { 164 | for_each = var.host_access_ip 165 | content { 166 | from_port = 1521 167 | to_port = 1521 168 | protocol = "tcp" 169 | cidr_blocks = [ingress.value] 170 | } 171 | } 172 | 173 | #Grafana 174 | dynamic "ingress" { 175 | for_each = var.host_access_ip 176 | content { 177 | from_port = 3000 178 | to_port = 3000 179 | protocol = "tcp" 180 | cidr_blocks = [ingress.value] 181 | } 182 | } 183 | 184 | #Demostack Postgres + pgadmin 185 | dynamic "ingress" { 186 | for_each = var.host_access_ip 187 | content { 188 | from_port = 5000 189 | to_port = 5500 190 | protocol = "tcp" 191 | cidr_blocks = [ingress.value] 192 | # cidr_blocks = flatten([ingress.value,data.tfe_ip_ranges.addresses.api]) 193 | } 194 | } 195 | 196 | #Consul and Vault and Boundary ports 197 | dynamic "ingress" { 198 | for_each = var.host_access_ip 199 | content { 200 | from_port = 8000 201 | to_port = 9300 202 | protocol = "tcp" 203 | cidr_blocks = [ingress.value] 204 | # cidr_blocks = 
flatten([ingress.value,data.tfe_ip_ranges.addresses.api]) 205 | } 206 | } 207 | 208 | #Fabio Ports 209 | dynamic "ingress" { 210 | for_each = var.host_access_ip 211 | content { 212 | from_port = 9998 213 | to_port = 9999 214 | protocol = "tcp" 215 | cidr_blocks = [ingress.value] 216 | # cidr_blocks = flatten([ingress.value,data.tfe_ip_ranges.addresses.api]) 217 | } 218 | } 219 | 220 | #Nomad 221 | dynamic "ingress" { 222 | for_each = var.host_access_ip 223 | content { 224 | from_port = 3000 225 | to_port = 4999 226 | protocol = "tcp" 227 | cidr_blocks = [ingress.value] 228 | # cidr_blocks = flatten([ingress.value,data.tfe_ip_ranges.addresses.api]) 229 | } 230 | } 231 | 232 | #More nomad ports & Boundary 233 | 234 | dynamic "ingress" { 235 | for_each = var.host_access_ip 236 | content { 237 | from_port = 20000 238 | to_port = 65535 239 | protocol = "tcp" 240 | cidr_blocks = [ingress.value] 241 | # cidr_blocks = flatten([ingress.value,data.tfe_ip_ranges.addresses.api]) 242 | } 243 | } 244 | 245 | egress { 246 | from_port = 0 247 | to_port = 0 248 | protocol = "-1" 249 | cidr_blocks = ["0.0.0.0/0"] 250 | } 251 | 252 | } 253 | 254 | resource "aws_key_pair" "demostack" { 255 | key_name = var.namespace 256 | public_key = var.public_key 257 | 258 | tags = local.common_tags 259 | } 260 | 261 | resource "aws_iam_instance_profile" "consul-join" { 262 | name = "${var.namespace}-consul-join-instance-profile" 263 | role = aws_iam_role.consul-join.name 264 | tags = local.common_tags 265 | 266 | } 267 | 268 | resource "aws_kms_key" "demostackVaultKeys" { 269 | description = "KMS for the Consul Demo Vault" 270 | deletion_window_in_days = 10 271 | 272 | tags = local.common_tags 273 | } 274 | 275 | resource "aws_iam_policy" "consul-join" { 276 | name = "${var.namespace}-consul-join-iam-policy" 277 | description = "Allows Consul nodes to describe instances for joining." 
278 | 279 | policy = data.aws_iam_policy_document.vault-server.json 280 | 281 | tags = local.common_tags 282 | } 283 | 284 | 285 | resource "aws_iam_role" "consul-join" { 286 | name = "${var.namespace}-consul-join-role" 287 | assume_role_policy = file("${path.module}/templates/policies/assume-role.json") 288 | 289 | tags = local.common_tags 290 | } 291 | 292 | resource "aws_iam_policy_attachment" "consul-join" { 293 | name = "${var.namespace}-consul-join-policy-attach" 294 | roles = [aws_iam_role.consul-join.name] 295 | policy_arn = aws_iam_policy.consul-join.arn 296 | 297 | } 298 | 299 | 300 | data "aws_iam_policy_document" "vault-server" { 301 | statement { 302 | sid = "VaultKMSUnseal" 303 | effect = "Allow" 304 | 305 | actions = [ 306 | "kms:Encrypt", 307 | "kms:Decrypt", 308 | "kms:DescribeKey", 309 | ] 310 | 311 | resources = [aws_kms_key.demostackVaultKeys.arn] 312 | } 313 | 314 | statement { 315 | effect = "Allow" 316 | 317 | actions = [ 318 | "ec2:DescribeInstances", 319 | "iam:PassRole", 320 | "iam:ListRoles", 321 | "cloudwatch:PutMetricData", 322 | "ds:DescribeDirectories", 323 | "ec2:DescribeInstanceStatus", 324 | "logs:*", 325 | "ec2messages:*", 326 | "ec2:DescribeInstances", 327 | "ec2:DescribeTags", 328 | "ec2:DescribeVolumes", 329 | "ec2:AttachVolume", 330 | "ec2:DetachVolume", 331 | "autoscaling:DescribeAutoScalingGroups", 332 | ] 333 | 334 | resources = ["*"] 335 | } 336 | 337 | } 338 | -------------------------------------------------------------------------------- /modules/boundary-lb.tf: -------------------------------------------------------------------------------- 1 | 2 | resource "aws_lb" "boundary-controller" { 3 | name = "${var.namespace}-boundary-ctrl" 4 | load_balancer_type = "network" 5 | internal = false 6 | subnets = aws_subnet.demostack.*.id 7 | 8 | tags = local.common_tags 9 | } 10 | 11 | resource "aws_lb_target_group" "boundary-controller" { 12 | name = "${var.namespace}-boundary-ctrl" 13 | port = 9200 14 | protocol = "TCP" 15 | 
vpc_id = aws_vpc.demostack.id 16 | 17 | stickiness { 18 | enabled = true 19 | type = "source_ip" 20 | } 21 | tags = local.common_tags 22 | } 23 | 24 | resource "aws_lb_target_group_attachment" "boundary-controller-servers" { 25 | count = var.servers 26 | target_group_arn = aws_lb_target_group.boundary-controller.arn 27 | target_id = aws_instance.servers[count.index].id 28 | port = 9200 29 | } 30 | 31 | resource "aws_lb_target_group_attachment" "boundary-controller-workers" { 32 | count = var.workers 33 | target_group_arn = aws_lb_target_group.boundary-controller.arn 34 | target_id = aws_instance.workers[count.index].id 35 | port = 9200 36 | } 37 | 38 | 39 | resource "aws_lb_listener" "boundary-controller" { 40 | load_balancer_arn = aws_lb.boundary-controller.arn 41 | port = "9200" 42 | protocol = "TCP" 43 | 44 | default_action { 45 | type = "forward" 46 | target_group_arn = aws_lb_target_group.boundary-controller.arn 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /modules/consul-lb.tf: -------------------------------------------------------------------------------- 1 | resource "aws_alb" "consul" { 2 | name = "${var.namespace}-consul" 3 | 4 | security_groups = [aws_security_group.demostack.id] 5 | subnets = aws_subnet.demostack.*.id 6 | 7 | tags = local.common_tags 8 | } 9 | 10 | resource "aws_alb_target_group" "consul" { 11 | name = "${var.namespace}-consul" 12 | 13 | port = "8500" 14 | vpc_id = aws_vpc.demostack.id 15 | protocol = "HTTP" 16 | 17 | health_check { 18 | interval = "5" 19 | timeout = "2" 20 | path = "/v1/status/leader" 21 | port = "8500" 22 | protocol = "HTTP" 23 | matcher = "200,429" 24 | healthy_threshold = 2 25 | } 26 | } 27 | 28 | resource "aws_alb_listener" "consul" { 29 | depends_on = [ 30 | aws_acm_certificate_validation.cert 31 | ] 32 | 33 | load_balancer_arn = aws_alb.consul.arn 34 | 35 | port = "8500" 36 | protocol = "HTTPS" 37 | certificate_arn = 
aws_acm_certificate_validation.cert.certificate_arn 38 | ssl_policy = "ELBSecurityPolicy-TLS-1-2-Ext-2018-06" 39 | 40 | default_action { 41 | target_group_arn = aws_alb_target_group.consul.arn 42 | type = "forward" 43 | } 44 | } 45 | 46 | resource "aws_alb_target_group_attachment" "consul-servers" { 47 | count = var.servers 48 | target_group_arn = aws_alb_target_group.consul.arn 49 | target_id = aws_instance.servers[count.index].id 50 | port = "8500" 51 | } 52 | 53 | resource "aws_alb_target_group_attachment" "consul-workers" { 54 | count = var.workers 55 | target_group_arn = aws_alb_target_group.consul.arn 56 | target_id = aws_instance.workers[count.index].id 57 | port = "8500" 58 | } 59 | -------------------------------------------------------------------------------- /modules/dns.tf: -------------------------------------------------------------------------------- 1 | 2 | 3 | resource "aws_route53_record" "boundary" { 4 | zone_id = var.zone_id 5 | name = "boundary.${var.namespace}" 6 | #name = "traefik" 7 | type = "CNAME" 8 | records = [aws_lb.boundary-controller.dns_name] 9 | ttl = "300" 10 | 11 | } 12 | 13 | 14 | resource "aws_route53_record" "traefik" { 15 | zone_id = var.zone_id 16 | name = "traefik.${var.namespace}" 17 | #name = "traefik" 18 | type = "CNAME" 19 | records = [aws_alb.traefik.dns_name] 20 | ttl = "300" 21 | 22 | } 23 | resource "aws_route53_record" "fabio" { 24 | zone_id = var.zone_id 25 | name = "fabio.${var.namespace}" 26 | #name = "fabio" 27 | type = "CNAME" 28 | records = [aws_alb.fabio.dns_name] 29 | ttl = "300" 30 | 31 | } 32 | resource "aws_route53_record" "consul" { 33 | zone_id = var.zone_id 34 | name = "consul.${var.namespace}" 35 | #name = "consul" 36 | type = "CNAME" 37 | records = [aws_alb.consul.dns_name] 38 | ttl = "300" 39 | } 40 | 41 | resource "aws_route53_record" "nomad" { 42 | zone_id = var.zone_id 43 | name = "nomad.${var.namespace}" 44 | // name = "nomad" 45 | type = "CNAME" 46 | records = [aws_alb.nomad.dns_name] 47 | ttl 
= "300" 48 | 49 | 50 | } 51 | resource "aws_route53_record" "vault" { 52 | zone_id = var.zone_id 53 | name = "vault.${var.namespace}" 54 | // name = "vault" 55 | type = "CNAME" 56 | records = [aws_alb.vault.dns_name] 57 | ttl = "300" 58 | 59 | 60 | } 61 | 62 | resource "aws_route53_record" "servers" { 63 | count = var.servers 64 | zone_id = var.zone_id 65 | name = "server-${count.index}.${var.namespace}" 66 | // name = "server-${count.index}" 67 | type = "CNAME" 68 | records = [element(aws_instance.servers.*.public_dns, count.index)] 69 | ttl = "300" 70 | 71 | 72 | } 73 | 74 | resource "aws_route53_record" "workers" { 75 | count = var.workers 76 | zone_id = var.zone_id 77 | name = "workers-${count.index}.${var.namespace}" 78 | // name = "workers-${count.index}" 79 | type = "CNAME" 80 | records = [element(aws_instance.workers.*.public_dns, count.index)] 81 | ttl = "300" 82 | 83 | 84 | } 85 | 86 | -------------------------------------------------------------------------------- /modules/ebs.tf: -------------------------------------------------------------------------------- 1 | 2 | 3 | resource "aws_ebs_volume" "mysql" { 4 | availability_zone = data.aws_availability_zones.available.names[0] 5 | size = 40 6 | tags = local.common_tags 7 | } 8 | 9 | resource "aws_ebs_volume" "mongodb" { 10 | availability_zone = data.aws_availability_zones.available.names[0] 11 | size = 40 12 | tags = local.common_tags 13 | } 14 | 15 | resource "aws_ebs_volume" "prometheus" { 16 | availability_zone = data.aws_availability_zones.available.names[0] 17 | size = 40 18 | tags = local.common_tags 19 | } 20 | 21 | resource "aws_ebs_volume" "shared" { 22 | availability_zone = data.aws_availability_zones.available.names[0] 23 | size = 40 24 | tags = local.common_tags 25 | } 26 | -------------------------------------------------------------------------------- /modules/eks.tf: -------------------------------------------------------------------------------- 1 | 2 | resource "aws_iam_role" "eks" { 3 | 
name = "${var.namespace}-eks" 4 | 5 | assume_role_policy = < output.json 3 | 4 | function setup-vault { 5 | export VAULT_SKIP_VERIFY=1 6 | export VAULT_NAMESPACE=admin 7 | export VAULT_ADDR=$(jq -r .HCP_Vault_Public_address.value output.json) 8 | export VAULT_TOKEN=$(jq -r .HCP_Vault_token.value output.json) 9 | } 10 | 11 | function setup-consul { 12 | export CONSUL_HTTP_TOKEN=$(jq -r .HCP_Consul_token.value output.json) 13 | export CONSUL_HTTP_ADDR=$(jq -r .HCP_Consul_Public_address.value output.json) 14 | } 15 | 16 | function setup-nomad { 17 | # export NOMAD_TOKEN=$(jq -r .HCP_Consul_token.value output.json) 18 | export NOMAD_ADDR=$(jq -r .Primary_Nomad.value output.json) 19 | } 20 | 21 | function setup-nomad-acl { 22 | vault kv get -field=nomad_acls nomad/bootstrap > nomad_acls.json 23 | export NOMAD_TOKEN=$(jq -r .SecretID nomad_acls.json) 24 | export NOMAD_ADDR=$(jq -r .Primary_Nomad.value output.json) 25 | } 26 | 27 | setup-vault 28 | setup-consul 29 | setup-nomad 30 | -------------------------------------------------------------------------------- /modules/nomad-lb.tf: -------------------------------------------------------------------------------- 1 | resource "aws_alb" "nomad" { 2 | name = "${var.namespace}-nomad" 3 | 4 | security_groups = [aws_security_group.demostack.id] 5 | subnets = aws_subnet.demostack.*.id 6 | 7 | tags = local.common_tags 8 | } 9 | 10 | resource "aws_alb_target_group" "nomad" { 11 | name = "${var.namespace}-nomad" 12 | 13 | port = "4646" 14 | vpc_id = aws_vpc.demostack.id 15 | protocol = "HTTPS" 16 | 17 | health_check { 18 | interval = "5" 19 | timeout = "2" 20 | path = "/v1/agent/health" 21 | port = "4646" 22 | protocol = "HTTPS" 23 | matcher = "200,429" 24 | healthy_threshold = 2 25 | } 26 | } 27 | 28 | resource "aws_alb_listener" "nomad" { 29 | depends_on = [ 30 | aws_acm_certificate_validation.cert 31 | ] 32 | 33 | load_balancer_arn = aws_alb.nomad.arn 34 | 35 | port = "4646" 36 | protocol = "HTTPS" 37 | certificate_arn = 
aws_acm_certificate_validation.cert.certificate_arn 38 | ssl_policy = "ELBSecurityPolicy-TLS-1-2-Ext-2018-06" 39 | 40 | default_action { 41 | target_group_arn = aws_alb_target_group.nomad.arn 42 | type = "forward" 43 | } 44 | } 45 | 46 | resource "aws_alb_target_group_attachment" "nomad" { 47 | count = var.servers 48 | target_group_arn = aws_alb_target_group.nomad.arn 49 | target_id = element(aws_instance.servers.*.id, count.index) 50 | port = "4646" 51 | 52 | } 53 | -------------------------------------------------------------------------------- /modules/outputs.tf: -------------------------------------------------------------------------------- 1 | ////////////////////// Module ////////////////////////// 2 | 3 | output "servers" { 4 | value = aws_route53_record.servers.*.fqdn 5 | } 6 | 7 | output "workers" { 8 | value = aws_route53_record.workers.*.fqdn 9 | } 10 | 11 | output "vpc_id" { 12 | value = aws_vpc.demostack.id 13 | } 14 | 15 | output "subnet_ids" { 16 | value = aws_subnet.demostack.*.id 17 | } 18 | 19 | output "traefik_lb" { 20 | value = "http://${aws_route53_record.traefik.fqdn}:8080" 21 | } 22 | 23 | output "fabio_lb" { 24 | value = "http://${aws_route53_record.fabio.fqdn}:9999" 25 | } 26 | 27 | output "vault_ui" { 28 | value = "https://${aws_route53_record.vault.fqdn}:8200" 29 | } 30 | 31 | output "nomad_ui" { 32 | value = "https://${aws_route53_record.nomad.fqdn}:4646" 33 | } 34 | 35 | output "consul_ui" { 36 | value = "https://${aws_route53_record.consul.fqdn}:8500" 37 | } 38 | 39 | output "boundary_ui" { 40 | value = "http://${aws_route53_record.boundary.fqdn}:9200" 41 | # value = "troubleshooting" 42 | } 43 | 44 | 45 | /* 46 | output "eks_endpoint" { 47 | value = aws_eks_cluster.eks.endpoint 48 | } 49 | output "eks_ca" { 50 | // value = aws_eks_cluster.eks.endpoint 51 | value = aws_eks_cluster.eks.certificate_authority.0.data 52 | } 53 | */ -------------------------------------------------------------------------------- /modules/server.tf: 
-------------------------------------------------------------------------------- 1 | 2 | # Gzip cloud-init config 3 | data "cloudinit_config" "servers" { 4 | count = var.servers 5 | 6 | gzip = true 7 | base64_encode = true 8 | 9 | #base 10 | part { 11 | content_type = "text/x-shellscript" 12 | content = templatefile("${path.module}/templates/shared/base.sh",{ 13 | region = var.region 14 | enterprise = var.enterprise 15 | node_name = "${var.namespace}-server-${count.index}" 16 | me_ca = tls_self_signed_cert.root.cert_pem 17 | me_cert = element(tls_locally_signed_cert.server.*.cert_pem, count.index) 18 | me_key = element(tls_private_key.server.*.private_key_pem, count.index) 19 | vault0_cert = tls_locally_signed_cert.server.0.cert_pem 20 | vault0_key = tls_private_key.server.0.private_key_pem 21 | public_key = var.public_key 22 | }) 23 | } 24 | 25 | #docker 26 | part { 27 | content_type = "text/x-shellscript" 28 | content = file("${path.module}/templates/shared/docker.sh") 29 | } 30 | 31 | #consul 32 | part { 33 | content_type = "text/x-shellscript" 34 | content = templatefile("${path.module}/templates/server/consul.sh",{ 35 | region = var.region 36 | node_name = "${var.namespace}-server-${count.index}" 37 | # Consul 38 | consullicense = var.consullicense 39 | primary_datacenter = var.primary_datacenter 40 | consul_gossip_key = var.consul_gossip_key 41 | consul_join_tag_key = "ConsulJoin" 42 | consul_join_tag_value = var.consul_join_tag_value 43 | consul_master_token = var.consul_master_token 44 | consul_servers = var.servers 45 | }) 46 | } 47 | 48 | #vault 49 | part { 50 | content_type = "text/x-shellscript" 51 | content = templatefile("${path.module}/templates/server/vault.sh",{ 52 | region = var.region 53 | enterprise = var.enterprise 54 | node_name = "${var.namespace}-server-${count.index}" 55 | kmskey = aws_kms_key.demostackVaultKeys.id 56 | # Consul 57 | consul_master_token = var.consul_master_token 58 | # Vault 59 | vaultlicense = var.vaultlicense 60 | 
namespace = var.namespace 61 | vault_root_token = random_id.vault-root-token.hex 62 | vault_servers = var.servers 63 | vault_api_addr = "https://${aws_route53_record.vault.fqdn}:8200" 64 | vault_join_tag_key = "VaultJoin" 65 | vault_join_tag_value = var.consul_join_tag_value 66 | }) 67 | } 68 | 69 | #nomad 70 | part { 71 | content_type = "text/x-shellscript" 72 | content = templatefile("${path.module}/templates/server/nomad.sh",{ 73 | node_name = "${var.namespace}-server-${count.index}" 74 | # Nomad 75 | vault_api_addr = "https://${aws_route53_record.vault.fqdn}:8200" 76 | nomad_gossip_key = var.nomad_gossip_key 77 | nomad_servers = var.servers 78 | cni_version = var.cni_version 79 | nomadlicense = var.nomadlicense 80 | }) 81 | } 82 | #end 83 | } 84 | 85 | resource "aws_instance" "servers" { 86 | count = var.servers 87 | 88 | ami = data.aws_ami.ubuntu.id 89 | instance_type = var.instance_type_server 90 | key_name = aws_key_pair.demostack.id 91 | 92 | subnet_id = element(aws_subnet.demostack.*.id, count.index) 93 | iam_instance_profile = aws_iam_instance_profile.consul-join.name 94 | vpc_security_group_ids = [aws_security_group.demostack.id] 95 | root_block_device { 96 | volume_size = "240" 97 | delete_on_termination = "true" 98 | } 99 | 100 | ebs_block_device { 101 | device_name = "/dev/xvdd" 102 | volume_type = "gp2" 103 | volume_size = "240" 104 | delete_on_termination = "true" 105 | } 106 | 107 | 108 | tags = merge(local.common_tags ,{ 109 | VaultJoin = "${var.consul_join_tag_value}" , 110 | ConsulJoin = "${var.consul_join_tag_value}" , 111 | Purpose = "demostack" , 112 | function = "server" , 113 | Name = "${var.namespace}-server-${count.index}" , 114 | } 115 | ) 116 | 117 | user_data = element(data.cloudinit_config.servers.*.rendered, count.index) 118 | } 119 | -------------------------------------------------------------------------------- /modules/servers-ag.tf.bkp: -------------------------------------------------------------------------------- 1 | data 
"template_file" "servers-asg" { 2 | 3 | template = join("\n", tolist([ 4 | file("${path.module}/templates/shared/base.sh"), 5 | file("${path.module}/templates/shared/docker.sh"), 6 | file("${path.module}/templates/server/consul.sh"), 7 | file("${path.module}/templates/server/vault.sh"), 8 | file("${path.module}/templates/server/nomad.sh"), 9 | ])) 10 | 11 | vars = { 12 | region = var.region 13 | 14 | enterprise = var.enterprise 15 | vaultlicense = var.vaultlicense 16 | consullicense = var.consullicense 17 | kmskey = aws_kms_key.demostackVaultKeys.id 18 | namespace = var.namespace 19 | node_name = "${var.namespace}-server-${count.index}" 20 | 21 | 22 | # me_ca = tls_self_signed_cert.root.cert_pem 23 | me_ca = var.ca_cert_pem 24 | me_cert = element(tls_locally_signed_cert.server.*.cert_pem, count.index) 25 | me_key = element(tls_private_key.server.*.private_key_pem, count.index) 26 | public_key = var.public_key 27 | 28 | # Consul 29 | primary_datacenter = var.primary_datacenter 30 | consul_gossip_key = var.consul_gossip_key 31 | consul_join_tag_key = "ConsulJoin" 32 | consul_join_tag_value = var.consul_join_tag_value 33 | consul_master_token = var.consul_master_token 34 | consul_servers = var.servers 35 | 36 | # Nomad 37 | nomad_gossip_key = var.nomad_gossip_key 38 | nomad_servers = var.servers 39 | cni_version = var.cni_version 40 | nomadlicense = var.nomadlicense 41 | 42 | # Nomad jobs 43 | fabio_url = var.fabio_url 44 | 45 | # Vault 46 | vault_root_token = random_id.vault-root-token.hex 47 | vault_servers = var.servers 48 | vault_api_addr = "https://${aws_route53_record.vault.fqdn}:8200" 49 | 50 | } 51 | } 52 | 53 | # Gzip cloud-init config 54 | data "template_cloudinit_config" "servers-asg" { 55 | count = var.servers 56 | 57 | gzip = true 58 | base64_encode = true 59 | 60 | part { 61 | content_type = "text/x-shellscript" 62 | content = element(data.template_file.servers.*.rendered, count.index) 63 | } 64 | } 65 | 66 | 67 | 68 | resource "aws_autoscaling_group" 
"servers-asg" { 69 | name = "${var.namespace}-servers-asg" 70 | launch_configuration = aws_launch_configuration.demostack.name 71 | desired_capacity = var.servers 72 | min_size = 1 73 | max_size = var.servers+5 74 | vpc_zone_identifier = [module.vpc.public_subnets[0]] 75 | 76 | lifecycle { 77 | create_before_destroy = true 78 | } 79 | 80 | tags = merge(local.common_tags ,{ 81 | ConsulJoin = "${var.consul_join_tag_value}" , 82 | Purpose = "demostack" , 83 | function = "server" , 84 | Name = "${var.namespace}-server-${count.index}" , 85 | } 86 | ) 87 | 88 | } 89 | 90 | resource "aws_launch_configuration" "servers-asg" { 91 | name_prefix = "${var.prefix}-servers-" 92 | image_id = data.aws_ami.ubuntu.id 93 | instance_type = var.instance_type_server 94 | associate_public_ip_address = true 95 | 96 | vpc_security_group_ids = [aws_security_group.demostack.id] 97 | key_name = aws_key_pair.demostack.id 98 | user_data = element(data.template_cloudinit_config.servers.*.rendered, count.index) 99 | 100 | lifecycle { 101 | create_before_destroy = true 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /modules/templates/cloudtrail.tf.bkp: -------------------------------------------------------------------------------- 1 | data "aws_caller_identity" "current" {} 2 | 3 | resource "aws_cloudtrail" "demostack" { 4 | name = "tf-${var.namespace}-cloudtrail" 5 | s3_bucket_name = aws_s3_bucket.cloudtrail.id 6 | s3_key_prefix = "${var.namespace}" 7 | include_global_service_events = false 8 | } 9 | 10 | resource "aws_s3_bucket" "cloudtrail" { 11 | bucket = "tf-${var.namespace}-trail" 12 | force_destroy = true 13 | 14 | policy = <&- 11 | exec 1>$npipe 12 | exec 2>&1 13 | 14 | cat << 'EOF' > /config/cloud/runtime-init-conf.yaml 15 | --- 16 | runtime_parameters: [] 17 | pre_onboard_enabled: 18 | - name: provision_rest 19 | type: inline 20 | commands: 21 | - /usr/bin/setdb provision.extramb 500 22 | - /usr/bin/setdb restjavad.useextramb true 
23 | - /usr/bin/setdb setup.run false 24 | extension_packages: 25 | install_operations: 26 | - extensionType: as3 27 | extensionVersion: 3.30.0 28 | extensionUrl: file:///var/config/rest/downloads/f5-appsvcs-3.30.0-5.noarch.rpm 29 | - extensionType: fast 30 | extensionVersion: 1.11.0 31 | extensionUrl: file:///var/config/rest/downloads/f5-appsvcs-templates-1.11.0-1.noarch.rpm 32 | extension_services: 33 | service_operations: [] 34 | post_onboard_enabled: 35 | - name: custom-config 36 | type: inline 37 | commands: 38 | - bash /config/custom-config.sh 39 | EOF 40 | cat << 'EOF' > /config/custom-config.sh 41 | #!/bin/bash 42 | source /usr/lib/bigstart/bigip-ready-functions 43 | wait_bigip_ready 44 | tmsh modify /auth user admin encrypted-password '${encrypted_password}' 45 | tmsh modify auth user admin shell bash 46 | tmsh save sys config 47 | rm -f /config/custom-config.sh 48 | sleep 60 49 | bigstart restart restnoded 50 | EOF 51 | 52 | source /usr/lib/bigstart/bigip-ready-functions 53 | wait_bigip_ready 54 | 55 | for i in {1..30}; do 56 | curl -fv --retry 1 --connect-timeout 5 -L "https://github.com/F5Networks/f5-appsvcs-extension/releases/download/v3.30.0/f5-appsvcs-3.30.0-5.noarch.rpm" -o "/var/config/rest/downloads/f5-appsvcs-3.30.0-5.noarch.rpm" && break || sleep 10 57 | done 58 | 59 | for i in {1..30}; do 60 | curl -fv --retry 1 --connect-timeout 5 -L "https://cdn.f5.com/product/cloudsolutions/f5-bigip-runtime-init/v1.3.2/dist/f5-bigip-runtime-init-1.3.2-1.gz.run" -o "/var/config/rest/downloads/f5-bigip-runtime-init-1.3.2-1.gz.run" && break || sleep 10 61 | done 62 | 63 | for i in {1..30}; do 64 | curl -fv --retry 1 --connect-timeout 5 -L "https://github.com/F5Networks/f5-appsvcs-templates/releases/download/v1.11.0/f5-appsvcs-templates-1.11.0-1.noarch.rpm" -o "/var/config/rest/downloads/f5-appsvcs-templates-1.11.0-1.noarch.rpm" && break || sleep 10 65 | done 66 | 67 | bash /var/config/rest/downloads/f5-bigip-runtime-init-1.3.2-1.gz.run -- '--cloud aws 
--skip-verify --skip-toolchain-metadata-sync' 68 | 69 | f5-bigip-runtime-init --config-file /config/cloud/runtime-init-conf.yaml -------------------------------------------------------------------------------- /modules/templates/policies/assume-role.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Action": "sts:AssumeRole", 6 | "Principal": { 7 | "Service": "ec2.amazonaws.com" 8 | }, 9 | "Effect": "Allow", 10 | "Sid": "" 11 | }, 12 | { 13 | "Effect": "Allow", 14 | "Principal": { 15 | "AWS": "arn:aws:sts::711129375688:assumed-role/se_demos_dev-developer/Guy@hashicorp.com" 16 | }, 17 | "Action": "sts:AssumeRole" 18 | } 19 | ] 20 | 21 | } 22 | -------------------------------------------------------------------------------- /modules/templates/server/consul.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | echo "==> getting the aws metadata token" 4 | export TOKEN=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") 5 | 6 | echo "==> check token was set" 7 | echo $TOKEN 8 | 9 | echo "--> Writing configuration" 10 | sudo mkdir -p /mnt/consul 11 | sudo mkdir -p /etc/consul.d 12 | sudo mkdir -p /etc/consul.d/acl_policies 13 | 14 | echo "--> clean up any default config." 
15 | sudo rm /etc/consul.d/* 16 | 17 | 18 | sudo tee /etc/consul.d/config.json > /dev/null < /etc/consul.d/acl.hcl 64 | acl = { 65 | enabled = true 66 | default_policy = "allow" 67 | enable_token_persistence = true 68 | tokens { 69 | initial_management = "${consul_master_token}" 70 | } 71 | down_policy = "extend-cache" 72 | } 73 | EOF 74 | 75 | echo "--> Writing profile" 76 | sudo tee /etc/profile.d/consul.sh > /dev/null <<"EOF" 77 | alias conslu="consul" 78 | alias ocnsul="consul" 79 | EOF 80 | source /etc/profile.d/consul.sh 81 | 82 | ####################################################### 83 | echo "--> Generating systemd configuration" 84 | sudo tee /etc/systemd/system/consul.service > /dev/null <<"EOF" 85 | [Unit] 86 | Description=Consul 87 | Documentation=https://www.consul.io/docs/ 88 | Requires=network-online.target 89 | After=network-online.target 90 | 91 | [Service] 92 | Restart=on-failure 93 | ExecStart=/usr/bin/consul agent -config-dir="/etc/consul.d" 94 | ExecReload=/bin/kill -HUP $MAINPID 95 | KillSignal=SIGINT 96 | #Enterprise License 97 | Environment=CONSUL_LICENSE=${consullicense} 98 | Environment=CONSUL_HTTP_TOKEN=${consul_master_token} 99 | [Install] 100 | WantedBy=multi-user.target 101 | EOF 102 | sudo systemctl enable consul 103 | sudo systemctl restart consul 104 | 105 | export CONSUL_HTTP_TOKEN=${consul_master_token} 106 | export CONSUL_HTTP_ADDR=http://$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s http://169.254.169.254/latest/meta-data/local-ipv4):8500 107 | 108 | 109 | #TODO - CONSUL ACL Bootstrap 110 | echo "--> setting up ACL system" 111 | ############################################ 112 | sudo tee /etc/consul.d/acl_policies/${node_name}.hcl > /dev/null < /dev/null < Waiting for all Consul servers" 153 | while [ "$(consul members 2>&1 | grep "server" | grep "alive" | wc -l)" -lt "${consul_servers}" ]; do 154 | sleep 3 155 | done 156 | 157 | echo "--> Waiting for Consul leader #1 " 158 | while [ -z "$(curl -skfS http://$(curl -H 
"X-aws-ec2-metadata-token: $TOKEN" -s http://169.254.169.254/latest/meta-data/local-ipv4):8500/v1/status/leader)" ]; do 159 | sleep 3 160 | done 161 | consul acl policy create -name consul_${node_name} -rules @/etc/consul.d/acl_policies/${node_name}.hcl 162 | consul acl token create -format=json -description "consul ${node_name} agent token" -policy-name consul_${node_name} > /etc/consul.d/consul_${node_name}_token.json 163 | 164 | ################################## 165 | 166 | 167 | 168 | 169 | 170 | echo "--> setting up resolv.conf" 171 | ################################## 172 | ln -sf /run/systemd/resolve/resolv.conf /etc/resolv.conf 173 | 174 | mkdir /etc/systemd/resolved.conf.d 175 | touch /etc/systemd/resolved.conf.d/forward-consul-domains.conf 176 | 177 | IPV4=$(ec2metadata --local-ipv4) 178 | 179 | printf "[Resolve]\nDNS=127.0.0.1\nDomains=~consul\n" > /etc/systemd/resolved.conf.d/forward-consul-domains.conf 180 | 181 | iptables -t nat -A PREROUTING -p udp -m udp --dport 53 -j REDIRECT --to-ports 8600 182 | iptables -t nat -A PREROUTING -p tcp -m tcp --dport 53 -j REDIRECT --to-ports 8600 183 | iptables -t nat -A OUTPUT -d localhost -p udp -m udp --dport 53 -j REDIRECT --to-ports 8600 184 | iptables -t nat -A OUTPUT -d localhost -p tcp -m tcp --dport 53 -j REDIRECT --to-ports 8600 185 | 186 | systemctl daemon-reload 187 | systemctl restart systemd-resolved 188 | 189 | sleep 3 190 | 191 | echo "--> Waiting for all Consul servers" 192 | while [ "$(consul members 2>&1 | grep "server" | grep "alive" | wc -l)" -lt "${consul_servers}" ]; do 193 | sleep 3 194 | done 195 | 196 | echo "--> Waiting for Consul leader #2" 197 | while [ -z "$(curl -skfS http://$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s http://169.254.169.254/latest/meta-data/local-ipv4):8500/v1/status/leader)" ]; do 198 | sleep 3 199 | done 200 | 201 | 202 | ######### 203 | 204 | echo "==> Consul is done!" 
205 | -------------------------------------------------------------------------------- /modules/templates/server/nomad.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | echo "==> getting the aws metadata token" 4 | export TOKEN=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") 5 | 6 | echo "==> check token was set" 7 | echo $TOKEN 8 | 9 | echo "--> clean up any default config." 10 | sudo rm /etc/nomad.d/* 11 | 12 | 13 | 14 | echo "NOMAD --> Waiting for Vault to be active" 15 | VAULT_ADDR="https://$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s http://169.254.169.254/latest/meta-data/local-ipv4):8200" 16 | URL="$VAULT_ADDR/v1/sys/health" 17 | HTTP_STATUS=0 18 | 19 | echo "Vault API ADDRESS:" $VAULT_ADDR 20 | 21 | while [[ $HTTP_STATUS -ne 200 && $HTTP_STATUS -ne 473 && $HTTP_STATUS -ne 429 ]]; do 22 | HTTP_STATUS=$(curl -k -o /dev/null -w "%%{http_code}" $URL) 23 | sleep 1 24 | done 25 | 26 | 27 | 28 | export CONSUL_HTTP_ADDR=http://$(private_ip):8500 29 | 30 | echo "--> Generating Vault token..." 
31 | export VAULT_TOKEN="$(consul kv get service/vault/root-token)" 32 | export NOMAD_VAULT_TOKEN="$(VAULT_TOKEN="$VAULT_TOKEN" \ 33 | VAULT_ADDR="https://$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s http://169.254.169.254/latest/meta-data/local-ipv4):8200" \ 34 | VAULT_SKIP_VERIFY=true \ 35 | vault token create -field=token -policy=superuser -policy=nomad-server -display-name=${node_name} -id=${node_name} -period=72h)" 36 | 37 | consul kv put service/vault/${node_name}-token $NOMAD_VAULT_TOKEN 38 | 39 | 40 | echo "--> Installing CNI plugin" 41 | sudo mkdir -p /opt/cni/bin/ 42 | export ARCH_CNI=$( [ $(uname -m) = aarch64 ] && echo arm64 || echo amd64) 43 | export CNI_PLUGIN_VERSION=${cni_version} 44 | sudo wget "https://github.com/containernetworking/plugins/releases/download/$${CNI_PLUGIN_VERSION}/cni-plugins-linux-$${ARCH_CNI}-$${CNI_PLUGIN_VERSION}".tgz && \ 45 | sudo tar -xzf cni-plugins-linux-$${ARCH_CNI}-v$${CNI_PLUGIN_VERSION}.tgz -C /opt/cni/bin/ 46 | 47 | export AWS_REGION=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -fsq http://169.254.169.254/latest/meta-data/placement/availability-zone | sed 's/[a-z]$//') 48 | export AWS_AZ=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" http://169.254.169.254/latest/meta-data/placement/availability-zone) 49 | 50 | 51 | echo "--> Writing configuration" 52 | sudo mkdir -p /mnt/nomad 53 | sudo mkdir -p /etc/nomad.d 54 | 55 | 56 | echo "--> clean up any default config." 
57 | sudo rm /etc/nomad.d/* 58 | 59 | echo "--> creating directories for host volumes" 60 | sudo mkdir -p /etc/nomad.d/host-volumes/wp-runner 61 | sudo mkdir -p /etc/nomad.d/host-volumes/wp-server 62 | 63 | 64 | sudo tee /etc/nomad.d/nomad.hcl > /dev/null < Writing profile" 149 | sudo tee /etc/profile.d/nomad.sh > /dev/null <<"EOF" 150 | alias noamd="nomad" 151 | alias nomas="nomad" 152 | alias nomda="nomad" 153 | export NOMAD_ADDR="https://${node_name}.node.consul:4646" 154 | export NOMAD_CACERT="/usr/local/share/ca-certificates/01-me.crt" 155 | export NOMAD_CLIENT_CERT="/etc/ssl/certs/me.crt" 156 | export NOMAD_CLIENT_KEY="/etc/ssl/certs/me.key" 157 | EOF 158 | source /etc/profile.d/nomad.sh 159 | 160 | echo "--> Generating systemd configuration" 161 | sudo tee /etc/systemd/system/nomad.service > /dev/null < Waiting for Nomad leader" 188 | while ! curl -k https://$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s http://169.254.169.254/latest/meta-data/local-ipv4):4646/v1/status/leader --show-error; do 189 | sleep 2 190 | done 191 | 192 | echo "--> Waiting for a list of Nomad peers" 193 | while ! curl -k https://$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s http://169.254.169.254/latest/meta-data/local-ipv4):4646/v1/status/peers --show-error; do 194 | sleep 2 195 | done 196 | 197 | echo "--> Waiting for all Nomad servers" 198 | while [ "$(nomad server members 2>&1 | grep "alive" | wc -l)" -lt "${nomad_servers}" ]; do 199 | sleep 5 200 | done 201 | 202 | echo "==> Nomad is done!" 
203 | -------------------------------------------------------------------------------- /modules/templates/server/vault.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | echo "==> getting the aws metadata token" 4 | export TOKEN=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") 5 | 6 | echo "==> check token was set" 7 | echo $TOKEN 8 | 9 | 10 | echo "--> clean up any default config." 11 | sudo rm /etc/vault.d/* 12 | 13 | 14 | 15 | echo "==> Vault (server)" 16 | # Vault expects the key to be concatenated with the CA 17 | sudo mkdir -p /mnt/vault 18 | sudo mkdir -p /etc/vault.d/tls/ 19 | sudo mkdir -p /etc/vault.d/plugins/ 20 | sudo tee /etc/vault.d/tls/vault.crt > /dev/null < Writing configuration" 27 | sudo mkdir -p /etc/vault.d 28 | sudo tee /etc/vault.d/config.hcl > /dev/null < Writing profile" 96 | sudo tee /etc/profile.d/vault.sh > /dev/null <<"EOF" 97 | alias vualt="vault" 98 | export VAULT_ADDR="https://$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s http://169.254.169.254/latest/meta-data/local-ipv4):8200" 99 | EOF 100 | source /etc/profile.d/vault.sh 101 | 102 | echo "--> Generating systemd configuration" 103 | sudo tee /etc/systemd/system/vault.service > /dev/null <<"EOF" 104 | [Unit] 105 | Description=Vault 106 | Documentation=https://www.vaultproject.io/docs/ 107 | Requires=network-online.target 108 | After=network-online.target 109 | [Service] 110 | Restart=on-failure 111 | ExecStart=/usr/bin/vault server -config="/etc/vault.d/config.hcl" 112 | ExecReload=/bin/kill -HUP $MAINPID 113 | #Enterprise License 114 | Environment=VAULT_LICENSE=${vaultlicense} 115 | KillSignal=SIGINT 116 | [Install] 117 | WantedBy=multi-user.target 118 | EOF 119 | 120 | sudo systemctl enable vault 121 | sudo systemctl start vault 122 | sleep 8 123 | if [ "${node_name}" == "${namespace}-server-0" ] 124 | then 125 | echo 
"--> Initializing vault from server 0" 126 | export CONSUL_HTTP_TOKEN=${consul_master_token} 127 | consul lock -name=vault-init tmp/vault/lock "$(cat <<"EOF" 128 | set -e 129 | sleep 2 130 | export VAULT_ADDR="https://$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s http://169.254.169.254/latest/meta-data/local-ipv4):8200" 131 | export VAULT_SKIP_VERIFY=true 132 | if ! vault operator init -status >/dev/null; then 133 | vault operator init -recovery-shares=1 -recovery-threshold=1 > /tmp/out.txt 134 | cat /tmp/out.txt | grep "Recovery Key 1" | sed 's/Recovery Key 1: //' | consul kv put service/vault/recovery-key - 135 | cat /tmp/out.txt | grep "Initial Root Token" | sed 's/Initial Root Token: //' | consul kv put service/vault/root-token - 136 | 137 | export VAULT_TOKEN=$(consul kv get service/vault/root-token) 138 | echo "ROOT TOKEN: $VAULT_TOKEN" 139 | 140 | sudo systemctl enable vault 141 | sudo systemctl restart vault 142 | else 143 | export VAULT_ADDR="https://$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s http://169.254.169.254/latest/meta-data/local-ipv4):8200" 144 | export VAULT_SKIP_VERIFY=true 145 | export VAULT_TOKEN=$(consul kv get service/vault/root-token) 146 | echo "ROOT TOKEN: $VAULT_TOKEN" 147 | sudo systemctl enable vault 148 | sudo systemctl restart vault 149 | fi 150 | sleep 8 151 | EOF 152 | )" 153 | fi 154 | 155 | 156 | 157 | echo "--> Waiting for Vault to be active" 158 | VAULT_ADDR="https://$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s http://169.254.169.254/latest/meta-data/local-ipv4):8200" 159 | URL="$VAULT_ADDR/v1/sys/health" 160 | HTTP_STATUS=0 161 | 162 | while [[ $HTTP_STATUS -ne 200 && $HTTP_STATUS -ne 473 && $HTTP_STATUS -ne 429 ]]; do 163 | HTTP_STATUS=$(curl -k -o /dev/null -w "%%{http_code}" $URL) 164 | sleep 1 165 | done 166 | 167 | echo "HTTP status code is either 200 or 473. Continuing with the script..." 
168 | 169 | echo "--> Attempting to create nomad role" 170 | 171 | echo "--> Adding Nomad policy" 172 | echo "--> Retrieving root token..." 173 | export VAULT_TOKEN=$(consul kv get service/vault/root-token) 174 | 175 | export VAULT_ADDR="https://$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s http://169.254.169.254/latest/meta-data/local-ipv4):8200" 176 | export VAULT_SKIP_VERIFY=true 177 | 178 | vault policy write nomad-server - < Creating Nomad token role" 252 | vault write auth/token/roles/nomad-cluster \ 253 | name=nomad-cluster \ 254 | period=259200 \ 255 | renewable=true \ 256 | orphan=false \ 257 | disallowed_policies=nomad-server \ 258 | explicit_max_ttl=0 259 | 260 | echo "--> Mount KV in Vault" 261 | { 262 | vault secrets enable -version=2 kv && 263 | echo "--> KV Mounted succesfully" 264 | } || 265 | { 266 | echo "--> KV Already mounted, moving on" 267 | } 268 | 269 | echo "--> Creating Initial secret for Nomad KV" 270 | vault kv put kv/test message='Hello world' 271 | 272 | 273 | echo "--> nomad nginx-vault-pki demo prep" 274 | { 275 | vault secrets enable pki 276 | }|| 277 | { 278 | echo "--> pki already enabled, moving on" 279 | } 280 | 281 | { 282 | vault write pki/root/generate/internal common_name=service.consul 283 | }|| 284 | { 285 | echo "--> pki generate internal already configured, moving on" 286 | } 287 | { 288 | vault write pki/roles/consul-service generate_lease=true allowed_domains="service.consul" allow_subdomains="true" 289 | }|| 290 | { 291 | echo "--> pki role already configured, moving on" 292 | } 293 | 294 | { 295 | vault policy write superuser - < superuser role already configured, moving on" 331 | } 332 | 333 | 334 | echo "-->Boundary setup" 335 | { 336 | vault namespace create boundary 337 | }|| 338 | { 339 | echo "--> Boundary namespace already created, moving on" 340 | } 341 | 342 | echo "-->mount transit in boundary namespace" 343 | { 344 | 345 | vault secrets enable -namespace=boundary -path=transit transit 346 | 347 | }|| 348 
| { 349 | echo "--> transit already mounted, moving on" 350 | } 351 | 352 | echo "--> creating boundary root key" 353 | { 354 | vault write -namespace=boundary -f transit/keys/root 355 | }|| 356 | { 357 | echo "--> root key already exists, moving on" 358 | } 359 | 360 | echo "--> creating boundary worker-auth key" 361 | { 362 | vault write -namespace=boundary -f transit/keys/worker-auth 363 | 364 | }|| 365 | { 366 | echo "--> worker-auth key already exists, moving on" 367 | } 368 | 369 | 370 | echo "==> Vault is done!" -------------------------------------------------------------------------------- /modules/templates/shared/base.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -x 3 | 4 | echo "==> Base" 5 | 6 | echo "==> getting the aws metadata token" 7 | export TOKEN=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") 8 | 9 | echo "==> check token was set" 10 | echo $TOKEN 11 | 12 | 13 | echo "==> libc6 issue workaround" 14 | echo 'libc6 libraries/restart-without-asking boolean true' | sudo debconf-set-selections 15 | 16 | echo "--> Adding trusted root CA" 17 | sudo tee /usr/local/share/ca-certificates/01-me.crt > /dev/null </dev/null 21 | 22 | echo "--> Adding my certificates" 23 | sudo tee /etc/ssl/certs/me.crt > /dev/null < /dev/null < /dev/null < /dev/null < Updating apt-cache" 50 | ssh-apt update 51 | 52 | 53 | 54 | echo "--> Setting iptables for bridge networking" 55 | echo 1 > /proc/sys/net/bridge/bridge-nf-call-arptables 56 | echo 1 > /proc/sys/net/bridge/bridge-nf-call-ip6tables 57 | echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables 58 | 59 | echo "--> Making iptables settings for bridge networking config change" 60 | sudo tee /etc/sysctl.d/nomadtables > /dev/null < Adding Hashicorp repo" 67 | wget -O- https://apt.releases.hashicorp.com/gpg | gpg --dearmor | sudo tee 
/usr/share/keyrings/hashicorp-archive-keyring.gpg 68 | echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list 69 | 70 | 71 | 72 | echo "--> updated version of Nodejs" 73 | curl -sL https://deb.nodesource.com/setup_22.x | sudo -E bash - 74 | 75 | sudo apt update 76 | 77 | echo "--> Installing common dependencies" 78 | sudo apt-get install -y \ 79 | build-essential \ 80 | nodejs \ 81 | curl \ 82 | emacs \ 83 | git \ 84 | jq \ 85 | tmux \ 86 | unzip \ 87 | vim \ 88 | wget \ 89 | tree \ 90 | nfs-kernel-server \ 91 | nfs-common \ 92 | python3-pip \ 93 | ruby-full \ 94 | apt-transport-https \ 95 | ca-certificates \ 96 | curl \ 97 | gnupg-agent \ 98 | software-properties-common \ 99 | openjdk-17-jdk-headless \ 100 | prometheus-node-exporter \ 101 | golang-go \ 102 | alien \ 103 | waypoint \ 104 | qemu-system \ 105 | &>/dev/null 106 | 107 | 108 | echo "--> Disabling checkpoint" 109 | sudo tee /etc/profile.d/checkpoint.sh > /dev/null <<"EOF" 110 | export CHECKPOINT_DISABLE=1 111 | EOF 112 | source /etc/profile.d/checkpoint.sh 113 | 114 | if [ ${enterprise} == 0 ] 115 | then 116 | sudo apt-get install -y \ 117 | vault \ 118 | consul \ 119 | nomad \ 120 | &>/dev/null 121 | 122 | else 123 | sudo apt-get install -y \ 124 | vault-enterprise \ 125 | consul-enterprise \ 126 | nomad-enterprise \ 127 | &>/dev/null 128 | 129 | fi 130 | 131 | # echo "--> Install Envoy" 132 | # curl -L https://getenvoy.io/cli | sudo bash -s -- -b /usr/local/bin 133 | # getenvoy run standard:1.16.0 -- --version 134 | # sudo cp ~/.getenvoy/builds/standard/1.16.0/linux_glibc/bin/envoy /usr/bin/ 135 | 136 | 137 | echo "==> Base is done!" 
138 | -------------------------------------------------------------------------------- /modules/templates/shared/docker.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | echo "==> Docker" 3 | 4 | # uninstall all conflicting packages 5 | for pkg in docker.io docker-doc docker-compose docker-compose-v2 podman-docker containerd runc; do sudo apt-get remove $pkg; done 6 | 7 | 8 | # Add Docker's official GPG key: 9 | sudo apt-get update 10 | sudo apt-get install ca-certificates curl gnupg 11 | sudo install -m 0755 -d /etc/apt/keyrings 12 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg 13 | sudo chmod a+r /etc/apt/keyrings/docker.gpg 14 | 15 | # Add the repository to Apt sources: 16 | echo \ 17 | "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ 18 | "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \ 19 | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null 20 | sudo apt-get update 21 | 22 | sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin 23 | 24 | -------------------------------------------------------------------------------- /modules/templates/windows/init.ps1: -------------------------------------------------------------------------------- 1 | 2 | Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) 3 | 4 | choco install chocolateygui vscode googlechrome git Firefox github-desktop vault softerraldapbrowser ldapadmin ldapexplorer beekeeper-studio -y 5 | 6 | # Get the latest Boundary Desktop Version 7 | $url = "https://api.releases.hashicorp.com/v1/releases/boundary-desktop/latest" 8 | 
$response = Invoke-RestMethod -Uri $url 9 | $build = $response.builds | Where-Object { $_.arch -eq "amd64" -and $_.os -eq "windows" } 10 | $build.url 11 | 12 | # Define the destination path for the downloaded file 13 | $destinationPath = Join-Path -Path $env:TEMP -ChildPath "boundary-desktop.zip" 14 | 15 | # Download the file 16 | Invoke-WebRequest -Uri $build.url -OutFile $destinationPath 17 | 18 | # Define the folder path where you want to extract the contents 19 | $newFolderPath = Join-Path -Path ([Environment]::GetFolderPath('Desktop')) -ChildPath "Boundary_desktop" 20 | 21 | # Create the new folder if it does not exist 22 | if (-Not (Test-Path -Path $newFolderPath)) { 23 | New-Item -ItemType Directory -Path $newFolderPath 24 | } 25 | 26 | # Unzip the file to the new folder path 27 | Expand-Archive -Path $destinationPath -DestinationPath $newFolderPath 28 | 29 | # Create a shortcut to Boundary.exe on the Desktop 30 | $shortcutPath = Join-Path -Path ([Environment]::GetFolderPath('Desktop')) -ChildPath "Boundary Desktop.lnk" 31 | $targetPath = Join-Path -Path $newFolderPath -ChildPath "Boundary.exe" 32 | $WshShell = New-Object -ComObject WScript.Shell 33 | $Shortcut = $WshShell.CreateShortcut($shortcutPath) 34 | $Shortcut.TargetPath = $targetPath 35 | $Shortcut.Save() 36 | 37 | # Remove the downloaded zip file if no longer needed 38 | Remove-Item -Path $destinationPath 39 | 40 | # add the a 41 | 42 | # Confirm completion 43 | Write-Output "Boundary Desktop downloaded and extracted to Desktop successfully." 
44 | 45 | git clone https://github.com/GuyBarros/ad-lab C:\ad-lab 46 | 47 | -------------------------------------------------------------------------------- /modules/templates/workers/consul.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | echo "==> Consul (client)" 3 | 4 | 5 | echo "==> getting the aws metadata token" 6 | export TOKEN=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") 7 | 8 | echo "==> check token was set" 9 | echo $TOKEN 10 | 11 | 12 | 13 | echo "--> Writing configuration" 14 | sudo mkdir -p /mnt/consul 15 | sudo mkdir -p /etc/consul.d 16 | 17 | echo "--> clean up any default config." 18 | sudo rm /etc/consul.d/* 19 | 20 | #"client_addr": "$(private_ip) 127.0.0.1", 21 | sudo tee /etc/consul.d/config.json > /dev/null < /etc/consul.d/acl.hcl 64 | acl = { 65 | enabled = true 66 | default_policy = "allow" 67 | enable_token_persistence = true 68 | tokens { 69 | initial_management = "${consul_master_token}" 70 | } 71 | down_policy = "extend-cache" 72 | } 73 | EOF 74 | 75 | 76 | echo "--> Writing profile" 77 | sudo tee /etc/profile.d/consul.sh > /dev/null <<"EOF" 78 | alias conslu="consul" 79 | alias ocnsul="consul" 80 | EOF 81 | source /etc/profile.d/consul.sh 82 | 83 | 84 | 85 | 86 | 87 | echo "--> Making consul.d world-writable..." 
88 | sudo chmod 0777 /etc/consul.d/ 89 | 90 | echo "--> Generating systemd configuration" 91 | sudo tee /etc/systemd/system/consul.service > /dev/null <<"EOF" 92 | [Unit] 93 | Description=Consul 94 | Documentation=https://www.consul.io/docs/ 95 | Requires=network-online.target 96 | After=network-online.target 97 | 98 | [Service] 99 | Restart=on-failure 100 | ExecStart=/usr/bin/consul agent -config-dir="/etc/consul.d" 101 | ExecReload=/bin/kill -HUP $MAINPID 102 | KillSignal=SIGINT 103 | #Enterprise License 104 | Environment=CONSUL_LICENSE=${consullicense} 105 | Environment=CONSUL_HTTP_TOKEN=${consul_master_token} 106 | 107 | [Install] 108 | WantedBy=multi-user.target 109 | EOF 110 | sudo systemctl enable consul 111 | sudo systemctl start consul 112 | 113 | export CONSUL_HTTP_ADDR=http://$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s http://169.254.169.254/latest/meta-data/local-ipv4):8500 114 | 115 | echo "--> setting up resolv.conf" 116 | ################################## 117 | ln -sf /run/systemd/resolve/resolv.conf /etc/resolv.conf 118 | 119 | mkdir /etc/systemd/resolved.conf.d 120 | touch /etc/systemd/resolved.conf.d/forward-consul-domains.conf 121 | 122 | IPV4=$(ec2metadata --local-ipv4) 123 | 124 | printf "[Resolve]\nDNS=127.0.0.1\nDomains=~consul\n" > /etc/systemd/resolved.conf.d/forward-consul-domains.conf 125 | 126 | iptables -t nat -A PREROUTING -p udp -m udp --dport 53 -j REDIRECT --to-ports 8600 127 | iptables -t nat -A PREROUTING -p tcp -m tcp --dport 53 -j REDIRECT --to-ports 8600 128 | iptables -t nat -A OUTPUT -d localhost -p udp -m udp --dport 53 -j REDIRECT --to-ports 8600 129 | iptables -t nat -A OUTPUT -d localhost -p tcp -m tcp --dport 53 -j REDIRECT --to-ports 8600 130 | 131 | systemctl daemon-reload 132 | systemctl restart systemd-resolved 133 | 134 | sleep 3 135 | 136 | ################################## 137 | 138 | echo "==> Consul is done!" 
139 | -------------------------------------------------------------------------------- /modules/templates/workers/ebs_volumes.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | echo "==> getting the aws metadata token" 4 | export TOKEN=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") 5 | 6 | echo "==> check token was set" 7 | echo $TOKEN 8 | 9 | echo "--> Configuring EBS mounts" 10 | 11 | # export NOMAD_ADDR=https://$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s http://169.254.169.254/latest/meta-data/local-ipv4):4646 12 | 13 | echo "--> Create EBS CSI plugin job" 14 | { 15 | sudo tee /etc/nomad.d/default_jobs/plugin-ebs-controller.nomad > /dev/null < CSI plugin job skipped" 50 | } 51 | echo "--> Create Nodes CSI plugin job" 52 | { 53 | sudo tee /etc/nomad.d/default_jobs/plugin-ebs-nodes.nomad > /dev/null < Nodes job skipped" 96 | } 97 | echo "--> Mysql" 98 | { 99 | sudo tee /etc/nomad.d/default_jobs/mysql_ebs_volume.hcl > /dev/null < Mysql failed, probably already done" 111 | } 112 | 113 | echo "--> Mongodb" 114 | { 115 | sudo tee /etc/nomad.d/default_jobs/mongodb_ebs_volume.hcl > /dev/null < MongoDB failed, probably already done" 129 | } 130 | 131 | echo "--> Prometheus" 132 | { 133 | sudo tee /etc/nomad.d/default_jobs/prometheus_ebs_volume.hcl > /dev/null < Prometheus failed, probably already done" 145 | } 146 | echo "--> Shared" 147 | { 148 | sudo tee /etc/nomad.d/default_jobs/shared_ebs_volume.hcl > /dev/null < Shared failed, probably already done" 160 | } 161 | 162 | if [ ${index} == ${count} ] 163 | then 164 | echo "--> last worker, lets do this" 165 | #### 166 | echo "--> Waiting for Nomad leader" 167 | while ! 
curl -s -k https://$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s http://169.254.169.254/latest/meta-data/local-ipv4):4646/v1/status/leader --show-error; do 168 | sleep 2 169 | done 170 | 171 | echo "--> Waiting for a list of Nomad peers" 172 | while ! curl -s -k https://$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -s http://169.254.169.254/latest/meta-data/local-ipv4):4646/v1/status/peers --show-error; do 173 | sleep 2 174 | done 175 | 176 | echo "--> Waiting for all Nomad servers" 177 | while [ "$(nomad server members 2>&1 | grep "alive" | wc -l)" -lt "${nomad_servers}" ]; do 178 | sleep 5 179 | done 180 | #### 181 | nomad run /etc/nomad.d/default_jobs/plugin-ebs-controller.nomad 182 | nomad run /etc/nomad.d/default_jobs/plugin-ebs-nodes.nomad 183 | 184 | sleep 5 185 | nomad volume register /etc/nomad.d/default_jobs/mongodb_ebs_volume.hcl 186 | else 187 | echo "--> not the last worker, skip" 188 | fi 189 | 190 | 191 | echo "==> Configuring EBS mounts is Done!" -------------------------------------------------------------------------------- /modules/templates/workers/nomad.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | echo "==> Nomad (client)" 3 | 4 | echo "==> getting the aws metadata token" 5 | export TOKEN=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") 6 | 7 | echo "==> check token was set" 8 | echo $TOKEN 9 | 10 | 11 | echo "--> Installing CNI plugin" 12 | sudo mkdir -p /opt/cni/bin/ 13 | export ARCH_CNI=$( [ $(uname -m) = aarch64 ] && echo arm64 || echo amd64) 14 | export CNI_PLUGIN_VERSION=${cni_version} 15 | sudo wget "https://github.com/containernetworking/plugins/releases/download/$${CNI_PLUGIN_VERSION}/cni-plugins-linux-$${ARCH_CNI}-$${CNI_PLUGIN_VERSION}".tgz && \ 16 | sudo tar -xzf cni-plugins-linux-$${ARCH_CNI}-v$${CNI_PLUGIN_VERSION}.tgz -C /opt/cni/bin/ 17 | 18 | export AWS_REGION=$(curl 
-H "X-aws-ec2-metadata-token: $TOKEN" -fsq http://169.254.169.254/latest/meta-data/placement/availability-zone | sed 's/[a-z]$//') 19 | export AWS_AZ=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" http://169.254.169.254/latest/meta-data/placement/availability-zone) 20 | 21 | echo "--> Installing" 22 | sudo mkdir -p /mnt/nomad 23 | sudo mkdir -p /etc/nomad.d/default_jobs 24 | 25 | echo "--> clean up any default config." 26 | sudo rm /etc/nomad.d/* 27 | 28 | echo "--> creating directories for host volumes" 29 | sudo mkdir -p /etc/nomad.d/host-volumes/wp-runner 30 | sudo mkdir -p /etc/nomad.d/host-volumes/wp-server 31 | 32 | 33 | sudo tee /etc/nomad.d/config.hcl > /dev/null < Writing profile" 120 | sudo tee /etc/profile.d/nomad.sh > /dev/null <<"EOF" 121 | alias noamd="nomad" 122 | alias nomas="nomad" 123 | alias nomda="nomad" 124 | export NOMAD_ADDR="https://${node_name}.node.consul:4646" 125 | export NOMAD_CACERT="/usr/local/share/ca-certificates/01-me.crt" 126 | export NOMAD_CLIENT_CERT="/etc/ssl/certs/me.crt" 127 | export NOMAD_CLIENT_KEY="/etc/ssl/certs/me.key" 128 | EOF 129 | source /etc/profile.d/nomad.sh 130 | 131 | echo "--> Generating upstart configuration" 132 | sudo tee /etc/systemd/system/nomad.service > /dev/null < Starting nomad" 151 | sudo systemctl enable nomad 152 | sudo systemctl start nomad 153 | 154 | echo "==> Run Nomad is Done!" 
155 | -------------------------------------------------------------------------------- /modules/templates/workers/user.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | echo "==> User" 3 | 4 | echo "--> Creating user" 5 | sudo useradd "${demo_username}" \ 6 | --shell /bin/bash \ 7 | --create-home 8 | echo "${demo_username}:${demo_password}" | sudo chpasswd 9 | sudo tee "/etc/sudoers.d/${demo_username}" > /dev/null <<"EOF" 10 | %${demo_username} ALL=NOPASSWD:ALL 11 | EOF 12 | sudo chmod 0440 "/etc/sudoers.d/${demo_username}" 13 | sudo usermod -a -G sudo "${demo_username}" 14 | sudo su "${demo_username}" \ 15 | -c "ssh-keygen -q -t rsa -N '' -b 4096 -f ~/.ssh/id_rsa -C training@hashicorp.com" 16 | sudo sed -i "/^PasswordAuthentication/c\PasswordAuthentication yes" /etc/ssh/sshd_config 17 | sudo systemctl restart ssh 18 | sudo su "${demo_username}" \ 19 | -c 'git config --global color.ui true' 20 | sudo su "${demo_username}" \ 21 | -c 'git config --global user.email "training@hashicorp.com"' 22 | sudo su ${demo_username} \ 23 | -c 'git config --global user.name "HashiCorp Demo"' 24 | sudo su ${demo_username} \ 25 | -c 'git config --global credential.helper "cache --timeout=3600"' 26 | sudo su ${demo_username} \ 27 | -c 'mkdir -p ~/.cache; touch ~/.cache/motd.legal-displayed; touch ~/.sudo_as_admin_successful' 28 | 29 | echo "--> Giving sudoless for Docker" 30 | sudo usermod -aG docker "${demo_username}" 31 | 32 | 33 | echo "--> Adding helper for identity retrieval" 34 | sudo tee /etc/profile.d/identity.sh > /dev/null <<"EOF" 35 | function identity { 36 | echo "${identity}" 37 | } 38 | EOF 39 | 40 | echo "--> Ignoring LastLog" 41 | sudo sed -i'' 's/PrintLastLog\ yes/PrintLastLog\ no/' /etc/ssh/sshd_config 42 | sudo systemctl restart ssh 43 | 44 | echo "--> Setting bash prompt" 45 | sudo tee -a "/home/${demo_username}/.bashrc" > /dev/null <<"EOF" 46 | export PS1="\u@\h:\w > " 47 | EOF 48 | 49 | echo "--> 
Installing Vim plugin for Terraform" 50 | # Pathogen bundle manager 51 | mkdir -p /home/${demo_username}/.vim/autoload /home/${demo_username}/.vim/bundle && curl -LSso /home/${demo_username}/.vim/autoload/pathogen.vim https://tpo.pe/pathogen.vim 52 | echo "execute pathogen#infect()" >> /home/${demo_username}/.vimrc 53 | # Terraform plugin 54 | cd /home/${demo_username}/.vim/bundle && git clone https://github.com/hashivim/vim-terraform.git 55 | # Airline plugin for vim statusbar 56 | git clone https://github.com/vim-airline/vim-airline /home/${demo_username}/.vim/bundle/vim-airline 57 | sudo chown -R ${demo_username}:${demo_username} /home/${demo_username}/.vim* 58 | 59 | echo "==> User is done!" 60 | -------------------------------------------------------------------------------- /modules/tfc.tf: -------------------------------------------------------------------------------- 1 | data "tfe_ip_ranges" "addresses" {} -------------------------------------------------------------------------------- /modules/tls.tf: -------------------------------------------------------------------------------- 1 | 2 | # Root private key 3 | resource "tls_private_key" "root" { 4 | algorithm = "ECDSA" 5 | ecdsa_curve = "P521" 6 | } 7 | 8 | # Root certificate 9 | resource "tls_self_signed_cert" "root" { 10 | private_key_pem = tls_private_key.root.private_key_pem 11 | 12 | subject { 13 | common_name = var.namespace 14 | organization = "Hashicorp Demos" 15 | } 16 | 17 | validity_period_hours = 8760 18 | 19 | allowed_uses = [ 20 | "cert_signing", 21 | "digital_signature", 22 | "crl_signing", 23 | ] 24 | 25 | is_ca_certificate = true 26 | } 27 | 28 | 29 | # Server private key 30 | resource "tls_private_key" "server" { 31 | count = var.servers 32 | algorithm = "ECDSA" 33 | ecdsa_curve = "P521" 34 | } 35 | 36 | # Server signing request 37 | resource "tls_cert_request" "server" { 38 | count = var.servers 39 | #key_algorithm = element(tls_private_key.server.*.algorithm, count.index) 40 | 
private_key_pem = element(tls_private_key.server.*.private_key_pem, count.index) 41 | 42 | subject { 43 | common_name = "${var.namespace}-server-${count.index}.node.consul" 44 | organization = "HashiCorp Demostack" 45 | } 46 | 47 | dns_names = [ 48 | # Consul 49 | "${var.namespace}-server-${count.index}.node.consul", 50 | "${var.namespace}-server-${count.index}.node.${var.region}.consul", 51 | 52 | "*.service.consul", 53 | "*.service.${var.region}.consul", 54 | "*.query.consul", 55 | "consul.service.consul", 56 | 57 | # Nomad 58 | "nomad.service.consul", 59 | "nomad.service.${var.region}.consul", 60 | 61 | 62 | "client.global.nomad", 63 | "server.global.nomad", 64 | 65 | # Vault 66 | "vault.service.consul", 67 | "vault.query.consul", 68 | "active.vault.service.consul", 69 | "active.vault.service.${var.region}.consul", 70 | "standby.vault.service.consul", 71 | "standby.vault.service.${var.region}.consul", 72 | "performance-standby.vault.service.consul", 73 | "performance-standby.vault.service.${var.region}.consul", 74 | 75 | # Common 76 | "localhost", 77 | "*.${var.namespace}.${data.aws_route53_zone.fdqn.name}", 78 | 79 | ] 80 | 81 | // ip_addresses = ["${aws_eip.server_ips.*.public_ip }"] 82 | } 83 | 84 | # Server certificate 85 | resource "tls_locally_signed_cert" "server" { 86 | count = var.servers 87 | cert_request_pem = element(tls_cert_request.server.*.cert_request_pem, count.index) 88 | #ca_key_algorithm = var.ca_key_algorithm 89 | ca_private_key_pem = tls_self_signed_cert.root.private_key_pem 90 | ca_cert_pem = tls_self_signed_cert.root.cert_pem 91 | 92 | validity_period_hours = 720 # 30 days 93 | 94 | allowed_uses = [ 95 | "client_auth", 96 | "digital_signature", 97 | "key_agreement", 98 | "key_encipherment", 99 | "server_auth", 100 | ] 101 | } 102 | 103 | # Vault initial root token 104 | resource "random_id" "vault-root-token" { 105 | byte_length = 8 106 | prefix = "${var.namespace}-" 107 | } 108 | 109 | # Client private key 110 | 111 | resource 
"tls_private_key" "workers" { 112 | count = var.workers 113 | algorithm = "ECDSA" 114 | ecdsa_curve = "P521" 115 | } 116 | 117 | # Client signing request 118 | resource "tls_cert_request" "workers" { 119 | count = var.workers 120 | #key_algorithm = element(tls_private_key.workers.*.algorithm, count.index) 121 | private_key_pem = element(tls_private_key.workers.*.private_key_pem, count.index) 122 | 123 | subject { 124 | common_name = "${var.namespace}-worker-${count.index}.node.consul" 125 | organization = "HashiCorp Demostack" 126 | } 127 | 128 | dns_names = [ 129 | # Consul 130 | "${var.namespace}-worker-${count.index}.node.consul", 131 | "${var.namespace}-worker-${count.index}.node.${var.region}.consul", 132 | 133 | "*.service.consul", 134 | "*.service.${var.region}.consul", 135 | "*.query.consul", 136 | "consul.service.consul", 137 | 138 | # Nomad 139 | "nomad.service.consul", 140 | "nomad.service.${var.region}.consul", 141 | 142 | "client.global.nomad", 143 | "server.global.nomad", 144 | 145 | # Vault 146 | "vault.service.consul", 147 | "vault.query.consul", 148 | "active.vault.service.consul", 149 | "active.vault.service.${var.region}.consul", 150 | "standby.vault.service.consul", 151 | "standby.vault.service.${var.region}.consul", 152 | "performance-standby.vault.service.consul", 153 | "performance-standby.vault.service.${var.region}.consul", 154 | 155 | # Common 156 | "localhost", 157 | "*.${var.namespace}.${data.aws_route53_zone.fdqn.name}", 158 | ] 159 | 160 | /* 161 | ip_addresses = [ 162 | "127.0.0.1", 163 | ] 164 | */ 165 | // ip_addresses = ["${aws_eip.server_ips.*.public_ip }"] 166 | } 167 | 168 | # Client certificate 169 | 170 | resource "tls_locally_signed_cert" "workers" { 171 | count = var.workers 172 | cert_request_pem = element(tls_cert_request.workers.*.cert_request_pem, count.index) 173 | 174 | #ca_key_algorithm = var.ca_key_algorithm 175 | ca_private_key_pem = tls_self_signed_cert.root.private_key_pem 176 | ca_cert_pem = 
tls_self_signed_cert.root.cert_pem 177 | 178 | validity_period_hours = 720 # 30 days 179 | 180 | allowed_uses = [ 181 | "client_auth", 182 | "digital_signature", 183 | "key_agreement", 184 | "key_encipherment", 185 | "server_auth", 186 | ] 187 | } 188 | 189 | 190 | // ALB certs 191 | resource "aws_acm_certificate" "cert" { 192 | domain_name = "*.${var.namespace}.${data.aws_route53_zone.fdqn.name}" 193 | validation_method = "DNS" 194 | 195 | 196 | lifecycle { 197 | create_before_destroy = true 198 | } 199 | } 200 | 201 | /* 202 | resource "aws_route53_record" "validation_record" { 203 | name = aws_acm_certificate.cert.domain_validation_options.0.resource_record_name 204 | type = aws_acm_certificate.cert.domain_validation_options.0.resource_record_type 205 | zone_id = var.zone_id 206 | records = [aws_acm_certificate.cert.domain_validation_options.0.resource_record_value] 207 | ttl = "60" 208 | allow_overwrite = true 209 | 210 | lifecycle { 211 | create_before_destroy = true 212 | } 213 | } 214 | */ 215 | # "${element(azurerm_public_ip.example.*.id, count.index)}" 216 | resource "aws_route53_record" "validation_record" { 217 | name = "${element(aws_acm_certificate.cert.domain_validation_options.*.resource_record_name, 0)}" 218 | type = "${element(aws_acm_certificate.cert.domain_validation_options.*.resource_record_type, 0)}" 219 | zone_id = var.zone_id 220 | records = ["${element(aws_acm_certificate.cert.domain_validation_options.*.resource_record_value, 0)}"] 221 | ttl = "60" 222 | allow_overwrite = true 223 | 224 | lifecycle { 225 | create_before_destroy = true 226 | } 227 | } 228 | 229 | 230 | resource "aws_acm_certificate_validation" "cert" { 231 | certificate_arn = aws_acm_certificate.cert.arn 232 | validation_record_fqdns = [ 233 | aws_route53_record.validation_record.fqdn, 234 | ] 235 | } -------------------------------------------------------------------------------- /modules/traefik.tf: 
-------------------------------------------------------------------------------- 1 | resource "aws_alb" "traefik" { 2 | name = "${var.namespace}-traefik" 3 | 4 | security_groups = [aws_security_group.demostack.id] 5 | subnets = aws_subnet.demostack.*.id 6 | 7 | tags = local.common_tags 8 | } 9 | 10 | resource "aws_alb_target_group" "traefik" { 11 | name = "${var.namespace}-traefik" 12 | port = "8080" 13 | vpc_id = aws_vpc.demostack.id 14 | protocol = "HTTP" 15 | 16 | health_check { 17 | interval = "5" 18 | timeout = "2" 19 | path = "/ping" 20 | port = "8080" 21 | protocol = "HTTP" 22 | healthy_threshold = 2 23 | matcher = 200 24 | } 25 | } 26 | 27 | resource "aws_alb_target_group" "traefik-ui" { 28 | name = "${var.namespace}-traefik-ui" 29 | port = "8081" 30 | vpc_id = aws_vpc.demostack.id 31 | protocol = "HTTP" 32 | 33 | health_check { 34 | interval = "5" 35 | timeout = "2" 36 | path = "/ping" 37 | port = "8080" 38 | protocol = "HTTP" 39 | healthy_threshold = 2 40 | matcher = 200 41 | } 42 | } 43 | 44 | resource "aws_alb_listener" "traefik" { 45 | load_balancer_arn = aws_alb.traefik.arn 46 | 47 | port = "8080" 48 | protocol = "HTTP" 49 | 50 | default_action { 51 | target_group_arn = aws_alb_target_group.traefik.arn 52 | type = "forward" 53 | } 54 | } 55 | 56 | resource "aws_alb_listener" "traefik-ui" { 57 | load_balancer_arn = aws_alb.traefik.arn 58 | 59 | port = "8081" 60 | protocol = "HTTP" 61 | 62 | default_action { 63 | target_group_arn = aws_alb_target_group.traefik-ui.arn 64 | type = "forward" 65 | } 66 | } 67 | 68 | resource "aws_alb_target_group_attachment" "traefik-workers" { 69 | count = var.workers 70 | target_group_arn = aws_alb_target_group.traefik.arn 71 | target_id = aws_instance.workers[count.index].id 72 | port = "8080" 73 | } 74 | 75 | resource "aws_alb_target_group_attachment" "traefik-ui-workers" { 76 | count = var.workers 77 | target_group_arn = aws_alb_target_group.traefik-ui.arn 78 | target_id = aws_instance.workers[count.index].id 79 | port 
= "8081" 80 | } 81 | 82 | resource "aws_alb_target_group_attachment" "traefik-servers" { 83 | count = var.servers 84 | target_group_arn = aws_alb_target_group.traefik.arn 85 | target_id = aws_instance.servers[count.index].id 86 | port = "8080" 87 | } 88 | 89 | resource "aws_alb_target_group_attachment" "traefik-ui-servers" { 90 | count = var.servers 91 | target_group_arn = aws_alb_target_group.traefik-ui.arn 92 | target_id = aws_instance.servers[count.index].id 93 | port = "8081" 94 | } 95 | -------------------------------------------------------------------------------- /modules/variables.tf: -------------------------------------------------------------------------------- 1 | 2 | locals { 3 | # Common tags to be assigned to all resources 4 | common_tags = { 5 | Name = var.namespace 6 | owner = var.owner 7 | created-by = var.created-by 8 | sleep-at-night = var.sleep-at-night 9 | ttl = var.TTL 10 | se-region = var.region 11 | terraform = true 12 | purpose = "SE Demostack" 13 | } 14 | } 15 | 16 | 17 | variable "region" { 18 | description = "The region to create resources." 19 | default = "eu-west-2" 20 | } 21 | 22 | variable "namespace" { 23 | description = <