├── .gitignore ├── LICENSE ├── README.md ├── aws-client-vpn ├── .terraform.lock.hcl ├── README.md ├── ec2.tf ├── iam.tf ├── locals.tf ├── main.tf ├── networking.tf ├── outputs.tf └── versions.tf ├── aws-highly-available-nat-gateway-setup ├── .terraform.lock.hcl ├── README.md ├── data.tf ├── get_public_ip.sh ├── instances.tf ├── internet_gateway.tf ├── main.tf ├── nat.tf ├── outputs.tf ├── route_tables.tf ├── security_groups.tf ├── subnets.tf └── vpc.tf ├── aws-nacl-basics ├── .terraform.lock.hcl ├── README.md ├── data.tf ├── get_public_ip.sh ├── instance.tf ├── internet_gateway.tf ├── main.tf ├── nacls.tf ├── outputs.tf ├── security_groups.tf ├── subnets.tf └── vpc.tf ├── aws-secrets-manager ├── .terraform.lock.hcl ├── README.md ├── bastion.tf ├── get_public_ip.sh ├── main.tf ├── outputs.tf ├── rds.tf └── variables.tf ├── aws-ssm-ec2-connect ├── .terraform.lock.hcl ├── README.md ├── data.tf ├── locals.tf ├── main.tf ├── outputs.tf ├── provider.tf └── variables.tf ├── eks-cluster-without-module ├── README.md └── terraform │ ├── .terraform.lock.hcl │ ├── 0-locals.tf │ ├── 1-provider.tf │ ├── 2-vpc.tf │ ├── 3-igw.tf │ ├── 4-subnets.tf │ ├── 5-nat.tf │ ├── 6-routes.tf │ ├── 7-eks.tf │ └── 8-nodes.tf ├── eks-with-managed-node-group ├── .terraform.lock.hcl ├── 01-vpc.tf ├── 02-eks.tf ├── README.md ├── outputs.tf └── provider.tf ├── example-rpc-call-to-ec2 ├── .terraform.lock.hcl ├── README.md ├── client.py ├── ec2.tf ├── get_public_ip.sh ├── main.tf ├── outputs.tf ├── server.py └── test_add_numbers.py ├── img ├── aws_logo.png ├── diagram-ssm.png ├── diagram_rds_bastion.png ├── eks_access.png ├── eks_node.png ├── gitea_aws.png ├── highly-available-nat-gw.png ├── kubernetes-cluster-architecture.svg ├── nacl_scheme.png ├── nat_instances_scheme.png ├── private_ip.png ├── route.png ├── rpc_call.png ├── single_nat.png ├── subnet_image.png ├── teamcity_logo.png ├── terraform-get-secrets-from-secrets-manager.png ├── terraform-logo.png ├── vpc_for_eks.png ├── vpc_resource_map.png 
└── vpn_diagram.png ├── nat-instance-setup-for-nonprod ├── .terraform.lock.hcl ├── README.md ├── bastion.tf ├── configure_nat.sh ├── data.tf ├── disable_source_dest_check.sh ├── get_public_ip.sh ├── main.tf ├── outputs.tf └── versions.tf ├── nginx-webserver-ec2 ├── README.md ├── ec2-instance │ ├── .terraform.lock.hcl │ ├── main.tf │ └── sg.tf └── ssh-key │ ├── .terraform.lock.hcl │ └── key_pair.tf ├── s3-dynamodb-backend ├── .terraform.lock.hcl ├── README.md ├── main.tf └── outputs.tf ├── secrets-mgmt-with-sops ├── .sops.yaml ├── .terraform.lock.hcl ├── README.md ├── data.tf ├── main.tf ├── resources.tf ├── secrets.enc.json └── variables.tf ├── standard-vpc-for-eks ├── README.md └── terraform │ ├── .terraform.lock.hcl │ ├── 0-locals.tf │ ├── 1-provider.tf │ ├── 2-vpc.tf │ ├── 3-igw.tf │ ├── 4-subnets.tf │ ├── 5-nat.tf │ └── 6-routes.tf └── teamcity-on-aws ├── README.md ├── main.tf ├── outputs.tf └── ssh_key.tf /.gitignore: -------------------------------------------------------------------------------- 1 | # Local .terraform directories 2 | **/.terraform/* 3 | 4 | # .tfstate files 5 | *.tfstate 6 | *.tfstate.* 7 | 8 | # Crash log files 9 | crash.log 10 | crash.*.log 11 | 12 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as 13 | # password, private keys, and other secrets. These should not be part of version 14 | # control as they are data points which are potentially sensitive and subject 15 | # to change depending on the environment. 
16 | *.tfvars 17 | *.tfvars.json 18 | 19 | # Ignore override files as they are usually used to override resources locally and so 20 | # are not checked in 21 | override.tf 22 | override.tf.json 23 | *_override.tf 24 | *_override.tf.json 25 | 26 | # Include override files you do wish to add to version control using negated pattern 27 | # !example_override.tf 28 | 29 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 30 | # example: *tfplan* 31 | 32 | # Ignore CLI configuration files 33 | .terraformrc 34 | terraform.rc 35 | 36 | # Byte-compiled / optimized / DLL files 37 | __pycache__/ 38 | *.py[cod] 39 | *$py.class 40 | 41 | # C extensions 42 | *.so 43 | 44 | # Distribution / packaging 45 | .Python 46 | build/ 47 | develop-eggs/ 48 | dist/ 49 | downloads/ 50 | eggs/ 51 | .eggs/ 52 | lib/ 53 | lib64/ 54 | parts/ 55 | sdist/ 56 | var/ 57 | wheels/ 58 | share/python-wheels/ 59 | *.egg-info/ 60 | .installed.cfg 61 | *.egg 62 | MANIFEST 63 | 64 | # PyInstaller 65 | # Usually these files are written by a python script from a template 66 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
67 | *.manifest 68 | *.spec 69 | 70 | # Installer logs 71 | pip-log.txt 72 | pip-delete-this-directory.txt 73 | 74 | # Unit test / coverage reports 75 | htmlcov/ 76 | .tox/ 77 | .nox/ 78 | .coverage 79 | .coverage.* 80 | .cache 81 | nosetests.xml 82 | coverage.xml 83 | *.cover 84 | *.py,cover 85 | .hypothesis/ 86 | .pytest_cache/ 87 | cover/ 88 | 89 | # Translations 90 | *.mo 91 | *.pot 92 | 93 | # Django stuff: 94 | *.log 95 | local_settings.py 96 | db.sqlite3 97 | db.sqlite3-journal 98 | 99 | # Flask stuff: 100 | instance/ 101 | .webassets-cache 102 | 103 | # Scrapy stuff: 104 | .scrapy 105 | 106 | # Sphinx documentation 107 | docs/_build/ 108 | 109 | # PyBuilder 110 | .pybuilder/ 111 | target/ 112 | 113 | # Jupyter Notebook 114 | .ipynb_checkpoints 115 | 116 | # IPython 117 | profile_default/ 118 | ipython_config.py 119 | 120 | # pyenv 121 | # For a library or package, you might want to ignore these files since the code is 122 | # intended to run in multiple environments; otherwise, check them in: 123 | # .python-version 124 | 125 | # pipenv 126 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 127 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 128 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 129 | # install all needed dependencies. 130 | #Pipfile.lock 131 | 132 | # poetry 133 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 134 | # This is especially recommended for binary packages to ensure reproducibility, and is more 135 | # commonly ignored for libraries. 136 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 137 | #poetry.lock 138 | 139 | # pdm 140 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
141 | #pdm.lock 142 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 143 | # in version control. 144 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control 145 | .pdm.toml 146 | .pdm-python 147 | .pdm-build/ 148 | 149 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 150 | __pypackages__/ 151 | 152 | # Celery stuff 153 | celerybeat-schedule 154 | celerybeat.pid 155 | 156 | # SageMath parsed files 157 | *.sage.py 158 | 159 | # Environments 160 | .env 161 | .venv 162 | env/ 163 | venv/ 164 | ENV/ 165 | env.bak/ 166 | venv.bak/ 167 | 168 | # Spyder project settings 169 | .spyderproject 170 | .spyproject 171 | 172 | # Rope project settings 173 | .ropeproject 174 | 175 | # mkdocs documentation 176 | /site 177 | 178 | # mypy 179 | .mypy_cache/ 180 | .dmypy.json 181 | dmypy.json 182 | 183 | # Pyre type checker 184 | .pyre/ 185 | 186 | # pytype static type analyzer 187 | .pytype/ 188 | 189 | # Cython debug symbols 190 | cython_debug/ 191 | 192 | # PyCharm 193 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 194 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 195 | # and can be added to the global gitignore or merged into this file. For a more nuclear 196 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
197 | #.idea/ 198 | 199 | # SSH keys 200 | ssh-key-* 201 | 202 | # terragrunt cache directories 203 | **/.terragrunt-cache/* 204 | 205 | # Terragrunt debug output file (when using `--terragrunt-debug` option) 206 | # See: https://terragrunt.gruntwork.io/docs/reference/cli-options/#terragrunt-debug 207 | terragrunt-debug.tfvars.json 208 | 209 | # Packer Cache objects 210 | packer_cache/ 211 | 212 | # Packer Crash log 213 | crash.log 214 | 215 | # https://www.packer.io/guides/hcl/variables 216 | # Exclude all .pkrvars.hcl files, which are likely to contain sensitive data, 217 | # such as password, private keys, and other secrets. These should not be part of 218 | # version control as they are data points which are potentially sensitive and 219 | # subject to change depending on the environment. 220 | # 221 | *.pkrvars.hcl 222 | 223 | # For built packer boxes 224 | *.box -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) [year] [fullname] 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # terraform-cookbook 2 | 3 | ![](./img/terraform-logo.png) 4 | 5 | [HashiCorp Terraform](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli) is an infrastructure as code tool that lets you define both cloud and on-prem resources in human-readable configuration files that you can version, 6 | reuse, and share. You can then use a consistent workflow to provision and manage all of your infrastructure throughout its lifecycle. 7 | Terraform can manage low-level components like compute, storage, and networking resources, as well as high-level components like 8 | DNS entries and SaaS features. 
9 | 10 | ## Recipes 11 | 12 | - [Deploy single EC2 instance with SSH keys generated and NGINX web server installed](./nginx-webserver-ec2/) 13 | - [TeamCity deployed on AWS](./teamcity-on-aws/) 14 | - [Terraform remote state S3 backend and DynamoDB for state locking](./s3-dynamodb-backend/) 15 | - [Use open-source SOPS tool to handle sensitive values in Terraform code](./secrets-mgmt-with-sops/) 16 | - [Use AWS Secrets Manager to handle sensitive values in Terraform code](./aws-secrets-manager/) 17 | - [Example of making Remote Procedure Call to a service running on a remote EC2 instance](./example-rpc-call-to-ec2/) 18 | - [Use AWS SSM Session Manager to connect to EC2 instance in private subnet](./aws-ssm-ec2-connect/) 19 | - [Provision AWS Client VPN to access private resources in VPC](./aws-client-vpn/) 20 | - [Provision NAT instance for non-prod workloads](./nat-instance-setup-for-nonprod/) 21 | - [Example setup of AWS NAT Gateway with high availability](./aws-highly-available-nat-gateway-setup/) 22 | - [Basics of AWS NACLs and Security Groups](./aws-nacl-basics/) 23 | - [EKS with Managed Node Groups](./eks-with-managed-node-group/) 24 | - [Provision Standard Type of VPC for EKS without using modules](./standard-vpc-for-eks/) 25 | - [Provision EKS with Managed Node Groups without using modules](./eks-cluster-without-module/) -------------------------------------------------------------------------------- /aws-client-vpn/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "5.78.0" 6 | constraints = ">= 5.46.0" 7 | hashes = [ 8 | "h1:OUmta/bL/0S6g4K/Mn1LBkEnMWNCq4dLsfYrdrllcEo=", 9 | "zh:0ae7d41b96441d0cf7ce2e1337657bdb2e1e5c9f1c2227b0642e1dcec2f9dfba", 10 | "zh:21f8f1edf477681ea3b095c02cad6b8e85262e45015de58e84e0c7b2bfe9a1f6", 11 | "zh:2bdc335e341bf98445255549ae93d66cfb9bca706e62b949da98fe467c182cad", 12 | "zh:2fe4096e260367a225a9faf4a424d62b87e5498f12cb43bdb6f4e713d11b82c3", 13 | "zh:3c63bb7a7925d65118d17461f4691a22dbb55ea39a7404e4d71f6ccca8765f8b", 14 | "zh:6609a28a1c638a1901d8007b5386868ccfd313b4df2e98b35d9fdef436974e3b", 15 | "zh:7ae3aef43bc4b365824cca4659cf92459d766800656e354bdbf83feabab835e8", 16 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 17 | "zh:c314efe454adc6ca483261c6906e64315aeb9db0c0332818714e9b81e07df0f0", 18 | "zh:cd3e30396b554bbc1d260252db8a0f344065d619038fe60ea870689cd32c6aa9", 19 | "zh:d1ba48fd9d8a1cb1daa927fb9e8bb708b857f2792d796e110460c6fdcd896a47", 20 | "zh:d31c8abe75cb9cdc1c59ad9d356a1c3ae1ba8cd29ac15eb7e01b6cd01221ab04", 21 | "zh:dc27c5c2116b4d9b404753f73bccaa635bce21f3bfb4bb7bc8e63225c36c98fe", 22 | "zh:de491f0d05408378413187475c815d8cb2ac6bfa63d0b42a30ad5ee492e51c07", 23 | "zh:eb44b45a40f80a309dd5b0eb7d7fcb2cbfe588fe2f18b173ef5851346898a662", 24 | ] 25 | } 26 | 27 | provider "registry.terraform.io/hashicorp/tls" { 28 | version = "4.0.5" 29 | constraints = "4.0.5" 30 | hashes = [ 31 | "h1:e4LBdJoZJNOQXPWgOAG0UuPBVhCStu98PieNlqJTmeU=", 32 | "zh:01cfb11cb74654c003f6d4e32bbef8f5969ee2856394a96d127da4949c65153e", 33 | "zh:0472ea1574026aa1e8ca82bb6df2c40cd0478e9336b7a8a64e652119a2fa4f32", 34 | "zh:1a8ddba2b1550c5d02003ea5d6cdda2eef6870ece86c5619f33edd699c9dc14b", 35 | "zh:1e3bb505c000adb12cdf60af5b08f0ed68bc3955b0d4d4a126db5ca4d429eb4a", 36 | "zh:6636401b2463c25e03e68a6b786acf91a311c78444b1dc4f97c539f9f78de22a", 37 | "zh:76858f9d8b460e7b2a338c477671d07286b0d287fd2d2e3214030ae8f61dd56e", 38 | 
"zh:a13b69fb43cb8746793b3069c4d897bb18f454290b496f19d03c3387d1c9a2dc", 39 | "zh:a90ca81bb9bb509063b736842250ecff0f886a91baae8de65c8430168001dad9", 40 | "zh:c4de401395936e41234f1956ebadbd2ed9f414e6908f27d578614aaa529870d4", 41 | "zh:c657e121af8fde19964482997f0de2d5173217274f6997e16389e7707ed8ece8", 42 | "zh:d68b07a67fbd604c38ec9733069fbf23441436fecf554de6c75c032f82e1ef19", 43 | "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", 44 | ] 45 | } 46 | -------------------------------------------------------------------------------- /aws-client-vpn/README.md: -------------------------------------------------------------------------------- 1 | # Implement AWS Client VPN to connect to private EC2 instance 2 | 3 | This project shows how to provision [AWS Client VPN service](https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/what-is.html) to connect to private resources in VPC. In this example, we're connecting to an EC2 instance in a private subnet. VPC is custom defined and do not have any public subnets, IGW or Elastic IPs for outside world Internet connection. 4 | 5 | ![](../img/vpn_diagram.png) 6 | 7 | AWS Client VPN provisioned in [mutual authentication mode](https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/mutual.html), meaning Client VPN uses certificates to perform authentication between the client and the server. It also means that [no self-service portal can be provisioned when mutual authentication is used](https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/cvpn-self-service-portal.html). 8 | 9 | ## Client VPN 10 | 11 | AWS Client VPN provisioned via the following module: [https://registry.terraform.io/modules/babicamir/vpn-client/aws/latest](https://registry.terraform.io/modules/babicamir/vpn-client/aws/latest) 12 | 13 | This module creates a separate private S3 bucket, where it stores generated `.ovpn` OpenVPN configuration files for connection. 
14 | 15 | Set the `split_tunnel` attribute to `true` if you want to be able to make other connections in parallel with VPN (Internet, etc). Otherwise set it to `false` to keep connection only to VPN tunnel. For more information, see [Split-tunnel on Client VPN endpoints](https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/split-tunnel-vpn.html) 16 | 17 | ## S3 bucket with OpenVPN config files 18 | 19 | Use these commands to list and download your OpenVPN config files. 20 | 21 | To list all your S3 buckets: 22 | ```bash 23 | aws s3api list-buckets --query "Buckets[].Name" --output table 24 | ``` 25 | 26 | To list objects in S3 bucket: 27 | ```bash 28 | aws s3api list-objects --bucket --prefix 29 | ``` 30 | 31 | To download specific object from S3 bucket: 32 | ```bash 33 | aws s3 cp s3:/// 34 | ``` 35 | 36 | ## Connect to VPN 37 | 38 | After you download the OpenVPN config file, connect to VPN via: 39 | ```bash 40 | sudo openvpn --config .ovpn 41 | ``` 42 | 43 | **NOTE** 44 | For OpenVPN installation on Ubuntu, see [How to install and use OpenVPN](https://ubuntu.com/server/docs/how-to-install-and-use-openvpn#simple-client-configuration) 45 | 46 | ## Verify VPN connection 47 | 48 | For connection verification, ensure that you have installed: 49 | ```bash 50 | sudo apt install net-tools 51 | sudo apt install traceroute 52 | ``` 53 | 54 | To check the VPN connection route: 55 | ```bash 56 | route -n 57 | ``` 58 | 59 | ![](../img/route.png) 60 | 61 | To see the hops: 62 | ```bash 63 | traceroute 64 | ``` 65 | 66 | Ping EC2: 67 | ```bash 68 | ping 69 | ``` 70 | 71 | To connect to web server running on EC2 port 80: 72 | ```bash 73 | curl 74 | ``` 75 | 76 | ![](../img/private_ip.png) 77 | 78 | ## Troubleshooting 79 | 80 | EC2 instance is configured with SSM connection. This allows connecting to private EC2 instance without using any SSH keys and public endpoints. 
For more information, see [github.com/Brain2life/terraform-cookbook/tree/main/aws-ssm-ec2-connect](https://github.com/Brain2life/terraform-cookbook/tree/main/aws-ssm-ec2-connect) 81 | 82 | To connect to EC2 instance: 83 | ```bash 84 | aws ssm start-session --target 85 | ``` 86 | 87 | To find out EC2 instance ID: 88 | ```bash 89 | aws ec2 describe-instances \ 90 | --region us-east-1 \ 91 | --filters "Name=instance-state-name,Values=running" \ 92 | --query "Reservations[*].Instances[*].InstanceId" \ 93 | --output text 94 | ``` 95 | 96 | General troubleshooting steps: 97 | - Check SG configuration if it allows web and ICMP traffic 98 | - Check if Bash script for deploying web service is running 99 | - Check if VPC and Subnet ID's are specified correctly 100 | - Check if client's CIDR range do not overlap with target VPC's CIDR range -------------------------------------------------------------------------------- /aws-client-vpn/ec2.tf: -------------------------------------------------------------------------------- 1 | # EC2 instance definition 2 | resource "aws_instance" "nginx_instance" { 3 | ami = "ami-0866a3c8686eaeeba" # Ubuntu 24.04 us-east-1 4 | instance_type = "t2.micro" 5 | iam_instance_profile = aws_iam_instance_profile.ssm_instance_profile.name 6 | network_interface { 7 | network_interface_id = aws_network_interface.private_subnet_interface.id 8 | device_index = 0 9 | } 10 | 11 | 12 | 13 | tags = { 14 | Name = "nginx-instance" 15 | } 16 | 17 | # User data to install Nginx and serve custom HTML 18 | user_data = <<-EOF 19 | #!/bin/bash 20 | mkdir -p /var/www/html 21 | INSTANCE_PRIVATE_IP=$(TOKEN=`curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600"` && curl -H "X-aws-ec2-metadata-token: $TOKEN" -s http://169.254.169.254/latest/meta-data/local-ipv4) 22 | echo "

EC2 instance private IP is $INSTANCE_PRIVATE_IP

" > /var/www/html/index.html 23 | cd /var/www/html 24 | nohup python3 -m http.server 80 > /var/log/python-http-server.log 2>&1 & 25 | EOF 26 | } -------------------------------------------------------------------------------- /aws-client-vpn/iam.tf: -------------------------------------------------------------------------------- 1 | # Create IAM Role 2 | resource "aws_iam_role" "ssm_role" { 3 | name = "ec2-ssm-role" 4 | 5 | assume_role_policy = jsonencode({ 6 | Version = "2012-10-17", 7 | Statement = [ 8 | { 9 | Action = "sts:AssumeRole", 10 | Effect = "Allow", 11 | Principal = { 12 | Service = "ec2.amazonaws.com" 13 | } 14 | } 15 | ] 16 | }) 17 | 18 | tags = { 19 | Name = "EC2-SSM-Role" 20 | } 21 | } 22 | 23 | # Attach the AmazonSSMManagedInstanceCore Policy 24 | resource "aws_iam_role_policy_attachment" "ssm_managed_policy" { 25 | role = aws_iam_role.ssm_role.name 26 | policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" 27 | } 28 | 29 | # Create IAM Instance Profile 30 | resource "aws_iam_instance_profile" "ssm_instance_profile" { 31 | name = "ec2-ssm-instance-profile" 32 | role = aws_iam_role.ssm_role.name 33 | } -------------------------------------------------------------------------------- /aws-client-vpn/locals.tf: -------------------------------------------------------------------------------- 1 | # Define reusable locals 2 | locals { 3 | vpc_id = aws_vpc.custom_vpc.id 4 | subnet_id = aws_subnet.private_subnet.id 5 | 6 | services = { 7 | "ec2messages" : { 8 | "name" : "com.amazonaws.us-east-1.ec2messages" 9 | }, 10 | "ssm" : { 11 | "name" : "com.amazonaws.us-east-1.ssm" 12 | }, 13 | "ssmmessages" : { 14 | "name" : "com.amazonaws.us-east-1.ssmmessages" 15 | } 16 | } 17 | } -------------------------------------------------------------------------------- /aws-client-vpn/main.tf: -------------------------------------------------------------------------------- 1 | # Provision Client VPN 2 | module "vpn-client" { 3 | source = 
"babicamir/vpn-client/aws" 4 | version = "1.0.1" 5 | organization_name = "MinionEnterprise" 6 | project-name = "Fun" 7 | environment = "dev" 8 | # Network information 9 | vpc_id = local.vpc_id 10 | subnet_id = local.subnet_id 11 | client_cidr_block = "172.0.0.0/22" # It must be different from the primary VPC CIDR 12 | # VPN config options 13 | split_tunnel = "true" # 'true' - set if you want Internet and other network connections to be available, otherwise restrict only to VPN connection 14 | vpn_inactive_period = "300" # seconds 15 | session_timeout_hours = "8" # Expected values 8, 10, 12, 24h 16 | logs_retention_in_days = "7" 17 | # List of users to be created 18 | aws-vpn-client-list = ["root", "devs"] #Do not delete "root" user! 19 | } -------------------------------------------------------------------------------- /aws-client-vpn/networking.tf: -------------------------------------------------------------------------------- 1 | # VPC definition 2 | resource "aws_vpc" "custom_vpc" { 3 | cidr_block = "10.0.0.0/16" 4 | enable_dns_support = true 5 | enable_dns_hostnames = true 6 | tags = { 7 | Name = "custom-private-vpc" 8 | } 9 | } 10 | 11 | # Private subnet definition 12 | resource "aws_subnet" "private_subnet" { 13 | vpc_id = aws_vpc.custom_vpc.id 14 | cidr_block = "10.0.1.0/24" 15 | map_public_ip_on_launch = false 16 | tags = { 17 | Name = "private-subnet" 18 | } 19 | } 20 | 21 | # Security group definition 22 | resource "aws_security_group" "ec2_sg" { 23 | name = "ec2-sg" 24 | description = "Allow ICMP and Web traffic" 25 | vpc_id = aws_vpc.custom_vpc.id 26 | 27 | ingress { 28 | from_port = -1 29 | to_port = -1 30 | protocol = "icmp" 31 | cidr_blocks = ["0.0.0.0/0"] 32 | } 33 | 34 | # Allow SSM HTTPS inbount traffic 35 | ingress { 36 | from_port = 443 37 | to_port = 443 38 | protocol = "tcp" 39 | cidr_blocks = ["0.0.0.0/0"] 40 | } 41 | 42 | ingress { 43 | from_port = 80 44 | to_port = 80 45 | protocol = "tcp" 46 | cidr_blocks = ["0.0.0.0/0"] 47 | } 48 | 49 | 
egress { 50 | from_port = 0 51 | to_port = 0 52 | protocol = "-1" 53 | cidr_blocks = ["0.0.0.0/0"] 54 | } 55 | 56 | tags = { 57 | Name = "ec2-sg" 58 | } 59 | } 60 | 61 | # Define network interface to connect to private subnet 62 | resource "aws_network_interface" "private_subnet_interface" { 63 | subnet_id = aws_subnet.private_subnet.id 64 | private_ips = ["10.0.1.86"] # Define private IP 65 | 66 | security_groups = [ 67 | aws_security_group.ec2_sg.id 68 | ] 69 | 70 | tags = { 71 | Name = "primary-network-interface" 72 | } 73 | } 74 | 75 | # Define VPC endpoint for SSM connection 76 | resource "aws_vpc_endpoint" "ssm_endpoint" { 77 | for_each = local.services 78 | vpc_id = aws_vpc.custom_vpc.id 79 | 80 | service_name = each.value.name 81 | vpc_endpoint_type = "Interface" 82 | security_group_ids = [aws_security_group.ec2_sg.id] 83 | private_dns_enabled = true 84 | ip_address_type = "ipv4" 85 | subnet_ids = [aws_subnet.private_subnet.id] 86 | } 87 | -------------------------------------------------------------------------------- /aws-client-vpn/outputs.tf: -------------------------------------------------------------------------------- 1 | # Output EC2 private IP 2 | output "ec2_private_ip" { 3 | value = aws_instance.nginx_instance.private_ip 4 | } -------------------------------------------------------------------------------- /aws-client-vpn/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = ">= 5.46.0" 6 | } 7 | } 8 | } 9 | 10 | provider "aws" { 11 | region = "us-east-1" 12 | } -------------------------------------------------------------------------------- /aws-highly-available-nat-gateway-setup/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "4.67.0" 6 | constraints = "~> 4.0" 7 | hashes = [ 8 | "h1:dCRc4GqsyfqHEMjgtlM1EympBcgTmcTkWaJmtd91+KA=", 9 | "zh:0843017ecc24385f2b45f2c5fce79dc25b258e50d516877b3affee3bef34f060", 10 | "zh:19876066cfa60de91834ec569a6448dab8c2518b8a71b5ca870b2444febddac6", 11 | "zh:24995686b2ad88c1ffaa242e36eee791fc6070e6144f418048c4ce24d0ba5183", 12 | "zh:4a002990b9f4d6d225d82cb2fb8805789ffef791999ee5d9cb1fef579aeff8f1", 13 | "zh:559a2b5ace06b878c6de3ecf19b94fbae3512562f7a51e930674b16c2f606e29", 14 | "zh:6a07da13b86b9753b95d4d8218f6dae874cf34699bca1470d6effbb4dee7f4b7", 15 | "zh:768b3bfd126c3b77dc975c7c0e5db3207e4f9997cf41aa3385c63206242ba043", 16 | "zh:7be5177e698d4b547083cc738b977742d70ed68487ce6f49ecd0c94dbf9d1362", 17 | "zh:8b562a818915fb0d85959257095251a05c76f3467caa3ba95c583ba5fe043f9b", 18 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 19 | "zh:9c385d03a958b54e2afd5279cd8c7cbdd2d6ca5c7d6a333e61092331f38af7cf", 20 | "zh:b3ca45f2821a89af417787df8289cb4314b273d29555ad3b2a5ab98bb4816b3b", 21 | "zh:da3c317f1db2469615ab40aa6baba63b5643bae7110ff855277a1fb9d8eb4f2c", 22 | "zh:dc6430622a8dc5cdab359a8704aec81d3825ea1d305bbb3bbd032b1c6adfae0c", 23 | "zh:fac0d2ddeadf9ec53da87922f666e1e73a603a611c57bcbc4b86ac2821619b1d", 24 | ] 25 | } 26 | 27 | provider "registry.terraform.io/hashicorp/external" { 28 | version = "2.3.4" 29 | hashes = [ 30 | "h1:XWkRZOLKMjci9/JAtE8X8fWOt7A4u+9mgXSUjc4Wuyo=", 31 | "zh:037fd82cd86227359bc010672cd174235e2d337601d4686f526d0f53c87447cb", 32 | "zh:0ea1db63d6173d01f2fa8eb8989f0809a55135a0d8d424b08ba5dabad73095fa", 33 | "zh:17a4d0a306566f2e45778fbac48744b6fd9c958aaa359e79f144c6358cb93af0", 34 | "zh:298e5408ab17fd2e90d2cd6d406c6d02344fe610de5b7dae943a58b958e76691", 35 | "zh:38ecfd29ee0785fd93164812dcbe0664ebbe5417473f3b2658087ca5a0286ecb", 36 | "zh:59f6a6f31acf66f4ea3667a555a70eba5d406c6e6d93c2c641b81d63261eeace", 37 | 
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 38 | "zh:ad0279dfd09d713db0c18469f585e58d04748ca72d9ada83883492e0dd13bd58", 39 | "zh:c69f66fd21f5e2c8ecf7ca68d9091c40f19ad913aef21e3ce23836e91b8cbb5f", 40 | "zh:d4a56f8c48aa86fc8e0c233d56850f5783f322d6336f3bf1916e293246b6b5d4", 41 | "zh:f2b394ebd4af33f343835517e80fc876f79361f4688220833bc3c77655dd2202", 42 | "zh:f31982f29f12834e5d21e010856eddd19d59cd8f449adf470655bfd19354377e", 43 | ] 44 | } 45 | -------------------------------------------------------------------------------- /aws-highly-available-nat-gateway-setup/README.md: -------------------------------------------------------------------------------- 1 | # Example of NAT Gateway with High Availability 2 | 3 | This example project showcases how to achieve fault tolerance of NAT Gateways by deploying them into different AZs. Each NAT gateway is created in a specific Availability Zone and implemented with redundancy in that zone. 4 | 5 | To learn more about NAT Gateways, see [NAT Gateway Basics](https://docs.aws.amazon.com/vpc/latest/userguide/nat-gateway-basics.html) 6 | 7 | ![](../img/highly-available-nat-gw.png) 8 | 9 | The NAT gateways in public subnets enable instances in a private subnet to send outbound traffic to the internet, while preventing the internet from establishing connections to the instances. To learn more about different use cases, see [NAT Gateway scenarios](https://docs.aws.amazon.com/vpc/latest/userguide/nat-gateway-scenarios.html). 10 | 11 | ## Create a Key Pair 12 | 13 | To be able to SSH into Bastion host and private instances, provision the SSH key: 14 | ```bash 15 | aws ec2 create-key-pair --key-name ssh-key-pair --query 'KeyMaterial' --output text > ssh-key-pair.pem 16 | chmod 400 ssh-key-pair.pem 17 | ``` 18 | 19 | **Transfer SSH key into Bastion host** 20 | In order to SSH into private instance from Bastion host, you need to transfer the SSH key. 
One of the options is to create the key on Bastion host: 21 | ```bash 22 | # Copy the contents of the private SSH key from local machine 23 | xclip -sel clip < ssh-key-pair.pem 24 | 25 | # On Bastion host machine 26 | touch ssh-key-pair.pem 27 | chmod 400 ssh-key-pair.pem 28 | 29 | # Paste the contents of the private SSH key 30 | vim ssh-key-pair.pem 31 | ``` 32 | 33 | ## Initialize the project 34 | 35 | Run the following commands to deploy the infrastructure: 36 | ```bash 37 | terraform init 38 | terraform apply 39 | ``` 40 | 41 | ## Verify the connection to the Internet from private instance 42 | 43 | SSH into private instance from Bastion host: 44 | ```bash 45 | ssh -i ssh-key-pair.pem ubuntu@ 46 | ``` 47 | 48 | Ping `google.com`: 49 | ```bash 50 | ping google.com 51 | ``` 52 | 53 | ## Pros and Cons of this approach 54 | 55 | ### **Pros** 56 | 1. **High Availability:** 57 | - Each NAT Gateway is tied to a specific Availability Zone (AZ). By provisioning one NAT Gateway per AZ, you ensure that private instances in one AZ can still access the internet if another AZ fails. 58 | 59 | 2. **Reduced Latency:** 60 | - Private instances route internet-bound traffic to the NAT Gateway in their AZ. This avoids cross-AZ traffic, reducing latency and improving performance. 61 | 62 | 3. **Fault Isolation:** 63 | - If a NAT Gateway or its associated infrastructure in one AZ fails, private instances in other AZs remain unaffected. 64 | 65 | 4. **Scalability:** 66 | - Traffic is distributed across multiple NAT Gateways, reducing the chance of any single NAT Gateway becoming a bottleneck. 67 | 68 | --- 69 | 70 | ### **Cons** 71 | 1. **Higher Costs:** 72 | - AWS charges per NAT Gateway and for data transfer. Having multiple NAT Gateways increases these costs, especially in scenarios with low traffic volumes. 73 | 74 | 2. 
**Management Overhead:** 75 | - This setup requires careful management of route tables and associations for each AZ, which can add complexity to the infrastructure. 76 | 77 | 3. **Underutilization:** 78 | - In low-traffic environments, multiple NAT Gateways might be underutilized, leading to inefficient resource usage. 79 | 80 | 4. **Complexity for Scaling Across AZs:** 81 | - When expanding to more AZs, you'll need to add additional NAT Gateways and update route tables, increasing maintenance effort. 82 | 83 | --- 84 | 85 | ### **Alternatives** 86 | 1. **Single NAT Gateway (Lower Cost, Lower Resilience):** 87 | - A single NAT Gateway shared across all private subnets reduces costs but introduces a single point of failure and higher cross-AZ traffic costs. 88 | 89 | ![](../img/single_nat.png) 90 | 91 | 2. **NAT Instances:** 92 | - NAT Instances can replace NAT Gateways for lower costs in some cases, but they require manual scaling, patching, and monitoring, making them less resilient and more management-intensive. For more information, see [this example project](https://github.com/Brain2life/terraform-cookbook/tree/main/nat-instance-setup-for-nonprod). 93 | 94 | --- 95 | 96 | ### **When to Use This Setup** 97 | - Your workload is **highly available** across multiple AZs. 98 | - Your application has **significant internet-bound traffic**, justifying the cost of multiple NAT Gateways. 99 | - You prioritize **resilience and performance** over cost. 100 | 101 | --- 102 | 103 | ### **When to Consider Alternatives** 104 | - You have a **low-traffic environment** with minimal outbound internet usage. 105 | - Cost is a **major constraint**. 106 | - You don’t require **strict fault isolation** across AZs. 107 | 108 | --- 109 | 110 | This setup is ideal for production environments requiring high availability and scalability, but less suitable for cost-sensitive or low-traffic environments. 
# Bastion host in the public subnet (us-east-1a).
# Acts as the single SSH entry point to the instances in the private subnets.
resource "aws_instance" "bastion" {
  ami           = "ami-07a63969ac0961461" # Ubuntu 22.04 (us-east-1)
  instance_type = "t2.micro"
  subnet_id     = aws_subnet.public_a.id # Place into us-east-1a public subnet

  # For instances launched into a VPC, security groups must be passed by ID
  # via vpc_security_group_ids. The security_groups argument expects group
  # *names* (EC2-Classic / default-VPC style); passing IDs there causes
  # Terraform to force instance replacement on subsequent applies.
  vpc_security_group_ids = [aws_security_group.bastion_sg.id]

  key_name = "ssh-key-pair" # SSH key name to access Bastion host

  tags = {
    Name = "bastion-host"
  }
}

# EC2 instance in the private subnet in us-east-1a.
# Has no public IP; reachable only through the bastion host, with outbound
# internet access via the NAT gateway in the same AZ.
resource "aws_instance" "private_a" {
  ami           = "ami-07a63969ac0961461" # Ubuntu 22.04 (us-east-1)
  instance_type = "t2.micro"
  subnet_id     = aws_subnet.private_a.id # Place into us-east-1a private subnet

  # Same fix as the bastion host: reference the SG by ID through
  # vpc_security_group_ids, not security_groups.
  vpc_security_group_ids = [aws_security_group.private_sg.id]

  key_name = "ssh-key-pair"

  tags = {
    Name = "private-instance-a"
  }
}
# EC2 instance in the private subnet in us-east-1b.
# Has no public IP; reachable only through the bastion host, with outbound
# internet access via the NAT gateway in us-east-1b.
resource "aws_instance" "private_b" {
  # Use the same Ubuntu 22.04 us-east-1 AMI as the bastion and private_a
  # instances. The previous value (ami-0c94855ba95c71c99) did not match the
  # siblings despite carrying the same "Ubuntu 22.04" comment — it appears to
  # be an Amazon Linux example AMI from another region and is unlikely to
  # resolve in us-east-1.
  ami           = "ami-07a63969ac0961461" # Ubuntu 22.04 (us-east-1)
  instance_type = "t2.micro"
  subnet_id     = aws_subnet.private_b.id # Place into us-east-1b private subnet

  # For VPC instances, SG IDs must go through vpc_security_group_ids; the
  # security_groups argument expects group names and forces replacement when
  # given IDs.
  vpc_security_group_ids = [aws_security_group.private_sg.id]

  key_name = "ssh-key-pair"

  tags = {
    Name = "private-instance-b"
  }
}
25 | Name = "nat-gateway-b" 26 | } 27 | } -------------------------------------------------------------------------------- /aws-highly-available-nat-gateway-setup/outputs.tf: -------------------------------------------------------------------------------- 1 | # Public IP of Bastion host 2 | output "bastion_public_ip" { 3 | value = aws_instance.bastion.public_ip 4 | } 5 | 6 | # Private IP of EC2 instance in private subnet us-east-1a 7 | output "private_instance_a_private_ip" { 8 | value = aws_instance.private_a.private_ip 9 | } 10 | 11 | # Private IP of EC2 instance in private subnet us-east-1b 12 | output "private_instance_b_private_ip" { 13 | value = aws_instance.private_b.private_ip 14 | } 15 | 16 | # Public IP of NAT Gateway in us-east-1a 17 | output "nat_gateway_a_eip" { 18 | value = aws_eip.nat_a.public_ip 19 | } 20 | 21 | # Public IP of NAT Gateway in us-east-1b 22 | output "nat_gateway_b_eip" { 23 | value = aws_eip.nat_b.public_ip 24 | } -------------------------------------------------------------------------------- /aws-highly-available-nat-gateway-setup/route_tables.tf: -------------------------------------------------------------------------------- 1 | # Route table via IGW 2 | resource "aws_route_table" "public" { 3 | vpc_id = aws_vpc.main.id 4 | route { 5 | cidr_block = "0.0.0.0/0" 6 | gateway_id = aws_internet_gateway.main.id 7 | } 8 | tags = { 9 | Name = "public-route-table" 10 | } 11 | } 12 | 13 | # Associate public subnet in us-east-1a with IGW route 14 | resource "aws_route_table_association" "public_a" { 15 | subnet_id = aws_subnet.public_a.id 16 | route_table_id = aws_route_table.public.id 17 | } 18 | 19 | # Associate public subnet in us-east-1b with IGW route 20 | resource "aws_route_table_association" "public_b" { 21 | subnet_id = aws_subnet.public_b.id 22 | route_table_id = aws_route_table.public.id 23 | } 24 | 25 | # Associate private subnet in us-east-1a with NAT Gateway in us-east-1a 26 | resource "aws_route_table" "private_a" { 27 | 
# Route table for the private subnet in us-east-1b, routing outbound traffic via the NAT Gateway in us-east-1b
[aws_security_group.bastion_sg.id] 33 | } 34 | 35 | egress { 36 | from_port = 0 37 | to_port = 0 38 | protocol = "-1" 39 | cidr_blocks = ["0.0.0.0/0"] 40 | } 41 | 42 | tags = { 43 | Name = "private-sg" 44 | } 45 | } -------------------------------------------------------------------------------- /aws-highly-available-nat-gateway-setup/subnets.tf: -------------------------------------------------------------------------------- 1 | # Public subnet in us-east-1a 2 | resource "aws_subnet" "public_a" { 3 | vpc_id = aws_vpc.main.id 4 | cidr_block = "10.0.1.0/24" 5 | availability_zone = "us-east-1a" 6 | map_public_ip_on_launch = true 7 | tags = { 8 | Name = "public-subnet-a" 9 | } 10 | } 11 | 12 | # Private subnet in us-east-1a 13 | resource "aws_subnet" "private_a" { 14 | vpc_id = aws_vpc.main.id 15 | cidr_block = "10.0.2.0/24" 16 | availability_zone = "us-east-1a" 17 | tags = { 18 | Name = "private-subnet-a" 19 | } 20 | } 21 | 22 | # Public subnet in us-east-1b 23 | resource "aws_subnet" "public_b" { 24 | vpc_id = aws_vpc.main.id 25 | cidr_block = "10.0.3.0/24" 26 | availability_zone = "us-east-1b" 27 | map_public_ip_on_launch = true 28 | tags = { 29 | Name = "public-subnet-b" 30 | } 31 | } 32 | 33 | # Private subnet in us-east-1b 34 | resource "aws_subnet" "private_b" { 35 | vpc_id = aws_vpc.main.id 36 | cidr_block = "10.0.4.0/24" 37 | availability_zone = "us-east-1b" 38 | tags = { 39 | Name = "private-subnet-b" 40 | } 41 | } -------------------------------------------------------------------------------- /aws-highly-available-nat-gateway-setup/vpc.tf: -------------------------------------------------------------------------------- 1 | resource "aws_vpc" "main" { 2 | cidr_block = "10.0.0.0/16" 3 | enable_dns_support = true 4 | enable_dns_hostnames = true 5 | tags = { 6 | Name = "main-vpc" 7 | } 8 | } -------------------------------------------------------------------------------- /aws-nacl-basics/.terraform.lock.hcl: 
-------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "4.67.0" 6 | constraints = "~> 4.0" 7 | hashes = [ 8 | "h1:dCRc4GqsyfqHEMjgtlM1EympBcgTmcTkWaJmtd91+KA=", 9 | "zh:0843017ecc24385f2b45f2c5fce79dc25b258e50d516877b3affee3bef34f060", 10 | "zh:19876066cfa60de91834ec569a6448dab8c2518b8a71b5ca870b2444febddac6", 11 | "zh:24995686b2ad88c1ffaa242e36eee791fc6070e6144f418048c4ce24d0ba5183", 12 | "zh:4a002990b9f4d6d225d82cb2fb8805789ffef791999ee5d9cb1fef579aeff8f1", 13 | "zh:559a2b5ace06b878c6de3ecf19b94fbae3512562f7a51e930674b16c2f606e29", 14 | "zh:6a07da13b86b9753b95d4d8218f6dae874cf34699bca1470d6effbb4dee7f4b7", 15 | "zh:768b3bfd126c3b77dc975c7c0e5db3207e4f9997cf41aa3385c63206242ba043", 16 | "zh:7be5177e698d4b547083cc738b977742d70ed68487ce6f49ecd0c94dbf9d1362", 17 | "zh:8b562a818915fb0d85959257095251a05c76f3467caa3ba95c583ba5fe043f9b", 18 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 19 | "zh:9c385d03a958b54e2afd5279cd8c7cbdd2d6ca5c7d6a333e61092331f38af7cf", 20 | "zh:b3ca45f2821a89af417787df8289cb4314b273d29555ad3b2a5ab98bb4816b3b", 21 | "zh:da3c317f1db2469615ab40aa6baba63b5643bae7110ff855277a1fb9d8eb4f2c", 22 | "zh:dc6430622a8dc5cdab359a8704aec81d3825ea1d305bbb3bbd032b1c6adfae0c", 23 | "zh:fac0d2ddeadf9ec53da87922f666e1e73a603a611c57bcbc4b86ac2821619b1d", 24 | ] 25 | } 26 | 27 | provider "registry.terraform.io/hashicorp/external" { 28 | version = "2.3.4" 29 | hashes = [ 30 | "h1:XWkRZOLKMjci9/JAtE8X8fWOt7A4u+9mgXSUjc4Wuyo=", 31 | "zh:037fd82cd86227359bc010672cd174235e2d337601d4686f526d0f53c87447cb", 32 | "zh:0ea1db63d6173d01f2fa8eb8989f0809a55135a0d8d424b08ba5dabad73095fa", 33 | "zh:17a4d0a306566f2e45778fbac48744b6fd9c958aaa359e79f144c6358cb93af0", 34 | "zh:298e5408ab17fd2e90d2cd6d406c6d02344fe610de5b7dae943a58b958e76691", 35 | 
"zh:38ecfd29ee0785fd93164812dcbe0664ebbe5417473f3b2658087ca5a0286ecb", 36 | "zh:59f6a6f31acf66f4ea3667a555a70eba5d406c6e6d93c2c641b81d63261eeace", 37 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 38 | "zh:ad0279dfd09d713db0c18469f585e58d04748ca72d9ada83883492e0dd13bd58", 39 | "zh:c69f66fd21f5e2c8ecf7ca68d9091c40f19ad913aef21e3ce23836e91b8cbb5f", 40 | "zh:d4a56f8c48aa86fc8e0c233d56850f5783f322d6336f3bf1916e293246b6b5d4", 41 | "zh:f2b394ebd4af33f343835517e80fc876f79361f4688220833bc3c77655dd2202", 42 | "zh:f31982f29f12834e5d21e010856eddd19d59cd8f449adf470655bfd19354377e", 43 | ] 44 | } 45 | -------------------------------------------------------------------------------- /aws-nacl-basics/README.md: -------------------------------------------------------------------------------- 1 | # AWS Network Access Control List (NACL) and Security Groups (SGs) basics 2 | 3 | This project demonstrates the working logic of two foundational network security features in AWS: [**Network Access Control Lists (NACLs)**](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-network-acls.html) and [**Security Groups (SGs)**](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html). 
4 | 5 | ![](../img/nacl_scheme.png) 6 | 7 | # Table of contents: 8 | - [Level of Operation](#level-of-operation) 9 | - [Rules Structure](#rules-structure) 10 | - [Rule Evaluation](#rule-evaluation) 11 | - [Default Security Group](#default-security-group) 12 | - [Use Cases](#use-cases) 13 | - [When to Use Each](#when-to-use-each) 14 | - [Best Practices](#best-practices) 15 | - [Example Scenario](#example-scenario) 16 | - [Project setup](#project-setup) 17 | - [Create a Key Pair](#create-a-key-pair) 18 | - [Initialize the project](#initialize-the-project) 19 | - [Verify the connection to the Web instance](#verify-the-connection-to-the-web-instance) 20 | - [Change NACL rule to deny traffic](#change-nacl-rule-to-deny-traffic) 21 | - [References](#references) 22 | 23 | AWS **Network ACLs (NACLs)** and **Security Groups** are critical components of network security in AWS. Both serve to control inbound and outbound traffic, but they operate at different layers and have distinct use cases. 24 | 25 | --- 26 | 27 | ### **Level of Operation** 28 | - **Security Groups:** 29 | - Operate at the instance level (virtual firewall for an instance). 30 | - Attached to EC2 instances or other resources. 31 | 32 | ![](https://i.imgur.com/Dx29hQe.png) 33 | 34 | - **NACLs:** 35 | - Operate at the subnet level. 36 | - Automatically apply to all instances within the associated subnet. 37 | 38 | ![](https://i.imgur.com/ky3mzsV.png) 39 | 40 | --- 41 | 42 | ### **Rules Structure** 43 | - **Security Groups:** 44 | - Allow-only rules. [You cannot explicitly deny traffic](https://docs.aws.amazon.com/vpc/latest/userguide/security-group-rules.html); you can only specify what to allow. 45 | - Stateful: Return traffic for allowed inbound traffic is automatically allowed, and vice versa. 46 | 47 | - **NACLs:** 48 | - [Allow and deny rules](https://docs.aws.amazon.com/vpc/latest/userguide/nacl-rules.html). You can explicitly deny traffic. 
The default security group contains only its default rules, and it is associated with two EC2 instances running in the VPC.
83 | - Custom NACLs [deny all traffic by default](https://docs.aws.amazon.com/vpc/latest/userguide/custom-network-acl.html) until rules are added. 84 | 85 | --- 86 | 87 | ### **Use Cases** 88 | - **Security Groups:** 89 | - Best for fine-grained control at the instance level. 90 | - Use when you need to manage security for individual EC2 instances or groups of instances. 91 | - Ideal for application-level controls, such as allowing traffic to a web server (HTTP, HTTPS) or database server (MySQL, PostgreSQL). 92 | 93 | - **NACLs:** 94 | - Best for broader control at the subnet level. 95 | - Use to provide an additional layer of security for subnets in a VPC. 96 | - Ideal for network-level controls, such as blocking a specific IP range or restricting traffic based on a port across multiple instances. 97 | 98 | --- 99 | 100 | ### **When to Use Each** 101 | 1. **Use Security Groups:** 102 | - When you want granular control of traffic to individual instances. 103 | - To manage instance-specific protocols and ports (e.g., HTTP, SSH, database). 104 | - In combination with Auto Scaling groups or when instance IPs are dynamic. 105 | 106 | 2. **Use NACLs:** 107 | - When you need to enforce broad rules that apply to all instances in a subnet. 108 | - To block specific IP ranges or ports at the subnet level. 109 | - When you want a stateless firewall that applies universally to all traffic entering or exiting a subnet. 110 | 111 | --- 112 | 113 | ### **Best Practices** 114 | 1. **Layered Security:** Use both Security Groups and NACLs together for defense-in-depth: 115 | - NACLs as a coarse-grained, subnet-level firewall. 116 | - Security Groups for fine-grained, instance-level access control. 117 | 118 | 2. **Least Privilege:** Allow only the traffic necessary for your application and block everything else. 119 | 120 | 3. **Test Changes:** Before implementing rules in production, test them in a staging environment to ensure they function as intended. 
121 | 122 | --- 123 | 124 | ### **Example Scenario** 125 | **Web Application Deployment:** 126 | - **Security Group:** 127 | - Allow inbound HTTP/HTTPS (80/443) and SSH (22) traffic from specific IPs to the web server. 128 | - Allow MySQL traffic (3306) from the application servers to the database instance. 129 | 130 | - **NACL:** 131 | - Deny traffic from malicious IP ranges at the subnet level. 132 | - Allow HTTP/HTTPS (80/443) traffic to the subnet and block everything else not explicitly allowed. 133 | 134 | By using NACLs and Security Groups appropriately, you ensure a robust, multi-layered security posture in your AWS environment. 135 | 136 | --- 137 | 138 | ## **Project setup** 139 | 140 | ### **Create a Key Pair** 141 | To be able to SSH into Web instance, provision the SSH key: 142 | ```bash 143 | aws ec2 create-key-pair --key-name ssh-key-pair --query 'KeyMaterial' --output text > ssh-key-pair.pem 144 | chmod 400 ssh-key-pair.pem 145 | ``` 146 | 147 | ### **Initialize the project** 148 | Run the following commands to deploy the infrastructure: 149 | ```bash 150 | terraform init 151 | terraform apply 152 | ``` 153 | 154 | ### Verify the connection to the Web instance 155 | SSH into Web instance: 156 | ```bash 157 | ssh -i ssh-key-pair.pem ubuntu@ 158 | ``` 159 | 160 | ### Change NACL rule to deny traffic 161 | To see NACL rules in action, deny SSH traffic on subnet level and try to reconnect to Web instance via SSH: 162 | 163 | ![](https://i.imgur.com/Z57OTxY.png) 164 | 165 | As a result you won't be able to connect to Web instance despite the fact that you have allowed inbound SSH traffic on Security Group level. 
# EC2 instance used to demonstrate the interaction between subnet-level NACL
# rules and instance-level Security Group rules (SSH reachability test).
resource "aws_instance" "web" {
  ami           = "ami-07a63969ac0961461" # Ubuntu 22.04 (us-east-1)
  instance_type = "t2.micro"
  subnet_id     = aws_subnet.public.id

  # For an instance in a VPC, security groups must be referenced by ID via
  # vpc_security_group_ids. The security_groups argument expects group names
  # and, when given IDs, causes forced instance replacement on re-apply.
  vpc_security_group_ids = [aws_security_group.ec2_sg.id]

  key_name = "ssh-key-pair" # SSH key to access the instance

  tags = {
    Name = "MyEC2Instance"
  }
}
/aws-nacl-basics/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.14" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = "~> 4.0" 7 | } 8 | } 9 | } 10 | 11 | provider "aws" { 12 | region = "us-east-1" 13 | } -------------------------------------------------------------------------------- /aws-nacl-basics/nacls.tf: -------------------------------------------------------------------------------- 1 | # Network ACL 2 | resource "aws_network_acl" "public" { 3 | vpc_id = aws_vpc.main.id 4 | 5 | ingress { 6 | rule_no = 100 7 | protocol = "tcp" 8 | action = "allow" 9 | cidr_block = "${data.external.my_ip.result.ip}/32" 10 | from_port = 22 11 | to_port = 22 12 | } 13 | 14 | egress { 15 | rule_no = 100 16 | protocol = "-1" 17 | action = "allow" 18 | cidr_block = "${data.external.my_ip.result.ip}/32" 19 | from_port = 0 20 | to_port = 0 21 | } 22 | 23 | tags = { 24 | Name = "PublicNACL" 25 | } 26 | } 27 | 28 | # Associate NACL with Subnet 29 | resource "aws_network_acl_association" "public" { 30 | subnet_id = aws_subnet.public.id 31 | network_acl_id = aws_network_acl.public.id 32 | } -------------------------------------------------------------------------------- /aws-nacl-basics/outputs.tf: -------------------------------------------------------------------------------- 1 | # Public IP of the instance 2 | output "web_instance_public_ip" { 3 | value = aws_instance.web.public_ip 4 | } -------------------------------------------------------------------------------- /aws-nacl-basics/security_groups.tf: -------------------------------------------------------------------------------- 1 | # Security Group 2 | resource "aws_security_group" "ec2_sg" { 3 | name_prefix = "EC2SecurityGroup-" 4 | description = "Allow SSH traffic" 5 | vpc_id = aws_vpc.main.id 6 | 7 | ingress { 8 | from_port = 22 9 | to_port = 22 10 | protocol = "tcp" 11 | cidr_blocks = 
["${data.external.my_ip.result.ip}/32"] # Restrict to your public IP 12 | } 13 | 14 | egress { 15 | from_port = 0 16 | to_port = 0 17 | protocol = "-1" 18 | cidr_blocks = ["0.0.0.0/0"] 19 | } 20 | 21 | tags = { 22 | Name = "EC2SecurityGroup" 23 | } 24 | } -------------------------------------------------------------------------------- /aws-nacl-basics/subnets.tf: -------------------------------------------------------------------------------- 1 | # Public Subnet 2 | resource "aws_subnet" "public" { 3 | vpc_id = aws_vpc.main.id 4 | cidr_block = "10.0.1.0/24" 5 | map_public_ip_on_launch = true 6 | 7 | tags = { 8 | Name = "PublicSubnet" 9 | } 10 | } 11 | 12 | # Route Table to IGW 13 | resource "aws_route_table" "public" { 14 | vpc_id = aws_vpc.main.id 15 | 16 | route { 17 | cidr_block = "0.0.0.0/0" 18 | gateway_id = aws_internet_gateway.igw.id 19 | } 20 | 21 | tags = { 22 | Name = "PublicRouteTable" 23 | } 24 | } 25 | 26 | # Associate Route Table with Subnet 27 | resource "aws_route_table_association" "public" { 28 | subnet_id = aws_subnet.public.id 29 | route_table_id = aws_route_table.public.id 30 | } 31 | 32 | -------------------------------------------------------------------------------- /aws-nacl-basics/vpc.tf: -------------------------------------------------------------------------------- 1 | # VPC 2 | resource "aws_vpc" "main" { 3 | cidr_block = "10.0.0.0/16" 4 | enable_dns_hostnames = true 5 | enable_dns_support = true 6 | 7 | tags = { 8 | Name = "MyVPC" 9 | } 10 | } -------------------------------------------------------------------------------- /aws-secrets-manager/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "4.67.0" 6 | constraints = "~> 4.0" 7 | hashes = [ 8 | "h1:dCRc4GqsyfqHEMjgtlM1EympBcgTmcTkWaJmtd91+KA=", 9 | "zh:0843017ecc24385f2b45f2c5fce79dc25b258e50d516877b3affee3bef34f060", 10 | "zh:19876066cfa60de91834ec569a6448dab8c2518b8a71b5ca870b2444febddac6", 11 | "zh:24995686b2ad88c1ffaa242e36eee791fc6070e6144f418048c4ce24d0ba5183", 12 | "zh:4a002990b9f4d6d225d82cb2fb8805789ffef791999ee5d9cb1fef579aeff8f1", 13 | "zh:559a2b5ace06b878c6de3ecf19b94fbae3512562f7a51e930674b16c2f606e29", 14 | "zh:6a07da13b86b9753b95d4d8218f6dae874cf34699bca1470d6effbb4dee7f4b7", 15 | "zh:768b3bfd126c3b77dc975c7c0e5db3207e4f9997cf41aa3385c63206242ba043", 16 | "zh:7be5177e698d4b547083cc738b977742d70ed68487ce6f49ecd0c94dbf9d1362", 17 | "zh:8b562a818915fb0d85959257095251a05c76f3467caa3ba95c583ba5fe043f9b", 18 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 19 | "zh:9c385d03a958b54e2afd5279cd8c7cbdd2d6ca5c7d6a333e61092331f38af7cf", 20 | "zh:b3ca45f2821a89af417787df8289cb4314b273d29555ad3b2a5ab98bb4816b3b", 21 | "zh:da3c317f1db2469615ab40aa6baba63b5643bae7110ff855277a1fb9d8eb4f2c", 22 | "zh:dc6430622a8dc5cdab359a8704aec81d3825ea1d305bbb3bbd032b1c6adfae0c", 23 | "zh:fac0d2ddeadf9ec53da87922f666e1e73a603a611c57bcbc4b86ac2821619b1d", 24 | ] 25 | } 26 | 27 | provider "registry.terraform.io/hashicorp/external" { 28 | version = "2.3.4" 29 | hashes = [ 30 | "h1:XWkRZOLKMjci9/JAtE8X8fWOt7A4u+9mgXSUjc4Wuyo=", 31 | "zh:037fd82cd86227359bc010672cd174235e2d337601d4686f526d0f53c87447cb", 32 | "zh:0ea1db63d6173d01f2fa8eb8989f0809a55135a0d8d424b08ba5dabad73095fa", 33 | "zh:17a4d0a306566f2e45778fbac48744b6fd9c958aaa359e79f144c6358cb93af0", 34 | "zh:298e5408ab17fd2e90d2cd6d406c6d02344fe610de5b7dae943a58b958e76691", 35 | "zh:38ecfd29ee0785fd93164812dcbe0664ebbe5417473f3b2658087ca5a0286ecb", 36 | "zh:59f6a6f31acf66f4ea3667a555a70eba5d406c6e6d93c2c641b81d63261eeace", 37 | 
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 38 | "zh:ad0279dfd09d713db0c18469f585e58d04748ca72d9ada83883492e0dd13bd58", 39 | "zh:c69f66fd21f5e2c8ecf7ca68d9091c40f19ad913aef21e3ce23836e91b8cbb5f", 40 | "zh:d4a56f8c48aa86fc8e0c233d56850f5783f322d6336f3bf1916e293246b6b5d4", 41 | "zh:f2b394ebd4af33f343835517e80fc876f79361f4688220833bc3c77655dd2202", 42 | "zh:f31982f29f12834e5d21e010856eddd19d59cd8f449adf470655bfd19354377e", 43 | ] 44 | } 45 | -------------------------------------------------------------------------------- /aws-secrets-manager/README.md: -------------------------------------------------------------------------------- 1 | ## Handling sensitive data in Terraform code with AWS Secrets Manager 2 | 3 | [AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html) provides a secure way to store and manage sensitive information. This template shows how to securely handle RDS instance credentials in Terraform by integrating AWS Secrets Manager. 4 | 5 | Secrets Manager uses 256-bit Advanced Encryption Standard (AES) symmetric data keys to encrypt secret values. For more information, see [Secret encryption and decryption in AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/security-encryption.html). 6 | 7 | **NOTE** 8 | AWS Secrets Manager is a paid service. For more information, see [AWS Secrets Manager Pricing](https://aws.amazon.com/secrets-manager/pricing/) 9 | 10 | ## Overview 11 | 12 | This template uses random secrets generation when you deploy IaC help you protect sensitive data from the very start, known as *zero hours*. The sensitive data is never known to anyone, right from the deployment phase. 13 | 14 | ![](../img/terraform-get-secrets-from-secrets-manager.png) 15 | 16 | 1. Through Terraform, use AWS Secrets Manager to generate a random password secret. 17 | 2. 
Terraform uses this random password secret, which is stored in AWS Secrets Manager, to access the database. 18 | 3. The username and database name values are also stored in AWS Secrets Manager and supplied as variable inputs during the `terraform apply` command. 19 | 20 | **NOTE** 21 | **Sensitive data is visible in plain text in the Terraform state file**. To help protect sensitive data, store the Terraform state file in the remote backend with encryption enabled at-rest and in-transit. For example, use AWS S3 remote backend and DynamoDB table for state locking. For more information, see [S3 DynamoDB Backend](../s3-dynamodb-backend/) 22 | 23 | ## Getting generated random password and secret values 24 | 25 | To get generated random password and secret values stored in AWS Secrets Manager via AWS CLI: 26 | ```bash 27 | aws secretsmanager get-secret-value --secret-id my-org/my-env/my-rds-secret 28 | ``` 29 | 30 | To extract password from secret values, first ensure that you have `jq` installed: 31 | ```bash 32 | sudo apt-get update 33 | sudo apt-get install jq 34 | ``` 35 | 36 | Extract the password: 37 | ```bash 38 | aws secretsmanager get-secret-value \ 39 | --secret-id my-org/my-env/my-rds-secret \ 40 | --query SecretString \ 41 | --output text | jq -r '.password' 42 | ``` 43 | 44 | ## Check connection to the RDS instance 45 | 46 | This template uses EC2 bastion host in public subnet to access RDS instance with `mysql` client as shown in the following diagram: 47 | 48 | ![](../img/diagram_rds_bastion.png) 49 | 50 | **NOTE** 51 | Generate public and private SSH keys before running terraform code. Store them in the default `~/.ssh` path. 52 | 53 | 1. To get public IP address of running EC2 instances in your account: 54 | ```bash 55 | aws ec2 describe-instances \ 56 | --filters "Name=instance-state-name,Values=running" \ 57 | --query 'Reservations[*].Instances[*].{InstanceID:InstanceId,PublicIP:PublicIpAddress}' \ 58 | --output table 59 | ``` 60 | 2. 
To connect to the bastion host: 61 | ```bash 62 | ssh ubuntu@<bastion-public-ip> 63 | ``` 64 | 3. To connect to the RDS instance: 65 | ```bash 66 | sudo mysql -h <rds-endpoint> -u <username> -p 67 | ``` 68 | 69 | ## References 70 | - [AWS Docs: Using Secrets Manager and Terraform](https://docs.aws.amazon.com/prescriptive-guidance/latest/secure-sensitive-data-secrets-manager-terraform/using-secrets-manager-and-terraform.html) 71 | - [YouTube HashiCorp: How Secure are Your Sensitive Values in Terraform? Common Pitfalls of Scale Factory's Clients](https://www.youtube.com/watch?v=zYAuVrLacGc) 72 | - [Data source: aws_secretsmanager_random_password](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/secretsmanager_random_password) 73 | - [Resource: aws_secretsmanager_secret_version](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/secretsmanager_secret_version) 74 | - [Resource: aws_secretsmanager_secret](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/secretsmanager_secret) 75 | - [AWS Secrets Manager Pricing](https://aws.amazon.com/secrets-manager/pricing/) -------------------------------------------------------------------------------- /aws-secrets-manager/bastion.tf: -------------------------------------------------------------------------------- 1 | # Data block to get the default VPC ID 2 | data "aws_vpc" "default" { 3 | default = true 4 | } 5 | 6 | # Call the external data source to get public IP 7 | data "external" "my_ip" { 8 | program = ["bash", "./get_public_ip.sh"] 9 | } 10 | 11 | # Security Group for Bastion Host 12 | resource "aws_security_group" "bastion_sg" { 13 | name = "bastion-host-sg" 14 | vpc_id = data.aws_vpc.default.id 15 | 16 | # Allow SSH access from your IP 17 | ingress { 18 | from_port = 22 19 | to_port = 22 20 | protocol = "tcp" 21 | cidr_blocks = ["${data.external.my_ip.result.ip}/32"] 22 | } 23 | 24 | # Outbound traffic (allow all) 25 | egress { 26 | from_port = 0 27 | to_port = 0 28 | protocol = "-1" 
29 | cidr_blocks = ["0.0.0.0/0"] 30 | } 31 | 32 | tags = { 33 | Name = "Bastion Security Group" 34 | } 35 | } 36 | 37 | # EC2 Key Pair (Replace if you have an existing one) 38 | resource "aws_key_pair" "bastion_key" { 39 | key_name = "bastion-key" 40 | public_key = file("~/.ssh/aws_test_demo_key.pub") # Replace with the path to your public key 41 | } 42 | 43 | # Bastion Host EC2 Instance 44 | resource "aws_instance" "bastion" { 45 | ami = "ami-005fc0f236362e99f" # Ubuntu 22.04 AMI (change for your region) 46 | instance_type = "t2.micro" 47 | key_name = aws_key_pair.bastion_key.key_name 48 | associate_public_ip_address = true # Ensure public IP 49 | 50 | security_groups = [aws_security_group.bastion_sg.name] 51 | 52 | # Install MySQL client via user data script 53 | user_data = <<-EOF 54 | #!/bin/bash 55 | sudo apt update -y 56 | sudo apt install mysql-server -y 57 | EOF 58 | 59 | tags = { 60 | Name = "Bastion Host" 61 | } 62 | } -------------------------------------------------------------------------------- /aws-secrets-manager/get_public_ip.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ip=$(curl -s https://checkip.amazonaws.com) 3 | echo "{\"ip\": \"$ip\"}" 4 | -------------------------------------------------------------------------------- /aws-secrets-manager/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.14" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = "~> 4.0" 7 | } 8 | } 9 | } 10 | 11 | provider "aws" { 12 | region = "us-east-1" 13 | } 14 | -------------------------------------------------------------------------------- /aws-secrets-manager/outputs.tf: -------------------------------------------------------------------------------- 1 | output "db_endpoint" { 2 | value = aws_db_instance.default.endpoint 3 | description = "The connection endpoint" 4 | } 
-------------------------------------------------------------------------------- /aws-secrets-manager/rds.tf: -------------------------------------------------------------------------------- 1 | data "aws_secretsmanager_random_password" "rds_password" { 2 | password_length = 20 3 | exclude_numbers = false 4 | exclude_punctuation = true 5 | include_space = false 6 | } 7 | 8 | resource "aws_secretsmanager_secret" "rds_secret" { 9 | name = "my-org/my-env/my-rds-secret" 10 | } 11 | 12 | resource "aws_secretsmanager_secret_version" "rds_secret_version" { 13 | secret_id = aws_secretsmanager_secret.rds_secret.id 14 | secret_string = jsonencode({ 15 | db_name = var.rds_db_name 16 | username = var.rds_username, 17 | password = data.aws_secretsmanager_random_password.rds_password.random_password 18 | }) 19 | } 20 | 21 | data "aws_secretsmanager_secret" "rds_secret" { 22 | name = aws_secretsmanager_secret.rds_secret.name 23 | } 24 | 25 | data "aws_secretsmanager_secret_version" "rds_secret_version" { 26 | secret_id = data.aws_secretsmanager_secret.rds_secret.id 27 | } 28 | 29 | # Define local variable with retrieved secrets 30 | locals { 31 | rds_credentials = sensitive(jsondecode(data.aws_secretsmanager_secret_version.rds_secret_version.secret_string)) 32 | } 33 | 34 | # Create Security Group for the RDS Database 35 | resource "aws_security_group" "rds_sg" { 36 | name = "rds-db-sg" 37 | vpc_id = data.aws_vpc.default.id 38 | 39 | # Allow MySQL traffic from Bastion Host's SG 40 | ingress { 41 | from_port = 3306 42 | to_port = 3306 43 | protocol = "tcp" 44 | security_groups = [aws_security_group.bastion_sg.id] 45 | } 46 | 47 | # Outbound traffic (allow all) 48 | egress { 49 | from_port = 0 50 | to_port = 0 51 | protocol = "-1" 52 | cidr_blocks = ["0.0.0.0/0"] 53 | } 54 | 55 | tags = { 56 | Name = "RDS Security Group" 57 | } 58 | } 59 | 60 | resource "aws_db_instance" "default" { 61 | allocated_storage = 20 62 | engine = "mysql" 63 | engine_version = "8.0.32" 64 | instance_class 
= "db.t3.micro" 65 | vpc_security_group_ids = [aws_security_group.rds_sg.id] # Attach RDS SG 66 | 67 | db_name = local.rds_credentials["db_name"] 68 | username = local.rds_credentials["username"] 69 | password = local.rds_credentials["password"] 70 | 71 | skip_final_snapshot = true 72 | publicly_accessible = false 73 | apply_immediately = true 74 | } 75 | -------------------------------------------------------------------------------- /aws-secrets-manager/variables.tf: -------------------------------------------------------------------------------- 1 | variable "rds_username" { 2 | type = string 3 | sensitive = true 4 | } 5 | 6 | variable "rds_db_name" { 7 | type = string 8 | sensitive = true 9 | } 10 | -------------------------------------------------------------------------------- /aws-ssm-ec2-connect/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "5.74.0" 6 | constraints = ">= 1.0.0" 7 | hashes = [ 8 | "h1:HMaN/L2hf1PN2YLdlQRbE49f4RF7VuqEVpqxNtJ2+18=", 9 | "zh:1e2d65add4d63af5b396ae33d55c48303eca6c86bd1be0f6fae13267a9b47bc4", 10 | "zh:20ddec3dac3d06a188f12e58b6428854949b1295e937c5d4dca4866dc1c937af", 11 | "zh:35b72de4e6a3e3d69efc07184fb413406262fe447b2d82d57eaf8c787a068a06", 12 | "zh:44eada24a50cd869aadc4b29f9e791fdf262d7f426921e9ac2893bbb86013176", 13 | "zh:455e666e3a9a2312b3b9f434b87a404b6515d64a8853751e20566a6548f9df9e", 14 | "zh:58b3ae74abfca7b9b61f42f0c8b10d97f9b01aff18bd1d4ab091129c9d203707", 15 | "zh:840a8a32d5923f9e7422f9c80d165c3f89bb6ea370b8283095081e39050a8ea8", 16 | "zh:87cb6dbbdbc1b73bdde4b8b5d6d780914a3e8f1df0385da4ea7323dc1a68468f", 17 | "zh:8b8953e39b0e6e6156c5570d1ca653450bfa0d9b280e2475f01ee5c51a6554db", 18 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 19 | "zh:9bd750262e2fb0187a8420a561e55b0a1da738f690f53f5c7df170cb1f380459", 20 | "zh:9d2474c1432dfa5e1db197e2dd6cd61a6a15452e0bc7acd09ca86b3cdb228871", 21 | "zh:b763ecaf471c7737a5c6e4cf257b5318e922a6610fd83b36ed8eb68582a8642e", 22 | "zh:c1344cd8fe03ff7433a19b14b14a1898c2ca5ba22a468fb8e1687f0a7f564d52", 23 | "zh:dc0e0abf3be7402d0d022ced82816884356115ed27646df9c7222609e96840e6", 24 | ] 25 | } 26 | -------------------------------------------------------------------------------- /aws-ssm-ec2-connect/README.md: -------------------------------------------------------------------------------- 1 | # Connect to private EC2 instance via AWS SSM 2 | 3 | This project provides example of how you can use the AWS Systems Manager Session Manager to securely connect to an Amazon Elastic Compute Cloud (Amazon EC2) bastion host without using any long-lived SSH keys. 
4 | 5 | ## Architecture 6 | ![Architecture of a private EC2 instance managed by SSM](../img/diagram-ssm.png) 7 | 8 | ## Overview 9 | 10 | **Stack** 11 | - A VPC with a single private subnet with EC2 instance running inside 12 | - IAM role and instance profile 13 | - Amazon VPC security groups and security group rules for the endpoints and EC2 instance 14 | 15 | **Technologies** 16 | - [AWS Systems Manager (SSM) Session Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager.html) - is a fully managed service that allows you to securely access and manage your EC2 instances without requiring SSH or RDP access. By using the AWS Management Console, AWS CLI, or SDKs, you can establish sessions to your instances through encrypted channels, ensuring secure communication. No open inbound ports and no need to manage bastion hosts or SSH keys 17 | - [EC2 instance profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html) - is a container for an IAM role that allows EC2 instances to interact with other AWS services securely. Instead of embedding AWS access keys and secrets within your application, which poses security risks, an instance profile provides temporary, rotating credentials for an EC2 instance to access AWS resources according to the permissions of its associated IAM role. 18 | - [AWS PrivateLink](https://aws.amazon.com/privatelink/) - provides secure and private connectivity between Virtual Private Clouds (VPCs) and AWS services or third-party services over the Amazon network. With PrivateLink, you can connect to services in a highly secure way without traversing the public internet. Instead, the data stays within AWS’s private network, reducing exposure to potential security threats and improving latency and reliability. 
19 | 20 | ## Prerequisites 21 | 22 | To connect to an EC2 instance in a private subnet using AWS Systems Manager (SSM), you’ll need to ensure a few prerequisites are met: 23 | 24 | 1. **SSM Agent**: Confirm that the SSM agent is installed and running on your EC2 instance. Some AMIs already have agent pre-installed. For more information, see [Find AMIs with the SSM Agent preinstalled](https://docs.aws.amazon.com/systems-manager/latest/userguide/ami-preinstalled-agent.html). This example template uses Amazon Linux 2 image with pre-installed agent. For manual installation, refer to [Manually installing and uninstalling SSM Agent on EC2 instances for Linux.](https://docs.aws.amazon.com/systems-manager/latest/userguide/manually-install-ssm-agent-linux.html) 25 | 2. **SSM plugin for AWS CLI**: Configure your AWS CLI with SSM plugin. For more information, see [Install the Session Manager plugin for the AWS CLI](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html) 26 | 3. **IAM Role**: Attach an IAM role to the instance with the [`AmazonSSMManagedInstanceCore`](https://docs.aws.amazon.com/aws-managed-policy/latest/reference/AmazonSSMManagedInstanceCore.html) policy. This allows SSM to connect to the instance. 27 | 4. **VPC Endpoints** (Optional): If your instance doesn’t have direct internet access, set up VPC endpoints for SSM and for EC2 messages. For more information, see [Improve the security of EC2 instances by using VPC endpoints for Systems Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/setup-create-vpc.html) 28 | 29 | **NOTE** 30 | The alternative to using a VPC endpoint is to allow outbound internet access on your managed instances. In this case, the managed instances must also allow HTTPS (port 443) outbound traffic to the following endpoints: 31 | - `ssm.region.amazonaws.com` - The endpoint for the Systems Manager service. 
32 | `ssmmessages.region.amazonaws.com` - Systems Manager uses this endpoint to make calls from SSM Agent to the Systems Manager service. 33 | - `ec2messages.region.amazonaws.com` - Session Manager uses this endpoint to connect to your EC2 instance through a secure data channel. 34 | 35 | Set your value for `region`. 36 | 37 | Once these are set up, you can connect to the instance using the following steps: 38 | 39 | ## Start the SSM Session 40 | 41 | Run the following command to start an SSM session to your instance, substituting `<instance-id>` with the actual instance ID: 42 | 43 | ```bash 44 | aws ssm start-session --target <instance-id> --profile <profile-name> 45 | ``` 46 | 47 | To retrieve the instance ID of an EC2 instance via tag name: 48 | ```bash 49 | aws ec2 describe-instances \ 50 | --filters "Name=tag:Name,Values=<tag-name>" \ 51 | --query "Reservations[*].Instances[*].InstanceId" \ 52 | --output text --region <region> 53 | ``` 54 | 55 | ### Example 56 | 57 | If your instance ID is `i-0abcd1234efgh5678`, the command would be: 58 | 59 | ```bash 60 | aws ssm start-session --target i-0abcd1234efgh5678 --profile my-profile --region us-east-1 61 | ``` 62 | 63 | ### Troubleshooting Tips 64 | - If the command fails, check your instance’s IAM role and ensure it has the necessary permissions. 65 | - Verify that the instance shows as “managed” in the Systems Manager console in Fleet Manager tab. 66 | - Ensure the required VPC endpoints are set up if the instance lacks internet access. 67 | 68 | This should establish an SSM session to your instance in the private subnet without requiring a direct SSH connection. 
69 | 70 | ## Code structure 71 | 72 | ### Resources 73 | 74 | | Name | Type | 75 | |------|------| 76 | | [aws_iam_instance_profile.ec2](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_instance_profile) | resource | 77 | | [aws_iam_role.ec2](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource | 78 | | [aws_iam_role_policy_attachment.ssm_managed_ec2](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | 79 | | [aws_instance.ec2](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/instance) | resource | 80 | | [aws_security_group.ec2_all](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | 81 | | [aws_security_group.ssm_https](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | 82 | | [aws_subnet.subnet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/subnet) | resource | 83 | | [aws_vpc.main](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc) | resource | 84 | | [aws_vpc_endpoint.ssm_endpoint](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_endpoint) | resource | 85 | | [aws_ami.amazon_linux_2](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | 86 | 87 | ### Inputs 88 | 89 | | Name | Description | Type | Default | Required | 90 | |------|-------------|------|---------|:--------:| 91 | | [common\_tags](#input\_common\_tags) | Common tags for resources | `map(string)` |
{
"Environment": "dev",
"Name": "ssm-private-ec2"
}
| no | 92 | | [region](#input\_region) | Region target to deploy resources | `string` | `"us-east-1"` | no | 93 | | [vpc\_cidr](#input\_vpc\_cidr) | CIDR block of the VPC | `string` | `"10.0.0.0/16"` | no | 94 | 95 | ### Outputs 96 | 97 | | Name | Description | 98 | |------|-------------| 99 | | [ami\_amazon\_linux2\_arn](#output\_ami\_amazon\_linux2\_arn) | ARN of the Amazon Linux 2 AMI | 100 | | [ec2\_arn](#output\_ec2\_arn) | ARN of the EC2 instance | 101 | | [vpc\_arn](#output\_vpc\_arn) | ARN of the VPC | 102 | 103 | ## References 104 | - [AWS Docs: Improve the security of EC2 instances by using VPC endpoints for Systems Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/setup-create-vpc.html) 105 | - [AWS Docs: Access a bastion host by using Session Manager and Amazon EC2 Instance Connect](https://docs.aws.amazon.com/prescriptive-guidance/latest/patterns/access-a-bastion-host-by-using-session-manager-and-amazon-ec2-instance-connect.html) 106 | - [Blog: AWS Systems Manager Session Manager: bye bye bastion hosts!](https://blog.pipetail.io/posts/2020-02-24-amazon-ssm-session-manager/) 107 | - [https://github.com/aws-samples/secured-bastion-host-terraform](https://github.com/aws-samples/secured-bastion-host-terraform) -------------------------------------------------------------------------------- /aws-ssm-ec2-connect/data.tf: -------------------------------------------------------------------------------- 1 | # Amazon Linux 2 AMI value 2 | data "aws_ami" "amazon_linux_2" { 3 | most_recent = true 4 | filter { 5 | name = "owner-alias" 6 | values = ["amazon"] 7 | } 8 | filter { 9 | name = "name" 10 | values = ["amzn2-ami-hvm*"] 11 | } 12 | } -------------------------------------------------------------------------------- /aws-ssm-ec2-connect/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | services = { 3 | "ec2messages" : { 4 | "name" : "com.amazonaws.${var.region}.ec2messages" 5 | }, 6 
| "ssm" : { 7 | "name" : "com.amazonaws.${var.region}.ssm" 8 | }, 9 | "ssmmessages" : { 10 | "name" : "com.amazonaws.${var.region}.ssmmessages" 11 | } 12 | } 13 | } -------------------------------------------------------------------------------- /aws-ssm-ec2-connect/main.tf: -------------------------------------------------------------------------------- 1 | resource "aws_vpc" "main" { 2 | cidr_block = var.vpc_cidr 3 | enable_dns_support = true 4 | enable_dns_hostnames = true 5 | tags = var.common_tags 6 | } 7 | 8 | resource "aws_subnet" "subnet" { 9 | vpc_id = aws_vpc.main.id 10 | cidr_block = cidrsubnet(var.vpc_cidr, 8, 0) # Derive a subnet with prefix 8 and index 0 from the main VPC’s CIDR block 11 | tags = var.common_tags 12 | } 13 | 14 | # Set IAM role for EC2 to access AWS SSM 15 | resource "aws_iam_role" "ec2" { 16 | name = "ec2" 17 | tags = var.common_tags 18 | assume_role_policy = < There are no additional costs to use Amazon EKS managed node groups, you only pay for the AWS resources you provision. These include Amazon EC2 instances, Amazon EBS volumes, Amazon EKS cluster hours, and any other AWS infrastructure. There are no minimum fees and no upfront commitments. For more information, see [Amazon EC2 pricing](https://aws.amazon.com/ec2/pricing/). 23 | 24 | --- 25 | 26 | ### 🔧 Behind the scenes 27 | 28 | When you define a managed node group: 29 | - AWS uses a **standard AMI** (like [`Amazon EKS-optimized Linux`](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html)). 30 | - Creates an **Auto Scaling Group** for the nodes. 31 | - Automatically **joins nodes to the EKS cluster**. 
32 | - Nodes launched as part of a managed node group are **automatically tagged for auto-discovery by the Kubernetes [Cluster Autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md)** 33 | 34 | --- 35 | 36 | ### 🆚 Self-managed vs Managed Node Groups 37 | 38 | | Feature | Managed Node Group | Self-managed Node Group | 39 | |--------|---------------------|--------------------------| 40 | | Provisioned by AWS? | ✅ Yes | ❌ No (you create the ASG manually) | 41 | | Node lifecycle managed? | ✅ Yes | ❌ You manage it | 42 | | Updates and draining? | ✅ Yes | ❌ Manual | 43 | | Custom bootstrap logic? | ❌ Limited | ✅ Full control | 44 | | Use custom AMIs easily? | ⚠️ Somewhat tricky | ✅ Fully customizable | 45 | 46 | --- 47 | 48 | ### 🔑 Access the cluster 49 | 50 | To update your local `kubeconfig` file and access cluster, run: 51 | ```bash 52 | aws eks update-kubeconfig --region us-east-1 --name eks-cluster 53 | ``` 54 | 55 | To verify access: 56 | ```bash 57 | kubectl get no 58 | ``` 59 | 60 | --- 61 | 62 | ### 🔍 Inspect the cluster 63 | 64 | Set the following variables: 65 | ```bash 66 | export AWS_REGION=us-east-1 67 | export EKS_CLUSTER_NAME=eks-cluster 68 | export EKS_MNG_NAME= 69 | ``` 70 | 71 | To get the actual name of the provisioned node group name, run: 72 | ```bash 73 | terraform output 74 | ``` 75 | 76 | Node group name should look like this: `example-20250416095857186100000013` 77 | 78 | Inspect the provisioned managed node group: 79 | ```bash 80 | eksctl get nodegroup --cluster $EKS_CLUSTER_NAME --name $EKS_MNG_NAME 81 | ``` 82 | 83 | ![](https://i.imgur.com/25VDxbr.png) 84 | 85 | You can also inspect the nodes and the placement in the availability zones. 
86 | ```bash 87 | kubectl get nodes -o wide --label-columns topology.kubernetes.io/zone 88 | ``` 89 | 90 | You should see that by default nodes are distributed over multiple subnets in various availability zones, providing high availability 91 | 92 | ![](https://i.imgur.com/T2bUuWp.png) 93 | 94 | 95 | --- 96 | 97 | ### ➕ Add nodes to the cluster 98 | 99 | While working with your cluster, you may need to update your managed node group configuration to add additional nodes to support the needs of your workloads. 100 | We will be using the `aws eks update-nodegroup-config` command to scale a node group. 101 | 102 | We'll scale the nodegroup by changing the node count from 2 to 4 for **desired capacity** using below command: 103 | ```bash 104 | aws eks update-nodegroup-config --cluster-name $EKS_CLUSTER_NAME \ 105 | --nodegroup-name $EKS_MNG_NAME --scaling-config minSize=2,maxSize=6,desiredSize=4 106 | ``` 107 | 108 | After making changes to the node group it may take up to **2-3 minutes for node provisioning** and configuration changes to take effect. Let's retrieve the nodegroup configuration again and look at minimum size, maximum size and desired capacity of nodes using `eksctl` command below: 109 | ```bash 110 | eksctl get nodegroup --name $EKS_MNG_NAME --cluster $EKS_CLUSTER_NAME 111 | ``` 112 | 113 | Monitor the nodes in the cluster using the following command with the --watch argument until there are 4 nodes: 114 | ```bash 115 | kubectl get nodes --watch 116 | ``` 117 | 118 | You should see 4 provisioned nodes: 119 | 120 | ![](https://i.imgur.com/am4A2VQ.png) 121 | 122 | --- 123 | 124 | ### 🚀 (OPTIONAL) Deploy the sample application 125 | 126 | The sample application models a simple web store application, where customers can browse a catalog, add items to their cart and complete an order through the checkout process. 
127 | 128 | ![](https://eksworkshop.com/assets/images/home-139b528766858df3dd66ae3c09ec12ad.webp) 129 | 130 | You can find the full source code for the sample application on [GitHub](https://github.com/aws-containers/retail-store-sample-app). 131 | 132 | The application has several components and dependencies: 133 | 134 | ![](https://eksworkshop.com/assets/images/architecture-e1a8acbd5d28dacee67a6548ca9dbefa.webp) 135 | 136 | | Component | Description | 137 | |----------------|-----------------------------------------------------------------------------| 138 | | UI | Provides the front end user interface and aggregates API calls to the various other services. | 139 | | Catalog | API for product listings and details | 140 | | Cart | API for customer shopping carts | 141 | | Checkout | API to orchestrate the checkout process | 142 | | Orders | API to receive and process customer orders | 143 | | Static assets | Serves static assets like images related to the product catalog | 144 | 145 | Use kubectl to run the application: 146 | ```bash 147 | kubectl apply -f https://github.com/aws-containers/retail-store-sample-app/releases/latest/download/kubernetes.yaml 148 | kubectl wait --for=condition=available deployments --all 149 | ``` 150 | 151 | Get the URL for the frontend load balancer like so: 152 | ```bash 153 | kubectl get svc ui 154 | ``` 155 | 156 | ![](https://i.imgur.com/KujZN3f.png) 157 | 158 | To remove the application use kubectl again: 159 | ```bash 160 | kubectl delete -f https://github.com/aws-containers/retail-store-sample-app/releases/latest/download/kubernetes.yaml 161 | ``` 162 | 163 | --- 164 | 165 | ### ⬆️ Upgrading AMIs 166 | 167 | The [Amazon EKS optimized Amazon Linux AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-amis.html) is built on top of Amazon Linux 2, and is configured to serve as the base image for Amazon EKS nodes. 
It's considered a best practice to use the latest version of the EKS-Optimized AMI when you add nodes to an EKS cluster, as new releases include Kubernetes patches and security updates. It's also important to upgrade existing nodes already provisioned in the EKS cluster. 168 | 169 | EKS managed node groups provides the capability to automate the update of the AMI being used by the nodes it manages. It will automatically drain nodes using the Kubernetes API and respects the [Pod disruption budgets](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/) that you set for your Pods to ensure that your applications stay available. 170 | 171 | > A **Pod Disruption Budget (PDB)** is a Kubernetes policy that ensures a certain number of pods always stay running by limiting how many can be taken down during voluntary disruptions like node upgrades or scaling operations. 172 | 173 | The Amazon EKS managed worker node upgrade has 4 phases: 174 | 175 | **Setup**: 176 | - Create a new Amazon EC2 Launch Template version associated with Auto Scaling group with the latest AMI 177 | - Point your Auto Scaling group to use the latest version of the launch template 178 | - Determine the maximum number of nodes to upgrade in parallel using the `updateconfig` property for the node group. 179 | 180 | **Scale Up**: 181 | - During the upgrade process, the upgraded nodes are launched in the same availability zone as those that are being upgraded 182 | - Increments the Auto Scaling Group’s maximum size and desired size to support the additional nodes 183 | - After scaling the Auto Scaling Group, it checks if the nodes using the latest configuration are present in the node group. 184 | - Applies a `eks.amazonaws.com/nodegroup=unschedulable:NoSchedule` taint on every node in the node group without the latest labels. This prevents nodes that have already been updated from a previous failed update from being tainted. 
185 | 186 | **Upgrade**: 187 | - Randomly selects a node and drains the Pods from the node. 188 | - Cordons the node after every Pod is evicted and waits for 60 seconds 189 | - Sends a termination request to the Auto Scaling Group for the cordoned node. 190 | - Applies same across all nodes which are part of Managed Node group making sure there are no nodes with older version 191 | 192 | **Scale Down**: 193 | - The scale down phase decrements the Auto Scaling group maximum size and desired size by one until the the values are the same as before the update started. 194 | 195 | To find out what the latest AMI version is available for EKS: 196 | ```bash 197 | EKS_VERSION=$(aws eks describe-cluster --name $EKS_CLUSTER_NAME --query "cluster.version" --output text) 198 | aws ssm get-parameter --name /aws/service/eks/optimized-ami/$EKS_VERSION/amazon-linux-2/recommended/image_id --region $AWS_REGION --query "Parameter.Value" --output text 199 | ``` 200 | 201 | When you initiate a managed node group update, Amazon EKS automatically updates your nodes for you, completing the steps listed above. If you're using an Amazon EKS optimized AMI, Amazon EKS automatically applies the latest security patches and operating system updates to your nodes as part of the latest AMI release version. 
202 | 203 | To initiate an update of the managed node group, run: 204 | ```bash 205 | aws eks update-nodegroup-version --cluster-name $EKS_CLUSTER_NAME --nodegroup-name $EKS_MNG_NAME 206 | ``` 207 | 208 | You can watch activity on the nodes using `kubectl`: 209 | ```bash 210 | kubectl get nodes --watch 211 | ``` 212 | 213 | --- 214 | 215 | ### 🔗 References 216 | 217 | - [AWS Docs: Simplify node lifecycle with managed node groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) 218 | - [Amazon EKS: Best Practices for Reliability](https://docs.aws.amazon.com/eks/latest/best-practices/reliability.html) 219 | - [Terraform module to create Amazon Elastic Kubernetes (EKS) resources](https://registry.terraform.io/modules/terraform-aws-modules/eks/aws/latest) 220 | - [EKS Workshop: Managed Node Groups](https://eksworkshop.com/docs/fundamentals/managed-node-groups/) -------------------------------------------------------------------------------- /eks-with-managed-node-group/outputs.tf: -------------------------------------------------------------------------------- 1 | output "eks_node_group_id_map" { 2 | description = "Map of node group names to their Node Group IDs" 3 | value = { for name, ng in module.eks.eks_managed_node_groups : name => ng.node_group_id } 4 | } 5 | -------------------------------------------------------------------------------- /eks-with-managed-node-group/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = ">= 5.62" 6 | } 7 | } 8 | } 9 | 10 | provider "aws" { 11 | region = "us-east-1" 12 | } -------------------------------------------------------------------------------- /example-rpc-call-to-ec2/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 
2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "4.67.0" 6 | constraints = "~> 4.0" 7 | hashes = [ 8 | "h1:dCRc4GqsyfqHEMjgtlM1EympBcgTmcTkWaJmtd91+KA=", 9 | "zh:0843017ecc24385f2b45f2c5fce79dc25b258e50d516877b3affee3bef34f060", 10 | "zh:19876066cfa60de91834ec569a6448dab8c2518b8a71b5ca870b2444febddac6", 11 | "zh:24995686b2ad88c1ffaa242e36eee791fc6070e6144f418048c4ce24d0ba5183", 12 | "zh:4a002990b9f4d6d225d82cb2fb8805789ffef791999ee5d9cb1fef579aeff8f1", 13 | "zh:559a2b5ace06b878c6de3ecf19b94fbae3512562f7a51e930674b16c2f606e29", 14 | "zh:6a07da13b86b9753b95d4d8218f6dae874cf34699bca1470d6effbb4dee7f4b7", 15 | "zh:768b3bfd126c3b77dc975c7c0e5db3207e4f9997cf41aa3385c63206242ba043", 16 | "zh:7be5177e698d4b547083cc738b977742d70ed68487ce6f49ecd0c94dbf9d1362", 17 | "zh:8b562a818915fb0d85959257095251a05c76f3467caa3ba95c583ba5fe043f9b", 18 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 19 | "zh:9c385d03a958b54e2afd5279cd8c7cbdd2d6ca5c7d6a333e61092331f38af7cf", 20 | "zh:b3ca45f2821a89af417787df8289cb4314b273d29555ad3b2a5ab98bb4816b3b", 21 | "zh:da3c317f1db2469615ab40aa6baba63b5643bae7110ff855277a1fb9d8eb4f2c", 22 | "zh:dc6430622a8dc5cdab359a8704aec81d3825ea1d305bbb3bbd032b1c6adfae0c", 23 | "zh:fac0d2ddeadf9ec53da87922f666e1e73a603a611c57bcbc4b86ac2821619b1d", 24 | ] 25 | } 26 | 27 | provider "registry.terraform.io/hashicorp/external" { 28 | version = "2.3.4" 29 | hashes = [ 30 | "h1:XWkRZOLKMjci9/JAtE8X8fWOt7A4u+9mgXSUjc4Wuyo=", 31 | "zh:037fd82cd86227359bc010672cd174235e2d337601d4686f526d0f53c87447cb", 32 | "zh:0ea1db63d6173d01f2fa8eb8989f0809a55135a0d8d424b08ba5dabad73095fa", 33 | "zh:17a4d0a306566f2e45778fbac48744b6fd9c958aaa359e79f144c6358cb93af0", 34 | "zh:298e5408ab17fd2e90d2cd6d406c6d02344fe610de5b7dae943a58b958e76691", 35 | "zh:38ecfd29ee0785fd93164812dcbe0664ebbe5417473f3b2658087ca5a0286ecb", 36 | "zh:59f6a6f31acf66f4ea3667a555a70eba5d406c6e6d93c2c641b81d63261eeace", 37 | 
"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 38 | "zh:ad0279dfd09d713db0c18469f585e58d04748ca72d9ada83883492e0dd13bd58", 39 | "zh:c69f66fd21f5e2c8ecf7ca68d9091c40f19ad913aef21e3ce23836e91b8cbb5f", 40 | "zh:d4a56f8c48aa86fc8e0c233d56850f5783f322d6336f3bf1916e293246b6b5d4", 41 | "zh:f2b394ebd4af33f343835517e80fc876f79361f4688220833bc3c77655dd2202", 42 | "zh:f31982f29f12834e5d21e010856eddd19d59cd8f449adf470655bfd19354377e", 43 | ] 44 | } 45 | -------------------------------------------------------------------------------- /example-rpc-call-to-ec2/README.md: -------------------------------------------------------------------------------- 1 | ## Example of making Remote Procedure Call to a service running on a remote EC2 instance 2 | 3 | This example demonstrates how to make a Remote Procedure Call (RPC) from your local laptop to a service running on a remote AWS EC2 instance. It uses Python's built-in `xmlrpc` library for simplicity. Example also provides Terraform code to provision the necessary AWS infrastructure, including the EC2 instance and networking components. 4 | 5 | **Key Takeaways**: 6 | 7 | - **RPC Simplifies Remote Communication**: Allows you to invoke remote methods as if they were local. 8 | - **Security Is Crucial**: Always consider security best practices when exposing services over the internet. 9 | 10 | **Disclaimer**: This example is for educational purposes. In a production environment, additional considerations are necessary, such as robust error handling, logging, security enhancements (authentication, encryption), and scalability. 11 | 12 | ### **Overview** 13 | 14 | ![](../img/rpc_call.png) 15 | 16 | - **Server**: A Python XML-RPC server running on an AWS EC2 instance. 17 | - **Client**: A Python script running on your laptop that calls the remote procedure on the server. 18 | - **Infrastructure**: Provisioned using Terraform, including: 19 | - An EC2 instance running Amazon Linux 2. 
20 | - Security Group configured to allow incoming traffic on the RPC port (port 8000). 21 | 22 | ### **Prerequisites** 23 | 24 | - **SSH Key Pair**: An existing SSH key pair to access the EC2 instance. 25 | - **Note**: For security reasons, Terraform cannot create SSH key pairs with private keys. You need to create one beforehand and make sure it's registered in AWS. 26 | - **Python**: Installed on both your laptop and the EC2 instance (Python 3 recommended). 27 | 28 | --- 29 | 30 | ### **Prepare the SSH Key Pair** 31 | 32 | Before running the Terraform code, ensure you have an SSH key pair: 33 | 34 | 1. **Generate a Key Pair (if you don't have one)**: 35 | 36 | ```bash 37 | ssh-keygen -t rsa -b 2048 -f my-ec2-keypair 38 | ``` 39 | 40 | This will generate `my-ec2-keypair` (private key) and `my-ec2-keypair.pub` (public key). 41 | 42 | 2. **Import the Public Key into AWS**: 43 | 44 | - Go to the AWS Management Console. 45 | - Navigate to **EC2** > **Key Pairs**. 46 | - Click **Import Key Pair**. 47 | - Name it `my-ec2-keypair` and upload the contents of `my-ec2-keypair.pub`. 48 | 49 | ### **Deploy the Infrastructure with Terraform** 50 | 51 | 1. **Initialize the Terraform Project**: 52 | 53 | ```bash 54 | terraform init 55 | ``` 56 | 57 | 2. **Preview the Changes**: 58 | 59 | ```bash 60 | terraform plan 61 | ``` 62 | 63 | 3. **Apply the Changes to Create Resources**: 64 | 65 | ```bash 66 | terraform apply 67 | ``` 68 | 69 | 4. **Note the Public IP Address**: 70 | 71 | After the resources are created, Terraform will output the details. Note the public IP address of the EC2 instance. 72 | 73 | ### **Set Up the RPC Server on the EC2 Instance** 74 | 75 | 1. **Connect to the EC2 Instance via SSH**: 76 | 77 | ```bash 78 | ssh -i my-ec2-keypair ec2-user@ 79 | ``` 80 | 81 | Replace `` with the Public IP assigned to your instance. 82 | 83 | 2. 
**Verify Python Installation**: 84 | 85 | ```bash 86 | python3 --version 87 | ``` 88 | 89 | If Python 3 is not installed, install it: 90 | 91 | ```bash 92 | sudo yum update -y 93 | sudo yum install -y python3 94 | ``` 95 | 96 | 3. **Create the RPC Server Script**: 97 | 98 | ```bash 99 | vim server.py 100 | ``` 101 | 102 | Paste the following code: 103 | 104 | ```python 105 | from xmlrpc.server import SimpleXMLRPCServer 106 | 107 | def add_numbers(x, y): 108 | """Adds two numbers and returns the result.""" 109 | return x + y 110 | 111 | # Create an XML-RPC server 112 | server = SimpleXMLRPCServer(("0.0.0.0", 8000)) 113 | print("Server is listening on port 8000...") 114 | 115 | # Register the function so it can be called via RPC 116 | server.register_function(add_numbers, "add_numbers") 117 | 118 | # Run the server's main loop 119 | server.serve_forever() 120 | ``` 121 | 122 | **Note**: The `"0.0.0.0"` address is used to listen on all network interfaces. 123 | 124 | 4. **Run the RPC Server**: 125 | 126 | ```bash 127 | python3 server.py 128 | ``` 129 | 130 | You should see: 131 | 132 | ``` 133 | Server is listening on port 8000... 134 | ``` 135 | 136 | **Optional**: To keep the server running after you log out, you can use `screen`, `tmux`, or run the script as a background process. 137 | 138 | - **Using `screen`**: 139 | 140 | ```bash 141 | sudo yum install -y screen 142 | screen -S rpc_server 143 | python3 server.py 144 | ``` 145 | 146 | Press `Ctrl+A` then `D` to detach. 147 | 148 | --- 149 | 150 | ### **Run the RPC Client on Your Laptop** 151 | 152 | 1. **Place the public IP of EC2 instance**: 153 | 154 | Open `client.py` file and replace `` with the Public IP of your EC2 instance. 155 | 156 | 2. 
**Run the Client Script**: 157 | 158 | ```bash 159 | python3 client.py 160 | ``` 161 | 162 | You should see: 163 | 164 | ``` 165 | The result is: 7 166 | ``` 167 | 168 | ### **Running Unit Tests** 169 | 170 | To run unit tests: 171 | ```bash 172 | python -m unittest test_add_numbers.py 173 | ``` 174 | 175 | These unit tests use the **real remote calls** to the RPC server on EC2 instance. 176 | 177 | ### **Security Considerations** 178 | 179 | - **Restrict Inbound Traffic**: 180 | 181 | - For security, you should restrict the inbound rules in your Security Group to allow traffic only from your IP address. 182 | - You can find your public IP address by visiting [whatismyip.com](https://www.whatismyip.com/) or use the `get_public_ip.sh` script. 183 | 184 | - **Firewall on EC2 Instance**: 185 | 186 | - Ensure that the EC2 instance's firewall (iptables) allows incoming connections on port 8000. 187 | - Amazon Linux 2 typically doesn't have restrictive iptables rules by default. 188 | 189 | - **SSL/TLS Encryption**: 190 | 191 | - For production use, consider securing the RPC communication using SSL/TLS to encrypt the data transmitted over the network. 192 | 193 | - **Authentication (Optional)**: 194 | 195 | - Implement authentication mechanisms to ensure that only authorized clients can call your RPC server. 
196 | 197 | ### **Cleaning Up Resources** 198 | 199 | To avoid incurring charges on your AWS account, destroy the resources when you're done: 200 | 201 | ```bash 202 | terraform destroy 203 | ``` 204 | 205 | ### **References** 206 | 207 | - **AWS Provider for Terraform**: [https://registry.terraform.io/providers/hashicorp/aws/latest/docs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) 208 | - **Python xmlrpc Library**: [https://docs.python.org/3/library/xmlrpc.html](https://docs.python.org/3/library/xmlrpc.html) 209 | - **AWS EC2 User Guide**: [https://docs.aws.amazon.com/ec2/index.html](https://docs.aws.amazon.com/ec2/index.html) -------------------------------------------------------------------------------- /example-rpc-call-to-ec2/client.py: -------------------------------------------------------------------------------- 1 | import xmlrpc.client 2 | 3 | # Replace with the public IP 4 | server_ip = "" # or "localhost" for local testing 5 | 6 | # Create a proxy to the server 7 | proxy = xmlrpc.client.ServerProxy(f"http://{server_ip}:8000/") 8 | 9 | def get_sum(a, b): 10 | # Call the remote function as if it were local 11 | return proxy.add_numbers(a, b) 12 | 13 | # Test run 14 | if __name__ == "__main__": 15 | result = get_sum(2, 5) 16 | print(f"The result is: {result}") 17 | -------------------------------------------------------------------------------- /example-rpc-call-to-ec2/ec2.tf: -------------------------------------------------------------------------------- 1 | # Call the external data source to get your public IP 2 | data "external" "my_ip" { 3 | program = ["bash", "./get_public_ip.sh"] 4 | } 5 | 6 | # EC2 Key Pair (Replace if you have an existing one) 7 | resource "aws_key_pair" "my_ec2_keypair" { 8 | key_name = "my-ec2-keypair" 9 | public_key = file("~/.ssh/aws_test_demo_key.pub") # Replace with the path to your public key 10 | } 11 | 12 | # Security Group to allow SSH and RPC traffic 13 | resource "aws_security_group" 
"rpc_server_sg" {
14 |   name        = "rpc-server-sg"
15 |   description = "Allow SSH and RPC traffic"
16 | 
17 |   ingress {
18 |     description = "SSH"
19 |     from_port   = 22
20 |     to_port     = 22
21 |     protocol    = "tcp"
22 |     cidr_blocks = ["${data.external.my_ip.result.ip}/32"] # Restrict to your IP for security
23 |   }
24 | 
25 |   ingress {
26 |     description = "RPC Port"
27 |     from_port   = 8000
28 |     to_port     = 8000
29 |     protocol    = "tcp"
30 |     cidr_blocks = ["${data.external.my_ip.result.ip}/32"] # Restrict to your IP for security
31 |   }
32 | 
33 |   egress {
34 |     description = "All outbound traffic"
35 |     from_port   = 0
36 |     to_port     = 0
37 |     protocol    = "-1"
38 |     cidr_blocks = ["0.0.0.0/0"]
39 |   }
40 | }
41 | 
42 | # EC2 Instance running the XML-RPC server (Python 3 installed via user_data)
43 | resource "aws_instance" "rpc_server" {
44 |   ami           = "ami-06b21ccaeff8cd686" # Amazon Linux 2 AMI in us-east-1
45 |   instance_type = "t2.micro"
46 |   key_name      = aws_key_pair.my_ec2_keypair.key_name # reference the resource (not a literal) so Terraform creates the key pair first
47 |   vpc_security_group_ids      = [aws_security_group.rpc_server_sg.id] # attach by ID; name-based "security_groups" forces instance replacement on SG changes
48 |   associate_public_ip_address = true
49 | 
50 |   tags = {
51 |     Name = "RPC Server Instance"
52 |   }
53 | 
54 |   # User data script to install Python 3
55 |   user_data = <<-EOF
56 |               #!/bin/bash
57 |               yum update -y
58 |               yum install -y python3
59 |               EOF
60 | }
61 | 
-------------------------------------------------------------------------------- /example-rpc-call-to-ec2/get_public_ip.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ip=$(curl -s https://checkip.amazonaws.com)
3 | echo "{\"ip\": \"$ip\"}"
4 | 
-------------------------------------------------------------------------------- /example-rpc-call-to-ec2/main.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 |   required_version = ">= 0.14"
3 |   required_providers {
4 |     aws = {
5 |       source  = "hashicorp/aws"
6 |       version = "~> 4.0"
7 |     }
8 |   }
9 | }
10 | 
11 | provider "aws" {
12 |   region = "us-east-1"
13 | }
14 | 
-------------------------------------------------------------------------------- /example-rpc-call-to-ec2/outputs.tf: -------------------------------------------------------------------------------- 1 | output "ec2_public_ip" { 2 | description = "Public IP address of the EC2 instance" 3 | value = aws_instance.rpc_server.public_ip 4 | } 5 | -------------------------------------------------------------------------------- /example-rpc-call-to-ec2/server.py: -------------------------------------------------------------------------------- 1 | # Run this code on EC2 instance 2 | 3 | from xmlrpc.server import SimpleXMLRPCServer 4 | 5 | def add_numbers(x, y): 6 | """Adds two numbers and returns the result.""" 7 | return x + y 8 | 9 | # Create an XML-RPC server 10 | server = SimpleXMLRPCServer(("0.0.0.0", 8000)) 11 | print("Server is listening on port 8000...") 12 | 13 | # Register the function so it can be called via RPC 14 | server.register_function(add_numbers, "add_numbers") 15 | 16 | # Run the server's main loop 17 | server.serve_forever() -------------------------------------------------------------------------------- /example-rpc-call-to-ec2/test_add_numbers.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from client import get_sum 3 | 4 | class TestClient(unittest.TestCase): 5 | 6 | def test_add_numbers(self): 7 | # Test the addition function through the client connected to the server 8 | result = get_sum(3, 7) 9 | self.assertEqual(result, 10) 10 | 11 | def test_add_with_zero(self): 12 | # Test with zero to ensure basic cases are covered 13 | result = get_sum(0, 5) 14 | self.assertEqual(result, 5) 15 | 16 | def test_negative_numbers(self): 17 | # Test with negative numbers 18 | result = get_sum(-2, -3) 19 | self.assertEqual(result, -5) 20 | 21 | if __name__ == '__main__': 22 | unittest.main() 23 | -------------------------------------------------------------------------------- /img/aws_logo.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Brain2life/terraform-cookbook/67bda2203d557fca5b8366b135db2289544843fe/img/aws_logo.png -------------------------------------------------------------------------------- /img/diagram-ssm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Brain2life/terraform-cookbook/67bda2203d557fca5b8366b135db2289544843fe/img/diagram-ssm.png -------------------------------------------------------------------------------- /img/diagram_rds_bastion.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Brain2life/terraform-cookbook/67bda2203d557fca5b8366b135db2289544843fe/img/diagram_rds_bastion.png -------------------------------------------------------------------------------- /img/eks_access.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Brain2life/terraform-cookbook/67bda2203d557fca5b8366b135db2289544843fe/img/eks_access.png -------------------------------------------------------------------------------- /img/eks_node.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Brain2life/terraform-cookbook/67bda2203d557fca5b8366b135db2289544843fe/img/eks_node.png -------------------------------------------------------------------------------- /img/gitea_aws.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Brain2life/terraform-cookbook/67bda2203d557fca5b8366b135db2289544843fe/img/gitea_aws.png -------------------------------------------------------------------------------- /img/highly-available-nat-gw.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Brain2life/terraform-cookbook/67bda2203d557fca5b8366b135db2289544843fe/img/highly-available-nat-gw.png -------------------------------------------------------------------------------- /img/nacl_scheme.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Brain2life/terraform-cookbook/67bda2203d557fca5b8366b135db2289544843fe/img/nacl_scheme.png -------------------------------------------------------------------------------- /img/nat_instances_scheme.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Brain2life/terraform-cookbook/67bda2203d557fca5b8366b135db2289544843fe/img/nat_instances_scheme.png -------------------------------------------------------------------------------- /img/private_ip.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Brain2life/terraform-cookbook/67bda2203d557fca5b8366b135db2289544843fe/img/private_ip.png -------------------------------------------------------------------------------- /img/route.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Brain2life/terraform-cookbook/67bda2203d557fca5b8366b135db2289544843fe/img/route.png -------------------------------------------------------------------------------- /img/rpc_call.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Brain2life/terraform-cookbook/67bda2203d557fca5b8366b135db2289544843fe/img/rpc_call.png -------------------------------------------------------------------------------- /img/single_nat.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Brain2life/terraform-cookbook/67bda2203d557fca5b8366b135db2289544843fe/img/single_nat.png 
-------------------------------------------------------------------------------- /img/subnet_image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Brain2life/terraform-cookbook/67bda2203d557fca5b8366b135db2289544843fe/img/subnet_image.png -------------------------------------------------------------------------------- /img/teamcity_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Brain2life/terraform-cookbook/67bda2203d557fca5b8366b135db2289544843fe/img/teamcity_logo.png -------------------------------------------------------------------------------- /img/terraform-get-secrets-from-secrets-manager.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Brain2life/terraform-cookbook/67bda2203d557fca5b8366b135db2289544843fe/img/terraform-get-secrets-from-secrets-manager.png -------------------------------------------------------------------------------- /img/terraform-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Brain2life/terraform-cookbook/67bda2203d557fca5b8366b135db2289544843fe/img/terraform-logo.png -------------------------------------------------------------------------------- /img/vpc_for_eks.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Brain2life/terraform-cookbook/67bda2203d557fca5b8366b135db2289544843fe/img/vpc_for_eks.png -------------------------------------------------------------------------------- /img/vpc_resource_map.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Brain2life/terraform-cookbook/67bda2203d557fca5b8366b135db2289544843fe/img/vpc_resource_map.png 
-------------------------------------------------------------------------------- /img/vpn_diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Brain2life/terraform-cookbook/67bda2203d557fca5b8366b135db2289544843fe/img/vpn_diagram.png -------------------------------------------------------------------------------- /nat-instance-setup-for-nonprod/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "4.67.0" 6 | constraints = "~> 4.0" 7 | hashes = [ 8 | "h1:dCRc4GqsyfqHEMjgtlM1EympBcgTmcTkWaJmtd91+KA=", 9 | "zh:0843017ecc24385f2b45f2c5fce79dc25b258e50d516877b3affee3bef34f060", 10 | "zh:19876066cfa60de91834ec569a6448dab8c2518b8a71b5ca870b2444febddac6", 11 | "zh:24995686b2ad88c1ffaa242e36eee791fc6070e6144f418048c4ce24d0ba5183", 12 | "zh:4a002990b9f4d6d225d82cb2fb8805789ffef791999ee5d9cb1fef579aeff8f1", 13 | "zh:559a2b5ace06b878c6de3ecf19b94fbae3512562f7a51e930674b16c2f606e29", 14 | "zh:6a07da13b86b9753b95d4d8218f6dae874cf34699bca1470d6effbb4dee7f4b7", 15 | "zh:768b3bfd126c3b77dc975c7c0e5db3207e4f9997cf41aa3385c63206242ba043", 16 | "zh:7be5177e698d4b547083cc738b977742d70ed68487ce6f49ecd0c94dbf9d1362", 17 | "zh:8b562a818915fb0d85959257095251a05c76f3467caa3ba95c583ba5fe043f9b", 18 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 19 | "zh:9c385d03a958b54e2afd5279cd8c7cbdd2d6ca5c7d6a333e61092331f38af7cf", 20 | "zh:b3ca45f2821a89af417787df8289cb4314b273d29555ad3b2a5ab98bb4816b3b", 21 | "zh:da3c317f1db2469615ab40aa6baba63b5643bae7110ff855277a1fb9d8eb4f2c", 22 | "zh:dc6430622a8dc5cdab359a8704aec81d3825ea1d305bbb3bbd032b1c6adfae0c", 23 | "zh:fac0d2ddeadf9ec53da87922f666e1e73a603a611c57bcbc4b86ac2821619b1d", 24 | ] 25 | } 26 | 27 | provider 
"registry.terraform.io/hashicorp/external" { 28 | version = "2.3.4" 29 | hashes = [ 30 | "h1:XWkRZOLKMjci9/JAtE8X8fWOt7A4u+9mgXSUjc4Wuyo=", 31 | "zh:037fd82cd86227359bc010672cd174235e2d337601d4686f526d0f53c87447cb", 32 | "zh:0ea1db63d6173d01f2fa8eb8989f0809a55135a0d8d424b08ba5dabad73095fa", 33 | "zh:17a4d0a306566f2e45778fbac48744b6fd9c958aaa359e79f144c6358cb93af0", 34 | "zh:298e5408ab17fd2e90d2cd6d406c6d02344fe610de5b7dae943a58b958e76691", 35 | "zh:38ecfd29ee0785fd93164812dcbe0664ebbe5417473f3b2658087ca5a0286ecb", 36 | "zh:59f6a6f31acf66f4ea3667a555a70eba5d406c6e6d93c2c641b81d63261eeace", 37 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 38 | "zh:ad0279dfd09d713db0c18469f585e58d04748ca72d9ada83883492e0dd13bd58", 39 | "zh:c69f66fd21f5e2c8ecf7ca68d9091c40f19ad913aef21e3ce23836e91b8cbb5f", 40 | "zh:d4a56f8c48aa86fc8e0c233d56850f5783f322d6336f3bf1916e293246b6b5d4", 41 | "zh:f2b394ebd4af33f343835517e80fc876f79361f4688220833bc3c77655dd2202", 42 | "zh:f31982f29f12834e5d21e010856eddd19d59cd8f449adf470655bfd19354377e", 43 | ] 44 | } 45 | -------------------------------------------------------------------------------- /nat-instance-setup-for-nonprod/README.md: -------------------------------------------------------------------------------- 1 | # NAT instance setup in non-production environments 2 | 3 | This example project demonstrates the configuration that ensures **high availability** and **cost efficiency** of NAT instances in a non-production environment. You can use this project as a testing environment for developers. The cost compared to NAT gateways is lower, however it requires administering and maintenance activities for NAT instances. 
4 | 5 | ![](../img/nat_instances_scheme.png) 6 | 7 | - Deploying each NAT instance in a separate AZ provides **high-availability** 8 | - Auto-scaling group provides **horizontal scaling** ensuring that at any moment in time the specified number of NAT instances is up and running 9 | 10 | # Table of contents: 11 | - [AWS NAT Instances vs. AWS NAT Gateways](#aws-nat-instances-vs-aws-nat-gateways) 12 | - [Key Differences](#key-differences) 13 | - [Best Practices](#best-practices) 14 | - [Use Cases](#use-cases) 15 | - [Summary](#summary) 16 | - [Provision project](#provision-project) 17 | - [Create a Key Pair](#create-a-key-pair) 18 | - [Initialize the project](#initialize-the-project) 19 | - [Disable host checking](#disable-host-checking) 20 | - [Transfer SSH key into Bastion host](#transfer-ssh-key-into-bastion-host) 21 | - [Verify the connection to the Internet from private instance](#verify-the-connection-to-the-internet-from-private-instance) 22 | - [References](#references) 23 | 24 | ### AWS NAT Instances vs. AWS NAT Gateways 25 | 26 | For more information about differences between NAT instances and NAT Gateways, see [Compare NAT gateways and NAT instances](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-comparison.html) 27 | 28 | #### **AWS NAT Instances** 29 | - **Definition**: A [NAT (Network Address Translation) instance](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html) is an EC2 instance configured to allow instances in a private subnet to connect to the internet or other AWS services, while preventing incoming connections from the internet. 30 | - **Setup**: Manually configure a NAT instance by launching an EC2 instance in a public subnet and enabling IP forwarding. 31 | - **Management**: Requires manual scaling, monitoring, and updates. 32 | - **Customizability**: Highly customizable—you can install custom software, apply specific configurations, or use it for specialized use cases. 
33 | - **Performance**: Limited by the instance type and size you select. No inherent redundancy. 34 | - **Cost**: May be less expensive for small-scale setups because it only incurs EC2 instance and [data transfer costs](https://aws.amazon.com/ec2/pricing/on-demand/#:~:text=Generation%20Instances%20page.-,Data%20Transfer,-The%20pricing%20below). 35 | - **Security**: Requires manual setup of security groups, network ACLs, and [instance-specific hardening](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-best-practices.html). 36 | 37 | #### **AWS NAT Gateways** 38 | - **Definition**: A [managed NAT service](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html) provided by AWS that allows instances in a private subnet to access the internet securely. 39 | - **Setup**: Simple to set up via the AWS Management Console or AWS CLI. The NAT gateway is automatically deployed in a public subnet. 40 | - **Management**: Fully managed by AWS, with built-in scaling and high availability within an Availability Zone. 41 | - **Customizability**: Limited customization compared to NAT instances. 42 | - **Performance**: Scales automatically to handle high volumes of traffic. Supports up to 45 Gbps. 43 | - **Cost**: Higher cost due to [per-hour NAT Gateway usage fees](https://docs.aws.amazon.com/vpc/latest/userguide/nat-gateway-pricing.html) and data processing charges. 44 | - **Security**: Security group configurations aren't needed for the NAT Gateway itself; it's inherently secure by design. 
45 | 46 | --- 47 | 48 | ### **Key Differences** 49 | | Feature | NAT Instance | NAT Gateway | 50 | |----------------------|----------------------------|--------------------------------| 51 | | **Management** | Manual | Fully managed by AWS | 52 | | **Performance** | Limited by instance size | Scalable up to 45 Gbps | 53 | | **Redundancy** | Needs manual setup | High availability in one AZ | 54 | | **Customizability** | Highly customizable | Limited customization | 55 | | **Cost** | Lower at low traffic levels | Higher, especially for high traffic | 56 | | **Setup Complexity** | Requires configuration | Simple to deploy | 57 | | **Monitoring** | Needs manual setup | Integrated with AWS metrics | 58 | 59 | --- 60 | 61 | ### **Best Practices** 62 | 63 | #### **For NAT Instances** 64 | 1. **Instance Type**: 65 | - Use an instance type with enhanced networking for better performance (e.g., T3, C5). 66 | 2. **High Availability**: 67 | - Use an Auto Scaling Group and health checks to replace a failed instance automatically. 68 | - Use multiple NAT instances in different AZs to avoid single points of failure. 69 | 3. **Security**: 70 | - Harden the NAT instance by disabling unnecessary services and securing access. 71 | - Configure security groups and network ACLs properly. 72 | 4. **Monitoring**: 73 | - Use CloudWatch metrics to monitor network performance and usage. 74 | - Configure detailed logging via custom tools. 75 | 76 | #### **For NAT Gateways** 77 | 1. **Redundancy**: 78 | - Deploy NAT Gateways in multiple Availability Zones for fault tolerance. 79 | 2. **Cost Optimization**: 80 | - Optimize the data transfer by reducing unnecessary traffic through the NAT Gateway. 81 | - Use private endpoints for frequently accessed AWS services to minimize data charges. 82 | 3. **Performance**: 83 | - Use NAT Gateways for high-throughput applications, as they scale automatically. 84 | 4. 
**Monitoring**: 85 | - Use AWS CloudWatch for monitoring and alarms on metrics like packets in/out or errors. 86 | 87 | --- 88 | 89 | ### **Use Cases** 90 | 91 | #### **When to Use NAT Instances** 92 | - When cost is a primary concern for low-traffic environments. 93 | - When custom software or configurations are required (e.g., custom firewalls, traffic shaping). 94 | - In smaller environments or test setups where scalability and redundancy are not critical. 95 | 96 | #### **When to Use NAT Gateways** 97 | - For production environments requiring high reliability and scalability. 98 | - For high-throughput workloads that demand consistent performance. 99 | - When simplicity and minimal operational overhead are priorities. 100 | - For organizations that prefer fully managed AWS services with built-in monitoring and security. 101 | 102 | --- 103 | 104 | ### **Summary** 105 | - **NAT Instances** are cost-effective and flexible but require significant management and operational effort. 106 | - **NAT Gateways** are more suitable for production environments due to their scalability, simplicity, and fault-tolerant design, though they come at a higher cost. 107 | - Select the option that aligns with your workload, scalability requirements, and budget. For most production use cases, **NAT Gateways** are recommended for their reliability and ease of use. 
108 | 109 | 110 | ## Provision project 111 | 112 | ### Create a Key Pair 113 | 114 | To be able to SSH into Bastion host and NAT instances, provision the SSH key: 115 | ```bash 116 | aws ec2 create-key-pair --key-name ssh-key-pair --query 'KeyMaterial' --output text > ssh-key-pair.pem 117 | chmod 400 ssh-key-pair.pem 118 | ``` 119 | 120 | ### Initialize the project 121 | 122 | Run the following commands to deploy the infrastructure: 123 | ```bash 124 | terraform init 125 | terraform apply 126 | ``` 127 | 128 | ### Disable host checking 129 | 130 | In order to make NAT instances to be able to process the traffic, you need to disable host checking: 131 | ```bash 132 | ./disable_source_dest_check.sh 133 | ``` 134 | 135 | This Bash script disables source and destination checks on NAT instances. For more information, see [Disable source/destination checks](https://docs.aws.amazon.com/vpc/latest/userguide/work-with-nat-instances.html#EIP_Disable_SrcDestCheck) 136 | 137 | ### Transfer SSH key into Bastion host 138 | 139 | In order to SSH into private instance from Bastion host, you need to transfer the SSH key. One of the option is to create the key on Bastion host: 140 | ```bash 141 | # Copy the contents of the private SSH key from local machine 142 | xclip -sel clip < ssh-key-pair.pem 143 | 144 | # On Bastion host machine 145 | touch ssh-key-pair.pem 146 | chmod 400 ssh-key-pair.pem 147 | 148 | # Paste the contents of the private SSH key 149 | vim ssh-key-pair.pem 150 | ``` 151 | 152 | ### Verify the connection to the Internet from private instance 153 | 154 | SSH into private instance from Bastion host: 155 | ```bash 156 | ssh -i ssh-key-pair.pem ec2-user@ 157 | ``` 158 | 159 | Ping `google.com`: 160 | ```bash 161 | ping google.com 162 | ``` 163 | 164 | **NOTE** 165 | NAT configuration is done via EC2 user script `configure_nat.sh`. 
For more information, see [Create NAT AMI](https://docs.aws.amazon.com/vpc/latest/userguide/work-with-nat-instances.html#create-nat-ami) 166 | 167 | ## References 168 | 169 | - [AWS Docs: NAT instances](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html) -------------------------------------------------------------------------------- /nat-instance-setup-for-nonprod/bastion.tf: -------------------------------------------------------------------------------- 1 | # Security Group for Bastion Host: allows SSH only from the operator's current public IP 2 | resource "aws_security_group" "bastion" { 3 | vpc_id = aws_vpc.main.id 4 | 5 | ingress { 6 | from_port = 22 7 | to_port = 22 8 | protocol = "tcp" 9 | cidr_blocks = ["${data.external.my_ip.result.ip}/32"] # Restrict to your IP 10 | } 11 | 12 | egress { 13 | from_port = 0 14 | to_port = 0 15 | protocol = "-1" 16 | cidr_blocks = ["0.0.0.0/0"] 17 | } 18 | 19 | tags = { 20 | Name = "bastion-host-sg" 21 | } 22 | } 23 | 24 | # Bastion Host Instance 25 | resource "aws_instance" "bastion" { 26 | ami = data.aws_ami.amazon_linux.id 27 | instance_type = "t3.micro" 28 | subnet_id = aws_subnet.public[0].id # Place in the first public subnet 29 | key_name = "ssh-key-pair" 30 | 31 | vpc_security_group_ids = [ # SG IDs belong here; the 'security_groups' argument expects names and forces instance replacement on change 32 | aws_security_group.bastion.id 33 | ] 34 | 35 | tags = { 36 | Name = "bastion-host" 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /nat-instance-setup-for-nonprod/configure_nat.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | sudo yum install iptables-services -y 3 | sudo systemctl enable iptables 4 | sudo systemctl start iptables 5 | echo "net.ipv4.ip_forward=1" | sudo tee /etc/sysctl.d/custom-ip-forwarding.conf 6 | sudo sysctl -p /etc/sysctl.d/custom-ip-forwarding.conf 7 | PRIMARY_IF=$(netstat -i | awk '/^e/{print $1; exit}') 8 | sudo /sbin/iptables -t nat -A POSTROUTING -o $PRIMARY_IF -j MASQUERADE 9 | sudo /sbin/iptables -F FORWARD 10 | sudo service
iptables save -------------------------------------------------------------------------------- /nat-instance-setup-for-nonprod/data.tf: -------------------------------------------------------------------------------- 1 | # Call the external data source to get public IP 2 | data "external" "my_ip" { 3 | program = ["bash", "./get_public_ip.sh"] 4 | } -------------------------------------------------------------------------------- /nat-instance-setup-for-nonprod/disable_source_dest_check.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Variables 4 | REGION="us-east-1" # Change to your AWS region 5 | TAG_KEY="Name" 6 | TAG_VALUE_PREFIX="nat-instance-asg-" # Prefix for NAT instance Name tag 7 | 8 | # Fetch instance IDs with the specified tag pattern 9 | echo "Fetching NAT instance IDs with tag ${TAG_KEY} starting with ${TAG_VALUE_PREFIX}..." 10 | INSTANCE_IDS=$(aws ec2 describe-instances \ 11 | --region "${REGION}" \ 12 | --filters "Name=tag:${TAG_KEY},Values=${TAG_VALUE_PREFIX}*" "Name=instance-state-name,Values=running" \ 13 | --query "Reservations[].Instances[].InstanceId" \ 14 | --output text) 15 | 16 | if [ -z "${INSTANCE_IDS}" ]; then 17 | echo "No NAT instances found with the specified tag prefix." 18 | exit 1 19 | fi 20 | 21 | # Disable source/destination check for each instance 22 | for INSTANCE_ID in ${INSTANCE_IDS}; do 23 | echo "Disabling source/destination check for instance: ${INSTANCE_ID}..." 24 | aws ec2 modify-instance-attribute \ 25 | --region "${REGION}" \ 26 | --instance-id "${INSTANCE_ID}" \ 27 | --source-dest-check "{\"Value\": false}" 28 | 29 | if [ $? -eq 0 ]; then 30 | echo "Successfully disabled source/destination check for ${INSTANCE_ID}." 31 | else 32 | echo "Failed to disable source/destination check for ${INSTANCE_ID}." 33 | fi 34 | done 35 | 36 | echo "Source/destination check disabled for all matching NAT instances." 
37 | -------------------------------------------------------------------------------- /nat-instance-setup-for-nonprod/get_public_ip.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ip=$(curl -s https://checkip.amazonaws.com) 3 | echo "{\"ip\": \"$ip\"}" -------------------------------------------------------------------------------- /nat-instance-setup-for-nonprod/main.tf: -------------------------------------------------------------------------------- 1 | # VPC 2 | resource "aws_vpc" "main" { 3 | cidr_block = "10.0.0.0/16" 4 | enable_dns_support = true 5 | enable_dns_hostnames = true 6 | tags = { 7 | Name = "two-az-nat-vpc" 8 | } 9 | } 10 | 11 | # Internet Gateway 12 | resource "aws_internet_gateway" "main" { 13 | vpc_id = aws_vpc.main.id 14 | tags = { 15 | Name = "main-internet-gateway" 16 | } 17 | } 18 | 19 | # Public Subnets (Two AZs) 20 | resource "aws_subnet" "public" { 21 | count = 2 22 | vpc_id = aws_vpc.main.id 23 | cidr_block = cidrsubnet(aws_vpc.main.cidr_block, 8, count.index) # 10.0.0.0/24 and 10.0.1.0/24 24 | availability_zone = element(["us-east-1a", "us-east-1b"], count.index) 25 | map_public_ip_on_launch = true 26 | tags = { 27 | Name = "public-subnet-${count.index + 1}" 28 | } 29 | } 30 | 31 | # Private Subnets (Two AZs) 32 | resource "aws_subnet" "private" { 33 | count = 2 34 | vpc_id = aws_vpc.main.id 35 | cidr_block = cidrsubnet(aws_vpc.main.cidr_block, 8, count.index + 2) # 10.0.2.0/24 and 10.0.3.0/24 36 | availability_zone = element(["us-east-1a", "us-east-1b"], count.index) 37 | tags = { 38 | Name = "private-subnet-${count.index + 1}" 39 | } 40 | } 41 | 42 | # Route Table for Public Subnets 43 | resource "aws_route_table" "public" { 44 | vpc_id = aws_vpc.main.id 45 | 46 | route { 47 | cidr_block = "0.0.0.0/0" 48 | gateway_id = aws_internet_gateway.main.id 49 | } 50 | 51 | tags = { 52 | Name = "public-route-table" 53 | } 54 | } 55 | 56 | # Associate Public Route Table with Public Subnets 
57 | resource "aws_route_table_association" "public" { 58 | count = 2 59 | subnet_id = aws_subnet.public[count.index].id 60 | route_table_id = aws_route_table.public.id 61 | } 62 | 63 | # Security Group for NAT Instances 64 | resource "aws_security_group" "nat_instance" { 65 | vpc_id = aws_vpc.main.id 66 | 67 | ingress { 68 | from_port = 80 69 | to_port = 80 70 | protocol = "tcp" 71 | cidr_blocks = ["0.0.0.0/0"] 72 | } 73 | 74 | ingress { 75 | from_port = 443 76 | to_port = 443 77 | protocol = "tcp" 78 | cidr_blocks = ["0.0.0.0/0"] 79 | } 80 | 81 | ingress { 82 | from_port = -1 83 | to_port = -1 84 | protocol = "icmp" 85 | cidr_blocks = [aws_vpc.main.cidr_block] # Allow ICMP traffic in VPC 86 | } 87 | 88 | ingress { 89 | from_port = 22 90 | to_port = 22 91 | protocol = "tcp" 92 | cidr_blocks = ["${data.external.my_ip.result.ip}/32"] # Restrict to your IP 93 | } 94 | 95 | egress { 96 | from_port = 0 97 | to_port = 0 98 | protocol = "-1" 99 | cidr_blocks = ["0.0.0.0/0"] 100 | } 101 | 102 | tags = { 103 | Name = "nat-instance-sg" 104 | } 105 | } 106 | 107 | # Security Group for Private EC2 Instances 108 | resource "aws_security_group" "private_instance" { 109 | vpc_id = aws_vpc.main.id 110 | 111 | ingress { 112 | from_port = -1 113 | to_port = -1 114 | protocol = "icmp" 115 | cidr_blocks = ["0.0.0.0/0"] 116 | } 117 | 118 | ingress { 119 | from_port = 22 120 | to_port = 22 121 | protocol = "tcp" 122 | security_groups = [aws_security_group.bastion.id] # Allow Bastion SG 123 | } 124 | 125 | egress { 126 | from_port = 0 127 | to_port = 0 128 | protocol = "-1" 129 | cidr_blocks = ["0.0.0.0/0"] 130 | } 131 | 132 | tags = { 133 | Name = "private-ec2-sg" 134 | } 135 | } 136 | 137 | # Launch Template for NAT Instances 138 | resource "aws_launch_template" "nat" { 139 | name_prefix = "nat-instance" 140 | 141 | image_id = "ami-0453ec754f44f9a4a" # Amazon Linux 2023 142 | instance_type = "t3.micro" 143 | key_name = "ssh-key-pair" 144 | 145 | network_interfaces { 146 | 
associate_public_ip_address = true 147 | security_groups = [aws_security_group.nat_instance.id] 148 | } 149 | 150 | # Configure NAT service 151 | user_data = filebase64("configure_nat.sh") 152 | 153 | tags = { 154 | Name = "nat-instance-template" 155 | } 156 | } 157 | 158 | # Auto Scaling Groups for NAT Instances (One per AZ) 159 | resource "aws_autoscaling_group" "nat" { 160 | count = 2 161 | vpc_zone_identifier = [aws_subnet.public[count.index].id] 162 | 163 | launch_template { 164 | id = aws_launch_template.nat.id 165 | version = "$Latest" 166 | } 167 | 168 | min_size = 1 169 | max_size = 1 170 | desired_capacity = 1 171 | 172 | tag { 173 | key = "Name" 174 | value = "nat-instance-asg-${count.index + 1}" 175 | propagate_at_launch = true 176 | } 177 | } 178 | 179 | # Private EC2 Instances (One per Private Subnet) 180 | resource "aws_instance" "private" { 181 | count = 2 182 | ami = data.aws_ami.amazon_linux.id 183 | instance_type = "t3.micro" 184 | subnet_id = aws_subnet.private[count.index].id 185 | key_name = "ssh-key-pair" 186 | vpc_security_group_ids = [ # SG IDs belong here; the 'security_groups' argument expects names and forces instance replacement on change 187 | aws_security_group.private_instance.id 188 | ] 189 | 190 | tags = { 191 | Name = "private-ec2-${count.index + 1}" 192 | } 193 | } 194 | 195 | # Data Source for Amazon Linux AMI 196 | data "aws_ami" "amazon_linux" { 197 | most_recent = true 198 | owners = ["amazon"] 199 | 200 | filter { 201 | name = "name" 202 | values = ["amzn2-ami-hvm-*-x86_64-gp2"] 203 | } 204 | } 205 | 206 | # Retrieve NAT Instances in Public Subnet 207 | data "aws_instances" "nat_instances" { 208 | 209 | depends_on = [aws_autoscaling_group.nat] # Ensure ASG is created first 210 | 211 | filter { 212 | name = "tag:Name" 213 | values = ["nat-instance-asg-*"] # Matches the Name tag applied during launch 214 | } 215 | 216 | filter { 217 | name = "instance-state-name" 218 | values = ["running"] # Only retrieve running instances 219 | } 220 | } 221 | 222 | # Define route tables for resources in private subnets 223 | resource "aws_route_table"
"private" { 224 | count = 2 225 | 226 | vpc_id = aws_vpc.main.id 227 | 228 | route { 229 | cidr_block = "0.0.0.0/0" 230 | instance_id = data.aws_instances.nat_instances.ids[0] # Go to Internet via NAT instance target 231 | } 232 | } 233 | 234 | # Associate Private Route Tables with Private Subnets 235 | resource "aws_route_table_association" "private" { 236 | count = 2 237 | subnet_id = aws_subnet.private[count.index].id 238 | route_table_id = aws_route_table.private[count.index].id 239 | } -------------------------------------------------------------------------------- /nat-instance-setup-for-nonprod/outputs.tf: -------------------------------------------------------------------------------- 1 | output "bastion_public_ip" { 2 | value = aws_instance.bastion.public_ip 3 | description = "Public IP address of the Bastion Host" 4 | } 5 | 6 | output "nat_public_ips" { 7 | value = data.aws_instances.nat_instances.public_ips 8 | description = "Public IP addresses of the NAT instances" 9 | } 10 | 11 | output "private_instance_ip_first" { 12 | value = aws_instance.private[0].private_ip 13 | description = "Private IP address of the first EC2 instance in private subnet" 14 | } 15 | 16 | output "private_instance_ip_second" { 17 | value = aws_instance.private[1].private_ip 18 | description = "Private IP address of the second EC2 instance in private subnet" 19 | } -------------------------------------------------------------------------------- /nat-instance-setup-for-nonprod/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.14" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = "~> 4.0" 7 | } 8 | } 9 | } 10 | 11 | provider "aws" { 12 | region = "us-east-1" 13 | } -------------------------------------------------------------------------------- /nginx-webserver-ec2/README.md: -------------------------------------------------------------------------------- 1 | # 
Deploy nginx based web server on AWS EC2 instance 2 | 3 | This terraform template deploys single EC2 instance with Nginx web server installed and running. The web server opens `HTTP 80` and `SSH 22` ports for access. SSH key and EC2 instances are deployed via respective separate folders. 4 | 5 | Deployment steps: 6 | 1. First deploy SSH key and obtain the generated private key 7 | 2. Deploy EC2 instance with SSH key linked in the previous step 8 | 9 | AWS Terraform provider version is locked with `5.45.0` version. 10 | 11 | ## Explanation of `key_pair.tf` 12 | - **TLS Private Key Resource**: The `tls_private_key` resource from the `tls` provider is used to generate a private RSA key. 13 | - **AWS Key Pair Resource**: The `aws_key_pair` resource uploads the generated public key to AWS under the specified key name. This allows you to use this key pair for EC2 instances. 14 | - **Output**: The private key is outputted for your use. It’s marked as sensitive to prevent it from being displayed in the logs. 15 | 16 | ## References: 17 | - [Amazon EC2 Ubuntu AMI Locator](https://cloud-images.ubuntu.com/locator/ec2/) 18 | - [CentOS AWS AMI Cloud Images](https://www.centos.org/download/aws-images/) -------------------------------------------------------------------------------- /nginx-webserver-ec2/ec2-instance/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "5.45.0" 6 | constraints = "5.45.0" 7 | hashes = [ 8 | "h1:4Vgk51R7iTY1oczaTQDG+DkA9nE8TmjlUtecqXX6qDU=", 9 | "zh:1379bcf45aef3d486ee18b4f767bfecd40a0056510d26107f388be3d7994c368", 10 | "zh:1615a6f5495acfb3a0cb72324587261dd4d72711a3cc51aff13167b14531501e", 11 | "zh:18b69a0f33f8b1862fbd3f200756b7e83e087b73687085f2cf9c7da4c318e3e6", 12 | "zh:2c5e7aecd197bc3d3b19290bad8cf4c390c2c6a77bb165da4e11f53f2dfe2e54", 13 | "zh:3794da9bef97596e3bc60e12cdd915bda5ec2ed62cd1cd93723d58b4981905fe", 14 | "zh:40a5e45ed91801f83db76dffd467dcf425ea2ca8642327cf01119601cb86021c", 15 | "zh:4abfc3f53d0256a7d5d1fa5e931e4601b02db3d1da28f452341d3823d0518f1a", 16 | "zh:4eb0e98078f79aeb06b5ff6115286dc2135d12a80287885698d04036425494a2", 17 | "zh:75470efbadea4a8d783642497acaeec5077fc4a7f3df3340defeaa1c7de29bf7", 18 | "zh:8861a0b4891d5fa2fa7142f236ae613cea966c45b5472e3915a4ac3abcbaf487", 19 | "zh:8bf6f21cd9390b742ca0b4393fde92616ca9e6553fb75003a0999006ad233d35", 20 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 21 | "zh:ad73008a044e75d337acda910fb54d8b81a366873c8a413fec1291034899a814", 22 | "zh:bf261713b0b8bebfe8c199291365b87d9043849f28a2dc764bafdde73ae43693", 23 | "zh:da3bafa1fd830be418dfcc730e85085fe67c0d415c066716f2ac350a2306f40a", 24 | ] 25 | } 26 | -------------------------------------------------------------------------------- /nginx-webserver-ec2/ec2-instance/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "= 5.45.0" 6 | } 7 | } 8 | } 9 | 10 | provider "aws" { 11 | region = "us-east-1" 12 | } 13 | 14 | resource "aws_instance" "web_server" { 15 | ami = "ami-061612d72693df8ce" # Update this to the latest Ubuntu AMI in your region. 
This AMI is Ubuntu 20.04 Focal Fossa 16 | instance_type = "t2.micro" 17 | key_name = "terraform-generated-key" # Use the name of your existing key pair 18 | 19 | security_groups = [aws_security_group.web_sg.name] 20 | 21 | user_data = <<-EOF 22 | #!/bin/bash 23 | sudo apt update 24 | sudo apt install -y nginx 25 | echo 'Hello from Web Server!' | sudo tee /var/www/html/index.html 26 | EOF 27 | 28 | tags = { 29 | Name = "WebServer" 30 | } 31 | } 32 | 33 | output "public_ip" { 34 | value = aws_instance.web_server.public_ip 35 | description = "The public IP address of the web server." 36 | } -------------------------------------------------------------------------------- /nginx-webserver-ec2/ec2-instance/sg.tf: -------------------------------------------------------------------------------- 1 | resource "aws_security_group" "web_sg" { 2 | name = "web_sg" 3 | description = "Allow HTTP and SSH access" 4 | 5 | ingress { 6 | from_port = 80 7 | to_port = 80 8 | protocol = "tcp" 9 | cidr_blocks = ["0.0.0.0/0"] 10 | } 11 | 12 | ingress { 13 | from_port = 22 14 | to_port = 22 15 | protocol = "tcp" 16 | cidr_blocks = ["0.0.0.0/0"] # For security, restrict this to your IP range 17 | } 18 | 19 | egress { 20 | from_port = 0 21 | to_port = 0 22 | protocol = "-1" 23 | cidr_blocks = ["0.0.0.0/0"] 24 | } 25 | } -------------------------------------------------------------------------------- /nginx-webserver-ec2/ssh-key/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "5.45.0" 6 | constraints = "5.45.0" 7 | hashes = [ 8 | "h1:4Vgk51R7iTY1oczaTQDG+DkA9nE8TmjlUtecqXX6qDU=", 9 | "zh:1379bcf45aef3d486ee18b4f767bfecd40a0056510d26107f388be3d7994c368", 10 | "zh:1615a6f5495acfb3a0cb72324587261dd4d72711a3cc51aff13167b14531501e", 11 | "zh:18b69a0f33f8b1862fbd3f200756b7e83e087b73687085f2cf9c7da4c318e3e6", 12 | "zh:2c5e7aecd197bc3d3b19290bad8cf4c390c2c6a77bb165da4e11f53f2dfe2e54", 13 | "zh:3794da9bef97596e3bc60e12cdd915bda5ec2ed62cd1cd93723d58b4981905fe", 14 | "zh:40a5e45ed91801f83db76dffd467dcf425ea2ca8642327cf01119601cb86021c", 15 | "zh:4abfc3f53d0256a7d5d1fa5e931e4601b02db3d1da28f452341d3823d0518f1a", 16 | "zh:4eb0e98078f79aeb06b5ff6115286dc2135d12a80287885698d04036425494a2", 17 | "zh:75470efbadea4a8d783642497acaeec5077fc4a7f3df3340defeaa1c7de29bf7", 18 | "zh:8861a0b4891d5fa2fa7142f236ae613cea966c45b5472e3915a4ac3abcbaf487", 19 | "zh:8bf6f21cd9390b742ca0b4393fde92616ca9e6553fb75003a0999006ad233d35", 20 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 21 | "zh:ad73008a044e75d337acda910fb54d8b81a366873c8a413fec1291034899a814", 22 | "zh:bf261713b0b8bebfe8c199291365b87d9043849f28a2dc764bafdde73ae43693", 23 | "zh:da3bafa1fd830be418dfcc730e85085fe67c0d415c066716f2ac350a2306f40a", 24 | ] 25 | } 26 | 27 | provider "registry.terraform.io/hashicorp/local" { 28 | version = "2.5.1" 29 | hashes = [ 30 | "h1:8oTPe2VUL6E2d3OcrvqyjI4Nn/Y/UEQN26WLk5O/B0g=", 31 | "zh:0af29ce2b7b5712319bf6424cb58d13b852bf9a777011a545fac99c7fdcdf561", 32 | "zh:126063ea0d79dad1f68fa4e4d556793c0108ce278034f101d1dbbb2463924561", 33 | "zh:196bfb49086f22fd4db46033e01655b0e5e036a5582d250412cc690fa7995de5", 34 | "zh:37c92ec084d059d37d6cffdb683ccf68e3a5f8d2eb69dd73c8e43ad003ef8d24", 35 | "zh:4269f01a98513651ad66763c16b268f4c2da76cc892ccfd54b401fff6cc11667", 36 | "zh:51904350b9c728f963eef0c28f1d43e73d010333133eb7f30999a8fb6a0cc3d8", 37 | 
"zh:73a66611359b83d0c3fcba2984610273f7954002febb8a57242bbb86d967b635", 38 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 39 | "zh:7ae387993a92bcc379063229b3cce8af7eaf082dd9306598fcd42352994d2de0", 40 | "zh:9e0f365f807b088646db6e4a8d4b188129d9ebdbcf2568c8ab33bddd1b82c867", 41 | "zh:b5263acbd8ae51c9cbffa79743fbcadcb7908057c87eb22fd9048268056efbc4", 42 | "zh:dfcd88ac5f13c0d04e24be00b686d069b4879cc4add1b7b1a8ae545783d97520", 43 | ] 44 | } 45 | 46 | provider "registry.terraform.io/hashicorp/tls" { 47 | version = "4.0.5" 48 | hashes = [ 49 | "h1:e4LBdJoZJNOQXPWgOAG0UuPBVhCStu98PieNlqJTmeU=", 50 | "zh:01cfb11cb74654c003f6d4e32bbef8f5969ee2856394a96d127da4949c65153e", 51 | "zh:0472ea1574026aa1e8ca82bb6df2c40cd0478e9336b7a8a64e652119a2fa4f32", 52 | "zh:1a8ddba2b1550c5d02003ea5d6cdda2eef6870ece86c5619f33edd699c9dc14b", 53 | "zh:1e3bb505c000adb12cdf60af5b08f0ed68bc3955b0d4d4a126db5ca4d429eb4a", 54 | "zh:6636401b2463c25e03e68a6b786acf91a311c78444b1dc4f97c539f9f78de22a", 55 | "zh:76858f9d8b460e7b2a338c477671d07286b0d287fd2d2e3214030ae8f61dd56e", 56 | "zh:a13b69fb43cb8746793b3069c4d897bb18f454290b496f19d03c3387d1c9a2dc", 57 | "zh:a90ca81bb9bb509063b736842250ecff0f886a91baae8de65c8430168001dad9", 58 | "zh:c4de401395936e41234f1956ebadbd2ed9f414e6908f27d578614aaa529870d4", 59 | "zh:c657e121af8fde19964482997f0de2d5173217274f6997e16389e7707ed8ece8", 60 | "zh:d68b07a67fbd604c38ec9733069fbf23441436fecf554de6c75c032f82e1ef19", 61 | "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", 62 | ] 63 | } 64 | -------------------------------------------------------------------------------- /nginx-webserver-ec2/ssh-key/key_pair.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "= 5.45.0" 6 | } 7 | } 8 | } 9 | 10 | provider "aws" { 11 | region = "us-east-1" 12 | } 13 | 14 | resource "tls_private_key" "example" { 15 | 
algorithm = "RSA" 16 | rsa_bits = 2048 17 | } 18 | 19 | resource "aws_key_pair" "generated_key" { 20 | key_name = "terraform-generated-key" 21 | public_key = tls_private_key.example.public_key_openssh 22 | } 23 | 24 | resource "local_file" "private_key_file" { 25 | content = tls_private_key.example.private_key_pem 26 | filename = "${path.module}/generated-ssh-key.pem" 27 | file_permission = "0600" 28 | } 29 | 30 | output "key_name" { 31 | value = aws_key_pair.generated_key.key_name 32 | } -------------------------------------------------------------------------------- /s3-dynamodb-backend/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "5.66.0" 6 | hashes = [ 7 | "h1:RHs4rOiKrKJqr8UhVW7yqfoMVwaofQ+9ChP41rAzc1A=", 8 | "zh:071c908eb18627f4becdaf0a9fe95d7a61f69be365080aba2ef5e24f6314392b", 9 | "zh:3dea2a474c6ad4be5b508de4e90064ec485e3fbcebb264cb6c4dec660e3ea8b5", 10 | "zh:56c0b81e3bbf4e9ccb2efb984f8758e2bc563ce179ff3aecc1145df268b046d1", 11 | "zh:5f34b75a9ef69cad8c79115ecc0697427d7f673143b81a28c3cf8d5decfd7f93", 12 | "zh:65632bc2c408775ee44cb32a72e7c48376001a9a7b3adbc2c9b4d088a7d58650", 13 | "zh:6d0550459941dfb39582fadd20bfad8816255a827bfaafb932d51d66030fcdd5", 14 | "zh:7f1811ef179e507fdcc9776eb8dc3d650339f8b84dd084642cf7314c5ca26745", 15 | "zh:8a793d816d7ef57e71758fe95bf830cfca70d121df70778b65cc11065ad004fd", 16 | "zh:8c7cda08adba01b5ae8cc4e5fbf16761451f0fab01327e5f44fc47b7248ba653", 17 | "zh:96d855f1771342771855c0fb2d47ff6a731e8f2fa5d242b18037c751fd63e6c3", 18 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 19 | "zh:b2a62669b72c2471820410b58d764102b11c24e326831ddcfae85c7d20795acf", 20 | "zh:b4a6b251ac24c8f5522581f8d55238d249d0008d36f64475beefc3791f229e1d", 21 | 
"zh:ca519fa7ee1cac30439c7e2d311a0ecea6a5dae2d175fe8440f30133688b6272", 22 | "zh:fbcd54e7d65806b0038fc8a0fbdc717e1284298ff66e22aac39dcc5a22cc99e5", 23 | ] 24 | } 25 | -------------------------------------------------------------------------------- /s3-dynamodb-backend/README.md: -------------------------------------------------------------------------------- 1 | # Configure Terraform remote state with AWS S3 and DynamoDB for state locking 2 | 3 | ## Overview 4 | 5 | The given template configures AWS S3 backend to store Terraform state and sets AWS DynamoDB for state locking. 6 | 7 | The template sets the following configurations: 8 | - Enable S3 bucket versioning 9 | - Enable default server-side encryption with AES256 10 | - Enable S3 bucket logging to monitor the access to the state file. For storing logs use separate S3 bucket. 11 | - Enable S3 Object locking to prevent even versioned objects from being deleted before a retention period 12 | - Disable public access to S3 bucket 13 | - Prevent accidental deletion of S3 bucket 14 | - Sets DynamoDB with pay per request billing mode 15 | 16 | To configure your your project to use new remote S3 backend add the following code with your values: 17 | ``` 18 | terraform { 19 | backend "s3" { 20 | bucket = "s3-bucket-state" 21 | key = "global/s3/terraform.tfstate" 22 | region = "us-east-1" 23 | 24 | dynamodb_table = "dynamodb-state-locks" 25 | encrypt = true 26 | } 27 | } 28 | ``` 29 | 30 | To migrate to a new backend use the following command: 31 | ```bash 32 | terraform init -migrate-state 33 | ``` 34 | 35 | ## Adding S3 bucket policies to further restrict access 36 | It's good to explicitly define a bucket policy to further restrict access to the S3 bucket. You can enforce that only specific users or roles can access the bucket. This is left as optional feature. 
37 | 38 | Example of a bucket policy to restrict access to only the necessary IAM roles: 39 | ``` 40 | resource "aws_s3_bucket_policy" "terraform_state_policy" { 41 | bucket = aws_s3_bucket.terraform_state.id 42 | 43 | policy = jsonencode({ 44 | Version = "2012-10-17" 45 | Statement = [ 46 | { 47 | Effect = "Allow" 48 | Principal = { 49 | AWS = "arn:aws:iam::YOUR_ACCOUNT_ID:role/YOUR_ROLE" 50 | } 51 | Action = "s3:*" 52 | Resource = [ 53 | "${aws_s3_bucket.terraform_state.arn}", 54 | "${aws_s3_bucket.terraform_state.arn}/*" 55 | ] 56 | }, 57 | { 58 | Effect = "Deny" 59 | Principal = "*" 60 | Action = "s3:*" 61 | Resource = [ 62 | "${aws_s3_bucket.terraform_state.arn}", 63 | "${aws_s3_bucket.terraform_state.arn}/*" 64 | ] 65 | Condition = { 66 | Bool = { 67 | "aws:SecureTransport" = "false" 68 | } 69 | } 70 | } 71 | ] 72 | }) 73 | } 74 | ``` 75 | 76 | ## References: 77 | - [Terraform Docs: S3 backend](https://developer.hashicorp.com/terraform/language/backend/s3) 78 | - [AWS S3 object lock](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html) -------------------------------------------------------------------------------- /s3-dynamodb-backend/main.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = "us-east-1" 3 | } 4 | 5 | # Create S3 bucket for terraform state 6 | resource "aws_s3_bucket" "terraform_state" { 7 | bucket = "s3-bucket-state" 8 | object_lock_enabled = true 9 | 10 | # Prevent accidental deletion of this S3 bucket. 
Set value to 'true' 11 | lifecycle { 12 | prevent_destroy = false 13 | } 14 | 15 | tags = { 16 | Name = "TerraformStateBucket" 17 | Environment = "Dev" 18 | } 19 | } 20 | 21 | # Enable S3 bucket versioning 22 | resource "aws_s3_bucket_versioning" "enabled" { 23 | bucket = aws_s3_bucket.terraform_state.id 24 | versioning_configuration { 25 | status = "Enabled" 26 | } 27 | } 28 | 29 | # Enable server-side encryption for the S3 bucket 30 | resource "aws_s3_bucket_server_side_encryption_configuration" "default" { 31 | bucket = aws_s3_bucket.terraform_state.id 32 | 33 | rule { 34 | apply_server_side_encryption_by_default { 35 | sse_algorithm = "AES256" 36 | } 37 | } 38 | } 39 | 40 | # Enable logging for the S3 bucket to monitor and track access to your Terraform state 41 | resource "aws_s3_bucket_logging" "terraform_state_logging" { 42 | bucket = aws_s3_bucket.terraform_state.id 43 | target_bucket = "s3-bucket-state-logs" 44 | target_prefix = "log/" 45 | } 46 | 47 | # Disable public access to S3 bucket 48 | resource "aws_s3_bucket_public_access_block" "public_access" { 49 | bucket = aws_s3_bucket.terraform_state.id 50 | block_public_acls = true 51 | block_public_policy = true 52 | ignore_public_acls = true 53 | restrict_public_buckets = true 54 | } 55 | 56 | # Enable S3 Object Lock to prevent even versioned objects from being deleted before a retention period 57 | resource "aws_s3_bucket_object_lock_configuration" "lock_config" { 58 | bucket = aws_s3_bucket.terraform_state.id 59 | 60 | rule { 61 | default_retention { 62 | mode = "GOVERNANCE" 63 | days = 30 64 | } 65 | } 66 | } 67 | 68 | # Create DynamoDB table for state locking 69 | resource "aws_dynamodb_table" "terraform_locks" { 70 | name = "dynamodb-state-locks" 71 | billing_mode = "PAY_PER_REQUEST" 72 | hash_key = "LockID" 73 | attribute { 74 | name = "LockID" 75 | type = "S" 76 | } 77 | 78 | tags = { 79 | Name = "TerraformStateLocks" 80 | Environment = "Dev" 81 | } 82 | } 
-------------------------------------------------------------------------------- /s3-dynamodb-backend/outputs.tf: -------------------------------------------------------------------------------- 1 | output "s3_bucket_arn" { 2 | value = aws_s3_bucket.terraform_state.arn 3 | description = "The ARN of the S3 bucket" 4 | } 5 | 6 | output "dynamodb_table_name" { 7 | value = aws_dynamodb_table.terraform_locks.name 8 | description = "The name of the DynamoDB table" 9 | } -------------------------------------------------------------------------------- /secrets-mgmt-with-sops/.sops.yaml: -------------------------------------------------------------------------------- 1 | creation_rules: 2 | # AWS KMS key for encryption and decryption 3 | - kms: "arn:aws:kms:us-east-1:533267306038:key/100345dd-9530-44ee-80e7-d49469f38a7c" -------------------------------------------------------------------------------- /secrets-mgmt-with-sops/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/carlpett/sops" { 5 | version = "0.6.3" 6 | constraints = "~> 0.6.0" 7 | hashes = [ 8 | "h1:1Dx43rBcTvC7sKDvAsLwM038mph2BBLs9VcF4UAGs7A=", 9 | "zh:0c9f0cad271cbd4c3fa97961b6276ff86f7db2d5dc39ea7de687b5e5091b5f4b", 10 | "zh:4180cf700dc8ccc71db5d4a0496c22a54301a617ae53d93bba91cb142694552e", 11 | "zh:656f35d78120bd50d82767a814f53a0a4f96ff1f5f79e40089f259bb1f09ce9e", 12 | "zh:98e53430bbf13631b314f9107372262151503c133b0551a370e23a0b451f5005", 13 | "zh:a9c5cdcf12ea89eee54e55779f0e0c868350fc2b48b79a394500d3b04ea28919", 14 | "zh:b042e80e60c0745ee3c8c49860ecb6e069f75c4c60aa8d6dd2f188f8c0a4f4ab", 15 | "zh:fe2194ecf065beb9a384b4893cd9d3d975e39db89b38d2b5af05c43352d83397", 16 | ] 17 | } 18 | 19 | provider "registry.terraform.io/hashicorp/aws" { 20 | version = "4.67.0" 21 | constraints = "~> 4.0" 22 | hashes = [ 23 | "h1:dCRc4GqsyfqHEMjgtlM1EympBcgTmcTkWaJmtd91+KA=", 24 | "zh:0843017ecc24385f2b45f2c5fce79dc25b258e50d516877b3affee3bef34f060", 25 | "zh:19876066cfa60de91834ec569a6448dab8c2518b8a71b5ca870b2444febddac6", 26 | "zh:24995686b2ad88c1ffaa242e36eee791fc6070e6144f418048c4ce24d0ba5183", 27 | "zh:4a002990b9f4d6d225d82cb2fb8805789ffef791999ee5d9cb1fef579aeff8f1", 28 | "zh:559a2b5ace06b878c6de3ecf19b94fbae3512562f7a51e930674b16c2f606e29", 29 | "zh:6a07da13b86b9753b95d4d8218f6dae874cf34699bca1470d6effbb4dee7f4b7", 30 | "zh:768b3bfd126c3b77dc975c7c0e5db3207e4f9997cf41aa3385c63206242ba043", 31 | "zh:7be5177e698d4b547083cc738b977742d70ed68487ce6f49ecd0c94dbf9d1362", 32 | "zh:8b562a818915fb0d85959257095251a05c76f3467caa3ba95c583ba5fe043f9b", 33 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 34 | "zh:9c385d03a958b54e2afd5279cd8c7cbdd2d6ca5c7d6a333e61092331f38af7cf", 35 | "zh:b3ca45f2821a89af417787df8289cb4314b273d29555ad3b2a5ab98bb4816b3b", 36 | "zh:da3c317f1db2469615ab40aa6baba63b5643bae7110ff855277a1fb9d8eb4f2c", 37 | "zh:dc6430622a8dc5cdab359a8704aec81d3825ea1d305bbb3bbd032b1c6adfae0c", 38 | 
"zh:fac0d2ddeadf9ec53da87922f666e1e73a603a611c57bcbc4b86ac2821619b1d", 39 | ] 40 | } 41 | -------------------------------------------------------------------------------- /secrets-mgmt-with-sops/README.md: -------------------------------------------------------------------------------- 1 | ## Secret management in Terraform with Mozilla SOPS tool 2 | 3 | This Terraform configuration demonstrates how to deploy an AWS RDS MySQL database using credentials securely stored and managed with SOPS. By integrating SOPS with Terraform, you can maintain secure, encrypted secrets without exposing sensitive information in your infrastructure code. 4 | 5 | The MySQL database username and password values are set via variables read from a SOPS-encrypted file. SOPS is an open-source tool from Mozilla for a management of sensitive values in the code. To learn more about SOPS, see [Securing Secrets with SOPS: An Introduction](https://maxat-akbanov.com/securing-secrets-with-sops-an-introduction) 6 | 7 | ### **Prerequisites:** 8 | 9 | 1. **SOPS Installation:** Ensure that [SOPS](https://github.com/mozilla/sops) is installed on your machine. 10 | 11 | 2. **Encryption Keys:** Have your encryption keys set up. SOPS supports various backends like AWS KMS, GCP KMS, Azure Key Vault, or PGP keys. This example uses AWS KMS keys. 12 | 13 | To create AWS KMS keys: 14 | ```bash 15 | aws kms create-key \ 16 | --region us-east-1 \ 17 | --description "KMS key for encryption" \ 18 | --key-usage ENCRYPT_DECRYPT \ 19 | --origin AWS_KMS \ 20 | --output json 21 | ``` 22 | 23 | To list KMS keys: 24 | ```bash 25 | aws kms list-keys --region us-east-1 26 | ``` 27 | 28 | 3. **Terraform Installation:** Make sure Terraform is installed and configured on your machine. 29 | 30 | **NOTES:** 31 | - For handling the decryption process of SOPS, this example uses `carlpett/sops` Terraform provider. To learn more about this provider, see [sops](https://registry.terraform.io/providers/carlpett/sops/latest). 
32 | - The default encryption process is configured via `.sops.yaml` file. The AWS KMS key is specified in the following format: `- kms: "arn:aws:kms:::key/"` 33 | - The contents of the `secrets.enc.json` file are encrypted with an AWS KMS key and committed to the repository. To decrypt values from the encrypted file with `sops`: 34 | ```bash 35 | sops -d secrets.enc.json 36 | ``` 37 | To be able to decrypt, you need to have the corresponding AWS KMS decryption key. 38 | 39 | **Directory Structure:** 40 | 41 | ``` 42 | . 43 | ├── main.tf 44 | ├── variables.tf 45 | ├── data.tf 46 | ├── resources.tf 47 | ├── secrets.enc.json 48 | ├── .sops.yaml 49 | ├── README.md 50 | ``` 51 | 52 | ### `secrets.enc.json` 53 | 54 | This file contains your database credentials encrypted with SOPS. 55 | 56 | **Before Encryption (`secrets.enc.json`):** 57 | 58 | ```json 59 | { 60 | "db_username": "myuser", 61 | "db_password": "mypassword" 62 | } 63 | ``` 64 | 65 | **Encrypt the File:** 66 | 67 | Use SOPS to encrypt the file. Assuming you have your keys set up, run: 68 | 69 | ```bash 70 | sops -e -i secrets.enc.json 71 | ``` 72 | 73 | After encryption, `secrets.enc.json` will contain encrypted values. 74 | 75 | --- 76 | 77 | ### **Running Terraform** 78 | 79 | Initialize and apply your Terraform configuration: 80 | 81 | ```bash 82 | terraform init 83 | terraform apply 84 | ``` 85 | 86 | During the `apply` phase, SOPS will use the decryption keys from AWS KMS to read the username and password values.
87 | 88 | ### **Additional Resources:** 89 | - [SOPS GitHub Repository](https://github.com/mozilla/sops) 90 | - [Terraform SOPS Provider Documentation](https://registry.terraform.io/providers/carlpett/sops/latest/docs) -------------------------------------------------------------------------------- /secrets-mgmt-with-sops/data.tf: -------------------------------------------------------------------------------- 1 | data "sops_file" "db_credentials" { # Decrypts secrets.enc.json via the KMS key referenced inside the file; plaintext values are exposed through the "data" attribute 2 | source_file = "${path.module}/secrets.enc.json" 3 | } 4 | -------------------------------------------------------------------------------- /secrets-mgmt-with-sops/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = "~> 4.0" 7 | } 8 | sops = { 9 | source = "carlpett/sops" 10 | version = "~> 0.6.0" 11 | } 12 | } 13 | } 14 | 15 | provider "aws" { 16 | region = var.aws_region 17 | } 18 | 19 | provider "sops" {} 20 | -------------------------------------------------------------------------------- /secrets-mgmt-with-sops/resources.tf: -------------------------------------------------------------------------------- 1 | resource "aws_db_instance" "default" { 2 | allocated_storage = 20 3 | engine = "mysql" 4 | engine_version = "8.0" 5 | instance_class = "db.t3.micro" 6 | db_name = var.db_name 7 | username = data.sops_file.db_credentials.data["db_username"] 8 | password = data.sops_file.db_credentials.data["db_password"] # NOTE: the decrypted value is persisted in the Terraform state file — protect the state accordingly 9 | parameter_group_name = "default.mysql8.0" 10 | skip_final_snapshot = true 11 | 12 | # Optional: Define VPC security group IDs if needed 13 | # vpc_security_group_ids = [aws_security_group.db_sg.id] 14 | 15 | # Optional: Define DB subnet group if deploying into a VPC (aws_db_instance has no "subnet_ids" argument; it takes the name of an aws_db_subnet_group) 16 | # db_subnet_group_name = aws_db_subnet_group.db_subnet_group.name 17 | } 18 | --------------------------------------------------------------------------------
/secrets-mgmt-with-sops/secrets.enc.json: -------------------------------------------------------------------------------- 1 | { 2 | "db_username": "ENC[AES256_GCM,data:OgO1w9Gf,iv:mKm+riM791WU/33dQBADHhe/Y7JTu8GjVDMwF18koVA=,tag:hqfn/3WvL1nEJCSyRueqbw==,type:str]", 3 | "db_password": "ENC[AES256_GCM,data:p/5ogHlIcbzYLA==,iv:9syhkenZdN8YRfvdO0K8kfFGCmi6VmLIPqrReEaMEuY=,tag:I8gUa5CiSVOcH42t0KMESg==,type:str]", 4 | "sops": { 5 | "kms": [ 6 | { 7 | "arn": "arn:aws:kms:us-east-1:533267306038:key/100345dd-9530-44ee-80e7-d49469f38a7c", 8 | "created_at": "2024-10-18T19:57:33Z", 9 | "enc": "AQICAHjBPck8IPBvch8bZFOetMX3FamEw14l+F2gyeaDE1gLWgHtJz0dyo2DL4jz0JHY3+ZZAAAAfjB8BgkqhkiG9w0BBwagbzBtAgEAMGgGCSqGSIb3DQEHATAeBglghkgBZQMEAS4wEQQMh4xhQuBD5ZwaMSafAgEQgDuGjb5RIzul3mMtCkx3iJc6kDEdvJ2KIJPOwffkLJ5b5sRu0LPOjOgUVIVm2AWFSdMS9/rhTX74bw8jDw==", 10 | "aws_profile": "" 11 | } 12 | ], 13 | "gcp_kms": null, 14 | "azure_kv": null, 15 | "hc_vault": null, 16 | "age": null, 17 | "lastmodified": "2024-10-18T19:57:34Z", 18 | "mac": "ENC[AES256_GCM,data:sLCENIlSLyQEV0DalxrpF9AGgojKOntK1n1YFvuSpW0A/loVI91iZSN4AKXg7uzptv26ZnQqANz4w/DE/Jfr6w+a4+lRc5UkwM2g7UQKC9dhPSRCC0SqEgN3zd6W9D3TUmpfTXIujSbD8ZSg5KzBb+qsErnuOj/7lsiW6h2G7Jk=,iv:qhEztjP6CEZucLLfOso+gShjU2Ak4dOCXZlP+P50s/k=,tag:W0qefr6zZ9V/vXiRUiunYw==,type:str]", 19 | "pgp": null, 20 | "unencrypted_suffix": "_unencrypted", 21 | "version": "3.9.1" 22 | } 23 | } -------------------------------------------------------------------------------- /secrets-mgmt-with-sops/variables.tf: -------------------------------------------------------------------------------- 1 | variable "aws_region" { 2 | description = "The AWS region to deploy resources" 3 | default = "us-east-1" 4 | } 5 | 6 | variable "db_name" { 7 | description = "The name of the database" 8 | default = "mydb" 9 | } 10 | -------------------------------------------------------------------------------- /standard-vpc-for-eks/README.md: 
-------------------------------------------------------------------------------- 1 | # Provision standard type of VPC for EKS cluster 2 | 3 | This demo project shows how to provision a VPC with 2 public and 2 private subnets in 2 AZs for an EKS cluster 4 | 5 | ![](../img/vpc_for_eks.png) 6 | 7 | ## Overview 8 | 9 | When you create an EKS cluster, you need to **specify a VPC with at least two subnets that are in different AZs**. For more information, see [VPC and Subnet Considerations](https://docs.aws.amazon.com/eks/latest/best-practices/subnets.html). 10 | 11 | An EKS cluster consists of two VPCs: 12 | - An **AWS-managed VPC** that **hosts the Kubernetes control plane**. This VPC **does not appear** in the customer account. 13 | - A **customer-managed VPC** that **hosts the Kubernetes worker nodes**. This is where containers run, as well as other customer-managed AWS infrastructure such as load balancers used by the cluster. This VPC appears in the customer account. You need to create the customer-managed VPC prior to creating a cluster. 14 | 15 | The **worker nodes in the customer VPC need the ability to connect to the managed API server endpoint** in the AWS VPC. This allows the nodes to register with the Kubernetes control plane and receive requests to run application Pods. 16 | 17 | ![](../img/subnet_image.png) 18 | 19 | - Kubernetes version `1.32`. For more information, see [EKS Kubernetes Standard Support Versions](https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions-standard.html) 20 | 21 | ## Project structure 22 | 23 | The project is structured as follows: 24 | ```bash 25 | .
26 | ├── 0-locals.tf 27 | ├── 1-provider.tf 28 | ├── 2-vpc.tf 29 | ├── 3-igw.tf 30 | ├── 4-subnets.tf 31 | ├── 5-nat.tf 32 | ├── 6-routes.tf 33 | ``` 34 | 35 | | **File** | **Purpose** | 36 | | --------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 37 | | `0-locals.tf` | ✅ **Defines local variables** that are used throughout the config (e.g., environment name, AZs, CIDR blocks). Helps avoid duplication and centralizes configuration logic. | 38 | | `1-provider.tf` | ✅ **Declares the AWS provider** and its configuration (e.g., region, version). This is required for Terraform to know how and where to create resources. | 39 | | `2-vpc.tf` | ✅ **Creates the main VPC** resource, specifying the CIDR block and DNS settings. This is the network foundation. | 40 | | `3-igw.tf` | ✅ **Creates the Internet Gateway**, used to route traffic from public subnets to the internet. | 41 | | `4-subnets.tf` | ✅ **Creates public and private subnets** in different Availability Zones. Tags are applied to support Kubernetes integration (e.g., EKS). | 42 | | `5-nat.tf` | ✅ **Creates a NAT Gateway and Elastic IP**. Used for providing internet access to private subnets without exposing them directly. | 43 | | `6-routes.tf` | ✅ **Creates route tables and associations** for both public and private subnets. Defines how traffic flows within the VPC and out to the internet or NAT gateway. | 44 | 45 | File numbering helps to logically order the resources and improve readability. However, note that Terraform treats all `.tf` files in a directory equally and loads them in an unordered fashion before building the resource dependency graph. 
46 | 47 | ## VPC Configuration 48 | 49 | Enable DNS and Hostnames support in your VPC: 50 | ```hcl 51 | enable_dns_support = true 52 | enable_dns_hostnames = true 53 | ``` 54 | 55 | This is required for EKS addons such as [EFS CSI driver](https://www.eksworkshop.com/docs/fundamentals/storage/efs/efs-csi-driver), AWS Client VPN and EKS worker nodes to communicate. 56 | 57 | In a **VPC used for EKS worker nodes**, these settings are critical for enabling internal name resolution and proper DNS behavior inside the cluster: 58 | 59 | --- 60 | 61 | ### `enable_dns_support = true` 62 | 63 | **Purpose:** 64 | Enables **Amazon-provided DNS** within the VPC. 65 | 66 | **What it does:** 67 | 68 | * Ensures that instances in the VPC (e.g., EKS nodes or pods) can **resolve public DNS names** (like `amazonaws.com`) and **VPC internal hostnames** (like other EC2 private DNS names). 69 | * Necessary for cluster components that rely on internal name resolution, such as `CoreDNS`, which resolves Kubernetes service names. 70 | 71 | **When to enable:** 72 | 🔹 **Always enable** this for any EKS (or EC2) deployment. 73 | Without it, DNS resolution won’t work, breaking basic cluster functionality. 74 | 75 | --- 76 | 77 | ### `enable_dns_hostnames = true` 78 | 79 | **Purpose:** 80 | Enables assigning **DNS hostnames** (like `ip-10-0-1-12.ec2.internal`) to EC2 instances launched in the VPC. 81 | 82 | **What it does:** 83 | 84 | - Lets EC2 instances (EKS worker nodes) receive a private DNS name associated with their private IP. 85 | - Important for internal Kubernetes operations like: 86 | 87 | - **Kubelet registration** 88 | - **Node name resolution** 89 | - **IAM roles for service accounts (IRSA)** via hostname-based DNS calls to the metadata service 90 | 91 | **When to enable:** 92 | - Required **if your subnets are public** or you use **custom DNS setups**, or want to rely on EC2 DNS names. 
93 | - Also required for **IRSA to work**, especially with `amazonaws.com` endpoints routed via DNS. 94 | 95 | --- 96 | 97 | | Setting | Needed for EKS | Why It’s Important | Enable in EKS? | 98 | | ---------------------- | -------------- | ------------------------------------------------ | -------------- | 99 | | `enable_dns_support` | ✅ Yes | Enables DNS resolution inside the VPC | ✅ Always | 100 | | `enable_dns_hostnames` | ✅ Yes | Assigns DNS names to EC2 instances (EKS workers) | ✅ Usually | 101 | 102 | | **Attribute** | **Description** | 103 | | -------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 104 | | `enableDnsHostnames` | Controls whether EC2 instances in the VPC are assigned **DNS hostnames**.

🔹 If an instance has a **public IP**, enabling this allows it to get a **public DNS name** like `ec2-54-123-45-67.compute-1.amazonaws.com`.

🔹 Defaults to `false`, unless the VPC is the **default VPC**. | 105 | | `enableDnsSupport` | Controls whether the VPC supports **DNS resolution** using the **Amazon-provided DNS server**.

🔹 Must be `true` for EC2 instances (and EKS nodes) to resolve domain names like `amazonaws.com` or internal names.

🔹 Defaults to `true`. | 106 | 107 | For more information, see [VPC requirements and considerations](https://docs.aws.amazon.com/eks/latest/userguide/network-reqs.html#network-requirements-vpc) 108 | 109 | ## Subnets Configuration 110 | 111 | If you want to deploy load balancers to a subnet, the subnet must have the following tags: 112 | ```hcl 113 | # Used by Kubernetes to mark as internal load balancer subnet 114 | "kubernetes.io/role/internal-elb" = "1" 115 | 116 | # Tag for public load balancer use in Kubernetes 117 | "kubernetes.io/role/elb" = "1" 118 | ``` 119 | 120 | ## Shared and Owned Tag Values 121 | 122 | The tag `kubernetes.io/cluster/` with values like **`owned`** or **`shared`** is used by **Amazon EKS** and the **Kubernetes cloud controller manager** to identify subnets, security groups, and other AWS resources that belong to or are usable by the cluster. 123 | 124 | --- 125 | 126 | | **Tag Key** | **Tag Value** | **Meaning** | 127 | | ---------------------------------- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 128 | | `kubernetes.io/cluster/my-cluster` | `owned` | The resource (e.g., subnet, security group) is **created and exclusively used** by this EKS cluster. EKS may delete or modify it during cluster lifecycle operations (e.g., deletion). | 129 | | `kubernetes.io/cluster/my-cluster` | `shared` | The resource is **shared** among multiple clusters or manually managed. EKS will **not delete or modify** it when the cluster is deleted. | 130 | 131 | --- 132 | 133 | * **`owned`**: 134 | 135 | * You created the resource specifically for this EKS cluster. 136 | * You're okay with EKS managing the lifecycle of that resource. 137 | * Safe for EKS to clean up on cluster deletion. 
138 | 139 | * **`shared`**: 140 | 141 | * The resource (like a subnet or SG) is **reused** across clusters or managed externally. 142 | * You **don’t want EKS to delete or change** it automatically. 143 | * Useful in **multi-cluster setups** or **shared VPCs**. 144 | 145 | --- 146 | 147 | ```hcl 148 | tags = { 149 | "kubernetes.io/cluster/my-cluster" = "owned" # EKS manages this resource 150 | } 151 | ``` 152 | 153 | vs. 154 | 155 | ```hcl 156 | tags = { 157 | "kubernetes.io/cluster/my-cluster" = "shared" # You manage this resource 158 | } 159 | ``` 160 | 161 | For more information, see [Subnet requirements for nodes](https://docs.aws.amazon.com/eks/latest/userguide/network-reqs.html#:~:text=Subnet%20requirements%20for%20nodes) 162 | 163 | ## Provision VPC 164 | 165 | To provision VPC: 166 | ```bash 167 | terraform apply 168 | ``` 169 | 170 | You'll get the following VPC: 171 | 172 | ![](../img/vpc_resource_map.png) 173 | 174 | ## References 175 | - [AWS Docs: View Amazon EKS networking requirements for VPC and subnets](https://docs.aws.amazon.com/eks/latest/userguide/network-reqs.html) 176 | - [YouTube: Create AWS VPC using Terraform: AWS EKS Kubernetes Tutorial](https://www.youtube.com/watch?v=aRXg75S5DWA&list=PLiMWaCMwGJXnKY6XmeifEpjIfkWRo9v2l) -------------------------------------------------------------------------------- /standard-vpc-for-eks/terraform/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "5.98.0" 6 | constraints = ">= 5.62.0" 7 | hashes = [ 8 | "h1:KgOCdSG6euSc2lquuFlISJU/CzQTRhAO7WoaASxLZRc=", 9 | "zh:23377bd90204b6203b904f48f53edcae3294eb072d8fc18a4531c0cde531a3a1", 10 | "zh:2e55a6ea14cc43b08cf82d43063e96c5c2f58ee953c2628523d0ee918fe3b609", 11 | "zh:4885a817c16fdaaeddc5031edc9594c1f300db0e5b23be7cd76a473e7dcc7b4f", 12 | "zh:6ca7177ad4e5c9d93dee4be1ac0792b37107df04657fddfe0c976f36abdd18b5", 13 | "zh:78bf8eb0a67bae5dede09666676c7a38c9fb8d1b80a90ba06cf36ae268257d6f", 14 | "zh:874b5a99457a3f88e2915df8773120846b63d820868a8f43082193f3dc84adcb", 15 | "zh:95e1e4cf587cde4537ac9dfee9e94270652c812ab31fce3a431778c053abf354", 16 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 17 | "zh:a75145b58b241d64570803e6565c72467cd664633df32678755b51871f553e50", 18 | "zh:aa31b13d0b0e8432940d6892a48b6268721fa54a02ed62ee42745186ee32f58d", 19 | "zh:ae4565770f76672ce8e96528cbb66afdade1f91383123c079c7fdeafcb3d2877", 20 | "zh:b99f042c45bf6aa69dd73f3f6d9cbe0b495b30442c526e0b3810089c059ba724", 21 | "zh:bbb38e86d926ef101cefafe8fe090c57f2b1356eac9fc5ec81af310c50375897", 22 | "zh:d03c89988ba4a0bd3cfc8659f951183ae7027aa8018a7ca1e53a300944af59cb", 23 | "zh:d179ef28843fe663fc63169291a211898199009f0d3f63f0a6f65349e77727ec", 24 | ] 25 | } 26 | -------------------------------------------------------------------------------- /standard-vpc-for-eks/terraform/0-locals.tf: -------------------------------------------------------------------------------- 1 | # Define commonly used values across the module 2 | locals { 3 | env = "dev" 4 | region = "us-east-1" 5 | zone1 = "us-east-1a" 6 | zone2 = "us-east-1b" 7 | eks_name = "eks_cluster" 8 | eks_version = "1.32" # EKS supported Kubernetes version 9 | } -------------------------------------------------------------------------------- /standard-vpc-for-eks/terraform/1-provider.tf: 
-------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = local.region 3 | } 4 | 5 | terraform { 6 | required_providers { 7 | aws = { 8 | source = "hashicorp/aws" 9 | version = ">= 5.62" 10 | } 11 | } 12 | } -------------------------------------------------------------------------------- /standard-vpc-for-eks/terraform/2-vpc.tf: -------------------------------------------------------------------------------- 1 | resource "aws_vpc" "main" { 2 | # The IP range for the VPC in CIDR notation 3 | cidr_block = "10.0.0.0/16" 4 | 5 | # Enables DNS resolution via the Amazon-provided DNS server. 6 | # Must be true for EC2 instances and EKS pods to resolve domain names. 7 | enable_dns_support = true 8 | 9 | # Enables DNS hostnames for instances launched in the VPC. 10 | # Required for assigning internal DNS names (e.g., ip-10-0-0-1.ec2.internal). 11 | enable_dns_hostnames = true 12 | 13 | tags = { 14 | # Tag used to identify the VPC by name. This tag will be visible in AWS Console by default 15 | Name = "${local.env}-vpc" 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /standard-vpc-for-eks/terraform/3-igw.tf: -------------------------------------------------------------------------------- 1 | resource "aws_internet_gateway" "igw" { 2 | # Associates the internet gateway with the specified VPC 3 | vpc_id = aws_vpc.main.id 4 | 5 | tags = { 6 | # Tag used to identify the internet gateway by environment 7 | Name = "${local.env}-igw" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /standard-vpc-for-eks/terraform/4-subnets.tf: -------------------------------------------------------------------------------- 1 | # Private subnet in zone1 2 | resource "aws_subnet" "private_zone1" { 3 | # Associate the subnet with the main VPC 4 | vpc_id = aws_vpc.main.id 5 | 6 | # Private subnet CIDR block (first /20 block in VPC) 7 | cidr_block 
= "10.0.0.0/20" 8 | availability_zone = local.zone1 9 | 10 | tags = { 11 | # Tag for identification 12 | Name = "${local.env}-private-${local.zone1}" 13 | 14 | # Used by Kubernetes to mark as internal load balancer subnet 15 | "kubernetes.io/role/internal-elb" = "1" 16 | 17 | # Used only by EKS service to recognize the subnet 18 | "kubernetes.io/cluster/${local.env}-${local.eks_name}" = "owned" 19 | } 20 | } 21 | 22 | # Private subnet in zone2 23 | resource "aws_subnet" "private_zone2" { 24 | vpc_id = aws_vpc.main.id 25 | 26 | # Next /20 block, non-overlapping with private_zone1 27 | cidr_block = "10.0.16.0/20" 28 | availability_zone = local.zone2 29 | 30 | tags = { 31 | Name = "${local.env}-private-${local.zone2}" 32 | "kubernetes.io/role/internal-elb" = "1" 33 | "kubernetes.io/cluster/${local.env}-${local.eks_name}" = "owned" 34 | } 35 | } 36 | 37 | # Public subnet in zone1 38 | resource "aws_subnet" "public_zone1" { 39 | vpc_id = aws_vpc.main.id 40 | 41 | # Public subnet CIDR block, after private ranges 42 | cidr_block = "10.0.32.0/20" 43 | availability_zone = local.zone1 44 | 45 | # Enables automatic public IP assignment for launched instances 46 | map_public_ip_on_launch = true 47 | 48 | tags = { 49 | Name = "${local.env}-public-${local.zone1}" 50 | 51 | # Tag for public load balancer use in Kubernetes 52 | "kubernetes.io/role/elb" = "1" 53 | 54 | # EKS cluster discovery 55 | "kubernetes.io/cluster/${local.env}-${local.eks_name}" = "owned" 56 | } 57 | } 58 | 59 | # Public subnet in zone2 60 | resource "aws_subnet" "public_zone2" { 61 | vpc_id = aws_vpc.main.id 62 | 63 | # Next /20 block for public zone2, non-overlapping 64 | cidr_block = "10.0.48.0/20" 65 | availability_zone = local.zone2 66 | 67 | map_public_ip_on_launch = true 68 | 69 | tags = { 70 | Name = "${local.env}-public-${local.zone2}" 71 | "kubernetes.io/role/elb" = "1" 72 | "kubernetes.io/cluster/${local.env}-${local.eks_name}" = "owned" 73 | } 74 | } 75 | 
-------------------------------------------------------------------------------- /standard-vpc-for-eks/terraform/5-nat.tf: -------------------------------------------------------------------------------- 1 | # Allocate an Elastic IP to be used by the NAT Gateway 2 | resource "aws_eip" "nat_ip" { 3 | # Set the EIP domain to "vpc" for use in a VPC 4 | domain = "vpc" 5 | 6 | tags = { 7 | # Tag to identify the Elastic IP by environment 8 | Name = "${local.env}-nat-ip" 9 | } 10 | } 11 | 12 | # Create a NAT Gateway to allow internet access for instances in private subnets. NOTE: a single NAT Gateway in one AZ is a single point of failure for private-subnet egress in both AZs; for production, consider one NAT Gateway per AZ (see ../aws-highly-available-nat-gateway-setup in this repo) 13 | resource "aws_nat_gateway" "nat" { 14 | # Associate the NAT Gateway with the Elastic IP 15 | allocation_id = aws_eip.nat_ip.id 16 | 17 | # Deploy the NAT Gateway into a public subnet so it can route traffic to the internet 18 | subnet_id = aws_subnet.public_zone1.id 19 | 20 | tags = { 21 | # Tag to identify the NAT Gateway by environment 22 | Name = "${local.env}-nat" 23 | } 24 | 25 | # Ensure that the Internet Gateway is created before the NAT Gateway 26 | depends_on = [aws_internet_gateway.igw] 27 | } 28 | -------------------------------------------------------------------------------- /standard-vpc-for-eks/terraform/6-routes.tf: -------------------------------------------------------------------------------- 1 | # Route table for private subnets 2 | resource "aws_route_table" "private_rtb" { 3 | # Associate with the main VPC 4 | vpc_id = aws_vpc.main.id 5 | 6 | # Route all outbound traffic from private subnets through the NAT Gateway 7 | route { 8 | cidr_block = "0.0.0.0/0" 9 | nat_gateway_id = aws_nat_gateway.nat.id 10 | } 11 | 12 | tags = { 13 | # Tag to identify the private route table 14 | Name = "${local.env}-private-rtb" 15 | } 16 | } 17 | 18 | # Route table for public subnets 19 | resource "aws_route_table" "public_rtb" { 20 | # Associate with the main VPC 21 | vpc_id = aws_vpc.main.id 22 | 23 | # Route all outbound traffic from public subnets directly through the Internet Gateway 24 |
route { 25 | cidr_block = "0.0.0.0/0" 26 | gateway_id = aws_internet_gateway.igw.id 27 | } 28 | 29 | tags = { 30 | # Tag to identify the public route table 31 | Name = "${local.env}-public-rtb" 32 | } 33 | } 34 | 35 | # Associate private subnet in zone1 with private route table 36 | resource "aws_route_table_association" "private_zone1" { 37 | subnet_id = aws_subnet.private_zone1.id 38 | route_table_id = aws_route_table.private_rtb.id 39 | } 40 | 41 | # Associate private subnet in zone2 with private route table 42 | resource "aws_route_table_association" "private_zone2" { 43 | subnet_id = aws_subnet.private_zone2.id 44 | route_table_id = aws_route_table.private_rtb.id 45 | } 46 | 47 | # Associate public subnet in zone1 with public route table 48 | resource "aws_route_table_association" "public_zone1" { 49 | subnet_id = aws_subnet.public_zone1.id 50 | route_table_id = aws_route_table.public_rtb.id 51 | } 52 | 53 | # Associate public subnet in zone2 with public route table 54 | resource "aws_route_table_association" "public_zone2" { 55 | subnet_id = aws_subnet.public_zone2.id 56 | route_table_id = aws_route_table.public_rtb.id 57 | } 58 | -------------------------------------------------------------------------------- /teamcity-on-aws/README.md: -------------------------------------------------------------------------------- 1 | # Deploy TeamCity server on AWS 2 | 3 | ![](../img/teamcity_logo.png) 4 | 5 | This template deploys TeamCity server and two separate machines with build agents installed. 6 | 7 | - By default the setup uses the `t3.medium` based EC2 instances with Ubuntu 20.04 Focal Fossa amd64 based OS. 8 | - Default region `us-east-1` 9 | 10 | > `t3.medium` type of EC2 instance is in free tier, however you may experience performance issues. If you need better performance, consider switching to paid options of `m5.large` or `c5.large` type of instances. 
11 | 12 | For more information about AMI images available for Ubuntu in an AWS region, see [Amazon EC2 AMI locator](https://cloud-images.ubuntu.com/locator/ec2/) 13 | 14 | A delay is added after provisioning of the EC2 instance with the TeamCity server in order to let the `user_data` script finish installation and fully provision the machine. For more information about the delay provider in Terraform, see [time_sleep resource](https://registry.terraform.io/providers/hashicorp/time/latest/docs/resources/sleep) 15 | 16 | ## SSH keys 17 | 18 | SSH keys are generated by the Terraform code and the private key is saved locally into the `generated-ssh-key.pem` file. 19 | 20 | ## TeamCity server installation 21 | 22 | The TeamCity server is installed via a `user_data` script. For more information about installation, see "[Install TeamCity on Linux or MacOS](https://www.jetbrains.com/help/teamcity/install-teamcity-server-on-linux-or-macos.html)" 23 | 24 | ## Build agent installation and configuration 25 | 26 | Build agents are run on separate EC2 instance machines via Docker containers. For more information, see [Agent Docker Images](https://www.jetbrains.com/help/teamcity/agent-docker-images.html). 27 | 28 | For more information about agent installation options, see [Install TeamCity agent](https://www.jetbrains.com/help/teamcity/install-teamcity-agent.html) 29 | 30 | TeamCity agents can be connected to the TeamCity server via the Agent Push option.
For more information, see [Install via Agent Push](https://www.jetbrains.com/help/teamcity/install-teamcity-agent.html#Install+via+Agent+Push) 31 | 32 | For agent configuration, see [Start TeamCity Agent](https://www.jetbrains.com/help/teamcity/start-teamcity-agent.html) -------------------------------------------------------------------------------- /teamcity-on-aws/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "~> 5.0" 6 | } 7 | 8 | time = { 9 | source = "hashicorp/time" 10 | version = "0.11.2" 11 | } 12 | 13 | } 14 | } 15 | 16 | provider "aws" { 17 | region = "us-east-1" # Change to your preferred region 18 | } 19 | 20 | resource "aws_instance" "teamcity_server" { 21 | ami = "ami-0c819f65440d5f1d1" # Ubuntu 20.04 amd64 us-east-1 22 | instance_type = "t3.medium" 23 | key_name = "terraform-generated-key" # Key pair created by ssh_key.tf 24 | 25 | tags = { 26 | Name = "TeamCityServer" 27 | } 28 | 29 | user_data = <<-EOF 30 | #!/bin/bash 31 | adduser --disabled-password --gecos "" teamcity # non-interactive; a bare "adduser" prompts for input and hangs under cloud-init 32 | apt update && apt install wget -y 33 | cd /opt 34 | wget https://download.jetbrains.com/teamcity/TeamCity-2022.10.1.tar.gz 35 | tar xfz TeamCity-2022.10.1.tar.gz 36 | apt install java-common -y 37 | wget https://corretto.aws/downloads/latest/amazon-corretto-11-x64-linux-jdk.deb 38 | dpkg --install amazon-corretto-11-x64-linux-jdk.deb 39 | chown -R teamcity:teamcity TeamCity 40 | # Start the server as the unprivileged teamcity user; a bare "su teamcity" would not run the next command as that user 41 | su - teamcity -c "/opt/TeamCity/bin/runAll.sh start" 42 | EOF 43 | 44 | vpc_security_group_ids = [aws_security_group.teamcity_sg.id] 45 | } 46 | 47 | # Delay resource to wait for the TeamCity server to start 48 | resource "time_sleep" "wait_for_teamcity_server" { 49 | depends_on = [aws_instance.teamcity_server] 50 | 51 | create_duration = "5m" # Wait for 5 minutes 52 | } 53 | 54 | # TeamCity Build Agent 55 | resource "aws_instance" "teamcity_agent" { 56 | count = 2 # Number of build agents 57 | ami = "ami-0c819f65440d5f1d1" # Ubuntu 20.04 amd64 us-east-1 58 | instance_type = "t3.medium" 59 | key_name = "terraform-generated-key" # Key pair created by ssh_key.tf 60 | 61 | tags = { 62 | Name = "TeamCityAgent" 63 | } 64 | 65 | user_data = <<-EOF 66 | #!/bin/bash 67 | apt-get update -y 68 | apt-get install -y docker.io 69 | systemctl start docker 70 | usermod -aG docker ubuntu 71 | docker run -d -e SERVER_URL="http://${aws_instance.teamcity_server.public_ip}:8111" \ 72 | -v /data/teamcity_agent/conf:/data/teamcity_agent/conf \ 73 | jetbrains/teamcity-agent 74 | EOF 75 | depends_on = [time_sleep.wait_for_teamcity_server] # give the server its 5-minute startup window before agents try to register 76 | vpc_security_group_ids = [aws_security_group.teamcity_sg.id] 77 | } 78 | 79 | resource "aws_security_group" "teamcity_sg" { 80 | name = "teamcity_sg" 81 | description = "Allow SSH and TeamCity traffic" 82 | 83 | ingress { 84 | from_port = 22 85 | to_port = 22 86 | protocol = "tcp" 87 | cidr_blocks = ["0.0.0.0/0"] # FIXME: the previous value "/32" was not a valid CIDR; restrict SSH to <your-ip>/32 instead of 0.0.0.0/0 88 | } 89 | 90 | ingress { 91 | from_port = 8111 92 | to_port = 8111 93 | protocol = "tcp" 94 | cidr_blocks = ["0.0.0.0/0"] 95 | } 96 | 97 | egress { 98 | from_port = 0 99 | to_port = 0 100 | protocol = "-1" 101 | cidr_blocks = ["0.0.0.0/0"] 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /teamcity-on-aws/outputs.tf: -------------------------------------------------------------------------------- 1 | output "teamcity_server_public_ip" { 2 | description = "The public IP address of the TeamCity server" 3 | value = aws_instance.teamcity_server.public_ip 4 | } 5 | 6 | output "teamcity_agent_public_ips" { 7 | description = "The public IP addresses of the TeamCity build agents" 8 | value = aws_instance.teamcity_agent[*].public_ip 9 | } -------------------------------------------------------------------------------- /teamcity-on-aws/ssh_key.tf: -------------------------------------------------------------------------------- 1 | resource "tls_private_key" "example" { 2 | algorithm = "RSA" 3 | rsa_bits = 2048 4 | } 5 | 6 | resource "aws_key_pair" "generated_key" { 7 | key_name = "terraform-generated-key" 8 | public_key = tls_private_key.example.public_key_openssh 9 | } 10 | 11 | resource "local_file" "private_key_file" { 12 | content = tls_private_key.example.private_key_pem # NOTE: the private key is also persisted in Terraform state 13 | filename = "${path.module}/generated-ssh-key.pem" 14 | file_permission = "0600" 15 | } 16 | 17 | output "key_name" { 18 | value = aws_key_pair.generated_key.key_name 19 | } --------------------------------------------------------------------------------