├── LAB00-Terraform-Install-AWS-Configuration.md ├── LAB01-Terraform-Docker-Without-Cloud.md ├── LAB02-Resources-Basic-EC2.md ├── LAB03-Variables-Locals-Output-EC2.md ├── LAB04-Meta-Arguments-IAM-User-Group-Policy.md ├── LAB05-Dynamic-Blocks-Security-Groups-EC2.md ├── LAB06-Data-Sources-EC2.md ├── LAB07-Provisioners-Null-Resources.md ├── LAB08-Modules-EC2.md ├── LAB09-Workspaces-EC2.md ├── LAB10-Templates-User-Policy.md ├── LAB11-Backend-Remote-State.md ├── LICENSE ├── README.md ├── SAMPLE01-EC2-VPC-Ubuntu-Win-SSH-RDP.md ├── SAMPLE02-Lambda-API-Gateway-Python.md ├── SAMPLE03-EC2-EBS-EFS.md ├── SAMPLE04-ECR-ECS-ELB-VPC-ECS-Service.md ├── SAMPLE05-Lambda-Container-ApiGateway-FlaskApp.md ├── SAMPLE06-EKS-ManagedNodes-Blueprint.md ├── SAMPLE07-CodeCommit-Pipeline-Build-Deploy-Lambda.md ├── SAMPLE08-S3-CloudFront-Static-WebSite.md ├── SAMPLE09-GitlabServer-on-Premise-GitlabRunner-on-EC2.md ├── SAMPLE10-MLOps-SageMaker-GitHub-Codepipeline-CodeBuild-CodeDeploy.md ├── Terraform-Cheatsheet.md ├── labs ├── backend-remote-state │ └── main.tf ├── basic-resource-ec2-ubuntu │ └── main.tf ├── data-sources │ └── main.tf ├── dynamic-blocks │ └── main.tf ├── iamuser-metaargs-count-for-foreach-map │ ├── count │ │ └── main.tf │ ├── for_each │ │ └── main.tf │ └── map │ │ └── main.tf ├── modules │ ├── main.tf │ ├── module1 │ │ ├── main.tf │ │ └── variables.tf │ └── module2 │ │ ├── main.tf │ │ └── variables.tf ├── provisioners-nullresources │ ├── main.tf │ └── test-file.txt ├── template │ ├── main.tf │ └── policy.tftpl ├── terraform-docker-without-cloud │ └── main.tf ├── variables-locals-output │ ├── main.tf │ ├── terraform-dev.tfvars │ ├── terraform-prod.tfvars │ └── variables.tf └── workspace │ ├── main.tf │ ├── terraform-dev.tfvars │ ├── terraform-prod.tfvars │ └── variables.tf └── samples ├── codecommit-codepipeline-codebuild-codedeploy-lambda-container ├── lambda_bootstrap │ ├── lambda │ │ ├── Dockerfile │ │ ├── aws-lambda-url.py │ │ ├── docker-test.sh │ │ └── requirements.txt │ ├── 
main.tf │ ├── outputs.tf │ ├── providers.tf │ ├── terraform.tfvars │ └── variables.tf ├── main.tf ├── modules │ ├── codecommit │ │ ├── main.tf │ │ ├── outputs.tf │ │ └── variables.tf │ ├── codepipeline │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── roles.tf │ │ ├── templates │ │ │ ├── buildspec_build.yml │ │ │ └── buildspec_deploy.yml │ │ └── variables.tf │ └── ecr │ │ ├── main.tf │ │ ├── outputs.tf │ │ └── variables.tf ├── outputs.tf ├── providers.tf ├── terraform.tfvars └── variables.tf ├── ec2-ebs-efs ├── ebs.tf ├── efs.tf └── main.tf ├── ec2-vpc-ubuntu-win-ssh-rdp └── main.tf ├── ecr-ecs-elb-vpc-ecsservice-container ├── 1_vpc.tf ├── 2_ecs.tf ├── 3_elb.tf ├── 4_ecs_service.tf ├── ecr │ └── 0_ecr.tf └── flask-app │ ├── Dockerfile │ ├── README.md │ ├── app │ ├── app.py │ ├── hello.py │ ├── init_db.py │ ├── schema.sql │ ├── static │ │ └── css │ │ │ └── style.css │ └── templates │ │ ├── base.html │ │ ├── create.html │ │ ├── edit.html │ │ ├── index.html │ │ └── post.html │ └── requirements.txt ├── eks-managed-node-blueprint ├── README.md └── main.tf ├── gitlabserver-on-premise-runner-on-EC2 ├── docker-compose.yml ├── main.tf └── test-gitlab-runner │ ├── docker-windows │ └── Dockerfile │ ├── gitlab-ci.yml │ ├── requirements.txt │ ├── src │ ├── __init__.py │ └── main.py │ └── test │ ├── __init__.py │ └── test_main.py ├── lambda-container-apigateway-flaskapp ├── 1_lambda.tf ├── 2_api_gateway.tf ├── ecr │ └── 0_ecr.tf └── flask-app-serverless │ ├── Dockerfile │ ├── README.md │ ├── app │ ├── app.py │ ├── hello.py │ ├── init_db.py │ ├── schema.sql │ ├── static │ │ └── css │ │ │ └── style.css │ └── templates │ │ ├── base.html │ │ ├── create.html │ │ ├── edit.html │ │ ├── index.html │ │ └── post.html │ └── requirements.txt ├── lambda-role-policy-apigateway-python ├── api-gateway.tf ├── code │ └── main.py └── lambda.tf ├── mlops-sagemaker-github-codepipeline-codebuild-codedeploy ├── Notebooks │ ├── SageMaker_Customer_Churn_XGB_Pipeline.ipynb │ ├── 
SageMaker_Customer_Churn_XGB_end2end.ipynb │ ├── assume-role.json │ ├── preprocess.py │ └── test.csv ├── modelbuild_pipeline │ ├── README.md │ ├── pipelines │ │ ├── __init__.py │ │ ├── __version__.py │ │ ├── _utils.py │ │ ├── customer_churn │ │ │ ├── __init__.py │ │ │ ├── evaluate.py │ │ │ ├── pipeline.py │ │ │ └── preprocess.py │ │ ├── get_pipeline_definition.py │ │ └── run_pipeline.py │ ├── setup.cfg │ ├── setup.py │ ├── tests │ │ └── test_pipelines.py │ └── tox.ini ├── modeldeploy_pipeline │ ├── README.md │ ├── build.py │ ├── endpoint-config-template.yml │ ├── fix_model_permission.py │ ├── prod-config.json │ ├── setup.py │ ├── staging-config.json │ └── test │ │ ├── test.py │ │ ├── test_buildspec.yml │ │ ├── test_buildspec_singleaccount.yml │ │ └── test_singleaccount.py └── terraform │ ├── events.tf │ ├── iam_roles.tf │ ├── main.tf │ ├── modelbuild_buildspec.yml │ ├── modelbuild_ci_pipeline.tf │ ├── modelbuild_codebuild.tf │ ├── modelbuild_hooks.tf │ ├── modeldeploy_buildspec.yml │ ├── modeldeploy_cd_pipeline.tf │ ├── modeldeploy_codebuild.tf │ ├── modeldeploy_hooks.tf │ ├── modeldeploy_testbuild.tf │ ├── s3.tf │ ├── terraform.tfvars │ └── variables.tf └── s3-cloudfront-static-website ├── cloudfront.tf ├── s3.tf └── website ├── assets ├── favicon.ico └── img │ ├── avataaars.svg │ └── portfolio │ ├── cabin.png │ ├── cake.png │ ├── circus.png │ ├── game.png │ ├── safe.png │ └── submarine.png ├── css └── styles.css ├── error.html ├── index.html └── js └── scripts.js /LAB00-Terraform-Install-AWS-Configuration.md: -------------------------------------------------------------------------------- 1 | ## LAB: Terraform Install, AWS Configuration with Terraform 2 | 3 | This scenario shows: 4 | - how to configure your Terraform with AWS 5 | 6 | ## Steps 7 | 8 | - Install Terraform: 9 | - https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli 10 | 11 | - For Windows: 12 | 13 | ``` 14 | choco install terraform 15 | ``` 16 | 17 | - Then, add Terraform 
app into the Environment Variables. 18 | 19 | ![image](https://user-images.githubusercontent.com/10358317/226994354-ef99ce99-c9b7-480e-ad09-36b88c6fe841.png) 20 | 21 | - Download AWS CLI: 22 | - https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html 23 | 24 | - For Windows: 25 | 26 | ``` 27 | msiexec.exe /i https://awscli.amazonaws.com/AWSCLIV2.msi 28 | ``` 29 | 30 | ![image](https://user-images.githubusercontent.com/10358317/226995232-d88e1533-2aa0-4d6c-b201-5ab1c58d389f.png) 31 | 32 | 33 | - Create AWS Root Account: 34 | - https://repost.aws/knowledge-center/create-and-activate-aws-account 35 | 36 | - Create IAM Admin User: 37 | 38 | ![image](https://user-images.githubusercontent.com/10358317/226996766-678ae1af-1161-4d8a-9b49-4bb3915b1ba5.png) 39 | 40 | 41 | - Create AWS Access Keys. 42 | 43 | - Access keys consist of two parts: 44 | - an access key ID (for example, AKIAIOSFODNN7EXAMPLE), 45 | - a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY). 46 | 47 | - You must use both the access key ID and secret access key together to authenticate your requests. 
48 | 49 | ![image](https://user-images.githubusercontent.com/10358317/226998180-cd80ae08-a05c-479b-baad-fae9c2f094df.png) 50 | 51 | - Configure AWS with AWS CLI (use command: aws configure): 52 | 53 | ``` 54 | $ aws configure 55 | AWS Access Key ID [None]: AKIAIOSFODNN7EXAMPLE 56 | AWS Secret Access Key [None]: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY 57 | Default region name [None]: eu-central-1 58 | Default output format [None]: json 59 | ``` 60 | 61 | - After command, AWS creates: 62 | - Credentials file => C:\Users\username\.aws\credentials 63 | - Config file => C:\Users\username\.aws\config 64 | 65 | ``` 66 | # credentials file 67 | [default] 68 | aws_access_key_id = AKIAIOSFODNN7EXAMPLE 69 | aws_secret_access_key = wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY 70 | ``` 71 | 72 | ``` 73 | # config file 74 | [default] 75 | region = eu-central-1 76 | output = json 77 | ``` 78 | 79 | - Now, it is your ready to run Terraform! 80 | 81 | ## Reference 82 | - Terraform Install: https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli 83 | - AWS CLI Install: https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html 84 | - AWS Access Keys: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html 85 | - AWS CLI Configuration: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html 86 | -------------------------------------------------------------------------------- /LAB01-Terraform-Docker-Without-Cloud.md: -------------------------------------------------------------------------------- 1 | ## LAB-01: Terraform Docker => Pull Docker Image, Create Docker Container on Local Machine 2 | 3 | This scenario shows: 4 | - how to use Terraform to manage Docker commands (image pull, container create, etc.) 5 | - without using any cloud, with Terraform Docker module, learning Terraform and making more practice could be easier. 
6 | 7 | 8 | **Code:** https://github.com/omerbsezer/Fast-Terraform/blob/main/labs/terraform-docker-without-cloud/main.tf 9 | 10 | ### Prerequisite 11 | 12 | - You should have a look following lab: 13 | - [LAB-00: Terraform Install, AWS Configuration with Terraform](https://github.com/omerbsezer/Fast-Terraform/blob/main/LAB00-Terraform-Install-AWS-Configuration.md) 14 | - Install Docker on your system. 15 | - Ubuntu: https://docs.docker.com/engine/install/ubuntu/ 16 | - Windows: https://docs.docker.com/desktop/install/windows-install/ 17 | - Mac: https://docs.docker.com/desktop/install/mac-install/ 18 | 19 | ## Steps 20 | 21 | - Create main.tf and copy the code: 22 | 23 | ``` 24 | # main.tf 25 | terraform { 26 | required_providers { 27 | docker = { 28 | source = "kreuzwerker/docker" 29 | version = "~> 3.0.2" 30 | } 31 | } 32 | } 33 | 34 | provider "docker" { 35 | host = "npipe:////.//pipe//docker_engine" 36 | } 37 | 38 | resource "docker_image" "windows" { 39 | name = "mcr.microsoft.com/powershell:lts-windowsservercore-1809" 40 | keep_locally = true 41 | } 42 | 43 | # docker container run -p 80:8000 --name=tutorial -it mcr.microsoft.com/powershell:lts-windowsservercore-1809 powershell 44 | resource "docker_container" "windows" { 45 | image = docker_image.windows.image_id 46 | name = "tutorial" 47 | 48 | stdin_open = true # docker run -i 49 | tty = true # docker run -t 50 | 51 | entrypoint = ["powershell"] 52 | 53 | ports { 54 | internal = 80 55 | external = 8000 56 | } 57 | } 58 | ``` 59 | **Code:** https://github.com/omerbsezer/Fast-Terraform/blob/main/labs/terraform-docker-without-cloud/main.tf 60 | 61 | ![image](https://user-images.githubusercontent.com/10358317/227287393-09ff08a1-9db2-4fc5-98e8-1a20c2bdf9be.png) 62 | 63 | - Run init command: 64 | 65 | ``` 66 | terraform init 67 | ``` 68 | 69 | ![image](https://user-images.githubusercontent.com/10358317/227279233-74013a80-0a71-4c0c-84b7-e9cec6c9d30f.png) 70 | 71 | - Validate file: 72 | 73 | ``` 74 | terraform 
validate 75 | ``` 76 | 77 | - Run plan command: 78 | 79 | ``` 80 | terraform plan 81 | ``` 82 | 83 | ![image](https://user-images.githubusercontent.com/10358317/227279536-a2f72789-36f6-4ee1-82df-a0bed834d34d.png) 84 | 85 | - Run apply command to create resources. Then, Terraform asks to confirm, write "yes": 86 | 87 | ``` 88 | terraform apply 89 | ``` 90 | 91 | ![image](https://user-images.githubusercontent.com/10358317/227281131-7463a9dc-1f61-4f48-a906-410725c0af19.png) 92 | 93 | - With "docker container ls -a", running container is viewed: 94 | 95 | ![image](https://user-images.githubusercontent.com/10358317/227280862-04483fb7-530d-4a75-ad22-21e8a5cbf49b.png) 96 | 97 | - Run following command to connect container powershell: 98 | 99 | ``` 100 | docker container exec -it tutorial powershell 101 | ``` 102 | 103 | - Now, we are in the container, to prove it, we are looking at the users in the container (ContainerAdministrator, ContainerUser) 104 | 105 | ![image](https://user-images.githubusercontent.com/10358317/227282456-4452cbe2-611c-491a-a9f7-f6bbaab28d69.png) 106 | 107 | - Before Terraform runs the container, it pulls the image: 108 | 109 | ![image](https://user-images.githubusercontent.com/10358317/227283686-a6b2ee63-8c01-4610-84c0-3f5d5f622166.png) 110 | 111 | - When "keep_locally = true" in image part, image will be kept after terraform destroy. 112 | 113 | ``` 114 | terraform destroy 115 | ``` 116 | 117 | ![image](https://user-images.githubusercontent.com/10358317/227284301-9fa06ebe-faa5-47aa-ac52-234bf6ca2c4e.png) 118 | 119 | - After destroy command, container is deleted, but image is still kept 120 | 121 | ![image](https://user-images.githubusercontent.com/10358317/227285010-450e8cc2-b3e8-4dd5-9272-65d9f69bfd18.png) 122 | 123 | - With Terraform, we can manage docker images, containers.. 
124 | - More information: https://registry.terraform.io/providers/kreuzwerker/docker/latest/docs/resources/container 125 | 126 | 127 | -------------------------------------------------------------------------------- /LAB06-Data-Sources-EC2.md: -------------------------------------------------------------------------------- 1 | ## LAB-06: Data Sources with Depends_on => Provision EC2 2 | 3 | This scenario shows: 4 | - how to use Data Source to fetch/retrieve data (existed resource information) from AWS 5 | 6 | **Code:** https://github.com/omerbsezer/Fast-Terraform/blob/main/labs/data-sources/main.tf 7 | 8 | ### Prerequisite 9 | 10 | - You should have a look following lab: 11 | - [LAB-00: Terraform Install, AWS Configuration with Terraform](https://github.com/omerbsezer/Fast-Terraform/blob/main/LAB00-Terraform-Install-AWS-Configuration.md) 12 | 13 | ## Steps 14 | 15 | - With data sources, existed resource information can be fetched/retrieved. 16 | - "filter" provide to select/filter the existed instances 17 | - "depends_on" provide to run the data block after resource created 18 | 19 | ``` 20 | ... 21 | data "aws_instance" "data_instance" { 22 | filter { 23 | name = "tag:Name" 24 | values = ["Basic Instance"] 25 | } 26 | 27 | depends_on = [ 28 | aws_instance.instance 29 | ] 30 | } 31 | 32 | output "instance_info" { 33 | value = data.aws_instance.data_instance 34 | } 35 | ... 
36 | ``` 37 | 38 | ![image](https://user-images.githubusercontent.com/10358317/229291040-febb8404-4c00-48c3-b99d-ea0af0e68825.png) 39 | 40 | - Create main.tf: 41 | 42 | ``` 43 | terraform { 44 | required_providers { 45 | aws = { 46 | source = "hashicorp/aws" 47 | version = "~> 4.16" 48 | } 49 | } 50 | 51 | required_version = ">= 1.2.0" 52 | } 53 | 54 | provider "aws" { 55 | region = "eu-central-1" 56 | } 57 | 58 | resource "aws_instance" "instance" { 59 | ami = "ami-0d1ddd83282187d18" # Ubuntu 22.04 eu-central-1 Frankfurt 60 | instance_type = "t2.nano" 61 | 62 | tags = { 63 | Name = "Basic Instance" 64 | } 65 | } 66 | 67 | # filter/select the existed instances 68 | # depends_on if aws_instance.instance is created 69 | 70 | data "aws_instance" "data_instance" { 71 | filter { 72 | name = "tag:Name" 73 | values = ["Basic Instance"] 74 | } 75 | 76 | depends_on = [ 77 | aws_instance.instance 78 | ] 79 | } 80 | 81 | output "instance_info" { 82 | value = data.aws_instance.data_instance 83 | } 84 | 85 | output "instance_public_ip" { 86 | value = data.aws_instance.data_instance.public_ip 87 | } 88 | ``` 89 | 90 | **Code:** https://github.com/omerbsezer/Fast-Terraform/blob/main/labs/data-sources/main.tf 91 | 92 | ![image](https://user-images.githubusercontent.com/10358317/229291093-e5febd7a-fa05-44bc-a224-00a18035b869.png) 93 | 94 | - Run init, validate command: 95 | 96 | ``` 97 | terraform init 98 | terraform validate 99 | ``` 100 | 101 | - Run plan, apply command: 102 | 103 | ``` 104 | terraform plan 105 | terraform apply 106 | ``` 107 | 108 | ![image](https://user-images.githubusercontent.com/10358317/229291488-831a796e-b77a-43ee-92a1-814630834907.png) 109 | 110 | ![image](https://user-images.githubusercontent.com/10358317/229291530-985497f5-87a4-41d8-8fec-3f6217d62e6d.png) 111 | 112 | - With output, details can be viewed: 113 | 114 | ![image](https://user-images.githubusercontent.com/10358317/229291636-963cfbf8-4735-4d62-bae4-c803f70a1775.png) 115 | 116 | 
![image](https://user-images.githubusercontent.com/10358317/229291821-28dea44f-04cf-42ef-b436-cbdfc77bd294.png) 117 | 118 | - Destroy infrastructure: 119 | 120 | ``` 121 | terraform destroy 122 | ``` 123 | 124 | ![image](https://user-images.githubusercontent.com/10358317/229291943-7a61e1c5-743f-4508-928f-04d738c2bb5a.png) 125 | 126 | ![image](https://user-images.githubusercontent.com/10358317/229291973-1d789992-8a90-4709-8da1-2db5b3b79b46.png) 127 | 128 | 129 | -------------------------------------------------------------------------------- /LAB11-Backend-Remote-State.md: -------------------------------------------------------------------------------- 1 | ## LAB-11: Backend - Remote States => Provision EC2 and Save State File on S3 2 | 3 | This scenario shows: 4 | - how to use backend and save Terraform state file on S3 5 | 6 | **Code:** https://github.com/omerbsezer/Fast-Terraform/blob/main/labs/backend-remote-state/ 7 | 8 | ### Prerequisite 9 | 10 | - You should have a look following lab: 11 | - [LAB-00: Terraform Install, AWS Configuration with Terraform](https://github.com/omerbsezer/Fast-Terraform/blob/main/LAB00-Terraform-Install-AWS-Configuration.md) 12 | 13 | ## Steps 14 | 15 | - With enabling remote state file using backend: 16 | - multiple user can work on the same state file 17 | - saving common state file on S3 is possible 18 | 19 | - Create S3 bucket on AWS 20 | 21 | ![image](https://user-images.githubusercontent.com/10358317/230646169-7b9a7210-bd64-4f50-acf7-12690d293490.png) 22 | 23 | ![image](https://user-images.githubusercontent.com/10358317/230646417-8c460ba7-e45b-4560-8859-d1ebfcac4812.png) 24 | 25 | 26 | - Create basic main.tf file. 
27 | 28 | 29 | ``` 30 | # main.tf 31 | terraform { 32 | required_providers { 33 | aws = { 34 | source = "hashicorp/aws" 35 | version = "~> 4.16" 36 | } 37 | } 38 | 39 | required_version = ">= 1.2.0" 40 | 41 | backend "s3" { 42 | bucket = "terraform-state" 43 | key = "key/terraform.tfstate" 44 | region = "eu-central-1" 45 | } 46 | } 47 | 48 | provider "aws" { 49 | region = "eu-central-1" 50 | } 51 | 52 | resource "aws_instance" "instance" { 53 | ami = "ami-0d1ddd83282187d18" # Ubuntu 22.04 eu-central-1 Frankfurt 54 | instance_type = "t2.nano" 55 | 56 | tags = { 57 | Name = "Basic Instance" 58 | } 59 | } 60 | ``` 61 | 62 | **Code:** https://github.com/omerbsezer/Fast-Terraform/blob/main/labs/backend-remote-state/main.tf 63 | 64 | ![image](https://user-images.githubusercontent.com/10358317/230646618-f1200c13-eb83-4bcd-b353-a6c9a02272bd.png) 65 | 66 | 67 | - Run init, validate command: 68 | 69 | ``` 70 | terraform init 71 | terraform validate 72 | ``` 73 | 74 | ![image](https://user-images.githubusercontent.com/10358317/230646686-0b8ad133-d2e4-4ebf-8505-00eb769b1e5a.png) 75 | 76 | - Run plan, apply command: 77 | 78 | ``` 79 | terraform plan # for dry-run 80 | terraform apply 81 | ``` 82 | 83 | - On AWS S3, tfstate file is created: 84 | 85 | ![image](https://user-images.githubusercontent.com/10358317/230647235-3224ec77-2483-460c-81c6-40e9e434f869.png) 86 | 87 | - On local machine, state file is not saved now: 88 | 89 | ![image](https://user-images.githubusercontent.com/10358317/230647627-b5038f51-b34f-443c-b72a-d9c140d3d770.png) 90 | 91 | - On AWS, state file can be viewed, downloaded: 92 | 93 | ![image](https://user-images.githubusercontent.com/10358317/230647970-97d72d67-a588-40ff-a94f-3ea765dfe274.png) 94 | 95 | - With pull command, state file can be download on local machine: 96 | 97 | ``` 98 | terraform state pull > terraform.tfstate 99 | ``` 100 | 101 | ![image](https://user-images.githubusercontent.com/10358317/230648330-d89d0b53-617b-4449-a2cb-b92b163cbfdd.png) 
102 | 103 | 104 | - Run destroy command: 105 | 106 | ``` 107 | terraform destroy 108 | ``` 109 | 110 | - After destroy command, all resources are deleted in state file on S3: 111 | 112 | ![image](https://user-images.githubusercontent.com/10358317/230649315-7cb1d236-145b-49ed-ad3f-60aaa01d7ca0.png) 113 | 114 | - To download updated state file: 115 | 116 | ``` 117 | terraform state pull > terraform.tfstate 118 | ``` 119 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Ömer Berat Sezer 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /labs/backend-remote-state/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "~> 4.16" 6 | } 7 | } 8 | 9 | required_version = ">= 1.2.0" 10 | 11 | backend "s3" { 12 | bucket = "terraform-state" 13 | key = "key/terraform.tfstate" 14 | region = "eu-central-1" 15 | } 16 | } 17 | 18 | provider "aws" { 19 | region = "eu-central-1" 20 | } 21 | 22 | resource "aws_instance" "instance" { 23 | ami = "ami-0d1ddd83282187d18" # Ubuntu 22.04 eu-central-1 Frankfurt 24 | instance_type = "t2.nano" 25 | 26 | tags = { 27 | Name = "Basic Instance" 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /labs/basic-resource-ec2-ubuntu/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "~> 4.16" 6 | } 7 | } 8 | 9 | required_version = ">= 1.2.0" 10 | } 11 | 12 | provider "aws" { 13 | region = "eu-central-1" 14 | } 15 | 16 | resource "aws_instance" "instance" { 17 | ami = "ami-0d1ddd83282187d18" # Ubuntu 22.04 eu-central-1 Frankfurt 18 | instance_type = "t2.nano" 19 | 20 | tags = { 21 | Name = "Basic Instance" 22 | } 23 | } -------------------------------------------------------------------------------- /labs/data-sources/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "~> 4.16" 6 | } 7 | } 8 | 9 | required_version = ">= 1.2.0" 10 | } 11 | 12 | provider "aws" { 13 | region = "eu-central-1" 14 | } 15 | 16 | resource "aws_instance" "instance" { 17 | ami = "ami-0d1ddd83282187d18" # Ubuntu 22.04 eu-central-1 Frankfurt 18 | instance_type = "t2.nano" 19 | 20 | tags 
= { 21 | Name = "Basic Instance" 22 | } 23 | } 24 | 25 | # with data source, new resource is not created. 26 | # data source provides to fetch (read) or retrieve the data from AWS 27 | # filter/select the existed instances 28 | # depends_on if aws_instance.instance is created 29 | 30 | data "aws_instance" "data_instance" { 31 | filter { 32 | name = "tag:Name" 33 | values = ["Basic Instance"] 34 | } 35 | 36 | depends_on = [ 37 | aws_instance.instance 38 | ] 39 | } 40 | 41 | output "instance_info" { 42 | value = data.aws_instance.data_instance 43 | } 44 | 45 | output "instance_public_ip" { 46 | value = data.aws_instance.data_instance.public_ip 47 | } 48 | -------------------------------------------------------------------------------- /labs/dynamic-blocks/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "~> 4.16" 6 | } 7 | } 8 | 9 | required_version = ">= 1.2.0" 10 | } 11 | 12 | provider "aws" { 13 | region = "eu-central-1" 14 | } 15 | 16 | resource "aws_vpc" "my_vpc" { 17 | cidr_block = "10.0.0.0/16" 18 | enable_dns_hostnames = true 19 | tags = { 20 | Name = "My VPC" 21 | } 22 | } 23 | 24 | resource "aws_subnet" "public" { 25 | vpc_id = aws_vpc.my_vpc.id 26 | cidr_block = "10.0.0.0/24" 27 | availability_zone = "eu-central-1c" 28 | tags = { 29 | Name = "Public Subnet" 30 | } 31 | } 32 | 33 | resource "aws_internet_gateway" "my_vpc_igw" { 34 | vpc_id = aws_vpc.my_vpc.id 35 | tags = { 36 | Name = "My VPC - Internet Gateway" 37 | } 38 | } 39 | 40 | resource "aws_route_table" "my_vpc_eu_central_1c_public" { 41 | vpc_id = aws_vpc.my_vpc.id 42 | route { 43 | cidr_block = "0.0.0.0/0" 44 | gateway_id = aws_internet_gateway.my_vpc_igw.id 45 | } 46 | tags = { 47 | Name = "Public Subnet Route Table" 48 | } 49 | } 50 | resource "aws_route_table_association" "my_vpc_eu_central_1c_public" { 51 | subnet_id = aws_subnet.public.id 52 | 
route_table_id = aws_route_table.my_vpc_eu_central_1c_public.id 53 | } 54 | 55 | locals { 56 | ingress_rules = [{ 57 | port = 22 58 | description = "Ingress rules for port SSH" 59 | }, 60 | { 61 | port = 80 62 | description = "Ingress rules for port HTTP" 63 | }, 64 | { 65 | port = 443 66 | description = "Ingress rules for port HTTPS" 67 | }] 68 | } 69 | 70 | resource "aws_security_group" "main" { 71 | name = "resource_with_dynamic_block" 72 | description = "Allow SSH inbound connections" 73 | vpc_id = aws_vpc.my_vpc.id # todo: update it with data.aws_vpc.main.id 74 | 75 | dynamic "ingress" { 76 | for_each = local.ingress_rules 77 | 78 | content { 79 | description = ingress.value.description 80 | from_port = ingress.value.port 81 | to_port = ingress.value.port 82 | protocol = "tcp" 83 | cidr_blocks = ["0.0.0.0/0"] 84 | } 85 | } 86 | 87 | egress { 88 | from_port = 0 89 | to_port = 0 90 | protocol = "-1" 91 | cidr_blocks = ["0.0.0.0/0"] 92 | } 93 | 94 | tags = { 95 | Name = "AWS security group dynamic block" 96 | } 97 | } 98 | 99 | resource "aws_instance" "ubuntu2204" { 100 | ami = "ami-0d1ddd83282187d18" # Ubuntu 22.04 eu-central-1 Frankfurt 101 | instance_type = "t2.nano" 102 | key_name = "testkey" 103 | vpc_security_group_ids = [aws_security_group.main.id] 104 | subnet_id = aws_subnet.public.id 105 | associate_public_ip_address = true 106 | tags = { 107 | Name = "Ubuntu 22.04" 108 | } 109 | } 110 | 111 | output "instance_ubuntu2204_public_ip" { 112 | value = "${aws_instance.ubuntu2204.public_ip}" 113 | } 114 | 115 | 116 | -------------------------------------------------------------------------------- /labs/iamuser-metaargs-count-for-foreach-map/count/main.tf: -------------------------------------------------------------------------------- 1 | # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role 2 | # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy_attachment 3 | # IAM users - roles - 
permissions 4 | # User -> User Group -> Policy (Permission) 5 | # AWS Services -> Roles -> Policy (Permission) 6 | terraform { 7 | required_providers { 8 | aws = { 9 | source = "hashicorp/aws" 10 | version = "~> 4.16" 11 | } 12 | } 13 | 14 | required_version = ">= 1.2.0" 15 | } 16 | ##################################################### 17 | # User - User Group Attachment (With Index Count) 18 | resource "aws_iam_user_group_membership" "user1_group_attach" { 19 | user = aws_iam_user.user_example[0].name 20 | 21 | groups = [ 22 | aws_iam_group.admin_group.name, 23 | aws_iam_group.dev_group.name, 24 | ] 25 | } 26 | 27 | resource "aws_iam_user_group_membership" "user2_group_attach" { 28 | user = aws_iam_user.user_example[1].id 29 | 30 | groups = [ 31 | aws_iam_group.admin_group.name 32 | ] 33 | } 34 | 35 | resource "aws_iam_user_group_membership" "user3_group_attach" { 36 | user = aws_iam_user.user_example[2].name 37 | 38 | groups = [ 39 | aws_iam_group.dev_group.name 40 | ] 41 | } 42 | ##################################################### 43 | # User Group Definition 44 | resource "aws_iam_group" "admin_group" { 45 | name = "admin_group" 46 | } 47 | 48 | resource "aws_iam_group" "dev_group" { 49 | name = "dev_group" 50 | } 51 | ##################################################### 52 | # Policy Definition, Policy-Group Attachment 53 | data "aws_iam_policy_document" "admin_policy" { 54 | statement { 55 | effect = "Allow" 56 | actions = ["*"] 57 | resources = ["*"] 58 | } 59 | } 60 | resource "aws_iam_policy" "admin_policy" { 61 | name = "admin-policy" 62 | description = "Admin policy" 63 | policy = data.aws_iam_policy_document.admin_policy.json 64 | } 65 | 66 | data "aws_iam_policy_document" "ec2_policy" { 67 | statement { 68 | effect = "Allow" 69 | actions = ["ec2:Describe*"] 70 | resources = ["*"] 71 | } 72 | } 73 | 74 | resource "aws_iam_policy" "ec2_policy" { 75 | name = "ec2-policy" 76 | description = "EC2 policy" 77 | policy = 
data.aws_iam_policy_document.ec2_policy.json 78 | } 79 | 80 | ##################################################### 81 | # Policy Attachment to the Admin, Dev Group 82 | resource "aws_iam_group_policy_attachment" "admin_group_admin_policy_attach" { 83 | group = aws_iam_group.admin_group.name 84 | policy_arn = aws_iam_policy.admin_policy.arn 85 | } 86 | 87 | resource "aws_iam_group_policy_attachment" "dev_group_ec2_policy_attach" { 88 | group = aws_iam_group.dev_group.name 89 | policy_arn = aws_iam_policy.ec2_policy.arn 90 | } 91 | 92 | ##################################################### 93 | # Username Definition 94 | # With Count 95 | resource "aws_iam_user" "user_example" { 96 | count = length(var.user_names) 97 | name = var.user_names[count.index] 98 | } 99 | # count, use list 100 | variable "user_names" { 101 | description = "IAM usernames" 102 | type = list(string) 103 | default = ["username1_admin_dev", "username2_admin", "username3_dev_ec2"] 104 | } 105 | ##################################################### 106 | # With for loop 107 | output "print_the_names" { 108 | value = [for name in var.user_names : name] 109 | } 110 | -------------------------------------------------------------------------------- /labs/iamuser-metaargs-count-for-foreach-map/for_each/main.tf: -------------------------------------------------------------------------------- 1 | # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role 2 | # https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy_attachment 3 | # IAM users - roles - permissions 4 | # User -> User Group -> Policy (Permission) 5 | terraform { 6 | required_providers { 7 | aws = { 8 | source = "hashicorp/aws" 9 | version = "~> 4.16" 10 | } 11 | } 12 | 13 | required_version = ">= 1.2.0" 14 | } 15 | 16 | ##################################################### 17 | # User - User Group Attachment (With Index Count) 18 | resource "aws_iam_user_group_membership" 
"user1_group_attach" { 19 | user = aws_iam_user.user_example["username1_admin_dev"].name 20 | 21 | groups = [ 22 | aws_iam_group.admin_group.name, 23 | aws_iam_group.dev_group.name, 24 | ] 25 | } 26 | 27 | resource "aws_iam_user_group_membership" "user2_group_attach" { 28 | user = aws_iam_user.user_example["username2_admin"].id 29 | 30 | groups = [ 31 | aws_iam_group.admin_group.name 32 | ] 33 | } 34 | 35 | resource "aws_iam_user_group_membership" "user3_group_attach" { 36 | user = aws_iam_user.user_example["username3_dev_s3"].name 37 | 38 | groups = [ 39 | aws_iam_group.dev_group.name 40 | ] 41 | } 42 | ##################################################### 43 | # User Group Definition 44 | resource "aws_iam_group" "admin_group" { 45 | name = "admin_group" 46 | } 47 | 48 | resource "aws_iam_group" "dev_group" { 49 | name = "dev_group" 50 | } 51 | ##################################################### 52 | # Policy Definition, Policy-Group Attachment 53 | data "aws_iam_policy_document" "admin_policy" { 54 | statement { 55 | effect = "Allow" 56 | actions = ["*"] 57 | resources = ["*"] 58 | } 59 | } 60 | resource "aws_iam_policy" "admin_policy" { 61 | name = "admin-policy" 62 | description = "Admin policy" 63 | policy = data.aws_iam_policy_document.admin_policy.json 64 | } 65 | 66 | data "aws_iam_policy_document" "s3_policy" { 67 | statement { 68 | effect = "Allow" 69 | actions = ["s3:*"] 70 | resources = [ 71 | "arn:aws:s3:::mybucket", 72 | "arn:aws:s3:::mybucket/*" 73 | ] 74 | } 75 | } 76 | 77 | resource "aws_iam_policy" "s3_policy" { 78 | name = "s3-policy" 79 | description = "S3 policy" 80 | policy = data.aws_iam_policy_document.s3_policy.json 81 | } 82 | 83 | ##################################################### 84 | # Policy Attachment to the Admin, Dev Group 85 | resource "aws_iam_group_policy_attachment" "admin_group_admin_policy_attach" { 86 | group = aws_iam_group.admin_group.name 87 | policy_arn = aws_iam_policy.admin_policy.arn 88 | } 89 | 90 | resource 
"aws_iam_group_policy_attachment" "dev_group_s3_policy_attach" { 91 | group = aws_iam_group.dev_group.name 92 | policy_arn = aws_iam_policy.s3_policy.arn 93 | } 94 | ##################################################### 95 | # With for_each 96 | resource "aws_iam_user" "user_example" { 97 | for_each = var.user_names 98 | name = each.value 99 | } 100 | # for each, use set instead of list 101 | variable "user_names" { 102 | description = "IAM usernames" 103 | type = set(string) 104 | default = ["username1_admin_dev", "username2_admin", "username3_dev_s3"] 105 | } 106 | ##################################################### 107 | # With for loop 108 | output "print_the_names" { 109 | value = [for name in var.user_names : name] 110 | } 111 | -------------------------------------------------------------------------------- /labs/iamuser-metaargs-count-for-foreach-map/map/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "~> 4.16" 6 | } 7 | } 8 | 9 | required_version = ">= 1.2.0" 10 | } 11 | 12 | ##################################################### 13 | # With for_each 14 | resource "aws_iam_user" "example" { 15 | for_each = var.user_names 16 | name = each.value 17 | } 18 | # With Map 19 | variable "user_names" { 20 | description = "map" 21 | type = map(string) 22 | default = { 23 | user1 = "username1" 24 | user2 = "username2" 25 | user3 = "username3" 26 | } 27 | } 28 | # with for loop on map 29 | output "user_with_roles" { 30 | value = [for name, role in var.user_names : "${name} is the ${role}"] 31 | } 32 | -------------------------------------------------------------------------------- /labs/modules/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "~> 4.16" 6 | } 7 | } 8 | 9 | required_version = ">= 
1.2.0" 10 | } 11 | 12 | 13 | module "webserver-1" { 14 | source = ".//module1" 15 | 16 | instance_type = "t2.nano" 17 | tag = "Webserver1 - Module1 - 20.04" 18 | location = "eu-central-1" 19 | availability_zone = "eu-central-1c" 20 | ami = "ami-0e067cc8a2b58de59" # Ubuntu 20.04 eu-central-1 Frankfurt 21 | 22 | } 23 | 24 | module "webserver-2" { 25 | source = ".//module2" 26 | 27 | instance_type = "t2.micro" 28 | tag = "Webserver2 - Module2 - 22.04" 29 | location = "eu-central-1" 30 | availability_zone = "eu-central-1a" 31 | ami = "ami-0d1ddd83282187d18" # Ubuntu 22.04 eu-central-1 Frankfurt 32 | } -------------------------------------------------------------------------------- /labs/modules/module1/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "~> 4.16" 6 | } 7 | } 8 | 9 | required_version = ">= 1.2.0" 10 | } 11 | 12 | provider "aws" { 13 | region = var.location 14 | } 15 | 16 | locals { 17 | staging_env = "module1" 18 | } 19 | 20 | resource "aws_vpc" "my_vpc" { 21 | cidr_block = "10.0.0.0/16" 22 | enable_dns_hostnames = true 23 | tags = { 24 | Name = "${local.staging_env}-vpc-tag" 25 | } 26 | } 27 | 28 | resource "aws_subnet" "public" { 29 | vpc_id = aws_vpc.my_vpc.id 30 | cidr_block = "10.0.0.0/24" 31 | availability_zone = var.availability_zone 32 | tags = { 33 | Name = "${local.staging_env}-subnet-tag" 34 | } 35 | } 36 | 37 | resource "aws_internet_gateway" "igw" { 38 | vpc_id = aws_vpc.my_vpc.id 39 | tags = { 40 | Name = "${local.staging_env}-Internet Gateway" 41 | } 42 | } 43 | 44 | resource "aws_route_table" "rt" { 45 | vpc_id = aws_vpc.my_vpc.id 46 | route { 47 | cidr_block = "0.0.0.0/0" 48 | gateway_id = aws_internet_gateway.igw.id 49 | } 50 | tags = { 51 | Name = "${local.staging_env}- Public Subnet Route Table" 52 | } 53 | } 54 | resource "aws_route_table_association" "rta" { 55 | subnet_id = aws_subnet.public.id 56 | 
route_table_id = aws_route_table.rt.id 57 | } 58 | 59 | resource "aws_security_group" "ssg" { 60 | name = "module1_security_group" # name should be different on modules 61 | description = "Allow SSH inbound connections" 62 | vpc_id = aws_vpc.my_vpc.id 63 | # for SSH 64 | ingress { 65 | from_port = 22 66 | to_port = 22 67 | protocol = "tcp" 68 | cidr_blocks = ["0.0.0.0/0"] 69 | } 70 | # for HTTP Apache Server 71 | ingress { 72 | from_port = 80 73 | to_port = 80 74 | protocol = "tcp" 75 | cidr_blocks = ["0.0.0.0/0"] 76 | } 77 | # for HTTPS Apache Server 78 | ingress { 79 | from_port = 443 80 | to_port = 443 81 | protocol = "tcp" 82 | cidr_blocks = ["0.0.0.0/0"] 83 | } 84 | egress { 85 | from_port = 0 86 | to_port = 0 87 | protocol = "-1" 88 | cidr_blocks = ["0.0.0.0/0"] 89 | } 90 | tags = { 91 | Name = "allow_ssh_sg" 92 | } 93 | } 94 | 95 | resource "aws_instance" "ec2" { 96 | ami = var.ami 97 | instance_type = var.instance_type 98 | subnet_id = aws_subnet.public.id 99 | associate_public_ip_address = true 100 | vpc_security_group_ids = [aws_security_group.ssg.id] 101 | user_data = <<-EOF 102 | #! /bin/bash 103 | sudo apt-get update 104 | sudo apt-get install -y apache2 105 | sudo systemctl start apache2 106 | sudo systemctl enable apache2 107 | echo "

!! MODULE-1 !!: Deployed via Terraform from $(hostname -f)

" | sudo tee /var/www/html/index.html 108 | EOF 109 | tags = { 110 | Name = var.tag 111 | } 112 | 113 | } 114 | 115 | # output single values 116 | output "public_ip" { 117 | value = aws_instance.ec2.public_ip 118 | } 119 | 120 | -------------------------------------------------------------------------------- /labs/modules/module1/variables.tf: -------------------------------------------------------------------------------- 1 | variable "instance_type" { 2 | type = string 3 | description = "EC2 Instance Type" 4 | } 5 | 6 | variable "tag" { 7 | type = string 8 | description = "The tag for the EC2 instance" 9 | } 10 | 11 | variable "location" { 12 | type = string 13 | description = "The project region" 14 | default = "eu-central-1" 15 | } 16 | 17 | variable "availability_zone" { 18 | type = string 19 | description = "The project availability zone" 20 | default = "eu-central-1c" 21 | } 22 | 23 | variable "ami" { 24 | type = string 25 | description = "The project region" 26 | } 27 | 28 | -------------------------------------------------------------------------------- /labs/modules/module2/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "~> 4.16" 6 | } 7 | } 8 | 9 | required_version = ">= 1.2.0" 10 | } 11 | 12 | provider "aws" { 13 | region = var.location 14 | } 15 | 16 | locals { 17 | staging_env = "module2" 18 | } 19 | 20 | resource "aws_vpc" "my_vpc" { 21 | cidr_block = "10.5.0.0/16" 22 | enable_dns_hostnames = true 23 | tags = { 24 | Name = "${local.staging_env}-vpc-tag" 25 | } 26 | } 27 | 28 | resource "aws_subnet" "public" { 29 | vpc_id = aws_vpc.my_vpc.id 30 | cidr_block = "10.5.0.0/24" 31 | availability_zone = var.availability_zone 32 | tags = { 33 | Name = "${local.staging_env}-subnet-tag" 34 | } 35 | } 36 | 37 | resource "aws_internet_gateway" "igw" { 38 | vpc_id = aws_vpc.my_vpc.id 39 | tags = { 40 | Name = 
"${local.staging_env}-Internet Gateway" 41 | } 42 | } 43 | 44 | resource "aws_route_table" "rt" { 45 | vpc_id = aws_vpc.my_vpc.id 46 | route { 47 | cidr_block = "0.0.0.0/0" 48 | gateway_id = aws_internet_gateway.igw.id 49 | } 50 | tags = { 51 | Name = "${local.staging_env}- Public Subnet Route Table" 52 | } 53 | } 54 | resource "aws_route_table_association" "rta" { 55 | subnet_id = aws_subnet.public.id 56 | route_table_id = aws_route_table.rt.id 57 | } 58 | 59 | resource "aws_security_group" "ssg" { 60 | name = "module2_security_group" # name should be different on modules 61 | description = "Allow SSH inbound connections" 62 | vpc_id = aws_vpc.my_vpc.id 63 | # for SSH 64 | ingress { 65 | from_port = 22 66 | to_port = 22 67 | protocol = "tcp" 68 | cidr_blocks = ["0.0.0.0/0"] 69 | } 70 | # for HTTP Apache Server 71 | ingress { 72 | from_port = 80 73 | to_port = 80 74 | protocol = "tcp" 75 | cidr_blocks = ["0.0.0.0/0"] 76 | } 77 | # for HTTPS Apache Server 78 | ingress { 79 | from_port = 443 80 | to_port = 443 81 | protocol = "tcp" 82 | cidr_blocks = ["0.0.0.0/0"] 83 | } 84 | egress { 85 | from_port = 0 86 | to_port = 0 87 | protocol = "-1" 88 | cidr_blocks = ["0.0.0.0/0"] 89 | } 90 | tags = { 91 | Name = "allow_ssh_sg" 92 | } 93 | } 94 | 95 | resource "aws_instance" "ec2" { 96 | ami = var.ami 97 | instance_type = var.instance_type 98 | subnet_id = aws_subnet.public.id 99 | associate_public_ip_address = true 100 | vpc_security_group_ids = [aws_security_group.ssg.id] 101 | user_data = <<-EOF 102 | #! /bin/bash 103 | sudo apt-get update 104 | sudo apt-get install -y apache2 105 | sudo systemctl start apache2 106 | sudo systemctl enable apache2 107 | echo "

** MODULE-2 **: Deployed via Terraform from $(hostname -f)

" | sudo tee /var/www/html/index.html 108 | EOF 109 | tags = { 110 | Name = var.tag 111 | } 112 | 113 | } 114 | 115 | # output single values 116 | output "public_ip" { 117 | value = aws_instance.ec2.public_ip 118 | } 119 | 120 | -------------------------------------------------------------------------------- /labs/modules/module2/variables.tf: -------------------------------------------------------------------------------- 1 | variable "instance_type" { 2 | type = string 3 | description = "EC2 Instance Type" 4 | } 5 | 6 | variable "tag" { 7 | type = string 8 | description = "The tag for the EC2 instance" 9 | } 10 | 11 | variable "location" { 12 | type = string 13 | description = "The project region" 14 | default = "eu-central-1" 15 | } 16 | 17 | variable "availability_zone" { 18 | type = string 19 | description = "The project availability zone" 20 | default = "eu-central-1c" 21 | } 22 | 23 | variable "ami" { 24 | type = string 25 | description = "The project region" 26 | } 27 | 28 | -------------------------------------------------------------------------------- /labs/provisioners-nullresources/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "~> 4.16" 6 | } 7 | } 8 | required_version = ">= 1.2.0" 9 | } 10 | 11 | provider "aws" { 12 | region = "eu-central-1" 13 | } 14 | 15 | resource "aws_vpc" "my_vpc" { 16 | cidr_block = "10.0.0.0/16" 17 | enable_dns_hostnames = true 18 | tags = { 19 | Name = "My VPC" 20 | } 21 | } 22 | 23 | resource "aws_subnet" "public" { 24 | vpc_id = aws_vpc.my_vpc.id 25 | cidr_block = "10.0.0.0/24" 26 | availability_zone = "eu-central-1c" 27 | tags = { 28 | Name = "Public Subnet" 29 | } 30 | } 31 | 32 | resource "aws_internet_gateway" "my_vpc_igw" { 33 | vpc_id = aws_vpc.my_vpc.id 34 | tags = { 35 | Name = "My VPC - Internet Gateway" 36 | } 37 | } 38 | 39 | resource "aws_route_table" 
"my_vpc_eu_central_1c_public" { 40 | vpc_id = aws_vpc.my_vpc.id 41 | route { 42 | cidr_block = "0.0.0.0/0" 43 | gateway_id = aws_internet_gateway.my_vpc_igw.id 44 | } 45 | tags = { 46 | Name = "Public Subnet Route Table" 47 | } 48 | } 49 | resource "aws_route_table_association" "my_vpc_eu_central_1c_public" { 50 | subnet_id = aws_subnet.public.id 51 | route_table_id = aws_route_table.my_vpc_eu_central_1c_public.id 52 | } 53 | 54 | resource "aws_security_group" "allow_ssh" { 55 | name = "allow_ssh_sg" 56 | description = "Allow SSH inbound connections" 57 | vpc_id = aws_vpc.my_vpc.id 58 | # for SSH 59 | ingress { 60 | from_port = 22 61 | to_port = 22 62 | protocol = "tcp" 63 | cidr_blocks = ["0.0.0.0/0"] 64 | } 65 | egress { 66 | from_port = 0 67 | to_port = 0 68 | protocol = "-1" 69 | cidr_blocks = ["0.0.0.0/0"] 70 | } 71 | tags = { 72 | Name = "allow_ssh_sg" 73 | } 74 | } 75 | 76 | resource "aws_instance" "ubuntu2204" { 77 | 78 | ami = "ami-0d1ddd83282187d18" # Ubuntu 22.04 eu-central-1 Frankfurt 79 | instance_type = "t2.nano" 80 | key_name = "testkey" 81 | vpc_security_group_ids = [aws_security_group.allow_ssh.id] 82 | subnet_id = aws_subnet.public.id 83 | associate_public_ip_address = true 84 | 85 | tags = { 86 | Name = "Ubuntu 22.04" 87 | } 88 | 89 | provisioner "file" { 90 | source = "test-file.txt" 91 | destination = "/home/ubuntu/test-file.txt" 92 | } 93 | 94 | provisioner "file" { 95 | content = "I want to copy this string to the destination file => server.txt (using provisioner file content)" 96 | destination = "/home/ubuntu/server.txt" 97 | } 98 | 99 | provisioner "remote-exec" { 100 | inline = [ 101 | "touch hello.txt", 102 | "echo helloworld remote-exec provisioner >> hello.txt", 103 | ] 104 | } 105 | 106 | connection { 107 | type = "ssh" 108 | host = self.public_ip 109 | user = "ubuntu" 110 | private_key = file("testkey.pem") 111 | timeout = "4m" 112 | } 113 | } 114 | 115 | resource "null_resource" "example" { 116 | provisioner "local-exec" { 117 | 
command = "'This is test file for null resource local-exec' >> nullresource-generated.txt" 118 | interpreter = ["PowerShell", "-Command"] 119 | } 120 | } 121 | 122 | -------------------------------------------------------------------------------- /labs/provisioners-nullresources/test-file.txt: -------------------------------------------------------------------------------- 1 | this is test file 2 | transferring this file to remote instance with "provisioner file" -------------------------------------------------------------------------------- /labs/template/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "~> 4.16" 6 | } 7 | } 8 | 9 | required_version = ">= 1.2.0" 10 | } 11 | 12 | provider "aws" { 13 | region = "eu-central-1" 14 | } 15 | 16 | resource "aws_iam_user" "newuser" { 17 | name = "New-User" # must only contain alphanumeric characters, hyphens, underscores, commas, periods, @ symbols, plus and equals signs 18 | } 19 | resource "aws_iam_access_key" "access_key" { 20 | user = aws_iam_user.newuser.name 21 | } 22 | 23 | resource "aws_iam_user_policy" "instanceManageUser_assume_role" { 24 | name = "EC2-S3-Lambda-DynamoDb-Policy" 25 | user = "${aws_iam_user.newuser.name}" 26 | policy = templatefile("${path.module}/policy.tftpl", { 27 | ec2_policies = [ 28 | "ec2:RunInstances", 29 | "ec2:StopInstances", 30 | "ec2:StartInstances", 31 | "ec2:TerminateInstances", 32 | "ec2:TerminateInstances", 33 | "ec2:Describe*", 34 | "ec2:CreateTags", 35 | "ec2:RequestSpotInstances" 36 | ], 37 | s3_policies = [ 38 | "s3:Get*", 39 | "s3:List*", 40 | "s3:Describe*", 41 | "s3-object-lambda:Get*", 42 | "s3-object-lambda:List*" 43 | ], 44 | lambda_policies = [ 45 | "lambda:Create*", 46 | "lambda:List*", 47 | "lambda:Delete*", 48 | "lambda:Get*" 49 | ], 50 | dynamodb_policies = [ 51 | "dynamodb:Describe*", 52 | "dynamodb:Update*", 53 | 
"dynamodb:Get*", 54 | "dynamodb:List*", 55 | "dynamodb:BatchGetItem", 56 | "dynamodb:Query", 57 | "dynamodb:Scan", 58 | "dynamodb:PartiQLSelect" 59 | ], 60 | }) 61 | } 62 | 63 | output "secret_key" { 64 | value = aws_iam_access_key.access_key.secret 65 | sensitive = true 66 | } 67 | 68 | output "access_key" { 69 | value = aws_iam_access_key.access_key.id 70 | } -------------------------------------------------------------------------------- /labs/template/policy.tftpl: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Action": ${jsonencode(ec2_policies)}, 7 | "Resource": "*" 8 | }, 9 | { 10 | "Effect": "Allow", 11 | "Action": ${jsonencode(s3_policies)}, 12 | "Resource": "*" 13 | }, 14 | { 15 | "Effect": "Allow", 16 | "Action": ${jsonencode(lambda_policies)}, 17 | "Resource": "*" 18 | }, 19 | { 20 | "Effect": "Allow", 21 | "Action": ${jsonencode(dynamodb_policies)}, 22 | "Resource": "*" 23 | } 24 | ] 25 | } -------------------------------------------------------------------------------- /labs/terraform-docker-without-cloud/main.tf: -------------------------------------------------------------------------------- 1 | # windows, prerequisite: install docker on your system 2 | # details, usage: https://registry.terraform.io/providers/kreuzwerker/docker/latest/docs/resources/container 3 | terraform { 4 | required_providers { 5 | docker = { 6 | source = "kreuzwerker/docker" 7 | version = "~> 3.0.2" 8 | } 9 | } 10 | } 11 | 12 | provider "docker" { 13 | host = "npipe:////.//pipe//docker_engine" 14 | } 15 | 16 | resource "docker_image" "windows" { 17 | name = "mcr.microsoft.com/powershell:lts-windowsservercore-1809" 18 | keep_locally = true 19 | } 20 | 21 | # docker container run -p 80:8000 --name=tutorial -it mcr.microsoft.com/powershell:lts-windowsservercore-1809 powershell 22 | resource "docker_container" "windows" { 23 | image = 
docker_image.windows.image_id 24 | name = "tutorial" 25 | 26 | stdin_open = true # docker run -i 27 | tty = true # docker run -t 28 | 29 | entrypoint = ["powershell"] 30 | 31 | ports { 32 | internal = 80 33 | external = 8000 34 | } 35 | } 36 | 37 | # docker container ls -a 38 | # docker container exec -it tutorial powershell 39 | # ls, exit 40 | # terraform destroy -auto-approve 41 | -------------------------------------------------------------------------------- /labs/variables-locals-output/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "~> 4.16" 6 | } 7 | } 8 | 9 | required_version = ">= 1.2.0" 10 | } 11 | 12 | provider "aws" { 13 | region = var.location 14 | } 15 | 16 | locals { 17 | staging_env = "staging" 18 | } 19 | 20 | resource "aws_vpc" "my_vpc" { 21 | cidr_block = "10.0.0.0/16" 22 | enable_dns_hostnames = true 23 | tags = { 24 | Name = "${local.staging_env}-vpc-tag" 25 | } 26 | } 27 | 28 | resource "aws_subnet" "my_subnet" { 29 | vpc_id = aws_vpc.my_vpc.id 30 | cidr_block = "10.0.0.0/16" 31 | availability_zone = var.availability_zone 32 | tags = { 33 | Name = "${local.staging_env}-subnet-tag" 34 | } 35 | } 36 | 37 | resource "aws_internet_gateway" "my_vpc_igw" { 38 | vpc_id = aws_vpc.my_vpc.id 39 | tags = { 40 | Name = "${local.staging_env}-Internet Gateway" 41 | } 42 | } 43 | 44 | resource "aws_route_table" "my_vpc_eu_central_1c_public" { 45 | vpc_id = aws_vpc.my_vpc.id 46 | route { 47 | cidr_block = "0.0.0.0/0" 48 | gateway_id = aws_internet_gateway.my_vpc_igw.id 49 | } 50 | tags = { 51 | Name = "${local.staging_env}- Public Subnet Route Table" 52 | } 53 | } 54 | resource "aws_route_table_association" "my_vpc_eu_central_1c_public" { 55 | subnet_id = aws_subnet.my_subnet.id 56 | route_table_id = aws_route_table.my_vpc_eu_central_1c_public.id 57 | } 58 | 59 | resource "aws_instance" "ec2_example" { 60 | 61 | ami = 
var.ami 62 | instance_type = var.instance_type 63 | subnet_id = aws_subnet.my_subnet.id 64 | associate_public_ip_address = true 65 | 66 | tags = { 67 | Name = var.tag 68 | } 69 | } 70 | 71 | # output single values 72 | output "public_ip" { 73 | value = aws_instance.ec2_example.public_ip 74 | } 75 | 76 | # output single values 77 | output "public_dns" { 78 | value = aws_instance.ec2_example.public_dns 79 | } 80 | 81 | # output multiple values 82 | output "instance_ips" { 83 | value = { 84 | public_ip = aws_instance.ec2_example.public_ip 85 | private_ip = aws_instance.ec2_example.private_ip 86 | } 87 | } 88 | 89 | # terraform init 90 | 91 | # terraform plan --var-file="terraform-dev.tfvars" 92 | # terraform apply --var-file="terraform-dev.tfvars" 93 | # terraform destroy --var-file="terraform-dev.tfvars" 94 | 95 | # terraform plan --var-file="terraform-prod.tfvars" 96 | # terraform apply --var-file="terraform-prod.tfvars" 97 | # terraform destroy --var-file="terraform-prod.tfvars" -------------------------------------------------------------------------------- /labs/variables-locals-output/terraform-dev.tfvars: -------------------------------------------------------------------------------- 1 | instance_type = "t2.nano" 2 | tag = "EC2 Instance for DEV" 3 | location = "eu-central-1" 4 | availability_zone = "eu-central-1c" 5 | ami = "ami-0e067cc8a2b58de59" # Ubuntu 20.04 eu-central-1 Frankfurt 6 | -------------------------------------------------------------------------------- /labs/variables-locals-output/terraform-prod.tfvars: -------------------------------------------------------------------------------- 1 | instance_type = "t2.micro" 2 | tag = "EC2 Instance for PROD" 3 | location = "eu-central-1" 4 | availability_zone = "eu-central-1c" 5 | ami = "ami-0d1ddd83282187d18" # Ubuntu 22.04 eu-central-1 Frankfurt 6 | -------------------------------------------------------------------------------- /labs/variables-locals-output/variables.tf: 
-------------------------------------------------------------------------------- 1 | variable "instance_type" { 2 | type = string 3 | description = "EC2 Instance Type" 4 | } 5 | 6 | variable "tag" { 7 | type = string 8 | description = "The tag for the EC2 instance" 9 | } 10 | 11 | variable "location" { 12 | type = string 13 | description = "The project region" 14 | default = "eu-central-1" 15 | } 16 | 17 | variable "availability_zone" { 18 | type = string 19 | description = "The project availability zone" 20 | default = "eu-central-1c" 21 | } 22 | 23 | variable "ami" { 24 | type = string 25 | description = "The project region" 26 | } 27 | 28 | -------------------------------------------------------------------------------- /labs/workspace/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "~> 4.16" 6 | } 7 | } 8 | 9 | required_version = ">= 1.2.0" 10 | } 11 | 12 | provider "aws" { 13 | region = var.location 14 | } 15 | 16 | locals { 17 | tag = "${terraform.workspace} EC2" 18 | } 19 | 20 | resource "aws_instance" "instance" { 21 | ami = var.ami 22 | instance_type = var.instance_type 23 | 24 | tags = { 25 | Name = local.tag 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /labs/workspace/terraform-dev.tfvars: -------------------------------------------------------------------------------- 1 | instance_type = "t2.nano" 2 | location = "eu-central-1" 3 | ami = "ami-0e067cc8a2b58de59" # Ubuntu 20.04 eu-central-1 Frankfurt 4 | -------------------------------------------------------------------------------- /labs/workspace/terraform-prod.tfvars: -------------------------------------------------------------------------------- 1 | instance_type = "t2.micro" 2 | location = "eu-central-1" 3 | ami = "ami-0d1ddd83282187d18" # Ubuntu 22.04 eu-central-1 Frankfurt 4 | 
-------------------------------------------------------------------------------- /labs/workspace/variables.tf: -------------------------------------------------------------------------------- 1 | variable "instance_type" { 2 | type = string 3 | description = "EC2 Instance Type" 4 | } 5 | 6 | variable "location" { 7 | type = string 8 | description = "The project region" 9 | default = "eu-central-1" 10 | } 11 | 12 | variable "ami" { 13 | type = string 14 | description = "The project region" 15 | } 16 | -------------------------------------------------------------------------------- /samples/codecommit-codepipeline-codebuild-codedeploy-lambda-container/lambda_bootstrap/lambda/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM public.ecr.aws/lambda/python:3.8 2 | COPY requirements.txt ${LAMBDA_TASK_ROOT} 3 | RUN pip3 install --no-cache-dir -r requirements.txt 4 | COPY aws-lambda-url.py ${LAMBDA_TASK_ROOT} 5 | 6 | CMD ["aws-lambda-url.lambda_handler"] -------------------------------------------------------------------------------- /samples/codecommit-codepipeline-codebuild-codedeploy-lambda-container/lambda_bootstrap/lambda/aws-lambda-url.py: -------------------------------------------------------------------------------- 1 | from bs4 import BeautifulSoup 2 | import requests 3 | import boto3 4 | 5 | url="https://search.longhornrealty.com/idx/results/listings?pt=4&a_propStatus%5B%5D=Active&ccz=city&idxID=c007&per=25&srt=newest&city%5B%5D=22332&city%5B%5D=45916" 6 | page = requests.get(url) 7 | 8 | 9 | def lambda_handler(event, context): 10 | try: 11 | soup = BeautifulSoup(page.content, "html.parser") 12 | results = soup.find(id="idx-results-category-active") 13 | listings = results.find_all("article") 14 | 15 | for listing in listings: 16 | prices = listing.find( 17 | 'div', {'class': 'idx-listing-card__price'}).get_text() 18 | address_city = listing.find( 19 | 'span', {'class': 
'idx-listing-card__address--City'}).get_text() 20 | print(prices, address_city) 21 | except Exception as e: 22 | print(e) 23 | -------------------------------------------------------------------------------- /samples/codecommit-codepipeline-codebuild-codedeploy-lambda-container/lambda_bootstrap/lambda/docker-test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #aws sts get-caller-identity 4 | 5 | docker build . -t aws-lambda-url:0.0.1 6 | 7 | docker run \ 8 | -p 9000:8080 \ 9 | -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \ 10 | -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \ 11 | -e AWS_SESSION_TOKEN=$AWS_SESSION_TOKEN \ 12 | -e AWS_REGION=$AWS_REGION \ 13 | aws-lambda-url:0.0.1 -------------------------------------------------------------------------------- /samples/codecommit-codepipeline-codebuild-codedeploy-lambda-container/lambda_bootstrap/lambda/requirements.txt: -------------------------------------------------------------------------------- 1 | beautifulsoup4 2 | requests -------------------------------------------------------------------------------- /samples/codecommit-codepipeline-codebuild-codedeploy-lambda-container/lambda_bootstrap/main.tf: -------------------------------------------------------------------------------- 1 | data "aws_ecr_image" "lambda_image_latest" { 2 | repository_name = split("/", var.ecr_repo_url)[1] 3 | image_tag = "latest" 4 | } 5 | 6 | resource "aws_iam_role" "iam_for_lambda" { 7 | name = "${var.env_namespace}_lambda_role" 8 | assume_role_policy = < IGW -> Route Table -> Subnets 2 | terraform { 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = "~> 4.16" 7 | } 8 | } 9 | required_version = ">= 1.2.0" 10 | } 11 | 12 | provider "aws" { 13 | region = "eu-central-1" 14 | } 15 | 16 | resource "aws_vpc" "my_vpc" { 17 | cidr_block = "10.0.0.0/16" 18 | enable_dns_hostnames = true 19 | tags = { 20 | Name = "My VPC" 21 | } 22 | } 23 | 24 | resource
"aws_subnet" "public_subnet_a" { 25 | availability_zone = "eu-central-1a" 26 | vpc_id = aws_vpc.my_vpc.id 27 | cidr_block = "10.0.0.0/24" 28 | tags = { 29 | Name = "Public Subnet A" 30 | } 31 | } 32 | 33 | resource "aws_subnet" "public_subnet_b" { 34 | availability_zone = "eu-central-1b" 35 | vpc_id = aws_vpc.my_vpc.id 36 | cidr_block = "10.0.1.0/24" 37 | tags = { 38 | Name = "Public Subnet B" 39 | } 40 | } 41 | 42 | resource "aws_subnet" "public_subnet_c" { 43 | availability_zone = "eu-central-1c" 44 | vpc_id = aws_vpc.my_vpc.id 45 | cidr_block = "10.0.2.0/24" 46 | tags = { 47 | Name = "Public Subnet C" 48 | } 49 | } 50 | 51 | resource "aws_internet_gateway" "igw" { 52 | vpc_id = aws_vpc.my_vpc.id 53 | tags = { 54 | Name = "My VPC - Internet Gateway" 55 | } 56 | } 57 | 58 | resource "aws_route_table" "route_table" { 59 | vpc_id = aws_vpc.my_vpc.id 60 | route { 61 | cidr_block = "0.0.0.0/0" 62 | gateway_id = aws_internet_gateway.igw.id 63 | } 64 | tags = { 65 | Name = "Public Subnet Route Table" 66 | } 67 | } 68 | 69 | resource "aws_route_table_association" "route_table_association1" { 70 | subnet_id = aws_subnet.public_subnet_a.id 71 | route_table_id = aws_route_table.route_table.id 72 | } 73 | 74 | resource "aws_route_table_association" "route_table_association2" { 75 | subnet_id = aws_subnet.public_subnet_b.id 76 | route_table_id = aws_route_table.route_table.id 77 | } 78 | 79 | resource "aws_route_table_association" "route_table_association3" { 80 | subnet_id = aws_subnet.public_subnet_c.id 81 | route_table_id = aws_route_table.route_table.id 82 | } -------------------------------------------------------------------------------- /samples/ecr-ecs-elb-vpc-ecsservice-container/2_ecs.tf: -------------------------------------------------------------------------------- 1 | # Getting data existed ECR 2 | data "aws_ecr_repository" "flask_app" { 3 | name = "flask-app" 4 | } 5 | 6 | # Creating ECS Cluster 7 | resource "aws_ecs_cluster" "my_cluster" { 8 | name = 
"my-cluster" # Naming the cluster 9 | } 10 | 11 | # Creating ECS Task 12 | resource "aws_ecs_task_definition" "flask_app_task" { 13 | family = "flask-app-task" 14 | container_definitions = < IGW -> LB Security Groups -> Application Load Balancer (Listener 80) -> Target Groups -> ECS Service -> ECS SG -> Tasks on each subnets 2 | 3 | # Creating Load Balancer (LB) 4 | resource "aws_alb" "application_load_balancer" { 5 | name = "test-lb-tf" # Naming our load balancer 6 | load_balancer_type = "application" 7 | subnets = [ 8 | "${aws_subnet.public_subnet_a.id}", 9 | "${aws_subnet.public_subnet_b.id}", 10 | "${aws_subnet.public_subnet_c.id}" 11 | ] 12 | # Referencing the security group 13 | security_groups = ["${aws_security_group.load_balancer_security_group.id}"] 14 | } 15 | 16 | # Creating a security group for LB 17 | resource "aws_security_group" "load_balancer_security_group" { 18 | vpc_id = aws_vpc.my_vpc.id 19 | ingress { 20 | from_port = 80 21 | to_port = 80 22 | protocol = "tcp" 23 | cidr_blocks = ["0.0.0.0/0"] # Allowing traffic in from all sources 24 | } 25 | 26 | egress { 27 | from_port = 0 28 | to_port = 0 29 | protocol = "-1" 30 | cidr_blocks = ["0.0.0.0/0"] 31 | } 32 | } 33 | 34 | # Creating LB Target Group 35 | resource "aws_lb_target_group" "target_group" { 36 | name = "target-group" 37 | port = 80 38 | protocol = "HTTP" 39 | target_type = "ip" 40 | vpc_id = "${aws_vpc.my_vpc.id}" 41 | } 42 | 43 | # Creating LB Listener 44 | resource "aws_lb_listener" "listener" { 45 | load_balancer_arn = "${aws_alb.application_load_balancer.arn}" # Referencing our load balancer 46 | port = "80" 47 | protocol = "HTTP" 48 | default_action { 49 | type = "forward" 50 | target_group_arn = "${aws_lb_target_group.target_group.arn}" # Referencing our target group 51 | } 52 | } -------------------------------------------------------------------------------- /samples/ecr-ecs-elb-vpc-ecsservice-container/4_ecs_service.tf: 
-------------------------------------------------------------------------------- 1 | # Creating ECS Service 2 | resource "aws_ecs_service" "my_first_service" { 3 | name = "my-first-service" # Naming our first service 4 | cluster = "${aws_ecs_cluster.my_cluster.id}" # Referencing our created Cluster 5 | task_definition = "${aws_ecs_task_definition.flask_app_task.arn}" # Referencing the task our service will spin up 6 | launch_type = "FARGATE" 7 | desired_count = 3 # Setting the number of containers to 3 8 | 9 | load_balancer { 10 | target_group_arn = "${aws_lb_target_group.target_group.arn}" # Referencing our target group 11 | container_name = "${aws_ecs_task_definition.flask_app_task.family}" 12 | container_port = 5000 # Specifying the container port 13 | } 14 | 15 | network_configuration { 16 | subnets = ["${aws_subnet.public_subnet_a.id}", "${aws_subnet.public_subnet_b.id}", "${aws_subnet.public_subnet_c.id}"] 17 | assign_public_ip = true # Providing our containers with public IPs 18 | security_groups = ["${aws_security_group.service_security_group.id}"] # Setting the security group 19 | } 20 | } 21 | 22 | # Creating SG for ECS Container Service, referencing the load balancer security group 23 | resource "aws_security_group" "service_security_group" { 24 | vpc_id = aws_vpc.my_vpc.id 25 | ingress { 26 | from_port = 0 27 | to_port = 0 28 | protocol = "-1" 29 | # Only allowing traffic in from the load balancer security group 30 | security_groups = ["${aws_security_group.load_balancer_security_group.id}"] 31 | } 32 | 33 | egress { 34 | from_port = 0 35 | to_port = 0 36 | protocol = "-1" 37 | cidr_blocks = ["0.0.0.0/0"] 38 | } 39 | } 40 | 41 | #Log the load balancer app URL 42 | output "app_url" { 43 | value = aws_alb.application_load_balancer.dns_name 44 | } -------------------------------------------------------------------------------- /samples/ecr-ecs-elb-vpc-ecsservice-container/ecr/0_ecr.tf: 
-------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "~> 4.16" 6 | } 7 | } 8 | required_version = ">= 1.2.0" 9 | } 10 | 11 | # Creating Elastic Container Repository for application 12 | resource "aws_ecr_repository" "flask_app" { 13 | name = "flask-app" 14 | } 15 | 16 | 17 | # aws ecr get-login-password --region REGION | docker login --username AWS --password-stdin ID.dkr.ecr.REGION.amazonaws.com 18 | # docker build -t flask-app . 19 | # docker tag flask-app:latest ID.dkr.REGION.amazonaws.com/flask-app:latest 20 | # docker push ID.dkr.REGION.amazonaws.com/flask-app:latest -------------------------------------------------------------------------------- /samples/ecr-ecs-elb-vpc-ecsservice-container/flask-app/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.8-slim-buster 2 | 3 | WORKDIR /app 4 | 5 | COPY requirements.txt requirements.txt 6 | 7 | RUN pip3 install -r requirements.txt 8 | 9 | COPY app . 10 | 11 | ENV FLASK_APP=app 12 | 13 | ENV FLASK_ENV=development 14 | 15 | EXPOSE 5000 16 | 17 | RUN python init_db.py 18 | 19 | CMD [ "python3", "-m" , "flask", "run", "--host=0.0.0.0","--port","5000"] -------------------------------------------------------------------------------- /samples/ecr-ecs-elb-vpc-ecsservice-container/flask-app/README.md: -------------------------------------------------------------------------------- 1 | ## Flask-App Docker Image 2 | 3 | - Dockerfile is created for this sample. 4 | - App code is open source and taken: 5 | - https://www.digitalocean.com/community/tutorials/how-to-make-a-web-application-using-flask-in-python-3 6 | - https://github.com/do-community/flask_blog 7 | 8 | - To build Linux Container and run on local: 9 | 10 | ``` 11 | docker build -t flask-app . 
12 | docker container run -p 5000:5000 -d flask-app 13 | ``` 14 | 15 | ![image](https://user-images.githubusercontent.com/10358317/232225583-253f20dc-4d95-43b3-a4a7-d156f2d0c886.png) 16 | 17 | 18 | - If you are using WSL2 on Windows, use sensible browser on WSL2 19 | 20 | ``` 21 | sensible-browser http://localhost:5000/ 22 | ``` 23 | 24 | ![image](https://user-images.githubusercontent.com/10358317/232225726-d02927fe-9d64-4fba-b279-ff7c0ec7dbc1.png) 25 | 26 | 27 | - For app, Thank you Digital Ocean! 28 | -------------------------------------------------------------------------------- /samples/ecr-ecs-elb-vpc-ecsservice-container/flask-app/app/app.py: -------------------------------------------------------------------------------- 1 | import sqlite3 2 | from flask import Flask, render_template, request, url_for, flash, redirect 3 | from werkzeug.exceptions import abort 4 | 5 | 6 | def get_db_connection(): 7 | conn = sqlite3.connect('database.db') 8 | conn.row_factory = sqlite3.Row 9 | return conn 10 | 11 | 12 | def get_post(post_id): 13 | conn = get_db_connection() 14 | post = conn.execute('SELECT * FROM posts WHERE id = ?', 15 | (post_id,)).fetchone() 16 | conn.close() 17 | if post is None: 18 | abort(404) 19 | return post 20 | 21 | 22 | app = Flask(__name__) 23 | app.config['SECRET_KEY'] = '99' 24 | 25 | 26 | @app.route('/') 27 | def index(): 28 | conn = get_db_connection() 29 | posts = conn.execute('SELECT * FROM posts').fetchall() 30 | conn.close() 31 | return render_template('index.html', posts=posts) 32 | 33 | 34 | @app.route('/') 35 | def post(post_id): 36 | post = get_post(post_id) 37 | return render_template('post.html', post=post) 38 | 39 | 40 | @app.route('/create', methods=('GET', 'POST')) 41 | def create(): 42 | if request.method == 'POST': 43 | title = request.form['title'] 44 | content = request.form['content'] 45 | 46 | if not title: 47 | flash('Title is required!') 48 | else: 49 | conn = get_db_connection() 50 | conn.execute('INSERT INTO posts 
(title, content) VALUES (?, ?)', 51 | (title, content)) 52 | conn.commit() 53 | conn.close() 54 | return redirect(url_for('index')) 55 | 56 | return render_template('create.html') 57 | 58 | 59 | @app.route('//edit', methods=('GET', 'POST')) 60 | def edit(id): 61 | post = get_post(id) 62 | 63 | if request.method == 'POST': 64 | title = request.form['title'] 65 | content = request.form['content'] 66 | 67 | if not title: 68 | flash('Title is required!') 69 | else: 70 | conn = get_db_connection() 71 | conn.execute('UPDATE posts SET title = ?, content = ?' 72 | ' WHERE id = ?', 73 | (title, content, id)) 74 | conn.commit() 75 | conn.close() 76 | return redirect(url_for('index')) 77 | 78 | return render_template('edit.html', post=post) 79 | 80 | 81 | @app.route('//delete', methods=('POST',)) 82 | def delete(id): 83 | post = get_post(id) 84 | conn = get_db_connection() 85 | conn.execute('DELETE FROM posts WHERE id = ?', (id,)) 86 | conn.commit() 87 | conn.close() 88 | flash('"{}" was successfully deleted!'.format(post['title'])) 89 | return redirect(url_for('index')) 90 | -------------------------------------------------------------------------------- /samples/ecr-ecs-elb-vpc-ecsservice-container/flask-app/app/hello.py: -------------------------------------------------------------------------------- 1 | from flask import Flask, request, escape 2 | 3 | app = Flask(__name__) 4 | 5 | 6 | @app.route('/') 7 | def hello(): 8 | return 'Hello, World!' 9 | 10 | 11 | @app.route('/greet') 12 | def greet(): 13 | name = request.args['name'] 14 | return ''' 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 |

Hi {}

23 | 24 | '''.format(escape(name)) 25 | -------------------------------------------------------------------------------- /samples/ecr-ecs-elb-vpc-ecsservice-container/flask-app/app/init_db.py: -------------------------------------------------------------------------------- 1 | import sqlite3 2 | 3 | connection = sqlite3.connect('database.db') 4 | 5 | 6 | with open('schema.sql') as f: 7 | connection.executescript(f.read()) 8 | 9 | cur = connection.cursor() 10 | 11 | cur.execute("INSERT INTO posts (title, content) VALUES (?, ?)", 12 | ('First Post', 'Content for the first post') 13 | ) 14 | 15 | cur.execute("INSERT INTO posts (title, content) VALUES (?, ?)", 16 | ('Second Post', 'Content for the second post') 17 | ) 18 | 19 | connection.commit() 20 | connection.close() 21 | -------------------------------------------------------------------------------- /samples/ecr-ecs-elb-vpc-ecsservice-container/flask-app/app/schema.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS posts; 2 | 3 | CREATE TABLE posts ( 4 | id INTEGER PRIMARY KEY AUTOINCREMENT, 5 | created TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, 6 | title TEXT NOT NULL, 7 | content TEXT NOT NULL 8 | ); 9 | -------------------------------------------------------------------------------- /samples/ecr-ecs-elb-vpc-ecsservice-container/flask-app/app/static/css/style.css: -------------------------------------------------------------------------------- 1 | h1 { 2 | border: 2px #eee solid; 3 | color: brown; 4 | text-align: center; 5 | padding: 10px; 6 | } 7 | 8 | -------------------------------------------------------------------------------- /samples/ecr-ecs-elb-vpc-ecsservice-container/flask-app/app/templates/base.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | {% block title %} {% endblock %} 12 | 13 | 14 | 30 |
31 | {% for message in get_flashed_messages() %} 32 |
{{ message }}
33 | {% endfor %} 34 | {% block content %} {% endblock %} 35 |
36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | -------------------------------------------------------------------------------- /samples/ecr-ecs-elb-vpc-ecsservice-container/flask-app/app/templates/create.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | 3 | {% block content %} 4 |

{% block title %} Create a New Post {% endblock %}

5 | 6 |
7 |
8 | 9 | 12 |
13 | 14 |
15 | 16 | 18 |
19 |
20 | 21 |
22 |
23 | {% endblock %} 24 | -------------------------------------------------------------------------------- /samples/ecr-ecs-elb-vpc-ecsservice-container/flask-app/app/templates/edit.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | 3 | {% block content %} 4 |

{% block title %} Edit "{{ post['title'] }}" {% endblock %}

5 | 6 |
7 |
8 | 9 | 12 | 13 |
14 | 15 |
16 | 17 | 19 |
20 |
21 | 22 |
23 |
24 |
25 |
26 | 29 |
30 | {% endblock %} 31 | 32 | -------------------------------------------------------------------------------- /samples/ecr-ecs-elb-vpc-ecsservice-container/flask-app/app/templates/index.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | 3 | {% block content %} 4 |

{% block title %} Welcome to FlaskBlog {% endblock %}

5 | {% for post in posts %} 6 | 7 |

{{ post['title'] }}

8 |
9 | {{ post['created'] }} 10 | 11 | Edit 12 | 13 |
14 | {% endfor %} 15 | {% endblock %} 16 | -------------------------------------------------------------------------------- /samples/ecr-ecs-elb-vpc-ecsservice-container/flask-app/app/templates/post.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | 3 | {% block content %} 4 |

{% block title %} {{ post['title'] }} {% endblock %}

5 | {{ post['created'] }} 6 |

{{ post['content'] }}

7 | {% endblock %} 8 | -------------------------------------------------------------------------------- /samples/ecr-ecs-elb-vpc-ecsservice-container/flask-app/requirements.txt: -------------------------------------------------------------------------------- 1 | flask 2 | db-sqlite3 3 | Werkzeug -------------------------------------------------------------------------------- /samples/eks-managed-node-blueprint/README.md: -------------------------------------------------------------------------------- 1 | ## Modules 2 | 3 | - Terraform EKS Module: 4 | - https://registry.terraform.io/modules/terraform-aws-modules/eks/aws/latest 5 | - https://github.com/terraform-aws-modules/terraform-aws-eks 6 | - Kubernetes Addons: 7 | - https://github.com/aws-ia/terraform-aws-eks-blueprints/modules/kubernetes-addons 8 | - Terraform VPC Module: 9 | - https://registry.terraform.io/modules/terraform-aws-modules/vpc/aws/latest 10 | - https://github.com/terraform-aws-modules/terraform-aws-vpc 11 | 12 | -------------------------------------------------------------------------------- /samples/eks-managed-node-blueprint/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = ">= 4.47" 6 | } 7 | kubernetes = { 8 | source = "hashicorp/kubernetes" 9 | version = ">= 2.10" 10 | } 11 | helm = { 12 | source = "hashicorp/helm" 13 | version = ">= 2.4.1" 14 | } 15 | } 16 | required_version = ">= 1.2.0" 17 | } 18 | 19 | provider "aws" { 20 | region = local.region 21 | } 22 | 23 | provider "kubernetes" { 24 | host = module.eks.cluster_endpoint 25 | cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) 26 | token = data.aws_eks_cluster_auth.this.token 27 | } 28 | 29 | provider "helm" { 30 | kubernetes { 31 | host = module.eks.cluster_endpoint 32 | cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) 33 | token = 
data.aws_eks_cluster_auth.this.token 34 | } 35 | } 36 | 37 | data "aws_eks_cluster_auth" "this" { 38 | name = module.eks.cluster_name 39 | } 40 | 41 | data "aws_availability_zones" "available" {} 42 | 43 | locals { 44 | name = basename(path.cwd) 45 | region = "eu-central-1" 46 | 47 | cluster_version = "1.24" 48 | 49 | vpc_cidr = "10.0.0.0/16" 50 | azs = slice(data.aws_availability_zones.available.names, 0, 3) 51 | 52 | tags = { 53 | Blueprint = local.name 54 | GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" 55 | } 56 | } 57 | 58 | ################################################################################ 59 | # Cluster 60 | ################################################################################ 61 | 62 | #tfsec:ignore:aws-eks-enable-control-plane-logging 63 | module "eks" { 64 | source = "terraform-aws-modules/eks/aws" 65 | version = "~> 19.12" 66 | 67 | cluster_name = local.name 68 | cluster_version = local.cluster_version 69 | cluster_endpoint_public_access = true 70 | 71 | # EKS Addons 72 | cluster_addons = { 73 | coredns = {} 74 | kube-proxy = {} 75 | vpc-cni = {} 76 | } 77 | 78 | vpc_id = module.vpc.vpc_id 79 | subnet_ids = module.vpc.private_subnets 80 | 81 | eks_managed_node_groups = { 82 | initial = { 83 | instance_types = ["m5.large"] 84 | 85 | min_size = 1 86 | max_size = 3 87 | desired_size = 2 88 | } 89 | } 90 | 91 | tags = local.tags 92 | } 93 | 94 | ################################################################################ 95 | # Kubernetes Addons 96 | ################################################################################ 97 | 98 | module "eks_blueprints_kubernetes_addons" { 99 | source = "github.com/aws-ia/terraform-aws-eks-blueprints/modules/kubernetes-addons" 100 | 101 | eks_cluster_id = module.eks.cluster_name 102 | eks_cluster_endpoint = module.eks.cluster_endpoint 103 | eks_oidc_provider = module.eks.oidc_provider 104 | eks_cluster_version = module.eks.cluster_version 105 | 106 | # Add-ons 107 | 
enable_metrics_server = true 108 | enable_cluster_autoscaler = true 109 | eks_worker_security_group_id = module.eks.cluster_security_group_id 110 | 111 | tags = local.tags 112 | } 113 | 114 | ################################################################################ 115 | # Supporting Resources 116 | ################################################################################ 117 | 118 | module "vpc" { 119 | source = "terraform-aws-modules/vpc/aws" 120 | version = "~> 4.0" 121 | 122 | name = local.name 123 | cidr = local.vpc_cidr 124 | 125 | azs = local.azs 126 | private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)] 127 | public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)] 128 | 129 | enable_nat_gateway = true 130 | single_nat_gateway = true 131 | 132 | public_subnet_tags = { 133 | "kubernetes.io/role/elb" = 1 134 | } 135 | 136 | private_subnet_tags = { 137 | "kubernetes.io/role/internal-elb" = 1 138 | } 139 | 140 | tags = local.tags 141 | } 142 | 143 | output "configure_kubectl" { 144 | description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" 145 | value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_name}" 146 | } -------------------------------------------------------------------------------- /samples/gitlabserver-on-premise-runner-on-EC2/docker-compose.yml: -------------------------------------------------------------------------------- 1 | # Gitlab Server on WSL2 (Linux) 2 | version: '3.6' 3 | services: 4 | web: 5 | image: 'gitlab/gitlab-ee:latest' 6 | restart: always 7 | hostname: 'gitlab.example.com' 8 | environment: 9 | GITLAB_OMNIBUS_CONFIG: | 10 | external_url 'http://gitlab.example.com' 11 | #external_url 'https://gitlab.example.com' 12 | ports: 13 | - '150:80' 14 | - '443:443' 15 | - '22:22' 16 | volumes: 17 | - '/home/omer/gitlab-tmp/config:/etc/gitlab' 18 | - 
'/home/omer/gitlab-tmp/logs:/var/log/gitlab' 19 | - '/home/omer/gitlab-tmp/data:/var/opt/gitlab' 20 | shm_size: '256m' -------------------------------------------------------------------------------- /samples/gitlabserver-on-premise-runner-on-EC2/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "~> 4.16" 6 | } 7 | } 8 | required_version = ">= 1.2.0" 9 | } 10 | 11 | provider "aws" { 12 | region = "eu-central-1" 13 | } 14 | 15 | resource "aws_vpc" "my_vpc" { 16 | cidr_block = "10.0.0.0/16" 17 | enable_dns_hostnames = true 18 | tags = { 19 | Name = "My VPC" 20 | } 21 | } 22 | 23 | resource "aws_subnet" "public" { 24 | vpc_id = aws_vpc.my_vpc.id 25 | cidr_block = "10.0.0.0/24" 26 | availability_zone = "eu-central-1c" 27 | tags = { 28 | Name = "Public Subnet" 29 | } 30 | } 31 | 32 | resource "aws_internet_gateway" "my_vpc_igw" { 33 | vpc_id = aws_vpc.my_vpc.id 34 | tags = { 35 | Name = "My VPC - Internet Gateway" 36 | } 37 | } 38 | 39 | resource "aws_route_table" "my_vpc_eu_central_1c_public" { 40 | vpc_id = aws_vpc.my_vpc.id 41 | route { 42 | cidr_block = "0.0.0.0/0" 43 | gateway_id = aws_internet_gateway.my_vpc_igw.id 44 | } 45 | tags = { 46 | Name = "Public Subnet Route Table" 47 | } 48 | } 49 | resource "aws_route_table_association" "my_vpc_eu_central_1c_public" { 50 | subnet_id = aws_subnet.public.id 51 | route_table_id = aws_route_table.my_vpc_eu_central_1c_public.id 52 | } 53 | 54 | resource "aws_security_group" "allow_ssh" { 55 | name = "allow_ssh_sg" 56 | description = "Allow SSH inbound connections" 57 | vpc_id = aws_vpc.my_vpc.id 58 | # for SSH 59 | ingress { 60 | from_port = 22 61 | to_port = 22 62 | protocol = "tcp" 63 | cidr_blocks = ["0.0.0.0/0"] 64 | } 65 | # for HTTP 66 | ingress { 67 | from_port = 80 68 | to_port = 80 69 | protocol = "tcp" 70 | cidr_blocks = ["0.0.0.0/0"] 71 | } 72 | # for HTTP 73 | ingress { 74 | 
from_port = 150 75 | to_port = 150 76 | protocol = "tcp" 77 | cidr_blocks = ["0.0.0.0/0"] 78 | } 79 | # for HTTPS 80 | ingress { 81 | from_port = 443 82 | to_port = 443 83 | protocol = "tcp" 84 | cidr_blocks = ["0.0.0.0/0"] 85 | } 86 | # for RDP 87 | ingress { 88 | from_port = 3389 89 | to_port = 3389 90 | protocol = "tcp" 91 | cidr_blocks = ["0.0.0.0/0"] 92 | } 93 | # for ping 94 | ingress { 95 | from_port = -1 96 | to_port = -1 97 | protocol = "icmp" 98 | cidr_blocks = ["10.0.0.0/16"] 99 | } 100 | egress { 101 | from_port = 0 102 | to_port = 0 103 | protocol = "-1" 104 | cidr_blocks = ["0.0.0.0/0"] 105 | } 106 | tags = { 107 | Name = "allow_ssh_sg" 108 | } 109 | } 110 | 111 | resource "aws_instance" "ubuntu2004" { 112 | ami = "ami-0e067cc8a2b58de59" # Ubuntu 20.04 eu-central-1 Frankfurt 113 | instance_type = "t2.micro" 114 | key_name = "testkey" 115 | vpc_security_group_ids = [aws_security_group.allow_ssh.id] 116 | subnet_id = aws_subnet.public.id 117 | associate_public_ip_address = true 118 | user_data = <<-EOF 119 | #! /bin/bash 120 | sudo apt-get update 121 | sudo apt-get install ca-certificates curl gnupg -y 122 | sudo install -m 0755 -d /etc/apt/keyrings 123 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg 124 | sudo chmod a+r /etc/apt/keyrings/docker.gpg 125 | echo \ 126 | "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ 127 | "$(. 
/etc/os-release && echo "$VERSION_CODENAME")" stable" | \ 128 | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null 129 | sudo apt-get update 130 | sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -y 131 | sudo docker run hello-world 132 | curl -L "https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.deb.sh" | sudo bash 133 | sudo apt-get install gitlab-runner 134 | EOF 135 | tags = { 136 | Name = "Ubuntu 20.04" 137 | } 138 | } 139 | 140 | 141 | output "instance_ubuntu2004_public_ip" { 142 | value = "${aws_instance.ubuntu2004.public_ip}" 143 | } 144 | 145 | -------------------------------------------------------------------------------- /samples/gitlabserver-on-premise-runner-on-EC2/test-gitlab-runner/docker-windows/Dockerfile: -------------------------------------------------------------------------------- 1 | # escape=` 2 | 3 | FROM mcr.microsoft.com/windows/servercore:1809 4 | 5 | # Restore the default Windows shell for correct batch processing. 
6 | SHELL ["cmd", "/S", "/C"] 7 | 8 | # install choco (win package manager like apt-get) 9 | RUN @"%SystemRoot%\System32\WindowsPowerShell\v1.0\powershell.exe" -NoProfile -InputFormat None -ExecutionPolicy Bypass -Command "iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))" && SET "PATH=%PATH%;%ALLUSERSPROFILE%\chocolatey\bin" 10 | 11 | # install python3.7 12 | RUN choco install -y python --version=3.7.2 ` 13 | && set PATH=%PATH%;C:\Python37\ 14 | 15 | RUN choco install pwsh --version=7.3.3 -y 16 | 17 | CMD ["powershell.exe", "-NoLogo", "-ExecutionPolicy", "Bypass"] -------------------------------------------------------------------------------- /samples/gitlabserver-on-premise-runner-on-EC2/test-gitlab-runner/gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | .image-linux: &image-linux 2 | image: 'python:3.7' 3 | 4 | .image-windows: &image-windows 5 | image: 'omerbsezer/python:3.7-windowsservercore-1809' 6 | 7 | stages: 8 | - build_debug 9 | - run_test 10 | 11 | workflow: 12 | rules: 13 | - if: $CI_COMMIT_BRANCH == "main" 14 | 15 | .build_linux: &build_linux 16 | <<: *image-linux 17 | script: 18 | - python --version 19 | - pip install -r requirements.txt 20 | - pwd 21 | - pylint --version 22 | - pylint -d C0301 src/*.py 23 | tags: 24 | - ec2-shared 25 | 26 | .build_windows: &build_windows 27 | <<: *image-windows 28 | script: 29 | - whoami 30 | - python --version 31 | - pip install -r requirements.txt 32 | - pwd 33 | - pylint --version 34 | - cd src/; pylint -d C0301 main.py 35 | tags: 36 | - ec2-shared-windows 37 | 38 | build_linux_debug: 39 | <<: *build_linux 40 | stage: build_debug 41 | 42 | build_windows_debug: 43 | extends: .build_windows 44 | stage: build_debug 45 | 46 | run_linux_test: 47 | <<: *image-linux 48 | stage: run_test 49 | needs: 50 | - build_linux_debug 51 | script: 52 | - python --version 53 | - pip install -r requirements.txt 54 | - cd test/;pytest -v 55 | 
coverage: '/lines: \d+\.\d+/' 56 | tags: 57 | - ec2-shared 58 | 59 | run_windows_test: 60 | <<: *image-windows 61 | stage: run_test 62 | needs: 63 | - build_windows_debug 64 | script: 65 | - python --version 66 | - pip install -r requirements.txt 67 | - cd test/;pytest -v 68 | tags: 69 | - ec2-shared-windows 70 | 71 | 72 | 73 | 74 | -------------------------------------------------------------------------------- /samples/gitlabserver-on-premise-runner-on-EC2/test-gitlab-runner/requirements.txt: -------------------------------------------------------------------------------- 1 | pylint==2.4.4 2 | pytest==5.3.5 -------------------------------------------------------------------------------- /samples/gitlabserver-on-premise-runner-on-EC2/test-gitlab-runner/src/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/omerbsezer/Fast-Terraform/5ff4bbac8abd87430a6b9184339071a1c0ec6158/samples/gitlabserver-on-premise-runner-on-EC2/test-gitlab-runner/src/__init__.py -------------------------------------------------------------------------------- /samples/gitlabserver-on-premise-runner-on-EC2/test-gitlab-runner/src/main.py: -------------------------------------------------------------------------------- 1 | ''' 2 | This file is example 3 | ''' 4 | 5 | def func(number: int) -> int: 6 | ''' 7 | This function increases 8 | ''' 9 | return number + 1 10 | 11 | if __name__ == '__main__': 12 | print('Running Python File...') 13 | A = 3 14 | print(f'{A} + 1 =', func(A)) 15 | -------------------------------------------------------------------------------- /samples/gitlabserver-on-premise-runner-on-EC2/test-gitlab-runner/test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/omerbsezer/Fast-Terraform/5ff4bbac8abd87430a6b9184339071a1c0ec6158/samples/gitlabserver-on-premise-runner-on-EC2/test-gitlab-runner/test/__init__.py 
-------------------------------------------------------------------------------- /samples/gitlabserver-on-premise-runner-on-EC2/test-gitlab-runner/test/test_main.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from src.main import func 3 | 4 | def test_answer(): 5 | assert func(3) == 4 6 | -------------------------------------------------------------------------------- /samples/lambda-container-apigateway-flaskapp/1_lambda.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "~> 4.16" 6 | } 7 | } 8 | required_version = ">= 1.2.0" 9 | } 10 | 11 | # Create IAM Role for lambda 12 | resource "aws_iam_role" "lambda_role" { 13 | name = "aws_lambda_role" 14 | assume_role_policy = <') 43 | def post(post_id): 44 | post = get_post(post_id) 45 | return render_template('post.html', post=post) 46 | 47 | 48 | @app.route('/create', methods=('GET', 'POST')) 49 | def create(): 50 | if request.method == 'POST': 51 | title = request.form['title'] 52 | content = request.form['content'] 53 | 54 | if not title: 55 | flash('Title is required!') 56 | else: 57 | conn = get_db_connection() 58 | conn.execute('INSERT INTO posts (title, content) VALUES (?, ?)', 59 | (title, content)) 60 | conn.commit() 61 | conn.close() 62 | return redirect(url_for('index')) 63 | 64 | return render_template('create.html') 65 | 66 | 67 | @app.route('//edit', methods=('GET', 'POST')) 68 | def edit(id): 69 | post = get_post(id) 70 | 71 | if request.method == 'POST': 72 | title = request.form['title'] 73 | content = request.form['content'] 74 | 75 | if not title: 76 | flash('Title is required!') 77 | else: 78 | conn = get_db_connection() 79 | conn.execute('UPDATE posts SET title = ?, content = ?' 
80 | ' WHERE id = ?', 81 | (title, content, id)) 82 | conn.commit() 83 | conn.close() 84 | return redirect(url_for('index')) 85 | 86 | return render_template('edit.html', post=post) 87 | 88 | 89 | @app.route('//delete', methods=('POST',)) 90 | def delete(id): 91 | post = get_post(id) 92 | conn = get_db_connection() 93 | conn.execute('DELETE FROM posts WHERE id = ?', (id,)) 94 | conn.commit() 95 | conn.close() 96 | flash('"{}" was successfully deleted!'.format(post['title'])) 97 | return redirect(url_for('index')) 98 | 99 | 100 | def handler(event, context): 101 | return serverless_wsgi.handle_request(app, event, context) 102 | -------------------------------------------------------------------------------- /samples/lambda-container-apigateway-flaskapp/flask-app-serverless/app/hello.py: -------------------------------------------------------------------------------- 1 | from flask import Flask, request, escape 2 | 3 | app = Flask(__name__) 4 | 5 | 6 | @app.route('/') 7 | def hello(): 8 | return 'Hello, World!' 9 | 10 | 11 | @app.route('/greet') 12 | def greet(): 13 | name = request.args['name'] 14 | return ''' 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 |

Hi {}

23 | 24 | '''.format(escape(name)) 25 | -------------------------------------------------------------------------------- /samples/lambda-container-apigateway-flaskapp/flask-app-serverless/app/init_db.py: -------------------------------------------------------------------------------- 1 | import sqlite3 2 | 3 | connection = sqlite3.connect('database.db') 4 | 5 | 6 | with open('schema.sql') as f: 7 | connection.executescript(f.read()) 8 | 9 | cur = connection.cursor() 10 | 11 | cur.execute("INSERT INTO posts (title, content) VALUES (?, ?)", 12 | ('First Post', 'Content for the first post') 13 | ) 14 | 15 | cur.execute("INSERT INTO posts (title, content) VALUES (?, ?)", 16 | ('Second Post', 'Content for the second post') 17 | ) 18 | 19 | connection.commit() 20 | connection.close() 21 | -------------------------------------------------------------------------------- /samples/lambda-container-apigateway-flaskapp/flask-app-serverless/app/schema.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS posts; 2 | 3 | CREATE TABLE posts ( 4 | id INTEGER PRIMARY KEY AUTOINCREMENT, 5 | created TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, 6 | title TEXT NOT NULL, 7 | content TEXT NOT NULL 8 | ); 9 | -------------------------------------------------------------------------------- /samples/lambda-container-apigateway-flaskapp/flask-app-serverless/app/static/css/style.css: -------------------------------------------------------------------------------- 1 | h1 { 2 | border: 2px #eee solid; 3 | color: brown; 4 | text-align: center; 5 | padding: 10px; 6 | } 7 | 8 | -------------------------------------------------------------------------------- /samples/lambda-container-apigateway-flaskapp/flask-app-serverless/app/templates/base.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | {% block title %} {% endblock %} 12 | 13 | 14 | 30 |
31 | {% for message in get_flashed_messages() %} 32 |
{{ message }}
33 | {% endfor %} 34 | {% block content %} {% endblock %} 35 |
36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | -------------------------------------------------------------------------------- /samples/lambda-container-apigateway-flaskapp/flask-app-serverless/app/templates/create.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | 3 | {% block content %} 4 |

{% block title %} Create a New Post {% endblock %}

5 | 6 |
7 |
8 | 9 | 12 |
13 | 14 |
15 | 16 | 18 |
19 |
20 | 21 |
22 |
23 | {% endblock %} 24 | -------------------------------------------------------------------------------- /samples/lambda-container-apigateway-flaskapp/flask-app-serverless/app/templates/edit.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | 3 | {% block content %} 4 |

{% block title %} Edit "{{ post['title'] }}" {% endblock %}

5 | 6 |
7 |
8 | 9 | 12 | 13 |
14 | 15 |
16 | 17 | 19 |
20 |
21 | 22 |
23 |
24 |
25 |
26 | 29 |
30 | {% endblock %} 31 | 32 | -------------------------------------------------------------------------------- /samples/lambda-container-apigateway-flaskapp/flask-app-serverless/app/templates/index.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | 3 | {% block content %} 4 |

{% block title %} Welcome to FlaskBlog {% endblock %}

5 | {% for post in posts %} 6 | 7 |

{{ post['title'] }}

8 |
9 | {{ post['created'] }} 10 | 11 | Edit 12 | 13 |
14 | {% endfor %} 15 | {% endblock %} 16 | -------------------------------------------------------------------------------- /samples/lambda-container-apigateway-flaskapp/flask-app-serverless/app/templates/post.html: -------------------------------------------------------------------------------- 1 | {% extends 'base.html' %} 2 | 3 | {% block content %} 4 |

{% block title %} {{ post['title'] }} {% endblock %}

5 | {{ post['created'] }} 6 |

{{ post['content'] }}

7 | {% endblock %} 8 | -------------------------------------------------------------------------------- /samples/lambda-container-apigateway-flaskapp/flask-app-serverless/requirements.txt: -------------------------------------------------------------------------------- 1 | flask 2 | db-sqlite3 3 | Werkzeug 4 | serverless-wsgi>=2.0.2 5 | ushlex -------------------------------------------------------------------------------- /samples/lambda-role-policy-apigateway-python/api-gateway.tf: -------------------------------------------------------------------------------- 1 | # Create API Gateway with Rest API type 2 | resource "aws_api_gateway_rest_api" "example" { 3 | name = "Serverless" 4 | description = "Serverless Application using Terraform" 5 | } 6 | 7 | # Defines a resource in the API Gateway that will capture any request path. The path_part = "{proxy+}" allows API Gateway to match all requests that have any path pattern, enabling dynamic routing. 8 | resource "aws_api_gateway_resource" "proxy" { 9 | rest_api_id = aws_api_gateway_rest_api.example.id 10 | parent_id = aws_api_gateway_rest_api.example.root_resource_id 11 | path_part = "{proxy+}" # with proxy, this resource will match any request path 12 | } 13 | 14 | # Configures to allow any HTTP method (GET, POST, DELETE, etc.) and does not require any specific authorization. It's set for the previously defined proxy resource. 15 | resource "aws_api_gateway_method" "proxy" { 16 | rest_api_id = aws_api_gateway_rest_api.example.id 17 | resource_id = aws_api_gateway_resource.proxy.id 18 | http_method = "ANY" # with ANY, it allows any request method to be used, all incoming requests will match this resource 19 | authorization = "NONE" 20 | } 21 | 22 | # API Gateway - Lambda Connection 23 | # The AWS_PROXY type means API Gateway will directly pass the request details (method, headers, body, path parameters, etc.) 
to the Lambda function 24 | resource "aws_api_gateway_integration" "lambda" { 25 | rest_api_id = aws_api_gateway_rest_api.example.id 26 | resource_id = aws_api_gateway_method.proxy.resource_id 27 | http_method = aws_api_gateway_method.proxy.http_method 28 | integration_http_method = "POST" 29 | type = "AWS_PROXY" # With AWS_PROXY, it causes API gateway to call into the API of another AWS service 30 | uri = aws_lambda_function.lambda_function.invoke_arn 31 | } 32 | 33 | # The proxy resource cannot match an empty path at the root of the API. API GW doesn't route requests to the root (/) path using the proxy, so a separate method and integration for the root resource are required. 34 | # To handle that, a similar configuration must be applied to the root resource that is built in to the REST API object. 35 | # This ensures that requests to the root (e.g., https://api.example.com/) are also forwarded to the Lambda function. 36 | resource "aws_api_gateway_method" "proxy_root" { 37 | rest_api_id = aws_api_gateway_rest_api.example.id 38 | resource_id = aws_api_gateway_rest_api.example.root_resource_id 39 | http_method = "ANY" 40 | authorization = "NONE" 41 | } 42 | 43 | resource "aws_api_gateway_integration" "lambda_root" { 44 | rest_api_id = aws_api_gateway_rest_api.example.id 45 | resource_id = aws_api_gateway_method.proxy_root.resource_id 46 | http_method = aws_api_gateway_method.proxy_root.http_method 47 | integration_http_method = "POST" 48 | type = "AWS_PROXY" # With AWS_PROXY, it causes API gateway to call into the API of another AWS service 49 | uri = aws_lambda_function.lambda_function.invoke_arn 50 | } 51 | 52 | # Deploy API Gateway 53 | # Deploys the API to the specified stage (test stage). The depends_on ensures that the API is not deployed until both the Lambda integrations (for proxy and root) are complete. 
54 | resource "aws_api_gateway_deployment" "example" { 55 | depends_on = [ 56 | aws_api_gateway_integration.lambda, 57 | aws_api_gateway_integration.lambda_root, 58 | ] 59 | rest_api_id = aws_api_gateway_rest_api.example.id 60 | stage_name = "test" 61 | } 62 | 63 | # Output to the URL 64 | output "base_url" { 65 | value = aws_api_gateway_deployment.example.invoke_url 66 | } 67 | -------------------------------------------------------------------------------- /samples/lambda-role-policy-apigateway-python/code/main.py: -------------------------------------------------------------------------------- 1 | def lambda_handler(event, context): 2 | content = """ 3 | 4 |

Hello Website running on Lambda! Deployed via Terraform

5 | 6 | """ 7 | response ={ 8 | "statusCode": 200, 9 | "body": content, 10 | "headers": {"Content-Type": "text/html",}, 11 | } 12 | return response -------------------------------------------------------------------------------- /samples/lambda-role-policy-apigateway-python/lambda.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "~> 4.16" 6 | } 7 | } 8 | required_version = ">= 1.2.0" 9 | } 10 | 11 | # Create IAM Role for lambda 12 | # The assume_role_policy defines who or what can assume this role. In this case, it allows lambda to assume the role, which is necessary for Lambda functions to execute with this role's permissions. 13 | # The policy allows STS (Security Token Service) to manage temporary credentials for the Lambda service. 14 | resource "aws_iam_role" "lambda_role" { 15 | name = "aws_lambda_role" 16 | assume_role_policy = < check only Python 3.7 compatible 31 | FI50, 32 | FI51, 33 | FI52, 34 | FI53, 35 | FI54, 36 | FI55, 37 | FI56, 38 | FI57, 39 | W503 40 | 41 | require-code = True 42 | 43 | [testenv] 44 | commands = 45 | pytest --cov=pipelines --cov-append {posargs} 46 | coverage report --fail-under=0 47 | deps = .[test] 48 | depends = 49 | {py36,py37,py38}: clean 50 | 51 | [testenv:flake8] 52 | skipdist = true 53 | skip_install = true 54 | deps = flake8 55 | commands = flake8 56 | 57 | [testenv:black-format] 58 | deps = black 59 | commands = 60 | black -l 100 ./ 61 | 62 | [testenv:black-check] 63 | deps = black 64 | commands = 65 | black -l 100 --check ./ 66 | 67 | [testenv:clean] 68 | skip_install = true 69 | deps = coverage 70 | commands = coverage erase 71 | 72 | [testenv:pydocstyle] 73 | deps = pydocstyle 74 | commands = 75 | pydocstyle pipelines 76 | -------------------------------------------------------------------------------- 
/samples/mlops-sagemaker-github-codepipeline-codebuild-codedeploy/modeldeploy_pipeline/README.md: -------------------------------------------------------------------------------- 1 | # modeldeploy_pipeline 2 | -------------------------------------------------------------------------------- /samples/mlops-sagemaker-github-codepipeline-codebuild-codedeploy/modeldeploy_pipeline/endpoint-config-template.yml: -------------------------------------------------------------------------------- 1 | Description: 2 | This template is built and deployed by the infrastructure pipeline in various stages (staging/production) as required. 3 | It specifies the resources that need to be created, like the SageMaker Endpoint. It can be extended to include resources like 4 | AutoScalingPolicy, API Gateway, etc,. as required. 5 | Parameters: 6 | SageMakerProjectName: 7 | Type: String 8 | Description: Name of the project 9 | MinLength: 1 10 | MaxLength: 32 11 | AllowedPattern: ^[a-zA-Z](-*[a-zA-Z0-9])* 12 | ModelExecutionRoleArn: 13 | Type: String 14 | Description: Execution role used for deploying the model. 15 | ModelPackageName: 16 | Type: String 17 | Description: The trained Model Package Name 18 | StageName: 19 | Type: String 20 | Description: 21 | The name for a project pipeline stage, such as Staging or Prod, for 22 | which resources are provisioned and deployed. 23 | EndpointInstanceCount: 24 | Type: Number 25 | Description: Number of instances to launch for the endpoint. 26 | MinValue: 1 27 | EndpointInstanceType: 28 | Type: String 29 | Description: The ML compute instance type for the endpoint. 
30 | 31 | Resources: 32 | Model: 33 | Type: AWS::SageMaker::Model 34 | Properties: 35 | PrimaryContainer: 36 | ModelPackageName: !Ref ModelPackageName 37 | ExecutionRoleArn: !Ref ModelExecutionRoleArn 38 | 39 | EndpointConfig: 40 | Type: AWS::SageMaker::EndpointConfig 41 | Properties: 42 | ProductionVariants: 43 | - InitialInstanceCount: !Ref EndpointInstanceCount 44 | InitialVariantWeight: 1.0 45 | InstanceType: !Ref EndpointInstanceType 46 | ModelName: !GetAtt Model.ModelName 47 | VariantName: AllTraffic 48 | 49 | Endpoint: 50 | Type: AWS::SageMaker::Endpoint 51 | Properties: 52 | EndpointName: !Sub ${SageMakerProjectName}-${StageName} 53 | EndpointConfigName: !GetAtt EndpointConfig.EndpointConfigName 54 | -------------------------------------------------------------------------------- /samples/mlops-sagemaker-github-codepipeline-codebuild-codedeploy/modeldeploy_pipeline/fix_model_permission.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import boto3 3 | import json 4 | import os 5 | import logging 6 | from botocore.exceptions import ClientError 7 | 8 | # this script is a workaround to fix some permission issues with the file 9 | # created for the model and stored in an S3 bucket 10 | 11 | s3_client = boto3.client('s3') 12 | sm_client = boto3.client('sagemaker') 13 | 14 | if __name__ == "__main__": 15 | parser = argparse.ArgumentParser() 16 | parser.add_argument("--log-level", type=str, default=os.environ.get("LOGLEVEL", "INFO").upper()) 17 | parser.add_argument("--prod-config-file", type=str, default="prod-config-export.json") 18 | 19 | args, _ = parser.parse_known_args() 20 | 21 | # Configure logging to output the line number and message 22 | log_format = "%(levelname)s: [%(filename)s:%(lineno)s] %(message)s" 23 | logging.basicConfig(format=log_format, level=args.log_level) 24 | 25 | # first retrieve the name of the package that will be deployed 26 | model_package_name = None 27 | with 
open(args.prod_config_file, 'r') as f: 28 | for param in json.loads(f.read()): 29 | if param.get('ParameterKey') == 'ModelPackageName': 30 | model_package_name = param.get('ParameterValue') 31 | if model_package_name is None: 32 | raise Exception("Configuration file must include ModelPackageName parameter") 33 | 34 | # then, describe it to get the S3 URL of the model 35 | resp = sm_client.describe_model_package(ModelPackageName=model_package_name) 36 | model_data_url = resp['InferenceSpecification']['Containers'][0]['ModelDataUrl'] 37 | _,_,bucket_name,key = model_data_url.split('/', 3) 38 | 39 | # finally, copy the file to override the permissions 40 | with open('/tmp/model.tar.gz', 'wb') as data: 41 | s3_client.download_fileobj(bucket_name, key, data) 42 | with open('/tmp/model.tar.gz', 'rb') as data: 43 | s3_client.upload_fileobj(data, bucket_name, key) 44 | 45 | -------------------------------------------------------------------------------- /samples/mlops-sagemaker-github-codepipeline-codebuild-codedeploy/modeldeploy_pipeline/prod-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "Parameters": { 3 | "StageName": "prod-0306", 4 | "EndpointInstanceCount": "1", 5 | "EndpointInstanceType": "ml.m5.large" 6 | } 7 | } -------------------------------------------------------------------------------- /samples/mlops-sagemaker-github-codepipeline-codebuild-codedeploy/modeldeploy_pipeline/setup.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import logging 4 | import os 5 | import argparse 6 | import boto3 7 | from botocore.exceptions import ClientError 8 | 9 | logger = logging.getLogger(__name__) 10 | sm_client = boto3.client("sagemaker") 11 | org_client = boto3.client("organizations") 12 | 13 | if __name__ == "__main__": 14 | parser = argparse.ArgumentParser() 15 | parser.add_argument("--log-level", type=str, 
default=os.environ.get("LOGLEVEL", "INFO").upper()) 16 | parser.add_argument("--sagemaker-project-id", type=str, required=True) 17 | parser.add_argument("--sagemaker-project-name", type=str, required=True) 18 | parser.add_argument("--model-package-group-name", type=str, required=True) 19 | parser.add_argument("--organizational-unit-staging-id", type=str, required=True) 20 | parser.add_argument("--organizational-unit-prod-id", type=str, required=True) 21 | 22 | args, _ = parser.parse_known_args() 23 | 24 | # Configure logging to output the line number and message 25 | log_format = "%(levelname)s: [%(filename)s:%(lineno)s] %(message)s" 26 | logging.basicConfig(format=log_format, level=args.log_level) 27 | model_package_group_arn = None 28 | # Create model package group if necessary 29 | try: 30 | # check if the model package group exists 31 | resp = sm_client.describe_model_package_group( 32 | ModelPackageGroupName=args.model_package_group_name) 33 | model_package_group_arn = resp['ModelPackageGroupArn'] 34 | except ClientError as e: 35 | if e.response['Error']['Code'] == 'ValidationException': 36 | # it doesn't exist, lets create a new one 37 | resp = sm_client.create_model_package_group( 38 | ModelPackageGroupName=args.model_package_group_name, 39 | ModelPackageGroupDescription="Multi account model group", 40 | Tags=[ 41 | {'Key': 'sagemaker:project-name', 'Value': args.sagemaker_project_name}, 42 | {'Key': 'sagemaker:project-id', 'Value': args.sagemaker_project_id}, 43 | ] 44 | ) 45 | model_package_group_arn = resp['ModelPackageGroupArn'] 46 | else: 47 | raise e 48 | staging_ou_id = args.organizational_unit_staging_id 49 | prod_ou_id = args.organizational_unit_prod_id 50 | 51 | # finally, we need to update the model package group policy 52 | # Get the account principals based on staging and prod ids 53 | staging_accounts = [i['Id'] for i in org_client.list_accounts_for_parent(ParentId=staging_ou_id)['Accounts']] 54 | prod_accounts = [i['Id'] for i in 
org_client.list_accounts_for_parent(ParentId=prod_ou_id)['Accounts']] 55 | # update the policy 56 | sm_client.put_model_package_group_policy( 57 | ModelPackageGroupName=args.model_package_group_name, 58 | ResourcePolicy=json.dumps({ 59 | 'Version': '2012-10-17', 60 | 'Statement': [{ 61 | 'Sid': 'Stmt1527884065456', 62 | 'Effect': 'Allow', 63 | 'Principal': {'AWS': ['arn:aws:iam::%s:root' % i for i in staging_accounts + prod_accounts] }, 64 | 'Action': 'sagemaker:CreateModel', 65 | 'Resource': '%s/*' % model_package_group_arn.replace('model-package-group', 'model-package') 66 | }] 67 | }) 68 | ) 69 | 70 | 71 | -------------------------------------------------------------------------------- /samples/mlops-sagemaker-github-codepipeline-codebuild-codedeploy/modeldeploy_pipeline/staging-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "Parameters": { 3 | "StageName": "staging-0306", 4 | "EndpointInstanceCount": "1", 5 | "EndpointInstanceType": "ml.m5.large" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /samples/mlops-sagemaker-github-codepipeline-codebuild-codedeploy/modeldeploy_pipeline/test/test.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import logging 4 | import os 5 | 6 | import boto3 7 | from botocore.exceptions import ClientError 8 | 9 | logger = logging.getLogger(__name__) 10 | sm_client = boto3.client("sagemaker") 11 | 12 | 13 | def invoke_endpoint(endpoint_name): 14 | """ 15 | Add custom logic here to invoke the endpoint and validate response 16 | """ 17 | return {"endpoint_name": endpoint_name, "success": True} 18 | 19 | 20 | def test_endpoint(endpoint_name): 21 | """ 22 | Describe the endpoint and ensure InService, then invoke endpoint. Raises exception on error. 
 23 | """ 24 | error_message = None 25 | try: 26 | # Ensure endpoint is in service 27 | response = sm_client.describe_endpoint(EndpointName=endpoint_name) 28 | status = response["EndpointStatus"] 29 | if status != "InService": 30 | error_message = f"SageMaker endpoint: {endpoint_name} status: {status} not InService" 31 | logger.error(error_message) 32 | raise Exception(error_message) 33 | 34 | # Output if endpoint has data capture enabled 35 | endpoint_config_name = response["EndpointConfigName"] 36 | response = sm_client.describe_endpoint_config(EndpointConfigName=endpoint_config_name) 37 | if "DataCaptureConfig" in response and response["DataCaptureConfig"]["EnableCapture"]: 38 | logger.info(f"data capture enabled for endpoint config {endpoint_config_name}") 39 | 40 | # Call endpoint to handle 41 | return invoke_endpoint(endpoint_name) 42 | except ClientError as e: 43 | error_message = e.response["Error"]["Message"] 44 | logger.error(error_message) 45 | raise Exception(error_message) 46 | 47 | 48 | if __name__ == "__main__": 49 | parser = argparse.ArgumentParser() 50 | parser.add_argument("--log-level", type=str, default=os.environ.get("LOGLEVEL", "INFO").upper()) 51 | parser.add_argument("--import-build-config", type=str, required=True) 52 | parser.add_argument("--export-test-results", type=str, required=True) 53 | args, _ = parser.parse_known_args() 54 | 55 | # Configure logging to output the line number and message 56 | log_format = "%(levelname)s: [%(filename)s:%(lineno)s] %(message)s" 57 | logging.basicConfig(format=log_format, level=args.log_level) 58 | 59 | # Load the build config 60 | with open(args.import_build_config, "r") as f: 61 | config = json.load(f) 62 | 63 | # Get the endpoint name from sagemaker project name 64 | endpoint_name = "{}-{}".format( 65 | config["Parameters"]["SageMakerProjectName"], config["Parameters"]["StageName"] 66 | ) 67 | results = test_endpoint(endpoint_name) 68 | 69 | # Print results and write to file 70 | 
logger.debug(json.dumps(results, indent=4)) 71 | with open(args.export_test_results, "w") as f: 72 | json.dump(results, f, indent=4) 73 | -------------------------------------------------------------------------------- /samples/mlops-sagemaker-github-codepipeline-codebuild-codedeploy/modeldeploy_pipeline/test/test_buildspec.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | phases: 4 | install: 5 | runtime-versions: 6 | python: 3.8 7 | build: 8 | commands: 9 | # Call the test python code 10 | - python test/test.py --import-build-config $CODEBUILD_SRC_DIR_BuildArtifact/staging-config-export.json --export-test-results ${EXPORT_TEST_RESULTS} 11 | # Show the test results file 12 | - cat ${EXPORT_TEST_RESULTS} 13 | 14 | artifacts: 15 | files: 16 | - ${EXPORT_TEST_RESULTS} 17 | -------------------------------------------------------------------------------- /samples/mlops-sagemaker-github-codepipeline-codebuild-codedeploy/modeldeploy_pipeline/test/test_buildspec_singleaccount.yml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | 3 | phases: 4 | install: 5 | runtime-versions: 6 | python: 3.8 7 | build: 8 | commands: 9 | # Call the test python code 10 | - python test/test.py --import-build-config $CODEBUILD_SRC_DIR_BuildArtifact/staging-config-export.json --export-test-results ${EXPORT_TEST_RESULTS} 11 | # Show the test results file 12 | - cat ${EXPORT_TEST_RESULTS} 13 | 14 | artifacts: 15 | files: 16 | - ${EXPORT_TEST_RESULTS} 17 | -------------------------------------------------------------------------------- /samples/mlops-sagemaker-github-codepipeline-codebuild-codedeploy/modeldeploy_pipeline/test/test_singleaccount.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import logging 4 | import os 5 | 6 | import boto3 7 | from botocore.exceptions import ClientError 8 | 9 | logger = 
logging.getLogger(__name__) 10 | sm_client = boto3.client("sagemaker") 11 | 12 | 13 | def invoke_endpoint(endpoint_name): 14 | """ 15 | Add custom logic here to invoke the endpoint and validate response 16 | """ 17 | return {"endpoint_name": endpoint_name, "success": True} 18 | 19 | 20 | def test_endpoint(endpoint_name): 21 | """ 22 | Describe the endpoint and ensure InService, then invoke endpoint. Raises exception on error. 23 | """ 24 | error_message = None 25 | try: 26 | # Ensure endpoint is in service 27 | response = sm_client.describe_endpoint(EndpointName=endpoint_name) 28 | status = response["EndpointStatus"] 29 | if status != "InService": 30 | error_message = f"SageMaker endpoint: {endpoint_name} status: {status} not InService" 31 | logger.error(error_message) 32 | raise Exception(error_message) 33 | 34 | # Output if endpoint has data capture enabled 35 | endpoint_config_name = response["EndpointConfigName"] 36 | response = sm_client.describe_endpoint_config(EndpointConfigName=endpoint_config_name) 37 | if "DataCaptureConfig" in response and response["DataCaptureConfig"]["EnableCapture"]: 38 | logger.info(f"data capture enabled for endpoint config {endpoint_config_name}") 39 | 40 | # Call endpoint to handle 41 | return invoke_endpoint(endpoint_name) 42 | except ClientError as e: 43 | error_message = e.response["Error"]["Message"] 44 | logger.error(error_message) 45 | raise Exception(error_message) 46 | 47 | 48 | if __name__ == "__main__": 49 | parser = argparse.ArgumentParser() 50 | parser.add_argument("--log-level", type=str, default=os.environ.get("LOGLEVEL", "INFO").upper()) 51 | parser.add_argument("--import-build-config", type=str, required=True) 52 | parser.add_argument("--export-test-results", type=str, required=True) 53 | args, _ = parser.parse_known_args() 54 | 55 | # Configure logging to output the line number and message 56 | log_format = "%(levelname)s: [%(filename)s:%(lineno)s] %(message)s" 57 | logging.basicConfig(format=log_format, 
level=args.log_level) 58 | 59 | # Load the build config 60 | with open(args.import_build_config, "r") as f: 61 | config = json.load(f) 62 | 63 | # Get the endpoint name from sagemaker project name 64 | endpoint_name = "{}-{}".format( 65 | config["Parameters"]["SageMakerProjectName"], config["Parameters"]["StageName"] 66 | ) 67 | results = test_endpoint(endpoint_name) 68 | 69 | # Print results and write to file 70 | logger.debug(json.dumps(results, indent=4)) 71 | with open(args.export_test_results, "w") as f: 72 | json.dump(results, f, indent=4) 73 | -------------------------------------------------------------------------------- /samples/mlops-sagemaker-github-codepipeline-codebuild-codedeploy/terraform/events.tf: -------------------------------------------------------------------------------- 1 | resource "aws_cloudwatch_event_rule" "sm_model_registry_rule" { 2 | name = "sm-model-registry-event-rule" 3 | description = "Capture new model registry" 4 | 5 | event_pattern = <" 2 | env = "dev" 3 | project_name = "" #"aws-ml-11052023" 4 | project_id = "" #"04052023" 5 | region = "us-east-1" 6 | repository_owner = "" 7 | build_repository_name = "modelbuild_pipeline" 8 | deploy_repository_name = "modeldeploy_pipeline" 9 | artifacts_bucket_name = "" # "artifact-ml-11052023" #join("-", [var.project_name, var.project_id, var.env]) 10 | github_token = "" # to pull modelbuild and modeldeploy -------------------------------------------------------------------------------- /samples/mlops-sagemaker-github-codepipeline-codebuild-codedeploy/terraform/variables.tf: -------------------------------------------------------------------------------- 1 | variable "repository_branch" { 2 | description = "Repository branch to connect to" 3 | default = "" 4 | } 5 | variable "env" { 6 | description = "Deployment environment" 7 | default = "dev" 8 | } 9 | variable "project_name" { 10 | description = "Project name" 11 | default = "" 12 | } 13 | variable "project_id" { 14 | description = 
"Project ID" 15 | default = "" 16 | } 17 | variable "region" { 18 | description = "AWS region" 19 | default = "us-east-1" 20 | } 21 | 22 | variable "repository_owner" { 23 | description = "GitHub repository owner" 24 | default = "" 25 | } 26 | 27 | variable "build_repository_name" { 28 | description = "GitHub repository name" 29 | default = "modelbuild_pipeline" 30 | } 31 | 32 | variable "deploy_repository_name" { 33 | description = "GitHub repository name" 34 | default = "modeldeploy_pipeline" 35 | } 36 | 37 | variable "artifacts_bucket_name" { 38 | description = "S3 Bucket for storing artifacts" 39 | default = "" 40 | } 41 | 42 | variable "github_token" { 43 | description = "GitHub token" 44 | default = "" 45 | } 46 | -------------------------------------------------------------------------------- /samples/s3-cloudfront-static-website/cloudfront.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | s3_origin_id = "s3-my-website2023" 3 | } 4 | 5 | resource "aws_cloudfront_origin_access_identity" "origin_access_identity" { 6 | comment = "s3-my-website2023" 7 | } 8 | 9 | resource "aws_cloudfront_distribution" "s3_distribution" { 10 | origin { 11 | domain_name = aws_s3_bucket.mybucket.bucket_regional_domain_name 12 | origin_id = local.s3_origin_id 13 | 14 | s3_origin_config { 15 | origin_access_identity = aws_cloudfront_origin_access_identity.origin_access_identity.cloudfront_access_identity_path 16 | } 17 | } 18 | 19 | enabled = true 20 | is_ipv6_enabled = true 21 | comment = "my-cloudfront" 22 | default_root_object = "index.html" 23 | 24 | # Configure logging here if required 25 | #logging_config { 26 | # include_cookies = false 27 | # bucket = "mylogs.s3.amazonaws.com" 28 | # prefix = "myprefix" 29 | #} 30 | 31 | # If you have domain configured use it here 32 | #aliases = ["mywebsite.example.com", "s3-static-web-dev.example.com"] 33 | 34 | default_cache_behavior { 35 | allowed_methods = ["DELETE", "GET", "HEAD", 
"OPTIONS", "PATCH", "POST", "PUT"] 36 | cached_methods = ["GET", "HEAD"] 37 | target_origin_id = local.s3_origin_id 38 | 39 | forwarded_values { 40 | query_string = false 41 | 42 | cookies { 43 | forward = "none" 44 | } 45 | } 46 | 47 | viewer_protocol_policy = "allow-all" 48 | min_ttl = 0 49 | default_ttl = 3600 50 | max_ttl = 86400 51 | } 52 | 53 | # Cache behavior with precedence 0 54 | ordered_cache_behavior { 55 | path_pattern = "/content/immutable/*" 56 | allowed_methods = ["GET", "HEAD", "OPTIONS"] 57 | cached_methods = ["GET", "HEAD", "OPTIONS"] 58 | target_origin_id = local.s3_origin_id 59 | 60 | forwarded_values { 61 | query_string = false 62 | headers = ["Origin"] 63 | 64 | cookies { 65 | forward = "none" 66 | } 67 | } 68 | 69 | min_ttl = 0 70 | default_ttl = 86400 71 | max_ttl = 31536000 72 | compress = true 73 | viewer_protocol_policy = "redirect-to-https" 74 | } 75 | 76 | # Cache behavior with precedence 1 77 | ordered_cache_behavior { 78 | path_pattern = "/content/*" 79 | allowed_methods = ["GET", "HEAD", "OPTIONS"] 80 | cached_methods = ["GET", "HEAD"] 81 | target_origin_id = local.s3_origin_id 82 | 83 | forwarded_values { 84 | query_string = false 85 | 86 | cookies { 87 | forward = "none" 88 | } 89 | } 90 | 91 | min_ttl = 0 92 | default_ttl = 3600 93 | max_ttl = 86400 94 | compress = true 95 | viewer_protocol_policy = "redirect-to-https" 96 | } 97 | 98 | price_class = "PriceClass_200" 99 | 100 | restrictions { 101 | geo_restriction { 102 | restriction_type = "whitelist" 103 | locations = ["US", "CA", "GB", "DE", "IN", "IR"] 104 | } 105 | } 106 | 107 | tags = { 108 | Environment = "development" 109 | Name = "my-tag" 110 | } 111 | 112 | viewer_certificate { 113 | cloudfront_default_certificate = true 114 | } 115 | } 116 | 117 | # to get the Cloud front URL if doamin/alias is not configured 118 | output "cloudfront_domain_name" { 119 | value = aws_cloudfront_distribution.s3_distribution.domain_name 120 | } 121 | 
-------------------------------------------------------------------------------- /samples/s3-cloudfront-static-website/website/assets/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/omerbsezer/Fast-Terraform/5ff4bbac8abd87430a6b9184339071a1c0ec6158/samples/s3-cloudfront-static-website/website/assets/favicon.ico -------------------------------------------------------------------------------- /samples/s3-cloudfront-static-website/website/assets/img/portfolio/cabin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/omerbsezer/Fast-Terraform/5ff4bbac8abd87430a6b9184339071a1c0ec6158/samples/s3-cloudfront-static-website/website/assets/img/portfolio/cabin.png -------------------------------------------------------------------------------- /samples/s3-cloudfront-static-website/website/assets/img/portfolio/cake.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/omerbsezer/Fast-Terraform/5ff4bbac8abd87430a6b9184339071a1c0ec6158/samples/s3-cloudfront-static-website/website/assets/img/portfolio/cake.png -------------------------------------------------------------------------------- /samples/s3-cloudfront-static-website/website/assets/img/portfolio/circus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/omerbsezer/Fast-Terraform/5ff4bbac8abd87430a6b9184339071a1c0ec6158/samples/s3-cloudfront-static-website/website/assets/img/portfolio/circus.png -------------------------------------------------------------------------------- /samples/s3-cloudfront-static-website/website/assets/img/portfolio/game.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/omerbsezer/Fast-Terraform/5ff4bbac8abd87430a6b9184339071a1c0ec6158/samples/s3-cloudfront-static-website/website/assets/img/portfolio/game.png -------------------------------------------------------------------------------- /samples/s3-cloudfront-static-website/website/assets/img/portfolio/safe.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/omerbsezer/Fast-Terraform/5ff4bbac8abd87430a6b9184339071a1c0ec6158/samples/s3-cloudfront-static-website/website/assets/img/portfolio/safe.png -------------------------------------------------------------------------------- /samples/s3-cloudfront-static-website/website/assets/img/portfolio/submarine.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/omerbsezer/Fast-Terraform/5ff4bbac8abd87430a6b9184339071a1c0ec6158/samples/s3-cloudfront-static-website/website/assets/img/portfolio/submarine.png -------------------------------------------------------------------------------- /samples/s3-cloudfront-static-website/website/js/scripts.js: -------------------------------------------------------------------------------- 1 | /*! 
2 | * Start Bootstrap - Freelancer v7.0.7 (https://startbootstrap.com/theme/freelancer) 3 | * Copyright 2013-2023 Start Bootstrap 4 | * Licensed under MIT (https://github.com/StartBootstrap/startbootstrap-freelancer/blob/master/LICENSE) 5 | */ 6 | // 7 | // Scripts 8 | // 9 | 10 | window.addEventListener('DOMContentLoaded', event => { 11 | 12 | // Navbar shrink function 13 | var navbarShrink = function () { 14 | const navbarCollapsible = document.body.querySelector('#mainNav'); 15 | if (!navbarCollapsible) { 16 | return; 17 | } 18 | if (window.scrollY === 0) { 19 | navbarCollapsible.classList.remove('navbar-shrink') 20 | } else { 21 | navbarCollapsible.classList.add('navbar-shrink') 22 | } 23 | 24 | }; 25 | 26 | // Shrink the navbar 27 | navbarShrink(); 28 | 29 | // Shrink the navbar when page is scrolled 30 | document.addEventListener('scroll', navbarShrink); 31 | 32 | // Activate Bootstrap scrollspy on the main nav element 33 | const mainNav = document.body.querySelector('#mainNav'); 34 | if (mainNav) { 35 | new bootstrap.ScrollSpy(document.body, { 36 | target: '#mainNav', 37 | rootMargin: '0px 0px -40%', 38 | }); 39 | }; 40 | 41 | // Collapse responsive navbar when toggler is visible 42 | const navbarToggler = document.body.querySelector('.navbar-toggler'); 43 | const responsiveNavItems = [].slice.call( 44 | document.querySelectorAll('#navbarResponsive .nav-link') 45 | ); 46 | responsiveNavItems.map(function (responsiveNavItem) { 47 | responsiveNavItem.addEventListener('click', () => { 48 | if (window.getComputedStyle(navbarToggler).display !== 'none') { 49 | navbarToggler.click(); 50 | } 51 | }); 52 | }); 53 | 54 | }); 55 | --------------------------------------------------------------------------------