├── .checkov.yaml ├── .github └── ISSUE_TEMPLATE │ ├── bug-report.yml │ ├── config.yml │ └── feature-request.yml ├── .gitignore ├── .pre-commit-config.yaml ├── .tflint.hcl ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── application └── ecsdemo-python │ ├── .bandit │ ├── .coveragerc │ ├── .dockerignore │ ├── .gitignore │ ├── .pylintrc │ ├── Dockerfile │ ├── app.py │ ├── requirements-dev.txt │ ├── requirements.txt │ ├── static │ ├── amazon-ecs.png │ └── css │ │ └── style.css │ ├── templates │ └── index.html │ ├── tests │ ├── __init__.py │ └── unit │ │ ├── __init__.py │ │ └── test_ecs_app.py │ └── version.txt ├── docs ├── aquasec.md ├── datadog.md └── github.md ├── images ├── connect-to-github-1.png ├── connect-to-github-2.png ├── first_demo_page.png ├── github_connection.png ├── github_connection2.png ├── spok.png └── successful-deployment-alb.jpg ├── modules ├── aquasec │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── codepipeline │ └── github │ │ ├── codepipeline_python │ │ ├── locals.tf │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── variables.tf │ │ └── versions.tf │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── s3 │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── variables.tf │ │ └── versions.tf │ │ ├── variables.tf │ │ └── versions.tf └── datadog │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf └── patterns ├── aquasec-sidecar └── Dockerfile └── fargate-cluster ├── aquasec.tf.draft ├── delete_ecr_images.sh ├── main.tf ├── networking.tf ├── outputs.tf ├── terraform.tfvars ├── variables.tf └── versions.tf /.checkov.yaml: -------------------------------------------------------------------------------- 1 | block-list-secret-scan: [] 2 | branch: master 3 | directory: 4 | - . 
5 | download-external-modules: true 6 | evaluate-variables: true 7 | external-modules-download-path: .external_modules 8 | framework: 9 | - terraform 10 | mask: [] 11 | output: 12 | - sarif 13 | - junitxml 14 | output-file-path: console,checkov-output/results_junitxml.xml 15 | quiet: true 16 | secrets-history-timeout: 12h 17 | secrets-scan-file-type: [] 18 | summary-position: top 19 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug-report.yml: -------------------------------------------------------------------------------- 1 | name: "🐛 Bug Report" 2 | description: Report a bug 3 | title: "(short issue description)" 4 | labels: [bug, needs-triage] 5 | assignees: [] 6 | body: 7 | - type: textarea 8 | id: description 9 | attributes: 10 | label: Describe the bug 11 | description: What is the problem? A clear and concise description of the bug. 12 | validations: 13 | required: true 14 | - type: textarea 15 | id: expected 16 | attributes: 17 | label: Expected Behavior 18 | description: | 19 | What did you expect to happen? 20 | validations: 21 | required: true 22 | - type: textarea 23 | id: current 24 | attributes: 25 | label: Current Behavior 26 | description: | 27 | What actually happened? 28 | 29 | Please include full errors, uncaught exceptions, stack traces, and relevant logs. 30 | If service responses are relevant, please include wire logs. 31 | validations: 32 | required: true 33 | - type: textarea 34 | id: reproduction 35 | attributes: 36 | label: Reproduction Steps 37 | description: | 38 | Provide a self-contained, concise snippet of code that can be used to reproduce the issue. 39 | For more complex issues provide a repo with the smallest sample that reproduces the bug. 
40 | validations: 41 | required: true 42 | - type: textarea 43 | id: solution 44 | attributes: 45 | label: Possible Solution 46 | description: | 47 | Suggest a fix/reason for the bug 48 | validations: 49 | required: false 50 | - type: textarea 51 | id: context 52 | attributes: 53 | label: Additional Information/Context 54 | description: | 55 | Anything else that might be relevant for troubleshooting this bug. Providing context helps us come up with a solution that is most useful in the real world. 56 | validations: 57 | required: false 58 | 59 | - type: input 60 | id: terraform-version 61 | attributes: 62 | label: Terraform Version 63 | description: Output of `terraform version` 64 | validations: 65 | required: true 66 | 67 | - type: input 68 | id: operating-system 69 | attributes: 70 | label: OS 71 | validations: 72 | required: true 73 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | blank_issues_enabled: false 3 | contact_links: 4 | - name: 💬 General Question 5 | url: https://github.com/aws-samples/fargate-serverless-platform-operator-kit/discussions/categories/q-a 6 | about: Please ask and answer questions as a discussion thread. 7 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature-request.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: 🚀 Feature Request 3 | description: Suggest an idea for this project 4 | title: "(short issue description)" 5 | labels: [feature-request, needs-triage] 6 | assignees: [] 7 | body: 8 | - type: textarea 9 | id: description 10 | attributes: 11 | label: Describe the feature 12 | description: A clear and concise description of the feature you are proposing. 
13 | validations: 14 | required: true 15 | - type: textarea 16 | id: use-case 17 | attributes: 18 | label: Use Case 19 | description: | 20 | Why do you need this feature? For example: "I'm always frustrated when..." 21 | validations: 22 | required: true 23 | - type: textarea 24 | id: solution 25 | attributes: 26 | label: Proposed Solution 27 | description: | 28 | Suggest how to implement the addition or change. Please include prototype/workaround/sketch/reference implementation. 29 | validations: 30 | required: false 31 | - type: textarea 32 | id: other 33 | attributes: 34 | label: Other Information 35 | description: | 36 | Any alternative solutions or features you considered, a more detailed explanation, stack traces, related issues, links for context, etc. 37 | validations: 38 | required: false 39 | - type: checkboxes 40 | id: ack 41 | attributes: 42 | label: Acknowledgements 43 | options: 44 | - label: I may be able to implement this feature request 45 | required: false 46 | - label: This feature might incur a breaking change 47 | required: false 48 | 49 | - type: input 50 | id: terraform-version 51 | attributes: 52 | label: Terraform Version 53 | description: Output of `terraform version` 54 | validations: 55 | required: true 56 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by https://www.toptal.com/developers/gitignore/api/macos,windows 2 | # Edit at https://www.toptal.com/developers/gitignore?templates=macos,windows 3 | 4 | ### macOS ### 5 | # General 6 | .DS_Store 7 | .AppleDouble 8 | .LSOverride 9 | 10 | # Icon must end with two \r 11 | Icon 12 | 13 | # Thumbnails 14 | ._* 15 | 16 | # Files that might appear in the root of a volume 17 | .DocumentRevisions-V100 18 | .fseventsd 19 | .Spotlight-V100 20 | .TemporaryItems 21 | .Trashes 22 | .VolumeIcon.icns 23 | .com.apple.timemachine.donotpresent 24 | 25 | # 
Directories potentially created on remote AFP share 26 | .AppleDB 27 | .AppleDesktop 28 | Network Trash Folder 29 | Temporary Items 30 | .apdisk 31 | 32 | ### macOS Patch ### 33 | # iCloud generated files 34 | *.icloud 35 | 36 | ### Windows ### 37 | # Windows thumbnail cache files 38 | Thumbs.db 39 | Thumbs.db:encryptable 40 | ehthumbs.db 41 | ehthumbs_vista.db 42 | 43 | # Dump file 44 | *.stackdump 45 | 46 | # Folder config file 47 | [Dd]esktop.ini 48 | 49 | # Recycle Bin used on file shares 50 | $RECYCLE.BIN/ 51 | 52 | # Windows Installer files 53 | *.cab 54 | *.msi 55 | *.msix 56 | *.msm 57 | *.msp 58 | 59 | # Windows shortcuts 60 | *.lnk 61 | 62 | # End of https://www.toptal.com/developers/gitignore/api/macos,windows 63 | 64 | plan.out 65 | plan.out.json 66 | # Local .terraform directories 67 | .terraform/ 68 | # .tfstate files 69 | *.tfstate 70 | *.tfstate.* 71 | .terraform* 72 | # Crash log files 73 | crash.log 74 | # Exclude all .tfvars files, which are likely to contain sentitive data, such as 75 | # password, private keys, and other secrets. These should not be part of version 76 | # control as they are data points which are potentially sensitive and subject 77 | # to change depending on the environment. 
78 | # 79 | # Ignore override files as they are usually used to override resources locally and so 80 | # are not checked in 81 | override.tf 82 | override.tf.json 83 | *_override.tf 84 | *_override.tf.json 85 | *.tfvars 86 | # Include override files you do wish to add to version control using negated pattern 87 | # 88 | # !example_override.tf 89 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 90 | # example: *tfplan* 91 | # Ignore CLI configuration files 92 | .terraformrc 93 | terraform.rc 94 | .terraform.lock.hcl 95 | go.mod 96 | go.sum 97 | # Ignore autogenerated hooks files 98 | checkov-output/ 99 | .external_modules/ 100 | results.sarif 101 | 102 | #Python virtualenv 103 | .env 104 | .venv 105 | 106 | #Other files 107 | .DS_Store 108 | .hypothesis 109 | 110 | !terraform.tfvars 111 | microenforcer 112 | backend.tf -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v4.4.0 4 | hooks: 5 | - id: check-yaml 6 | args: [--allow-multiple-documents] 7 | - id: check-json 8 | - id: trailing-whitespace 9 | args: ["--markdown-linebreak-ext=md"] 10 | - id: check-added-large-files 11 | - id: check-executables-have-shebangs 12 | - id: check-shebang-scripts-are-executable 13 | - id: check-merge-conflict 14 | - id: check-vcs-permalinks 15 | - id: detect-private-key 16 | - id: detect-aws-credentials 17 | args: ["--allow-missing-credentials"] 18 | - id: end-of-file-fixer 19 | - id: no-commit-to-branch 20 | - id: pretty-format-json 21 | args: 22 | - --autofix 23 | - repo: https://github.com/zricethezav/gitleaks 24 | rev: v8.16.3 25 | hooks: 26 | - id: gitleaks 27 | - repo: https://github.com/bridgecrewio/checkov.git 28 | rev: 2.3.247 29 | hooks: 30 | - id: checkov 31 | args: 32 | - --config-file 33 | - .checkov.yaml 34 | - repo: 
https://github.com/antonbabenko/pre-commit-terraform 35 | rev: v1.79.1 36 | hooks: 37 | - id: terraform_fmt 38 | - id: terraform_validate 39 | args: 40 | - --hook-config=--retry-once-with-cleanup=true 41 | - id: terraform_docs 42 | args: 43 | - --args=--config=.terraform-docs.yaml 44 | - id: terraform_tflint 45 | args: 46 | - --args=--config=__GIT_WORKING_DIR__/.tflint.hcl 47 | -------------------------------------------------------------------------------- /.tflint.hcl: -------------------------------------------------------------------------------- 1 | config { 2 | format = "compact" 3 | plugin_dir = "~/.tflint.d/plugins" 4 | 5 | call_module_type = "none" 6 | force = false 7 | disabled_by_default = true 8 | } 9 | 10 | plugin "aws" { 11 | enabled = true 12 | version = "0.23.0" 13 | source = "github.com/terraform-linters/tflint-ruleset-aws" 14 | } 15 | 16 | rule "terraform_deprecated_interpolation" { 17 | enabled = true 18 | } 19 | 20 | rule "terraform_deprecated_index" { 21 | enabled = true 22 | } 23 | 24 | rule "terraform_unused_declarations" { 25 | enabled = true 26 | } 27 | 28 | rule "terraform_comment_syntax" { 29 | enabled = true 30 | } 31 | 32 | rule "terraform_documented_outputs" { 33 | enabled = true 34 | } 35 | 36 | rule "terraform_documented_variables" { 37 | enabled = true 38 | } 39 | 40 | rule "terraform_typed_variables" { 41 | enabled = true 42 | } 43 | 44 | rule "terraform_module_pinned_source" { 45 | enabled = true 46 | } 47 | 48 | rule "terraform_naming_convention" { 49 | enabled = true 50 | } 51 | 52 | rule "terraform_required_version" { 53 | enabled = true 54 | } 55 | 56 | rule "terraform_required_providers" { 57 | enabled = true 58 | } 59 | 60 | rule "terraform_standard_module_structure" { 61 | enabled = true 62 | } 63 | 64 | rule "terraform_workspace_remote" { 65 | enabled = true 66 | } 67 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: 
-------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 5 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *main* branch. 27 | 2. 
You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 38 | 39 | GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. 45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 51 | 52 | 53 | ## Security issue notifications 54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. 
55 | 56 | 57 | ## Licensing 58 | 59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 60 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT No Attribution 2 | 3 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so. 10 | 11 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 12 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 13 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 14 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 15 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 16 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 17 | 18 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Serverless Platform Operator Kit (SPOK) 2 | 3 | AWS SPOK, known as an opinionated solution bundle, offers a robust technical stack that emphasizes best practices. What this means for you is that launching production-level workloads on ECS can now be done much quicker. But it's not just about ECS and Fargate. SPOK also incorporates essential tools such as Terraform, GitHub, Amazon CloudWatch, and AWS Secrets Manager for establishing end-to-end solution. 
These services are the go-to choices for many customers already using ECS/Fargate, ensuring you're working with the tools that have proven their worth. Importantly, this bundle is designed to simplify your workflow, letting you focus more on what you do best. 4 | 5 | 6 | ## SPOK V1 7 | 8 | ![Foundation Module v1](images/spok.png) 9 | 10 | ## Getting started 11 | 12 | To make it easy for you to get started with SPOK (Serverless Platform Operator Kit), here's a list of next steps. We are going to provision an ECS Cluster with AWS Fargate, a serverless compute engine for containers. 13 | 14 | ### Deploy Your Pattern 15 | 16 | Go to your pattern and run the following command: 17 | 18 | ```shell 19 | terraform init 20 | terraform validate 21 | terraform plan #Here check the plan that Terraform outputs in case you want to change something. 22 | terraform apply --auto-approve 23 | ``` 24 | 25 | --- 26 | 27 | ### Choose your integration available on **terraform.tfvars** 28 | 29 | Please refer to the following readme files for integration with third party patterns. 30 | 31 | * [Datadog](./docs/datadog.md) 32 | * [AquaSec](./docs/aquasec.md) 33 | * [Github Pipeline](./docs/github.md) 34 | 35 | ### Outputs 36 | After the execution of the Terraform code you will get an output with the IDs and values needed as input for the following *terraform apply*. You can use this infrastructure to run other examples, all you need is the `cluster_name`. 
37 | 38 | ### Check Deployment Status 39 | On the ECS Console you will find the **ecs-core** cluster, in it there will be a service called **ecsdemo-backend**, this service has an Application Load Balancer associated, you can execute the command **terraform output** on the **ecs-service** path and it will show the URL of the ALB, once you open that URL in a web browser it will show the following sample page: 40 | 41 | ![Successful First Deployment](images/first_demo_page.png) 42 | 43 | If you keep refreshing the page it will show a different IP address and Availability Zone to demonstrate the Load Balancing. 44 | 45 | ### Cleanup 46 | Run the following command if you want to delete all the resources created before. If you have created other blueprints and they use this infrastructure then destroy those blueprint resources first. 47 | 48 | Go to your pattern and run the following command: 49 | ```shell 50 | terraform destroy 51 | ``` 52 | 53 | It is possible that the deletion of the ECR Repository fails because there are images left on the repository, hence please delete the images manually and execute the command again. 54 | 55 | ## Best Practices 56 | 57 | Terraform uses persisted state data to keep track of the resources it manages. You can either integrate with HCP Terraform to store state data or define a backend block to store state in a remote object. This lets multiple people access the state data and work together on that collection of infrastructure resources. 
58 | 59 | * Dynamodb Table (A single DynamoDB table is used to lock multiple remote state files) 60 | * S3 Bucket (Stores the state as a given key in a given bucket) 61 | 62 | In case you don't have it, use this CLI to create the DynamoDB table: 63 | 64 | ```shell 65 | aws dynamodb create-table --table-name TerraformLockStates \ 66 | --attribute-definitions AttributeName=LockID,AttributeType=S \ 67 | --key-schema AttributeName=LockID,KeyType=HASH \ 68 | --billing-mode PAY_PER_REQUEST --region us-east-1 69 | ``` 70 | 71 | That CLI will create a DynamoDB table in the us-east-1 region with name TerraformLockStates and pay per request billing mode, the table will have a Hash Key called LockID 72 | 73 | Use this CLI to create the S3 bucket: 74 | 75 | ```shell 76 | account_id=$(aws sts get-caller-identity --query "Account" --output text) && \ 77 | aws s3api create-bucket --bucket ${account_id}-terraform-states \ 78 | --region us-east-1 --output text --no-cli-pager && aws s3api put-bucket-versioning \ 79 | --bucket ${account_id}-terraform-states --versioning-configuration Status=Enabled 80 | ``` 81 | 82 | That CLI will create a private S3 Bucket with name AccountID-terraform-states with versioning enabled by default. 83 | 84 | ### Configure Backend 85 | 86 | Go to your pattern and create the following file: 87 | 88 | * **backend.tf** 89 | 90 | Replace the **bucket name** and **dynamodb_table**, if your region is different from us-east-1 also change **region** 91 | 92 | ``` 93 | terraform { 94 | backend "s3" { 95 | bucket = "${account_id}-terraform-states" <--- here 96 | ... 
97 | region = "us-east-1" <--- here if your region is different than us-east-1 98 | dynamodb_table = "TerraformLockStates" <--- here 99 | } 100 | } 101 | ``` 102 | 103 | After configuration, please run the following command: 104 | 105 | ```shell 106 | terraform init 107 | ``` 108 | 109 | ## Troubleshooting 110 | 111 | ### Error: Error acquiring the state lock 112 | 113 | Error message: 2 errors occurred: 114 | * ResourceNotFoundException: Requested resource not found 115 | * ResourceNotFoundException: Requested resource not found 116 | 117 | #### Solution 118 | 119 | Create DynamoDB Table on **backend.tf** to store state lock (terraform.backend.dynamodb_table). 120 | -------------------------------------------------------------------------------- /application/ecsdemo-python/.bandit: -------------------------------------------------------------------------------- 1 | [bandit] 2 | exclude = ./static,./templates,./.env,./tests -------------------------------------------------------------------------------- /application/ecsdemo-python/.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | omit = 3 | app.py 4 | .venv/* 5 | __init__.py 6 | .env/* 7 | source = 8 | . 
9 | [report] 10 | include_namespace_packages = True -------------------------------------------------------------------------------- /application/ecsdemo-python/.dockerignore: -------------------------------------------------------------------------------- 1 | .bandit 2 | tests/* 3 | 4 | *.swp 5 | package-lock.json 6 | .pytest_cache 7 | *.egg-info 8 | */dist 9 | */build 10 | # Byte-compiled / optimized / DLL files 11 | __pycache__/ 12 | *.py[cod] 13 | *$py.class 14 | 15 | # Environments 16 | .env 17 | .venv 18 | env/ 19 | venv/ 20 | ENV/ 21 | env.bak/ 22 | venv.bak/ 23 | 24 | # Coverage 25 | .coverage 26 | reports/ -------------------------------------------------------------------------------- /application/ecsdemo-python/.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | __pycache__ 3 | .pytest_cache 4 | .venv -------------------------------------------------------------------------------- /application/ecsdemo-python/.pylintrc: -------------------------------------------------------------------------------- 1 | [MAIN] 2 | 3 | # Analyse import fallback blocks. This can be used to support both Python 2 and 4 | # 3 compatible code, which means that the block might have code that exists 5 | # only in one or another interpreter, leading to false positives when analysed. 6 | analyse-fallback-blocks=no 7 | 8 | # Load and enable all available extensions. Use --list-extensions to see a list 9 | # all available extensions. 10 | #enable-all-extensions= 11 | 12 | # In error mode, messages with a category besides ERROR or FATAL are 13 | # suppressed, and no reports are done by default. Error mode is compatible with 14 | # disabling specific errors. 15 | #errors-only= 16 | 17 | # Always return a 0 (non-error) status code, even if lint errors are found. 18 | # This is primarily useful in continuous integration scripts. 
19 | #exit-zero= 20 | 21 | # A comma-separated list of package or module names from where C extensions may 22 | # be loaded. Extensions are loading into the active Python interpreter and may 23 | # run arbitrary code. 24 | extension-pkg-allow-list= 25 | 26 | # A comma-separated list of package or module names from where C extensions may 27 | # be loaded. Extensions are loading into the active Python interpreter and may 28 | # run arbitrary code. (This is an alternative name to extension-pkg-allow-list 29 | # for backward compatibility.) 30 | extension-pkg-whitelist= 31 | 32 | # Return non-zero exit code if any of these messages/categories are detected, 33 | # even if score is above --fail-under value. Syntax same as enable. Messages 34 | # specified are enabled, while categories only check already-enabled messages. 35 | fail-on= 36 | 37 | # Specify a score threshold under which the program will exit with error. 38 | fail-under=10 39 | 40 | # Interpret the stdin as a python script, whose filename needs to be passed as 41 | # the module_or_package argument. 42 | #from-stdin= 43 | 44 | # Files or directories to be skipped. They should be base names, not paths. 45 | ignore=CVS 46 | 47 | # Add files or directories matching the regular expressions patterns to the 48 | # ignore-list. The regex matches against paths and can be in Posix or Windows 49 | # format. Because '\' represents the directory delimiter on Windows systems, it 50 | # can't be used as an escape character. 51 | ignore-paths= 52 | 53 | # Files or directories matching the regular expression patterns are skipped. 54 | # The regex matches against base names, not paths. The default value ignores 55 | # Emacs file locks 56 | ignore-patterns=^\.# 57 | 58 | # List of module names for which member attributes should not be checked 59 | # (useful for modules/projects where namespaces are manipulated during runtime 60 | # and thus existing member attributes cannot be deduced by static analysis). 
It 61 | # supports qualified module names, as well as Unix pattern matching. 62 | ignored-modules= 63 | 64 | # Python code to execute, usually for sys.path manipulation such as 65 | # pygtk.require(). 66 | #init-hook= 67 | 68 | # Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the 69 | # number of processors available to use, and will cap the count on Windows to 70 | # avoid hangs. 71 | jobs=1 72 | 73 | # Control the amount of potential inferred values when inferring a single 74 | # object. This can help the performance when dealing with large functions or 75 | # complex, nested conditions. 76 | limit-inference-results=100 77 | 78 | # List of plugins (as comma separated values of python module names) to load, 79 | # usually to register additional checkers. 80 | load-plugins= 81 | 82 | # Pickle collected data for later comparisons. 83 | persistent=yes 84 | 85 | # Minimum Python version to use for version dependent checks. Will default to 86 | # the version used to run pylint. 87 | py-version=3.10 88 | 89 | # Discover python modules and packages in the file system subtree. 90 | recursive=no 91 | 92 | # When enabled, pylint would attempt to guess common misconfiguration and emit 93 | # user-friendly hints instead of false-positive error messages. 94 | suggestion-mode=yes 95 | 96 | # Allow loading of arbitrary C extensions. Extensions are imported into the 97 | # active Python interpreter and may run arbitrary code. 98 | unsafe-load-any-extension=no 99 | 100 | # In verbose mode, extra non-checker-related info will be displayed. 101 | #verbose= 102 | 103 | 104 | [BASIC] 105 | 106 | # Naming style matching correct argument names. 107 | argument-naming-style=snake_case 108 | 109 | # Regular expression matching correct argument names. Overrides argument- 110 | # naming-style. If left empty, argument names will be checked with the set 111 | # naming style. 112 | #argument-rgx= 113 | 114 | # Naming style matching correct attribute names. 
115 | attr-naming-style=snake_case 116 | 117 | # Regular expression matching correct attribute names. Overrides attr-naming- 118 | # style. If left empty, attribute names will be checked with the set naming 119 | # style. 120 | #attr-rgx= 121 | 122 | # Bad variable names which should always be refused, separated by a comma. 123 | bad-names=foo, 124 | bar, 125 | baz, 126 | toto, 127 | tutu, 128 | tata 129 | 130 | # Bad variable names regexes, separated by a comma. If names match any regex, 131 | # they will always be refused 132 | bad-names-rgxs= 133 | 134 | # Naming style matching correct class attribute names. 135 | class-attribute-naming-style=any 136 | 137 | # Regular expression matching correct class attribute names. Overrides class- 138 | # attribute-naming-style. If left empty, class attribute names will be checked 139 | # with the set naming style. 140 | #class-attribute-rgx= 141 | 142 | # Naming style matching correct class constant names. 143 | class-const-naming-style=UPPER_CASE 144 | 145 | # Regular expression matching correct class constant names. Overrides class- 146 | # const-naming-style. If left empty, class constant names will be checked with 147 | # the set naming style. 148 | #class-const-rgx= 149 | 150 | # Naming style matching correct class names. 151 | class-naming-style=PascalCase 152 | 153 | # Regular expression matching correct class names. Overrides class-naming- 154 | # style. If left empty, class names will be checked with the set naming style. 155 | #class-rgx= 156 | 157 | # Naming style matching correct constant names. 158 | const-naming-style=UPPER_CASE 159 | 160 | # Regular expression matching correct constant names. Overrides const-naming- 161 | # style. If left empty, constant names will be checked with the set naming 162 | # style. 163 | #const-rgx= 164 | 165 | # Minimum line length for functions/classes that require docstrings, shorter 166 | # ones are exempt. 
167 | docstring-min-length=-1 168 | 169 | # Naming style matching correct function names. 170 | function-naming-style=snake_case 171 | 172 | # Regular expression matching correct function names. Overrides function- 173 | # naming-style. If left empty, function names will be checked with the set 174 | # naming style. 175 | #function-rgx= 176 | 177 | # Good variable names which should always be accepted, separated by a comma. 178 | good-names=i, 179 | j, 180 | k, 181 | ex, 182 | Run, 183 | _ 184 | 185 | # Good variable names regexes, separated by a comma. If names match any regex, 186 | # they will always be accepted 187 | good-names-rgxs= 188 | 189 | # Include a hint for the correct naming format with invalid-name. 190 | include-naming-hint=no 191 | 192 | # Naming style matching correct inline iteration names. 193 | inlinevar-naming-style=any 194 | 195 | # Regular expression matching correct inline iteration names. Overrides 196 | # inlinevar-naming-style. If left empty, inline iteration names will be checked 197 | # with the set naming style. 198 | #inlinevar-rgx= 199 | 200 | # Naming style matching correct method names. 201 | method-naming-style=snake_case 202 | 203 | # Regular expression matching correct method names. Overrides method-naming- 204 | # style. If left empty, method names will be checked with the set naming style. 205 | #method-rgx= 206 | 207 | # Naming style matching correct module names. 208 | module-naming-style=snake_case 209 | 210 | # Regular expression matching correct module names. Overrides module-naming- 211 | # style. If left empty, module names will be checked with the set naming style. 212 | #module-rgx= 213 | 214 | # Colon-delimited sets of names that determine each other's naming style when 215 | # the name regexes allow several styles. 216 | name-group= 217 | 218 | # Regular expression which should only match function or class names that do 219 | # not require a docstring. 
220 | no-docstring-rgx=^_ 221 | 222 | # List of decorators that produce properties, such as abc.abstractproperty. Add 223 | # to this list to register other decorators that produce valid properties. 224 | # These decorators are taken in consideration only for invalid-name. 225 | property-classes=abc.abstractproperty 226 | 227 | # Regular expression matching correct type variable names. If left empty, type 228 | # variable names will be checked with the set naming style. 229 | #typevar-rgx= 230 | 231 | # Naming style matching correct variable names. 232 | variable-naming-style=snake_case 233 | 234 | # Regular expression matching correct variable names. Overrides variable- 235 | # naming-style. If left empty, variable names will be checked with the set 236 | # naming style. 237 | #variable-rgx= 238 | 239 | 240 | [CLASSES] 241 | 242 | # Warn about protected attribute access inside special methods 243 | check-protected-access-in-special-methods=no 244 | 245 | # List of method names used to declare (i.e. assign) instance attributes. 246 | defining-attr-methods=__init__, 247 | __new__, 248 | setUp, 249 | __post_init__ 250 | 251 | # List of member names, which should be excluded from the protected access 252 | # warning. 253 | exclude-protected=_asdict, 254 | _fields, 255 | _replace, 256 | _source, 257 | _make 258 | 259 | # List of valid names for the first argument in a class method. 260 | valid-classmethod-first-arg=cls 261 | 262 | # List of valid names for the first argument in a metaclass class method. 263 | valid-metaclass-classmethod-first-arg=cls 264 | 265 | 266 | [DESIGN] 267 | 268 | # List of regular expressions of class ancestor names to ignore when counting 269 | # public methods (see R0903) 270 | exclude-too-few-public-methods= 271 | 272 | # List of qualified class names to ignore when counting class parents (see 273 | # R0901) 274 | ignored-parents= 275 | 276 | # Maximum number of arguments for function / method. 
277 | max-args=5 278 | 279 | # Maximum number of attributes for a class (see R0902). 280 | max-attributes=7 281 | 282 | # Maximum number of boolean expressions in an if statement (see R0916). 283 | max-bool-expr=5 284 | 285 | # Maximum number of branch for function / method body. 286 | max-branches=12 287 | 288 | # Maximum number of locals for function / method body. 289 | max-locals=15 290 | 291 | # Maximum number of parents for a class (see R0901). 292 | max-parents=7 293 | 294 | # Maximum number of public methods for a class (see R0904). 295 | max-public-methods=20 296 | 297 | # Maximum number of return / yield for function / method body. 298 | max-returns=6 299 | 300 | # Maximum number of statements in function / method body. 301 | max-statements=50 302 | 303 | # Minimum number of public methods for a class (see R0903). 304 | min-public-methods=2 305 | 306 | 307 | [EXCEPTIONS] 308 | 309 | # Exceptions that will emit a warning when caught. 310 | overgeneral-exceptions=builtins.BaseException, 311 | builtins.Exception 312 | 313 | 314 | [FORMAT] 315 | 316 | # Expected format of line ending, e.g. empty (any line ending), LF or CRLF. 317 | expected-line-ending-format= 318 | 319 | # Regexp for a line that is allowed to be longer than the limit. 320 | ignore-long-lines=^\s*(# )??$ 321 | 322 | # Number of spaces of indent required inside a hanging or continued line. 323 | indent-after-paren=4 324 | 325 | # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 326 | # tab). 327 | indent-string=' ' 328 | 329 | # Maximum number of characters on a single line. 330 | max-line-length=826 331 | 332 | # Maximum number of lines in a module. 333 | max-module-lines=1000 334 | 335 | # Allow the body of a class to be on the same line as the declaration if body 336 | # contains single statement. 337 | single-line-class-stmt=no 338 | 339 | # Allow the body of an if to be on the same line as the test if there is no 340 | # else. 
341 | single-line-if-stmt=no 342 | 343 | 344 | [IMPORTS] 345 | 346 | # List of modules that can be imported at any level, not just the top level 347 | # one. 348 | allow-any-import-level= 349 | 350 | # Allow wildcard imports from modules that define __all__. 351 | allow-wildcard-with-all=no 352 | 353 | # Deprecated modules which should not be used, separated by a comma. 354 | deprecated-modules= 355 | 356 | # Output a graph (.gv or any supported image format) of external dependencies 357 | # to the given file (report RP0402 must not be disabled). 358 | ext-import-graph= 359 | 360 | # Output a graph (.gv or any supported image format) of all (i.e. internal and 361 | # external) dependencies to the given file (report RP0402 must not be 362 | # disabled). 363 | import-graph= 364 | 365 | # Output a graph (.gv or any supported image format) of internal dependencies 366 | # to the given file (report RP0402 must not be disabled). 367 | int-import-graph= 368 | 369 | # Force import order to recognize a module as part of the standard 370 | # compatibility libraries. 371 | known-standard-library= 372 | 373 | # Force import order to recognize a module as part of a third party library. 374 | known-third-party=enchant 375 | 376 | # Couples of modules and preferred modules, separated by a comma. 377 | preferred-modules= 378 | 379 | 380 | [LOGGING] 381 | 382 | # The type of string formatting that logging methods do. `old` means using % 383 | # formatting, `new` is for `{}` formatting. 384 | logging-format-style=old 385 | 386 | # Logging modules to check that the string format arguments are in logging 387 | # function parameter format. 388 | logging-modules=logging 389 | 390 | 391 | [MESSAGES CONTROL] 392 | 393 | # Only show warnings with the listed confidence levels. Leave empty to show 394 | # all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, 395 | # UNDEFINED. 
396 | confidence=HIGH, 397 | CONTROL_FLOW, 398 | INFERENCE, 399 | INFERENCE_FAILURE, 400 | UNDEFINED 401 | 402 | # Disable the message, report, category or checker with the given id(s). You 403 | # can either give multiple identifiers separated by comma (,) or put this 404 | # option multiple times (only on the command line, not in the configuration 405 | # file where it should appear only once). You can also use "--disable=all" to 406 | # disable everything first and then re-enable specific checks. For example, if 407 | # you want to run only the similarities checker, you can use "--disable=all 408 | # --enable=similarities". If you want to run only the classes checker, but have 409 | # no Warning level messages displayed, use "--disable=all --enable=classes 410 | # --disable=W". 411 | disable=raw-checker-failed, 412 | bad-inline-option, 413 | locally-disabled, 414 | file-ignored, 415 | suppressed-message, 416 | useless-suppression, 417 | deprecated-pragma, 418 | use-symbolic-message-instead, 419 | C0116, 420 | C0114, 421 | C0304 422 | 423 | # Enable the message, report, category or checker with the given id(s). You can 424 | # either give multiple identifier separated by comma (,) or put this option 425 | # multiple time (only on the command line, not in the configuration file where 426 | # it should appear only once). See also the "--disable" option for examples. 427 | enable=c-extension-no-member 428 | 429 | 430 | [METHOD_ARGS] 431 | 432 | # List of qualified names (i.e., library.method) which require a timeout 433 | # parameter e.g. 'requests.api.get,requests.api.post' 434 | timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request 435 | 436 | 437 | [MISCELLANEOUS] 438 | 439 | # List of note tags to take in consideration, separated by a comma. 
440 | notes=FIXME, 441 | XXX, 442 | TODO 443 | 444 | # Regular expression of note tags to take in consideration. 445 | notes-rgx= 446 | 447 | 448 | [REFACTORING] 449 | 450 | # Maximum number of nested blocks for function / method body 451 | max-nested-blocks=5 452 | 453 | # Complete name of functions that never returns. When checking for 454 | # inconsistent-return-statements if a never returning function is called then 455 | # it will be considered as an explicit return statement and no message will be 456 | # printed. 457 | never-returning-functions=sys.exit,argparse.parse_error 458 | 459 | 460 | [REPORTS] 461 | 462 | # Python expression which should return a score less than or equal to 10. You 463 | # have access to the variables 'fatal', 'error', 'warning', 'refactor', 464 | # 'convention', and 'info' which contain the number of messages in each 465 | # category, as well as 'statement' which is the total number of statements 466 | # analyzed. This score is used by the global evaluation report (RP0004). 467 | evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) 468 | 469 | # Template used to display messages. This is a python new-style format string 470 | # used to format the message information. See doc for all details. 471 | msg-template= 472 | 473 | # Set the output format. Available formats are text, parseable, colorized, json 474 | # and msvs (visual studio). You can also give a reporter class, e.g. 475 | # mypackage.mymodule.MyReporterClass. 476 | #output-format= 477 | 478 | # Tells whether to display a full report or only the messages. 479 | reports=no 480 | 481 | # Activate the evaluation score. 
482 | score=yes 483 | 484 | 485 | [SIMILARITIES] 486 | 487 | # Comments are removed from the similarity computation 488 | ignore-comments=yes 489 | 490 | # Docstrings are removed from the similarity computation 491 | ignore-docstrings=yes 492 | 493 | # Imports are removed from the similarity computation 494 | ignore-imports=yes 495 | 496 | # Signatures are removed from the similarity computation 497 | ignore-signatures=yes 498 | 499 | # Minimum lines number of a similarity. 500 | min-similarity-lines=4 501 | 502 | 503 | [SPELLING] 504 | 505 | # Limits count of emitted suggestions for spelling mistakes. 506 | max-spelling-suggestions=4 507 | 508 | # Spelling dictionary name. Available dictionaries: none. To make it work, 509 | # install the 'python-enchant' package. 510 | spelling-dict= 511 | 512 | # List of comma separated words that should be considered directives if they 513 | # appear at the beginning of a comment and should not be checked. 514 | spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: 515 | 516 | # List of comma separated words that should not be checked. 517 | spelling-ignore-words= 518 | 519 | # A path to a file that contains the private dictionary; one word per line. 520 | spelling-private-dict-file= 521 | 522 | # Tells whether to store unknown words to the private dictionary (see the 523 | # --spelling-private-dict-file option) instead of raising a message. 524 | spelling-store-unknown-words=no 525 | 526 | 527 | [STRING] 528 | 529 | # This flag controls whether inconsistent-quotes generates a warning when the 530 | # character used as a quote delimiter is used inconsistently within a module. 531 | check-quote-consistency=no 532 | 533 | # This flag controls whether the implicit-str-concat should generate a warning 534 | # on implicit string concatenation in sequences defined over several lines. 
535 | check-str-concat-over-line-jumps=no 536 | 537 | 538 | [TYPECHECK] 539 | 540 | # List of decorators that produce context managers, such as 541 | # contextlib.contextmanager. Add to this list to register other decorators that 542 | # produce valid context managers. 543 | contextmanager-decorators=contextlib.contextmanager 544 | 545 | # List of members which are set dynamically and missed by pylint inference 546 | # system, and so shouldn't trigger E1101 when accessed. Python regular 547 | # expressions are accepted. 548 | generated-members= 549 | 550 | # Tells whether to warn about missing members when the owner of the attribute 551 | # is inferred to be None. 552 | ignore-none=yes 553 | 554 | # This flag controls whether pylint should warn about no-member and similar 555 | # checks whenever an opaque object is returned when inferring. The inference 556 | # can return multiple potential results while evaluating a Python object, but 557 | # some branches might not be evaluated, which results in partial inference. In 558 | # that case, it might be useful to still emit no-member and other checks for 559 | # the rest of the inferred objects. 560 | ignore-on-opaque-inference=yes 561 | 562 | # List of symbolic message names to ignore for Mixin members. 563 | ignored-checks-for-mixins=no-member, 564 | not-async-context-manager, 565 | not-context-manager, 566 | attribute-defined-outside-init 567 | 568 | # List of class names for which member attributes should not be checked (useful 569 | # for classes with dynamically set attributes). This supports the use of 570 | # qualified names. 571 | ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace 572 | 573 | # Show a hint with possible names when a member name was not found. The aspect 574 | # of finding the hint is based on edit distance. 
575 | missing-member-hint=yes 576 | 577 | # The minimum edit distance a name should have in order to be considered a 578 | # similar match for a missing member name. 579 | missing-member-hint-distance=1 580 | 581 | # The total number of similar names that should be taken in consideration when 582 | # showing a hint for a missing member. 583 | missing-member-max-choices=1 584 | 585 | # Regex pattern to define which classes are considered mixins. 586 | mixin-class-rgx=.*[Mm]ixin 587 | 588 | # List of decorators that change the signature of a decorated function. 589 | signature-mutators= 590 | 591 | 592 | [VARIABLES] 593 | 594 | # List of additional names supposed to be defined in builtins. Remember that 595 | # you should avoid defining new builtins when possible. 596 | additional-builtins= 597 | 598 | # Tells whether unused global variables should be treated as a violation. 599 | allow-global-unused-variables=yes 600 | 601 | # List of names allowed to shadow builtins 602 | allowed-redefined-builtins= 603 | 604 | # List of strings which can identify a callback function by name. A callback 605 | # name must start or end with one of those strings. 606 | callbacks=cb_, 607 | _cb 608 | 609 | # A regular expression matching the name of dummy variables (i.e. expected to 610 | # not be used). 611 | dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ 612 | 613 | # Argument names that match this expression will be ignored. 614 | ignored-argument-names=_.*|^ignored_|^unused_ 615 | 616 | # Tells whether we should check for unused import in __init__ files. 617 | init-import=no 618 | 619 | # List of qualified module names which can have objects that can redefine 620 | # builtins. 
621 | redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io 622 | -------------------------------------------------------------------------------- /application/ecsdemo-python/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM public.ecr.aws/docker/library/alpine:3.19.1 2 | 3 | RUN apk add --no-cache python3 4 | RUN rm /usr/lib/python3.11/EXTERNALLY-MANAGED 5 | RUN python3 -m ensurepip 6 | RUN pip3 install --upgrade pip 7 | 8 | # Update vulnerable packages 9 | RUN apk update && apk upgrade 10 | RUN pip3 install --upgrade setuptools 11 | 12 | WORKDIR /app 13 | ADD . /app 14 | RUN pip3 install -r requirements.txt 15 | 16 | CMD ["python", "app.py"] -------------------------------------------------------------------------------- /application/ecsdemo-python/app.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import platform 3 | import os 4 | import sys 5 | import logging 6 | import flask 7 | 8 | LOG_LEVEL = logging.INFO 9 | logger = logging.getLogger() 10 | logger.setLevel(LOG_LEVEL) 11 | log_handler = logging.StreamHandler(sys.stdout) 12 | logger.addHandler(log_handler) 13 | 14 | def create_app(): 15 | #pylint: disable=W0621 16 | app = flask.Flask(__name__) 17 | @app.route('/') 18 | def hello(): 19 | logger.info('--------GET Root---------') 20 | 21 | app_name = os.environ.get('APP_NAME', 'Amazon ECS Flask Webpage') 22 | container_service = os.environ.get('CONTAINER_SERVICE', 'AWS') 23 | infra_version = os.environ.get('INFRA_VERSION', '0.0.0') 24 | python_version = platform.python_version() 25 | now = datetime.datetime.now() 26 | 27 | return flask.render_template( 28 | 'index.html', 29 | name=app_name, 30 | platform=container_service, 31 | infra_version=infra_version, 32 | flask_version=flask.__version__, 33 | python_version=python_version, 34 | time=now 35 | ) 36 | return app 37 | 38 | if __name__ == '__main__': 39 | app = 
create_app() 40 | HOST = '0.0.0.0' #nosec 41 | port = int(os.environ.get('PORT_IN', '3000')) 42 | logger.info('--------start main---------') 43 | app.run(host=HOST, port=port) 44 | -------------------------------------------------------------------------------- /application/ecsdemo-python/requirements-dev.txt: -------------------------------------------------------------------------------- 1 | pylint==2.17.4 2 | pytest==7.4.0 3 | coverage==7.2.7 4 | bandit==1.7.8 5 | safety==2.3.5 -------------------------------------------------------------------------------- /application/ecsdemo-python/requirements.txt: -------------------------------------------------------------------------------- 1 | flask==2.3.2 2 | -------------------------------------------------------------------------------- /application/ecsdemo-python/static/amazon-ecs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/fargate-serverless-platform-operator-kit/89f06f4090c3a1d16e121b1010efe73207a04579/application/ecsdemo-python/static/amazon-ecs.png -------------------------------------------------------------------------------- /application/ecsdemo-python/static/css/style.css: -------------------------------------------------------------------------------- 1 | 2 | body { 3 | font-family: Helvetica; 4 | background-color: #f4f4f4; 5 | display: flex; 6 | flex-direction: column; 7 | justify-content: center; 8 | align-items: center; 9 | min-height: 100vh; 10 | margin: 20px; 11 | } 12 | 13 | .container { 14 | display: flex; 15 | flex-direction: column; 16 | justify-content: center; 17 | align-items: center; 18 | background-color: #ffe8ca; 19 | padding: 30px; 20 | box-shadow: 0px 3px 5px rgba(0, 0, 0, 0.1); 21 | border-radius: 5px; 22 | } 23 | 24 | h1 { 25 | color: #232F3E; 26 | font-size: 32px; 27 | font-weight: bold; 28 | margin-bottom: 20px; 29 | } 30 | 
-------------------------------------------------------------------------------- /application/ecsdemo-python/templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Amazon ECS Simple App 7 | 8 | 9 | 10 |
11 |

12 | Amazon ECS Logo 13 |

14 |

{{ name }}

15 |
16 |
17 |

Congratulations!

18 |

Your Flask application is now running on a container in {{ platform }}-version {{infra_version}}

19 |

This container is running Flask-version {{ flask_version }}

20 |
21 | 22 | 23 | -------------------------------------------------------------------------------- /application/ecsdemo-python/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/fargate-serverless-platform-operator-kit/89f06f4090c3a1d16e121b1010efe73207a04579/application/ecsdemo-python/tests/__init__.py -------------------------------------------------------------------------------- /application/ecsdemo-python/tests/unit/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/fargate-serverless-platform-operator-kit/89f06f4090c3a1d16e121b1010efe73207a04579/application/ecsdemo-python/tests/unit/__init__.py -------------------------------------------------------------------------------- /application/ecsdemo-python/tests/unit/test_ecs_app.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import flask 3 | #pylint: disable=E0401 4 | from app import create_app 5 | 6 | app_mock = flask.Flask(__name__) 7 | 8 | #pylint: disable=W0621 9 | @pytest.fixture() 10 | def app(): 11 | app = create_app() 12 | yield app 13 | 14 | #pylint: disable=W0621 15 | @pytest.fixture() 16 | def client(app): 17 | return app.test_client() 18 | 19 | #pylint: disable=W0621 20 | @pytest.fixture() 21 | def runner(app): 22 | return app.test_cli_runner() 23 | 24 | #pylint: disable=W0621 25 | def test_flask_server(client): 26 | expected_response = b'\n\n\n \n \n Amazon ECS Simple App\n \n\n\n
\n

\n Amazon ECS Logo\n

\n

Amazon ECS Flask Webpage

\n
\n
\n

Congratulations!

\n

Your Flask application is now running on a container in AWS-version 0.0.0

\n

This container is running Flask-version 2.3.2

\n
\n\n' 27 | response = client.get("/") 28 | assert expected_response == response.data 29 | -------------------------------------------------------------------------------- /application/ecsdemo-python/version.txt: -------------------------------------------------------------------------------- 1 | 1.0.0 -------------------------------------------------------------------------------- /docs/aquasec.md: -------------------------------------------------------------------------------- 1 | ## Security Component - AquaSec Integration module 2 | 3 | The sensitive information for AquaSec will be managed by AWS Secrets Manager. 4 | 5 | - **aqua_url**: Should be the Aqua URL created by your account on AquaSec. 6 | - **username**: Should be the Username created by your account on AquaSec. 7 | - **password**: Should be the Password created by your account on AquaSec. 8 | 9 | For more information about AquaSec please check the [official docs](https://registry.terraform.io/providers/aquasecurity/aquasec/latest/docs). 
10 | 11 | Go to **patterns/fargate-cluster/aquasec.tf.draft** and rename the file to **aquasec.tf**, after go to **patterns/fargate-cluster/terraform.tfvars** and configure the following parameters: 12 | 13 | ```shell 14 | ################################################################################ 15 | # Module - Aquasec 16 | ################################################################################ 17 | 18 | enable_aquasec = false -------> true 19 | enable_aquasec_sidecar = false 20 | enable_aquasec_sidecar_ecr_repository = false 21 | 22 | aquasec = { 23 | secret_manager_name = "aquasec" 24 | } 25 | 26 | aquasec_microenforcer_sidecar = { 27 | name = "aqua-sidecar" 28 | cpu = 512 29 | memory = 1024 30 | essential = false 31 | image = "xxxxxxxx.dkr.ecr.us-east-1.amazonaws.com/aqua-sidecar:latest" 32 | memory_reservation = 50 33 | } 34 | 35 | secret_aquasec = { 36 | aqua_url = "https://cloud.aquasec.com" 37 | username = "username" -------> Your username 38 | password = "password" -------> Your password 39 | } 40 | 41 | ``` 42 | 43 | ### MicroEnforcer Sidecar (Vulnerabilities Scanning and Protection) 44 | 45 | The MicroEnforcer is supplied as an executable which is embedded as a component of your container image. 46 | 47 | #### Deploy Amazon ECR 48 | 49 | Configure the following parameters on **patterns/fargate-cluster/terraform.tfvars**: 50 | 51 | ```shell 52 | ################################################################################ 53 | # Module - Aquasec 54 | ################################################################################ 55 | 56 | ... 57 | 58 | enable_aquasec_sidecar_ecr_repository = true 59 | 60 | ... 61 | 62 | ``` 63 | 64 | Deploy Aqua Sidecar ECR Repository: 65 | 66 | ```shell 67 | terraform init 68 | terraform validate 69 | terraform plan #Here check the plan that Terraform outputs in case you want to change something. 
70 | terraform apply --auto-approve 71 | ``` 72 | 73 | #### Build container image for Aqua Sidecar 74 | 75 | You can obtain MicroEnforcer using the link below. You will need the username and password you have received from Aqua Security. 76 | 77 | ```shell 78 | https://download.aquasec.com/micro-enforcer/2022.4/x86/microenforcer 79 | ``` 80 | 81 | Alternatively, for the ARM64 executable: 82 | 83 | ```shell 84 | https://download.aquasec.com/micro-enforcer/2022.4/arm64/microenforcer 85 | ``` 86 | 87 | Give the file execute permission: 88 | 89 | ```shell 90 | chmod +x microenforcer 91 | ``` 92 | 93 | Copy the file on **patterns/aquasec-sidecar**. 94 | 95 | Move to **patterns/aquasec-sidecar**, build and push the sidecar image to your registry: 96 | 97 | ```shell 98 | aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin xxxxxxx.dkr.ecr.us-east-1.amazonaws.com 99 | ``` 100 | ```shell 101 | docker build --no-cache -t aqua-sidecar . --platform linux/amd64 102 | ``` 103 | ```shell 104 | docker tag aqua-sidecar:latest xxxxxxx.dkr.ecr.us-east-1.amazonaws.com/aqua-sidecar:latest 105 | ``` 106 | ```shell 107 | docker push xxxxxxx.dkr.ecr.us-east-1.amazonaws.com/aqua-sidecar:latest 108 | ``` 109 | 110 | - **xxxxxxx** represents the aws account id. 111 | 112 | #### Add Aqua Sidecar into your ECS Service 113 | 114 | In the section **ECS Service Sample** on **patterns/fargate-cluster/terraform.tfvars**, please uncomment the following lines: 115 | 116 | ```shell 117 | ################################################################################ 118 | # ECS Service Sample 119 | ################################################################################ 120 | 121 | ... 
122 | 123 | # container_sample_entrypoint = ["/.aquasec/bin/microenforcer","bash", "/usr/src/app/startup.sh"] 124 | # container_sample_command = [] 125 | # container_sample_environment = [ 126 | # { 127 | # name = "AQUA_MICROENFORCER" 128 | # value = "1" 129 | # }, 130 | # { 131 | # name = "AQUA_SERVER" 132 | # value = "xxxxxxx-gw.cloud.aquasec.com:443" 133 | # }, 134 | # { 135 | # name = "AQUA_TOKEN" 136 | # value = "xxxxxx-xxxxx-xxxxx-xxxxx-xxxxx" 137 | # }, 138 | # { 139 | # name = "AQUA_IMAGE_ID" 140 | # value = "xxxxx" 141 | # } 142 | # ] 143 | 144 | # container_sample_volumes_from = [{ 145 | # sourceContainer = "aqua-sidecar" 146 | # readOnly = false 147 | # }] 148 | ``` 149 | 150 | - **AQUA_MICROENFORCER**: Required to make runtime protection operate in MicroEnforcer mode. 151 | - **AQUA_SERVER**: The IP address and port (usually 443) of any Aqua Gateway. 152 | - **AQUA_TOKEN**: The deployment token of any MicroEnforcer group. In the Aqua UI: Navigate to Administration > Aqua Enforcers and edit a MicroEnforcer group (e.g., the "default micro enforcer group"). 153 | - **AQUA_IMAGE_ID**: The Docker image ID of the application image. Do not specify this if you want the MicroEnforcer to fetch the image name and image digest from ECS metadata (recommended). 154 | 155 | **NOTE**: The sidecar container will run only very briefly, just long enough to expose the MicroEnforcer executable. 156 | 157 | Also, configure the following parameters on **patterns/fargate-cluster/terraform.tfvars**: 158 | 159 | ```shell 160 | ################################################################################ 161 | # Module - Aquasec 162 | ################################################################################ 163 | 164 | ... 165 | 166 | enable_aquasec_sidecar = true 167 | 168 | ... 
169 | 170 | aquasec_microenforcer_sidecar = { 171 | name = "aqua-sidecar" 172 | cpu = 512 173 | memory = 1024 174 | essential = false 175 | image = "xxxxxxxx.dkr.ecr.us-east-1.amazonaws.com/aqua-sidecar:latest" 176 | memory_reservation = 50 177 | } 178 | 179 | ... 180 | 181 | ``` 182 | 183 | - **name**: Aqua Sidecar container name (no changes required). 184 | - **cpu**: Aqua Sidecar container cpu (no changes required). 185 | - **memory**: Aqua Sidecar container memory (no changes required). 186 | - **essential**: Aqua Sidecar container essential (no changes required). 187 | - **image**: Aqua Sidecar container image (change **xxxxxxxx** for your AWS Account). 188 | - **memory_reservation**: Aqua Sidecar container memory reservation (no changes required). 189 | 190 | Deploy Aqua Sidecar Integration: 191 | 192 | ```shell 193 | terraform init 194 | terraform validate 195 | terraform plan #Here check the plan that Terraform outputs in case you want to change something. 196 | terraform apply --auto-approve 197 | ``` 198 | 199 | ### Cleanup 200 | 201 | Rename **aquasec.tf** to **aquasec.tf.draft** and configure the following values: 202 | 203 | ```shell 204 | ################################################################################ 205 | # Module - Aquasec 206 | ################################################################################ 207 | 208 | enable_aquasec = true -------> false 209 | 210 | ``` 211 | 212 | 213 | -------------------------------------------------------------------------------- /docs/datadog.md: -------------------------------------------------------------------------------- 1 | ## Monitoring Component - Datadog integration module 2 | 3 | The sensitive information for Datadog will be managed by AWS Secrets Manager. 4 | 5 | - **datadog_api_key**: Should be the API Key created by your org on Datadog. 6 | - **datadog_app_key**: Should be the Application Key created by your org on Datadog.
7 | 8 | For more information about the Keys used by Datadog please check the [official docs](https://docs.datadoghq.com/account_management/api-app-keys/). 9 | 10 | Configure the following parameters on **patterns/fargate-cluster/terraform.tfvars**: 11 | 12 | ```shell 13 | ################################################################################ 14 | # Module - Datadog 15 | ################################################################################ 16 | 17 | enable_datadog = false -----> true 18 | 19 | sns_topic_name_for_alerts = "sns-containters-ecs-topic-alerts" 20 | 21 | datadog_integration_aws = { 22 | roleName = "DatadogAWSIntegrationRole" 23 | alert_cpuutilization_threshold = "80" 24 | alert_memory_utilization_threshold = "80" 25 | secret_manager_name = "datadog" 26 | external_id = "XXXXXX" -----------> External ID Auth Datadog 27 | } 28 | 29 | secret_datadog = { 30 | datadog_api_key = "datadog_api_key" -----------> Datadog API Key 31 | datadog_app_key = "datadog_app_key" -----------> Datadog App Key 32 | } 33 | ``` 34 | 35 | Deploy datadog Integration: 36 | 37 | ```shell 38 | terraform init 39 | terraform validate 40 | terraform plan #Here check the plan that Terraform outputs in case you want to change something. 41 | terraform apply --auto-approve 42 | ``` -------------------------------------------------------------------------------- /docs/github.md: -------------------------------------------------------------------------------- 1 | ## CI/CD Component - Github integration module 2 | 3 | You need to first create an **AWS CodeStar** connection in your account to connect **Github** with **AWS**. 4 | 5 | 1. Go to Codepipeline console and select **Settings**, after go to **Connections** and click **Create Connection** button. 6 | 2. Select github as Provider and select connect to Github: 7 | 8 | ![Connect CodeBuild to Github step 1](../images/github_connection.png) 9 | 10 | 3. Accept the connection 11 | 12 | 13 | 14 | 4. 
after creation, copy ARN of connection and go to **patterns/fargate-cluster/terraform.tfvars** and change the following configuration: 15 | 16 | ```shell 17 | ################################################################################ 18 | # Module - Codepipeline with Github 19 | ################################################################################ 20 | 21 | enable_codepipeline_github = false -----> true 22 | 23 | repository_name = "ecsdemo-python" ------> your repository name, if you want to test the integration, our suggestion is do a Fork of the following project https://github.com/edmiranda/ecsdemo-python 24 | secret_manager_name = "github" 25 | 26 | secret_github = { 27 | code_star_connection_arn = "arn:aws:codeconnections:us-east-1:xxxxxxx:connection/xxxxxx-xxxx-xxxx-xxxx-xxxxxxxxx" ------> Your CodeConnection ARN 28 | organization_name = "xxxxxxx" ------> Your Organization Name 29 | } 30 | ``` 31 | 32 | Deploy Github CI/CD Integration: 33 | 34 | ```shell 35 | terraform init 36 | terraform validate 37 | terraform plan #Here check the plan that Terraform outputs in case you want to change something. 
38 | terraform apply --auto-approve 39 | ``` 40 | 41 | ### Cleanup 42 | 43 | Removing ECR Images and S3 Bucket 44 | 45 | ```shell 46 | ./delete_ecr_images.sh us-east-1 ecs-core 47 | 48 | ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) 49 | 50 | aws s3api delete-objects \ 51 | --bucket $ACCOUNT_ID-ecs-github-pipeline-artifacts \ 52 | --delete "$(aws s3api list-object-versions \ 53 | --bucket "$ACCOUNT_ID-ecs-github-pipeline-artifacts" \ 54 | --output=json \ 55 | --query='{Objects: Versions[].{Key:Key,VersionId:VersionId}}')" 56 | 57 | ``` 58 | 59 | Disable integration: 60 | 61 | ```shell 62 | ################################################################################ 63 | # Module - Codepipeline with Github 64 | ################################################################################ 65 | 66 | enable_codepipeline_github = true -----> false 67 | 68 | ``` 69 | 70 | ```shell 71 | terraform apply 72 | 73 | ``` -------------------------------------------------------------------------------- /images/connect-to-github-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/fargate-serverless-platform-operator-kit/89f06f4090c3a1d16e121b1010efe73207a04579/images/connect-to-github-1.png -------------------------------------------------------------------------------- /images/connect-to-github-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/fargate-serverless-platform-operator-kit/89f06f4090c3a1d16e121b1010efe73207a04579/images/connect-to-github-2.png -------------------------------------------------------------------------------- /images/first_demo_page.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aws-samples/fargate-serverless-platform-operator-kit/89f06f4090c3a1d16e121b1010efe73207a04579/images/first_demo_page.png -------------------------------------------------------------------------------- /images/github_connection.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/fargate-serverless-platform-operator-kit/89f06f4090c3a1d16e121b1010efe73207a04579/images/github_connection.png -------------------------------------------------------------------------------- /images/github_connection2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/fargate-serverless-platform-operator-kit/89f06f4090c3a1d16e121b1010efe73207a04579/images/github_connection2.png -------------------------------------------------------------------------------- /images/spok.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/fargate-serverless-platform-operator-kit/89f06f4090c3a1d16e121b1010efe73207a04579/images/spok.png -------------------------------------------------------------------------------- /images/successful-deployment-alb.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/fargate-serverless-platform-operator-kit/89f06f4090c3a1d16e121b1010efe73207a04579/images/successful-deployment-alb.jpg -------------------------------------------------------------------------------- /modules/aquasec/main.tf: -------------------------------------------------------------------------------- 1 | # Configure the AquaSec provider 2 | provider "aquasec" { 3 | username = var.username 4 | aqua_url = var.aqua_url 5 | password = var.password 6 | 7 | # If you are using unverifiable certificates (e.g. 
self-signed) you may need to disable certificate verification 8 | verify_tls = false # Alternatively sourced from $AQUA_TLS_VERIFY 9 | } 10 | 11 | ################################################################################ 12 | # Integration Registry 13 | ################################################################################ 14 | 15 | resource "aquasec_integration_registry" "integration_registry" { 16 | count = var.enable_aquasec ? 1 : 0 17 | name = var.aquasec_registry.name 18 | type = var.aquasec_registry.type 19 | advanced_settings_cleanup = false 20 | always_pull_patterns = [":latest", ":${var.aquasec_registry.tag_included}"] 21 | author = var.aquasec_registry.author 22 | auto_cleanup = false 23 | auto_pull = true 24 | auto_pull_interval = 1 25 | auto_pull_max = 100 26 | auto_pull_rescan = false 27 | auto_pull_time = "08:45" 28 | description = "Automatically discovered registry" 29 | 30 | options { 31 | option = "ARNRole" 32 | value = var.aquasec_registry.arn_role 33 | } 34 | options { 35 | option = "sts:ExternalId" 36 | value = var.aquasec_registry.sts_external_id 37 | } 38 | options { 39 | option = "TestImagePull" 40 | value = "${var.aquasec_registry.image_pull}:${var.aquasec_registry.tag_included}" 41 | } 42 | 43 | prefixes = [ 44 | var.aquasec_registry.ecr_repository 45 | ] 46 | 47 | pull_image_age = "0D" 48 | pull_image_count = 3 49 | pull_image_tag_pattern = [":${var.aquasec_registry.tag_included}"] 50 | pull_repo_patterns_excluded = [":${var.aquasec_registry.tag_excluded}"] 51 | 52 | url = var.account.region 53 | scanner_name = [] 54 | scanner_type = "any" 55 | 56 | } 57 | 58 | ################################################################################ 59 | # ECR Repository 60 | ################################################################################ 61 | 62 | module "ecr" { 63 | count = var.enable_aquasec_sidecar ? 1 : 0 64 | #checkov:skip=CKV_TF_1:Using full commit hash generate a bug where the ref is not found on the CI. 
65 | source = "terraform-aws-modules/ecr/aws" 66 | version = "1.6.0" 67 | repository_name = "aqua-sidecar" 68 | registry_scan_type = "BASIC" 69 | repository_image_tag_mutability = "IMMUTABLE" 70 | manage_registry_scanning_configuration = true 71 | create_lifecycle_policy = false 72 | registry_scan_rules = [ 73 | { 74 | scan_frequency = "SCAN_ON_PUSH" 75 | filter = "*" 76 | filter_type = "WILDCARD" 77 | } 78 | ] 79 | } -------------------------------------------------------------------------------- /modules/aquasec/outputs.tf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/fargate-serverless-platform-operator-kit/89f06f4090c3a1d16e121b1010efe73207a04579/modules/aquasec/outputs.tf -------------------------------------------------------------------------------- /modules/aquasec/variables.tf: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # ECS Cluster Fargate Configuration 3 | ################################################################################ 4 | 5 | variable "account" { 6 | description = "Generic parameters for the variable" 7 | type = map(string) 8 | default = { 9 | region = "us-east-1" 10 | } 11 | } 12 | 13 | ################################################################################ 14 | # AquaSec Integration 15 | ################################################################################ 16 | 17 | variable "enable_aquasec" { 18 | description = "Enable or Disable Aquasec Integration" 19 | type = bool 20 | default = false 21 | } 22 | 23 | variable "enable_aquasec_sidecar" { 24 | description = "Enable or Disable Aquasec Sidecar" 25 | type = bool 26 | default = false 27 | } 28 | 29 | variable "aquasec_registry" { 30 | description = "Parameter for AquaSec Integration Registry" 31 | type = map(string) 32 | default = { 33 | name = "ECR 
Integration" 34 |     type            = "AWS" 35 |     author          = "example@amazon.com" 36 |     arn_role        = "arn:aws:iam::809940063064:role/Admin" 37 |     sts_external_id = "IsengardExternalIduNsxNCILFN16" 38 |     image_pull      = "sonarqube" 39 |     tag_included    = "9.9.4-community" 40 |     tag_excluded    = "xyz" 41 |     ecr_repository  = "809940063064.dkr.ecr.us-east-1.amazonaws.com" 42 |   } 43 | } 44 | 45 | variable "username" { 46 |   description = "Aquasec Username" 47 |   type        = string 48 | } 49 | 50 | variable "aqua_url" { 51 |   description = "Aquasec URL" 52 |   type        = string 53 | } 54 | 55 | variable "password" { 56 |   description = "Aquasec Password" 57 |   type        = string 58 | } -------------------------------------------------------------------------------- /modules/aquasec/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 |   required_version = ">= 1.0" 3 | 4 |   required_providers { 5 |     aquasec = { 6 |       version = "0.8.27" 7 |       source  = "aquasecurity/aquasec" 8 |     } 9 |   } 10 | } -------------------------------------------------------------------------------- /modules/codepipeline/github/codepipeline_python/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 |   pipeline_name = "${var.repository_name}-github-pipeline" 3 | } 4 | -------------------------------------------------------------------------------- /modules/codepipeline/github/codepipeline_python/main.tf: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # CodePipeline Role 3 | ################################################################################ 4 | 5 | data "aws_iam_policy" "ec2_full_access" { 6 |   name = "AmazonEC2FullAccess" 7 | } 8 | 9 | data "aws_iam_policy" "r53_domain_access" { 10 |   name = "AmazonRoute53DomainsFullAccess" 11 | } 12 | 13 | data "aws_iam_policy" "ecs_full_access" { 14 |   name = 
"AmazonECS_FullAccess" 15 | } 16 | 17 | resource "aws_iam_role" "codepipeline_role" { 18 | name = "${var.repository_name}-pipeline-role" 19 | assume_role_policy = < /dev/null 601 | - echo "deb [signed-by=/usr/share/keyrings/trivy.gpg] https://aquasecurity.github.io/trivy-repo/deb $(lsb_release -sc) main" | sudo tee -a /etc/apt/sources.list.d/trivy.list 602 | - apt update -y 603 | - apt-get install trivy -y 604 | build: 605 | commands: 606 | - docker build -t app:local . 607 | post_build: 608 | commands: 609 | - trivy image app:local 610 | EOF 611 | } 612 | 613 | artifacts { 614 | type = "CODEPIPELINE" 615 | } 616 | 617 | environment { 618 | compute_type = var.build_compute_type 619 | image = var.build_image 620 | type = "LINUX_CONTAINER" 621 | privileged_mode = true 622 | } 623 | } 624 | 625 | ################################################################################ 626 | # CodePipeline - PyLint Step 627 | ################################################################################ 628 | 629 | resource "aws_codebuild_project" "pylint" { 630 | #checkov:skip=CKV_AWS_316:Privileged Mode is required for using container runtime. 631 | #checkov:skip=CKV_AWS_314:Default Logging. 
632 | name = "${local.pipeline_name}-linter" 633 | description = "${var.repository_name} PyLint" 634 | service_role = aws_iam_role.codebuild_step_role.arn 635 | build_timeout = "15" 636 | encryption_key = var.artifacts_bucket_encryption_key_arn 637 | 638 | source { 639 | type = "CODEPIPELINE" 640 | buildspec = < artifacts/imagedefinitions.json 755 | - cat artifacts/imagedefinitions.json 756 | artifacts: 757 | files: 758 | - '**/*' 759 | base-directory: 'artifacts' 760 | discard-paths: yes 761 | EOF 762 | } 763 | 764 | artifacts { 765 | type = "CODEPIPELINE" 766 | } 767 | 768 | environment { 769 | compute_type = var.build_compute_type 770 | image = var.build_image 771 | type = "LINUX_CONTAINER" 772 | privileged_mode = true 773 | 774 | environment_variable { 775 | name = "ECR_REPO_NAME" 776 | value = var.ecr_repository_name 777 | } 778 | environment_variable { 779 | name = "FOLDER_PATH" 780 | value = "." 781 | } 782 | 783 | environment_variable { 784 | name = "CONTAINER_NAME" 785 | value = var.container_name 786 | } 787 | } 788 | } -------------------------------------------------------------------------------- /modules/codepipeline/github/codepipeline_python/outputs.tf: -------------------------------------------------------------------------------- 1 | output "pipeline_arn" { 2 | description = "The pipeline ARN" 3 | value = aws_codepipeline.pipeline.arn 4 | } 5 | -------------------------------------------------------------------------------- /modules/codepipeline/github/codepipeline_python/variables.tf: -------------------------------------------------------------------------------- 1 | variable "repository_name" { 2 | type = string 3 | description = "The repository name to use in CodePipeline source stage" 4 | } 5 | 6 | variable "branch_name" { 7 | type = string 8 | description = "The repository branch name to use in CodePipeline source stage" 9 | default = "main" 10 | } 11 | 12 | variable "artifacts_bucket_arn" { 13 | type = string 14 | description = "The s3 
artifacts bucket ARN" 15 | } 16 | 17 | variable "artifacts_bucket_encryption_key_arn" { 18 | type = string 19 | description = "The s3 artifacts bucket KMS key ARN" 20 | } 21 | 22 | variable "account_id" { 23 | type = string 24 | description = "The AWS account ID" 25 | } 26 | 27 | variable "aws_region" { 28 | type = string 29 | description = "The AWS region" 30 | } 31 | 32 | variable "build_compute_type" { 33 | type = string 34 | description = "The CodeBuild projects compute type" 35 | default = "BUILD_GENERAL1_SMALL" 36 | } 37 | 38 | variable "build_image" { 39 | type = string 40 | description = "The CodeBuild projects image" 41 | default = "aws/codebuild/standard:7.0" 42 | } 43 | 44 | variable "pipeline_articats_bucket_name" { 45 | type = string 46 | description = "The Pipeline artifacts bucket name" 47 | } 48 | 49 | variable "ecr_repository_name" { 50 | type = string 51 | description = "The ECR repository name for the app" 52 | } 53 | 54 | variable "cluster_name" { 55 | type = string 56 | description = "The ECS cluster name" 57 | } 58 | 59 | variable "container_name" { 60 | type = string 61 | description = "The ECS service main container name" 62 | } 63 | 64 | variable "service_name" { 65 | type = string 66 | description = "The ECS service name" 67 | } 68 | 69 | variable "code_star_connection_arn" { 70 | type = string 71 | description = "The CodeStar connection ARN" 72 | } 73 | 74 | variable "organization_name" { 75 | type = string 76 | description = "The Github organization name" 77 | } 78 | -------------------------------------------------------------------------------- /modules/codepipeline/github/codepipeline_python/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.13.1" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 4.22" 8 | } 9 | } 10 | } -------------------------------------------------------------------------------- 
/modules/codepipeline/github/main.tf: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # CodePipeline Artifacts Bucket 3 | ################################################################################ 4 | 5 | module "pipeline_artifacts_bucket" { 6 | source = "./s3" 7 | bucket_name = "${var.account_id}-ecs-github-pipeline-artifacts" 8 | kms_key_name = "${var.account_id}-ecs-github-pipeline-artifacts-key" 9 | } 10 | 11 | ################################################################################ 12 | # Microservice pipeline 13 | ################################################################################ 14 | 15 | resource "aws_secretsmanager_secret" "github" { 16 | name = var.secret_manager_name 17 | description = "Github Secrets" 18 | recovery_window_in_days = 0 19 | } 20 | 21 | resource "aws_secretsmanager_secret_version" "github_version" { 22 | secret_id = aws_secretsmanager_secret.github.id 23 | secret_string = jsonencode(var.secret_github) 24 | } 25 | 26 | module "python_microservice_pipeline" { 27 | source = "./codepipeline_python" 28 | repository_name = var.repository_name 29 | artifacts_bucket_arn = module.pipeline_artifacts_bucket.bucket_arn 30 | artifacts_bucket_encryption_key_arn = module.pipeline_artifacts_bucket.bucket_key_arn 31 | account_id = var.account_id 32 | aws_region = var.account.region 33 | pipeline_articats_bucket_name = module.pipeline_artifacts_bucket.bucket_name 34 | ecr_repository_name = var.ecr_repository_name 35 | cluster_name = var.cluster_name 36 | container_name = var.container_sample.name 37 | service_name = var.service_sample.name 38 | organization_name = jsondecode(aws_secretsmanager_secret_version.github_version.secret_string)["organization_name"] 39 | code_star_connection_arn = jsondecode(aws_secretsmanager_secret_version.github_version.secret_string)["code_star_connection_arn"] 40 | } 41 | 42 | 
################################################################################ 43 | # ECR Repository 44 | ################################################################################ 45 | 46 | module "ecr" { 47 | #checkov:skip=CKV_TF_1:Using full commit hash generate a bug where the ref is not found on the CI. 48 | source = "terraform-aws-modules/ecr/aws" 49 | version = "1.6.0" 50 | repository_name = var.ecr_repository_name 51 | registry_scan_type = "BASIC" 52 | repository_image_tag_mutability = "IMMUTABLE" 53 | manage_registry_scanning_configuration = true 54 | create_lifecycle_policy = false 55 | registry_scan_rules = [ 56 | { 57 | scan_frequency = "SCAN_ON_PUSH" 58 | filter = "*" 59 | filter_type = "WILDCARD" 60 | } 61 | ] 62 | } -------------------------------------------------------------------------------- /modules/codepipeline/github/outputs.tf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/fargate-serverless-platform-operator-kit/89f06f4090c3a1d16e121b1010efe73207a04579/modules/codepipeline/github/outputs.tf -------------------------------------------------------------------------------- /modules/codepipeline/github/s3/main.tf: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # S3 Artifacts Bucket 3 | ################################################################################ 4 | 5 | resource "aws_kms_key" "bucket_encryption_key" { 6 | #checkov:skip=CKV2_AWS_64: Key Policy 7 | 8 | description = var.kms_key_name 9 | deletion_window_in_days = 10 10 | enable_key_rotation = true 11 | } 12 | resource "aws_s3_bucket" "bucket" { 13 | #checkov:skip=CKV_AWS_18:Access log is not needed in not needed in in pipeline artifacts bucket 14 | #checkov:skip=CKV_AWS_144: S3 bucket has cross-region replication is not needed in in pipeline artifacts bucket 15 | 
#checkov:skip=CKV_AWS_21: Versioning not needed in pipeline artifacts bucket 16 | #checkov:skip=CKV2_AWS_62:No need for event notification on this pipeline artifacts bucket. 17 | #checkov:skip=CKV2_AWS_61:No for lifecycle configuration. 18 | bucket = var.bucket_name 19 | } 20 | 21 | resource "aws_s3_bucket_ownership_controls" "ownership_controls" { 22 | #checkov:skip=CKV2_AWS_65:ACL for Owner. 23 | bucket = aws_s3_bucket.bucket.id 24 | rule { 25 | object_ownership = "BucketOwnerPreferred" 26 | } 27 | } 28 | 29 | resource "aws_s3_bucket_public_access_block" "bucket_public_access_block" { 30 | bucket = aws_s3_bucket.bucket.id 31 | block_public_acls = true 32 | block_public_policy = true 33 | restrict_public_buckets = true 34 | ignore_public_acls = true 35 | } 36 | 37 | resource "aws_s3_bucket_acl" "bucket_acl" { 38 | bucket = aws_s3_bucket.bucket.id 39 | acl = "private" 40 | depends_on = [aws_s3_bucket_ownership_controls.ownership_controls] 41 | } 42 | 43 | resource "aws_s3_bucket_server_side_encryption_configuration" "bucket_encryption" { 44 | bucket = aws_s3_bucket.bucket.bucket 45 | rule { 46 | apply_server_side_encryption_by_default { 47 | kms_master_key_id = aws_kms_key.bucket_encryption_key.arn 48 | sse_algorithm = "aws:kms" 49 | } 50 | } 51 | } -------------------------------------------------------------------------------- /modules/codepipeline/github/s3/outputs.tf: -------------------------------------------------------------------------------- 1 | output "bucket_arn" { 2 | description = "The S3 bucket ARN" 3 | value = aws_s3_bucket.bucket.arn 4 | } 5 | 6 | output "bucket_key_arn" { 7 | description = "The KMS key id" 8 | value = aws_kms_key.bucket_encryption_key.arn 9 | } 10 | 11 | output "bucket_name" { 12 | description = "The S3 bucket name" 13 | value = aws_s3_bucket.bucket.bucket 14 | } -------------------------------------------------------------------------------- /modules/codepipeline/github/s3/variables.tf: 
-------------------------------------------------------------------------------- 1 | variable "bucket_name" { 2 |   type        = string 3 |   description = "The S3 bucket name" 4 | } 5 | 6 | variable "kms_key_name" { 7 |   type        = string 8 |   description = "The S3 KMS encryption key name" 9 | } -------------------------------------------------------------------------------- /modules/codepipeline/github/s3/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 |   required_version = ">= 0.13.1" 3 | 4 |   required_providers { 5 |     aws = { 6 |       source  = "hashicorp/aws" 7 |       version = ">= 4.22" 8 |     } 9 |   } 10 | } -------------------------------------------------------------------------------- /modules/codepipeline/github/variables.tf: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # ECS Cluster Fargate Configuration 3 | ################################################################################ 4 | variable "cluster_name" { 5 |   description = "Cluster Name" 6 |   type        = string 7 |   default     = "ecs-core" 8 | } 9 | 10 | variable "account" { 11 |   description = "Generic parameters for the variable" 12 |   type        = map(string) 13 |   default = { 14 |     region = "us-east-1" 15 |   } 16 | } 17 | 18 | variable "account_id" { 19 |   description = "Account ID" 20 |   type        = string 21 | } 22 | 23 | 24 | ################################################################################ 25 | # Github Integration 26 | ################################################################################ 27 | 28 | variable "secret_manager_name" { 29 |   type        = string 30 |   description = "Github Secret Manager Name" 31 | } 32 | 33 | variable "ecr_repository_name" { 34 |   type        = string 35 |   description = "The ECR repository name for the app" 36 | } 37 | 38 | variable "repository_name" { 39 |   type        = string 40 |   description = "The repository name to use in 
CodePipeline source stage" 41 | } 42 | 43 | variable "service_sample" { 44 | description = "Parameter for Service Sample" 45 | type = map(string) 46 | default = { 47 | cpu = 1024 48 | memory = 4096 49 | name = "ecsdemo" 50 | } 51 | } 52 | 53 | variable "container_sample" { 54 | description = "Parameter for Container Sample" 55 | type = map(string) 56 | default = { 57 | cpu = 512 58 | memory = 1024 59 | port = 3000 60 | name = "ecs-sample" 61 | } 62 | } 63 | 64 | variable "secret_github" { 65 | default = { 66 | code_star_connection_arn = "code_star_connection_arn" 67 | organization_name = "organization_name" 68 | } 69 | 70 | type = map(string) 71 | } -------------------------------------------------------------------------------- /modules/codepipeline/github/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.13.1" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 4.22" 8 | } 9 | } 10 | } -------------------------------------------------------------------------------- /modules/datadog/main.tf: -------------------------------------------------------------------------------- 1 | # Configure the Datadog provider 2 | provider "datadog" { 3 | api_key = try(jsondecode(aws_secretsmanager_secret_version.datadog_version[0].secret_string)["datadog_api_key"], "datadog_api_key") 4 | app_key = try(jsondecode(aws_secretsmanager_secret_version.datadog_version[0].secret_string)["datadog_app_key"], "datadog_app_key") 5 | validate = var.enable_datadog 6 | } 7 | 8 | resource "aws_secretsmanager_secret" "datadog" { 9 | count = var.enable_datadog ? 1 : 0 10 | name = "datadog" 11 | description = "Datadog Secrets" 12 | recovery_window_in_days = 0 13 | } 14 | 15 | resource "aws_secretsmanager_secret_version" "datadog_version" { 16 | count = var.enable_datadog ? 
1 : 0 17 | secret_id = aws_secretsmanager_secret.datadog[0].id 18 | secret_string = jsonencode(var.secret_datadog) 19 | } 20 | 21 | data "aws_iam_policy_document" "datadog_aws_integration_assume_role" { 22 | count = var.enable_datadog ? 1 : 0 23 | statement { 24 | actions = ["sts:AssumeRole"] 25 | 26 | principals { 27 | type = "AWS" 28 | identifiers = ["arn:aws:iam::464622532012:root"] 29 | } 30 | condition { 31 | test = "StringEquals" 32 | variable = "sts:ExternalId" 33 | 34 | values = [ 35 | var.datadog_integration_aws.external_id 36 | ] 37 | } 38 | } 39 | } 40 | 41 | data "aws_iam_policy_document" "datadog_aws_integration" { 42 | count = var.enable_datadog ? 1 : 0 43 | #checkov:skip=CKV_AWS_111:Datadog required permissions:https://docs.datadoghq.com/integrations/amazon_web_services/#aws-iam-permissions. 44 | #checkov:skip=CKV_AWS_356:Datadog required permissions:https://docs.datadoghq.com/integrations/amazon_web_services/#aws-iam-permissions. 45 | statement { 46 | actions = [ 47 | "apigateway:GET", 48 | "autoscaling:Describe*", 49 | "backup:List*", 50 | "budgets:ViewBudget", 51 | "cloudfront:GetDistributionConfig", 52 | "cloudfront:ListDistributions", 53 | "cloudtrail:DescribeTrails", 54 | "cloudtrail:GetTrailStatus", 55 | "cloudtrail:LookupEvents", 56 | "cloudwatch:Describe*", 57 | "cloudwatch:Get*", 58 | "cloudwatch:List*", 59 | "codedeploy:List*", 60 | "codedeploy:BatchGet*", 61 | "directconnect:Describe*", 62 | "dynamodb:List*", 63 | "dynamodb:Describe*", 64 | "ec2:Describe*", 65 | "ecs:Describe*", 66 | "ecs:List*", 67 | "elasticache:Describe*", 68 | "elasticache:List*", 69 | "elasticfilesystem:DescribeFileSystems", 70 | "elasticfilesystem:DescribeTags", 71 | "elasticfilesystem:DescribeAccessPoints", 72 | "elasticloadbalancing:Describe*", 73 | "elasticmapreduce:List*", 74 | "elasticmapreduce:Describe*", 75 | "es:ListTags", 76 | "es:ListDomainNames", 77 | "es:DescribeElasticsearchDomains", 78 | "events:CreateEventBus", 79 | "fsx:DescribeFileSystems", 80 | 
"fsx:ListTagsForResource", 81 | "health:DescribeEvents", 82 | "health:DescribeEventDetails", 83 | "health:DescribeAffectedEntities", 84 | "kinesis:List*", 85 | "kinesis:Describe*", 86 | "lambda:GetPolicy", 87 | "lambda:List*", 88 | "logs:DeleteSubscriptionFilter", 89 | "logs:DescribeLogGroups", 90 | "logs:DescribeLogStreams", 91 | "logs:DescribeSubscriptionFilters", 92 | "logs:FilterLogEvents", 93 | "logs:PutSubscriptionFilter", 94 | "logs:TestMetricFilter", 95 | "organizations:Describe*", 96 | "organizations:List*", 97 | "rds:Describe*", 98 | "rds:List*", 99 | "redshift:DescribeClusters", 100 | "redshift:DescribeLoggingStatus", 101 | "route53:List*", 102 | "s3:GetBucketLogging", 103 | "s3:GetBucketLocation", 104 | "s3:GetBucketNotification", 105 | "s3:GetBucketTagging", 106 | "s3:ListAllMyBuckets", 107 | "s3:PutBucketNotification", 108 | "ses:Get*", 109 | "sns:List*", 110 | "sns:Publish", 111 | "sqs:ListQueues", 112 | "states:ListStateMachines", 113 | "states:DescribeStateMachine", 114 | "support:DescribeTrustedAdvisor*", 115 | "support:RefreshTrustedAdvisorCheck", 116 | "tag:GetResources", 117 | "tag:GetTagKeys", 118 | "tag:GetTagValues", 119 | "xray:BatchGetTraces", 120 | "xray:GetTraceSummaries" 121 | ] 122 | resources = ["*"] 123 | } 124 | } 125 | 126 | 127 | resource "aws_iam_policy" "datadog_aws_integration" { 128 | count = var.enable_datadog ? 1 : 0 129 | name = "DatadogAWSIntegrationPolicy" 130 | policy = data.aws_iam_policy_document.datadog_aws_integration[0].json 131 | } 132 | 133 | resource "aws_iam_role" "datadog_aws_integration" { 134 | count = var.enable_datadog ? 1 : 0 135 | name = var.datadog_integration_aws.roleName 136 | description = "Role for Datadog AWS Integration" 137 | assume_role_policy = data.aws_iam_policy_document.datadog_aws_integration_assume_role[0].json 138 | } 139 | 140 | resource "aws_iam_role_policy_attachment" "datadog_aws_integration" { 141 | count = var.enable_datadog ? 
1 : 0 142 | role = aws_iam_role.datadog_aws_integration[0].name 143 | policy_arn = aws_iam_policy.datadog_aws_integration[0].arn 144 | } 145 | 146 | 147 | # Creating Datadog Alerts 148 | 149 | resource "datadog_monitor" "cluster_cpuutilization" { 150 | count = var.enable_datadog ? 1 : 0 151 | name = "ECS - CPU check" 152 | type = "metric alert" 153 | message = "ECS - CPU is > 80%! Notify: @${var.sns_topic_name_for_alerts}" 154 | 155 | query = "avg(last_15m):avg:aws.ecs.cluster.cpuutilization{clustername:${var.cluster_name}} > ${var.datadog_integration_aws.alert_cpuutilization_threshold}" 156 | 157 | monitor_thresholds { 158 | critical = var.datadog_integration_aws.alert_cpuutilization_threshold 159 | } 160 | } 161 | 162 | resource "datadog_monitor" "memory_utilization" { 163 | count = var.enable_datadog ? 1 : 0 164 | name = "ECS - Memory check" 165 | type = "metric alert" 166 | message = "ECS - MEMORY is > 80%! Notify: @${var.sns_topic_name_for_alerts}" 167 | 168 | query = "avg(last_15m):avg:aws.ecs.cluster.memory_utilization{clustername:${var.cluster_name}} > ${var.datadog_integration_aws.alert_memory_utilization_threshold}" 169 | 170 | monitor_thresholds { 171 | critical = var.datadog_integration_aws.alert_memory_utilization_threshold 172 | } 173 | } 174 | 175 | resource "datadog_monitor" "task_pending" { 176 | count = var.enable_datadog ? 1 : 0 177 | name = "ECS - TaskPending check" 178 | type = "metric alert" 179 | message = "ECS - Task pending is > 0! 
Notify: @${var.sns_topic_name_for_alerts}" 180 | 181 | query = "avg(last_15m):avg:aws.ecs.service.pending{clustername:${var.cluster_name}} > 0" 182 | 183 | monitor_thresholds { 184 | critical = 0 185 | } 186 | } -------------------------------------------------------------------------------- /modules/datadog/outputs.tf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/fargate-serverless-platform-operator-kit/89f06f4090c3a1d16e121b1010efe73207a04579/modules/datadog/outputs.tf -------------------------------------------------------------------------------- /modules/datadog/variables.tf: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # ECS Cluster Fargate Configuration 3 | ################################################################################ 4 | variable "cluster_name" { 5 | description = "Cluster Name" 6 | type = string 7 | default = "ecs-core" 8 | } 9 | 10 | ################################################################################ 11 | # Datadog Integration 12 | ################################################################################ 13 | 14 | variable "enable_datadog" { 15 | description = "Enable or Disable Datadog Integration" 16 | type = bool 17 | default = false 18 | } 19 | 20 | variable "sns_topic_name_for_alerts" { 21 | description = "SNS Topic for alerts" 22 | type = string 23 | nullable = false 24 | 25 | } 26 | 27 | variable "datadog_integration_aws" { 28 | description = "Datadog integration variables" 29 | type = map(string) 30 | default = { 31 | roleName = "DatadogAWSIntegrationRole" 32 | cpuutilization = "80" 33 | memory_utilization = "80" 34 | } 35 | } 36 | 37 | variable "secret_datadog" { 38 | default = { 39 | datadog_api_key = "datadog_api_key" 40 | datadog_app_key = "datadog_app_key" 41 | } 42 | 43 | type = map(string) 44 | } 
-------------------------------------------------------------------------------- /modules/datadog/versions.tf: --------------------------------------------------------------------------------
terraform {
  required_version = ">= 1.0"

  required_providers {
    datadog = {
      source  = "DataDog/datadog"
      version = ">= 3.36.0"
    }
    aws = {
      source  = "hashicorp/aws"
      version = ">= 4.22"
    }
  }
}
-------------------------------------------------------------------------------- /patterns/aquasec-sidecar/Dockerfile: --------------------------------------------------------------------------------
FROM alpine
COPY microenforcer /bin/microenforcer

USER root
RUN ["chmod", "+x", "/bin/microenforcer"]
RUN ["/bin/microenforcer", "aqua-init"]
VOLUME ["/.aquasec"]

# Include the next line to embed an Image Profile
# ADD policy.json /.aquasec/policy/policy.json
# Include the next line to embed a Firewall Policy
# ADD firewall.json /.aquasec/policy/firewall.json

COPY microenforcer /.aquasec/bin/microenforcer

# Run as a dedicated non-root user.
RUN addgroup -g 11433 -S aqua && \
    adduser -h /home/aqua -g "aqua user" -s /sbin/nologin -G aqua -S -u 11431 aqua
USER aqua

# NOTE(review): $PLATFORM is not declared with ARG/ENV in this Dockerfile, so it
# expands to an empty string at build time — confirm it is injected by the build
# system, or declare `ARG PLATFORM` above this line.
ENV LD_PRELOAD=/.aquasec/bin/$PLATFORM/slklib.so
ENV AQUA_MICROENFORCER="1"
ENV AQUA_DEBUG_TYPE=STDOUT
LABEL name="Aqua MicroEnforcer" \
      vendor="Aqua Security Software Ltd." \
      summary="Aqua Security Microenforcer" \
      description="The Aqua Security MicroEnforcer provides runtime protection." \
      com.aquasec.component=microenforcer \
      com.aquasec.baseimage=alpine \
      product=aquasec \
      maintainer="admin@aquasec.com"
-------------------------------------------------------------------------------- /patterns/fargate-cluster/aquasec.tf.draft: --------------------------------------------------------------------------------
################################################################################
# Module - Aquasec
################################################################################

resource "aws_secretsmanager_secret" "aquasec" {
  name                    = "aquasec"
  description             = "AquaSec Secrets"
  recovery_window_in_days = 0
}

resource "aws_secretsmanager_secret_version" "aquasec_version" {
  secret_id     = aws_secretsmanager_secret.aquasec.id
  secret_string = jsonencode(var.secret_aquasec)
}

# Aquasec
module "aquasec" {
  source = "../../modules/aquasec"

  enable_aquasec         = var.enable_aquasec
  enable_aquasec_sidecar = var.enable_aquasec_sidecar_ecr_repository

  account = var.account

  aquasec_registry = var.aquasec_registry
  username         = jsondecode(aws_secretsmanager_secret_version.aquasec_version.secret_string)["username"]
  aqua_url         = jsondecode(aws_secretsmanager_secret_version.aquasec_version.secret_string)["aqua_url"]
  password         = jsondecode(aws_secretsmanager_secret_version.aquasec_version.secret_string)["password"]
}
-------------------------------------------------------------------------------- /patterns/fargate-cluster/delete_ecr_images.sh: --------------------------------------------------------------------------------
#!/bin/bash
# Usage: delete_ecr_images.sh <region> <repository-name>
# Best-effort cleanup: deletes every image in the repository; `|| true` keeps
# the caller (e.g. terraform destroy provisioner) going even if the repo is gone.
# FIX: quote the positional parameters so each expands as a single word even if
# it contains shell metacharacters or is empty (ShellCheck SC2086).
aws ecr batch-delete-image --region "$1" \
  --repository-name "$2" \
  --image-ids "$(aws ecr list-images --region "$1" --repository-name "$2" --query 'imageIds[*]' --output json)" || true
--------------------------------------------------------------------------------
/patterns/fargate-cluster/main.tf: --------------------------------------------------------------------------------
################################################################################
# Provider Configuration
################################################################################

provider "aws" {
  region = var.account.region
}

data "aws_availability_zones" "available" {}
data "aws_caller_identity" "current" {}

locals {
  # Use the first three AZs of the region.
  azs = slice(data.aws_availability_zones.available.names, 0, 3)
}

################################################################################
# ECS Cluster Fargate
################################################################################

module "ecs" {
  #checkov:skip=CKV_AWS_111:Task Definition Permissions to Publish logs to CloudWatch Logs and Pull images from Amazon ECR.
  #checkov:skip=CKV_AWS_356:Service Permissions for AutoScaling, CloudWatch Logs and Pull images from Amazon ECR.
  #checkov:skip=CKV2_AWS_5:Default SG.
  #checkov:skip=CKV_TF_1:Using full commit hash generate a bug where the ref is not found on the CI.
  source  = "terraform-aws-modules/ecs/aws"
  version = "5.11.2"

  cluster_name = var.cluster_name

  cluster_configuration = {
    execute_command_configuration = {
      logging = "OVERRIDE"
      log_configuration = {
        cloud_watch_log_group_name = "/aws/ecs/aws-fargate"
      }
    }
  }

  cluster_service_connect_defaults = {
    namespace = aws_service_discovery_private_dns_namespace.this.arn
  }

  # Shared task execution role
  create_task_exec_iam_role = false
  # Allow read access to all SSM params in current account for demo
  task_exec_ssm_param_arns = ["arn:aws:ssm:${var.account.region}:${data.aws_caller_identity.current.account_id}:parameter/*"]
  # Allow read access to all secrets in current account for demo
  task_exec_secret_arns = ["arn:aws:secretsmanager:${var.account.region}:${data.aws_caller_identity.current.account_id}:secret:*"]

  # ContainerInsights (https://registry.terraform.io/modules/terraform-aws-modules/ecs/aws/latest#input_cluster_settings)
  cluster_settings = {
    name  = "containerInsights"
    value = "enabled"
    #checkov:skip=CKV_AWS_65:Container Insights is enabled by default.
  }

  fargate_capacity_providers = {
    FARGATE      = {}
    FARGATE_SPOT = {}
  }

  services = {
    (var.service_sample.name) = {
      cpu    = var.service_sample.cpu
      memory = var.service_sample.memory

      enable_execute_command = true

      ignore_task_definition_changes = true

      # Container definition(s)
      # NOTE(review): when enable_aquasec_sidecar is false the sidecar map below
      # produces a null key, its evaluation fails, and try() yields null — this
      # relies on merge() tolerating that null argument. Confirm against the
      # pinned Terraform version before upgrading.
      container_definitions = merge(try({
        #checkov:skip=CKV_AWS_97:EFS is not being used.
        #AquaSec Sidecar
        (var.enable_aquasec_sidecar == true ? var.aquasec_microenforcer_sidecar.name : null) = {
          cpu                = var.enable_aquasec_sidecar == true ? var.aquasec_microenforcer_sidecar.cpu : null
          memory             = var.enable_aquasec_sidecar == true ? var.aquasec_microenforcer_sidecar.memory : null
          essential          = var.enable_aquasec_sidecar == true ? var.aquasec_microenforcer_sidecar.essential : null
          image              = var.enable_aquasec_sidecar == true ? var.aquasec_microenforcer_sidecar.image : null
          memory_reservation = var.enable_aquasec_sidecar == true ? var.aquasec_microenforcer_sidecar.memory_reservation : null
        }
      }, null),
      {
        (var.container_sample.name) = {
          cpu       = var.container_sample.cpu
          memory    = var.container_sample.memory
          essential = true
          image     = var.container_sample.image
          port_mappings = [
            {
              name          = var.container_sample.name
              containerPort = tonumber(var.container_sample.port)
              protocol      = "tcp"
            }
          ]

          #AquaSec App Configuration
          volumes_from = var.container_sample_volumes_from
          entrypoint   = var.container_sample_entrypoint
          command      = var.container_sample_command
          environment  = var.container_sample_environment

          # Example image used requires access to write to root filesystem
          readonly_root_filesystem  = false
          enable_cloudwatch_logging = true
          memory_reservation        = 100
        }
      })

      load_balancer = {
        service = {
          target_group_arn = element(module.service_alb.target_group_arns, 0)
          container_name   = var.container_sample.name
          container_port   = var.container_sample.port
        }
      }

      subnet_ids = module.vpc.private_subnets
      security_group_rules = {
        alb_ingress = {
          type                     = "ingress"
          from_port                = var.container_sample.port
          to_port                  = var.container_sample.port
          protocol                 = "tcp"
          description              = "Service port"
          source_security_group_id = module.service_alb.security_group_id
        }
        # TODO limit egress rules
        egress_all = {
          type        = "egress"
          from_port   = 0
          to_port     = 0
          protocol    = "-1"
          cidr_blocks = ["0.0.0.0/0"]
        }
      }
    }
  }

  tags = var.tags
}

################################################################################
# AWS Service Connect
################################################################################

resource "aws_service_discovery_private_dns_namespace" "this" {
  name        = "default.${var.cluster_name}.local"
  description = "Service discovery namespace.${var.cluster_name}.local"
  vpc         = module.vpc.vpc_id

  tags = var.tags
}

################################################################################
# ALB Target Group Service Sample
################################################################################

module "service_alb" {
  # FIX: CKV_AWS_91 was previously skipped twice with two different
  # justifications; a single skip entry is kept.
  #checkov:skip=CKV_AWS_91:No need for access logs for the Load Balancer on this sample.
  #checkov:skip=CKV2_AWS_28:No need for WAF for this ALB.
  #checkov:skip=CKV_TF_1:Using full commit hash generate a bug where the ref is not found on the CI.
  source  = "terraform-aws-modules/alb/aws"
  version = "~> 8.3"

  name                       = "${var.container_sample.name}-alb"
  load_balancer_type         = "application"
  enable_deletion_protection = var.load_balancer.enable_deletion_protection
  drop_invalid_header_fields = true
  vpc_id                     = module.vpc.vpc_id
  subnets                    = module.vpc.public_subnets
  security_group_rules = {
    ingress_all_http = {
      type        = "ingress"
      from_port   = 80
      to_port     = 80
      protocol    = "tcp"
      description = "HTTP web traffic"
      cidr_blocks = ["0.0.0.0/0"]
    }
    egress_all = {
      type        = "egress"
      from_port   = 0
      to_port     = 0
      protocol    = "-1"
      cidr_blocks = ["0.0.0.0/0"]
    }
  }

  http_tcp_listeners = [
    {
      port               = "80"
      protocol           = "HTTP"
      target_group_index = 0
    },
  ]

  target_groups = [
    {
      name             = "${var.container_sample.name}-alb-tg"
      backend_protocol = "HTTP"
      backend_port     = var.container_sample.port
      target_type      = "ip"
      health_check = {
        path    = "/"
        port    = var.container_sample.port
        matcher = "200-299"
      }
    },
  ]

  tags = var.tags
}

################################################################################
# Module - Datadog
################################################################################

module "datadog" {
  source = "../../modules/datadog"

  enable_datadog = var.enable_datadog

  cluster_name = var.cluster_name

  sns_topic_name_for_alerts = var.sns_topic_name_for_alerts
  datadog_integration_aws   = var.datadog_integration_aws
  secret_datadog            = var.secret_datadog

}

################################################################################
# Module - Codepipeline with Github
################################################################################

# Codepipeline with Github
module "codepipeline_github" {
  count  = var.enable_codepipeline_github ? 1 : 0
  source = "../../modules/codepipeline/github"

  cluster_name = var.cluster_name
  account      = var.account
  account_id   = data.aws_caller_identity.current.account_id

  ecr_repository_name = var.ecr_repository_name
  repository_name     = var.repository_name
  secret_manager_name = var.secret_manager_name
  service_sample      = var.service_sample
  container_sample    = var.container_sample
  secret_github       = var.secret_github
}
-------------------------------------------------------------------------------- /patterns/fargate-cluster/networking.tf: --------------------------------------------------------------------------------
module "vpc" {
  #checkov:skip=CKV_AWS_111:IAM Policy for Publish logs to CloudWatch Logs. https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs-cwl.html#flow-logs-iam-role
  #checkov:skip=CKV_AWS_356:IAM Policy for Publish logs to CloudWatch Logs. https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs-cwl.html#flow-logs-iam-role
  #checkov:skip=CKV2_AWS_11:No need for VPC flow logs on this pattern.
  #checkov:skip=CKV2_AWS_19:The EIP is attached to an ELB.
  #checkov:skip=CKV2_AWS_12:Default SG.
  #checkov:skip=CKV2_AWS_5:Default SG.
  #checkov:skip=CKV_TF_1:Using full commit hash generate a bug where the ref is not found on the CI.

  source  = "terraform-aws-modules/vpc/aws"
  version = "~> 5.0"

  name = var.cluster_name
  cidr = var.network.vpc_cidr

  azs             = local.azs
  private_subnets = [for k, v in local.azs : cidrsubnet(var.network.vpc_cidr, 4, k)]
  public_subnets  = [for k, v in local.azs : cidrsubnet(var.network.vpc_cidr, 8, k + 48)]

  enable_nat_gateway   = var.network.enable_nat_gateway
  single_nat_gateway   = var.network.single_nat_gateway
  enable_dns_hostnames = var.network.enable_dns_hostnames

  # Manage so we can name
  manage_default_network_acl    = var.network.manage_default_network_acl
  default_network_acl_tags      = { Name = "${var.cluster_name}-default" }
  manage_default_route_table    = var.network.manage_default_route_table
  default_route_table_tags      = { Name = "${var.cluster_name}-default" }
  manage_default_security_group = var.network.manage_default_security_group
  default_security_group_tags   = { Name = "${var.cluster_name}-default" }

  tags = var.tags
}
-------------------------------------------------------------------------------- /patterns/fargate-cluster/outputs.tf: --------------------------------------------------------------------------------
################################################################################
# Outputs
################################################################################

output "cluster_arn" {
  description = "ARN that identifies the cluster"
  value       = module.ecs.cluster_arn
}

output "cluster_id" {
  description = "ID that identifies the cluster"
  value       = module.ecs.cluster_id
}

output "cluster_name" {
  description = "Name that identifies the cluster"
  value       = module.ecs.cluster_name
}

output "cluster_capacity_providers" {
  description = "Map of cluster capacity providers attributes"
  value       = module.ecs.cluster_capacity_providers
}

output "cluster_autoscaling_capacity_providers" {
  description = "Map of capacity providers created and their attributes"
  value       = module.ecs.autoscaling_capacity_providers
}

output "ecs_task_execution_role_name" {
  # FIX: this output exposes the role *name*; the description previously said "ARN".
  description = "The name of the task execution role"
  value       = module.ecs.task_exec_iam_role_name
}

output "ecs_task_execution_role_arn" {
  description = "The ARN of the task execution role"
  value       = module.ecs.task_exec_iam_role_arn
}

output "application_url" {
  value       = "http://${module.service_alb.lb_dns_name}"
  description = "Copy this value in your browser in order to access the deployed app"
}
-------------------------------------------------------------------------------- /patterns/fargate-cluster/terraform.tfvars: --------------------------------------------------------------------------------
################################################################################
# ECS Cluster Fargate Configuration
################################################################################

cluster_name = "ecs-core"

tags = {
  Environment = "Development"
  Project     = "Spok"
}

account = {
  region = "us-east-1"
}

ecr_repository_name = "ecs-core"

################################################################################
# ECS Service Sample
################################################################################

service_sample = {
  cpu    = 2048
  memory = 4096
  name   = "ecsdemo"
}

container_sample = {
  cpu    = 512
  memory = 1024
  port   = 3000
  name   = "ecs-sample"
  image  = "public.ecr.aws/aws-containers/ecsdemo-nodejs:c3e96da"
}

# container_sample_entrypoint = ["/.aquasec/bin/microenforcer","bash", "/usr/src/app/startup.sh"]
# container_sample_command = []
# container_sample_environment = [
#   {
#
#     name  = "AQUA_MICROENFORCER"
#     value = "1"
#   },
#   {
#     name  = "AQUA_SERVER"
#     value = "xxxxxxx-gw.cloud.aquasec.com:443"
#   },
#   {
#     name  = "AQUA_TOKEN"
#     value = "xxxxxx-xxxxx-xxxxx-xxxxx-xxxxx"
#   },
#   {
#     name  = "AQUA_IMAGE_ID"
#     value = "xxxxx"
#   }
# ]

# container_sample_volumes_from = [{
#   sourceContainer = "aqua-sidecar"
#   readOnly        = false
# }]

################################################################################
# ALB
################################################################################

load_balancer = {
  enable_deletion_protection = false
}

################################################################################
# Networking
################################################################################

network = {
  vpc_cidr                      = "10.0.0.0/16"
  enable_nat_gateway            = true
  single_nat_gateway            = true
  enable_dns_hostnames          = true
  manage_default_network_acl    = true
  manage_default_route_table    = true
  manage_default_security_group = true
}

################################################################################
# Module - Aquasec
################################################################################

enable_aquasec                        = false
enable_aquasec_sidecar                = false
enable_aquasec_sidecar_ecr_repository = false

aquasec = {
  secret_manager_name = "aquasec"
}

aquasec_microenforcer_sidecar = {
  name               = "aqua-sidecar"
  cpu                = 512
  memory             = 1024
  essential          = false
  image              = "xxxxxxxx.dkr.ecr.us-east-1.amazonaws.com/aqua-sidecar:latest"
  memory_reservation = 50
}

secret_aquasec = {
  aqua_url = "https://cloud.aquasec.com"
  username = "username"
  password = "password"
}


################################################################################
# Module - Datadog
################################################################################

enable_datadog = false

# NOTE(review): "containters" looks like a typo for "containers"; kept as-is
# because renaming would change the SNS topic the Datadog monitors notify —
# confirm before fixing.
sns_topic_name_for_alerts = "sns-containters-ecs-topic-alerts"

datadog_integration_aws = {
  roleName                           = "DatadogAWSIntegrationRole"
  alert_cpuutilization_threshold     = "80"
  alert_memory_utilization_threshold = "80"
  secret_manager_name                = "datadog"
  external_id                        = "XXXXXX"
}

secret_datadog = {
  datadog_api_key = "datadog_api_key"
  datadog_app_key = "datadog_app_key"
}

################################################################################
# Module - Codepipeline with Github
################################################################################

enable_codepipeline_github = false

repository_name     = "ecsdemo-python"
secret_manager_name = "github"

secret_github = {
  code_star_connection_arn = "arn:aws:codeconnections:us-east-1:xxxxxxxxx:connection/xxxxx-xxxx-xxxx-xxxx-xxxxxxxxx"
  organization_name        = "xxxxxxxx"
}
-------------------------------------------------------------------------------- /patterns/fargate-cluster/variables.tf: --------------------------------------------------------------------------------
################################################################################
# ECS Cluster Fargate Configuration
################################################################################
variable "cluster_name" {
  description = "Cluster Name"
  type        = string
  default     = "ecs-core"
}

variable "tags" {
  description = "Resources Tagging"
  type        = map(string)
  default = {
    Environment = "Development"
    Project     = "Spok"
  }
}

variable "account" {
  description = "Generic parameters for the variable"
  type        = map(string)
  default = {
    region = "us-east-1"
  }
}

variable "ecr_repository_name" {
  type        = string
  description = "The ECR repository name for the app"
}

################################################################################
# ECS Service Sample
################################################################################
variable "service_sample" {
  description = "Parameter for Service Sample"
  type        = map(string)
  default = {
    cpu    = 1024
    memory = 4096
    name   = "ecsdemo"
  }
}

variable "container_sample" {
  description = "Parameter for Container Sample"
  type        = map(string)
  default = {
    cpu    = 512
    memory = 1024
    port   = 3000
    name   = "ecs-sample"
    image  = "public.ecr.aws/aws-containers/ecsdemo-nodejs:c3e96da"
  }
}

variable "container_sample_entrypoint" {
  description = "Container Sample Entrypoint"
  type        = list(any)
  default     = []
}

variable "container_sample_command" {
  description = "Container Sample Command"
  type        = list(any)
  default     = []
}

variable "container_sample_environment" {
  description = "Container Sample Environment Variables"
  type        = list(any)
  default     = []
}

variable "container_sample_volumes_from" {
  description = "Container Sample Volumes From Container"
  type = list(object({
    sourceContainer = string,
    readOnly        = bool
  }))
  default = []
}

################################################################################
# Networking
################################################################################

# Networking

variable "network" {
  description = "Parameter for Networking"
  type        = map(string)
  default = {
    vpc_cidr                      = "10.0.0.0/16"
    enable_nat_gateway            = true
    single_nat_gateway            = true
    enable_dns_hostnames          = true
    manage_default_network_acl    = true
    manage_default_route_table    = true
    manage_default_security_group = true
  }
}

################################################################################
# Module - Aquasec
################################################################################

variable "secret_aquasec" {
  description = "AquaSec credential placeholders stored in Secrets Manager"
  type        = map(string)
  default = {
    aqua_url = "aqua_url"
    username = "username"
    password = "password"
  }
}

variable "enable_aquasec" {
  description = "Enable or Disable Aquasec Integration"
  type        = bool
  default     = false
}

variable "enable_aquasec_sidecar" {
  description = "Enable or Disable Aquasec Sidecar"
  type        = bool
  default     = false
}

variable "enable_aquasec_sidecar_ecr_repository" {
  description = "Enable or Disable Aquasec Sidecar ECR Registry"
  type        = bool
  default     = false
}

variable "aquasec" {
  description = "Parameter for AquaSec"
  type        = map(string)
  default = {
    secret_manager_name = "aquasec"
  }
}

variable "aquasec_registry" {
  description = "Parameter for AquaSec Integration Registry"
  type        = map(string)
  default = {
    name            = "ECR Integration"
    type            = "AWS"
    author          = "example@amazon.com"
    arn_role        = "arn:aws:iam::xxxxxxxx:role/AquaSec"
    sts_external_id = "AquaSecExternalIDxxxxxxxx"
    image_pull      = "xyz"
    tag_included    = "xyz"
    tag_excluded    = "xyz"
    ecr_repository  = "xxxxxxxx.dkr.ecr.us-east-1.amazonaws.com"
  }
}

variable "aquasec_microenforcer_sidecar" {
  description = "Parameter for AquaSec MicroEnforcer Sidecar"
  type        = map(string)
  default = {
    name               = "aqua-sidecar"
    cpu                = 512
    memory             = 1024
    essential          = false
    image              = "xxxxxxxx.dkr.ecr.us-east-1.amazonaws.com/aqua-sidecar:latest"
    memory_reservation = 50
  }
}

################################################################################
# Module - Datadog
################################################################################

variable "secret_datadog" {
  description = "Names of the Datadog API/APP key entries stored in Secrets Manager"
  type        = map(string)
  default = {
    datadog_api_key = "datadog_api_key"
    datadog_app_key = "datadog_app_key"
  }
}

variable "enable_datadog" {
  description = "Enable or Disable Datadog Integration"
  type        = bool
  default     = false
}

variable "sns_topic_name_for_alerts" {
  description = "SNS Topic for alerts"
  type        = string
  nullable    = false
}

variable "datadog_integration_aws" {
  description = "Datadog integration variables"
  type        = map(string)
  # FIX: defaults previously exposed cpuutilization / memory_utilization, but
  # the datadog module reads alert_cpuutilization_threshold and
  # alert_memory_utilization_threshold (the keys terraform.tfvars also sets);
  # keys now match so the module works without an override.
  default = {
    roleName                           = "DatadogAWSIntegrationRole"
    alert_cpuutilization_threshold     = "80"
    alert_memory_utilization_threshold = "80"
  }
}

################################################################################
# Module - Codepipeline with Github
################################################################################

variable "enable_codepipeline_github" {
  description = "Enable or Disable Codepipeline and Github Integration"
  type        = bool
  default     = false
}

variable "secret_manager_name" {
  type        = string
  description = "Github Secret Manager Name"
}

variable "repository_name" {
  type        = string
  description = "The repository name to use in CodePipeline source stage"
}

variable "secret_github" {
  description = "GitHub connection placeholders (CodeStar connection ARN and organization)"
  type        = map(string)
  default = {
    code_star_connection_arn = "code_star_connection_arn"
    organization_name        = "organization_name"
  }
}


################################################################################
# Load Balancer Configurations
################################################################################

variable "load_balancer" {
  description = "Parameters for Load Balancer"
  type        = map(string)
  default = {
    enable_deletion_protection = true
  }
}
-------------------------------------------------------------------------------- /patterns/fargate-cluster/versions.tf: --------------------------------------------------------------------------------
terraform {
  required_version = ">= 1.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 4.55"
    }
  }
}
--------------------------------------------------------------------------------