├── .github
├── template.md
└── workflows
│ ├── bridgecrew-action.yml
│ └── tfsec-action.yml
├── .gitignore
├── LICENSE
├── README.md
├── terraform
├── aws
│ ├── consts.tf
│ ├── db-app.tf
│ ├── ec2.tf
│ ├── ecr.tf
│ ├── eks.tf
│ ├── elb.tf
│ ├── es.tf
│ ├── iam.tf
│ ├── kms.tf
│ ├── lambda.tf
│ ├── neptune.tf
│ ├── providers.tf
│ ├── resources
│ │ ├── Dockerfile
│ │ ├── customer-master.xlsx
│ │ └── lambda_function_payload.zip
│ └── s3.tf
├── azure
│ ├── aks.tf
│ ├── app_service.tf
│ ├── instance.tf
│ ├── key_vault.tf
│ ├── logging.tf
│ ├── networking.tf
│ ├── policies.tf
│ ├── provider.tf
│ ├── random.tf
│ ├── resource_group.tf
│ ├── roles.tf
│ ├── security_center.tf
│ ├── sql.tf
│ ├── storage.tf
│ └── variables.tf
└── gcp
│ ├── big_data.tf
│ ├── gcs.tf
│ ├── gke.tf
│ ├── instances.tf
│ ├── networks.tf
│ ├── provider.tf
│ └── variables.tf
└── terragoat-logo.png
/.github/template.md:
--------------------------------------------------------------------------------
1 | # TerraGoat - Vulnerable Terraform Infrastructure
2 |
3 | [](https://bridgecrew.io/?utm_source=github&utm_medium=organic_oss&utm_campaign=terragoat)
4 | [](https://www.bridgecrew.cloud/link/badge?vcs=github&fullRepo=bridgecrewio%2Fterragoat&benchmark=INFRASTRUCTURE+SECURITY)
5 | [](https://www.bridgecrew.cloud/link/badge?vcs=github&fullRepo=bridgecrewio%2Fterragoat&benchmark=CIS+AZURE+V1.1)
6 | [](https://www.bridgecrew.cloud/link/badge?vcs=github&fullRepo=bridgecrewio%2Fterragoat&benchmark=CIS+GCP+V1.1)
7 | [](https://www.bridgecrew.cloud/link/badge?vcs=github&fullRepo=bridgecrewio%2Fterragoat&benchmark=CIS+AWS+V1.2)
8 | [](https://www.bridgecrew.cloud/link/badge?vcs=github&fullRepo=bridgecrewio%2Fterragoat&benchmark=PCI-DSS+V3.2)
9 | 
10 | [](https://slack.bridgecrew.io/?utm_source=github&utm_medium=organic_oss&utm_campaign=terragoat)
11 |
12 |
13 | TerraGoat is Bridgecrew's "Vulnerable by Design" Terraform repository.
14 | 
15 |
16 | TerraGoat is Bridgecrew's "Vulnerable by Design" Terraform repository.
17 | TerraGoat is a learning and training project that demonstrates how common configuration errors can find their way into production cloud environments.
18 |
19 | ## Table of Contents
20 |
21 | * [Introduction](#introduction)
22 | * [Getting Started](#getting-started)
23 | * [AWS](#aws-setup)
24 | * [Azure](#azure-setup)
25 | * [GCP](#gcp-setup)
26 | * [Contributing](#contributing)
27 | * [Support](#support)
28 |
29 | ## Introduction
30 |
31 | TerraGoat was built to enable DevSecOps design and implement a sustainable misconfiguration prevention strategy. It can be used to test a policy-as-code framework like [Bridgecrew](https://bridgecrew.io/?utm_source=github&utm_medium=organic_oss&utm_campaign=terragoat) & [Checkov](https://github.com/bridgecrewio/checkov/), inline-linters, pre-commit hooks or other code scanning methods.
32 |
33 | TerraGoat follows the tradition of existing *Goat projects that provide a baseline training ground to practice implementing secure development best practices for cloud infrastructure.
34 |
35 | ## Important notes
36 |
37 | * **Where to get help:** the [Bridgecrew Community Slack](https://slack.bridgecrew.io/?utm_source=github&utm_medium=organic_oss&utm_campaign=terragoat)
38 |
39 | Before you proceed, please take note of this warning:
40 | > :warning: TerraGoat creates intentionally vulnerable AWS resources into your account. **DO NOT deploy TerraGoat in a production environment or alongside any sensitive AWS resources.**
41 |
42 | ## Requirements
43 |
44 | * Terraform 0.12
45 | * aws cli
46 | * azure cli
47 |
48 | To prevent vulnerable infrastructure from arriving in production see: [Bridgecrew](https://bridgecrew.io/?utm_source=github&utm_medium=organic_oss&utm_campaign=terragoat) & [checkov](https://github.com/bridgecrewio/checkov/), the open source static analysis tool for infrastructure as code.
49 |
50 | ## Getting started
51 |
52 | ### AWS Setup
53 |
54 | #### Installation (AWS)
55 |
56 | You can deploy multiple TerraGoat stacks in a single AWS account using the parameter `TF_VAR_environment`.
57 |
58 | #### Create an S3 Bucket backend to keep Terraform state
59 |
60 | ```bash
61 | export TERRAGOAT_STATE_BUCKET="mydevsecops-bucket"
62 | export TF_VAR_company_name=acme
63 | export TF_VAR_environment=mydevsecops
64 | export TF_VAR_region="us-west-2"
65 |
66 | aws s3api create-bucket --bucket $TERRAGOAT_STATE_BUCKET \
67 | --region $TF_VAR_region --create-bucket-configuration LocationConstraint=$TF_VAR_region
68 |
69 | # Enable versioning
70 | aws s3api put-bucket-versioning --bucket $TERRAGOAT_STATE_BUCKET --versioning-configuration Status=Enabled
71 |
72 | # Enable encryption
73 | aws s3api put-bucket-encryption --bucket $TERRAGOAT_STATE_BUCKET --server-side-encryption-configuration '{
74 | "Rules": [
75 | {
76 | "ApplyServerSideEncryptionByDefault": {
77 | "SSEAlgorithm": "aws:kms"
78 | }
79 | }
80 | ]
81 | }'
82 | ```
83 |
84 | #### Apply TerraGoat (AWS)
85 |
86 | ```bash
87 | cd terraform/aws/
88 | terraform init \
89 | -backend-config="bucket=$TERRAGOAT_STATE_BUCKET" \
90 | -backend-config="key=$TF_VAR_company_name-$TF_VAR_environment.tfstate" \
91 | -backend-config="region=$TF_VAR_region"
92 |
93 | terraform apply
94 | ```
95 |
96 | #### Remove TerraGoat (AWS)
97 |
98 | ```bash
99 | terraform destroy
100 | ```
101 |
102 | #### Creating multiple TerraGoat AWS stacks
103 |
104 | ```bash
105 | cd terraform/aws/
106 | export TERRAGOAT_ENV=$TF_VAR_environment
107 | export TERRAGOAT_STACKS_NUM=5
108 | for i in $(seq 1 $TERRAGOAT_STACKS_NUM)
109 | do
110 | export TF_VAR_environment=$TERRAGOAT_ENV$i
111 | terraform init \
112 | -backend-config="bucket=$TERRAGOAT_STATE_BUCKET" \
113 | -backend-config="key=$TF_VAR_company_name-$TF_VAR_environment.tfstate" \
114 | -backend-config="region=$TF_VAR_region"
115 |
116 | terraform apply -auto-approve
117 | done
118 | ```
119 |
120 | #### Deleting multiple TerraGoat stacks (AWS)
121 |
122 | ```bash
123 | cd terraform/aws/
124 | export TF_VAR_environment=$TERRAGOAT_ENV
125 | for i in $(seq 1 $TERRAGOAT_STACKS_NUM)
126 | do
127 | export TF_VAR_environment=$TERRAGOAT_ENV$i
128 | terraform init \
129 | -backend-config="bucket=$TERRAGOAT_STATE_BUCKET" \
130 | -backend-config="key=$TF_VAR_company_name-$TF_VAR_environment.tfstate" \
131 | -backend-config="region=$TF_VAR_region"
132 |
133 | terraform destroy -auto-approve
134 | done
135 | ```
136 |
137 | ### Azure Setup
138 |
139 | #### Installation (Azure)
140 |
141 | You can deploy multiple TerraGoat stacks in a single Azure subscription using the parameter `TF_VAR_environment`.
142 |
143 | #### Create an Azure Storage Account backend to keep Terraform state
144 |
145 | ```bash
146 | export TERRAGOAT_RESOURCE_GROUP="TerraGoatRG"
147 | export TERRAGOAT_STATE_STORAGE_ACCOUNT="mydevsecopssa"
148 | export TERRAGOAT_STATE_CONTAINER="mydevsecops"
149 | export TF_VAR_environment="dev"
150 | export TF_VAR_region="westus"
151 |
152 | # Create resource group
153 | az group create --location $TF_VAR_region --name $TERRAGOAT_RESOURCE_GROUP
154 |
155 | # Create storage account
156 | az storage account create --name $TERRAGOAT_STATE_STORAGE_ACCOUNT --resource-group $TERRAGOAT_RESOURCE_GROUP --location $TF_VAR_region --sku Standard_LRS --kind StorageV2 --https-only true --encryption-services blob
157 |
158 | # Get storage account key
159 | ACCOUNT_KEY=$(az storage account keys list --resource-group $TERRAGOAT_RESOURCE_GROUP --account-name $TERRAGOAT_STATE_STORAGE_ACCOUNT --query [0].value -o tsv)
160 |
161 | # Create blob container
162 | az storage container create --name $TERRAGOAT_STATE_CONTAINER --account-name $TERRAGOAT_STATE_STORAGE_ACCOUNT --account-key $ACCOUNT_KEY
163 | ```
164 |
165 | #### Apply TerraGoat (Azure)
166 |
167 | ```bash
168 | cd terraform/azure/
169 | terraform init -reconfigure -backend-config="resource_group_name=$TERRAGOAT_RESOURCE_GROUP" \
170 | -backend-config "storage_account_name=$TERRAGOAT_STATE_STORAGE_ACCOUNT" \
171 | -backend-config="container_name=$TERRAGOAT_STATE_CONTAINER" \
172 | -backend-config "key=$TF_VAR_environment.terraform.tfstate"
173 |
174 | terraform apply
175 | ```
176 |
177 | #### Remove TerraGoat (Azure)
178 |
179 | ```bash
180 | terraform destroy
181 | ```
182 |
183 | ### GCP Setup
184 |
185 | #### Installation (GCP)
186 |
187 | You can deploy multiple TerraGoat stacks in a single GCP project using the parameter `TF_VAR_environment`.
188 |
189 | #### Create a GCS backend to keep Terraform state
190 |
191 | To use terraform, a Service Account and matching set of credentials are required.
192 | If they do not exist, they must be manually created for the relevant project.
193 | To create the Service Account:
194 | 1. Sign into your GCP project, go to `IAM` > `Service Accounts`.
195 | 2. Click the `CREATE SERVICE ACCOUNT`.
196 | 3. Give a name to your service account (for example - `terragoat`) and click `CREATE`.
197 | 4. Grant the Service Account the `Project` > `Editor` role and click `CONTINUE`.
198 | 5. Click `DONE`.
199 |
200 | To create the credentials:
201 | 1. Sign into your GCP project, go to `IAM` > `Service Accounts` and click on the relevant Service Account.
202 | 2. Click `ADD KEY` > `Create new key` > `JSON` and click `CREATE`. This will create a `.json` file and download it to your computer.
203 |
204 | We recommend saving the key with a nicer name than the auto-generated one (i.e. `terragoat_credentials.json`), and storing the resulting JSON file inside `terraform/gcp` directory of terragoat.
205 | Once the credentials are set up, create the backend configuration as follows:
206 |
207 | ```bash
208 | export TF_VAR_environment="dev"
209 | export TF_TERRAGOAT_STATE_BUCKET=remote-state-bucket-terragoat
210 | export TF_VAR_credentials_path= # example: export TF_VAR_credentials_path=terragoat_credentials.json
211 | export TF_VAR_project=
212 |
213 | # Create storage bucket
214 | gsutil mb gs://${TF_TERRAGOAT_STATE_BUCKET}
215 | ```
216 |
217 | #### Apply TerraGoat (GCP)
218 |
219 | ```bash
220 | cd terraform/gcp/
221 | terraform init -reconfigure -backend-config="bucket=$TF_TERRAGOAT_STATE_BUCKET" \
222 | -backend-config "credentials=$TF_VAR_credentials_path" \
223 | -backend-config "prefix=terragoat/${TF_VAR_environment}"
224 |
225 | terraform apply
226 | ```
227 |
228 | #### Remove TerraGoat (GCP)
229 |
230 | ```bash
231 | terraform destroy
232 | ```
233 |
234 | ## Bridgecrew's IaC herd of goats
235 |
236 | * [CfnGoat](https://github.com/bridgecrewio/cfngoat) - Vulnerable by design Cloudformation template
237 | * [TerraGoat](https://github.com/bridgecrewio/terragoat) - Vulnerable by design Terraform stack
238 | * [CDKGoat](https://github.com/bridgecrewio/cdkgoat) - Vulnerable by design CDK application
239 |
240 | ## Contributing
241 |
242 | Contribution is welcomed!
243 |
244 | We would love to hear about more ideas on how to find vulnerable infrastructure-as-code design patterns.
245 |
246 | ## Support
247 |
248 | [Bridgecrew](https://bridgecrew.io/?utm_source=github&utm_medium=organic_oss&utm_campaign=terragoat) builds and maintains TerraGoat to encourage the adoption of policy-as-code.
249 |
250 | If you need direct support you can contact us at [info@bridgecrew.io](mailto:info@bridgecrew.io).
251 |
252 | ## Existing vulnerabilities (Auto-Generated)
253 |
--------------------------------------------------------------------------------
/.github/workflows/bridgecrew-action.yml:
--------------------------------------------------------------------------------
1 | # This is a basic workflow to help you get started with Actions
2 |
3 | name: Bridgecrew
4 |
5 | # Controls when the action will run. Triggers the workflow on push or pull request
6 | # events but only for the master branch
7 | on:
8 | push:
9 | branches: [ master ]
10 | pull_request:
11 | branches: [ master ]
12 |
13 | # A workflow run is made up of one or more jobs that can run sequentially or in parallel
14 | jobs:
15 | # This workflow contains a single job called "build"
16 | bridgecrew:
17 | runs-on: ubuntu-latest
18 | steps:
19 | - uses: actions/checkout@v2
20 |
21 | - name: Run Bridgecrew
22 | id: Bridgecrew
23 | uses: bridgecrewio/bridgecrew-action@master
24 | with:
25 | soft_fail: true
26 | env:
27 | GITHUB_REPOSITORY: ${{ github.repository }}
28 | GITHUB_REF: ${{ github.ref }}
29 | GITHUB_SHA: ${{ github.sha }}
30 | GITHUB_SERVER_URL: ${{ github.server_url }}
31 |
32 | - name: Expose report
33 | uses: actions/upload-artifact@v2
34 | with:
35 | name: SARIF results
36 | path: results.sarif
37 |
38 | # Uploads results.sarif to GitHub repository using the upload-sarif action
39 | - uses: github/codeql-action/upload-sarif@v1
40 | with:
41 | sarif_file: results.sarif
42 |
--------------------------------------------------------------------------------
/.github/workflows/tfsec-action.yml:
--------------------------------------------------------------------------------
1 | name: TFSec
2 |
3 | on:
4 | push:
5 | branches: [ master, main ]
6 | pull_request:
7 | branches: [ master, main ]
8 |
9 | jobs:
10 | tfsec:
11 | name: Run tfsec sarif report
12 | runs-on: ubuntu-latest
13 | permissions:
14 | actions: read
15 | contents: read
16 | security-events: write
17 |
18 | steps:
19 | - name: Clone repo
20 | uses: actions/checkout@v3
21 |
22 | - name: Run tfsec
23 | uses: aquasecurity/tfsec-sarif-action@v0.1.4
24 | with:
25 | sarif_file: tfsec.sarif
26 |
27 | - name: Upload SARIF file
28 | uses: github/codeql-action/upload-sarif@v2
29 | with:
30 | # Path to SARIF file relative to the root of the repository
31 | sarif_file: tfsec.sarif
32 |
33 | - uses: actions/upload-artifact@v3
34 | with:
35 | name: sarif-results
36 | path: tfsec.sarif
37 |
38 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Local .terraform directories
2 | **/.terraform/*
3 | .idea
4 |
5 | # .tfstate files
6 | *.tfstate
7 | *.tfstate.*
8 |
9 | # Crash log files
10 | crash.log
11 |
12 | # Ignore any .tfvars files that are generated automatically for each Terraform run. Most
13 | # .tfvars files are managed as part of configuration and so should be included in
14 | # version control.
15 | #
16 | # example.tfvars
17 |
18 | # Ignore override files as they are usually used to override resources locally and so
19 | # are not checked in
20 | override.tf
21 | override.tf.json
22 | *_override.tf
23 | *_override.tf.json
24 | credentials.json
25 | *.tfbackend
26 | *.tfvars
27 |
28 | # Include override files you do wish to add to version control using negated pattern
29 | #
30 | # !example_override.tf
31 |
32 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
33 | # example: *tfplan*
34 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # TerraGoat - Vulnerable Terraform Infrastructure
2 |
3 | [](https://bridgecrew.io/?utm_source=github&utm_medium=organic_oss&utm_campaign=terragoat)
4 | [](https://www.bridgecrew.cloud/link/badge?vcs=github&fullRepo=bridgecrewio%2Fterragoat&benchmark=INFRASTRUCTURE+SECURITY)
5 | [](https://www.bridgecrew.cloud/link/badge?vcs=github&fullRepo=bridgecrewio%2Fterragoat&benchmark=CIS+AZURE+V1.1)
6 | [](https://www.bridgecrew.cloud/link/badge?vcs=github&fullRepo=bridgecrewio%2Fterragoat&benchmark=CIS+GCP+V1.1)
7 | [](https://www.bridgecrew.cloud/link/badge?vcs=github&fullRepo=bridgecrewio%2Fterragoat&benchmark=CIS+AWS+V1.2)
8 | [](https://www.bridgecrew.cloud/link/badge?vcs=github&fullRepo=bridgecrewio%2Fterragoat&benchmark=PCI-DSS+V3.2)
9 | 
10 | [](https://slack.bridgecrew.io/?utm_source=github&utm_medium=organic_oss&utm_campaign=terragoat)
11 |
12 |
13 | TerraGoat is Bridgecrew's "Vulnerable by Design" Terraform repository.
14 | 
15 |
16 | TerraGoat is Bridgecrew's "Vulnerable by Design" Terraform repository.
17 | TerraGoat is a learning and training project that demonstrates how common configuration errors can find their way into production cloud environments.
18 |
19 | ## Table of Contents
20 |
21 | * [Introduction](#introduction)
22 | * [Getting Started](#getting-started)
23 | * [AWS](#aws-setup)
24 | * [Azure](#azure-setup)
25 | * [GCP](#gcp-setup)
26 | * [Contributing](#contributing)
27 | * [Support](#support)
28 |
29 | ## Introduction
30 |
31 | TerraGoat was built to enable DevSecOps design and implement a sustainable misconfiguration prevention strategy. It can be used to test a policy-as-code framework like [Bridgecrew](https://bridgecrew.io/?utm_source=github&utm_medium=organic_oss&utm_campaign=terragoat) & [Checkov](https://github.com/bridgecrewio/checkov/), inline-linters, pre-commit hooks or other code scanning methods.
32 |
33 | TerraGoat follows the tradition of existing *Goat projects that provide a baseline training ground to practice implementing secure development best practices for cloud infrastructure.
34 |
35 | ## Important notes
36 |
37 | * **Where to get help:** the [Bridgecrew Community Slack](https://slack.bridgecrew.io/?utm_source=github&utm_medium=organic_oss&utm_campaign=terragoat)
38 |
39 | Before you proceed, please take note of this warning:
40 | > :warning: TerraGoat creates intentionally vulnerable AWS resources into your account. **DO NOT deploy TerraGoat in a production environment or alongside any sensitive AWS resources.**
41 |
42 | ## Requirements
43 |
44 | * Terraform 0.12
45 | * aws cli
46 | * azure cli
47 |
48 | To prevent vulnerable infrastructure from arriving in production see: [Bridgecrew](https://bridgecrew.io/?utm_source=github&utm_medium=organic_oss&utm_campaign=terragoat) & [checkov](https://github.com/bridgecrewio/checkov/), the open source static analysis tool for infrastructure as code.
49 |
50 | ## Getting started
51 |
52 | ### AWS Setup
53 |
54 | #### Installation (AWS)
55 |
56 | You can deploy multiple TerraGoat stacks in a single AWS account using the parameter `TF_VAR_environment`.
57 |
58 | #### Create an S3 Bucket backend to keep Terraform state
59 |
60 | ```bash
61 | export TERRAGOAT_STATE_BUCKET="mydevsecops-bucket"
62 | export TF_VAR_company_name=acme
63 | export TF_VAR_environment=mydevsecops
64 | export TF_VAR_region="us-west-2"
65 |
66 | aws s3api create-bucket --bucket $TERRAGOAT_STATE_BUCKET \
67 | --region $TF_VAR_region --create-bucket-configuration LocationConstraint=$TF_VAR_region
68 |
69 | # Enable versioning
70 | aws s3api put-bucket-versioning --bucket $TERRAGOAT_STATE_BUCKET --versioning-configuration Status=Enabled
71 |
72 | # Enable encryption
73 | aws s3api put-bucket-encryption --bucket $TERRAGOAT_STATE_BUCKET --server-side-encryption-configuration '{
74 | "Rules": [
75 | {
76 | "ApplyServerSideEncryptionByDefault": {
77 | "SSEAlgorithm": "aws:kms"
78 | }
79 | }
80 | ]
81 | }'
82 | ```
83 |
84 | #### Apply TerraGoat (AWS)
85 |
86 | ```bash
87 | cd terraform/aws/
88 | terraform init \
89 | -backend-config="bucket=$TERRAGOAT_STATE_BUCKET" \
90 | -backend-config="key=$TF_VAR_company_name-$TF_VAR_environment.tfstate" \
91 | -backend-config="region=$TF_VAR_region"
92 |
93 | terraform apply
94 | ```
95 |
96 | #### Remove TerraGoat (AWS)
97 |
98 | ```bash
99 | terraform destroy
100 | ```
101 |
102 | #### Creating multiple TerraGoat AWS stacks
103 |
104 | ```bash
105 | cd terraform/aws/
106 | export TERRAGOAT_ENV=$TF_VAR_environment
107 | export TERRAGOAT_STACKS_NUM=5
108 | for i in $(seq 1 $TERRAGOAT_STACKS_NUM)
109 | do
110 | export TF_VAR_environment=$TERRAGOAT_ENV$i
111 | terraform init \
112 | -backend-config="bucket=$TERRAGOAT_STATE_BUCKET" \
113 | -backend-config="key=$TF_VAR_company_name-$TF_VAR_environment.tfstate" \
114 | -backend-config="region=$TF_VAR_region"
115 |
116 | terraform apply -auto-approve
117 | done
118 | ```
119 |
120 | #### Deleting multiple TerraGoat stacks (AWS)
121 |
122 | ```bash
123 | cd terraform/aws/
124 | export TF_VAR_environment=$TERRAGOAT_ENV
125 | for i in $(seq 1 $TERRAGOAT_STACKS_NUM)
126 | do
127 | export TF_VAR_environment=$TERRAGOAT_ENV$i
128 | terraform init \
129 | -backend-config="bucket=$TERRAGOAT_STATE_BUCKET" \
130 | -backend-config="key=$TF_VAR_company_name-$TF_VAR_environment.tfstate" \
131 | -backend-config="region=$TF_VAR_region"
132 |
133 | terraform destroy -auto-approve
134 | done
135 | ```
136 |
137 | ### Azure Setup
138 |
139 | #### Installation (Azure)
140 |
141 | You can deploy multiple TerraGoat stacks in a single Azure subscription using the parameter `TF_VAR_environment`.
142 |
143 | #### Create an Azure Storage Account backend to keep Terraform state
144 |
145 | ```bash
146 | export TERRAGOAT_RESOURCE_GROUP="TerraGoatRG"
147 | export TERRAGOAT_STATE_STORAGE_ACCOUNT="mydevsecopssa"
148 | export TERRAGOAT_STATE_CONTAINER="mydevsecops"
149 | export TF_VAR_environment="dev"
150 | export TF_VAR_region="westus"
151 |
152 | # Create resource group
153 | az group create --location $TF_VAR_region --name $TERRAGOAT_RESOURCE_GROUP
154 |
155 | # Create storage account
156 | az storage account create --name $TERRAGOAT_STATE_STORAGE_ACCOUNT --resource-group $TERRAGOAT_RESOURCE_GROUP --location $TF_VAR_region --sku Standard_LRS --kind StorageV2 --https-only true --encryption-services blob
157 |
158 | # Get storage account key
159 | ACCOUNT_KEY=$(az storage account keys list --resource-group $TERRAGOAT_RESOURCE_GROUP --account-name $TERRAGOAT_STATE_STORAGE_ACCOUNT --query [0].value -o tsv)
160 |
161 | # Create blob container
162 | az storage container create --name $TERRAGOAT_STATE_CONTAINER --account-name $TERRAGOAT_STATE_STORAGE_ACCOUNT --account-key $ACCOUNT_KEY
163 | ```
164 |
165 | #### Apply TerraGoat (Azure)
166 |
167 | ```bash
168 | cd terraform/azure/
169 | terraform init -reconfigure -backend-config="resource_group_name=$TERRAGOAT_RESOURCE_GROUP" \
170 | -backend-config "storage_account_name=$TERRAGOAT_STATE_STORAGE_ACCOUNT" \
171 | -backend-config="container_name=$TERRAGOAT_STATE_CONTAINER" \
172 | -backend-config "key=$TF_VAR_environment.terraform.tfstate"
173 |
174 | terraform apply
175 | ```
176 |
177 | #### Remove TerraGoat (Azure)
178 |
179 | ```bash
180 | terraform destroy
181 | ```
182 |
183 | ### GCP Setup
184 |
185 | #### Installation (GCP)
186 |
187 | You can deploy multiple TerraGoat stacks in a single GCP project using the parameter `TF_VAR_environment`.
188 |
189 | #### Create a GCS backend to keep Terraform state
190 |
191 | To use terraform, a Service Account and matching set of credentials are required.
192 | If they do not exist, they must be manually created for the relevant project.
193 | To create the Service Account:
194 | 1. Sign into your GCP project, go to `IAM` > `Service Accounts`.
195 | 2. Click the `CREATE SERVICE ACCOUNT` button.
196 | 3. Give a name to your service account (for example - `terragoat`) and click `CREATE`.
197 | 4. Grant the Service Account the `Project` > `Editor` role and click `CONTINUE`.
198 | 5. Click `DONE`.
199 |
200 | To create the credentials:
201 | 1. Sign into your GCP project, go to `IAM` > `Service Accounts` and click on the relevant Service Account.
202 | 2. Click `ADD KEY` > `Create new key` > `JSON` and click `CREATE`. This will create a `.json` file and download it to your computer.
203 |
204 | We recommend saving the key with a nicer name than the auto-generated one (e.g. `terragoat_credentials.json`), and storing the resulting JSON file inside the `terraform/gcp` directory of TerraGoat.
205 | Once the credentials are set up, create the backend configuration as follows:
206 |
207 | ```bash
208 | export TF_VAR_environment="dev"
209 | export TF_TERRAGOAT_STATE_BUCKET=remote-state-bucket-terragoat
210 | export TF_VAR_credentials_path= # example: export TF_VAR_credentials_path=terragoat_credentials.json
211 | export TF_VAR_project=
212 |
213 | # Create storage bucket
214 | gsutil mb gs://${TF_TERRAGOAT_STATE_BUCKET}
215 | ```
216 |
217 | #### Apply TerraGoat (GCP)
218 |
219 | ```bash
220 | cd terraform/gcp/
221 | terraform init -reconfigure -backend-config="bucket=$TF_TERRAGOAT_STATE_BUCKET" \
222 | -backend-config "credentials=$TF_VAR_credentials_path" \
223 | -backend-config "prefix=terragoat/${TF_VAR_environment}"
224 |
225 | terraform apply
226 | ```
227 |
228 | #### Remove TerraGoat (GCP)
229 |
230 | ```bash
231 | terraform destroy
232 | ```
233 |
234 | ## Bridgecrew's IaC herd of goats
235 |
236 | * [CfnGoat](https://github.com/bridgecrewio/cfngoat) - Vulnerable by design Cloudformation template
237 | * [TerraGoat](https://github.com/bridgecrewio/terragoat) - Vulnerable by design Terraform stack
238 | * [CDKGoat](https://github.com/bridgecrewio/cdkgoat) - Vulnerable by design CDK application
239 |
240 | ## Contributing
241 |
242 | Contribution is welcomed!
243 |
244 | We would love to hear about more ideas on how to find vulnerable infrastructure-as-code design patterns.
245 |
246 | ## Support
247 |
248 | [Bridgecrew](https://bridgecrew.io/?utm_source=github&utm_medium=organic_oss&utm_campaign=terragoat) builds and maintains TerraGoat to encourage the adoption of policy-as-code.
249 |
250 | If you need direct support you can contact us at [info@bridgecrew.io](mailto:info@bridgecrew.io).
251 |
252 | ## Existing vulnerabilities (Auto-Generated)
253 | | | check_id | file | resource | check_name | guideline |
254 | |-----|--------------|---------------------------|------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------|
255 | | 0 | CKV_AWS_41 | /aws/providers.tf | aws.plain_text_access_keys_provider | Ensure no hard coded AWS access key and and secret key exists in provider | https://docs.bridgecrew.io/docs/bc_aws_secrets_5 |
256 | | 1 | CKV_AWS_33 | /aws/ecr.tf | aws_ecr_repository.repository | Ensure ECR image scanning on push is enabled | https://docs.bridgecrew.io/docs/general_8 |
257 | | 2 | CKV_AWS_51 | /aws/ecr.tf | aws_ecr_repository.repository | Ensure ECR Image Tags are immutable | https://docs.bridgecrew.io/docs/bc_aws_general_24 |
258 | | 3 | CKV_AWS_46 | /aws/ec2.tf | aws_instance.web_host | Ensure no hard coded AWS access key and secret key exists in EC2 user data | https://docs.bridgecrew.io/docs/bc_aws_secrets_1 |
259 | | 4 | CKV_AWS_8 | /aws/ec2.tf | aws_instance.web_host | Ensure all data stored in the Launch configuration EBS is securely encrypted | https://docs.bridgecrew.io/docs/general_13 |
260 | | 5 | CKV_AWS_79 | /aws/ec2.tf | aws_instance.web_host | Ensure Instance Metadata Service Version 1 is not enabled | https://docs.bridgecrew.io/docs/bc_aws_general_31 |
261 | | 6 | CKV_AWS_3 | /aws/ec2.tf | aws_ebs_volume.web_host_storage | Ensure all data stored in the EBS is securely encrypted | https://docs.bridgecrew.io/docs/general_3-encrypt-eps-volume |
262 | | 7 | CKV_AWS_24 | /aws/ec2.tf | aws_security_group.web-node | Ensure no security groups allow ingress from 0.0.0.0:0 to port 22 | https://docs.bridgecrew.io/docs/networking_1-port-security |
263 | | 8 | CKV_AWS_52 | /aws/ec2.tf | aws_s3_bucket.flowbucket | Ensure S3 bucket has MFA delete enabled | |
264 | | 9 | CKV_AWS_21 | /aws/ec2.tf | aws_s3_bucket.flowbucket | Ensure all data stored in the S3 bucket have versioning enabled | https://docs.bridgecrew.io/docs/s3_16-enable-versioning |
265 | | 10 | CKV_AWS_18 | /aws/ec2.tf | aws_s3_bucket.flowbucket | Ensure the S3 bucket has access logging enabled | https://docs.bridgecrew.io/docs/s3_13-enable-logging |
266 | | 11 | CKV_AWS_19 | /aws/ec2.tf | aws_s3_bucket.flowbucket | Ensure all data stored in the S3 bucket is securely encrypted at rest | https://docs.bridgecrew.io/docs/s3_14-data-encrypted-at-rest |
267 | | 12 | CKV_AWS_7 | /aws/kms.tf | aws_kms_key.logs_key | Ensure rotation for customer created CMKs is enabled | https://docs.bridgecrew.io/docs/logging_8 |
268 | | 13 | CKV_AWS_37 | /aws/eks.tf | aws_eks_cluster.eks_cluster | Ensure Amazon EKS control plane logging enabled for all log types | https://docs.bridgecrew.io/docs/bc_aws_kubernetes_4 |
269 | | 14 | CKV_AWS_39 | /aws/eks.tf | aws_eks_cluster.eks_cluster | Ensure Amazon EKS public endpoint disabled | https://docs.bridgecrew.io/docs/bc_aws_kubernetes_2 |
270 | | 15 | CKV_AWS_58 | /aws/eks.tf | aws_eks_cluster.eks_cluster | Ensure EKS Cluster has Secrets Encryption Enabled | https://docs.bridgecrew.io/docs/bc_aws_kubernetes_3 |
271 | | 16 | CKV_AWS_38 | /aws/eks.tf | aws_eks_cluster.eks_cluster | Ensure Amazon EKS public endpoint not accessible to 0.0.0.0/0 | https://docs.bridgecrew.io/docs/bc_aws_kubernetes_1 |
272 | | 17 | CKV_AWS_50 | /aws/lambda.tf | aws_lambda_function.analysis_lambda | X-ray tracing is enabled for Lambda | https://docs.bridgecrew.io/page/guideline-does-not-exist |
273 | | 18 | CKV_AWS_45 | /aws/lambda.tf | aws_lambda_function.analysis_lambda | Ensure no hard coded AWS access key and secret key exists in lambda environment | https://docs.bridgecrew.io/docs/bc_aws_secrets_3 |
274 | | 19 | CKV_AWS_52 | /aws/s3.tf | aws_s3_bucket.data | Ensure S3 bucket has MFA delete enabled | |
275 | | 20 | CKV_AWS_21 | /aws/s3.tf | aws_s3_bucket.data | Ensure all data stored in the S3 bucket have versioning enabled | https://docs.bridgecrew.io/docs/s3_16-enable-versioning |
276 | | 21 | CKV_AWS_18 | /aws/s3.tf | aws_s3_bucket.data | Ensure the S3 bucket has access logging enabled | https://docs.bridgecrew.io/docs/s3_13-enable-logging |
277 | | 22 | CKV_AWS_20 | /aws/s3.tf | aws_s3_bucket.data | S3 Bucket has an ACL defined which allows public READ access. | https://docs.bridgecrew.io/docs/s3_1-acl-read-permissions-everyone |
278 | | 23 | CKV_AWS_19 | /aws/s3.tf | aws_s3_bucket.data | Ensure all data stored in the S3 bucket is securely encrypted at rest | https://docs.bridgecrew.io/docs/s3_14-data-encrypted-at-rest |
279 | | 24 | CKV_AWS_52 | /aws/s3.tf | aws_s3_bucket.financials | Ensure S3 bucket has MFA delete enabled | |
280 | | 25 | CKV_AWS_21 | /aws/s3.tf | aws_s3_bucket.financials | Ensure all data stored in the S3 bucket have versioning enabled | https://docs.bridgecrew.io/docs/s3_16-enable-versioning |
281 | | 26 | CKV_AWS_18 | /aws/s3.tf | aws_s3_bucket.financials | Ensure the S3 bucket has access logging enabled | https://docs.bridgecrew.io/docs/s3_13-enable-logging |
282 | | 27 | CKV_AWS_19 | /aws/s3.tf | aws_s3_bucket.financials | Ensure all data stored in the S3 bucket is securely encrypted at rest | https://docs.bridgecrew.io/docs/s3_14-data-encrypted-at-rest |
283 | | 28 | CKV_AWS_52 | /aws/s3.tf | aws_s3_bucket.operations | Ensure S3 bucket has MFA delete enabled | |
284 | | 29 | CKV_AWS_18 | /aws/s3.tf | aws_s3_bucket.operations | Ensure the S3 bucket has access logging enabled | https://docs.bridgecrew.io/docs/s3_13-enable-logging |
285 | | 30 | CKV_AWS_19 | /aws/s3.tf | aws_s3_bucket.operations | Ensure all data stored in the S3 bucket is securely encrypted at rest | https://docs.bridgecrew.io/docs/s3_14-data-encrypted-at-rest |
286 | | 31 | CKV_AWS_52 | /aws/s3.tf | aws_s3_bucket.data_science | Ensure S3 bucket has MFA delete enabled | |
287 | | 32 | CKV_AWS_19 | /aws/s3.tf | aws_s3_bucket.data_science | Ensure all data stored in the S3 bucket is securely encrypted at rest | https://docs.bridgecrew.io/docs/s3_14-data-encrypted-at-rest |
288 | | 33 | CKV_AWS_52 | /aws/s3.tf | aws_s3_bucket.logs | Ensure S3 bucket has MFA delete enabled | |
289 | | 34 | CKV_AWS_18 | /aws/s3.tf | aws_s3_bucket.logs | Ensure the S3 bucket has access logging enabled | https://docs.bridgecrew.io/docs/s3_13-enable-logging |
290 | | 35 | CKV_AWS_17 | /aws/db-app.tf | aws_db_instance.default | Ensure all data stored in the RDS bucket is not public accessible | https://docs.bridgecrew.io/docs/public_2 |
291 | | 36 | CKV_AWS_16 | /aws/db-app.tf | aws_db_instance.default | Ensure all data stored in the RDS is securely encrypted at rest | https://docs.bridgecrew.io/docs/general_4 |
292 | | 37 | CKV_AWS_8 | /aws/db-app.tf | aws_instance.db_app | Ensure all data stored in the Launch configuration EBS is securely encrypted | https://docs.bridgecrew.io/docs/general_13 |
293 | | 38 | CKV_AWS_79 | /aws/db-app.tf | aws_instance.db_app | Ensure Instance Metadata Service Version 1 is not enabled | https://docs.bridgecrew.io/docs/bc_aws_general_31 |
294 | | 39 | CKV_AWS_92 | /aws/elb.tf | aws_elb.weblb | Ensure the ELB has access logging enabled | |
295 | | 40 | CKV_AWS_84 | /aws/es.tf | aws_elasticsearch_domain.monitoring-framework | Ensure Elasticsearch Domain Logging is enabled | https://docs.bridgecrew.io/docs/elasticsearch_7 |
296 | | 41 | CKV_AWS_5 | /aws/es.tf | aws_elasticsearch_domain.monitoring-framework | Ensure all data stored in the Elasticsearch is securely encrypted at rest | https://docs.bridgecrew.io/docs/elasticsearch_3-enable-encryptionatrest |
297 | | 42 | CKV_AWS_83 | /aws/es.tf | aws_elasticsearch_domain.monitoring-framework | Ensure Elasticsearch Domain enforces HTTPS | https://docs.bridgecrew.io/docs/elasticsearch_6 |
298 | | 43 | CKV_AWS_40 | /aws/iam.tf | aws_iam_user_policy.userpolicy | Ensure IAM policies are attached only to groups or roles (Reducing access management complexity may in-turn reduce opportunity for a principal to inadvertently receive or retain excessive privileges.) | https://docs.bridgecrew.io/docs/iam_16-iam-policy-privileges-1 |
299 | | 44 | CKV_AWS_44 | /aws/neptune.tf | aws_neptune_cluster.default | Ensure Neptune storage is securely encrypted | https://docs.bridgecrew.io/docs/general_18 |
300 | | 45 | CKV_AZURE_9 | /azure/networking.tf | azurerm_network_security_group.bad_sg | Ensure that RDP access is restricted from the internet | https://docs.bridgecrew.io/docs/bc_azr_networking_2 |
301 | | 46 | CKV_AZURE_10 | /azure/networking.tf | azurerm_network_security_group.bad_sg | Ensure that SSH access is restricted from the internet | https://docs.bridgecrew.io/docs/bc_azr_networking_3 |
302 | | 47 | CKV_AZURE_12 | /azure/networking.tf | azurerm_network_watcher_flow_log.flow_log | Ensure that Network Security Group Flow Log retention period is 'greater than 90 days' | https://docs.bridgecrew.io/docs/bc_azr_logging_1 |
303 | | 48 | CKV_AZURE_39 | /azure/roles.tf | azurerm_role_definition.example | Ensure that no custom subscription owner roles are created | https://docs.bridgecrew.io/docs/do-not-create-custom-subscription-owner-roles |
304 | | 49 | CKV_AZURE_8 | /azure/aks.tf | azurerm_kubernetes_cluster.k8s_cluster | Ensure Kube Dashboard is disabled | https://docs.bridgecrew.io/docs/bc_azr_kubernetes_5 |
305 | | 50 | CKV_AZURE_6 | /azure/aks.tf | azurerm_kubernetes_cluster.k8s_cluster | Ensure AKS has an API Server Authorized IP Ranges enabled | https://docs.bridgecrew.io/docs/bc_azr_kubernetes_3 |
306 | | 51 | CKV_AZURE_5 | /azure/aks.tf | azurerm_kubernetes_cluster.k8s_cluster | Ensure RBAC is enabled on AKS clusters | https://docs.bridgecrew.io/docs/bc_azr_kubernetes_2 |
307 | | 52 | CKV_AZURE_7 | /azure/aks.tf | azurerm_kubernetes_cluster.k8s_cluster | Ensure AKS cluster has Network Policy configured | https://docs.bridgecrew.io/docs/bc_azr_kubernetes_4 |
308 | | 53 | CKV_AZURE_4 | /azure/aks.tf | azurerm_kubernetes_cluster.k8s_cluster | Ensure AKS logging to Azure Monitoring is Configured | https://docs.bridgecrew.io/docs/bc_azr_kubernetes_1 |
309 | | 54 | CKV_AZURE_1 | /azure/instance.tf | azurerm_linux_virtual_machine.linux_machine | Ensure Azure Instance does not use basic authentication(Use SSH Key Instead) | https://docs.bridgecrew.io/docs/bc_azr_networking_1 |
310 | | 55 | CKV_AZURE_24 | /azure/sql.tf | azurerm_sql_server.example | Ensure that 'Auditing' Retention is 'greater than 90 days' for SQL servers | https://docs.bridgecrew.io/docs/bc_azr_logging_3 |
311 | | 56 | CKV_AZURE_23 | /azure/sql.tf | azurerm_sql_server.example | Ensure that 'Auditing' is set to 'On' for SQL servers | https://docs.bridgecrew.io/docs/bc_azr_logging_2 |
312 | | 57 | CKV_AZURE_25 | /azure/sql.tf | azurerm_mssql_server_security_alert_policy.example | Ensure that 'Threat Detection types' is set to 'All' | https://docs.bridgecrew.io/docs/bc_azr_general_6 |
313 | | 58 | CKV_AZURE_26 | /azure/sql.tf | azurerm_mssql_server_security_alert_policy.example | Ensure that 'Send Alerts To' is enabled for MSSQL servers | https://docs.bridgecrew.io/docs/bc_azr_general_7 |
314 | | 59 | CKV_AZURE_27 | /azure/sql.tf | azurerm_mssql_server_security_alert_policy.example | Ensure that 'Email service and co-administrators' is 'Enabled' for MSSQL servers | https://docs.bridgecrew.io/docs/bc_azr_general_8 |
315 | | 60 | CKV_AZURE_28 | /azure/sql.tf | azurerm_mysql_server.example | Ensure 'Enforce SSL connection' is set to 'ENABLED' for MySQL Database Server | https://docs.bridgecrew.io/docs/bc_azr_networking_9 |
316 | | 61 | CKV_AZURE_29 | /azure/sql.tf | azurerm_postgresql_server.example | Ensure 'Enforce SSL connection' is set to 'ENABLED' for PostgreSQL Database Server | https://docs.bridgecrew.io/docs/bc_azr_networking_10 |
317 | | 62 | CKV_AZURE_32 | /azure/sql.tf | azurerm_postgresql_configuration.thrtottling_config | Ensure server parameter 'connection_throttling' is set to 'ON' for PostgreSQL Database Server | https://docs.bridgecrew.io/docs/bc_azr_networking_13 |
318 | | 63 | CKV_AZURE_30 | /azure/sql.tf | azurerm_postgresql_configuration.example | Ensure server parameter 'log_checkpoints' is set to 'ON' for PostgreSQL Database Server | https://docs.bridgecrew.io/docs/bc_azr_networking_11 |
319 | | 64 | CKV_AZURE_16 | /azure/app_service.tf | azurerm_app_service.app-service1 | Ensure that Register with Azure Active Directory is enabled on App Service | https://docs.bridgecrew.io/docs/bc_azr_iam_1 |
320 | | 65 | CKV_AZURE_14 | /azure/app_service.tf | azurerm_app_service.app-service1 | Ensure web app redirects all HTTP traffic to HTTPS in Azure App Service | https://docs.bridgecrew.io/docs/bc_azr_networking_5 |
321 | | 66 | CKV_AZURE_15 | /azure/app_service.tf | azurerm_app_service.app-service1 | Ensure web app is using the latest version of TLS encryption | https://docs.bridgecrew.io/docs/bc_azr_networking_6 |
322 | | 67 | CKV_AZURE_13 | /azure/app_service.tf | azurerm_app_service.app-service1 | Ensure App Service Authentication is set on Azure App Service | https://docs.bridgecrew.io/docs/bc_azr_general_2 |
323 | | 68 | CKV_AZURE_17 | /azure/app_service.tf | azurerm_app_service.app-service1 | Ensure the web app has 'Client Certificates (Incoming client certificates)' set | https://docs.bridgecrew.io/docs/bc_azr_networking_7 |
324 | | 69 | CKV_AZURE_18 | /azure/app_service.tf | azurerm_app_service.app-service1 | Ensure that 'HTTP Version' is the latest if used to run the web app | https://docs.bridgecrew.io/docs/bc_azr_networking_8 |
325 | | 70 | CKV_AZURE_16 | /azure/app_service.tf | azurerm_app_service.app-service2 | Ensure that Register with Azure Active Directory is enabled on App Service | https://docs.bridgecrew.io/docs/bc_azr_iam_1 |
326 | | 71 | CKV_AZURE_13 | /azure/app_service.tf | azurerm_app_service.app-service2 | Ensure App Service Authentication is set on Azure App Service | https://docs.bridgecrew.io/docs/bc_azr_general_2 |
327 | | 72 | CKV_AZURE_17 | /azure/app_service.tf | azurerm_app_service.app-service2 | Ensure the web app has 'Client Certificates (Incoming client certificates)' set | https://docs.bridgecrew.io/docs/bc_azr_networking_7 |
328 | | 73 | CKV_AZURE_18 | /azure/app_service.tf | azurerm_app_service.app-service2 | Ensure that 'HTTP Version' is the latest if used to run the web app | https://docs.bridgecrew.io/docs/bc_azr_networking_8 |
329 | | 74 | CKV_AZURE_19 | /azure/security_center.tf | azurerm_security_center_subscription_pricing.pricing | Ensure that standard pricing tier is selected | https://docs.bridgecrew.io/docs/ensure-standard-pricing-tier-is-selected |
330 | | 75 | CKV_AZURE_21 | /azure/security_center.tf | azurerm_security_center_contact.contact | Ensure that 'Send email notification for high severity alerts' is set to 'On' | https://docs.bridgecrew.io/docs/bc_azr_general_4 |
331 | | 76 | CKV_AZURE_20 | /azure/security_center.tf | azurerm_security_center_contact.contact | Ensure that security contact 'Phone number' is set | https://docs.bridgecrew.io/docs/bc_azr_general_3 |
332 | | 77 | CKV_AZURE_22 | /azure/security_center.tf | azurerm_security_center_contact.contact | Ensure that 'Send email notification for high severity alerts' is set to 'On' | https://docs.bridgecrew.io/docs/bc_azr_general_5 |
333 | | 78 | CKV_AZURE_42 | /azure/key_vault.tf | azurerm_key_vault.example | Ensure the key vault is recoverable | https://docs.bridgecrew.io/docs/ensure-the-key-vault-is-recoverable |
334 | | 79 | CKV_AZURE_40 | /azure/key_vault.tf | azurerm_key_vault_key.generated | Ensure that the expiration date is set on all keys | https://docs.bridgecrew.io/docs/set-an-expiration-date-on-all-keys |
335 | | 80 | CKV_AZURE_41 | /azure/key_vault.tf | azurerm_key_vault_secret.secret | Ensure that the expiration date is set on all secrets | https://docs.bridgecrew.io/docs/set-an-expiration-date-on-all-secrets |
336 | | 81 | CKV_AZURE_2 | /azure/storage.tf | azurerm_managed_disk.example | Ensure Azure managed disk have encryption enabled | https://docs.bridgecrew.io/docs/bc_azr_general_1 |
337 | | 82 | CKV_AZURE_35 | /azure/storage.tf | azurerm_storage_account.example | Ensure default network access rule for Storage Accounts is set to deny | https://docs.bridgecrew.io/docs/set-default-network-access-rule-for-storage-accounts-to-deny |
338 | | 83 | CKV_AZURE_3 | /azure/storage.tf | azurerm_storage_account.example | Ensure that 'Secure transfer required' is set to 'Enabled' | https://docs.bridgecrew.io/docs/ensure-secure-transfer-required-is-enabled |
339 | | 84 | CKV_AZURE_44 | /azure/storage.tf | azurerm_storage_account.example | Ensure Storage Account is using the latest version of TLS encryption | https://docs.bridgecrew.io/docs/bc_azr_storage_2 |
340 | | 85 | CKV_AZURE_43 | /azure/storage.tf | azurerm_storage_account.example | Ensure the Storage Account naming rules | |
341 | | 86 | CKV_AZURE_33 | /azure/storage.tf | azurerm_storage_account.example | Ensure Storage logging is enabled for Queue service for read, write and delete requests | https://docs.bridgecrew.io/docs/enable-requests-on-storage-logging-for-queue-service |
342 | | 87 | CKV_AZURE_36 | /azure/storage.tf | azurerm_storage_account_network_rules.test | Ensure 'Trusted Microsoft Services' is enabled for Storage Account access | https://docs.bridgecrew.io/docs/enable-trusted-microsoft-services-for-storage-account-access |
343 | | 88 | CKV_AZURE_37 | /azure/logging.tf | azurerm_monitor_log_profile.logging_profile | Ensure that Activity Log Retention is set 365 days or greater | https://docs.bridgecrew.io/docs/set-activity-log-retention-to-365-days-or-greater |
344 | | 89 | CKV_AZURE_38 | /azure/logging.tf | azurerm_monitor_log_profile.logging_profile | Ensure audit profile captures all the activities | https://docs.bridgecrew.io/docs/ensure-audit-profile-captures-all-activities |
345 | | 90 | CKV_GCP_6 | /gcp/big_data.tf | google_sql_database_instance.master_instance | Ensure all Cloud SQL database instance requires all incoming connections to use SSL | https://docs.bridgecrew.io/docs/bc_gcp_general_1 |
346 | | 91 | CKV_GCP_11 | /gcp/big_data.tf | google_sql_database_instance.master_instance | Ensure that Cloud SQL database Instances are not open to the world | https://docs.bridgecrew.io/docs/bc_gcp_networking_4 |
347 | | 92 | CKV_GCP_14 | /gcp/big_data.tf | google_sql_database_instance.master_instance | Ensure all Cloud SQL database instance have backup configuration enabled | https://docs.bridgecrew.io/docs/bc_gcp_general_2 |
348 | | 93 | CKV_GCP_15 | /gcp/big_data.tf | google_bigquery_dataset.dataset | Ensure that BigQuery datasets are not anonymously or publicly accessible | https://docs.bridgecrew.io/docs/bc_gcp_general_3 |
349 | | 94 | CKV_GCP_29 | /gcp/gcs.tf | google_storage_bucket.terragoat_website | Ensure that Cloud Storage buckets have uniform bucket-level access enabled | https://docs.bridgecrew.io/docs/bc_gcp_gcs_2 |
350 | | 95 | CKV_GCP_5 | /gcp/gcs.tf | google_storage_bucket.terragoat_website | Ensure Google storage bucket have encryption enabled | https://docs.bridgecrew.io/docs/bc_gcp_gcs_1 |
351 | | 96 | CKV_GCP_28 | /gcp/gcs.tf | google_storage_bucket_iam_binding.allow_public_read | Ensure that Cloud Storage bucket is not anonymously or publicly accessible | https://docs.bridgecrew.io/docs/bc_gcp_public_1 |
352 | | 97 | CKV_GCP_36 | /gcp/instances.tf | google_compute_instance.server | Ensure that IP forwarding is not enabled on Instances | https://docs.bridgecrew.io/docs/bc_gcp_networking_12 |
353 | | 98 | CKV_GCP_34 | /gcp/instances.tf | google_compute_instance.server | Ensure that no instance in the project overrides the project setting for enabling OSLogin(OSLogin needs to be enabled in project metadata for all instances) | https://docs.bridgecrew.io/docs/bc_gcp_networking_10 |
354 | | 99 | CKV_GCP_38 | /gcp/instances.tf | google_compute_instance.server | Ensure VM disks for critical VMs are encrypted with Customer Supplied Encryption Keys (CSEK) | https://docs.bridgecrew.io/docs/encrypt-boot-disks-for-instances-with-cseks |
355 | | 100 | CKV_GCP_30 | /gcp/instances.tf | google_compute_instance.server | Ensure that instances are not configured to use the default service account | https://docs.bridgecrew.io/docs/bc_gcp_iam_1 |
356 | | 101 | CKV_GCP_32 | /gcp/instances.tf | google_compute_instance.server | Ensure 'Block Project-wide SSH keys' is enabled for VM instances | https://docs.bridgecrew.io/docs/bc_gcp_networking_8 |
357 | | 102 | CKV_GCP_35 | /gcp/instances.tf | google_compute_instance.server | Ensure 'Enable connecting to serial ports' is not enabled for VM Instance | https://docs.bridgecrew.io/docs/bc_gcp_networking_11 |
358 | | 103 | CKV_GCP_39 | /gcp/instances.tf | google_compute_instance.server | Ensure Compute instances are launched with Shielded VM enabled | https://docs.bridgecrew.io/docs/bc_gcp_general_y |
359 | | 104 | CKV_GCP_37 | /gcp/instances.tf | google_compute_disk.unencrypted_disk | Ensure VM disks for critical VMs are encrypted with Customer Supplied Encryption Keys (CSEK) | https://docs.bridgecrew.io/docs/bc_gcp_general_x |
360 | | 105 | CKV_GCP_26 | /gcp/networks.tf | google_compute_subnetwork.public-subnetwork | Ensure that VPC Flow Logs is enabled for every subnet in a VPC Network | https://docs.bridgecrew.io/docs/bc_gcp_logging_1 |
361 | | 106 | CKV_GCP_3 | /gcp/networks.tf | google_compute_firewall.allow_all | Ensure Google compute firewall ingress does not allow unrestricted rdp access | https://docs.bridgecrew.io/docs/bc_gcp_networking_2 |
362 | | 107 | CKV_GCP_2 | /gcp/networks.tf | google_compute_firewall.allow_all | Ensure Google compute firewall ingress does not allow unrestricted ssh access | https://docs.bridgecrew.io/docs/bc_gcp_networking_1 |
363 | | 108 | CKV_GCP_23 | /gcp/gke.tf | google_container_cluster.workload_cluster | Ensure Kubernetes Cluster is created with Alias IP ranges enabled | https://docs.bridgecrew.io/docs/bc_gcp_kubernetes_15 |
364 | | 109 | CKV_GCP_7 | /gcp/gke.tf | google_container_cluster.workload_cluster | Ensure Legacy Authorization is set to Disabled on Kubernetes Engine Clusters | https://docs.bridgecrew.io/docs/bc_gcp_kubernetes_2 |
365 | | 110 | CKV_GCP_19 | /gcp/gke.tf | google_container_cluster.workload_cluster | Ensure GKE basic auth is disabled | https://docs.bridgecrew.io/docs/bc_gcp_kubernetes_11 |
366 | | 111 | CKV_GCP_18 | /gcp/gke.tf | google_container_cluster.workload_cluster | Ensure GKE Control Plane is not public | https://docs.bridgecrew.io/docs/bc_gcp_kubernetes_10 |
367 | | 112 | CKV_GCP_21 | /gcp/gke.tf | google_container_cluster.workload_cluster | Ensure Kubernetes Clusters are configured with Labels | https://docs.bridgecrew.io/docs/bc_gcp_kubernetes_13 |
368 | | 113 | CKV_GCP_8 | /gcp/gke.tf | google_container_cluster.workload_cluster | Ensure Stackdriver Monitoring is set to Enabled on Kubernetes Engine Clusters | https://docs.bridgecrew.io/docs/bc_gcp_kubernetes_3 |
369 | | 114 | CKV_GCP_24 | /gcp/gke.tf | google_container_cluster.workload_cluster | Ensure PodSecurityPolicy controller is enabled on the Kubernetes Engine Clusters | https://docs.bridgecrew.io/docs/bc_gcp_kubernetes_9 |
370 | | 115 | CKV_GCP_12 | /gcp/gke.tf | google_container_cluster.workload_cluster | Ensure Network Policy is enabled on Kubernetes Engine Clusters | https://docs.bridgecrew.io/docs/bc_gcp_kubernetes_7 |
371 | | 116 | CKV_GCP_1 | /gcp/gke.tf | google_container_cluster.workload_cluster | Ensure Stackdriver Logging is set to Enabled on Kubernetes Engine Clusters | https://docs.bridgecrew.io/docs/bc_gcp_kubernetes_1 |
372 | | 117 | CKV_GCP_25 | /gcp/gke.tf | google_container_cluster.workload_cluster | Ensure Kubernetes Cluster is created with Private cluster enabled | https://docs.bridgecrew.io/docs/bc_gcp_kubernetes_6 |
373 | | 118 | CKV_GCP_13 | /gcp/gke.tf | google_container_cluster.workload_cluster | Ensure a client certificate is used by clients to authenticate to Kubernetes Engine Clusters | https://docs.bridgecrew.io/docs/bc_gcp_kubernetes_8 |
374 | | 119 | CKV_GCP_9 | /gcp/gke.tf | google_container_node_pool.custom_node_pool | Ensure 'Automatic node repair' is enabled for Kubernetes Clusters | https://docs.bridgecrew.io/docs/bc_gcp_kubernetes_4 |
375 | | 120 | CKV_GCP_22 | /gcp/gke.tf | google_container_node_pool.custom_node_pool | Ensure Container-Optimized OS (cos) is used for Kubernetes Engine Clusters Node image | https://docs.bridgecrew.io/docs/bc_gcp_kubernetes_14 |
376 | | 121 | CKV_GCP_10 | /gcp/gke.tf | google_container_node_pool.custom_node_pool | Ensure 'Automatic node upgrade' is enabled for Kubernetes Clusters | https://docs.bridgecrew.io/docs/bc_gcp_kubernetes_5 |
377 |
378 |
379 | ---
380 |
381 | 1
382 |
383 |
384 |
--------------------------------------------------------------------------------
/terraform/aws/consts.tf:
--------------------------------------------------------------------------------
1 | 
2 | data "aws_caller_identity" "current" {} # identity of the account running the apply; account_id feeds resource_prefix
3 | 
4 | variable "company_name" {
5 |   default = "acme" # naming component of resource_prefix
6 | }
7 | 
8 | variable "environment" {
9 |   default = "dev" # naming component of resource_prefix; override via TF_VAR_environment for multiple stacks
10 | }
11 | 
12 | locals {
13 |   resource_prefix = {
14 |     value = "${data.aws_caller_identity.current.account_id}-${var.company_name}-${var.environment}" # referenced throughout as local.resource_prefix.value
15 |   }
16 | }
17 |
18 |
19 |
20 | variable "profile" {
21 |   default = "default" # AWS shared-credentials profile used by the provider
22 | }
23 | 
24 | variable "region" {
25 |   default = "us-west-2" # region all AWS resources are created in
26 | }
27 | 
28 | variable "availability_zone" {
29 |   type    = string # first-class type keyword; quoted "string" is deprecated since Terraform 0.12
30 |   default = "us-west-2a"
31 | }
32 | 
33 | variable "availability_zone2" {
34 |   type    = string
35 |   default = "us-west-2b"
36 | }
37 |
38 |
39 | variable "ami" { # quoted label is the required HCL2 block-label syntax
40 |   type    = string
41 |   default = "ami-09a5b0b7edf08843d"
42 | }
43 | 
44 | variable "dbname" {
45 |   type        = string
46 |   description = "Name of the Database"
47 |   default     = "db1"
48 | }
49 | 
50 | variable "password" {
51 |   type        = string
52 |   description = "Database password"
53 |   default     = "Aa1234321Bb" # intentionally hard-coded weak secret — TerraGoat is vulnerable by design
54 | }
55 | 
56 | variable "neptune-dbname" {
57 |   type        = string
58 |   description = "Name of the Neptune graph database"
59 |   default     = "neptunedb1"
60 | }
--------------------------------------------------------------------------------
/terraform/aws/db-app.tf:
--------------------------------------------------------------------------------
1 | resource "aws_db_instance" "default" {
2 |   name                   = var.dbname
3 |   engine                 = "mysql"
4 |   option_group_name      = aws_db_option_group.default.name
5 |   parameter_group_name   = aws_db_parameter_group.default.name
6 |   db_subnet_group_name   = aws_db_subnet_group.default.name
7 |   vpc_security_group_ids = [aws_security_group.default.id] # direct reference; "${...}"-only interpolation is deprecated since Terraform 0.12
8 | 
9 |   identifier              = "rds-${local.resource_prefix.value}"
10 |   engine_version          = "8.0" # Latest major version
11 |   instance_class          = "db.t3.micro"
12 |   allocated_storage       = "20"
13 |   username                = "admin"
14 |   password                = var.password
15 |   apply_immediately       = true
16 |   multi_az                = false
17 |   backup_retention_period = 0     # intentionally insecure: no automated backups (TerraGoat is vulnerable by design)
18 |   storage_encrypted       = false # intentionally insecure: storage left unencrypted
19 |   skip_final_snapshot     = true
20 |   monitoring_interval     = 0
21 |   publicly_accessible     = true # intentionally insecure: instance reachable from the internet
22 | 
23 |   tags = {
24 |     Name        = "${local.resource_prefix.value}-rds"
25 |     Environment = local.resource_prefix.value
26 |   }
27 | 
28 |   # Ignore password changes from tf plan diff
29 |   lifecycle {
30 |     ignore_changes = [password] # bare attribute reference; quoted "password" is deprecated since Terraform 0.12
31 |   }
32 | }
33 |
resource "aws_db_option_group" "default" {
  # MySQL 8.0 option group consumed by aws_db_instance.default.
  name                     = "og-${local.resource_prefix.value}"
  option_group_description = "Terraform OG"
  engine_name              = "mysql"
  major_engine_version     = "8.0"

  tags = {
    Environment = local.resource_prefix.value
    Name        = "${local.resource_prefix.value}-og"
  }
}
45 |
resource "aws_db_parameter_group" "default" {
  # Parameter group for the mysql8.0 family, consumed by the RDS instance.
  name        = "pg-${local.resource_prefix.value}"
  family      = "mysql8.0"
  description = "Terraform PG"

  # Apply utf8 to both the client and server character sets immediately.
  dynamic "parameter" {
    for_each = ["character_set_client", "character_set_server"]
    content {
      name         = parameter.value
      value        = "utf8"
      apply_method = "immediate"
    }
  }

  tags = {
    Environment = local.resource_prefix.value
    Name        = "${local.resource_prefix.value}-pg"
  }
}
68 |
resource "aws_db_subnet_group" "default" {
  # DB subnet group spanning both public subnets (RDS requires two AZs).
  name        = "sg-${local.resource_prefix.value}"
  description = "Terraform DB Subnet Group"

  # Terraform 0.12+: direct references instead of "${...}"-only strings.
  subnet_ids = [aws_subnet.web_subnet.id, aws_subnet.web_subnet2.id]

  tags = {
    Name        = "sg-${local.resource_prefix.value}"
    Environment = local.resource_prefix.value
  }
}
79 |
resource "aws_security_group" "default" {
  # Security group attached to the RDS instance; its rules live in the
  # separate aws_security_group_rule resources below.
  vpc_id = aws_vpc.web_vpc.id
  name   = "${local.resource_prefix.value}-rds-sg"

  tags = {
    Environment = local.resource_prefix.value
    Name        = "${local.resource_prefix.value}-rds-sg"
  }
}
89 |
resource "aws_security_group_rule" "ingress" {
  # Allow MySQL (3306) from anywhere inside the web VPC.
  type              = "ingress"
  from_port         = 3306 # ports are numbers, not strings
  to_port           = 3306
  protocol          = "tcp"
  cidr_blocks       = [aws_vpc.web_vpc.cidr_block]
  security_group_id = aws_security_group.default.id
}
98 |
resource "aws_security_group_rule" "egress" {
  # Allow all outbound traffic from the RDS security group.
  type              = "egress"
  from_port         = 0
  to_port           = 0
  protocol          = "-1"
  cidr_blocks       = ["0.0.0.0/0"]
  security_group_id = aws_security_group.default.id
}
107 |
108 |
109 | ### EC2 instance
### EC2 instance
resource "aws_iam_instance_profile" "ec2profile" {
  # Instance profile binding the EC2 role to the db-app instance.
  name = "${local.resource_prefix.value}-profile"
  role = aws_iam_role.ec2role.name
}
114 |
115 | resource "aws_iam_role" "ec2role" {
116 | name = "${local.resource_prefix.value}-role"
117 | path = "/"
118 |
119 | assume_role_policy = < /tmp/dbinfo.inc
197 |
203 | EnD
204 | sudo mv /tmp/dbinfo.inc /var/www/inc
205 | sudo chown root:root /var/www/inc/dbinfo.inc
206 |
207 | cat << EnD > /tmp/index.php
208 |
209 |
210 |
211 | Sample page
212 |
232 |
233 |
234 |
253 |
254 |
255 |
256 |
257 | ID |
258 | NAME |
259 | ADDRESS |
260 |
261 |
262 | ";
268 | echo "",\$query_data[0], " | ",
269 | "",\$query_data[1], " | ",
270 | "",\$query_data[2], " | ";
271 | echo "";
272 | }
273 | ?>
274 |
275 |
276 |
277 |
278 |
284 |
285 |
286 |
287 |
288 |
289 | Error adding employee data.
");
299 | }
300 |
301 | /* Check whether the table exists and, if not, create it. */
302 | function VerifyEmployeesTable(\$connection, \$dbName) {
303 | if(!TableExists("EMPLOYEES", \$connection, \$dbName))
304 | {
305 | \$query = "CREATE TABLE EMPLOYEES (
306 | ID int(11) UNSIGNED AUTO_INCREMENT PRIMARY KEY,
307 | NAME VARCHAR(45),
308 | ADDRESS VARCHAR(90)
309 | )";
310 |
311 | if(!mysqli_query(\$connection, \$query)) echo("Error creating table.
");
312 | }
313 | }
314 |
315 | /* Check for the existence of a table. */
316 | function TableExists(\$tableName, \$connection, \$dbName) {
317 | \$t = mysqli_real_escape_string(\$connection, \$tableName);
318 | \$d = mysqli_real_escape_string(\$connection, \$dbName);
319 |
320 | \$checktable = mysqli_query(\$connection,
321 | "SELECT TABLE_NAME FROM information_schema.TABLES WHERE TABLE_NAME = '\$t' AND TABLE_SCHEMA = '\$d'");
322 |
323 | if(mysqli_num_rows(\$checktable) > 0) return true;
324 |
325 | return false;
326 | }
327 | ?>
328 | EnD
329 |
330 | sudo mv /tmp/index.php /var/www/html
331 | sudo chown root:root /var/www/html/index.php
332 |
333 |
334 |
335 | EOF
336 | tags = {
337 | Name = "${local.resource_prefix.value}-dbapp"
338 | }
339 | }
340 |
output "db_app_public_dns" {
  # Public DNS name of the EC2 instance running the sample DB app.
  value       = aws_instance.db_app.public_dns
  description = "DB Public DNS name"
}
345 |
output "db_endpoint" {
  # Connection endpoint of the RDS instance.
  value       = aws_db_instance.default.endpoint
  description = "DB Endpoint"
}
350 |
351 |
--------------------------------------------------------------------------------
/terraform/aws/ec2.tf:
--------------------------------------------------------------------------------
1 | resource "aws_instance" "web_host" {
2 | # ec2 have plain text secrets in user data
3 | ami = "${var.ami}"
4 | instance_type = "t2.nano"
5 |
6 | vpc_security_group_ids = [
7 | "${aws_security_group.web-node.id}"]
8 | subnet_id = "${aws_subnet.web_subnet.id}"
9 | user_data = <Deployed via Terraform" | sudo tee /var/www/html/index.html
19 | EOF
20 | tags = {
21 | Name = "${local.resource_prefix.value}-ec2"
22 | }
23 | }
24 |
resource "aws_ebs_volume" "web_host_storage" {
  # INTENTIONAL VULNERABILITY: unencrypted EBS volume (TerraGoat design).
  availability_zone = var.availability_zone
  #encrypted = false # Setting this causes the volume to be recreated on apply
  size = 1

  tags = {
    Name = "${local.resource_prefix.value}-ebs"
  }
}
34 |
resource "aws_ebs_snapshot" "example_snapshot" {
  # Snapshot of the unencrypted volume above (also unencrypted, by design).
  volume_id   = aws_ebs_volume.web_host_storage.id
  description = "${local.resource_prefix.value}-ebs-snapshot"

  tags = {
    Name = "${local.resource_prefix.value}-ebs-snapshot"
  }
}
43 |
resource "aws_volume_attachment" "ebs_att" {
  # Attach the storage volume to the web host as /dev/sdh.
  device_name = "/dev/sdh"
  volume_id   = aws_ebs_volume.web_host_storage.id
  instance_id = aws_instance.web_host.id
}
49 |
resource "aws_security_group" "web-node" {
  # INTENTIONAL VULNERABILITY: SSH (22) is open to the world.
  name        = "${local.resource_prefix.value}-sg"
  description = "${local.resource_prefix.value} Security Group"
  vpc_id      = aws_vpc.web_vpc.id

  # HTTP from anywhere.
  ingress {
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # SSH from anywhere (intentional misconfiguration).
  ingress {
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # All outbound traffic allowed.
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # The previous explicit depends_on = [aws_vpc.web_vpc] was redundant:
  # the vpc_id reference already creates that dependency implicitly.
}
79 |
resource "aws_vpc" "web_vpc" {
  # 172.16.0.0/16 VPC hosting the web tier, with DNS resolution enabled.
  cidr_block           = "172.16.0.0/16"
  enable_dns_support   = true
  enable_dns_hostnames = true

  tags = {
    Name = "${local.resource_prefix.value}-vpc"
  }
}
88 |
resource "aws_subnet" "web_subnet" {
  # First public subnet; auto-assigns public IPs to launched instances.
  vpc_id                  = aws_vpc.web_vpc.id
  availability_zone       = var.availability_zone
  cidr_block              = "172.16.10.0/24"
  map_public_ip_on_launch = true

  tags = {
    Name = "${local.resource_prefix.value}-subnet"
  }
}
99 |
resource "aws_subnet" "web_subnet2" {
  # Second public subnet in the other AZ (needed by the RDS subnet group).
  vpc_id                  = aws_vpc.web_vpc.id
  availability_zone       = var.availability_zone2
  cidr_block              = "172.16.11.0/24"
  map_public_ip_on_launch = true

  tags = {
    Name = "${local.resource_prefix.value}-subnet2"
  }
}
110 |
111 |
resource "aws_internet_gateway" "web_igw" {
  # Internet gateway providing the public subnets with internet access.
  vpc_id = aws_vpc.web_vpc.id

  tags = {
    Name = "${local.resource_prefix.value}-igw"
  }
}
119 |
resource "aws_route_table" "web_rtb" {
  # Shared route table for both public subnets.
  vpc_id = aws_vpc.web_vpc.id

  tags = {
    Name = "${local.resource_prefix.value}-rtb"
  }
}
127 |
resource "aws_route_table_association" "rtbassoc" {
  # Bind the first public subnet to the shared route table.
  route_table_id = aws_route_table.web_rtb.id
  subnet_id      = aws_subnet.web_subnet.id
}
132 |
resource "aws_route_table_association" "rtbassoc2" {
  # Bind the second public subnet to the shared route table.
  route_table_id = aws_route_table.web_rtb.id
  subnet_id      = aws_subnet.web_subnet2.id
}
137 |
resource "aws_route" "public_internet_gateway" {
  # Default route: send all non-local traffic through the internet gateway.
  route_table_id         = aws_route_table.web_rtb.id
  destination_cidr_block = "0.0.0.0/0"
  gateway_id             = aws_internet_gateway.web_igw.id

  timeouts {
    create = "5m"
  }
}
147 |
148 |
resource "aws_network_interface" "web-eni" {
  # ENI pinned to a fixed private IP inside the first public subnet.
  subnet_id   = aws_subnet.web_subnet.id
  private_ips = ["172.16.10.100"]

  tags = {
    Name = "${local.resource_prefix.value}-primary_network_interface"
  }
}
157 |
# VPC Flow Logs to S3
resource "aws_flow_log" "vpcflowlogs" {
  # Capture all (accepted and rejected) traffic in the web VPC and
  # deliver it to the dedicated S3 bucket below.
  vpc_id               = aws_vpc.web_vpc.id
  traffic_type         = "ALL"
  log_destination_type = "s3"
  log_destination      = aws_s3_bucket.flowbucket.arn

  tags = {
    Environment = local.resource_prefix.value
    Name        = "${local.resource_prefix.value}-flowlogs"
  }
}
170 |
resource "aws_s3_bucket" "flowbucket" {
  # Destination bucket for VPC flow logs; force_destroy lets
  # `terraform destroy` succeed even when log objects exist.
  bucket        = "${local.resource_prefix.value}-flowlogs"
  force_destroy = true

  tags = {
    Environment = local.resource_prefix.value
    Name        = "${local.resource_prefix.value}-flowlogs"
  }
}
180 |
output "ec2_public_dns" {
  # Public DNS name of the web host instance.
  value       = aws_instance.web_host.public_dns
  description = "Web Host Public DNS name"
}
185 |
output "vpc_id" {
  # ID of the web VPC created in this module.
  value       = aws_vpc.web_vpc.id
  description = "The ID of the VPC"
}
190 |
output "public_subnet" {
  # ID of the first public subnet.
  value       = aws_subnet.web_subnet.id
  description = "The ID of the Public subnet"
}
195 |
output "public_subnet2" {
  # ID of the second public subnet. The description previously duplicated
  # the one on "public_subnet" — fixed to disambiguate the two outputs.
  description = "The ID of the second Public subnet"
  value       = aws_subnet.web_subnet2.id
}
200 |
--------------------------------------------------------------------------------
/terraform/aws/ecr.tf:
--------------------------------------------------------------------------------
resource "aws_ecr_repository" "repository" {
  # ECR repository for the demo image. MUTABLE tags are intentional
  # (TerraGoat is vulnerable by design). Resource type labels must be
  # quoted; the unquoted form is deprecated HCL.
  name                 = "${local.resource_prefix.value}-repository"
  image_tag_mutability = "MUTABLE"

  tags = {
    Name = "${local.resource_prefix.value}-repository"
  }
}
9 |
locals {
  # Fully-qualified ECR image URI:
  # <account-id>.dkr.ecr.<region>.amazonaws.com/<repository-name>
  docker_image = "${data.aws_caller_identity.current.account_id}.dkr.ecr.${var.region}.amazonaws.com/${aws_ecr_repository.repository.name}"
}
13 |
14 |
15 | resource null_resource "push_image" {
16 | provisioner "local-exec" {
17 | working_dir = "${path.module}/resources"
18 | command = <