├── .github
│   ├── dependabot.yml
│   └── workflows
│       ├── check-resource-types.yaml
│       ├── generate-assets.yaml
│       └── opa-tests.yaml
├── .gitignore
├── .regal
│   └── config.yaml
├── LICENSE
├── README.md
├── ci
│   ├── check-resource-types.sh
│   └── resourcetypes.rego
├── deregister-hook.sh
├── docs
│   └── assets
│       └── opa-aws-cloudformation.svg
├── examples
│   ├── policy
│   │   ├── assertions.rego
│   │   ├── authz.rego
│   │   ├── aws.rego
│   │   ├── ec2
│   │   │   └── security_group
│   │   │       ├── security_group.rego
│   │   │       └── security_group_test.rego
│   │   ├── eks
│   │   │   └── cluster
│   │   │       ├── logging_enabled.rego
│   │   │       ├── logging_enabled_test.rego
│   │   │       ├── public_api.rego
│   │   │       └── public_api_test.rego
│   │   ├── iam
│   │   │   ├── role
│   │   │   │   ├── principal_boundary.rego
│   │   │   │   └── principal_boundary_test.rego
│   │   │   └── user
│   │   │       ├── have_policies.rego
│   │   │       ├── have_policies_test.rego
│   │   │       ├── no_admin.rego
│   │   │       ├── no_admin_test.rego
│   │   │       ├── principal_boundary.rego
│   │   │       └── principal_boundary_test.rego
│   │   ├── main.rego
│   │   ├── main_test.rego
│   │   ├── rds
│   │   │   └── db
│   │   │       ├── encryption.rego
│   │   │       └── encryption_test.rego
│   │   ├── s3
│   │   │   └── bucket
│   │   │       ├── encryption.rego
│   │   │       ├── encryption_test.rego
│   │   │       ├── logging.rego
│   │   │       ├── logging_test.rego
│   │   │       ├── public_access.rego
│   │   │       └── public_access_test.rego
│   │   └── test_helpers.rego
│   └── templates
│       ├── eks-cluster-logging
│       │   ├── eks-deny-cluster.yaml
│       │   └── eks-success-cluster-logging.yaml
│       ├── eks-public-api
│       │   ├── eks-deny-cluster.yaml
│       │   └── eks-success-public-api-disabled.yaml
│       ├── iam-no-admin-statements
│       │   ├── iam-fail-admin-allowed.yaml
│       │   └── iam-success-action-restricted.yaml
│       ├── iam-principal-boundary
│       │   ├── iam-fail-auto-generated-name-not-excluded.yaml
│       │   ├── iam-fail-permission-boundary-incorrect.yaml
│       │   ├── iam-success-permission-boundary-included.yaml
│       │   ├── iam-success-role-name-excluded.yaml
│       │   └── iam-success-user-name-excluded.yaml
│       ├── iam-users-have-policy
│       │   ├── iam-fail-no-user-policy.yaml
│       │   └── iam-success-user-policy-attached.yaml
│       ├── rds-encryption-verify
│       │   ├── rds-fail-encryption-not-set.yaml
│       │   └── rds-success-encryption-set.yaml
│       ├── s3-block-public-access
│       │   ├── s3-fail-no-settings-specified.yaml
│       │   ├── s3-fail-not-all-4-settings-specified.yaml
│       │   ├── s3-success-all-access-blocked.yaml
│       │   └── s3-success-name-prefix-excluded.yaml
│       ├── s3-bucket-encryption
│       │   ├── s3-fail-no-bucket-encryption.yaml
│       │   ├── s3-fail-wrong-encryption-type.yaml
│       │   └── s3-success-encryption.yaml
│       ├── s3-bucket-logging-enabled
│       │   ├── s3-fail-no-logging.yaml
│       │   └── s3-success-logging-enabled.yaml
│       └── security-group-open-ingress
│           ├── sg-fail-open-to-public.yaml
│           └── sg-success-restricted-to-subnet.yaml
├── hooks
│   ├── .gitignore
│   ├── .rpdk-config
│   ├── README.md
│   ├── hook-role.yaml
│   ├── requirements.txt
│   ├── src
│   │   └── styra_opa_hook
│   │       ├── __init__.py
│   │       ├── handlers.py
│   │       └── models.py
│   ├── styra-opa-hook-configuration.json
│   ├── styra-opa-hook.json
│   └── template.yml
└── test
    ├── integration.sh
    ├── requirements.txt
    └── validate.py
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: "github-actions"
4 | directory: "/"
5 | schedule:
6 | interval: "daily"
7 |
--------------------------------------------------------------------------------
/.github/workflows/check-resource-types.yaml:
--------------------------------------------------------------------------------
1 | name: Check for resource type updates
2 |
3 | on:
4 | schedule:
5 | # Sundays at 01:00
6 | - cron: "00 1 * * 0"
7 | workflow_dispatch:
8 |
9 | jobs:
10 | check-for-updates:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - name: Check out repository code
14 | uses: actions/checkout@v4
15 |
16 | - name: Setup OPA
17 | uses: open-policy-agent/setup-opa@v2
18 |
19 | - name: Check for updated resource types
20 | run: ci/check-resource-types.sh
21 |
--------------------------------------------------------------------------------
/.github/workflows/generate-assets.yaml:
--------------------------------------------------------------------------------
1 | name: Generate Zip File
2 |
3 | on:
4 | workflow_run:
5 | workflows: [OPA Tests]
6 | types:
7 | - completed
8 |
9 | jobs:
10 | generate-zip-file:
11 | runs-on: ubuntu-latest
12 |
13 | steps:
14 | - name: Check out repository code
15 | uses: actions/checkout@v4
16 |
17 | - name: Setup python
18 | uses: actions/setup-python@v5
19 | with:
20 | python-version: 3
21 |
22 | - name: Install and run cfn cli
23 | run: |
24 | cd hooks
25 | pip install --quiet cloudformation-cli cloudformation-cli-python-plugin
26 | cfn submit --dry-run
27 |
28 | - name: Upload zip file as artifact
29 | uses: actions/upload-artifact@v4
30 | with:
31 | name: hook-package-zip
32 | path: hooks/styra-opa-hook.zip
33 |
--------------------------------------------------------------------------------
/.github/workflows/opa-tests.yaml:
--------------------------------------------------------------------------------
1 | name: OPA Tests
2 |
3 | on: [push]
4 |
5 | jobs:
6 | opa-unit-tests:
7 | runs-on: ubuntu-latest
8 | strategy:
9 | matrix:
10 | version: [latest, 0.60.x]
11 | steps:
12 | - name: Check out repository code
13 | uses: actions/checkout@v4
14 |
15 | - name: Setup OPA
16 | uses: open-policy-agent/setup-opa@v2
17 | with:
18 | version: ${{ matrix.version }}
19 |
20 | - name: OPA unit tests
21 | run: opa test -v examples/policy/
22 |
23 | - name: OPA check strict
24 | run: opa check --strict examples/policy/
25 |
26 | opa-integration-tests:
27 | runs-on: ubuntu-latest
28 | strategy:
29 | matrix:
30 | version: [latest, 0.60.x]
31 | steps:
32 | - name: Check out repository code
33 | uses: actions/checkout@v4
34 |
35 | - name: Setup OPA
36 | uses: open-policy-agent/setup-opa@v2
37 | with:
38 | version: ${{ matrix.version }}
39 |
40 | - name: Install pip dependencies
41 | run: pip3 install -r test/requirements.txt
42 |
43 | - name: OPA integration tests
44 | run: test/integration.sh
45 |
46 | regal-lint:
47 | runs-on: ubuntu-latest
48 | steps:
49 | - name: Check out repository code
50 | uses: actions/checkout@v4
51 |
52 | - name: Setup OPA
53 | uses: open-policy-agent/setup-opa@v2
54 |
55 | - name: OPA check strict
56 | run: opa check --strict .
57 |
58 | - name: Setup Regal
59 | uses: StyraInc/setup-regal@v1
60 | with:
61 | version: latest
62 |
63 | - name: Regal Lint
64 | run: regal lint --format github .
65 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # macOS
2 | .DS_Store
3 | ._*
4 |
5 | # Byte-compiled / optimized / DLL files
6 | __pycache__/
7 | *.py[cod]
8 | *$py.class
9 |
10 | # C extensions
11 | *.so
12 |
13 | # Distribution / packaging
14 | .Python
15 | build/
16 | develop-eggs/
17 | dist/
18 | downloads/
19 | eggs/
20 | .eggs/
21 | lib/
22 | lib64/
23 | parts/
24 | sdist/
25 | var/
26 | wheels/
27 | pip-wheel-metadata/
28 | share/python-wheels/
29 | *.egg-info/
30 | .installed.cfg
31 | *.egg
32 | MANIFEST
33 |
34 | # PyInstaller
35 | # Usually these files are written by a python script from a template
36 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
37 | *.manifest
38 | *.spec
39 |
40 | # Installer logs
41 | pip-log.txt
42 | pip-delete-this-directory.txt
43 |
44 | # Unit test / coverage reports
45 | htmlcov/
46 | .tox/
47 | .nox/
48 | .coverage
49 | .coverage.*
50 | .cache
51 | nosetests.xml
52 | coverage.xml
53 | *.cover
54 | .hypothesis/
55 | .pytest_cache/
56 |
57 | # Translations
58 | *.mo
59 | *.pot
60 |
61 | # Django stuff:
62 | *.log
63 | local_settings.py
64 | db.sqlite3
65 | db.sqlite3-journal
66 |
67 | # Flask stuff:
68 | instance/
69 | .webassets-cache
70 |
71 | # Scrapy stuff:
72 | .scrapy
73 |
74 | # Sphinx documentation
75 | docs/_build/
76 |
77 | # PyBuilder
78 | target/
79 |
80 | # Jupyter Notebook
81 | .ipynb_checkpoints
82 |
83 | # IPython
84 | profile_default/
85 | ipython_config.py
86 |
87 | # pyenv
88 | .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # celery beat schedule file
98 | celerybeat-schedule
99 |
100 | # SageMath parsed files
101 | *.sage.py
102 |
103 | # Environments
104 | .env
105 | .venv
106 | env/
107 | venv/
108 | ENV/
109 | env.bak/
110 | venv.bak/
111 |
112 | # Spyder project settings
113 | .spyderproject
114 | .spyproject
115 |
116 | # Rope project settings
117 | .ropeproject
118 |
119 | # mkdocs documentation
120 | /site
121 |
122 | # mypy
123 | .mypy_cache/
124 | .dmypy.json
125 | dmypy.json
126 |
127 | # Pyre type checker
128 | .pyre/
129 |
130 | # contains credentials
131 | sam-tests/
132 |
133 | rpdk.log*
134 |
135 | *.zip
136 |
--------------------------------------------------------------------------------
/.regal/config.yaml:
--------------------------------------------------------------------------------
1 | rules:
2 | custom:
3 | one-liner-rule:
4 | level: error
5 | imports:
6 | prefer-package-imports:
7 | level: error
8 | ignore:
9 | files:
10 | - "*_test.rego"
11 | style:
12 | line-length:
13 | level: error
14 | non-breakable-word-threshold: 80
15 | rule-name-repeats-package:
16 | level: ignore
17 | testing:
18 | print-or-trace-call:
19 | level: error
20 | ignore:
21 | files:
22 | - "assertions.rego"
23 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # OPA AWS CloudFormation Hook
2 |
3 |
4 |
5 |
6 |
7 | This repository integrates AWS CloudFormation (CFN) with OPA using
8 | [AWS CloudFormation Hooks](https://aws.amazon.com/about-aws/whats-new/2022/02/aws-announces-general-availability-aws-cloudformation-hooks/).
9 | Use this integration if you want to enforce policies over AWS resources (e.g., EC2 instances, S3 buckets, etc.)
10 | provisioned with CloudFormation. For example, using this integration you can enforce policy across resources like:
11 |
12 | * [EC2 Security Groups](https://github.com/StyraInc/opa-aws-cloudformation-hook/blob/main/examples/policy/ec2/security_group/security_group.rego)
13 | * [IAM Admin Rules](https://github.com/StyraInc/opa-aws-cloudformation-hook/blob/main/examples/policy/iam/user/no_admin_test.rego)
14 | * [S3 Public Access](https://github.com/StyraInc/opa-aws-cloudformation-hook/blob/main/examples/policy/s3/bucket/public_access_test.rego)
15 |
16 | > AWS CloudFormation Hooks were added in February 2022, so the feature is still relatively new.
17 | > If you run into any issues please report them [here](https://github.com/StyraInc/opa-aws-cloudformation-hook/issues).
18 |
19 | ## How it Works
20 |
21 | The OPA hook works by installing an
22 | [AWS CloudFormation Hook](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/hooks-structure.html)
23 | into your environment.
24 |
25 | When creating, updating, or deleting a CloudFormation Stack, the hook is triggered to validate the configuration.
26 | When used in conjunction with OPA, the hook will send the property information from each resource in a Stack to your
27 | OPA server. When this information is received, OPA will validate the request against your defined policies and send
28 | back any violations it may have found, which will stop the stack creation and log the violations to AWS CloudWatch.
29 | If no violations are reported, the resources contained in the stack are created, updated or deleted accordingly.
30 |
31 | **NOTE:** Installing OPA into your AWS environment is currently out of scope for this documentation. For local
32 | development, a tool like [ngrok](https://ngrok.com/) could be used to point at an OPA running on your machine.
33 |
34 | Want to try out this integration yourself? See the AWS CloudFormation Hooks tutorial in the
35 | [OPA documentation](https://www.openpolicyagent.org/docs/latest/aws-cloudformation-hooks/).
36 |
37 | ## Repository Contents
38 |
39 | Under the `hooks` directory you'll find the code for the hook you'll deploy in your AWS account to enable OPA policy
40 | enforcement for your CloudFormation resources. See the
41 | [OPA tutorial](https://www.openpolicyagent.org/docs/latest/aws-cloudformation-hooks/) on the topic for instructions on
42 | how to quickly get started, or the
43 | [development guide](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/hooks.html)
44 | in the AWS documentation if you'd like to learn more about how it works.
45 |
46 | To give you an idea about what policy for AWS CloudFormation Hooks might look like, this repository provides a number
47 | of example resources and policies:
48 |
49 | * The `examples/templates` directory contains example templates used for testing
50 | * The `examples/policy` directory contains example policies
51 |
52 | ### Policy Development
53 |
54 | In order to quickly iterate on changes in your Rego policies, you may use the `validate.py` tool provided under the
55 | `test` directory. The tool allows you to test your policies against provided CloudFormation template files, without
56 | actually submitting them to a hook installed in your environment. With an OPA server started with your policy
57 | files loaded (e.g. `opa run --server --watch examples/policy`), you may use the tool like:
58 |
59 | ```shell
60 | test/validate.py my-cloudformation-template.yaml
61 | ```
62 |
63 | The tool will extract all resources found in the template and submit them to OPA one by one, in the same manner
64 | the hook operates once installed. Should any violations be encountered, the tool will print them to the console.
65 |
66 | ### Deregistering the Hook
67 |
68 | Deregistering a hook requires removal of not just the hook type, but also any versions of the hook deployed. In order
69 | to help with that, you may use the `deregister-hook.sh` script provided in this repo, with the ARN of the hook provided
70 | as the only argument:
71 |
72 | ```shell
73 | ./deregister-hook.sh <hook-arn>
74 | ```
75 |
76 | ## Community
77 |
78 | For questions, discussions and announcements related to Styra products, services and open source projects, please join the Styra community on [Slack](https://communityinviter.com/apps/styracommunity/signup)!
--------------------------------------------------------------------------------
/ci/check-resource-types.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
4 |
5 | current=$(cat "$SCRIPT_DIR/../hooks/styra-opa-hook.json")
6 |
7 | new=$(echo "$current" \
8 | | opa eval --stdin-input \
9 | --format pretty \
10 | --data "$SCRIPT_DIR/resourcetypes.rego" \
11 | data.aws.cloudformation.output)
12 |
13 | if [[ "$current" != "$new" ]]; then
14 | echo "Resource types have been updated. Please run:"
15 | echo
16 | echo "cat hooks/styra-opa-hook.json | opa eval -I -f pretty -d ci/resourcetypes.rego data.aws.cloudformation.output > hooks/styra-opa-hook-new.json"
17 | echo
18 | echo "mv hooks/styra-opa-hook-new.json hooks/styra-opa-hook.json"
19 | echo
20 | echo "And commit the result"
21 | exit 1
22 | fi
23 |
--------------------------------------------------------------------------------
/ci/resourcetypes.rego:
--------------------------------------------------------------------------------
1 | package aws.cloudformation
2 |
3 | import rego.v1
4 |
5 | # METADATA
6 | # description: |
7 | # Get all resource types from the single-file JSON schema provided by AWS for the us-west-1
8 | # region. See https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-resource-specification.html
9 | # for a list of all available files in all regions
10 | resource_types contains type if {
11 | resp := http.send({
12 | "method": "GET",
13 | "url": "https://d68hl49wbnanq.cloudfront.net/latest/gzip/CloudFormationResourceSpecification.json",
14 | })
15 |
16 | some type, _ in resp.body.ResourceTypes
17 | }
18 |
19 | # METADATA
20 | # description: |
21 | # Transform all resource types into their wildcard representation, i.e. "target".
22 | # Example: AWS::S3::Bucket => AWS::S3::*
23 | resource_targets contains target if {
24 | some resource_type in resource_types
25 | target := concat("::", array.concat(array.slice(split(resource_type, "::"), 0, 2), ["*"]))
26 | }
27 |
28 | # METADATA
29 | # description: |
30 | # Patch the provided input (the existing file) with the resource types provided at the AWS endpoint
31 | output := object.union(
32 | input,
33 | {"handlers": {
34 | "preCreate": {"targetNames": resource_targets},
35 | "preUpdate": {"targetNames": resource_targets},
36 | "preDelete": {"targetNames": resource_targets},
37 | }},
38 | )
39 |
--------------------------------------------------------------------------------
/deregister-hook.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Deregister the 'Styra::OPA::Hook' type hook and all of its versions
4 |
5 | if [[ -z "$1" ]]; then
6 | echo "Usage:"
7 | echo
8 | echo "./deregister-hook.sh <hook-arn>"
9 | echo
10 | echo "The ARN of a hook can be found with the command 'aws cloudformation list-types'"
11 | exit 1
12 | fi
13 | 
14 | arn="$1"
15 | 
16 | versions=$(aws cloudformation list-type-versions --arn "$arn" | jq -r '.TypeVersionSummaries[].VersionId')
17 | 
18 | for version in $versions; do
19 | echo "Deregistering version: $version"
20 | aws cloudformation deregister-type \
21 | --type HOOK \
22 | --type-name "Styra::OPA::Hook" \
23 | --version-id "$version"
24 | done
25 | 
26 | echo "Deregistering hook Styra::OPA::Hook"
27 | 
28 | aws cloudformation deregister-type --type HOOK --type-name "Styra::OPA::Hook"
29 | 
--------------------------------------------------------------------------------
/examples/policy/assertions.rego:
--------------------------------------------------------------------------------
1 | # METADATA
2 | # description: Utility functions for working with Rego unit tests
3 | # authors:
4 | # - Anders Eknert
5 | #
6 | package assertions
7 |
8 | import rego.v1
9 |
10 | # METADATA
11 | # description: Assert expected is equal to result, or fail while printing both inputs to console
12 | assert_equals(expected, result) if expected == result
13 |
14 | assert_equals(expected, result) := false if {
15 | expected != result
16 | print("expected equals:", _quote_if_string(expected), "got:", result)
17 | }
18 |
19 | # METADATA
20 | # description: Assert expected is not equal to result, or fail while printing both inputs to console
21 | assert_not_equals(expected, result) if expected != result
22 |
23 | assert_not_equals(expected, result) := false if {
24 | expected == result
25 | print("expected not equals:", _quote_if_string(expected), "got:", result)
26 | }
27 |
28 | # METADATA
29 | # description: Assert item is in coll, or fail while printing the collection to console
30 | assert_in(item, coll) if item in coll
31 |
32 | assert_in(item, coll) := false if {
33 | not item in coll
34 | print("expected", type_name(item), _quote_if_string(item), "in", type_name(coll), "got:", coll)
35 | }
36 |
37 | # METADATA
38 | # description: Assert item is not in coll, or fail while printing the collection to console
39 | assert_not_in(item, coll) if not item in coll
40 |
41 | assert_not_in(item, coll) := false if {
42 | item in coll
43 | print("expected", type_name(item), _quote_if_string(item), "not in", type_name(coll), "got:", coll)
44 | }
45 |
46 | # METADATA
47 | # description: Assert provided collection is empty, or fail while printing the collection to console
48 | assert_empty(coll) if count(coll) == 0
49 |
50 | assert_empty(coll) := false if {
51 | count(coll) != 0
52 | print("expected empty", type_name(coll), "got:", coll)
53 | }
54 |
55 | # METADATA
56 | # description: Fail with provided message
57 | fail(msg) if {
58 | print(msg)
59 | false # regal ignore:constant-condition
60 | }
61 |
62 | _quote_if_string(x) := concat("", [`"`, x, `"`]) if is_string(x)
63 |
64 | _quote_if_string(x) := x if not is_string(x)
65 |
--------------------------------------------------------------------------------
/examples/policy/authz.rego:
--------------------------------------------------------------------------------
1 | # METADATA
2 | # description: |
3 | # Optional authorization policy to use for protecting the OPA REST API if
4 | # exposed on a public endpoint.
5 | # related_resources:
6 | # - description: OPA documentation on authentication and authorization
7 | # ref: https://www.openpolicyagent.org/docs/latest/security/#authentication-and-authorization
8 | #
9 | package system.authz
10 |
11 | import rego.v1
12 |
13 | default allow := false
14 |
15 | # METADATA
16 | # description: |
17 | # See the README.md file contained in this repo for how to configure an AWS Secret to
18 | # use as a token for client connections.
19 | #
20 | allow if input.identity == "changeme"
21 |
--------------------------------------------------------------------------------
/examples/policy/aws.rego:
--------------------------------------------------------------------------------
1 | # METADATA
2 | # scope: subpackages
3 | # organizations:
4 | # - Styra
5 | package aws
6 |
7 | import rego.v1
8 |
--------------------------------------------------------------------------------
/examples/policy/ec2/security_group/security_group.rego:
--------------------------------------------------------------------------------
1 | package aws.ec2.securitygroup
2 |
3 | import rego.v1
4 |
5 | deny contains msg if {
6 | input.resource.properties.SecurityGroupIngress[0].CidrIp == "0.0.0.0/0"
7 |
8 | msg := sprintf(
9 | "Security Group cannot contain rules allow all destinations (0.0.0.0/0 or ::/0): %s",
10 | [input.resource.id],
11 | )
12 | }
13 |
14 | deny contains msg if {
15 | input.resource.properties.SecurityGroupIngress[0].CidrIpv6 == "::/0"
16 |
17 | msg := sprintf(
18 | "Security Group cannot contain rules allow all destinations (0.0.0.0/0 or ::/0): %s",
19 | [input.resource.id],
20 | )
21 | }
22 |
--------------------------------------------------------------------------------
/examples/policy/ec2/security_group/security_group_test.rego:
--------------------------------------------------------------------------------
1 | package aws.ec2.securitygroup_test
2 |
3 | import rego.v1
4 |
5 | import data.aws.ec2.securitygroup.deny
6 |
7 | import data.assertions.assert_empty
8 |
9 | import data.test_helpers.create_with_properties
10 |
11 | test_deny_if_security_group_allows_all_destinations if {
12 | inp := create_with_properties("AWS::EC2::SecurityGroup", "SecurityGroup", {"SecurityGroupIngress": [{
13 | "CidrIp": "0.0.0.0/0",
14 | "IpProtocol": "-1",
15 | }]})
16 |
17 | deny["Security Group cannot contain rules allow all destinations (0.0.0.0/0 or ::/0): SecurityGroup"] with input as inp
18 | }
19 |
20 | test_allow_if_security_group_cidr_is_set if {
21 | inp := create_with_properties("AWS::EC2::SecurityGroup", "SecurityGroup", {"SecurityGroupIngress": [{
22 | "CidrIp": "10.0.0.0/16",
23 | "IpProtocol": "-1",
24 | }]})
25 |
26 | assert_empty(deny) with input as inp
27 | }
28 |
--------------------------------------------------------------------------------
/examples/policy/eks/cluster/logging_enabled.rego:
--------------------------------------------------------------------------------
1 | package aws.eks.cluster
2 |
3 | import rego.v1
4 |
5 | deny contains msg if {
6 | not cluster_logging_enabled
7 | msg := sprintf("no logging types are enabled for cluster: %s", [input.resource.id])
8 | }
9 |
10 | cluster_logging_enabled if count(input.resource.properties.Logging.ClusterLogging.EnabledTypes) > 0
11 |
--------------------------------------------------------------------------------
/examples/policy/eks/cluster/logging_enabled_test.rego:
--------------------------------------------------------------------------------
1 | package aws.eks.logging_enabled_test
2 |
3 | import rego.v1
4 |
5 | import data.aws.eks.cluster.deny
6 |
7 | import data.assertions.assert_in
8 | import data.assertions.assert_not_in
9 |
10 | import data.test_helpers.create_with_properties
11 |
12 | test_allow_cluster_logging_enabled if {
13 | inp := create_with_properties("AWS::EKS::Cluster", "EksCluster", {
14 | "ResourcesVpcConfig": {
15 | "RoleArn": "",
16 | "SubnetIds": [""],
17 | },
18 | "Logging": {"ClusterLogging": {"EnabledTypes": [
19 | {"Type": "audit"},
20 | {"Type": "authenticator"},
21 | ]}},
22 | })
23 |
24 | msg := "no logging types are enabled for cluster: EksCluster"
25 | assert_not_in(msg, deny) with input as inp
26 | }
27 |
28 | test_deny_no_logging_configuration if {
29 | inp := create_with_properties("AWS::EKS::Cluster", "EksCluster", {
30 | "ResourcesVpcConfig": {
31 | "RoleArn": "",
32 | "SubnetIds": [""],
33 | },
34 | "Logging": {"ClusterLogging": {}},
35 | })
36 |
37 | msg := "no logging types are enabled for cluster: EksCluster"
38 | assert_in(msg, deny) with input as inp
39 | }
40 |
--------------------------------------------------------------------------------
/examples/policy/eks/cluster/public_api.rego:
--------------------------------------------------------------------------------
1 | package aws.eks.cluster
2 |
3 | import rego.v1
4 |
5 | deny contains msg if {
6 | not public_endpoint_disabled
7 | msg := sprintf("public endpoint needs to be disabled for cluster: %s", [input.resource.id])
8 | }
9 |
10 | deny contains msg if {
11 | public_endpoint_disabled
12 | not private_endpoint_enabled
13 | msg := sprintf("invalid configuration, please enable private api for cluster: %s", [input.resource.id])
14 | }
15 |
16 | public_endpoint_disabled if input.resource.properties.ResourcesVpcConfig.EndpointPublicAccess == "false"
17 |
18 | private_endpoint_enabled if input.resource.properties.ResourcesVpcConfig.EndpointPrivateAccess == "true"
19 |
--------------------------------------------------------------------------------
/examples/policy/eks/cluster/public_api_test.rego:
--------------------------------------------------------------------------------
1 | package aws.eks.public_api_test
2 |
3 | import rego.v1
4 |
5 | import data.aws.eks.cluster.deny
6 |
7 | import data.assertions.assert_in
8 | import data.assertions.assert_not_in
9 |
10 | import data.test_helpers.create_with_properties
11 |
12 | test_allow_cluster_private_api if {
13 | inp := create_with_properties("AWS::EKS::Cluster", "EksCluster", {"ResourcesVpcConfig": {
14 | "RoleArn": "",
15 | "SubnetIds": [""],
16 | "EndpointPublicAccess": "false",
17 | "EndpointPrivateAccess": "true",
18 | }})
19 |
20 | msg := "public endpoint needs to be disabled for cluster: EksCluster"
21 | assert_not_in(msg, deny) with input as inp
22 | }
23 |
24 | test_deny_cluster_public_api if {
25 | inp := create_with_properties("AWS::EKS::Cluster", "EksCluster", {"ResourcesVpcConfig": {
26 | "RoleArn": "",
27 | "SubnetIds": [""],
28 | "EndpointPublicAccess": "true",
29 | "EndpointPrivateAccess": "true",
30 | }})
31 |
32 | msg := "public endpoint needs to be disabled for cluster: EksCluster"
33 | assert_in(msg, deny) with input as inp
34 | }
35 |
36 | test_deny_cluster_no_access if {
37 | inp := create_with_properties("AWS::EKS::Cluster", "EksCluster", {"ResourcesVpcConfig": {
38 | "RoleArn": "",
39 | "SubnetIds": [""],
40 | "EndpointPublicAccess": "false",
41 | "EndpointPrivateAccess": "false",
42 | }})
43 |
44 | msg := "invalid configuration, please enable private api for cluster: EksCluster"
45 | assert_in(msg, deny) with input as inp
46 | }
47 |
--------------------------------------------------------------------------------
/examples/policy/iam/role/principal_boundary.rego:
--------------------------------------------------------------------------------
1 | package aws.iam.role
2 |
3 | import rego.v1
4 |
5 | excluded_principal_prefixes := ["excluded", "iam-excluded", "test-excluded-user"]
6 |
7 | deny contains msg if {
8 | not excluded_principal_name
9 | not permission_boundary_exists
10 |
11 | msg := sprintf("PermissionsBoundary is not set for %s", [input.resource.id])
12 | }
13 |
14 | deny contains msg if {
15 | not excluded_principal_name
16 | permission_boundary_exists
17 | not valid_permission_boundary
18 |
19 | msg := sprintf(
20 | "PermissionsBoundary %s is not allowed for %s",
21 | [input.resource.properties.PermissionsBoundary, input.resource.id],
22 | )
23 | }
24 |
25 | excluded_principal_name if {
26 | name := input.resource.properties.RoleName
27 | some prefix in excluded_principal_prefixes
28 | startswith(name, prefix)
29 | }
30 |
31 | permission_boundary_exists if input.resource.properties.PermissionsBoundary
32 |
33 | valid_permission_boundary if {
34 | input.resource.properties.PermissionsBoundary == "arn:aws:iam::555555555555:policy/s3_deny_permissions_boundary"
35 | }
36 |
--------------------------------------------------------------------------------
/examples/policy/iam/role/principal_boundary_test.rego:
--------------------------------------------------------------------------------
1 | package aws.iam.role_test
2 |
3 | import rego.v1
4 |
5 | import data.aws.iam.role.deny
6 |
7 | import data.assertions.assert_empty
8 |
9 | import data.test_helpers.create_with_properties
10 | import data.test_helpers.with_properties
11 |
12 | mock_create := create_with_properties("AWS::IAM::Role", "IAMRoleTest", {"AssumeRolePolicyDocument": {
13 | "Version": "2012-10-17",
14 | "Statement": [{
15 | "Action": "sts:AssumeRole",
16 | "Effect": "Allow",
17 | "Principal": {"Service": "codepipeline.amazonaws.com"},
18 | }],
19 | }})
20 |
21 | test_deny_auto_generated_name_not_excluded if {
22 | inp := object.union(mock_create, with_properties({
23 | "RoleName": "iam-not-excluded-cfn-hooks-cfn-stack-1-fail-046693375555",
24 | "PermissionsBoundary": "arn:aws:iam::555555555555:policy/invalid_s3_deny_permissions_boundary",
25 | }))
26 |
27 | # regal ignore:line-length
28 | deny["PermissionsBoundary arn:aws:iam::555555555555:policy/invalid_s3_deny_permissions_boundary is not allowed for IAMRoleTest"] with input as inp
29 | }
30 |
31 | test_deny_permission_boundary_not_set if {
32 | deny["PermissionsBoundary is not set for IAMRoleTest"] with input as mock_create
33 | }
34 |
35 | test_allow_permission_boundary_included if {
36 | inp := object.union(mock_create, with_properties({
37 | "RoleName": "cfn-hooks-pass-046693375555",
38 | "PermissionsBoundary": "arn:aws:iam::555555555555:policy/s3_deny_permissions_boundary",
39 | }))
40 |
41 | assert_empty(deny) with input as inp
42 | }
43 |
44 | test_allow_role_name_excluded if {
45 | inp := object.union(mock_create, with_properties({"RoleName": "excluded-cfn-hooks-stack1-046693375555"}))
46 |
47 | assert_empty(deny) with input as inp
48 | }
49 |
--------------------------------------------------------------------------------
/examples/policy/iam/user/have_policies.rego:
--------------------------------------------------------------------------------
1 | package aws.iam.user
2 |
3 | import rego.v1
4 |
5 | deny contains msg if {
6 | not user_policies_exist
7 |
8 | msg := sprintf("IAM user does not have a policy statement: %s", [input.resource.id])
9 | }
10 |
11 | user_policies_exist if input.resource.properties.Policies
12 |
13 | user_policies_exist if input.resource.properties.ManagedPolicyArns
14 |
--------------------------------------------------------------------------------
/examples/policy/iam/user/have_policies_test.rego:
--------------------------------------------------------------------------------
1 | package aws.iam.user_have_policies_test
2 |
3 | import rego.v1
4 |
5 | import data.aws.iam.user.deny
6 |
7 | import data.assertions.assert_in
8 | import data.assertions.assert_not_in
9 |
10 | import data.test_helpers.create_with_properties
11 |
12 | mock_create := create_with_properties("AWS::IAM::User", "IAMUserTest", {
13 | "Policies": [{"PolicyDocument": {
14 | "PolicyName": "Test",
15 | "Statement": [{
16 | "Action": "'*'",
17 | "Effect": "Deny",
18 | "Resource": "'*'",
19 | }],
20 | "Version": "2012-10-17",
21 | }}],
22 | "UserName": "WithPolicy",
23 | })
24 |
25 | test_deny_policy_statement_undefined if {
26 | inp := create_with_properties("AWS::IAM::User", "IAMUserTest", {"AssumeRolePolicyDocument": {
27 | "Version": "2012-10-17",
28 | "Statement": [{
29 | "Action": "sts:AssumeRole",
30 | "Effect": "Allow",
31 | "Principal": {"Service": "codepipeline.amazonaws.com"},
32 | }],
33 | }})
34 |
35 | assert_in("IAM user does not have a policy statement: IAMUserTest", deny) with input as inp
36 | }
37 |
38 | test_allow_user_with_policy if {
39 | inp := create_with_properties("AWS::IAM::User", "IAMUserTest", {
40 | "Policies": [{"PolicyDocument": {
41 | "PolicyName": "Test",
42 | "Statement": [{
43 | "Action": "'*'",
44 | "Effect": "Deny",
45 | "Resource": "'*'",
46 | }],
47 | "Version": "2012-10-17",
48 | }}],
49 | "UserName": "WithPolicy",
50 | })
51 |
52 | assert_not_in("IAM user does not have a policy statement: IAMUserTest", deny) with input as inp
53 | }
54 |
55 | test_allow_user_with_managed_policy if {
56 | inp := create_with_properties(
57 | "AWS::IAM::User",
58 | "IAMUserTest",
59 | {"ManagedPolicyArns": ["arn:aws:iam::aws:policy/AWSDenyAll"]},
60 | )
61 |
62 | assert_not_in("IAM user does not have a policy statement: IAMUserTest", deny) with input as inp
63 | }
64 |
--------------------------------------------------------------------------------
/examples/policy/iam/user/no_admin.rego:
--------------------------------------------------------------------------------
1 | package aws.iam.user
2 |
3 | import rego.v1
4 |
5 | deny contains msg if {
6 | not managed_policy_exist
7 | not valid_iam_scope
8 |
9 | msg := sprintf("please limit the scope for IAM user: %s", [input.resource.id])
10 | }
11 |
12 | valid_iam_scope if every policy in input.resource.properties.Policies {
13 | every statement in policy.PolicyDocument.Statement {
14 | statement.Action != "'*'"
15 | }
16 | }
17 |
18 | managed_policy_exist if input.resource.properties.ManagedPolicyArns
19 |
--------------------------------------------------------------------------------
/examples/policy/iam/user/no_admin_test.rego:
--------------------------------------------------------------------------------
1 | package aws.iam.user_no_admin_test
2 |
3 | import rego.v1
4 |
5 | import data.aws.iam.user.deny
6 |
7 | import data.assertions.assert_empty
8 | import data.assertions.assert_in
9 | import data.assertions.assert_not_in
10 |
11 | import data.test_helpers.create_with_properties
12 | import data.test_helpers.with_properties
13 |
14 | mock_create := create_with_properties("AWS::IAM::User", "IAMUserTest", {
15 | "AssumeRolePolicyDocument": {
16 | "Version": "2012-10-17",
17 | "Statement": [{
18 | "Action": "sts:AssumeRole",
19 | "Effect": "Allow",
20 | "Principal": {"Service": "codepipeline.amazonaws.com"},
21 | }],
22 | },
23 | "PermissionsBoundary": "arn:aws:iam::555555555555:policy/s3_deny_permissions_boundary",
24 | })
25 |
26 | test_deny_policy_statement_undefined if {
27 | inp := mock_create
28 |
29 | assert_in("please limit the scope for IAM user: IAMUserTest", deny) with input as inp
30 | }
31 |
32 | test_deny_user_with_admin if {
33 | inp := object.union(mock_create, with_properties({
34 | "Policies": [
35 | {"PolicyDocument": {
36 | "PolicyName": "Test1",
37 | "Statement": [{
38 | "Action": "'Create'",
39 | "Effect": "Allow",
40 | "Resource": "'*'",
41 | }],
42 | "Version": "2012-10-17",
43 | }},
44 | {"PolicyDocument": {
45 | "PolicyName": "Test2",
46 | "Statement": [{
47 | "Action": "'*'",
48 | "Effect": "Allow",
49 | "Resource": "'*'",
50 | }],
51 | "Version": "2012-10-17",
52 | }},
53 | ],
54 | "UserName": "WithInlineAdminPolicy",
55 | }))
56 |
57 | msg := "please limit the scope for IAM user: IAMUserTest"
58 |
59 | assert_in(msg, deny) with input as inp
60 | }
61 |
62 | test_allow_user_without_admin if {
63 | inp := object.union(mock_create, with_properties({
64 | "Policies": [{"PolicyDocument": {
65 | "PolicyName": "Test1",
66 | "Statement": [{
67 | "Action": "'Create'",
68 | "Effect": "Allow",
69 | "Resource": "'*'",
70 | }],
71 | "Version": "2012-10-17",
72 | }}],
73 | "UserName": "WithInlineAdminPolicy",
74 | }))
75 |
76 | msg := "please limit the scope for IAM user: IAMUserTest"
77 |
78 | assert_not_in(msg, deny) with input as inp
79 | }
80 |
81 | test_allow_user_limited_scope if {
82 | inp := object.union(mock_create, with_properties({
83 | "Policies": [{"PolicyDocument": {
84 | "PolicyName": "Test",
85 | "Statement": [{
86 | "Action": "'ec2:*'",
87 | "Effect": "Allow",
88 | "Resource": "'*'",
89 | }],
90 | "Version": "2012-10-17",
91 | }}],
92 | "UserName": "WithInlineEc2Policy",
93 | }))
94 |
95 | assert_empty(deny) with input as inp
96 | }
97 |
--------------------------------------------------------------------------------
/examples/policy/iam/user/principal_boundary.rego:
--------------------------------------------------------------------------------
1 | package aws.iam.user
2 |
3 | import rego.v1
4 |
5 | excluded_principal_prefixes := ["excluded", "iam-excluded", "test-excluded-user"]
6 |
7 | deny contains msg if {
8 | not excluded_principal_name
9 | not permission_boundary_exists
10 |
11 | msg := sprintf("PermissionsBoundary is not set for %s", [input.resource.id])
12 | }
13 |
14 | deny contains msg if {
15 | not excluded_principal_name
16 | permission_boundary_exists
17 | not valid_permission_boundary
18 |
19 | msg := sprintf(
20 | "PermissionsBoundary %s is not allowed for %s",
21 | [input.resource.properties.PermissionsBoundary, input.resource.id],
22 | )
23 | }
24 |
25 | excluded_principal_name if {
26 | name := input.resource.properties.UserName
27 | some prefix in excluded_principal_prefixes
28 | startswith(name, prefix)
29 | }
30 |
31 | permission_boundary_exists if input.resource.properties.PermissionsBoundary
32 |
33 | valid_permission_boundary if {
34 | input.resource.properties.PermissionsBoundary == "arn:aws:iam::555555555555:policy/s3_deny_permissions_boundary"
35 | }
36 |
--------------------------------------------------------------------------------
/examples/policy/iam/user/principal_boundary_test.rego:
--------------------------------------------------------------------------------
1 | package aws.iam.user_principal_boundary_test
2 |
3 | import rego.v1
4 |
5 | import data.aws.iam.user.deny
6 | import data.aws.iam.user.excluded_principal_name
7 |
8 | import data.assertions.assert_in
9 | import data.assertions.assert_not_in
10 |
11 | import data.test_helpers.create_with_properties
12 | import data.test_helpers.with_properties
13 |
14 | mock_create := create_with_properties("AWS::IAM::User", "IAMUserTest", {"AssumeRolePolicyDocument": {
15 | "Version": "2012-10-17",
16 | "Statement": [{
17 | "Action": "sts:AssumeRole",
18 | "Effect": "Allow",
19 | "Principal": {"Service": "codepipeline.amazonaws.com"},
20 | }],
21 | }})
22 |
23 | test_deny_auto_generated_name_not_excluded if {
24 | inp := object.union(mock_create, with_properties({
25 | "RoleName": "iam-not-excluded-cfn-hooks-cfn-stack-1-fail-046693375555",
26 | "PermissionsBoundary": "arn:aws:iam::555555555555:policy/invalid_s3_deny_permissions_boundary",
27 | }))
28 |
29 | # regal ignore:line-length
30 | assert_in("PermissionsBoundary arn:aws:iam::555555555555:policy/invalid_s3_deny_permissions_boundary is not allowed for IAMUserTest", deny) with input as inp
31 | }
32 |
33 | test_deny_permission_boundary_not_set if {
34 | inp := mock_create
35 |
36 | assert_in("PermissionsBoundary is not set for IAMUserTest", deny) with input as inp
37 | }
38 |
39 | test_allow_permission_boundary_included if {
40 | inp := object.union(mock_create, with_properties({
41 | "RoleName": "cfn-hooks-pass-046693375555",
42 | "PermissionsBoundary": "arn:aws:iam::555555555555:policy/s3_deny_permissions_boundary",
43 | }))
44 |
45 | assert_not_in("PermissionsBoundary is not set for IAMUserTest", deny) with input as inp
46 | }
47 |
48 | test_allow_user_name_excluded if {
49 | inp := object.union(mock_create, with_properties({"UserName": "excluded-cfn-hooks-stack1-046693375555"}))
50 |
51 | excluded_principal_name with input as inp
52 | }
53 |
--------------------------------------------------------------------------------
/examples/policy/main.rego:
--------------------------------------------------------------------------------
1 | # METADATA
2 | # description: |
3 | # Dynamic routing to policy based in input.resource.type,
4 | # aggregating the deny rules found in all policies with a
5 | # matching package name
6 | #
7 | package system
8 |
9 | import rego.v1
10 |
11 | # METADATA
12 | # entrypoint: true
13 | main := {
14 | "allow": count(violations) == 0,
15 | "violations": violations,
16 | }
17 |
18 | # METADATA
19 | # description: |
20 | # Main routing logic, simply converting input.resource.type, e.g.
21 | # AWS::S3::Bucket to data.aws.s3.bucket and returning that document.
22 | #
23 | # By default, only input.action == "CREATE" | "UPDATE" will be routed
24 | # to the data.aws.s3.bucket document. If handling "DELETE" actions is
25 | # desirable, one may create a special policy for that by simply appending
26 | # "delete" to the package name, e.g. data.aws.s3.bucket.delete
27 | #
28 | route := document(lower(component), lower(type)) if ["AWS", component, type] = split(input.resource.type, "::")
29 |
30 | violations contains msg if {
31 | # Aggregate all deny rules found in routed document
32 | some msg in route.deny
33 | }
34 |
35 | #
36 | # Basic input validation to avoid having to do this in each resource policy
37 | #
38 |
39 | violations contains "Missing input.resource" if not input.resource
40 |
41 | violations contains "Missing input.resource.type" if not input.resource.type
42 |
43 | violations contains "Missing input.resource.id" if not input.resource.id
44 |
45 | violations contains "Missing input.action" if not input.action
46 |
47 | #
48 | # Helpers
49 | #
50 |
51 | document(component, type) := data.aws[component][type] if input.action != "DELETE"
52 |
53 | document(component, type) := data.aws[component][type].delete if input.action == "DELETE"
54 |
--------------------------------------------------------------------------------
/examples/policy/main_test.rego:
--------------------------------------------------------------------------------
1 | package main_test
2 |
3 | import rego.v1
4 |
5 | import data.system.main
6 |
7 | import data.assertions.assert_equals
8 |
9 | test_simple_routing_deny if {
10 | inp := {"action": "CREATE", "resource": {"type": "AWS::S3::Bucket", "id": "foo"}}
11 | aws := {"s3": {"bucket": {"deny": {"test violation"}}}}
12 |
13 | assert_equals({"allow": false, "violations": {"test violation"}}, main) with input as inp with data.aws as aws
14 | }
15 |
16 | test_simple_routing_deny_many if {
17 | inp := {"action": "CREATE", "resource": {"type": "AWS::S3::Bucket", "id": "foo"}}
18 | aws := {"s3": {"bucket": {"deny": {"foo", "bar", "baz"}}}}
19 |
20 | assert_equals({"allow": false, "violations": {"foo", "bar", "baz"}}, main) with input as inp with data.aws as aws
21 | }
22 |
23 | test_simple_routing_allow if {
24 | inp := {"action": "CREATE", "resource": {"type": "AWS::S3::Bucket", "id": "foo"}}
25 | aws := {"s3": {"bucket": {"deny": {}}}}
26 |
27 | assert_equals({"allow": true, "violations": set()}, main) with input as inp with data.aws as aws
28 | }
29 |
30 | test_simple_routing_delete_allow if {
31 | inp := {"action": "DELETE", "resource": {"type": "AWS::S3::Bucket", "id": "foo"}}
32 | aws := {"s3": {"bucket": {"deny": {"denied but not delete"}, "delete": {"deny": {}}}}}
33 |
34 | assert_equals({"allow": true, "violations": set()}, main) with input as inp with data.aws as aws
35 | }
36 |
37 | test_simple_routing_delete_deny if {
38 | inp := {"action": "DELETE", "resource": {"type": "AWS::S3::Bucket", "id": "foo"}}
39 | aws := {"s3": {"bucket": {"deny": {"denied but not delete"}, "delete": {"deny": {"deny delete"}}}}}
40 |
41 | assert_equals({"allow": false, "violations": {"deny delete"}}, main) with input as inp with data.aws as aws
42 | }
43 |
44 | test_input_validation if {
45 | main.violations["Missing input.action"] with input as {}
46 | main.violations["Missing input.resource"] with input as {}
47 | main.violations["Missing input.resource.id"] with input as {}
48 | main.violations["Missing input.resource.type"] with input as {}
49 | }
50 |
--------------------------------------------------------------------------------
/examples/policy/rds/db/encryption.rego:
--------------------------------------------------------------------------------
1 | package aws.rds.dbinstance
2 |
3 | import rego.v1
4 |
5 | deny contains msg if {
6 | not valid_storage_encryption
7 | msg := sprintf("storage encryption not enabled for: %s", [input.resource.id])
8 | }
9 |
10 | valid_storage_encryption if input.resource.properties.StorageEncrypted == "true"
11 |
--------------------------------------------------------------------------------
/examples/policy/rds/db/encryption_test.rego:
--------------------------------------------------------------------------------
1 | package aws.rds.dbinstance_test
2 |
3 | import rego.v1
4 |
5 | import data.aws.rds.dbinstance.deny
6 |
7 | import data.test_helpers.create_with_properties
8 |
9 | test_deny_storage_encryption_disabled if {
10 | inp := create_with_properties("AWS::RDS::DBInstance", "RDSInstance", {"StorageEncrypted": "false"})
11 |
12 | deny["storage encryption not enabled for: RDSInstance"] with input as inp
13 | }
14 |
15 | test_deny_storage_encryption_not_set if {
16 | 	inp := create_with_properties("AWS::RDS::DBInstance", "RDSInstance", {
17 | "DBName": "OPA-DB",
18 | "Engine": "MySQL",
19 | 	})
20 |
21 | deny["storage encryption not enabled for: RDSInstance"] with input as inp
22 | }
23 |
24 | test_allow_storage_encryption_enabled if {
25 | inp := create_with_properties("AWS::RDS::DBInstance", "RDSInstance", {"StorageEncrypted": "true"})
26 |
27 | count(deny) == 0 with input as inp
28 | }
29 |
--------------------------------------------------------------------------------
/examples/policy/s3/bucket/encryption.rego:
--------------------------------------------------------------------------------
1 | package aws.s3.bucket
2 |
3 | import rego.v1
4 |
5 | deny contains msg if {
6 | not bucket_excluded_encryption
7 | not valid_bucket_encryption
8 | msg := sprintf("bucket encryption is not enabled for bucket: %s", [input.resource.id])
9 | }
10 |
11 | deny contains msg if {
12 | not bucket_excluded_encryption
13 | not valid_sse_algo
14 | msg := sprintf("encryption not set to aws:kms for bucket: %s", [input.resource.id])
15 | }
16 |
17 | valid_bucket_encryption if input.resource.properties.BucketEncryption != {}
18 |
19 | valid_sse_algo if {
20 | input.resource.properties.BucketEncryption.ServerSideEncryptionConfiguration[0].ServerSideEncryptionByDefault.SSEAlgorithm == "aws:kms"
21 | }
22 |
23 | bucket_excluded_encryption if {
24 | some prefix in {"excluded-", "baseline-", "access-"}
25 | startswith(input.resource.properties.BucketName, prefix)
26 | }
27 |
--------------------------------------------------------------------------------
/examples/policy/s3/bucket/encryption_test.rego:
--------------------------------------------------------------------------------
1 | package aws.s3.bucket_encryption_test
2 |
3 | import rego.v1
4 |
5 | import data.aws.s3.bucket.deny
6 |
7 | import data.test_helpers.create_with_properties
8 |
9 | test_deny_if_bucket_encryption_not_set if {
10 | inp := create_with_properties("AWS::S3::Bucket", "MyS3Bucket", {"BucketEncryption": {}})
11 |
12 | deny["bucket encryption is not enabled for bucket: MyS3Bucket"] with input as inp
13 | }
14 |
15 | test_deny_if_bucket_encryption_is_not_aws_kms if {
16 | inp := create_with_properties(
17 | "AWS::S3::Bucket", "MyS3Bucket",
18 | # regal ignore:line-length
19 | {"BucketEncryption": {"ServerSideEncryptionConfiguration": [{"ServerSideEncryptionByDefault": {"SSEAlgorithm": "aws:invalid"}}]}},
20 | )
21 |
22 | deny["encryption not set to aws:kms for bucket: MyS3Bucket"] with input as inp
23 | }
24 |
25 | test_allow_if_bucket_encryption_is_set if {
26 | inp := create_with_properties(
27 | "AWS::S3::Bucket",
28 | "MyS3Bucket",
29 | # regal ignore:line-length
30 | {"BucketEncryption": {"ServerSideEncryptionConfiguration": [{"ServerSideEncryptionByDefault": {"SSEAlgorithm": "aws:kms"}}]}},
31 | )
32 |
33 | not deny["bucket encryption is not enabled for bucket: MyS3Bucket"] with input as inp
34 | not deny["encryption not set to aws:kms for bucket: MyS3Bucket"] with input as inp
35 | }
36 |
--------------------------------------------------------------------------------
/examples/policy/s3/bucket/logging.rego:
--------------------------------------------------------------------------------
1 | package aws.s3.bucket
2 |
3 | import rego.v1
4 |
5 | deny contains msg if {
6 | not bucket_excluded_logging
7 | not valid_logging_prefix
8 | msg := sprintf("logging prefix is not set correctly for bucket: %s", [input.resource.id])
9 | }
10 |
11 | deny contains msg if {
12 | not bucket_excluded_logging
13 | not valid_logging_destination
14 | msg := sprintf("logging destination bucket is not correct: %s", [input.resource.id])
15 | }
16 |
17 | valid_logging_prefix if {
18 | prefix := input.resource.properties.LoggingConfiguration.LogFilePrefix
19 | prefix == concat("-", ["s3-logs", input.resource.properties.BucketName])
20 | }
21 |
22 | valid_logging_prefix if {
23 | not input.resource.properties.BucketName
24 | input.resource.properties.LoggingConfiguration.LogFilePrefix == "s3-logs"
25 | }
26 |
27 | valid_logging_destination if {
28 | input.resource.properties.LoggingConfiguration.DestinationBucketName == "my-logging-bucket"
29 | }
30 |
31 | bucket_excluded_logging if {
32 | some prefix in {"excluded-", "access-", "secure-"}
33 | startswith(input.resource.properties.BucketName, prefix)
34 | }
35 |
--------------------------------------------------------------------------------
/examples/policy/s3/bucket/logging_test.rego:
--------------------------------------------------------------------------------
1 | package aws.s3.bucket_logging_test
2 |
3 | import rego.v1
4 |
5 | import data.aws.s3.bucket.deny
6 |
7 | import data.test_helpers.create_with_properties
8 |
9 | test_deny_if_logging_prefix_invalid if {
10 | inp := create_with_properties(
11 | "AWS::S3::Bucket", "MyS3Bucket",
12 | {"LoggingConfiguration": {"LogFilePrefix": "invalid"}},
13 | )
14 |
15 | deny["logging prefix is not set correctly for bucket: MyS3Bucket"] with input as inp
16 | }
17 |
18 | test_deny_if_logging_bucket_invalid if {
19 | inp := create_with_properties(
20 | "AWS::S3::Bucket", "MyS3Bucket",
21 | {"LoggingConfiguration": {"DestinationBucketName": "invalid"}},
22 | )
23 |
24 | deny["logging destination bucket is not correct: MyS3Bucket"] with input as inp
25 | }
26 |
27 | test_deny_if_logging_configuration_unset if {
28 | mock_create := create_with_properties("AWS::S3::Bucket", "MyS3Bucket", {})
29 |
30 | deny["logging destination bucket is not correct: MyS3Bucket"] with input as mock_create
31 | deny["logging prefix is not set correctly for bucket: MyS3Bucket"] with input as mock_create
32 | }
33 |
34 | test_allow_if_prefix_and_destination_set if {
35 | inp := create_with_properties("AWS::S3::Bucket", "MyS3Bucket", {"LoggingConfiguration": {
36 | "LogFilePrefix": "s3-logs",
37 | "DestinationBucketName": "my-logging-bucket",
38 | }})
39 |
40 | not deny["logging destination bucket is not correct: MyS3Bucket"] with input as inp
41 | not deny["logging prefix is not set correctly for bucket: MyS3Bucket"] with input as inp
42 | }
43 |
44 | test_allow_if_bucket_name_set if {
45 | inp := create_with_properties("AWS::S3::Bucket", "MyS3Bucket", {
46 | "BucketName": "My-Bucket",
47 | "LoggingConfiguration": {
48 | "LogFilePrefix": "s3-logs-My-Bucket",
49 | "DestinationBucketName": "my-logging-bucket",
50 | },
51 | })
52 |
53 | not deny["logging destination bucket is not correct: MyS3Bucket"] with input as inp
54 | not deny["logging prefix is not set correctly for bucket: MyS3Bucket"] with input as inp
55 | }
56 |
--------------------------------------------------------------------------------
/examples/policy/s3/bucket/public_access.rego:
--------------------------------------------------------------------------------
1 | package aws.s3.bucket
2 |
3 | import rego.v1
4 |
5 | deny contains msg if {
6 | not bucket_excluded_public
7 | not public_access_blocked
8 |
9 | msg := sprintf("public access not blocked for bucket %s", [input.resource.id])
10 | }
11 |
12 | bucket_excluded_public if {
13 | some prefix in {"excluded-", "baseline-", "secure-"}
14 | startswith(input.resource.properties.BucketName, prefix)
15 | }
16 |
17 | public_access_blocked if {
18 | every property in ["BlockPublicAcls", "BlockPublicPolicy", "IgnorePublicAcls", "RestrictPublicBuckets"] {
19 | input.resource.properties.PublicAccessBlockConfiguration[property] == "true"
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/examples/policy/s3/bucket/public_access_test.rego:
--------------------------------------------------------------------------------
1 | package aws.s3.bucket_public_access_test
2 |
3 | import rego.v1
4 |
5 | import data.aws.s3.bucket.deny
6 |
7 | import data.test_helpers.create_with_properties
8 |
9 | test_deny_if_not_public_access_blocked if {
10 | inp := create_with_properties("AWS::S3::Bucket", "MyS3Bucket", {"PublicAccessBlockConfiguration": {
11 | "BlockPublicAcls": "false",
12 | "BlockPublicPolicy": "true",
13 | "IgnorePublicAcls": "true",
14 | "RestrictPublicBuckets": "false",
15 | }})
16 |
17 | deny["public access not blocked for bucket MyS3Bucket"] with input as inp
18 | }
19 |
20 | test_allow_if_public_access_blocked if {
21 | inp := create_with_properties("AWS::S3::Bucket", "MyS3Bucket", {"PublicAccessBlockConfiguration": {
22 | "BlockPublicAcls": "true",
23 | "BlockPublicPolicy": "true",
24 | "IgnorePublicAcls": "true",
25 | "RestrictPublicBuckets": "true",
26 | }})
27 |
28 | not deny["public access not blocked for bucket MyS3Bucket"] with input as inp
29 | }
30 |
31 | test_allow_if_excluded_prefix if {
32 | inp := create_with_properties("AWS::S3::Bucket", "MyS3Bucket", {"BucketName": "excluded-bucket"})
33 |
34 | not deny["public access not blocked for bucket MyS3Bucket"] with input as inp
35 | }
36 |
--------------------------------------------------------------------------------
/examples/policy/test_helpers.rego:
--------------------------------------------------------------------------------
1 | package test_helpers
2 |
3 | import rego.v1
4 |
5 | create_with_properties(type, id, properties) := object.union(mock_create(type, id), with_properties(properties))
6 |
7 | mock_create(type, id) := {
8 | "action": "CREATE",
9 | "hook": "StyraOPA::OPA::Hook",
10 | "resource": {
11 | "id": id,
12 | "name": type,
13 | "properties": {},
14 | "type": type,
15 | },
16 | }
17 |
18 | with_properties(obj) := {"resource": {"properties": obj}}
19 |
--------------------------------------------------------------------------------
/examples/templates/eks-cluster-logging/eks-deny-cluster.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 | Resources:
3 | EksCluster:
4 | Type: AWS::EKS::Cluster
5 | Properties:
6 | RoleArn: ""
7 | ResourcesVpcConfig:
8 | SubnetIds: [""]
--------------------------------------------------------------------------------
/examples/templates/eks-cluster-logging/eks-success-cluster-logging.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 | Resources:
3 | EksCluster:
4 | Type: AWS::EKS::Cluster
5 | Properties:
6 | RoleArn: ""
7 | ResourcesVpcConfig:
8 | SubnetIds: [""]
9 | EndpointPublicAccess: false
10 | EndpointPrivateAccess: true
11 | Logging:
12 | ClusterLogging:
13 | EnabledTypes:
14 | - {Type: audit}
15 | - {Type: authenticator}
--------------------------------------------------------------------------------
/examples/templates/eks-public-api/eks-deny-cluster.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 | Resources:
3 | EksCluster:
4 | Type: AWS::EKS::Cluster
5 | Properties:
6 | RoleArn: ""
7 | ResourcesVpcConfig:
8 | SubnetIds: [""]
--------------------------------------------------------------------------------
/examples/templates/eks-public-api/eks-success-public-api-disabled.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 | Resources:
3 | EksCluster:
4 | Type: AWS::EKS::Cluster
5 | Properties:
6 |       RoleArn: ""
7 |       ResourcesVpcConfig:
8 | SubnetIds: [""]
9 | EndpointPublicAccess: false
10 | EndpointPrivateAccess: true
11 | Logging:
12 | ClusterLogging:
13 | EnabledTypes:
14 | - {Type: audit}
15 | - {Type: authenticator}
--------------------------------------------------------------------------------
/examples/templates/iam-no-admin-statements/iam-fail-admin-allowed.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 |
3 | Resources:
4 | UserWithAdminInlinePolicy:
5 | Type: AWS::IAM::User
6 | Properties:
7 | Policies:
8 | - PolicyDocument:
9 | Version: "2012-10-17"
10 | Statement:
11 | - Effect: Allow
12 | Action: '*'
13 | Resource: '*'
14 | PolicyName: test
15 | UserName: WithInlineAdminPolicy
16 |
--------------------------------------------------------------------------------
/examples/templates/iam-no-admin-statements/iam-success-action-restricted.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 |
3 | Resources:
4 | UserWithEc2InlinePolicy:
5 | Type: AWS::IAM::User
6 | Properties:
7 | PermissionsBoundary: 'arn:aws:iam::555555555555:policy/s3_deny_permissions_boundary'
8 | Policies:
9 | - PolicyDocument:
10 | Version: "2012-10-17"
11 | Statement:
12 | - Effect: Allow
13 | Action: 'ec2:*'
14 | Resource: '*'
15 | PolicyName: test
16 | UserName: WithInlineEc2Policy
--------------------------------------------------------------------------------
/examples/templates/iam-principal-boundary/iam-fail-auto-generated-name-not-excluded.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 | Resources:
3 | IAMRoleTest:
4 | Type: AWS::IAM::Role
5 | Properties:
6 | RoleName: !Sub 'iam-not-excluded-cfn-hooks-${AWS::StackName}-${AWS::AccountId}'
7 | PermissionsBoundary: 'arn:aws:iam::555555555555:policy/invalid_s3_deny_permissions_boundary'
8 | AssumeRolePolicyDocument:
9 | Version: "2012-10-17"
10 | Statement:
11 | - Action: sts:AssumeRole
12 | Effect: Allow
13 | Principal:
14 | Service: codepipeline.amazonaws.com
15 |
--------------------------------------------------------------------------------
/examples/templates/iam-principal-boundary/iam-fail-permission-boundary-incorrect.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 | Resources:
3 | IAMRoleTest:
4 | Type: AWS::IAM::Role
5 | Properties:
6 | AssumeRolePolicyDocument:
7 | Version: "2012-10-17"
8 | Statement:
9 | - Action: sts:AssumeRole
10 | Effect: Allow
11 | Principal:
12 | Service: codepipeline.amazonaws.com
--------------------------------------------------------------------------------
/examples/templates/iam-principal-boundary/iam-success-permission-boundary-included.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 | Resources:
3 | IAMRoleTest:
4 | Type: AWS::IAM::Role
5 | Properties:
6 | PermissionsBoundary: 'arn:aws:iam::555555555555:policy/s3_deny_permissions_boundary'
7 | RoleName: !Sub 'cfn-hooks-pass-${AWS::AccountId}'
8 | AssumeRolePolicyDocument:
9 | Version: "2012-10-17"
10 | Statement:
11 | - Action: sts:AssumeRole
12 | Effect: Allow
13 | Principal:
14 | Service: codepipeline.amazonaws.com
--------------------------------------------------------------------------------
/examples/templates/iam-principal-boundary/iam-success-role-name-excluded.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 | Resources:
3 | IAMRoleTest:
4 | Type: AWS::IAM::Role
5 | Properties:
6 | RoleName: 'excluded-cfn-hooks-role'
7 | AssumeRolePolicyDocument:
8 | Version: "2012-10-17"
9 | Statement:
10 | - Action: sts:AssumeRole
11 | Effect: Allow
12 | Principal:
13 | Service: codepipeline.amazonaws.com
--------------------------------------------------------------------------------
/examples/templates/iam-principal-boundary/iam-success-user-name-excluded.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 | Resources:
3 | IAMUserTest:
4 | Type: AWS::IAM::User
5 | Properties:
6 | UserName: 'excluded-cfn-hooks-user'
7 | ManagedPolicyArns:
8 | - arn:aws:iam::aws:policy/AWSDenyAll
--------------------------------------------------------------------------------
/examples/templates/iam-users-have-policy/iam-fail-no-user-policy.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 |
3 | Resources:
4 |
5 | UserWithNoPolicies:
6 | Type: AWS::IAM::User
7 | Properties:
8 | UserName: NoPolicies
--------------------------------------------------------------------------------
/examples/templates/iam-users-have-policy/iam-success-user-policy-attached.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 |
3 | Resources:
4 | UserWithPolicy:
5 | Type: AWS::IAM::User
6 | Properties:
7 | PermissionsBoundary: 'arn:aws:iam::555555555555:policy/s3_deny_permissions_boundary'
8 | Policies:
9 | - PolicyDocument:
10 | Version: "2012-10-17"
11 | Statement:
12 | - Effect: Deny
13 | Action:
14 | - 'ec2:*'
15 | Resource: '*'
16 | PolicyName: test
17 | UserName: WithPolicy
18 |
19 | UserWithManagedPolicy:
20 | Type: AWS::IAM::User
21 | Properties:
22 | PermissionsBoundary: 'arn:aws:iam::555555555555:policy/s3_deny_permissions_boundary'
23 | ManagedPolicyArns:
24 | - arn:aws:iam::aws:policy/AWSDenyAll
25 | UserName: WithManagedPolicy
26 |
--------------------------------------------------------------------------------
/examples/templates/rds-encryption-verify/rds-fail-encryption-not-set.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: '2010-09-09'
2 | Metadata:
3 | License: Apache-2.0
4 | ObjectToTest: MasterDB
5 | Description: 'AWS CloudFormation Sample Template RDS_MySQL_With_Read_Replica: Sample
6 | template showing how to create a highly-available, RDS DBInstance with a read replica.
7 | **WARNING** This template creates an Amazon Relational Database Service database
8 | instance and Amazon CloudWatch alarms. You will be billed for the AWS resources
9 | used if you create a stack from this template.'
10 | Parameters:
11 | DBName:
12 | Default: MyDatabase
13 | Description: The database name
14 | Type: String
15 | MinLength: '1'
16 | MaxLength: '64'
17 | AllowedPattern: '[a-zA-Z][a-zA-Z0-9]*'
18 | ConstraintDescription: must begin with a letter and contain only alphanumeric
19 | characters.
20 | DBUser:
21 | NoEcho: 'true'
22 | Description: The database admin account username
23 | Type: String
24 | MinLength: '1'
25 | MaxLength: '16'
26 | AllowedPattern: '[a-zA-Z][a-zA-Z0-9]*'
27 | ConstraintDescription: must begin with a letter and contain only alphanumeric
28 | characters.
29 | DBPassword:
30 | NoEcho: 'true'
31 | Description: The database admin account password
32 | Type: String
33 | MinLength: '1'
34 | MaxLength: '41'
35 | AllowedPattern: '[a-zA-Z0-9]+'
36 | ConstraintDescription: must contain only alphanumeric characters.
37 | DBAllocatedStorage:
38 | Default: '5'
39 | Description: The size of the database (Gb)
40 | Type: Number
41 | MinValue: '5'
42 | MaxValue: '1024'
43 | ConstraintDescription: must be between 5 and 1024Gb.
44 | DBInstanceClass:
45 | Description: The database instance type
46 | Type: String
47 | Default: db.t2.small
48 | AllowedValues: [db.t1.micro, db.m1.small, db.m1.medium, db.m1.large, db.m1.xlarge,
49 | db.m2.xlarge, db.m2.2xlarge, db.m2.4xlarge, db.m3.medium, db.m3.large, db.m3.xlarge,
50 | db.m3.2xlarge, db.m4.large, db.m4.xlarge, db.m4.2xlarge, db.m4.4xlarge, db.m4.10xlarge,
51 | db.r3.large, db.r3.xlarge, db.r3.2xlarge, db.r3.4xlarge, db.r3.8xlarge, db.m2.xlarge,
52 | db.m2.2xlarge, db.m2.4xlarge, db.cr1.8xlarge, db.t2.micro, db.t2.small, db.t2.medium,
53 | db.t2.large]
54 | ConstraintDescription: must select a valid database instance type.
55 | EC2SecurityGroup:
56 | Description: The EC2 security group that contains instances that need access to
57 | the database
58 | Default: default
59 | Type: String
60 | AllowedPattern: '[a-zA-Z0-9\-]+'
61 | ConstraintDescription: must be a valid security group name.
62 | MultiAZ:
63 | Description: Multi-AZ master database
64 | Type: String
65 | Default: 'false'
66 | AllowedValues: ['true', 'false']
67 | ConstraintDescription: must be true or false.
68 | Conditions:
69 | Is-EC2-VPC: !Or [!Equals [!Ref 'AWS::Region', eu-central-1], !Equals [!Ref 'AWS::Region',
70 | cn-north-1]]
71 | Is-EC2-Classic: !Not [{Condition: Is-EC2-VPC}]
72 | Resources:
73 | DBEC2SecurityGroup:
74 | Type: AWS::EC2::SecurityGroup
75 | Condition: Is-EC2-VPC
76 | Properties:
77 | GroupDescription: Open database for access
78 | SecurityGroupIngress:
79 | - IpProtocol: tcp
80 | FromPort: '3306'
81 | ToPort: '3306'
82 | SourceSecurityGroupName: !Ref 'EC2SecurityGroup'
83 | DBSecurityGroup:
84 | Type: AWS::RDS::DBSecurityGroup
85 | Condition: Is-EC2-Classic
86 | Properties:
87 | DBSecurityGroupIngress:
88 | EC2SecurityGroupName: !Ref 'EC2SecurityGroup'
89 | GroupDescription: database access
90 | MasterDB:
91 | Type: AWS::RDS::DBInstance
92 | Properties:
93 | DBName: !Ref 'DBName'
94 | AllocatedStorage: !Ref 'DBAllocatedStorage'
95 | DBInstanceClass: !Ref 'DBInstanceClass'
96 | Engine: MySQL
97 | MasterUsername: !Ref 'DBUser'
98 | MasterUserPassword: !Ref 'DBPassword'
99 | MultiAZ: !Ref 'MultiAZ'
100 | StorageEncrypted: false
101 | Tags:
102 | - Key: Name
103 | Value: Master Database
104 | VPCSecurityGroups: !If [Is-EC2-VPC, [!GetAtt [DBEC2SecurityGroup, GroupId]],
105 | !Ref 'AWS::NoValue']
106 | DeletionPolicy: Snapshot
107 | Outputs:
108 | EC2Platform:
109 | Description: Platform in which this stack is deployed
110 | Value: !If [Is-EC2-VPC, EC2-VPC, EC2-Classic]
111 | MasterJDBCConnectionString:
112 | Description: JDBC connection string for the master database
113 | Value: !Join ['', ['jdbc:mysql://', !GetAtt [MasterDB, Endpoint.Address], ':',
114 | !GetAtt [MasterDB, Endpoint.Port], /, !Ref 'DBName']]
--------------------------------------------------------------------------------
/examples/templates/rds-encryption-verify/rds-success-encryption-set.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: '2010-09-09'
2 | Metadata:
3 | License: Apache-2.0
4 | Description: 'AWS CloudFormation Sample Template RDS_MySQL_With_Read_Replica: Sample
5 | template showing how to create a highly-available, RDS DBInstance with a read replica.
6 | **WARNING** This template creates an Amazon Relational Database Service database
7 | instance and Amazon CloudWatch alarms. You will be billed for the AWS resources
8 | used if you create a stack from this template.'
9 | Parameters:
10 | DBName:
11 | Default: MyDatabase
12 | Description: The database name
13 | Type: String
14 | MinLength: '1'
15 | MaxLength: '64'
16 | AllowedPattern: '[a-zA-Z][a-zA-Z0-9]*'
17 | ConstraintDescription: must begin with a letter and contain only alphanumeric
18 | characters.
19 | DBUser:
20 | NoEcho: 'true'
21 | Description: The database admin account username
22 | Type: String
23 | MinLength: '1'
24 | MaxLength: '16'
25 | AllowedPattern: '[a-zA-Z][a-zA-Z0-9]*'
26 | ConstraintDescription: must begin with a letter and contain only alphanumeric
27 | characters.
28 | DBPassword:
29 | NoEcho: 'true'
30 | Description: The database admin account password
31 | Type: String
32 | MinLength: '1'
33 | MaxLength: '41'
34 | AllowedPattern: '[a-zA-Z0-9]+'
35 | ConstraintDescription: must contain only alphanumeric characters.
36 | DBAllocatedStorage:
37 | Default: '5'
38 | Description: The size of the database (Gb)
39 | Type: Number
40 | MinValue: '5'
41 | MaxValue: '1024'
42 | ConstraintDescription: must be between 5 and 1024Gb.
43 | DBInstanceClass:
44 | Description: The database instance type
45 | Type: String
46 | Default: db.t2.small
47 | AllowedValues: [db.t1.micro, db.m1.small, db.m1.medium, db.m1.large, db.m1.xlarge,
48 | db.m2.xlarge, db.m2.2xlarge, db.m2.4xlarge, db.m3.medium, db.m3.large, db.m3.xlarge,
49 | db.m3.2xlarge, db.m4.large, db.m4.xlarge, db.m4.2xlarge, db.m4.4xlarge, db.m4.10xlarge,
50 | db.r3.large, db.r3.xlarge, db.r3.2xlarge, db.r3.4xlarge, db.r3.8xlarge, db.m2.xlarge,
51 | db.m2.2xlarge, db.m2.4xlarge, db.cr1.8xlarge, db.t2.micro, db.t2.small, db.t2.medium,
52 | db.t2.large]
53 | ConstraintDescription: must select a valid database instance type.
54 | EC2SecurityGroup:
55 | Description: The EC2 security group that contains instances that need access to
56 | the database
57 | Default: default
58 | Type: String
59 | AllowedPattern: '[a-zA-Z0-9\-]+'
60 | ConstraintDescription: must be a valid security group name.
61 | MultiAZ:
62 | Description: Multi-AZ master database
63 | Type: String
64 | Default: 'false'
65 | AllowedValues: ['true', 'false']
66 | ConstraintDescription: must be true or false.
67 | Conditions:
68 | Is-EC2-VPC: !Or [!Equals [!Ref 'AWS::Region', eu-central-1], !Equals [!Ref 'AWS::Region',
69 | cn-north-1]]
70 | Is-EC2-Classic: !Not [{Condition: Is-EC2-VPC}]
71 | Resources:
72 | DBEC2SecurityGroup:
73 | Type: AWS::EC2::SecurityGroup
74 | Condition: Is-EC2-VPC
75 | Properties:
76 | GroupDescription: Open database for access
77 | SecurityGroupIngress:
78 | - IpProtocol: tcp
79 | FromPort: '3306'
80 | ToPort: '3306'
81 | SourceSecurityGroupName: !Ref 'EC2SecurityGroup'
82 | DBSecurityGroup:
83 | Type: AWS::RDS::DBSecurityGroup
84 | Condition: Is-EC2-Classic
85 | Properties:
86 | DBSecurityGroupIngress:
87 | EC2SecurityGroupName: !Ref 'EC2SecurityGroup'
88 | GroupDescription: database access
89 | MasterDB:
90 | Type: AWS::RDS::DBInstance
91 | Properties:
92 | DBName: !Ref 'DBName'
93 | AllocatedStorage: !Ref 'DBAllocatedStorage'
94 | DBInstanceClass: !Ref 'DBInstanceClass'
95 | Engine: MySQL
96 | MasterUsername: !Ref 'DBUser'
97 | MasterUserPassword: !Ref 'DBPassword'
98 | MultiAZ: !Ref 'MultiAZ'
99 | StorageEncrypted: true
100 | Tags:
101 | - Key: Name
102 | Value: Master Database
103 | VPCSecurityGroups: !If [Is-EC2-VPC, [!GetAtt [DBEC2SecurityGroup, GroupId]],
104 | !Ref 'AWS::NoValue']
105 | DeletionPolicy: Snapshot
106 | Outputs:
107 | EC2Platform:
108 | Description: Platform in which this stack is deployed
109 | Value: !If [Is-EC2-VPC, EC2-VPC, EC2-Classic]
110 | MasterJDBCConnectionString:
111 | Description: JDBC connection string for the master database
112 | Value: !Join ['', ['jdbc:mysql://', !GetAtt [MasterDB, Endpoint.Address], ':',
113 | !GetAtt [MasterDB, Endpoint.Port], /, !Ref 'DBName']]
--------------------------------------------------------------------------------
/examples/templates/s3-block-public-access/s3-fail-no-settings-specified.yaml:
--------------------------------------------------------------------------------
1 | Resources:
2 | S3Bucket:
3 | Type: 'AWS::S3::Bucket'
4 | Properties:
5 | BucketName: !Sub 'mybucket-${AWS::Region}-${AWS::AccountId}-fail2'
--------------------------------------------------------------------------------
/examples/templates/s3-block-public-access/s3-fail-not-all-4-settings-specified.yaml:
--------------------------------------------------------------------------------
1 | Resources:
2 | S3Bucket:
3 | Type: 'AWS::S3::Bucket'
4 | Properties:
5 | BucketName: !Sub 'mybucket-${AWS::Region}-${AWS::AccountId}-fail1'
6 | PublicAccessBlockConfiguration:
7 | BlockPublicAcls: true
8 | BlockPublicPolicy: true
9 | IgnorePublicAcls: true
--------------------------------------------------------------------------------
/examples/templates/s3-block-public-access/s3-success-all-access-blocked.yaml:
--------------------------------------------------------------------------------
1 | Resources:
2 | S3Bucket:
3 | Type: 'AWS::S3::Bucket'
4 | Properties:
5 | BucketName: 'access-blocked-bucket'
6 | PublicAccessBlockConfiguration:
7 | BlockPublicAcls: true
8 | BlockPublicPolicy: true
9 | IgnorePublicAcls: true
10 | RestrictPublicBuckets: true
--------------------------------------------------------------------------------
/examples/templates/s3-block-public-access/s3-success-name-prefix-excluded.yaml:
--------------------------------------------------------------------------------
1 | Resources:
2 | S3Bucket:
3 | Type: 'AWS::S3::Bucket'
4 | Properties:
5 | BucketName: 'excluded-name-bucket'
--------------------------------------------------------------------------------
/examples/templates/s3-bucket-encryption/s3-fail-no-bucket-encryption.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 | Resources:
3 | S3Bucket:
4 | Type: AWS::S3::Bucket
5 | Properties: {}
--------------------------------------------------------------------------------
/examples/templates/s3-bucket-encryption/s3-fail-wrong-encryption-type.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 | Resources:
3 | S3Bucket:
4 | Type: AWS::S3::Bucket
5 | Properties: {}
6 |
7 | EncryptedS3Bucket:
8 | Type: 'AWS::S3::Bucket'
9 | Properties:
10 | BucketEncryption:
11 | ServerSideEncryptionConfiguration:
12 | - ServerSideEncryptionByDefault:
13 | SSEAlgorithm: 'AES256'
14 |
--------------------------------------------------------------------------------
/examples/templates/s3-bucket-encryption/s3-success-encryption.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 | Description: This CloudFormation template provisions an encrypted S3 Bucket
3 | Resources:
4 | EncryptedS3Bucket:
5 | Type: 'AWS::S3::Bucket'
6 | Properties:
7 | BucketName: 'secure-cfn-bucket-encrypted'
8 | BucketEncryption:
9 | ServerSideEncryptionConfiguration:
10 | - ServerSideEncryptionByDefault:
11 | SSEAlgorithm: 'aws:kms'
12 | KMSMasterKeyID: !Ref EncryptionKey
13 | BucketKeyEnabled: true
14 | Tags:
15 | - Key: "keyname1"
16 | Value: "value1"
17 |
18 | EncryptionKey:
19 | Type: AWS::KMS::Key
20 | DeletionPolicy: Retain
21 | Properties:
22 |       Description: KMS key used to encrypt the S3 bucket
23 | EnableKeyRotation: true
24 | KeyPolicy:
25 | Version: "2012-10-17"
26 | Statement:
27 | - Sid: Enable full access for owning account
28 | Effect: Allow
29 | Principal:
30 | AWS: !Ref "AWS::AccountId"
31 | Action: kms:*
32 | Resource: "*"
33 |
34 | Outputs:
35 | EncryptedBucketName:
36 | Value: !Ref EncryptedS3Bucket
37 |
--------------------------------------------------------------------------------
/examples/templates/s3-bucket-logging-enabled/s3-fail-no-logging.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 | Resources:
3 | S3Bucket:
4 | Type: AWS::S3::Bucket
5 | Properties:
6 | LoggingConfiguration: {}
--------------------------------------------------------------------------------
/examples/templates/s3-bucket-logging-enabled/s3-success-logging-enabled.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 | Resources:
3 | S3Bucket:
4 | Type: AWS::S3::Bucket
5 | Properties:
6 | BucketName: 'baseline-styra-opa-bucket'
7 | LoggingConfiguration:
8 | DestinationBucketName: 'my-logging-bucket'
9 | LogFilePrefix: 's3-logs-baseline-styra-opa-bucket'
--------------------------------------------------------------------------------
/examples/templates/security-group-open-ingress/sg-fail-open-to-public.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 | Resources:
3 | SecurityGroup:
4 | Type: AWS::EC2::SecurityGroup
5 | Properties:
6 | GroupDescription: "wide open group"
7 | SecurityGroupIngress:
8 | - IpProtocol: -1
9 | CidrIp: 0.0.0.0/0
--------------------------------------------------------------------------------
/examples/templates/security-group-open-ingress/sg-success-restricted-to-subnet.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 | Resources:
3 | SecurityGroup:
4 | Type: AWS::EC2::SecurityGroup
5 | Properties:
6 | GroupDescription: "less wide open group"
7 | SecurityGroupIngress:
8 | - IpProtocol: -1
9 | CidrIp: 10.0.0.0/16
--------------------------------------------------------------------------------
/hooks/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | .hypothesis/
51 | .pytest_cache/
52 |
53 | # Translations
54 | *.mo
55 | *.pot
56 |
57 | # Django stuff:
58 | *.log
59 | local_settings.py
60 | db.sqlite3
61 | db.sqlite3-journal
62 |
63 | # Flask stuff:
64 | instance/
65 | .webassets-cache
66 |
67 | # Scrapy stuff:
68 | .scrapy
69 |
70 | # Sphinx documentation
71 | docs/_build/
72 |
73 | # PyBuilder
74 | target/
75 |
76 | # Jupyter Notebook
77 | .ipynb_checkpoints
78 |
79 | # IPython
80 | profile_default/
81 | ipython_config.py
82 |
83 | # pyenv
84 | .python-version
85 |
86 | # pipenv
87 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
88 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
89 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
90 | # install all needed dependencies.
91 | #Pipfile.lock
92 |
93 | # celery beat schedule file
94 | celerybeat-schedule
95 |
96 | # SageMath parsed files
97 | *.sage.py
98 |
99 | # Environments
100 | .env
101 | .venv
102 | env/
103 | venv/
104 | ENV/
105 | env.bak/
106 | venv.bak/
107 |
108 | # Spyder project settings
109 | .spyderproject
110 | .spyproject
111 |
112 | # Rope project settings
113 | .ropeproject
114 |
115 | # mkdocs documentation
116 | /site
117 |
118 | # mypy
119 | .mypy_cache/
120 | .dmypy.json
121 | dmypy.json
122 |
123 | # Pyre type checker
124 | .pyre/
125 |
126 | # contains credentials
127 | sam-tests/
128 |
129 | rpdk.log*
130 |
--------------------------------------------------------------------------------
/hooks/.rpdk-config:
--------------------------------------------------------------------------------
1 | {
2 | "artifact_type": "HOOK",
3 | "typeName": "Styra::OPA::Hook",
4 | "language": "python37",
5 | "runtime": "python3.7",
6 | "entrypoint": "styra_opa_hook.handlers.hook",
7 | "testEntrypoint": "styra_opa_hook.handlers.test_entrypoint",
8 | "settings": {
9 | "version": false,
10 | "subparser_name": null,
11 | "verbose": 0,
12 | "force": false,
13 | "type_name": null,
14 | "artifact_type": null,
15 | "endpoint_url": null,
16 | "region": null,
17 | "target_schemas": [],
18 | "use_docker": true,
19 | "protocolVersion": "2.0.0"
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/hooks/README.md:
--------------------------------------------------------------------------------
1 | # Styra::OPA::Hook
2 |
3 | Congratulations on starting development! Next steps:
4 |
5 | 1. Write the JSON schema describing your resource, `styra-opa-hook.json`
6 | 2. Implement your resource handlers in `styra_opa_hook/handlers.py`
7 |
8 | > Don't modify `models.py` by hand; any modifications will be overwritten when the `generate` or `package` commands are run.
9 |
10 | Implement your hook handlers here. Each handler must always return a `ProgressEvent`.
11 |
12 | ```python
13 | ProgressEvent(
14 | # Required
15 | # Must be one of OperationStatus.IN_PROGRESS, OperationStatus.FAILED, OperationStatus.SUCCESS
16 | status=OperationStatus.IN_PROGRESS,
17 | # Required on SUCCESS (except for LIST where resourceModels is required)
18 | # The current resource model after the operation; instance of ResourceModel class
19 | resourceModel=model,
20 | resourceModels=None,
21 | # Required on FAILED
22 | # Customer-facing message, displayed in e.g. CloudFormation stack events
23 | message="",
24 | # Required on FAILED: a HandlerErrorCode
25 | errorCode=HandlerErrorCode.InternalFailure,
26 | # Optional
27 | # Use to store any state between re-invocation via IN_PROGRESS
28 | callbackContext={},
29 | # Required on IN_PROGRESS
30 | # The number of seconds to delay before re-invocation
31 | callbackDelaySeconds=0,
32 | )
33 | ```
34 |
35 | Failures can be passed back to CloudFormation by either raising an exception from `cloudformation_cli_python_lib.exceptions`, or setting the ProgressEvent's `status` to `OperationStatus.FAILED` and `errorCode` to one of `cloudformation_cli_python_lib.HandlerErrorCode`. There is a static helper function, `ProgressEvent.failed`, for this common case.
36 |
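For example, both options could look roughly like this in a handler (a minimal sketch rather than code from `handlers.py`; the function name `validate_decision` is made up for illustration, and the exception class is assumed to be one of those exported by `cloudformation_cli_python_lib.exceptions`):

```python
from cloudformation_cli_python_lib import HandlerErrorCode, OperationStatus, ProgressEvent
from cloudformation_cli_python_lib import exceptions


def validate_decision(reachable: bool, allowed: bool, message: str) -> ProgressEvent:
    """Hypothetical helper showing the two ways of signalling failure."""
    # Option 1: raise a library exception; the framework converts it into a FAILED event
    if not reachable:
        raise exceptions.InternalFailure("could not reach the policy endpoint")

    # Option 2: build the FAILED event via the ProgressEvent.failed helper mentioned above
    # (the error code here is only illustrative)
    if not allowed:
        return ProgressEvent.failed(HandlerErrorCode.InternalFailure, message)

    return ProgressEvent(status=OperationStatus.SUCCESS)
```
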
37 | ## What's with the type hints?
38 |
39 | We hope they'll be useful for getting started more quickly with an IDE that supports type hints. Type hints are optional: if your code doesn't use them, it will still work.
40 |
--------------------------------------------------------------------------------
/hooks/hook-role.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 | Description: >
3 | This CloudFormation template creates a role assumed by CloudFormation
4 | during Hook operations on behalf of the customer.
5 |
6 | Resources:
7 | ExecutionRole:
8 | Type: AWS::IAM::Role
9 | Properties:
10 | MaxSessionDuration: 8400
11 | AssumeRolePolicyDocument:
12 | Version: '2012-10-17'
13 | Statement:
14 | - Effect: Allow
15 | Principal:
16 | Service:
17 | - hooks.cloudformation.amazonaws.com
18 | - resources.cloudformation.amazonaws.com
19 | Action: sts:AssumeRole
20 | Condition:
21 | StringEquals:
22 | aws:SourceAccount:
23 | Ref: AWS::AccountId
24 | StringLike:
25 | aws:SourceArn:
26 | Fn::Sub: arn:${AWS::Partition}:cloudformation:${AWS::Region}:${AWS::AccountId}:type/hook/Styra-OPA-Hook/*
27 | Path: "/"
28 | Policies:
29 | - PolicyName: HookTypePolicy
30 | PolicyDocument:
31 | Version: '2012-10-17'
32 | Statement:
33 | - Effect: Allow
34 | Action:
35 | - "secretsmanager:GetSecretValue"
36 | Resource: "*"
37 | Outputs:
38 | ExecutionRoleArn:
39 | Value:
40 | Fn::GetAtt: ExecutionRole.Arn
41 |
--------------------------------------------------------------------------------
/hooks/requirements.txt:
--------------------------------------------------------------------------------
1 | cloudformation-cli-python-lib>=2.1.9
2 | requests
3 | botocore
4 |
--------------------------------------------------------------------------------
/hooks/src/styra_opa_hook/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/StyraInc/opa-aws-cloudformation-hook/cf2b057e1be0c1a3b92f9802519da2cb950efdaa/hooks/src/styra_opa_hook/__init__.py
--------------------------------------------------------------------------------
/hooks/src/styra_opa_hook/handlers.py:
--------------------------------------------------------------------------------
1 | """Handlers for delegating AWS Cloudformation hooks to OPA"""
2 |
3 | import logging
4 | from typing import Any, MutableMapping, Optional
5 |
6 | from botocore.exceptions import ClientError
7 | import requests
8 |
9 | from cloudformation_cli_python_lib import (
10 | Hook,
11 | HookInvocationPoint,
12 | OperationStatus,
13 | ProgressEvent,
14 | SessionProxy,
15 | )
16 |
17 | from .models import HookHandlerRequest, TypeConfigurationModel
18 |
19 | # Use this logger to forward log messages to CloudWatch Logs.
20 | LOG = logging.getLogger(__name__)
21 | LOG.setLevel(logging.INFO)
22 |
23 | TYPE_NAME = "Styra::OPA::Hook"
24 |
25 | hook = Hook(TYPE_NAME, TypeConfigurationModel)
26 | test_entrypoint = hook.test_entrypoint
27 |
28 | def get_secret(name: str, session: Optional[SessionProxy]) -> str:
29 | """Get the (optional) secret to use as bearer token for authenticating against OPA"""
30 |
31 | client = session.client("secretsmanager")
32 |
33 | try:
34 | resp = client.get_secret_value(SecretId=name)
35 | # pylint: disable=invalid-name
36 | except ClientError as e:
37 | LOG.error("Failed fetching secret %s", name)
38 | LOG.error(e)
39 | raise e
40 |
41 | if 'SecretString' in resp:
42 | return resp['SecretString']
43 |
44 | raise Exception("SecretString not found in secret")
45 |
46 | def opa_query(
47 | request: HookHandlerRequest,
48 | session: Optional[SessionProxy],
49 | type_configuration: TypeConfigurationModel,
50 | action: str,
51 | ) -> ProgressEvent:
52 | """Query OPA and return a ProgressEvent based on the decision"""
53 |
54 | progress: ProgressEvent = ProgressEvent(
55 | status=OperationStatus.IN_PROGRESS
56 | )
57 |
58 |     # Querying OPA's default decision endpoint (POST /), where the request body is the input document itself, so don't wrap it in an "input" attribute
59 | opa_input = {
60 | "action": action,
61 | "hook": request.hookContext.hookTypeName,
62 | "resource": {
63 | "id": request.hookContext.targetLogicalId,
64 | "name": request.hookContext.targetName,
65 | "type": request.hookContext.targetType,
66 | "properties": request.hookContext.targetModel.get("resourceProperties")
67 | }
68 | }
69 |
70 | headers = {}
71 | secret = type_configuration.opaAuthTokenSecret
72 | if secret is not None and secret != "":
73 | token = get_secret(type_configuration.opaAuthTokenSecret, session)
74 | headers = {"Authorization": f"Bearer {token}"}
75 |
76 | try:
77 | connect_timeout = 10
78 | resp = requests.post(type_configuration.opaUrl, json=opa_input, headers=headers, timeout=connect_timeout)
79 | except requests.exceptions.ConnectTimeout:
80 | LOG.error("Timeout connecting to OPA at %s in %s seconds", type_configuration.opaUrl, connect_timeout)
81 | progress.status = OperationStatus.FAILED
82 | return progress
83 | except requests.ConnectionError:
84 | LOG.error("Failed connecting to OPA at %s", type_configuration.opaUrl)
85 | progress.status = OperationStatus.FAILED
86 | return progress
87 |
88 | if resp.status_code == 200:
89 | body = resp.json()
90 | if not "allow" in body:
91 | LOG.error("OPA returned empty/undefined result")
92 | progress.status = OperationStatus.FAILED
93 | else:
94 | if body["allow"] is True:
95 | progress.status = OperationStatus.SUCCESS
96 | else:
97 | message = " | ".join(body["violations"])
98 | LOG.info("OPA denied the request with message: %s", message)
99 | progress.status = OperationStatus.FAILED
100 | progress.message = message
101 |
102 | else:
103 | LOG.error("OPA returned status code: %d", resp.status_code)
104 | LOG.error(resp.json())
105 | progress.status = OperationStatus.FAILED
106 |
107 | return progress
108 |
109 | # pylint: disable=unused-argument,missing-function-docstring
110 | @hook.handler(HookInvocationPoint.CREATE_PRE_PROVISION)
111 | @hook.handler(HookInvocationPoint.UPDATE_PRE_PROVISION)
112 | @hook.handler(HookInvocationPoint.DELETE_PRE_PROVISION)
113 | def pre_handler(
114 | session: Optional[SessionProxy],
115 | request: HookHandlerRequest,
116 | callback_context: MutableMapping[str, Any],
117 | type_configuration: TypeConfigurationModel
118 | ) -> ProgressEvent:
119 |
120 | LOG.info("Hook triggered for target %s %s",
121 | request.hookContext.targetName,
122 | request.hookContext.targetLogicalId
123 | )
124 |
125 | action = request.hookContext.invocationPoint[0:6]
126 |
127 | return opa_query(request, session, type_configuration, action)
128 |
--------------------------------------------------------------------------------
/hooks/src/styra_opa_hook/models.py:
--------------------------------------------------------------------------------
1 | # DO NOT modify this file by hand, changes will be overwritten
2 | import sys
3 | from dataclasses import dataclass
4 | from inspect import getmembers, isclass
5 | from typing import (
6 | AbstractSet,
7 | Any,
8 | Generic,
9 | Mapping,
10 | MutableMapping,
11 | Optional,
12 | Sequence,
13 | Type,
14 | TypeVar,
15 | )
16 |
17 | from cloudformation_cli_python_lib.interface import BaseHookHandlerRequest, BaseModel
18 | from cloudformation_cli_python_lib.recast import recast_object
19 | from cloudformation_cli_python_lib.utils import deserialize_list
20 |
21 | T = TypeVar("T")
22 |
23 |
24 | def set_or_none(value: Optional[Sequence[T]]) -> Optional[AbstractSet[T]]:
25 | if value:
26 | return set(value)
27 | return None
28 |
29 |
30 | @dataclass
31 | class HookHandlerRequest(BaseHookHandlerRequest):
32 | pass
33 |
34 |
35 | @dataclass
36 | class TypeConfigurationModel(BaseModel):
37 | opaUrl: Optional[str]
38 | opaAuthTokenSecret: Optional[str]
39 |
40 | @classmethod
41 | def _deserialize(
42 | cls: Type["_TypeConfigurationModel"],
43 | json_data: Optional[Mapping[str, Any]],
44 | ) -> Optional["_TypeConfigurationModel"]:
45 | if not json_data:
46 | return None
47 | return cls(
48 | opaUrl=json_data.get("opaUrl"),
49 | opaAuthTokenSecret=json_data.get("opaAuthTokenSecret"),
50 | )
51 |
52 |
53 | # work around possible type aliasing issues when variable has same name as a model
54 | _TypeConfigurationModel = TypeConfigurationModel
55 |
56 |
57 |
--------------------------------------------------------------------------------
/hooks/styra-opa-hook-configuration.json:
--------------------------------------------------------------------------------
1 | {
2 | "properties": {
3 | "opaUrl": {
4 | "description": "URL pointing to an OPA instance for policy decisions",
5 | "type": "string"
6 | },
7 | "opaAuthTokenSecret": {
8 | "description": "ARN referencing a Secret in Secrets Manager containing a plain-text string token to use for authenticating against the OPA instance",
9 | "type": "string"
10 | }
11 | },
12 | "required": [
13 |         "opaUrl"
14 | ],
15 | "additionalProperties": false,
16 | "definitions": {},
17 | "typeName": "Styra::OPA::Hook"
18 | }
19 |
--------------------------------------------------------------------------------
/hooks/styra-opa-hook.json:
--------------------------------------------------------------------------------
1 | {
2 | "additionalProperties": false,
3 | "description": "Hook to call out to Open Policy Agent for policy decisions",
4 | "documentationUrl": "https://www.openpolicyagent.org/docs/latest/aws-cloudformation-hooks/",
5 | "handlers": {
6 | "preCreate": {
7 | "permissions": [
8 | "secretsmanager:GetSecretValue"
9 | ],
10 | "targetNames": [
11 | "AWS::ACMPCA::*",
12 | "AWS::AccessAnalyzer::*",
13 | "AWS::AmazonMQ::*",
14 | "AWS::Amplify::*",
15 | "AWS::AmplifyUIBuilder::*",
16 | "AWS::ApiGateway::*",
17 | "AWS::ApiGatewayV2::*",
18 | "AWS::AppConfig::*",
19 | "AWS::AppFlow::*",
20 | "AWS::AppMesh::*",
21 | "AWS::AppStream::*",
22 | "AWS::AppSync::*",
23 | "AWS::ApplicationAutoScaling::*",
24 | "AWS::ApplicationInsights::*",
25 | "AWS::Athena::*",
26 | "AWS::AuditManager::*",
27 | "AWS::AutoScaling::*",
28 | "AWS::AutoScalingPlans::*",
29 | "AWS::Backup::*",
30 | "AWS::Batch::*",
31 | "AWS::Budgets::*",
32 | "AWS::CE::*",
33 | "AWS::Cassandra::*",
34 | "AWS::CertificateManager::*",
35 | "AWS::Chatbot::*",
36 | "AWS::Cloud9::*",
37 | "AWS::CloudFormation::*",
38 | "AWS::CloudFront::*",
39 | "AWS::CloudTrail::*",
40 | "AWS::CloudWatch::*",
41 | "AWS::CodeBuild::*",
42 | "AWS::CodeCommit::*",
43 | "AWS::CodeDeploy::*",
44 | "AWS::CodePipeline::*",
45 | "AWS::CodeStar::*",
46 | "AWS::CodeStarConnections::*",
47 | "AWS::CodeStarNotifications::*",
48 | "AWS::Cognito::*",
49 | "AWS::Config::*",
50 | "AWS::DAX::*",
51 | "AWS::DLM::*",
52 | "AWS::DMS::*",
53 | "AWS::DataBrew::*",
54 | "AWS::DataPipeline::*",
55 | "AWS::DataSync::*",
56 | "AWS::Detective::*",
57 | "AWS::DevOpsGuru::*",
58 | "AWS::DirectoryService::*",
59 | "AWS::DynamoDB::*",
60 | "AWS::EC2::*",
61 | "AWS::ECR::*",
62 | "AWS::ECS::*",
63 | "AWS::EFS::*",
64 | "AWS::EKS::*",
65 | "AWS::EMR::*",
66 | "AWS::EMRContainers::*",
67 | "AWS::EMRServerless::*",
68 | "AWS::ElastiCache::*",
69 | "AWS::ElasticBeanstalk::*",
70 | "AWS::ElasticLoadBalancing::*",
71 | "AWS::ElasticLoadBalancingV2::*",
72 | "AWS::Elasticsearch::*",
73 | "AWS::EventSchemas::*",
74 | "AWS::Events::*",
75 | "AWS::FIS::*",
76 | "AWS::FMS::*",
77 | "AWS::FSx::*",
78 | "AWS::GameLift::*",
79 | "AWS::GlobalAccelerator::*",
80 | "AWS::Glue::*",
81 | "AWS::Greengrass::*",
82 | "AWS::GuardDuty::*",
83 | "AWS::IAM::*",
84 | "AWS::IdentityStore::*",
85 | "AWS::ImageBuilder::*",
86 | "AWS::Inspector::*",
87 | "AWS::InspectorV2::*",
88 | "AWS::IoT::*",
89 | "AWS::KMS::*",
90 | "AWS::KafkaConnect::*",
91 | "AWS::Kinesis::*",
92 | "AWS::KinesisAnalytics::*",
93 | "AWS::KinesisAnalyticsV2::*",
94 | "AWS::KinesisFirehose::*",
95 | "AWS::LakeFormation::*",
96 | "AWS::Lambda::*",
97 | "AWS::LicenseManager::*",
98 | "AWS::Logs::*",
99 | "AWS::M2::*",
100 | "AWS::MSK::*",
101 | "AWS::Macie::*",
102 | "AWS::MediaConnect::*",
103 | "AWS::MediaConvert::*",
104 | "AWS::MediaPackage::*",
105 | "AWS::MemoryDB::*",
106 | "AWS::Neptune::*",
107 | "AWS::NetworkFirewall::*",
108 | "AWS::NetworkManager::*",
109 | "AWS::Oam::*",
110 | "AWS::OpenSearchService::*",
111 | "AWS::OpsWorks::*",
112 | "AWS::OpsWorksCM::*",
113 | "AWS::Organizations::*",
114 | "AWS::Pipes::*",
115 | "AWS::RAM::*",
116 | "AWS::RDS::*",
117 | "AWS::Redshift::*",
118 | "AWS::RedshiftServerless::*",
119 | "AWS::Rekognition::*",
120 | "AWS::ResilienceHub::*",
121 | "AWS::ResourceExplorer2::*",
122 | "AWS::ResourceGroups::*",
123 | "AWS::RolesAnywhere::*",
124 | "AWS::Route53::*",
125 | "AWS::Route53Resolver::*",
126 | "AWS::S3::*",
127 | "AWS::S3ObjectLambda::*",
128 | "AWS::S3Outposts::*",
129 | "AWS::SDB::*",
130 | "AWS::SES::*",
131 | "AWS::SNS::*",
132 | "AWS::SQS::*",
133 | "AWS::SSM::*",
134 | "AWS::SSMContacts::*",
135 | "AWS::SSMIncidents::*",
136 | "AWS::SSO::*",
137 | "AWS::SageMaker::*",
138 | "AWS::SecretsManager::*",
139 | "AWS::SecurityHub::*",
140 | "AWS::ServiceCatalog::*",
141 | "AWS::ServiceCatalogAppRegistry::*",
142 | "AWS::ServiceDiscovery::*",
143 | "AWS::Signer::*",
144 | "AWS::StepFunctions::*",
145 | "AWS::Synthetics::*",
146 | "AWS::Transfer::*",
147 | "AWS::WAF::*",
148 | "AWS::WAFRegional::*",
149 | "AWS::WAFv2::*",
150 | "AWS::WorkSpaces::*",
151 | "AWS::XRay::*",
152 | "Alexa::ASK::*"
153 | ]
154 | },
155 | "preDelete": {
156 | "permissions": [
157 | "secretsmanager:GetSecretValue"
158 | ],
159 | "targetNames": [
160 | "AWS::ACMPCA::*",
161 | "AWS::AccessAnalyzer::*",
162 | "AWS::AmazonMQ::*",
163 | "AWS::Amplify::*",
164 | "AWS::AmplifyUIBuilder::*",
165 | "AWS::ApiGateway::*",
166 | "AWS::ApiGatewayV2::*",
167 | "AWS::AppConfig::*",
168 | "AWS::AppFlow::*",
169 | "AWS::AppMesh::*",
170 | "AWS::AppStream::*",
171 | "AWS::AppSync::*",
172 | "AWS::ApplicationAutoScaling::*",
173 | "AWS::ApplicationInsights::*",
174 | "AWS::Athena::*",
175 | "AWS::AuditManager::*",
176 | "AWS::AutoScaling::*",
177 | "AWS::AutoScalingPlans::*",
178 | "AWS::Backup::*",
179 | "AWS::Batch::*",
180 | "AWS::Budgets::*",
181 | "AWS::CE::*",
182 | "AWS::Cassandra::*",
183 | "AWS::CertificateManager::*",
184 | "AWS::Chatbot::*",
185 | "AWS::Cloud9::*",
186 | "AWS::CloudFormation::*",
187 | "AWS::CloudFront::*",
188 | "AWS::CloudTrail::*",
189 | "AWS::CloudWatch::*",
190 | "AWS::CodeBuild::*",
191 | "AWS::CodeCommit::*",
192 | "AWS::CodeDeploy::*",
193 | "AWS::CodePipeline::*",
194 | "AWS::CodeStar::*",
195 | "AWS::CodeStarConnections::*",
196 | "AWS::CodeStarNotifications::*",
197 | "AWS::Cognito::*",
198 | "AWS::Config::*",
199 | "AWS::DAX::*",
200 | "AWS::DLM::*",
201 | "AWS::DMS::*",
202 | "AWS::DataBrew::*",
203 | "AWS::DataPipeline::*",
204 | "AWS::DataSync::*",
205 | "AWS::Detective::*",
206 | "AWS::DevOpsGuru::*",
207 | "AWS::DirectoryService::*",
208 | "AWS::DynamoDB::*",
209 | "AWS::EC2::*",
210 | "AWS::ECR::*",
211 | "AWS::ECS::*",
212 | "AWS::EFS::*",
213 | "AWS::EKS::*",
214 | "AWS::EMR::*",
215 | "AWS::EMRContainers::*",
216 | "AWS::EMRServerless::*",
217 | "AWS::ElastiCache::*",
218 | "AWS::ElasticBeanstalk::*",
219 | "AWS::ElasticLoadBalancing::*",
220 | "AWS::ElasticLoadBalancingV2::*",
221 | "AWS::Elasticsearch::*",
222 | "AWS::EventSchemas::*",
223 | "AWS::Events::*",
224 | "AWS::FIS::*",
225 | "AWS::FMS::*",
226 | "AWS::FSx::*",
227 | "AWS::GameLift::*",
228 | "AWS::GlobalAccelerator::*",
229 | "AWS::Glue::*",
230 | "AWS::Greengrass::*",
231 | "AWS::GuardDuty::*",
232 | "AWS::IAM::*",
233 | "AWS::IdentityStore::*",
234 | "AWS::ImageBuilder::*",
235 | "AWS::Inspector::*",
236 | "AWS::InspectorV2::*",
237 | "AWS::IoT::*",
238 | "AWS::KMS::*",
239 | "AWS::KafkaConnect::*",
240 | "AWS::Kinesis::*",
241 | "AWS::KinesisAnalytics::*",
242 | "AWS::KinesisAnalyticsV2::*",
243 | "AWS::KinesisFirehose::*",
244 | "AWS::LakeFormation::*",
245 | "AWS::Lambda::*",
246 | "AWS::LicenseManager::*",
247 | "AWS::Logs::*",
248 | "AWS::M2::*",
249 | "AWS::MSK::*",
250 | "AWS::Macie::*",
251 | "AWS::MediaConnect::*",
252 | "AWS::MediaConvert::*",
253 | "AWS::MediaPackage::*",
254 | "AWS::MemoryDB::*",
255 | "AWS::Neptune::*",
256 | "AWS::NetworkFirewall::*",
257 | "AWS::NetworkManager::*",
258 | "AWS::Oam::*",
259 | "AWS::OpenSearchService::*",
260 | "AWS::OpsWorks::*",
261 | "AWS::OpsWorksCM::*",
262 | "AWS::Organizations::*",
263 | "AWS::Pipes::*",
264 | "AWS::RAM::*",
265 | "AWS::RDS::*",
266 | "AWS::Redshift::*",
267 | "AWS::RedshiftServerless::*",
268 | "AWS::Rekognition::*",
269 | "AWS::ResilienceHub::*",
270 | "AWS::ResourceExplorer2::*",
271 | "AWS::ResourceGroups::*",
272 | "AWS::RolesAnywhere::*",
273 | "AWS::Route53::*",
274 | "AWS::Route53Resolver::*",
275 | "AWS::S3::*",
276 | "AWS::S3ObjectLambda::*",
277 | "AWS::S3Outposts::*",
278 | "AWS::SDB::*",
279 | "AWS::SES::*",
280 | "AWS::SNS::*",
281 | "AWS::SQS::*",
282 | "AWS::SSM::*",
283 | "AWS::SSMContacts::*",
284 | "AWS::SSMIncidents::*",
285 | "AWS::SSO::*",
286 | "AWS::SageMaker::*",
287 | "AWS::SecretsManager::*",
288 | "AWS::SecurityHub::*",
289 | "AWS::ServiceCatalog::*",
290 | "AWS::ServiceCatalogAppRegistry::*",
291 | "AWS::ServiceDiscovery::*",
292 | "AWS::Signer::*",
293 | "AWS::StepFunctions::*",
294 | "AWS::Synthetics::*",
295 | "AWS::Transfer::*",
296 | "AWS::WAF::*",
297 | "AWS::WAFRegional::*",
298 | "AWS::WAFv2::*",
299 | "AWS::WorkSpaces::*",
300 | "AWS::XRay::*",
301 | "Alexa::ASK::*"
302 | ]
303 | },
304 | "preUpdate": {
305 | "permissions": [
306 | "secretsmanager:GetSecretValue"
307 | ],
308 | "targetNames": [
309 | "AWS::ACMPCA::*",
310 | "AWS::AccessAnalyzer::*",
311 | "AWS::AmazonMQ::*",
312 | "AWS::Amplify::*",
313 | "AWS::AmplifyUIBuilder::*",
314 | "AWS::ApiGateway::*",
315 | "AWS::ApiGatewayV2::*",
316 | "AWS::AppConfig::*",
317 | "AWS::AppFlow::*",
318 | "AWS::AppMesh::*",
319 | "AWS::AppStream::*",
320 | "AWS::AppSync::*",
321 | "AWS::ApplicationAutoScaling::*",
322 | "AWS::ApplicationInsights::*",
323 | "AWS::Athena::*",
324 | "AWS::AuditManager::*",
325 | "AWS::AutoScaling::*",
326 | "AWS::AutoScalingPlans::*",
327 | "AWS::Backup::*",
328 | "AWS::Batch::*",
329 | "AWS::Budgets::*",
330 | "AWS::CE::*",
331 | "AWS::Cassandra::*",
332 | "AWS::CertificateManager::*",
333 | "AWS::Chatbot::*",
334 | "AWS::Cloud9::*",
335 | "AWS::CloudFormation::*",
336 | "AWS::CloudFront::*",
337 | "AWS::CloudTrail::*",
338 | "AWS::CloudWatch::*",
339 | "AWS::CodeBuild::*",
340 | "AWS::CodeCommit::*",
341 | "AWS::CodeDeploy::*",
342 | "AWS::CodePipeline::*",
343 | "AWS::CodeStar::*",
344 | "AWS::CodeStarConnections::*",
345 | "AWS::CodeStarNotifications::*",
346 | "AWS::Cognito::*",
347 | "AWS::Config::*",
348 | "AWS::DAX::*",
349 | "AWS::DLM::*",
350 | "AWS::DMS::*",
351 | "AWS::DataBrew::*",
352 | "AWS::DataPipeline::*",
353 | "AWS::DataSync::*",
354 | "AWS::Detective::*",
355 | "AWS::DevOpsGuru::*",
356 | "AWS::DirectoryService::*",
357 | "AWS::DynamoDB::*",
358 | "AWS::EC2::*",
359 | "AWS::ECR::*",
360 | "AWS::ECS::*",
361 | "AWS::EFS::*",
362 | "AWS::EKS::*",
363 | "AWS::EMR::*",
364 | "AWS::EMRContainers::*",
365 | "AWS::EMRServerless::*",
366 | "AWS::ElastiCache::*",
367 | "AWS::ElasticBeanstalk::*",
368 | "AWS::ElasticLoadBalancing::*",
369 | "AWS::ElasticLoadBalancingV2::*",
370 | "AWS::Elasticsearch::*",
371 | "AWS::EventSchemas::*",
372 | "AWS::Events::*",
373 | "AWS::FIS::*",
374 | "AWS::FMS::*",
375 | "AWS::FSx::*",
376 | "AWS::GameLift::*",
377 | "AWS::GlobalAccelerator::*",
378 | "AWS::Glue::*",
379 | "AWS::Greengrass::*",
380 | "AWS::GuardDuty::*",
381 | "AWS::IAM::*",
382 | "AWS::IdentityStore::*",
383 | "AWS::ImageBuilder::*",
384 | "AWS::Inspector::*",
385 | "AWS::InspectorV2::*",
386 | "AWS::IoT::*",
387 | "AWS::KMS::*",
388 | "AWS::KafkaConnect::*",
389 | "AWS::Kinesis::*",
390 | "AWS::KinesisAnalytics::*",
391 | "AWS::KinesisAnalyticsV2::*",
392 | "AWS::KinesisFirehose::*",
393 | "AWS::LakeFormation::*",
394 | "AWS::Lambda::*",
395 | "AWS::LicenseManager::*",
396 | "AWS::Logs::*",
397 | "AWS::M2::*",
398 | "AWS::MSK::*",
399 | "AWS::Macie::*",
400 | "AWS::MediaConnect::*",
401 | "AWS::MediaConvert::*",
402 | "AWS::MediaPackage::*",
403 | "AWS::MemoryDB::*",
404 | "AWS::Neptune::*",
405 | "AWS::NetworkFirewall::*",
406 | "AWS::NetworkManager::*",
407 | "AWS::Oam::*",
408 | "AWS::OpenSearchService::*",
409 | "AWS::OpsWorks::*",
410 | "AWS::OpsWorksCM::*",
411 | "AWS::Organizations::*",
412 | "AWS::Pipes::*",
413 | "AWS::RAM::*",
414 | "AWS::RDS::*",
415 | "AWS::Redshift::*",
416 | "AWS::RedshiftServerless::*",
417 | "AWS::Rekognition::*",
418 | "AWS::ResilienceHub::*",
419 | "AWS::ResourceExplorer2::*",
420 | "AWS::ResourceGroups::*",
421 | "AWS::RolesAnywhere::*",
422 | "AWS::Route53::*",
423 | "AWS::Route53Resolver::*",
424 | "AWS::S3::*",
425 | "AWS::S3ObjectLambda::*",
426 | "AWS::S3Outposts::*",
427 | "AWS::SDB::*",
428 | "AWS::SES::*",
429 | "AWS::SNS::*",
430 | "AWS::SQS::*",
431 | "AWS::SSM::*",
432 | "AWS::SSMContacts::*",
433 | "AWS::SSMIncidents::*",
434 | "AWS::SSO::*",
435 | "AWS::SageMaker::*",
436 | "AWS::SecretsManager::*",
437 | "AWS::SecurityHub::*",
438 | "AWS::ServiceCatalog::*",
439 | "AWS::ServiceCatalogAppRegistry::*",
440 | "AWS::ServiceDiscovery::*",
441 | "AWS::Signer::*",
442 | "AWS::StepFunctions::*",
443 | "AWS::Synthetics::*",
444 | "AWS::Transfer::*",
445 | "AWS::WAF::*",
446 | "AWS::WAFRegional::*",
447 | "AWS::WAFv2::*",
448 | "AWS::WorkSpaces::*",
449 | "AWS::XRay::*",
450 | "Alexa::ASK::*"
451 | ]
452 | }
453 | },
454 | "required": [],
455 | "sourceUrl": "https://github.com/StyraInc/opa-aws-cloudformation-hook",
456 | "typeConfiguration": {
457 | "additionalProperties": false,
458 | "properties": {
459 | "opaAuthTokenSecret": {
460 | "description": "ARN referencing a Secret in Secrets Manager containing a plain-text string token to use for authenticating against the OPA instance",
461 | "type": "string"
462 | },
463 | "opaUrl": {
464 | "description": "URL pointing to an OPA instance for policy decisions",
465 | "type": "string"
466 | }
467 | },
468 | "required": [
469 | "opaUrl"
470 | ]
471 | },
472 | "typeName": "Styra::OPA::Hook"
473 | }
474 |
--------------------------------------------------------------------------------
/hooks/template.yml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: "2010-09-09"
2 | Transform: AWS::Serverless-2016-10-31
3 | Description: AWS SAM template for the Styra::OPA::Hook resource type
4 |
5 | Globals:
6 | Function:
7 | Timeout: 180 # docker start-up times can be long for SAM CLI
8 | MemorySize: 256
9 |
10 | Resources:
11 | TypeFunction:
12 | Type: AWS::Serverless::Function
13 | Properties:
14 | Handler: styra_opa_hook.handlers.hook
15 | Runtime: python3.7
16 | CodeUri: build/
17 |
18 | TestEntrypoint:
19 | Type: AWS::Serverless::Function
20 | Properties:
21 | Handler: styra_opa_hook.handlers.test_entrypoint
22 | Runtime: python3.7
23 | CodeUri: build/
24 |
25 |
--------------------------------------------------------------------------------
/test/integration.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
4 |
5 | opa run --server "$SCRIPT_DIR/../examples/policy/" &>/dev/null &
6 |
7 | pid=$!
8 |
9 | # Provide some time for OPA to start
10 | sleep 1
11 |
12 | "$SCRIPT_DIR"/validate.py --test=true "$SCRIPT_DIR"/../examples/templates
13 |
14 | status=$?
15 |
16 | kill "$pid"
17 |
18 | exit "$status"
19 |
--------------------------------------------------------------------------------
/test/requirements.txt:
--------------------------------------------------------------------------------
1 | cfn-flip
2 | requests
--------------------------------------------------------------------------------
/test/validate.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | """Send CloudFormation Template(s) to OPA for validation"""
4 |
5 | # pylint: disable=import-error,invalid-name,anomalous-backslash-in-string,broad-except
6 |
7 | import argparse
8 | import json
9 | import os
10 | import sys
11 |
12 | import requests
13 |
14 | from cfn_flip import to_json
15 |
16 | def get_all_templates(directory):
17 | """Get all template files in directory"""
18 | templates = []
19 |
20 | for root, _, files in os.walk(directory):
21 | for file in files:
22 | if file.endswith((".json", ".yml", ".yaml")):
23 | templates.append(os.path.join(root, file))
24 |
25 | return templates
26 |
27 |
28 | def check_resource(path, name, resource, expect_allow, test_mode):
29 | """Checks a single resource from a template against OPA"""
30 | properties = bools_to_string(resource["Properties"])
31 |
32 | opa_input = {
33 | "action": "CREATE",
34 | "hook": "Styra::OPA::Hook",
35 | "resource": {
36 | "id": name,
37 | "name": resource["Type"],
38 | "type": resource["Type"],
39 | "properties": properties
40 | }
41 | }
42 |
43 |     # POST to the server root evaluates OPA's default decision; the body is used as the input document
44 |     resp = requests.post("http://localhost:8181", json=opa_input)
45 |
45 | decision = resp.json()
46 |
47 | if test_mode:
48 | if decision["allow"] == expect_allow:
49 | print(f"SUCCESS: {path} {name}")
50 |
51 | return True
52 |
53 | print(f"FAIL: {path} {name}")
54 |
55 | if len(decision["violations"]) > 0:
56 | print()
57 |
58 | for violation in decision["violations"]:
59 | print(f"\t{violation}")
60 |
61 | if len(decision["violations"]) > 0:
62 | print()
63 |
64 | return False
65 |
66 | print(json.dumps(decision, indent=4))
67 |
68 | return decision["allow"]
69 |
70 | def check_template(path, test_mode):
71 | """Send each resource in provided template to OPA for validation"""
72 | contents = {}
73 | try:
74 | with open(path, encoding='UTF-8') as file:
75 | if path.endswith(".json"):
76 | contents = json.loads(file.read())
77 | else:
78 | contents = json.loads(to_json(file.read(), clean_up=True))
79 |
80 | except Exception as e:
81 | print(f"ERROR: Exception raised when loading {path}", e)
82 | return False
83 |
84 | resources = contents["Resources"]
85 | resource_names = list(resources.keys())
86 |
87 | success = True
88 | for name in resource_names:
89 |         # Some templates contain resources which we don't intend to test.
90 |         # For those, an "ObjectToTest" attribute in the template's Metadata
91 |         # section points out which single resource should be verified.
92 |         if "Metadata" in contents and "ObjectToTest" in contents["Metadata"]:
93 |             if name == contents["Metadata"]["ObjectToTest"]:
94 |                 if not check_resource(path, name, resources[name], "success" in path, test_mode):
95 |                     success = False
96 |             # In either case, skip the generic check below so the resource is not validated twice
97 |             continue
98 |
99 | if not check_resource(path, name, resources[name], "success" in path, test_mode):
100 | success = False
101 |
102 | return success
103 |
104 |
105 | def bools_to_string(obj):
106 | """
107 |     When presented to the hook, AWS converts booleans in templates to strings,
108 |     so a boolean true arrives as the string "true"...
109 | ¯\_(ツ)_/¯
110 |
111 | """
112 | for k, v in obj.items():
113 | if isinstance(v, dict):
114 | bools_to_string(v)
115 | if isinstance(v, list):
116 | for item in v:
117 | if isinstance(item, dict):
118 | bools_to_string(item)
119 |
120 | if isinstance(v, bool):
121 | obj[k] = "false" if not v else "true"
122 |
123 | return obj
124 |
125 |
126 | def main():
127 |     """Validate one or more templates against OPA running on localhost
128 |
129 |     If --test=true is provided, test all templates by asserting that templates whose file
130 |     names contain "success" are allowed, while all others are expected to be denied.
131 |
132 | Example usage:
133 |
134 | $ validate.py ../examples/templates/eks-cluster-logging/eks-deny-cluster.yaml
135 | {
136 | "allow": false,
137 | "violations": [
138 | "no logging types are enabled for cluster: EksCluster",
139 | "public endpoint needs to be disabled for cluster: EksCluster"
140 | ]
141 | }
142 |
143 | $ validate.py --test=true ../examples/templates
144 | SUCCESS: iam-fail-no-user-policy.yaml UserWithNoPolicies
145 | SUCCESS: iam-success-user-policy-attached.yaml UserWithPolicy
146 | SUCCESS: iam-success-user-policy-attached.yaml UserWithManagedPolicy
147 | ...
148 | """
149 | parser = argparse.ArgumentParser()
150 |
151 |     parser.add_argument("-t", "--test", type=bool, default=False, help="Test for failures")  # note: argparse's type=bool treats any non-empty value as True; invoke as --test=true
152 | parser.add_argument("files", nargs="*")
153 |
154 | args = parser.parse_args()
155 |
156 | if len(args.files) == 0:
157 | sys.exit("No files provided. Example usage: validate.py file1.yaml file2.json dir1/")
158 |
159 | templates = []
160 | for file in args.files:
161 | if os.path.isdir(file):
162 | templates += get_all_templates(file)
163 | else:
164 | templates.append(file)
165 |
166 | success = True
167 | for template in templates:
168 | if not check_template(os.path.relpath(template), args.test):
169 | success = False
170 |
171 | if not success:
172 | sys.exit(1)
173 |
174 | if __name__ == "__main__":
175 | main()
176 |
--------------------------------------------------------------------------------