├── docs ├── index.md ├── rdklib_add_on_to_RDK_Workshop.md ├── developer_notes.md └── requirements.txt ├── NOTICE ├── .gitignore ├── rdklib ├── util │ ├── internal.py │ ├── __init__.py │ ├── external.py │ ├── evaluations.py │ └── service.py ├── errors.py ├── __init__.py ├── configrule.py ├── clientfactory.py ├── evaluation.py └── evaluator.py ├── CODE_OF_CONDUCT.md ├── samconfig.toml ├── mkdocs.yml ├── developer_notes.md ├── .github ├── dependabot.yml ├── workflows │ ├── pytest.yml │ ├── publish.yaml │ ├── validate.yaml │ └── publish_cft.yaml └── actions │ └── dep-setup │ └── action.yml ├── .readthedocs.yaml ├── rdklibtest ├── __init__.py └── test.py ├── tst └── test │ ├── rdklib_util_internal_test.py │ ├── rdklib_util_external_test.py │ ├── rdklib_configrule_test.py │ ├── rdklib_clientfactory_test.py │ ├── rdklibtest_test_test.py │ ├── rdklib_evaluation_test.py │ ├── rdklib_evaluator_test.py │ ├── rdklib_util_evaluations_test.py │ └── rdklib_util_service_test.py ├── template.yaml ├── CONTRIBUTING.md ├── pyproject.toml ├── README.md └── LICENSE /docs/index.md: -------------------------------------------------------------------------------- 1 | ../README.md -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | opt 3 | dist 4 | .idea 5 | *.egg-info 6 | *__pycache__* 7 | *.pyc 8 | .aws-sam 9 | -------------------------------------------------------------------------------- /rdklib/util/internal.py: -------------------------------------------------------------------------------- 1 | # Process evaluations 2 | def process_evaluations(event, client_factory, evaluations): 3 | return evaluations 4 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 5 | -------------------------------------------------------------------------------- /samconfig.toml: -------------------------------------------------------------------------------- 1 | version = 0.1 2 | [default] 3 | [default.deploy] 4 | [default.deploy.parameters] 5 | stack_name = "serverlessrepo-rdklib-layer" 6 | s3_bucket = "aws-sam-cli-managed-default-samclisourcebucket-1p3znu34dzuws" 7 | s3_prefix = "serverlessrepo-rdklib-layer" 8 | region = "ap-southeast-1" 9 | confirm_changeset = true 10 | capabilities = "CAPABILITY_IAM" 11 | image_repositories = [] 12 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: AWS rdklib Documentation 2 | theme: 3 | name: mkdocs 4 | palette: 5 | scheme: default 6 | primary: green 7 | plugins: 8 | - search 9 | # TODO: Enable this if/when docstrings are expanded in the core rdk module. 
10 | # - mkdocstrings: 11 | # handlers: 12 | # python: 13 | # paths: [rdk] 14 | # markdown_extensions: 15 | # - markdown_include.include: 16 | # base_path: . 17 | docs_dir: docs 18 | -------------------------------------------------------------------------------- /developer_notes.md: -------------------------------------------------------------------------------- 1 | # New Versions 2 | 3 | To publish a new version of rdklib, in the RDK Maintainters account, you will need to: 4 | 5 | 1. Create a new tagged version of rdklib (`git tag X.Y.Z; git push origin X.Y.Z`) 6 | 2. Download the latest version of the SAM template from `s3://aws-sam-cli-rdklib-build-bucket-ap-southeast-1/serverlessrepo-rdklib-layer/*.template` 7 | 3. Create a new version of the SAM application using the template you downloaded. 8 | 4. Update the `rdk` repository to reference the latest Lambda Layer versions (use the `update_rdklib_versions.py` script) 9 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "" # See documentation for possible values 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "weekly" 12 | -------------------------------------------------------------------------------- /rdklib/util/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at 4 | # 5 | # http://aws.amazon.com/apache2.0/ 6 | # 7 | # or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 8 | -------------------------------------------------------------------------------- /rdklib/errors.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"). You may 4 | # not use this file except in compliance with the License. A copy of the License is located at 5 | # 6 | # http://aws.amazon.com/apache2.0/ 7 | # 8 | # or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for 10 | # the specific language governing permissions and limitations under the License. 
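# InvalidParametersError is intended to be raised from a rule's evaluate_parameters()
# implementation when the supplied rule parameters fail validation; the Evaluator catches it
# and returns a customer error response to AWS Config instead of surfacing an internal error.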
11 | 12 | class InvalidParametersError(Exception): 13 | pass -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # NOTE - Currently set to OFF until issues with RTD integration with MkDocs is resolved. 3 | # Read the Docs configuration file 4 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 5 | 6 | # Required 7 | version: 2 8 | 9 | # Set the version of Python and other tools you might need 10 | build: 11 | os: ubuntu-22.04 12 | tools: 13 | python: "3.13" 14 | 15 | # Build documentation in the docs/ directory with mkdocs 16 | mkdocs: 17 | configuration: mkdocs.yml 18 | # We recommend specifying your dependencies to enable reproducible builds: 19 | # https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html 20 | # python: 21 | # install: 22 | # - requirements: docs/requirements.txt 23 | -------------------------------------------------------------------------------- /rdklibtest/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at 4 | # 5 | # http://aws.amazon.com/apache2.0/ 6 | # 7 | # or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 8 | 9 | from .test import create_test_configurationchange_event, create_test_scheduled_event, assert_successful_evaluation, assert_customer_error_response 10 | 11 | MY_VERSION = "0.0.1" 12 | -------------------------------------------------------------------------------- /tst/test/rdklib_util_internal_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import importlib 4 | 5 | import sys 6 | import os 7 | 8 | # Get the absolute path of the current script 9 | current_script_dir = os.path.dirname(os.path.abspath(__file__)) 10 | 11 | # Get the absolute path of the project directory 12 | project_dir = os.path.abspath(os.path.join(current_script_dir, "..", "..")) 13 | 14 | # Add the project directory to the Python path 15 | sys.path.append(project_dir) 16 | 17 | CODE = importlib.import_module("rdklib.util.internal") 18 | 19 | 20 | class rdklibUtilInternalTest(unittest.TestCase): 21 | def test_internal_process_evaluations(self): 22 | response = CODE.process_evaluations({}, {}, "some-value") 23 | self.assertEqual(response, "some-value") 24 | 25 | 26 | if __name__ == "__main__": 27 | unittest.main() 28 | -------------------------------------------------------------------------------- /rdklib/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at 4 | # 5 | # http://aws.amazon.com/apache2.0/ 6 | # 7 | # or in the "license" file accompanying this file. 
This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 8 | 9 | MY_VERSION = "0.3.8" 10 | 11 | from .configrule import ConfigRule, MissingTriggerHandlerError 12 | from .evaluator import Evaluator 13 | from .clientfactory import ClientFactory 14 | from .evaluation import ComplianceType, Evaluation 15 | from .errors import InvalidParametersError 16 | -------------------------------------------------------------------------------- /rdklib/util/external.py: -------------------------------------------------------------------------------- 1 | def process_evaluations(event, client_factory, evaluations): 2 | config_client = client_factory.build_client('config') 3 | 4 | # Put together the request that reports the evaluation status 5 | result_token = event['resultToken'] 6 | test_mode = False 7 | if result_token == 'TESTMODE': 8 | # Used solely for RDK test to skip actual put_evaluation API call 9 | test_mode = True 10 | 11 | if not evaluations: 12 | config_client.put_evaluations(Evaluations=[], ResultToken=result_token, TestMode=test_mode) 13 | return [] 14 | 15 | # Invoke the Config API to report the result of the evaluation 16 | evaluation_copy = [] 17 | evaluation_copy = evaluations[:] 18 | while evaluation_copy: 19 | config_client.put_evaluations(Evaluations=evaluation_copy[:100], ResultToken=result_token, TestMode=test_mode) 20 | del evaluation_copy[:100] 21 | 22 | # Used solely for RDK test to be able to test Lambda function 23 | return evaluations 24 | -------------------------------------------------------------------------------- /.github/workflows/pytest.yml: -------------------------------------------------------------------------------- 1 | name: Pytest 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | 9 | permissions: 10 | contents: read 11 | 12 | jobs: 13 | build: 14 | runs-on: ubuntu-latest 15 | env: 16 | poetry_version: 2.1.3 17 | strategy: 18 | fail-fast: false 19 | max-parallel: 4 20 | matrix: 21 | python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"] 22 | steps: 23 | - name: Checkout 24 | uses: actions/checkout@v3 25 | - name: Set up Python 26 | uses: actions/setup-python@v4 27 | with: 28 | python-version: ${{ matrix.python-version }} 29 | - name: cache poetry install 30 | uses: actions/cache@v4 31 | with: 32 | path: ~/.local 33 | key: poetry-${{ env.poetry_version }}-0 34 | - uses: snok/install-poetry@v1 35 | with: 36 | version: ${{ env.poetry_version }} 37 | virtualenvs-create: true 38 | virtualenvs-in-project: false 39 | - name: cache deps 40 | id: cache-deps 41 | uses: actions/cache@v4 42 | with: 43 | path: .venv 44 | key: pydeps-${{ hashFiles('**/poetry.lock') }} 45 | - run: poetry install --no-interaction --no-root 46 | if: steps.cache-deps.outputs.cache-hit != 'true' 47 | - run: poetry install --no-interaction 48 | - run: poetry run pytest 49 | -------------------------------------------------------------------------------- /template.yaml: -------------------------------------------------------------------------------- 1 | AWSTemplateFormatVersion: "2010-09-09" 2 | Transform: AWS::Serverless-2016-10-31 3 | Metadata: 4 | AWS::ServerlessRepo::Application: 5 | Name: rdklib 6 | Description: rdklib library for authoring Config Rules 7 | Author: awslabs 8 | SpdxLicenseId: Apache-2.0 9 | LicenseUrl: LICENSE 10 | ReadmeUrl: README.md 11 | Labels: ["stable"] 12 | HomePageUrl: 
https://github.com/awslabs/aws-config-rdklib 13 | SemanticVersion: 0.3.8 14 | SourceCodeUrl: https://github.com/awslabs/aws-config-rdklib 15 | 16 | Description: > 17 | rdklib library for authoring Config Rules 18 | 19 | # More info about Globals: https://github.com/awslabs/serverless-application-model/blob/master/docs/globals.rst 20 | Globals: 21 | Function: 22 | Timeout: 3 23 | 24 | Resources: 25 | RdklibLayer: 26 | Type: AWS::Serverless::LayerVersion 27 | Properties: 28 | LayerName: rdklib-layer 29 | Description: rdklib library for authoring Config Rules 30 | ContentUri: ./build # Build directory will be populated during CFT deploy workflow 31 | CompatibleRuntimes: 32 | - python3.7 33 | - python3.8 34 | - python3.9 35 | - python3.10 36 | - python3.11 37 | - python3.12 38 | - python3.13 39 | LicenseInfo: "Apache License, Version 2.0" 40 | 41 | Outputs: 42 | RdklibLayerArn: 43 | Description: rdklib layer ARN 44 | Value: !Ref RdklibLayer 45 | -------------------------------------------------------------------------------- /.github/workflows/publish.yaml: -------------------------------------------------------------------------------- 1 | name: "Publish Release" 2 | 3 | on: 4 | push: 5 | tags: 6 | - "*" 7 | 8 | permissions: 9 | contents: write 10 | 11 | jobs: 12 | publish: 13 | name: Publish Release 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Checkout Source 17 | uses: actions/checkout@v3 18 | with: 19 | fetch-depth: 0 20 | 21 | - name: Setup Dependencies 22 | uses: "./.github/actions/dep-setup" 23 | with: 24 | python-version: "3.10" 25 | 26 | - name: Run Safety Check 27 | run: poetry poe safety 28 | 29 | - name: Get Python Module Version 30 | run: | 31 | MODULE_VERSION=$(poetry version --short) 32 | echo "MODULE_VERSION=$MODULE_VERSION" >> $GITHUB_ENV 33 | 34 | - name: Verify Versions Match 35 | run: | 36 | TAG_VERSION=$(git describe HEAD --tags --abbrev=0) 37 | echo "Git Tag Version: $TAG_VERSION" 38 | echo "Python Module Version: $MODULE_VERSION" 39 | if [[ "$TAG_VERSION" != "$MODULE_VERSION" ]]; then exit 1; fi 40 | 41 | - name: Publish to PyPi 42 | run: poetry publish --build 43 | env: 44 | POETRY_PYPI_TOKEN_PYPI: ${{ secrets.POETRY_PYPI_TOKEN_PYPI }} 45 | 46 | - name: Release 47 | uses: softprops/action-gh-release@v1 48 | with: 49 | discussion_category_name: announcements 50 | generate_release_notes: true 51 | files: | 52 | dist/rdklib-${{env.MODULE_VERSION}}-py3-none-any.whl 53 | dist/rdklib-${{env.MODULE_VERSION}}.tar.gz 54 | -------------------------------------------------------------------------------- /tst/test/rdklib_util_external_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from unittest.mock import patch, MagicMock 3 | 4 | import importlib 5 | 6 | import sys 7 | import os 8 | 9 | # Get the absolute path of the current script 10 | current_script_dir = os.path.dirname(os.path.abspath(__file__)) 11 | 12 | # Get the absolute path of the project directory 13 | project_dir = os.path.abspath(os.path.join(current_script_dir, "..", "..")) 14 | 15 | # Add the project directory to the Python path 16 | sys.path.append(project_dir) 17 | 18 | CODE = importlib.import_module("rdklib.util.external") 19 | 20 | CLIENT_FACTORY = MagicMock() 21 | CLIENT_MOCK = MagicMock() 22 | 23 | 24 | def mock_get_client(*args, **kwargs): 25 | return CLIENT_MOCK 26 | 27 | 28 | def return_same_value(Evaluations, ResultToken, TestMode): 29 | return Evaluations 30 | 31 | 32 | @patch.object(CLIENT_FACTORY, "build_client", 
MagicMock(side_effect=mock_get_client)) 33 | @patch.object(CLIENT_MOCK, "put_evaluations", MagicMock(side_effect=return_same_value)) 34 | class rdklibUtilExternalTest(unittest.TestCase): 35 | def test_external_process_evaluations(self): 36 | event_not_test = {"resultToken": "NOT_TESTMODE"} 37 | event_test = {"resultToken": "TESTMODE"} 38 | 39 | # No evaluation 40 | response = CODE.process_evaluations(event_test, CLIENT_FACTORY, []) 41 | self.assertEqual(response, []) 42 | 43 | # Evaluation in test mode 44 | response = CODE.process_evaluations(event_test, CLIENT_FACTORY, ["some-eval"]) 45 | self.assertEqual(response, ["some-eval"]) 46 | 47 | # Evaluation not in test mode 48 | response = CODE.process_evaluations(event_not_test, CLIENT_FACTORY, ["some-eval"]) 49 | self.assertEqual(response, ["some-eval"]) 50 | -------------------------------------------------------------------------------- /.github/actions/dep-setup/action.yml: -------------------------------------------------------------------------------- 1 | name: Dependency Setup 2 | description: 'Action to setup the runtime environment for CI jobs.' 3 | 4 | inputs: 5 | python-version: 6 | description: 'The Python version to be used during setup' 7 | required: true 8 | 9 | runs: 10 | using: "composite" 11 | steps: 12 | - name: Setup Python 13 | uses: actions/setup-python@v4 14 | with: 15 | python-version: '${{ inputs.python-version }}' 16 | 17 | - name: Cache Poetry 18 | id: cache-poetry 19 | uses: actions/cache@v4 20 | with: 21 | path: ${{github.workspace}}/.poetry 22 | key: poetry-self-${{ hashFiles('.github/workflows/*.yml') }} 23 | restore-keys: poetry-self- 24 | 25 | - name: Install Poetry 26 | if: steps.cache-poetry.outputs.cache-hit != 'true' 27 | shell: bash 28 | run: | 29 | export POETRY_HOME=${{github.workspace}}/.poetry 30 | curl -sSL https://raw.githubusercontent.com/python-poetry/install.python-poetry.org/main/install-poetry.py -O 31 | python install-poetry.py --preview 32 | rm install-poetry.py 33 | 34 | - name: Add Poetry to $PATH 35 | shell: bash 36 | run: echo "${{github.workspace}}/.poetry/bin" >> $GITHUB_PATH 37 | 38 | - name: Add poethepoet plugin 39 | shell: bash 40 | run: poetry self add 'poethepoet[poetry_plugin]' 41 | 42 | - name: Poetry Version 43 | shell: bash 44 | run: poetry --version 45 | 46 | - name: Check pyproject.toml validity 47 | shell: bash 48 | run: poetry check --no-interaction 49 | 50 | - name: Cache Dependencies 51 | id: cache-deps 52 | uses: actions/cache@v4 53 | with: 54 | path: ${{github.workspace}}/.venv 55 | key: poetry-deps-${{ hashFiles('**/poetry.lock') }} 56 | restore-keys: poetry-deps- 57 | 58 | - name: Install Deps 59 | if: steps.cache-deps.cache-hit != 'true' 60 | shell: bash 61 | run: | 62 | poetry config virtualenvs.in-project true 63 | poetry install --no-interaction 64 | -------------------------------------------------------------------------------- /.github/workflows/validate.yaml: -------------------------------------------------------------------------------- 1 | name: "Validation" 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | branches: 9 | - master 10 | 11 | permissions: 12 | contents: read 13 | 14 | jobs: 15 | ## TODO: Enable this once the repo is totally formatted to standard. 
16 | # lint-style: 17 | # name: Linting and Styling 18 | # runs-on: ubuntu-latest 19 | # steps: 20 | # - name: Checkout Source 21 | # uses: actions/checkout@v3 22 | # with: 23 | # fetch-depth: 0 24 | 25 | # - name: Setup Dependencies 26 | # uses: './.github/actions/dep-setup' 27 | # with: 28 | # python-version: '3.10' 29 | 30 | # - name: Run Styling Enforcement 31 | # shell: bash 32 | # run: poetry poe check 33 | 34 | # # TODO: As soon as the repo is in a state to enable this, we'll do so. 35 | # - name: Run Style Linting Enforcement 36 | # shell: bash 37 | # run: poetry poe lint 38 | 39 | ## TODO: Enable unit tests via GH Actions when unit tests are fixed and migrated to pytest. 40 | # unit-tests: 41 | # name: Run Unit Tests 42 | # strategy: 43 | # matrix: 44 | # version: ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12', '3.13'] 45 | # os: [ubuntu-latest] 46 | # runs-on: ${{ matrix.os }} 47 | # steps: 48 | # - name: Checkout Source 49 | # uses: actions/checkout@v3 50 | # with: 51 | # fetch-depth: 0 52 | 53 | # - name: Setup Dependencies 54 | # uses: './.github/actions/dep-setup' 55 | # with: 56 | # python-version: '${{ matrix.version }}' 57 | 58 | # - name: Run Tests 59 | # shell: bash 60 | # run: poetry poe test 61 | 62 | # - name: Codecov 63 | # uses: codecov/codecov-action@v3 64 | 65 | security: 66 | name: Run Security Checks 67 | runs-on: ubuntu-latest 68 | steps: 69 | - name: Checkout Source 70 | uses: actions/checkout@v3 71 | with: 72 | fetch-depth: 0 73 | 74 | - name: Setup Dependencies 75 | uses: "./.github/actions/dep-setup" 76 | with: 77 | python-version: "3.13" 78 | 79 | - name: Run Security Checks 80 | shell: bash 81 | run: poetry poe safety 82 | -------------------------------------------------------------------------------- /rdklib/configrule.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"). You may 4 | # not use this file except in compliance with the License. A copy of the License is located at 5 | # 6 | # http://aws.amazon.com/apache2.0/ 7 | # 8 | # or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for 10 | # the specific language governing permissions and limitations under the License. 11 | 12 | import json 13 | 14 | class ConfigRule: 15 | #Set this to True to prevent removal of old evaluations when evaluate_compliance returns a list of compliance results. 
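    # (When this flag is True, rdklib's scheduled-notification handling calls
    # clean_up_old_evaluations(), which marks resources absent from the latest results as
    # NOT_APPLICABLE; set it to False to leave previously reported evaluations untouched.)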
16 | delete_old_evaluations_on_scheduled_notification = True 17 | 18 | def __init__(self): 19 | pass 20 | 21 | def evaluate_parameters(self, rule_parameters): 22 | return rule_parameters 23 | 24 | def evaluate_change(self, event, client_factory, configuration_item, valid_rule_parameters): 25 | raise MissingTriggerHandlerError("You must implement the evaluate_change method of the ConfigRule class.") 26 | 27 | def evaluate_periodic(self, event, client_factory, valid_rule_parameters): 28 | raise MissingTriggerHandlerError("You must implement the evaluate_periodic method of the ConfigRule class.") 29 | 30 | def get_execution_role_arn(self, event): 31 | role_arn = None 32 | if 'ruleParameters' in event: 33 | rule_params = json.loads(event['ruleParameters']) 34 | role_name = rule_params.get("ExecutionRoleName") 35 | if role_name: 36 | execution_role_prefix = event["executionRoleArn"].split("/")[0] 37 | role_arn = "{}/{}".format(execution_role_prefix, role_name) 38 | 39 | if not role_arn: 40 | role_arn = event['executionRoleArn'] 41 | 42 | return role_arn 43 | 44 | def get_assume_role_region(self, event): 45 | assume_role_region = None 46 | if 'ruleParameters' in event: 47 | rule_params = json.loads(event['ruleParameters']) 48 | assume_role_region = rule_params.get("ExecutionRoleRegion") 49 | 50 | return assume_role_region 51 | 52 | def get_assume_role_mode(self, event): 53 | assume_role_mode = True 54 | if 'ruleParameters' in event: 55 | rule_params = json.loads(event['ruleParameters']) 56 | if "AssumeRoleMode" in rule_params: 57 | assume_role_mode = rule_params.get("AssumeRoleMode").lower() != "false" 58 | 59 | return assume_role_mode 60 | 61 | class MissingTriggerHandlerError(Exception): 62 | pass 63 | -------------------------------------------------------------------------------- /rdklibtest/test.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"). You may 4 | # not use this file except in compliance with the License. A copy of the License is located at 5 | # 6 | # http://aws.amazon.com/apache2.0/ 7 | # 8 | # or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for 10 | # the specific language governing permissions and limitations under the License. 
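# Illustrative usage from a rule's unit test, assuming a rule class built on rdklib and any
# required boto3 clients mocked (MyRule and the parameter name below are placeholders):
#
#   import rdklibtest
#   from rdklib import ComplianceType, Evaluation, Evaluator
#
#   def test_periodic_compliant(self):
#       event = rdklibtest.create_test_scheduled_event({"SomeParameter": "some-value"})
#       response = Evaluator(MyRule()).handle(event, {})
#       expected = [Evaluation(ComplianceType.COMPLIANT, "123456789012", "AWS::::Account")]
#       rdklibtest.assert_successful_evaluation(self, response, expected, 1)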
11 | 12 | import json 13 | 14 | ############################ 15 | # Testing Helper Functions # 16 | ############################ 17 | 18 | 19 | def create_test_configurationchange_event(invoking_event_json, rule_parameters_json=None): 20 | event_to_return = { 21 | "configRuleName": "myrule", 22 | "executionRoleArn": "arn:aws:iam::123456789012:role/example", 23 | "eventLeftScope": False, 24 | "invokingEvent": json.dumps(invoking_event_json), 25 | "accountId": "123456789012", 26 | "configRuleArn": "arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan", 27 | "resultToken": "token", 28 | } 29 | if rule_parameters_json: 30 | event_to_return["ruleParameters"] = json.dumps(rule_parameters_json) 31 | return event_to_return 32 | 33 | 34 | def create_test_scheduled_event(rule_parameters_json=None): 35 | invoking_event = {"messageType": "ScheduledNotification", "notificationCreationTime": "2017-12-23T22:11:18.158Z"} 36 | event_to_return = create_test_configurationchange_event(invoking_event, rule_parameters_json) 37 | return event_to_return 38 | 39 | 40 | def assert_successful_evaluation(test_class, response, resp_expected, evaluations_count=1): 41 | test_class.assertEqual(len(response), len(resp_expected)) 42 | test_class.assertEqual(len(response), evaluations_count) 43 | for i, response_expected in enumerate(resp_expected): 44 | test_class.assertEqual(response_expected, response[i]) 45 | 46 | 47 | def assert_customer_error_response(test_class, response, customer_error_code=None, customer_error_message=None): 48 | if customer_error_code: 49 | test_class.assertEqual(customer_error_code, response["customerErrorCode"]) 50 | if customer_error_message: 51 | test_class.assertEqual(customer_error_message, response["customerErrorMessage"]) 52 | test_class.assertTrue(response["customerErrorCode"]) 53 | test_class.assertTrue(response["customerErrorMessage"]) 54 | if "internalErrorMessage" in response: 55 | test_class.assertTrue(response["internalErrorMessage"]) 56 | if "internalErrorDetails" in response: 57 | test_class.assertTrue(response["internalErrorDetails"]) 58 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. 
You are working against the latest source on the *master* branch. 27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 38 | 39 | GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. 45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 51 | 52 | 53 | ## Security issue notifications 54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. 55 | 56 | 57 | ## Licensing 58 | 59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 60 | 61 | We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. 62 | -------------------------------------------------------------------------------- /rdklib/clientfactory.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"). You may 4 | # not use this file except in compliance with the License. A copy of the License is located at 5 | # 6 | # http://aws.amazon.com/apache2.0/ 7 | # 8 | # or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for 10 | # the specific language governing permissions and limitations under the License. 
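# ClientFactory builds the boto3 clients used during a rule evaluation. When assume_role_mode
# is enabled (the default), it assumes the configured execution role via STS once, caches the
# resulting credentials, and reuses them for every client it builds; otherwise it falls back
# to the Lambda function's own role by calling boto3.client() directly.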
11 | 12 | import boto3 13 | import botocore 14 | import os 15 | 16 | CONFIG_ROLE_TIMEOUT_SECONDS = 900 17 | 18 | class ClientFactory: 19 | __sts_credentials = None 20 | __role_arn = None 21 | __region = None 22 | __assume_role_mode = None 23 | 24 | def __init__(self, role_arn, region=None, assume_role_mode=True): 25 | self.__role_arn = role_arn 26 | self.__assume_role_mode = assume_role_mode 27 | if region == None: 28 | region = os.environ.get('AWS_REGION') 29 | self.__region = region 30 | 31 | def build_client(self, service, region=None, assume_role_mode=True): 32 | if not region: 33 | region = self.__region 34 | 35 | if not assume_role_mode or not self.__assume_role_mode: 36 | return boto3.client(service, region) 37 | 38 | if not self.__role_arn: 39 | raise Exception("No Role ARN - ClientFactory must be initialized with a role_arn or set assume_role_mode to False before build_client is called. You can also add assume_role_arn mode to false in build_client() if you want to use the current iam role") 40 | 41 | # Check to see if we have already gotten STS credentials for this role. If not, get them now and then save them for later use. 42 | if not self.__sts_credentials: 43 | self.__sts_credentials = get_assume_role_credentials(self.__role_arn, region) 44 | 45 | # Use the credentials to get a new boto3 client for the appropriate service. 46 | return boto3.client(service, 47 | aws_access_key_id=self.__sts_credentials['AccessKeyId'], 48 | aws_secret_access_key=self.__sts_credentials['SecretAccessKey'], 49 | aws_session_token=self.__sts_credentials['SessionToken'], 50 | region_name=region) 51 | 52 | def get_assume_role_credentials(role_arn, region): 53 | try: 54 | try: 55 | #use region specific url for sts client is recommended. In some cases, company firewall policies are blocking the global endpoint sts.amazonaws.com 56 | assume_role_response = boto3.client('sts', region_name=region, endpoint_url="https://sts." + region + ".amazonaws.com").assume_role(RoleArn=role_arn,RoleSessionName="configLambdaExecution",DurationSeconds=CONFIG_ROLE_TIMEOUT_SECONDS) 57 | except: 58 | assume_role_response = boto3.client('sts').assume_role(RoleArn=role_arn,RoleSessionName="configLambdaExecution",DurationSeconds=CONFIG_ROLE_TIMEOUT_SECONDS) 59 | return assume_role_response['Credentials'] 60 | except botocore.exceptions.ClientError as ex: 61 | if 'AccessDenied' in ex.response['Error']['Code']: 62 | ex.response['Error']['Message'] = "AWS Config does not have permission to assume the IAM role. Please try 1) grant the right privilege to the assume the IAM role OR 2) provide Config Rules parameter \"EXECUTION_ROLE_NAME\" to specify a role to execute your rule OR 3)Set Config Rules parameter \"ASSUME_ROLE_MODE\" to False to use your lambda role instead of default Config Role." 
63 | else: 64 | ex.response['Error']['Message'] = "InternalError" 65 | ex.response['Error']['Code'] = "InternalError" 66 | raise ex 67 | -------------------------------------------------------------------------------- /.github/workflows/publish_cft.yaml: -------------------------------------------------------------------------------- 1 | name: Publish rdklib CFT 2 | 3 | on: 4 | push: 5 | branches: 6 | - "master" 7 | workflow_dispatch: 8 | 9 | env: 10 | samBucketPrefix: "aws-sam-cli-rdklib-build-bucket-" 11 | 12 | jobs: 13 | samDeploy: 14 | environment: prod 15 | strategy: 16 | matrix: 17 | region: 18 | - "us-east-1" 19 | - "us-east-2" 20 | - "us-west-1" 21 | - "us-west-2" 22 | - "ap-south-1" 23 | - "ap-northeast-3" 24 | - "ap-northeast-2" 25 | - "ap-southeast-1" 26 | - "ap-southeast-2" 27 | - "ap-northeast-1" 28 | - "ca-central-1" 29 | - "eu-central-1" 30 | - "eu-west-1" 31 | - "eu-west-2" 32 | - "eu-west-3" 33 | - "eu-north-1" 34 | - "sa-east-1" 35 | name: SAM Deploy 36 | runs-on: ubuntu-latest 37 | permissions: 38 | id-token: write 39 | contents: read 40 | steps: 41 | - name: Checkout 42 | uses: actions/checkout@v3 43 | - uses: actions/setup-python@v4 44 | - uses: aws-actions/setup-sam@v2 45 | - name: Configure AWS Credentials 46 | uses: aws-actions/configure-aws-credentials@v3 47 | with: 48 | role-to-assume: arn:aws:iam::711761543063:role/github-automation-role-update-layers 49 | aws-region: ${{ matrix.region }} 50 | - name: Install Poetry 51 | uses: snok/install-poetry@v1 52 | - name: Pre-package steps 53 | run: | 54 | poetry self add poetry-plugin-export 55 | poetry export -f requirements.txt --output requirements.txt 56 | pip install -r requirements.txt -t build/python 57 | mkdir build/lib 58 | cp -r rdklib build/lib/rdklib 59 | cp -r rdklib build/python/rdklib 60 | cp -r rdklibtest build/lib/rdklibtest 61 | cp -r rdklibtest build/python/rdklibtest 62 | # Boto bloats the layer without any performance improvement, as Lambda will already have it 63 | # https://gist.github.com/gene1wood/4a052f39490fae00e0c3 64 | rm -r build/python/botocore 65 | rm -r build/python/boto3 66 | rm -r build/python/botocore-*.dist-info 67 | rm -r build/python/boto3-*.dist-info 68 | rm build/lib/rdklib/util/internal.py # Remove an internal python file that only causes trouble 69 | rm build/python/rdklib/util/internal.py # Remove an internal python file that only causes trouble 70 | 71 | - run: sam build --use-container 72 | - run: sam package --s3-bucket "${{ env.samBucketPrefix }}${{ matrix.region }}" --output-template-file packaged.yaml 73 | - run: sam deploy --no-confirm-changeset --no-fail-on-empty-changeset --s3-bucket "${{ env.samBucketPrefix }}${{ matrix.region }}" --template-file packaged.yaml --stack-name rdklib-layer-sam --capabilities CAPABILITY_IAM --region ${{ matrix.region }} 74 | - run: | 75 | LATEST_LAYER_VER=$(aws lambda list-layer-versions --region ${{ matrix.region }} --layer-name rdklib-layer --max-items 1 | jq -r ".LayerVersions[].Version") 76 | aws lambda add-layer-version-permission --region ${{ matrix.region }} --layer-name rdklib-layer --version-number $LATEST_LAYER_VER --statement-id public --action "lambda:GetLayerVersion" --principal "*" 77 | # Commenting this out because the action always fails 78 | # rdkIssue: 79 | # environment: prod 80 | # name: Create RDK Issue 81 | # runs-on: ubuntu-latest 82 | # # These permissions are needed to interact with GitHub's OIDC Token endpoint. 
83 | # permissions: 84 | # id-token: write 85 | # contents: write 86 | # steps: 87 | # - name: Checkout 88 | # uses: actions/checkout@v3 89 | # - uses: actions-ecosystem/action-get-latest-tag@v1 90 | # id: get-latest-tag 91 | # with: 92 | # semver_only: true 93 | # - name: Create new issue in aws-config-rdk notifying of new layer version 94 | # uses: dacbd/create-issue-action@main 95 | # with: 96 | # token: ${{ github.token }} 97 | # title: RDKLIB Version ${{ steps.get-latest-tag.outputs.tag }} Available 98 | # body: A new tag has been released in rdklib, and RDK should be updated to use the latest layer 99 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | # Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at 4 | # 5 | # http://aws.amazon.com/apache2.0/ 6 | # 7 | # or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 8 | [project] 9 | name = "rdklib" 10 | version = "0.3.8" 11 | requires-python = ">=3.7" 12 | 13 | [tool.poetry] 14 | name = "rdklib" 15 | version = "0.3.8" 16 | description = "Rule Development Kit Library for AWS Config" 17 | authors = [ 18 | "AWS RDK Maintainers ", 19 | ] 20 | repository = "https://github.com/awslabs/aws-config-rdklib" 21 | homepage = "https://github.com/awslabs/aws-config-rdklib" 22 | readme = "README.md" 23 | packages = [{include = "rdklib"}, {include = "rdklibtest"}] 24 | keywords = ["amazon", "aws", "awslabs", "rdk", "rdklib", "layer", "lambda", "config", "rules", "compliance"] 25 | documentation = "https://aws-config-rdklib.readthedocs.io" 26 | classifiers = [ 27 | "License :: OSI Approved :: Apache Software License", 28 | "Programming Language :: Python", 29 | "Programming Language :: Python :: 3", 30 | "Programming Language :: Python :: 3.7", 31 | "Programming Language :: Python :: 3.8", 32 | "Programming Language :: Python :: 3.9", 33 | "Programming Language :: Python :: 3.10", 34 | "Programming Language :: Python :: 3.11", 35 | "Programming Language :: Python :: 3.12", 36 | "Programming Language :: Python :: 3.13", 37 | ] 38 | include = [ 39 | "README.md", 40 | "NOTICE.txt", 41 | "LICENSE", 42 | ] 43 | exclude = [ 44 | "rdklib/util/internal.py" 45 | ] 46 | license = "Apache-2.0" 47 | 48 | [tool.bandit] 49 | exclude_dirs = ["tst"] 50 | 51 | # Styling and linting Configurations 52 | [tool.isort] 53 | profile = "black" 54 | line_length = 120 55 | 56 | [tool.black] 57 | line-length = 120 58 | target-version = ["py37", "py38", "py39", "py310", "py311", "py312", "py313"] 59 | 60 | [tool.ruff] 61 | line-length = 120 62 | 63 | [tool.poe.tasks] 64 | isort = "isort --profile=black ." 65 | black = "black ." 66 | check-black = {cmd = "black . --check --diff", help = "Check code for black styling"} 67 | check-isort = {cmd = "isort --check --profile=black .", help = "Check code for import styling"} 68 | check-docstrings = "pydocstyle -e ." 
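# Tasks in this table are run with `poetry poe <task>`; CI installs the poethepoet plugin via .github/actions/dep-setup.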
69 | check-ruff = "ruff check rdklib" 70 | check = ["check-isort", "check-black"] 71 | lint = ["check-docstrings", "check-ruff"] 72 | fix = ["isort", "black"] 73 | # test = "pytest --cov=rdklib --cov-report=xml --cov-report=term" 74 | ruff = "ruff check --fix rdk" 75 | safety = "safety check" 76 | bandit = "bandit -r rdklib" 77 | security = ["safety", "bandit"] 78 | update-doc-deps = {cmd = "poetry export --only=docs -f requirements.txt > docs/requirements.txt", help = "Generate an updated requirements.txt for docs" } 79 | serve-docs = {cmd = "mkdocs serve"} 80 | # requires poethepoet outside of poetry. 81 | install = "poetry install" 82 | build = "poetry build" 83 | 84 | [tool.poetry.dependencies] 85 | python = "^3.10" 86 | boto3 = "^1.35" 87 | pyyaml = "^6" 88 | rdk = "^0.17" 89 | bandit = "^1.7.7" 90 | idna = "^3.7" 91 | urllib3 = "^2.5" 92 | requests = "^2.32" 93 | regex = "^2025.2" 94 | authlib = "^1.6.5" 95 | 96 | [tool.poetry.group.dev.dependencies] 97 | black = "^25.1" 98 | pydocstyle = "^6.3" 99 | isort = {extras = ["toml"], version = "^5.11.4"} 100 | mypy = "^1.16.1" 101 | debugpy = "^1.6.7" 102 | ruff = "^0.12.3" 103 | pytest = "^7.0" 104 | jinja2 = "^3.1.6" 105 | setuptools = "^78.1.1" 106 | 107 | [tool.poetry.group.security.dependencies] 108 | bandit = "^1.8.3" 109 | safety = "^3.6.0" 110 | 111 | [tool.poetry.group.types.dependencies] 112 | types-pyyaml = "^6.0.12.10" 113 | boto3-stubs = {extras = ["cloudformation", "config", "iam", "s3", "sts"], version = "^1.26.139"} 114 | 115 | 116 | [tool.poetry.group.docs.dependencies] 117 | mkdocs = "^1.6.1" 118 | mkdocs-material = "^9.6.12" 119 | mkdocstrings-python = "^1.0" 120 | markdown-include = "^0.8.1" 121 | 122 | [tool.poetry.requires-plugins] 123 | poetry-plugin-export = ">=1.8" 124 | 125 | [build-system] 126 | requires = ["poetry-core"] 127 | build-backend = "poetry.core.masonry.api" 128 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # RDKlib 2 | 3 | [![image](https://github.com/awslabs/aws-config-rdklib/workflows/ci/badge.svg?branch=master)](https://github.com/awslabs/aws-config-rdklib/actions?query=workflow%3Aci+branch%3Amaster) 4 | 5 | RDKlib is a Python library to enable you to **run custom AWS Config Rules at scale**. The library can be used to: 6 | 7 | - Help you to focus only on the compliance logic, while the library 8 | does the heavy lifting 9 | - Ease maintenance by moving the boilerplate code as a AWS Lambda 10 | Layer 11 | - Ease deployment by using AWS Serverless Application Repository 12 | 13 | RDKLib works in synergy with the [AWS Config Rule Development Kit](https://github.com/awslabs/aws-config-rdk). 14 | 15 | # Getting Started 16 | 17 | ## Install the library locally 18 | 19 | ```bash 20 | pip install rdklib 21 | ``` 22 | 23 | ## Create a rule using the RDK 24 | 25 | > Note: you need to [install the RDK](https://github.com/awslabs/aws-config-rdk#getting-started) first. 26 | 27 | To use `rdklib`, specify a `python3.x-lib` runtime when you run `rdk create` (or don't specify any runtime; `rdklib` is now the default for `rdk create`). This will populate the `rdklib` runtime in the RDK `parameters.json` of your Rule template. 
Examples: 28 | 29 | - For periodic trigger: 30 | 31 | ```bash 32 | rdk create YOUR_RULE_NAME --runtime python3.12-lib --maximum-frequency TwentyFour_Hours 33 | ``` 34 | 35 | - For configuration change trigger (for example S3 Bucket): 36 | 37 | ```bash 38 | rdk create YOUR_RULE_NAME --runtime python3.12-lib --resource-types AWS::S3::Bucket 39 | ``` 40 | 41 | After you've created your rule, update the `.py` file that was generated, adding your custom logic within the `evaluate_change()` method for change-triggered rules or the `evaluate_periodic()` method for periodic rules (you may need to uncomment `evaluate_periodic()`. If you need to create a `boto3` client, use the `client_factory` helper (eg. instead of `boto3.client("s3")`, use `client_factory.build_client("s3")`). Examples of `rdklib` rules can be found [here](https://github.com/awslabs/aws-config-rules/blob/master/python-rdklib/EC2_INSTANCE_EBS_VOLUME_TAGS_MATCH/config_rule/config-version/EC2_INSTANCE_EBS_VOLUME_TAGS_MATCH/EC2_INSTANCE_EBS_VOLUME_TAGS_MATCH.py). 42 | 43 | ## Deploy your rule with RDKlib layer 44 | 45 | RDKlib is designed to work as a AWS Lambda Layer. It allows you to use the library without needing to include it in your deployment package. 46 | 47 | 1. Install RDKlib layer (with AWS CLI) 48 | 49 | ```bash 50 | aws serverlessrepo create-cloud-formation-change-set --application-id arn:aws:serverlessrepo:ap-southeast-1:711761543063:applications/rdklib --stack-name RDKlib-Layer 51 | 52 | # Copy/paste the full change-set ARN to customize the following command 53 | aws cloudformation execute-change-set --change-set-name NAME_OF_THE_CHANGE_SET 54 | 55 | aws cloudformation describe-stack-resources --stack-name serverlessrepo-RDKlib-Layer 56 | # Copy the ARN of the Lambda layer in the "PhysicalResourceId" key (i.e. arn:aws:lambda:YOUR_REGION:YOUR_ACCOUNT:layer:rdklib-layer:1). 57 | ``` 58 | 59 | > Note: You can do the same step manually going to and find "rdklib" 60 | 61 | 1. Deploy the rule 62 | 63 | ```bash 64 | rdk deploy YOUR_RULE_NAME --rdklib-layer-arn YOUR_RDKLIB_LAYER_ARN 65 | ``` 66 | 67 | # FAQs 68 | 69 | - Q. What is the `client_factory` that I see in my `rdklib` rules? 70 | - A. A `client_factory` is a class that allows for dynamic provisioning of a `boto3` client. In an `rdklib` rule, you should treat `client_factory` as the way to create a `boto3` client. So instead of calling `client = boto3.client("s3")`, you would call `client = client_factory.build_client("s3")`. 71 | - Q. ...Why? 72 | - A. It's mainly there to allow for cross-account functionality so that your client evaluates the rule in the right account. 73 | 74 | # License 75 | 76 | This project is licensed under the Apache-2.0 License. 
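As a rough sketch of the rule structure described in Getting Started above, here is a minimal change-triggered rule; the class name, resource type, and tag check are illustrative only and not part of the generated template:

```python
import botocore

from rdklib import ComplianceType, ConfigRule, Evaluation, Evaluator

APPLICABLE_RESOURCES = ["AWS::S3::Bucket"]


class MyS3BucketTagRule(ConfigRule):
    def evaluate_change(self, event, client_factory, configuration_item, valid_rule_parameters):
        # Build clients through client_factory so the call runs in the evaluated account.
        s3_client = client_factory.build_client("s3")
        bucket_name = configuration_item["resourceName"]
        try:
            tag_set = s3_client.get_bucket_tagging(Bucket=bucket_name)["TagSet"]
        except botocore.exceptions.ClientError:
            tag_set = []  # get_bucket_tagging raises if the bucket has no tags at all
        if any(tag["Key"] == "Owner" for tag in tag_set):
            return [Evaluation(ComplianceType.COMPLIANT)]
        return [Evaluation(ComplianceType.NON_COMPLIANT, annotation="Bucket has no Owner tag")]


def lambda_handler(event, context):
    return Evaluator(MyS3BucketTagRule(), APPLICABLE_RESOURCES).handle(event, context)
```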
77 | 78 | # Feedback / Questions 79 | 80 | Feel free to email 81 | 82 | # Contacts 83 | 84 | - **Benjamin Morris** - _Maintainer, code, testing_ 85 | 86 | # Acknowledgements 87 | 88 | - **Mark Beacom** - _Maintainer, code, testing_ 89 | - **Michael Borchert** - _Design, code, testing, feedback_ 90 | - **Ricky Chau** - _Maintainer, code, testing_ 91 | - **Julio Delgado Jr.** - *Design, testing, feedback* 92 | - **Chris Gutierrez** - _Design, feedback_ 93 | - **Joe Lee** - _Design, feedback_ 94 | - **Jonathan Rault** - _Maintainer, design, code, testing, feedback_ 95 | - **Carlo DePaolis** - _Maintainer, code, testing_ 96 | -------------------------------------------------------------------------------- /rdklib/util/evaluations.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"). You may 4 | # not use this file except in compliance with the License. A copy of the License is located at 5 | # 6 | # http://aws.amazon.com/apache2.0/ 7 | # 8 | # or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for 10 | # the specific language governing permissions and limitations under the License. 11 | 12 | from rdklib.evaluation import ComplianceType, Evaluation 13 | 14 | try: 15 | from rdklib.util.internal import process_evaluations 16 | except ImportError as ex: 17 | from rdklib.util.external import process_evaluations 18 | 19 | 20 | # Build the evaluations list to return 21 | def process_event_evaluations_list(event, client_factory, compliance_result, configuration_item): 22 | evaluations = [] 23 | 24 | if not isinstance(compliance_result, list): 25 | print("The return statement from evaluate_change() is not a list.") 26 | raise Exception("The return statement from evaluate_change() is not a list.") 27 | 28 | for evaluation in compliance_result: 29 | if not isinstance(evaluation, Evaluation): 30 | print("The return statement from evaluate_change() is not a list of Evaluation() object.") 31 | raise Exception("The return statement from evaluate_change() is not a list of Evaluation() object.") 32 | evaluation.import_fields_from_configuration_item(configuration_item) 33 | if evaluation.is_valid(): 34 | evaluations.append(evaluation.get_json()) 35 | 36 | return process_evaluations(event, client_factory, evaluations) 37 | 38 | 39 | def process_periodic_evaluations_list(event, client_factory, compliance_result, rule): 40 | evaluations = [] 41 | latest_evaluations = [] 42 | 43 | if not isinstance(compliance_result, list): 44 | print("The return statement from evaluate_periodic() is not a list.") 45 | raise Exception("The return statement from evaluate_periodic() is not a list.") 46 | 47 | for evaluation in compliance_result: 48 | if not isinstance(evaluation, Evaluation): 49 | print("The return statement from evaluate_periodic() is not a list of Evaluation() object.") 50 | raise Exception("The return statement from evaluate_periodic() is not a list of Evaluation() object.") 51 | evaluation.import_fields_from_periodic_event(event) 52 | if evaluation.is_valid(): 53 | latest_evaluations.append(evaluation.get_json()) 54 | 55 | if rule.delete_old_evaluations_on_scheduled_notification: 56 | evaluations = clean_up_old_evaluations(event, client_factory, latest_evaluations) 57 | else: 58 | 
evaluations = latest_evaluations 59 | 60 | return process_evaluations(event, client_factory, evaluations) 61 | 62 | 63 | # This removes older evaluation (usually useful for periodic rule not reporting on AWS::::Account). 64 | def clean_up_old_evaluations(event, client_factory, latest_evaluations): 65 | config_client = client_factory.build_client("config") 66 | latest_eval_ids = [] 67 | for latest_eval in latest_evaluations: 68 | latest_eval_ids.append(latest_eval["ComplianceResourceId"]) 69 | 70 | cleaned_evaluations = [] 71 | 72 | old_evals = [] 73 | next_token = "" 74 | while True: 75 | compliance_details = config_client.get_compliance_details_by_config_rule( 76 | ConfigRuleName=event["configRuleName"], 77 | ComplianceTypes=["COMPLIANT", "NON_COMPLIANT"], 78 | Limit=100, 79 | NextToken=next_token, 80 | ) 81 | 82 | old_evals.extend(compliance_details["EvaluationResults"]) 83 | next_token = compliance_details.get("NextToken", "") 84 | if not next_token: 85 | break 86 | 87 | for old_eval in old_evals: 88 | old_resource_id = old_eval["EvaluationResultIdentifier"]["EvaluationResultQualifier"]["ResourceId"] 89 | if old_resource_id not in latest_eval_ids: 90 | eval = Evaluation( 91 | ComplianceType.NOT_APPLICABLE, 92 | old_resource_id, 93 | resourceType=old_eval["EvaluationResultIdentifier"]["EvaluationResultQualifier"]["ResourceType"], 94 | ) 95 | eval.import_fields_from_periodic_event(event) 96 | 97 | cleaned_evaluations.append(eval.get_json()) 98 | 99 | return cleaned_evaluations + latest_evaluations 100 | -------------------------------------------------------------------------------- /rdklib/evaluation.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"). You may 4 | # not use this file except in compliance with the License. A copy of the License is located at 5 | # 6 | # http://aws.amazon.com/apache2.0/ 7 | # 8 | # or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for 10 | # the specific language governing permissions and limitations under the License. 11 | 12 | import json 13 | 14 | class ComplianceType: 15 | NOT_APPLICABLE = "NOT_APPLICABLE" 16 | COMPLIANT = "COMPLIANT" 17 | NON_COMPLIANT = "NON_COMPLIANT" 18 | 19 | @staticmethod 20 | def get_valid_compliances(): 21 | return [ComplianceType.NOT_APPLICABLE, ComplianceType.COMPLIANT, ComplianceType.NON_COMPLIANT] 22 | 23 | class Evaluation: 24 | annotation = "" 25 | complianceResourceType = None 26 | complianceType = None 27 | complianceResourceId = None 28 | orderingTimestamp = None 29 | 30 | def __init__(self, complianceType, resourceId=None, resourceType=None, annotation=""): 31 | self.annotation = build_annotation(annotation) 32 | self.complianceResourceId = resourceId 33 | self.complianceResourceType = resourceType 34 | if not complianceType in ComplianceType.get_valid_compliances(): 35 | print('The complianceType is not valid. Valid values include: ComplianceType.COMPLIANT, ComplianceType.COMPLIANT and ComplianceType.NOT_APPLICABLE') 36 | raise Exception('The complianceType is not valid. 
Valid values include: ComplianceType.COMPLIANT, ComplianceType.COMPLIANT and ComplianceType.NOT_APPLICABLE') 37 | self.complianceType = complianceType 38 | 39 | def __repr__(self): 40 | return f"Evaluation(annotation='{self.annotation}', resourceId='{self.complianceResourceId}', resourceType='{self.complianceResourceType}', complianceType='{self.complianceType}')" 41 | 42 | def __eq__(self, other): 43 | return ( 44 | ( 45 | (self.annotation is None and other.annotation is None) or 46 | (self.annotation == other.annotation) 47 | ) and 48 | self.complianceResourceType == other.complianceResourceType and 49 | self.complianceType == other.complianceType and 50 | self.complianceResourceId == other.complianceResourceId and 51 | self.orderingTimestamp == other.orderingTimestamp) 52 | 53 | # This generate an evaluation for config 54 | def import_fields_from_periodic_event(self, event): 55 | self.orderingTimestamp = str(json.loads(event['invokingEvent'])['notificationCreationTime']) 56 | 57 | def import_fields_from_configuration_item(self, configuration_item): 58 | self.orderingTimestamp = configuration_item['configurationItemCaptureTime'] 59 | if not self.complianceResourceId: 60 | self.complianceResourceId = configuration_item['resourceId'] 61 | if not self.complianceResourceType: 62 | self.complianceResourceType = configuration_item['resourceType'] 63 | 64 | # Check that an evaluation is well-formed 65 | def is_valid(self): 66 | if not self.complianceType: 67 | print('Missing complianceType from an evaluation result.') 68 | raise Exception('Missing complianceType from an evaluation result.') 69 | 70 | if not self.complianceResourceId: 71 | print('Missing complianceResourceId from an evaluation result.') 72 | raise Exception('Missing complianceResourceId from an evaluation result.') 73 | 74 | if not self.complianceResourceType: 75 | print('Missing complianceResourceType from an evaluation result.') 76 | raise Exception('Missing complianceResourceType from an evaluation result.') 77 | 78 | if not self.orderingTimestamp: 79 | print('Missing orderingTimestamp from an evaluation result.') 80 | raise Exception('Missing orderingTimestamp from an evaluation result.') 81 | 82 | return True 83 | 84 | def get_json(self): 85 | output = { 86 | "ComplianceResourceId": self.complianceResourceId, 87 | "ComplianceResourceType": self.complianceResourceType, 88 | "ComplianceType": self.complianceType, 89 | "OrderingTimestamp": self.orderingTimestamp 90 | } 91 | 92 | if self.annotation: 93 | output["Annotation"] = self.annotation 94 | 95 | return output 96 | 97 | # Build annotation within Service constraints 98 | def build_annotation(annotation_string): 99 | if len(annotation_string) > 256: 100 | return annotation_string[:244] + " [truncated]" 101 | return annotation_string 102 | -------------------------------------------------------------------------------- /tst/test/rdklib_configrule_test.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import os 3 | import sys 4 | import unittest 5 | 6 | # Get the absolute path of the current script 7 | current_script_dir = os.path.dirname(os.path.abspath(__file__)) 8 | 9 | # Get the absolute path of the project directory 10 | project_dir = os.path.abspath(os.path.join(current_script_dir, "..", "..")) 11 | 12 | # Add the project directory to the Python path 13 | sys.path.append(project_dir) 14 | 15 | CODE = importlib.import_module("rdklib.configrule") 16 | 17 | 18 | class TEST_RULE_EMPTY(CODE.ConfigRule): 19 | pass 20 
| 21 | 22 | class TEST_RULE_CHANGED(CODE.ConfigRule): 23 | def evaluate_parameters(self, rule_parameters): 24 | return rule_parameters.strip() 25 | 26 | def evaluate_periodic(self, event, client_factory, valid_rule_parameters): 27 | return "COMPLIANT" 28 | 29 | def evaluate_change(self, event, client_factory, configuration_item, valid_rule_parameters): 30 | return "NON_COMPLIANT" 31 | 32 | def get_execution_role_arn(self, event): 33 | return "Some_ARN" 34 | 35 | 36 | class rdklibConfigRuleTest(unittest.TestCase): 37 | def setUp(self): 38 | pass 39 | 40 | def test_rule_default_behavior(self): 41 | my_empty_rule = TEST_RULE_EMPTY() 42 | with self.assertRaises(CODE.MissingTriggerHandlerError) as context: 43 | my_empty_rule.evaluate_periodic({}, {}, {}) 44 | self.assertTrue( 45 | "You must implement the evaluate_periodic method of the ConfigRule class." in str(context.exception) 46 | ) 47 | 48 | with self.assertRaises(CODE.MissingTriggerHandlerError) as context: 49 | my_empty_rule.evaluate_change({}, {}, {}, {}) 50 | self.assertTrue( 51 | "You must implement the evaluate_change method of the ConfigRule class." in str(context.exception) 52 | ) 53 | 54 | self.assertEqual("no_param", my_empty_rule.evaluate_parameters("no_param")) 55 | 56 | event_param_exec_role = {} 57 | event_param_exec_role["executionRoleArn"] = "aws:arn:account-and-stuff:role/some-role-path" 58 | self.assertEqual( 59 | event_param_exec_role["executionRoleArn"], my_empty_rule.get_execution_role_arn(event_param_exec_role) 60 | ) 61 | 62 | event_param_exec_role = {} 63 | event_param_exec_role["executionRoleArn"] = "aws:arn:account-and-stuff:role/some-role-path" 64 | event_param_exec_role["ruleParameters"] = '{"some_param_key": "value"}' 65 | self.assertEqual( 66 | event_param_exec_role["executionRoleArn"], my_empty_rule.get_execution_role_arn(event_param_exec_role) 67 | ) 68 | 69 | event_param_exec_role = {} 70 | event_param_exec_role["executionRoleArn"] = "aws:arn:account-and-stuff:role/some-role-path" 71 | event_param_exec_role["ruleParameters"] = '{"ExecutionRoleName": "some-role-name"}' 72 | expected_role_arn = "aws:arn:account-and-stuff:role/some-role-name" 73 | self.assertEqual(expected_role_arn, my_empty_rule.get_execution_role_arn(event_param_exec_role)) 74 | 75 | def test_rule_methods_replaced(self): 76 | my_changed_rule = TEST_RULE_CHANGED() 77 | self.assertEqual("param", my_changed_rule.evaluate_parameters(" param ")) 78 | self.assertEqual("COMPLIANT", my_changed_rule.evaluate_periodic({}, {}, {})) 79 | self.assertEqual("NON_COMPLIANT", my_changed_rule.evaluate_change({}, {}, {}, {})) 80 | self.assertEqual("Some_ARN", my_changed_rule.get_execution_role_arn({})) 81 | 82 | def test_get_assume_role_region(self): 83 | """The function: get_assume_role_region() should return appropriate value.""" 84 | my_empty_rule = TEST_RULE_EMPTY() 85 | parameterized = [ 86 | ({}, None), 87 | ({"ruleParameters": "{}"}, None), 88 | ({"ruleParameters": '{"ExecutionRoleRegion": "us-west-2"}'}, "us-west-2"), 89 | ] 90 | for event, expected in parameterized: 91 | with self.subTest(event): 92 | self.assertEqual(my_empty_rule.get_assume_role_region(event), expected) 93 | 94 | def test_get_assume_role_mode(self): 95 | """The function: get_assume_role_mode() should return appropriate value.""" 96 | my_empty_rule = TEST_RULE_EMPTY() 97 | parameterized = [ 98 | ({}, True), 99 | ({"ruleParameters": "{}"}, True), 100 | ({"ruleParameters": '{"AssumeRoleMode": "false"}'}, False), 101 | ({"ruleParameters": '{"AssumeRoleMode": "FALSE"}'}, False), 102 | 
({"ruleParameters": '{"AssumeRoleMode": "true"}'}, True), 103 | ({"ruleParameters": '{"AssumeRoleMode": "TRUE"}'}, True), 104 | ] 105 | for event, expected in parameterized: 106 | with self.subTest(event): 107 | self.assertEqual(expected, my_empty_rule.get_assume_role_mode(event)) 108 | -------------------------------------------------------------------------------- /rdklib/evaluator.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"). You may 4 | # not use this file except in compliance with the License. A copy of the License is located at 5 | # 6 | # http://aws.amazon.com/apache2.0/ 7 | # 8 | # or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for 10 | # the specific language governing permissions and limitations under the License. 11 | 12 | import json 13 | import botocore 14 | from rdklib.util.evaluations import process_event_evaluations_list, process_periodic_evaluations_list 15 | from rdklib.util.service import build_parameters_value_error_response, build_internal_error_response, build_error_response, is_applicable_status, is_internal_error, check_defined, get_configuration_item, inflate_oversized_notification, is_applicable_resource_type 16 | from rdklib.clientfactory import ClientFactory 17 | from rdklib.evaluation import ComplianceType, Evaluation 18 | from rdklib.errors import InvalidParametersError 19 | 20 | class Evaluator: 21 | __rdk_rule = None 22 | __expected_resource_types = None 23 | 24 | def __init__(self, config_rule, expected_resource_types=None, is_applicable_status=False): 25 | self.__rdk_rule = config_rule 26 | self.is_applicable = is_applicable_status 27 | if expected_resource_types is None: 28 | self.__expected_resource_types = [] 29 | else: 30 | self.__expected_resource_types = expected_resource_types 31 | 32 | def handle(self, event, context): 33 | 34 | check_defined(event, 'event') 35 | 36 | client_factory = ClientFactory(role_arn=self.__rdk_rule.get_execution_role_arn(event), region=self.__rdk_rule.get_assume_role_region(event), assume_role_mode=self.__rdk_rule.get_assume_role_mode(event)) 37 | invoking_event = init_event(event, client_factory) 38 | 39 | rule_parameters = {} 40 | if 'ruleParameters' in event: 41 | rule_parameters = json.loads(event['ruleParameters']) 42 | 43 | try: 44 | valid_rule_parameters = self.__rdk_rule.evaluate_parameters(rule_parameters) 45 | except InvalidParametersError as ex: 46 | return build_parameters_value_error_response(ex) 47 | 48 | try: 49 | if invoking_event['messageType'] == 'ScheduledNotification': 50 | compliance_result = self.__rdk_rule.evaluate_periodic(event, client_factory, valid_rule_parameters) 51 | return process_periodic_evaluations_list(event, client_factory, compliance_result, self.__rdk_rule) 52 | if invoking_event['messageType'] in ['ConfigurationItemChangeNotification', 'OversizedConfigurationItemChangeNotification']: 53 | if not self.__expected_resource_types: 54 | raise Exception("Change triggered rules must provide expected resource types") 55 | configuration_item = get_configuration_item(invoking_event) 56 | if is_applicable_status(configuration_item, event, is_applicable=self.is_applicable) and is_applicable_resource_type(configuration_item, self.__expected_resource_types): 57 | 
compliance_result = self.__rdk_rule.evaluate_change(event, client_factory, configuration_item, valid_rule_parameters) 58 | else: 59 | compliance_result = [Evaluation(ComplianceType.NOT_APPLICABLE)] 60 | return process_event_evaluations_list(event, client_factory, compliance_result, configuration_item) 61 | return build_internal_error_response('Unexpected message type', str(invoking_event)) 62 | except botocore.exceptions.ClientError as ex: 63 | error_code = ex.response['Error']['Code'] 64 | if 'AccessDenied' in error_code or 'UnauthorizedOperation' in error_code: 65 | return build_error_response( 66 | "Insufficient access to perform this action.", str(ex), 67 | ex.response['Error']['Code'], 68 | ex.response['Error']['Message']) 69 | if is_internal_error(ex): 70 | return build_internal_error_response("Unexpected error while completing API request", str(ex)) 71 | return build_error_response("Customer error while making API request", str(ex), ex.response['Error']['Code'], ex.response['Error']['Message']) 72 | except ValueError as ex: 73 | return build_internal_error_response(str(ex), str(ex)) 74 | 75 | def init_event(event, client_factory): 76 | invoking_event = json.loads(event['invokingEvent']) 77 | if not invoking_event['messageType'] == 'OversizedConfigurationItemChangeNotification': 78 | return invoking_event 79 | 80 | config_client = client_factory.build_client('config') 81 | change_notification = inflate_oversized_notification(config_client, invoking_event) 82 | return change_notification 83 | -------------------------------------------------------------------------------- /tst/test/rdklib_clientfactory_test.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import os 3 | import sys 4 | import unittest 5 | from unittest.mock import MagicMock, patch 6 | 7 | import botocore 8 | 9 | # Get the absolute path of the current script 10 | current_script_dir = os.path.dirname(os.path.abspath(__file__)) 11 | 12 | # Get the absolute path of the project directory 13 | project_dir = os.path.abspath(os.path.join(current_script_dir, "..", "..")) 14 | 15 | # Add the project directory to the Python path 16 | sys.path.append(project_dir) 17 | 18 | CODE = importlib.import_module("rdklib.clientfactory") 19 | 20 | STS_CLIENT_MOCK = MagicMock() 21 | OTHER_CLIENT_MOCK = MagicMock() 22 | 23 | 24 | def client(client_name, *args, **kwargs): 25 | if client_name == "sts": 26 | return STS_CLIENT_MOCK 27 | return OTHER_CLIENT_MOCK 28 | 29 | 30 | def credentials(role_arn, region): 31 | return {"AccessKeyId": "some-key-id", "SecretAccessKey": "some-secret", "SessionToken": "some-token"} 32 | 33 | 34 | @patch.object(CODE.boto3, "client", MagicMock(side_effect=client)) 35 | class rdklibClientFactoryTest(unittest.TestCase): 36 | @patch.object(CODE, "get_assume_role_credentials", MagicMock(side_effect=credentials)) 37 | def test_clientfactory_build_client(self): 38 | # Init values 39 | client_factory = CODE.ClientFactory("arn:aws:iam:::role/some-role-name") 40 | self.assertEqual(client_factory.__dict__["_ClientFactory__role_arn"], "arn:aws:iam:::role/some-role-name") 41 | 42 | # init with region 43 | client_factory = CODE.ClientFactory("arn:aws:iam:::role/some-role-name", "some-region") 44 | self.assertEqual(client_factory.__dict__["_ClientFactory__role_arn"], "arn:aws:iam:::role/some-role-name") 45 | self.assertEqual(client_factory.__dict__["_ClientFactory__region"], "some-region") 46 | 47 | # No role arn error 48 | 
client_factory.__dict__["_ClientFactory__role_arn"] = None 49 | with self.assertRaises(Exception) as context: 50 | client_factory.build_client("other") 51 | self.assertTrue( 52 | "No Role ARN - ClientFactory must be initialized with a role_arn or set assume_role_mode to False before build_client is called." 53 | in str(context.exception) 54 | ) 55 | 56 | # No creds already 57 | client_factory.__dict__["_ClientFactory__role_arn"] = "arn:aws:iam:::role/some-role-name" 58 | client_factory.__dict__["_ClientFactory__region"] = "some_region" 59 | response = client_factory.build_client("other") 60 | self.assertEqual(response, OTHER_CLIENT_MOCK) 61 | 62 | # Creds already 63 | other_creds = { 64 | "AccessKeyId": "some-other-key-id", 65 | "SecretAccessKey": "some-other-secret", 66 | "SessionToken": "some-other-token", 67 | } 68 | client_factory.__dict__["_ClientFactory__sts_credentials"] = other_creds 69 | client_factory.build_client("other") 70 | self.assertDictEqual(client_factory.__dict__["_ClientFactory__sts_credentials"], other_creds) 71 | 72 | # disable assume role mode 73 | client_factory = CODE.ClientFactory( 74 | role_arn="arn:aws:iam:::role/some-role-name", region="some-region", assume_role_mode=False 75 | ) 76 | self.assertEqual(client_factory.__dict__["_ClientFactory__assume_role_mode"], False) 77 | 78 | def test_get_assume_role_credentials(self): 79 | STS_CLIENT_MOCK.assume_role.return_value = {"Credentials": "some-creds"} 80 | response = CODE.get_assume_role_credentials("arn:aws:iam:::role/some-role-name", "some-region") 81 | self.assertEqual(response, "some-creds") 82 | 83 | STS_CLIENT_MOCK.assume_role.side_effect = botocore.exceptions.ClientError( 84 | {"Error": {"Code": "AccessDenied", "Message": "access-denied"}}, "operation" 85 | ) 86 | with self.assertRaises(botocore.exceptions.ClientError) as context: 87 | CODE.get_assume_role_credentials("arn:aws:iam:::role/some-role-name", "some-region") 88 | self.assertTrue("AccessDenied" in str(context.exception.response["Error"]["Code"])) 89 | self.assertTrue( 90 | "AWS Config does not have permission to assume the IAM role." 
91 | in str(context.exception.response["Error"]["Message"]) 92 | ) 93 | 94 | STS_CLIENT_MOCK.assume_role.side_effect = botocore.exceptions.ClientError( 95 | {"Error": {"Code": "Some-other-error", "Message": "Some-other-error"}}, "operation" 96 | ) 97 | with self.assertRaises(botocore.exceptions.ClientError) as context: 98 | CODE.get_assume_role_credentials("arn:aws:iam:::role/some-role-name", "some-region") 99 | self.assertDictEqual( 100 | context.exception.response, {"Error": {"Code": "InternalError", "Message": "InternalError"}} 101 | ) 102 | 103 | def test_when_not_assume_role_mode_init(self): 104 | """ClientFactory should return the client assume role mode disabled.""" 105 | client_factory = CODE.ClientFactory( 106 | role_arn="arn:aws:iam:::role/some-role-name", 107 | region="some-region", 108 | assume_role_mode=False, 109 | ) 110 | response = client_factory.build_client("other") 111 | self.assertNotEqual(response, STS_CLIENT_MOCK) 112 | self.assertEqual(response, OTHER_CLIENT_MOCK) 113 | 114 | def test_when_not_assume_role_mode_call(self): 115 | """ClientFactory should return the client assume role mode disabled.""" 116 | client_factory = CODE.ClientFactory( 117 | role_arn="arn:aws:iam:::role/some-role-name", 118 | region="some-region", 119 | ) 120 | response = client_factory.build_client("other", assume_role_mode=False) 121 | self.assertNotEqual(response, STS_CLIENT_MOCK) 122 | self.assertEqual(response, OTHER_CLIENT_MOCK) 123 | -------------------------------------------------------------------------------- /rdklib/util/service.py: -------------------------------------------------------------------------------- 1 | import json 2 | import botocore 3 | 4 | 5 | # Helper function used to validate input 6 | def check_defined(reference, reference_name): 7 | if not reference: 8 | raise Exception("Error: " + reference_name + " is not defined.") 9 | return reference 10 | 11 | 12 | # Check whether the resource has been deleted. If it has, then the evaluation is unnecessary. 13 | def is_applicable_status(configuration_item, event, **kwargs): 14 | status = configuration_item["configurationItemStatus"] 15 | is_applicable = kwargs.get("is_applicable", None) 16 | event_left_scope = event["eventLeftScope"] 17 | if is_applicable: 18 | return True 19 | if status in ("ResourceDeleted", "ResourceDeletedNotRecorded", "ResourceNotRecorded"): 20 | print("Resource Deleted, setting Compliance Status to NOT_APPLICABLE.") 21 | return status in ("OK", "ResourceDiscovered") and not event_left_scope 22 | 23 | 24 | # Check whether the resource type for the CI is in scope for the rule - if not, we can skip evaluation 25 | def is_applicable_resource_type(configuration_item, expected_resource_types): 26 | if configuration_item["resourceType"] not in expected_resource_types: 27 | print("ResourceType is not in expected resource types") 28 | return configuration_item["resourceType"] in expected_resource_types 29 | 30 | 31 | # Based on the type of message get the configuration item 32 | # either from configurationItem in the invoking event 33 | # or using the getResourceConfigHistiry API in getConfiguration function. 
34 | def get_configuration_item(invoking_event): 35 | check_defined(invoking_event, "invokingEvent") 36 | return check_defined(invoking_event["configurationItem"], "configurationItem") 37 | 38 | 39 | def is_internal_error(exception): 40 | return ( 41 | not isinstance(exception, botocore.exceptions.ClientError) 42 | or exception.response["Error"]["Code"].startswith("5") 43 | or "InternalError" in exception.response["Error"]["Code"] 44 | or "ServiceError" in exception.response["Error"]["Code"] 45 | ) 46 | 47 | 48 | def build_internal_error_response(internal_error_message, internal_error_details=None): 49 | return build_error_response(internal_error_message, internal_error_details) 50 | 51 | 52 | def build_error_response( 53 | internal_error_message, internal_error_details=None, customer_error_code=None, customer_error_message=None 54 | ): 55 | error_response = { 56 | "internalErrorMessage": internal_error_message, 57 | "internalErrorDetails": internal_error_details, 58 | "customerErrorMessage": customer_error_message, 59 | "customerErrorCode": customer_error_code, 60 | } 61 | print(error_response) 62 | return error_response 63 | 64 | 65 | # Build an error to be displayed in the logs when the parameter is invalid. 66 | def build_parameters_value_error_response(ex): 67 | """Return an error dictionary when the evaluate_parameters() raises a ValueError. 68 | 69 | Keyword arguments: 70 | ex -- Exception text 71 | """ 72 | return build_error_response( 73 | internal_error_message="Parameter value is invalid", 74 | internal_error_details="A ValueError was raised during the validation of the Parameter value", 75 | customer_error_code="InvalidParameterValueException", 76 | customer_error_message=str(ex), 77 | ) 78 | 79 | 80 | # Check whether the message is OversizedConfigurationItemChangeNotification or not 81 | def is_oversized_changed_notification(message_type): 82 | check_defined(message_type, "messageType") 83 | return message_type == "OversizedConfigurationItemChangeNotification" 84 | 85 | 86 | # Check whether the message is a ScheduledNotification or not. 
87 | def is_scheduled_notification(message_type): 88 | check_defined(message_type, "messageType") 89 | return message_type == "ScheduledNotification" 90 | 91 | 92 | def inflate_oversized_notification(config_client, invoking_event): 93 | grh_response = get_resource_config_history(config_client, invoking_event) 94 | config_item = convert_into_notification_config_item(grh_response["configurationItems"][0]) 95 | return { 96 | "configurationItem": config_item, 97 | "notificationCreationTime": invoking_event["notificationCreationTime"], 98 | "messageType": invoking_event["messageType"], 99 | "recordVersion": invoking_event["recordVersion"], 100 | } 101 | 102 | 103 | def get_resource_config_history(config_client, invoking_event): 104 | resource_id = invoking_event["configurationItemSummary"]["resourceId"] 105 | resource_type = invoking_event["configurationItemSummary"]["resourceType"] 106 | return config_client.get_resource_config_history(resourceType=resource_type, resourceId=resource_id, limit=1) 107 | 108 | 109 | def convert_into_notification_config_item(grh_config_item): 110 | return { 111 | "configurationItemCaptureTime": grh_config_item.get("configurationItemCaptureTime"), 112 | "configurationStateId": grh_config_item.get("configurationStateId"), 113 | "awsAccountId": grh_config_item.get("accountId"), 114 | "configurationItemStatus": grh_config_item.get("configurationItemStatus"), 115 | "resourceType": grh_config_item.get("resourceType"), 116 | "resourceId": grh_config_item.get("resourceId"), 117 | "resourceName": grh_config_item.get( 118 | "resourceName", 119 | grh_config_item.get("resourceId"), 120 | ), 121 | "ARN": grh_config_item.get("arn"), 122 | "awsRegion": grh_config_item.get("awsRegion"), 123 | "availabilityZone": grh_config_item.get( 124 | "availabilityZone", 125 | "", 126 | ), 127 | "configurationStateMd5Hash": grh_config_item.get("configurationItemMD5Hash"), 128 | "resourceCreationTime": grh_config_item.get("resourceCreationTime"), 129 | "relatedEvents": grh_config_item.get("relatedEvents"), 130 | "tags": grh_config_item.get("tags"), 131 | "relationships": extract_relationships( 132 | grh_config_item.get("relationships"), 133 | ), 134 | "configuration": json.loads( 135 | grh_config_item.get("configuration", {}), 136 | ), 137 | "supplementaryConfiguration": extract_supplementary_configuration( 138 | grh_config_item.get("supplementaryConfiguration", {}) 139 | ), 140 | } 141 | 142 | 143 | def extract_supplementary_configuration(grh_supplementary_configuration): 144 | return {key: json.loads(value) for key, value in grh_supplementary_configuration.items()} 145 | 146 | 147 | def extract_relationships(grh_relationships): 148 | return [ 149 | { 150 | "name": relationship.get("relationshipName"), 151 | "resourceId": relationship.get("resourceId"), 152 | "resourceName": relationship.get("resourceName"), 153 | "resourceType": relationship.get("resourceType"), 154 | } 155 | for relationship in grh_relationships 156 | ] 157 | -------------------------------------------------------------------------------- /docs/rdklib_add_on_to_RDK_Workshop.md: -------------------------------------------------------------------------------- 1 | # RDKlib Add-On to RDK Workshop 2 | 3 | The [RDK Workshop](https://github.com/awslabs/aws-config-rdk/blob/master/rdk-workshop/instructions.md) had the objective to show how a customer can create their own rules (Custom Config Rule) to detect violations on AWS resources and how they can remediate once the rule is violated. 
4 | 5 | The **RDKlib Add-On** takes the same example, the MFA_ENABLED_RULE created in the **RDK Workshop**, and provides guidance on how to build the same solution using the [RDKLib](https://github.com/awslabs/aws-config-rdklib) Python library to enable you to **run custom AWS Config Rules at scale**. The library can be used to: 6 | 7 | - Help you focus only on the compliance logic, while the library does the heavy lifting 8 | - Ease maintenance by moving the boilerplate code into an AWS Lambda Layer 9 | - Ease deployment by using the AWS Serverless Application Repository 10 | 11 | ## Prerequisites 12 | 13 | - Assumes you have experience using [RDK](https://github.com/awslabs/aws-config-rdk), the AWS Config Rules Development Kit that helps developers set up, author, and test custom Config Rules. 14 | - Assumes you have successfully completed the [RDK Workshop](https://github.com/awslabs/aws-config-rdk/blob/master/rdk-workshop/instructions.md) 15 | 16 | ## Task 1: Install RDKlib locally 17 | 18 | ```bash 19 | pip install rdklib 20 | ``` 21 | 22 | ## Task 2: Create Rule using RDK with RDKlib 23 | 24 | - Periodic Trigger Example: 25 | 26 | ```bash 27 | rdk create RULE_NAME --runtime python3.11-lib --maximum-frequency TwentyFour_Hours 28 | ``` 29 | 30 | - Configuration Change Trigger Example (for IAM User): 31 | 32 | ```bash 33 | rdk create YOUR_RULE_NAME --runtime python3.11-lib --resource-types AWS::IAM::User 34 | ``` 35 | 36 | ## Task 3: Write the Code 37 | 38 | - Rewrite the code for the MFA_ENABLED_RULE used in the [RDK Workshop](https://github.com/awslabs/aws-config-rdk/blob/master/rdk-workshop/instructions.md) examples so that it uses RDKlib. 39 | - For basic solutions to this exercise, scroll to the end. 40 | 41 | ## Task 4: Install RDKlib layer in AWS 42 | 43 | RDKlib is designed to work as an AWS Lambda Layer. It allows you to use the library without needing to include it in your deployment package. 44 | 45 | ### Install via AWS CLI 46 | 47 | 1. Create CloudFormation ChangeSet for RDKlib-Layer 48 | 49 | ```bash 50 | aws serverlessrepo create-cloud-formation-change-set --application-id arn:aws:serverlessrepo:ap-southeast-1:711761543063:applications/rdklib --stack-name RDKlib-Layer 51 | ``` 52 | 53 | 1. Copy/paste the full change-set ARN into the following command, then execute the ChangeSet 54 | 55 | ```bash 56 | aws cloudformation execute-change-set --change-set-name NAME_OF_THE_CHANGE_SET 57 | ``` 58 | 59 | 1. Describe the CloudFormation Stack 60 | 61 | ```bash 62 | aws cloudformation describe-stack-resources --stack-name serverlessrepo-RDKlib-Layer 63 | ``` 64 | 65 | 1. Take note of the **PhysicalResourceId** as that is the ARN for the **RDKlib Layer** 66 | 67 | ### Install via AWS Console 68 | 69 | 1. Create Function from Serverless App Repository. Click [here](https://console.aws.amazon.com/lambda/home#/create/application?tab=serverlessApps) 70 | 1. Search for **rdklib** 71 | 1. Deploy 72 | 1. Take note of the **PhysicalResourceId** as that is the ARN for the **RDKlib Layer** 73 | - CloudFormation -> Stacks -> **serverlessrepo-rdklib** -> Resources 74 | 75 | ## Task 5: Provide Role to Assume for Lambda Functions 76 | 77 | - By default, the Lambda functions will try to assume the AWSServiceConfigRole, which is not allowed. 78 | - You need to provide the role to assume for the Lambda function when it's running the code logic you implemented for MFA_ENABLED_RULE. 79 | - This is done by updating the **ExecutionRoleName** input parameter and providing the role name.
80 | 81 | ```bash 82 | rdk modify MFA_ENABLED_RULE -i '{"ExecutionRoleName":"ExampleRole"}' 83 | ``` 84 | 85 | - If using the same Lambda Role that was created by rdk, it will look like this: 86 | 87 | ```bash 88 | rdk modify MFA_ENABLED_RULE -i '{"ExecutionRoleName":"rdk/MFAENABLEDRULEconfigchangesrdklib-rdkLambdaRole-R0W9ZV90V0HV"}' 89 | ``` 90 | 91 | ## Task 6: Deploy the Rule 92 | 93 | ```bash 94 | rdk deploy YOUR_RULE_NAME --rdklib-layer-arn YOUR_RDKLIB_LAYER_ARN 95 | ``` 96 | 97 | ## Solutions - MFA_ENABLED_RULE Example with RDKlib 98 | 99 | ### Triggers on Configuration Change (AWS::IAM::User) 100 | 101 | ```python 102 | def evaluate_change(self, event, client_factory, configuration_item, valid_rule_parameters): 103 | 104 | username = configuration_item.get("resourceName") 105 | 106 | iam_client = client_factory.build_client("iam") 107 | 108 | response = iam_client.list_mfa_devices(UserName=username) 109 | 110 | # IAM user has MFA enabled. 111 | if response["MFADevices"]: 112 | return [Evaluation(ComplianceType.COMPLIANT)] 113 | 114 | # IAM user has MFA disabled. 115 | return [Evaluation(ComplianceType.NON_COMPLIANT, annotation="MFA needs to be enabled for user")] 116 | ``` 117 | 118 | ### Triggers Periodic (without Pagination) 119 | 120 | ```python 121 | def evaluate_periodic(self, event, client_factory, valid_rule_parameters): 122 | evaluations = [] 123 | 124 | iam_client = client_factory.build_client("iam") 125 | 126 | response = iam_client.list_users() 127 | 128 | for user in response["Users"]: 129 | username = user["UserName"] 130 | response = iam_client.list_mfa_devices(UserName=username) 131 | 132 | # IAM user has MFA enabled. 133 | if response["MFADevices"]: 134 | evaluations.append(Evaluation(ComplianceType.COMPLIANT, username, "AWS::IAM::User")) 135 | 136 | # IAM user has MFA disabled. 137 | if not response["MFADevices"]: 138 | annotation = "MFA needs to be enabled for user." 139 | evaluations.append( 140 | Evaluation(ComplianceType.NON_COMPLIANT, username, "AWS::IAM::User", annotation=annotation) 141 | ) 142 | return evaluations 143 | ``` 144 | 145 | ### Triggers Periodic (with Pagination) 146 | 147 | ```python 148 | def evaluate_periodic(self, event, client_factory, valid_rule_parameters): 149 | 150 | evaluations = [] 151 | 152 | iam_client = client_factory.build_client("iam") 153 | 154 | paginator = iam_client.get_paginator("list_users") 155 | response_iterator = paginator.paginate() 156 | 157 | for response in response_iterator: 158 | for user in response["Users"]: 159 | username = user["UserName"] 160 | response = iam_client.list_mfa_devices(UserName=username) 161 | 162 | # IAM user has MFA enabled. 163 | if response["MFADevices"]: 164 | evaluations.append(Evaluation(ComplianceType.COMPLIANT, username, "AWS::IAM::User")) 165 | # IAM user has MFA disabled. 166 | if not response["MFADevices"]: 167 | annotation = "MFA needs to be enabled for user." 168 | evaluations.append( 169 | Evaluation(ComplianceType.NON_COMPLIANT, username, "AWS::IAM::User", annotation=annotation) 170 | ) 171 | return evaluations 172 | ``` 173 | -------------------------------------------------------------------------------- /docs/developer_notes.md: -------------------------------------------------------------------------------- 1 | # Dev Guide 2 | 3 | ## _class_ **ClientFactory** 4 | 5 | _method_ **build_client()** 6 | 7 | Create or reuse a boto3 client. It minimizes the number of STS calls 8 | by reusing existing client, if already available. 
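For example, inside a rule's _evaluate_periodic()_ or _evaluate_change()_ method, a client is typically obtained from the _client_factory_ argument. A minimal sketch (the `iam` service name, the `list_mfa_devices` call, and the user name are only illustrative, echoing the MFA_ENABLED_RULE example earlier in these docs):

```python
# Build (or reuse) a boto3 IAM client through the factory, then call a boto3 API as usual.
iam_client = client_factory.build_client("iam")
mfa_devices = iam_client.list_mfa_devices(UserName="some-user")["MFADevices"]
```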
9 | 10 | **Request Syntax** 11 | 12 | ```python 13 | response = client_factory.build_client( 14 | service='string', region='string', assume_role_mode='bool') 15 | ``` 16 | 17 | **Parameters** 18 | 19 | - **service** _(string)_ \-- **\[REQUIRED\]** 20 | 21 | The boto3 name of the AWS service 22 | 23 | - **region** _(string)_ \-- **\[OPTIONAL\]** 24 | 25 | Default: None. The boto3 region 26 | 27 | - **assume_role_mode** _(boolean)_ \-- **\[OPTIONAL\]** 28 | 29 | Default: True. By default, ClientFactory uses the AWS Config 30 | role, which comes from the Config Rule event. 31 | 32 | 1. Users can disable assume_role_mode by setting it to False, 33 | or by setting `AssumeRoleMode` to False in the Config Rule 34 | parameters. ClientFactory will then use the attached Lambda 35 | role for the execution. 36 | 2. Users can also specify a custom role in the Config Rule 37 | parameters with `ExecutionRoleName` as well as 38 | `ExecutionRoleRegion` for ClientFactory 39 | 40 | ## _class_ **ConfigRule** 41 | 42 | _method_ **evaluate_parameters()** 43 | 44 | Used to analyze the validity of the input parameters of the Config 45 | Rule. 46 | 47 | **Parameter** 48 | 49 | - **rule_parameters** _(dict)_ 50 | 51 | The input parameters of the Config Rule. 52 | 53 | **Return Syntax** 54 | 55 | If one of the parameters is invalid, raise an 56 | InvalidParametersError. 57 | 58 | ```python 59 | from rdklib import InvalidParametersError 60 | raise InvalidParametersError("Error message to display") 61 | ``` 62 | 63 | If the parameters are all valid, return a dict. 64 | 65 | ```python 66 | return valid_rule_parameters 67 | ``` 68 | 69 | _method_ **evaluate_change()** 70 | 71 | Used to evaluate a Configuration Change triggered rule. 72 | 73 | **Parameters** 74 | 75 | - **event** 76 | 77 | Lambda event provided by Config. 78 | 79 | - **client_factory** _(ClientFactory)_ 80 | 81 | _ClientFactory_ object to be used in this rule. 82 | 83 | - **configuration_item** _(dict)_ 84 | 85 | The full configuration item, even if oversized. 86 | 87 | - **valid_rule_parameters** _(dict)_ 88 | 89 | The output of the evaluate_parameters() method. 90 | 91 | **Return Syntax** 92 | 93 | Return a list of _Evaluation_ object(s). 94 | 95 | ```python 96 | return [Evaluation()] 97 | ``` 98 | 99 | It can be an empty list if there is no evaluation. 100 | 101 | _method_ **evaluate_periodic()** 102 | 103 | Used to evaluate a Periodic triggered rule. 104 | 105 | **Parameters** 106 | 107 | - **event** 108 | 109 | Lambda event provided by Config. 110 | 111 | - **client_factory** _(ClientFactory)_ 112 | 113 | _ClientFactory_ object to be used in this rule. 114 | 115 | - **valid_rule_parameters** _(dict)_ 116 | 117 | The output of the evaluate_parameters() method. 118 | 119 | **Return Syntax** 120 | 121 | Return a list of _Evaluation_ object(s). 122 | 123 | ```python 124 | return [Evaluation()] 125 | ``` 126 | 127 | It can be an empty list if there is no evaluation. 128 | 129 | ## _class_ **Evaluation** 130 | 131 | Class for the _Evaluation_ object. 132 | 133 | **Request Syntax** 134 | 135 | ```python 136 | evaluation = Evaluation( 137 | complianceType='ComplianceType', 138 | resourceId='string', 139 | resourceType='string', 140 | annotation='string') 141 | ``` 142 | 143 | **Parameter** 144 | 145 | - **complianceType** _(ComplianceType)_ **\[REQUIRED\]** 146 | 147 | Compliance type of the evaluation. 148 | 149 | - **resourceId** _(string)_ 150 | 151 | Resource ID of the evaluation. It gets autopopulated for 152 | Configuration Change triggered rules.
153 | 154 | - **resourceType** _(string)_ 155 | 156 | Resource type of the evaluation (as per AWS CloudFormation 157 | definition). It gets autopopulated for Configuration Change 158 | triggered rules. 159 | 160 | - **annotation** _(string)_ 161 | 162 | Annotation for the evaluation. It gets shortened to 256 characters 163 | automatically. 164 | 165 | ## _class_ **ComplianceType** 166 | 167 | Class for the _ComplianceType_ object. 168 | 169 | **Request Syntax** 170 | 171 | Evaluation will display as \"Compliant\" 172 | 173 | ```python 174 | compliance_type = ComplianceType.COMPLIANT 175 | ``` 176 | 177 | Evaluation will display as \"Non Compliant\" 178 | 179 | ```python 180 | compliance_type = ComplianceType.NON_COMPLIANT 181 | ``` 182 | 183 | Evaluation will not display: 184 | 185 | ```python 186 | compliance_type = ComplianceType.NOT_APPLICABLE 187 | ``` 188 | 189 | ## _Helper functions_ 190 | 191 | **rdklibtest** 192 | 193 | _assert_successful_evaluation(\*\*kwargs)_ 194 | 195 | Compare the list of _Evaluation_ objects returned by 196 | either _evaluate_change()_ or _evaluate_periodic()_ with the expected list. 197 | 198 | **Request Syntax** 199 | 200 | ```python 201 | rdklibtest.assert_successful_evaluation(self, response, resp_expected, evaluations_count=1) 202 | ``` 203 | 204 | **Parameters** 205 | 206 | - response (list of Evaluation Objects) **\[REQUIRED\]** 207 | 208 | The list of the response from _evaluate_change()_ or _evaluate_periodic()_ 209 | 210 | - resp_expected (list of Evaluation Objects) **\[REQUIRED\]** 211 | 212 | The list of the expected response from _evaluate_change()_ or _evaluate_periodic()_ 213 | 214 | - evaluations_count (int) 215 | 216 | The number of Evaluation Objects expected. Default is 1. 217 | 218 | **Return** 219 | 220 | None 221 | 222 | **_create_test_configurationchange_event(\*\*kwargs)_** 223 | 224 | Generate a dummy configuration change event that can be used as 225 | input when testing _evaluate_change()_ 226 | 227 | **Request Syntax** 228 | 229 | ```python 230 | rdklibtest.create_test_configurationchange_event(invoking_event_json, rule_parameters_json=None) 231 | ``` 232 | 233 | **Parameters** 234 | 235 | - invoking_event (dict) **\[REQUIRED\]** 236 | 237 | The invoking event JSON from Config 238 | 239 | - rule_parameters_json (dict) 240 | 241 | The key/value pair(s) for the Rule parameters. Defaults to None. 242 | 243 | **Return Syntax** 244 | 245 | ```python 246 | { 247 | "configRuleName":"myrule", 248 | "executionRoleArn":"arn:aws:iam::123456789012:role/example", 249 | "eventLeftScope": False, 250 | "invokingEvent": json.dumps(invoking_event_json), 251 | "accountId": "123456789012", 252 | "configRuleArn": "arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan", 253 | "resultToken":"token", 254 | "ruleParameters": json.dumps(rule_parameters_json) 255 | } 256 | ``` 257 | 258 | **_create_test_scheduled_event(\*\*kwargs)_** 259 | 260 | Generate a dummy periodic event that can be used as input when 261 | testing _evaluate_periodic()_ 262 | 263 | **Request Syntax** 264 | 265 | ```python 266 | rdklibtest.create_test_scheduled_event(rule_parameters_json=None) 267 | ``` 268 | 269 | **Parameter** 270 | 271 | - rule_parameters_json (dict) 272 | 273 | The key/value pair(s) for the Rule parameters. Defaults to None.
274 | 275 | **Return Syntax** 276 | 277 | ```python 278 | { 279 | "configRuleName":"myrule", 280 | "executionRoleArn":"arn:aws:iam::123456789012:role/example", 281 | "eventLeftScope": False, 282 | "invokingEvent": "{\"messageType\": \"ScheduledNotification\", \"notificationCreationTime\": \"2017-12-23T22:11:18.158Z\"}", 283 | "accountId": "123456789012", 284 | "configRuleArn": "arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan", 285 | "resultToken":"token", 286 | "ruleParameters": json.dumps(rule_parameters_json) 287 | } 288 | ``` 289 | -------------------------------------------------------------------------------- /tst/test/rdklibtest_test_test.py: -------------------------------------------------------------------------------- 1 | import json 2 | import unittest 3 | from rdklib.evaluation import Evaluation, ComplianceType 4 | 5 | import importlib 6 | 7 | import sys 8 | import os 9 | 10 | # Get the absolute path of the current script 11 | current_script_dir = os.path.dirname(os.path.abspath(__file__)) 12 | 13 | # Get the absolute path of the project directory 14 | project_dir = os.path.abspath(os.path.join(current_script_dir, "..", "..")) 15 | 16 | # Add the project directory to the Python path 17 | sys.path.append(project_dir) 18 | 19 | CODE = importlib.import_module("rdklibtest.test") 20 | 21 | 22 | class rdklibtestTest(unittest.TestCase): 23 | def test_assert_successful_evaluation(self): 24 | response = [Evaluation(ComplianceType.COMPLIANT, "some-resource-id", "some-resource-type", "some-annotation")] 25 | response_two = [ 26 | Evaluation(ComplianceType.NON_COMPLIANT, "some-resource-id", "some-resource-type", "some-annotation") 27 | ] 28 | response_combine = response + response_two 29 | 30 | # AssertionError due to 2 different lenghts of response and resp_expected 31 | with self.assertRaises(AssertionError) as context: 32 | CODE.assert_successful_evaluation(self, response, response_combine) 33 | self.assertTrue("1 != 2" in str(context.exception)) 34 | 35 | # AssertionError due to evaluation_count default 36 | with self.assertRaises(AssertionError) as context: 37 | CODE.assert_successful_evaluation(self, response_combine, response_combine) 38 | self.assertTrue("2 != 1" in str(context.exception)) 39 | 40 | # AssertionError due to evaluation_count incorrect 41 | with self.assertRaises(AssertionError) as context: 42 | CODE.assert_successful_evaluation(self, response, response, 2) 43 | self.assertTrue("1 != 2" in str(context.exception)) 44 | 45 | # Passing test for several evaluations 46 | CODE.assert_successful_evaluation(self, response, response) 47 | CODE.assert_successful_evaluation(self, response_combine, response_combine, 2) 48 | 49 | def test_assert_customer_error_response(self): 50 | error_message_no_code = { 51 | "internalErrorMessage": "some-internal-error-msg", 52 | "internalErrorDetails": "some-internal-error-details", 53 | "customerErrorCode": "", 54 | } 55 | error_message_no_msg = { 56 | "internalErrorMessage": "some-internal-error-msg", 57 | "internalErrorDetails": "some-internal-error-details", 58 | "customerErrorMessage": "", 59 | "customerErrorCode": "some-external-error-code", 60 | } 61 | error_message = { 62 | "internalErrorMessage": "some-internal-error-msg", 63 | "internalErrorDetails": "some-internal-error-details", 64 | "customerErrorMessage": "some-external-error-msg", 65 | "customerErrorCode": "some-external-error-code", 66 | } 67 | # Pass test 68 | CODE.assert_customer_error_response(self, error_message) 69 | 70 | # Customer code given 71 | 
CODE.assert_customer_error_response(self, error_message, "some-external-error-code") 72 | 73 | with self.assertRaises(AssertionError) as context: 74 | CODE.assert_customer_error_response(self, error_message, "some-code") 75 | self.assertTrue("'some-code' != 'some-external-error-code'" in str(context.exception)) 76 | 77 | # Customer code not given, checking if present and not empty 78 | with self.assertRaises(AssertionError) as context: 79 | CODE.assert_customer_error_response(self, error_message_no_code) 80 | self.assertTrue("'' is not true" in str(context.exception)) 81 | 82 | # Customer msg given 83 | CODE.assert_customer_error_response(self, error_message, customer_error_message="some-external-error-msg") 84 | 85 | with self.assertRaises(AssertionError) as context: 86 | CODE.assert_customer_error_response(self, error_message, customer_error_message="some-msg") 87 | self.assertTrue("'some-msg' != 'some-external-error-msg'" in str(context.exception)) 88 | 89 | # Customer msg not given, checking if present and not empty 90 | with self.assertRaises(AssertionError) as context: 91 | CODE.assert_customer_error_response(self, error_message_no_msg) 92 | self.assertTrue("'' is not true" in str(context.exception)) 93 | 94 | # Internal message, checking if not empty 95 | error_message_msg_empty = { 96 | "internalErrorMessage": "", 97 | "customerErrorMessage": "some-external-error-msg", 98 | "customerErrorCode": "some-external-error-code", 99 | } 100 | with self.assertRaises(AssertionError) as context: 101 | CODE.assert_customer_error_response(self, error_message_msg_empty) 102 | self.assertTrue("'' is not true" in str(context.exception)) 103 | 104 | # Internal details, checking if not empty 105 | error_message_detail_empty = { 106 | "internalErrorMessage": "some-internal-error-msg", 107 | "internalErrorDetails": "", 108 | "customerErrorMessage": "some-external-error-msg", 109 | "customerErrorCode": "some-external-error-code", 110 | } 111 | with self.assertRaises(AssertionError) as context: 112 | CODE.assert_customer_error_response(self, error_message_detail_empty) 113 | self.assertTrue("'' is not true" in str(context.exception)) 114 | 115 | def test_create_test_configurationchange_event(self): 116 | invoking_event = {"event": "my_event"} 117 | expected_event_no_param = { 118 | "configRuleName": "myrule", 119 | "executionRoleArn": "arn:aws:iam::123456789012:role/example", 120 | "eventLeftScope": False, 121 | "invokingEvent": '{"event": "my_event"}', 122 | "accountId": "123456789012", 123 | "configRuleArn": "arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan", 124 | "resultToken": "token", 125 | } 126 | self.assertDictEqual(CODE.create_test_configurationchange_event(invoking_event), expected_event_no_param) 127 | 128 | expected_event_param = { 129 | "configRuleName": "myrule", 130 | "executionRoleArn": "arn:aws:iam::123456789012:role/example", 131 | "eventLeftScope": False, 132 | "invokingEvent": '{"event": "my_event"}', 133 | "accountId": "123456789012", 134 | "configRuleArn": "arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan", 135 | "resultToken": "token", 136 | "ruleParameters": '{"somekeyparam": "somevalueparam"}', 137 | } 138 | parameter = {"somekeyparam": "somevalueparam"} 139 | self.assertDictEqual( 140 | CODE.create_test_configurationchange_event(invoking_event, parameter), expected_event_param 141 | ) 142 | 143 | def test_create_test_scheduled_event(self): 144 | invoking_event = { 145 | "messageType": "ScheduledNotification", 146 | 
"notificationCreationTime": "2017-12-23T22:11:18.158Z", 147 | } 148 | 149 | expected_event_no_param = { 150 | "configRuleName": "myrule", 151 | "executionRoleArn": "arn:aws:iam::123456789012:role/example", 152 | "eventLeftScope": False, 153 | "invokingEvent": json.dumps(invoking_event), 154 | "accountId": "123456789012", 155 | "configRuleArn": "arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan", 156 | "resultToken": "token", 157 | } 158 | self.assertDictEqual(CODE.create_test_scheduled_event(), expected_event_no_param) 159 | 160 | expected_event_param = { 161 | "configRuleName": "myrule", 162 | "executionRoleArn": "arn:aws:iam::123456789012:role/example", 163 | "eventLeftScope": False, 164 | "invokingEvent": json.dumps(invoking_event), 165 | "accountId": "123456789012", 166 | "configRuleArn": "arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan", 167 | "resultToken": "token", 168 | "ruleParameters": '{"somekeyparam": "somevalueparam"}', 169 | } 170 | parameter = {"somekeyparam": "somevalueparam"} 171 | self.assertDictEqual(CODE.create_test_scheduled_event(parameter), expected_event_param) 172 | -------------------------------------------------------------------------------- /tst/test/rdklib_evaluation_test.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import json 3 | import os 4 | import sys 5 | import unittest 6 | 7 | # from rdklib.evaluation import ComplianceType, Evaluation, build_annotation 8 | 9 | 10 | # Get the absolute path of the current script 11 | current_script_dir = os.path.dirname(os.path.abspath(__file__)) 12 | 13 | # Get the absolute path of the project directory 14 | project_dir = os.path.abspath(os.path.join(current_script_dir, "..", "..")) 15 | 16 | # Add the project directory to the Python path 17 | sys.path.append(project_dir) 18 | 19 | CODE = importlib.import_module("rdklib.evaluation") 20 | 21 | 22 | class rdklibEvaluationTest(unittest.TestCase): 23 | def test_build_annotation(self): 24 | string_0 = "" 25 | response = CODE.build_annotation(string_0) 26 | self.assertEqual(response, string_0) 27 | 28 | string_256 = "x" * 256 29 | response = CODE.build_annotation(string_256) 30 | self.assertEqual(response, string_256) 31 | 32 | string_257 = string_256 + "x" 33 | response = CODE.build_annotation(string_257) 34 | expected_resp = "x" * 244 + " [truncated]" 35 | self.assertEqual(response, expected_resp) 36 | self.assertEqual(len(response), 256) 37 | 38 | def test_compliance_type(self): 39 | # Valid value 40 | self.assertTrue(CODE.ComplianceType.NOT_APPLICABLE == "NOT_APPLICABLE") 41 | self.assertTrue(CODE.ComplianceType.COMPLIANT == "COMPLIANT") 42 | self.assertTrue(CODE.ComplianceType.NON_COMPLIANT == "NON_COMPLIANT") 43 | 44 | # Invalid value 45 | with self.assertRaises(AttributeError) as context: 46 | CODE.ComplianceType.SOMETHING_ELSE 47 | self.assertTrue("SOMETHING_ELSE" in str(context.exception)) 48 | 49 | def test_evaluation_init(self): 50 | # Missing argument Error 51 | with self.assertRaises(TypeError) as context: 52 | evaluation = CODE.Evaluation() 53 | self.assertTrue("__init__()" in str(context.exception)) 54 | 55 | # Invalid int argument Error 56 | with self.assertRaises(Exception) as context: 57 | evaluation = CODE.Evaluation(4) 58 | print(str(context.exception)) 59 | self.assertTrue( 60 | "The complianceType is not valid. 
Valid values include: ComplianceType.COMPLIANT, ComplianceType.NON_COMPLIANT and ComplianceType.NOT_APPLICABLE" 61 | in str(context.exception) 62 | ) 63 | 64 | # Invalid str argument Error 65 | with self.assertRaises(Exception) as context: 66 | evaluation = CODE.Evaluation("string") 67 | print(str(context.exception)) 68 | self.assertTrue( 69 | "The complianceType is not valid. Valid values include: ComplianceType.COMPLIANT, ComplianceType.NON_COMPLIANT and ComplianceType.NOT_APPLICABLE" 70 | in str(context.exception) 71 | ) 72 | 73 | # Default value 74 | evaluation = CODE.Evaluation(CODE.ComplianceType.COMPLIANT) 75 | self.assertEqual(evaluation.complianceType, CODE.ComplianceType.COMPLIANT) 76 | self.assertEqual(evaluation.complianceResourceId, None) 77 | self.assertEqual(evaluation.complianceResourceType, None) 78 | self.assertEqual(evaluation.annotation, "") 79 | 80 | # Assigning value 81 | evaluation = CODE.Evaluation( 82 | CODE.ComplianceType.COMPLIANT, "some-resource-id", "some-resource-type", "some-annotation" 83 | ) 84 | self.assertEqual(evaluation.complianceType, CODE.ComplianceType.COMPLIANT) 85 | self.assertEqual(evaluation.complianceResourceId, "some-resource-id") 86 | self.assertEqual(evaluation.complianceResourceType, "some-resource-type") 87 | self.assertEqual(evaluation.annotation, "some-annotation") 88 | 89 | def test_evaluation_eq(self): 90 | evaluation = CODE.Evaluation( 91 | CODE.ComplianceType.COMPLIANT, "some-resource-id", "some-resource-type", "some-annotation" 92 | ) 93 | self.assertTrue(evaluation.__eq__(evaluation)) 94 | self.assertFalse( 95 | evaluation.__eq__( 96 | CODE.Evaluation( 97 | CODE.ComplianceType.NON_COMPLIANT, "some-resource-id", "some-resource-type", "some-annotation" 98 | ) 99 | ) 100 | ) 101 | self.assertFalse( 102 | evaluation.__eq__( 103 | CODE.Evaluation( 104 | CODE.ComplianceType.COMPLIANT, "some-resource-id-2", "some-resource-type", "some-annotation" 105 | ) 106 | ) 107 | ) 108 | self.assertFalse( 109 | evaluation.__eq__( 110 | CODE.Evaluation( 111 | CODE.ComplianceType.COMPLIANT, "some-resource-id", "some-resource-type-2", "some-annotation" 112 | ) 113 | ) 114 | ) 115 | self.assertFalse( 116 | evaluation.__eq__(CODE.Evaluation(CODE.ComplianceType.COMPLIANT, "some-resource-id", "some-resource-type")) 117 | ) 118 | 119 | def test_import_fields_from_periodic_event(self): 120 | evaluation = CODE.Evaluation(CODE.ComplianceType.COMPLIANT) 121 | self.assertEqual(evaluation.orderingTimestamp, None) 122 | event = {"invokingEvent": json.dumps({"notificationCreationTime": "some-date"})} 123 | evaluation.import_fields_from_periodic_event(event) 124 | self.assertEqual(evaluation.orderingTimestamp, "some-date") 125 | 126 | def test_import_fields_from_configuration_item(self): 127 | config_item = { 128 | "configurationItemCaptureTime": "some-date", 129 | "resourceId": "some-resource-id", 130 | "resourceType": "some-resource-type", 131 | } 132 | evaluation = CODE.Evaluation(CODE.ComplianceType.COMPLIANT) 133 | evaluation.import_fields_from_configuration_item(config_item) 134 | self.assertEqual(evaluation.complianceResourceId, "some-resource-id") 135 | self.assertEqual(evaluation.complianceResourceType, "some-resource-type") 136 | self.assertEqual(evaluation.orderingTimestamp, "some-date") 137 | 138 | evaluation = CODE.Evaluation(CODE.ComplianceType.COMPLIANT, "some-other-id", "some-other-type") 139 | evaluation.import_fields_from_configuration_item(config_item) 140 | self.assertEqual(evaluation.complianceResourceId, "some-other-id") 141 |
self.assertEqual(evaluation.complianceResourceType, "some-other-type") 142 | 143 | def test_is_valid(self): 144 | evaluation = CODE.Evaluation(CODE.ComplianceType.COMPLIANT) 145 | evaluation.complianceType = None 146 | with self.assertRaises(Exception) as context: 147 | evaluation.is_valid() 148 | self.assertTrue("Missing complianceType from an evaluation result." in str(context.exception)) 149 | 150 | evaluation.complianceType = "some-compliance" 151 | with self.assertRaises(Exception) as context: 152 | evaluation.is_valid() 153 | self.assertTrue("Missing complianceResourceId from an evaluation result." in str(context.exception)) 154 | 155 | evaluation.complianceResourceId = "some-id" 156 | with self.assertRaises(Exception) as context: 157 | evaluation.is_valid() 158 | self.assertTrue("Missing complianceResourceType from an evaluation result." in str(context.exception)) 159 | 160 | evaluation.complianceResourceType = "some-type" 161 | with self.assertRaises(Exception) as context: 162 | evaluation.is_valid() 163 | self.assertTrue("Missing orderingTimestamp from an evaluation result." in str(context.exception)) 164 | 165 | evaluation.orderingTimestamp = "some-date" 166 | self.assertTrue(evaluation.is_valid()) 167 | 168 | def test_get_json(self): 169 | evaluation = CODE.Evaluation(CODE.ComplianceType.NON_COMPLIANT, "some-resource-id", "some-resource-type") 170 | evaluation.orderingTimestamp = "some-date" 171 | response = evaluation.get_json() 172 | resp_expected = { 173 | "ComplianceResourceId": "some-resource-id", 174 | "ComplianceResourceType": "some-resource-type", 175 | "ComplianceType": "NON_COMPLIANT", 176 | "OrderingTimestamp": "some-date", 177 | } 178 | self.assertDictEqual(response, resp_expected) 179 | 180 | evaluation.annotation = "some-annotation" 181 | response = evaluation.get_json() 182 | resp_expected = { 183 | "ComplianceResourceId": "some-resource-id", 184 | "ComplianceResourceType": "some-resource-type", 185 | "ComplianceType": "NON_COMPLIANT", 186 | "OrderingTimestamp": "some-date", 187 | "Annotation": "some-annotation", 188 | } 189 | self.assertDictEqual(response, resp_expected) 190 | 191 | def test_evaluation_repr(self): 192 | """The function: __repr__() should return the string representation of the object.""" 193 | evaluation = CODE.Evaluation( 194 | CODE.ComplianceType.NON_COMPLIANT, 195 | "some-resource-id", 196 | "some-resource-type", 197 | "Annotation message for test.", 198 | ) 199 | # pylint: disable-next=unused-import,import-outside-toplevel 200 | from rdklib.evaluation import Evaluation # noqa: F401 201 | 202 | # pylint: disable-next=eval-used 203 | actual = eval(str(evaluation)) # noqa: DUO104 204 | self.assertEqual(actual, evaluation) 205 | -------------------------------------------------------------------------------- /tst/test/rdklib_evaluator_test.py: -------------------------------------------------------------------------------- 1 | import json 2 | import unittest 3 | from unittest.mock import patch, MagicMock 4 | import botocore 5 | from rdklib.errors import InvalidParametersError 6 | 7 | import importlib 8 | 9 | import sys 10 | import os 11 | 12 | # Get the absolute path of the current script 13 | current_script_dir = os.path.dirname(os.path.abspath(__file__)) 14 | 15 | # Get the absolute path of the project directory 16 | project_dir = os.path.abspath(os.path.join(current_script_dir, "..", "..")) 17 | 18 | # Add the project directory to the Python path 19 | sys.path.append(project_dir) 20 | 21 | CODE = importlib.import_module("rdklib.evaluator") 22 
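# Module-level mocks (below): CLIENT_FACTORY stands in for rdklib's ClientFactory and CONFIG_CLIENT_MOCK
# for the AWS Config client, so Evaluator.handle() can be exercised without making real AWS calls.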
| 23 | CLIENT_FACTORY = MagicMock() 24 | CONFIG_CLIENT_MOCK = MagicMock() 25 | 26 | 27 | def mock_get_client(client_name, *args, **kwargs): 28 | return CONFIG_CLIENT_MOCK 29 | 30 | 31 | @patch.object(CLIENT_FACTORY, "build_client", MagicMock(side_effect=mock_get_client)) 32 | @patch.object(CODE, "check_defined", MagicMock(return_value=True)) 33 | class rdklibEvaluatorTest(unittest.TestCase): 34 | rule = MagicMock() 35 | 36 | def setUp(self): 37 | self.rule.reset_mock() 38 | 39 | def test_evaluator_init(self): 40 | evaluator = CODE.Evaluator("config-rule") 41 | self.assertEqual(evaluator.__dict__["_Evaluator__rdk_rule"], "config-rule") 42 | self.assertEqual(evaluator.__dict__["_Evaluator__expected_resource_types"], []) 43 | 44 | applicable_resource_type = "applicable-resource-type" 45 | evaluator = CODE.Evaluator("config-rule", [applicable_resource_type]) 46 | self.assertEqual(evaluator.__dict__["_Evaluator__rdk_rule"], "config-rule") 47 | self.assertEqual(evaluator.__dict__["_Evaluator__expected_resource_types"], [applicable_resource_type]) 48 | 49 | def test_evaluator_handle_parameter_error(self): 50 | event = generate_event("some-msg-type") 51 | self.rule.evaluate_parameters.side_effect = InvalidParametersError("some-error") 52 | evaluator = CODE.Evaluator(self.rule) 53 | response = evaluator.handle(event, {}) 54 | resp_expected = { 55 | "internalErrorMessage": "Parameter value is invalid", 56 | "internalErrorDetails": "A ValueError was raised during the validation of the Parameter value", 57 | "customerErrorMessage": "some-error", 58 | "customerErrorCode": "InvalidParameterValueException", 59 | } 60 | self.assertDictEqual(response, resp_expected) 61 | 62 | def test_evaluator_handle_messagetype_error(self): 63 | event = generate_event("some-msg-type") 64 | self.rule.evaluate_parameters.return_value = "some-param" 65 | evaluator = CODE.Evaluator(self.rule) 66 | response = evaluator.handle(event, {}) 67 | resp_expected = { 68 | "internalErrorMessage": "Unexpected message type", 69 | "internalErrorDetails": "{'messageType': 'some-msg-type'}", 70 | "customerErrorMessage": None, 71 | "customerErrorCode": None, 72 | } 73 | self.assertDictEqual(response, resp_expected) 74 | 75 | def test_evaluator_handle_boto_error(self): 76 | event = generate_event("ScheduledNotification") 77 | self.rule.evaluate_parameters.return_value = "some-param" 78 | self.rule.evaluate_periodic.side_effect = botocore.exceptions.ClientError( 79 | {"Error": {"Code": "AccessDenied", "Message": "access-denied"}}, "operation" 80 | ) 81 | evaluator = CODE.Evaluator(self.rule) 82 | response = evaluator.handle(event, {}) 83 | resp_expected = { 84 | "internalErrorMessage": "Insufficient access to perform this action.", 85 | "internalErrorDetails": "An error occurred (AccessDenied) when calling the operation operation: access-denied", 86 | "customerErrorMessage": "access-denied", 87 | "customerErrorCode": "AccessDenied", 88 | } 89 | self.assertDictEqual(response, resp_expected) 90 | 91 | def test_evaluator_handle_internal_error(self): 92 | event = generate_event("ScheduledNotification") 93 | self.rule.evaluate_parameters.return_value = "some-param" 94 | self.rule.evaluate_periodic.side_effect = botocore.exceptions.ClientError( 95 | {"Error": {"Code": "InternalError", "Message": "some-internal-error"}}, "operation" 96 | ) 97 | evaluator = CODE.Evaluator(self.rule) 98 | response = evaluator.handle(event, {}) 99 | resp_expected = { 100 | "internalErrorMessage": "Unexpected error while completing API request", 101 | 
"internalErrorDetails": "An error occurred (InternalError) when calling the operation operation: some-internal-error", 102 | "customerErrorMessage": None, 103 | "customerErrorCode": None, 104 | } 105 | self.assertDictEqual(response, resp_expected) 106 | 107 | def test_evaluator_handle_other_error(self): 108 | event = generate_event("ScheduledNotification") 109 | self.rule.evaluate_parameters.return_value = "some-param" 110 | self.rule.evaluate_periodic.side_effect = botocore.exceptions.ClientError( 111 | {"Error": {"Code": "OtherError", "Message": "some-other-error"}}, "operation" 112 | ) 113 | evaluator = CODE.Evaluator(self.rule) 114 | response = evaluator.handle(event, {}) 115 | resp_expected = { 116 | "internalErrorMessage": "Customer error while making API request", 117 | "internalErrorDetails": "An error occurred (OtherError) when calling the operation operation: some-other-error", 118 | "customerErrorMessage": "some-other-error", 119 | "customerErrorCode": "OtherError", 120 | } 121 | self.assertDictEqual(response, resp_expected) 122 | 123 | def test_evaluator_handle_valueerror_error(self): 124 | event = generate_event("ScheduledNotification") 125 | rule = MagicMock() 126 | evaluator = CODE.Evaluator(rule) 127 | rule.evaluate_periodic.side_effect = ValueError("some-value-error") 128 | response = evaluator.handle(event, {}) 129 | resp_expected = { 130 | "internalErrorMessage": "some-value-error", 131 | "internalErrorDetails": "some-value-error", 132 | "customerErrorMessage": None, 133 | "customerErrorCode": None, 134 | } 135 | self.assertDictEqual(response, resp_expected) 136 | 137 | @patch.object(CODE, "process_periodic_evaluations_list", MagicMock(return_value=True)) 138 | def test_evaluator_handle_schedule(self): 139 | rule = MagicMock() 140 | evaluator = CODE.Evaluator(rule) 141 | rule.evaluate_periodic.return_value = True 142 | event = generate_event("ScheduledNotification") 143 | response = evaluator.handle(event, {}) 144 | self.assertTrue(response) 145 | 146 | @patch.object(CODE, "process_event_evaluations_list", MagicMock(return_value=True)) 147 | @patch.object(CODE, "is_applicable_status", MagicMock(return_value=True)) 148 | @patch.object(CODE, "get_configuration_item", MagicMock(return_value=True)) 149 | @patch.object(CODE, "is_applicable_resource_type", MagicMock(return_value=True)) 150 | def test_evaluator_handle_event_applicable(self): 151 | rule = MagicMock() 152 | evaluator = CODE.Evaluator(rule, ["resourceType"]) 153 | rule.evaluate_change.return_value = True 154 | event = generate_event("ConfigurationItemChangeNotification") 155 | response = evaluator.handle(event, {}) 156 | self.assertTrue(response) 157 | 158 | def test_evaluator_handle_event_change_triggered_no_expected_resources(self): 159 | rule = MagicMock() 160 | evaluator = CODE.Evaluator(rule, []) 161 | rule.evaluate_change.return_value = True 162 | event = generate_event("ConfigurationItemChangeNotification") 163 | 164 | with self.assertRaises(Exception) as context: 165 | evaluator.handle(event, {}) 166 | self.assertTrue("Change triggered rules must provide expected resource types" in str(context.exception)) 167 | 168 | evaluator = CODE.Evaluator(rule) 169 | with self.assertRaises(Exception) as context: 170 | evaluator.handle(event, {}) 171 | self.assertTrue("Change triggered rules must provide expected resource types" in str(context.exception)) 172 | 173 | @patch.object(CODE, "process_event_evaluations_list", MagicMock(return_value=True)) 174 | @patch.object(CODE, "is_applicable_status", 
MagicMock(return_value=False)) 175 | @patch.object(CODE, "get_configuration_item", MagicMock(return_value=True)) 176 | @patch.object( 177 | CODE, "init_event", MagicMock(return_value={"messageType": "OversizedConfigurationItemChangeNotification"}) 178 | ) 179 | def test_evaluator_handle_event_oversized_notapplicable(self): 180 | rule = MagicMock() 181 | evaluator = CODE.Evaluator(rule, ["resourceType"]) 182 | event = generate_event("OversizedConfigurationItemChangeNotification") 183 | response = evaluator.handle(event, {}) 184 | self.assertTrue(response) 185 | 186 | @patch.object(CODE, "inflate_oversized_notification", MagicMock(return_value="some-notification")) 187 | def test_init_event(self): 188 | event = generate_event("some-msg-type") 189 | response = CODE.init_event(event, {}) 190 | self.assertDictEqual(response, {"messageType": "some-msg-type"}) 191 | 192 | event = generate_event("OversizedConfigurationItemChangeNotification") 193 | response = CODE.init_event(event, CLIENT_FACTORY) 194 | self.assertEqual(response, "some-notification") 195 | 196 | 197 | def generate_event(message_type): 198 | invoking_event = {"messageType": message_type} 199 | event = { 200 | "executionRoleArn": "some-role-arn", 201 | "ruleParameters": json.dumps({"param_key": "param_value"}).encode("utf8"), 202 | "accountId": "accountId", 203 | "configRuleArn": "ruleArn", 204 | } 205 | if message_type == "ConfigurationItemChangeNotification": 206 | invoking_event.update({"configurationItem": generate_ci_data()}) 207 | elif message_type == "OversizedConfigurationItemChangeNotification": 208 | invoking_event.update({"configurationItemSummary": generate_ci_data()}) 209 | event.update({"invokingEvent": json.dumps(invoking_event).encode("utf8")}) 210 | return event 211 | 212 | 213 | def generate_ci_data(): 214 | return {"resourceType": "resourceType", "resourceId": "resourceId", "configurationStateId": "configurationStateId"} 215 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 
30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. 
If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | -------------------------------------------------------------------------------- /tst/test/rdklib_util_evaluations_test.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | import json 3 | import os 4 | import sys 5 | import unittest 6 | from importlib.abc import MetaPathFinder 7 | from unittest.mock import MagicMock, patch 8 | 9 | from rdklib.configrule import ConfigRule 10 | from rdklib.evaluation import ComplianceType, Evaluation 11 | 12 | # Get the absolute path of the current script 13 | current_script_dir = os.path.dirname(os.path.abspath(__file__)) 14 | 15 | # Get the absolute path of the project directory 16 | project_dir = os.path.abspath(os.path.join(current_script_dir, "..", "..")) 17 | 18 | # Add the project directory to the Python path 19 | sys.path.append(project_dir) 20 | 21 | CODE = importlib.import_module("rdklib.util.evaluations") 22 | 23 | CLIENT_FACTORY = MagicMock() 24 | CLIENT_MOCK = MagicMock() 25 | 26 | 27 | def mock_get_client(*args, **kwargs): 28 | return CLIENT_MOCK 29 | 30 | 31 | def return_same_value(event, client_factory, evaluations): 32 | return evaluations 33 | 34 | 35 | def return_first_item(event, client_factory, evaluations): 36 | return evaluations[0] 37 | 38 | 39 | class FailLoader(MetaPathFinder): 40 | """To raise ImportError for test. 
41 | 42 | - Answer: Mocking ImportError in Python 43 | https://stackoverflow.com/a/2481588/12721873 44 | """ 45 | 46 | def __init__(self, modules): 47 | self.modules = modules 48 | 49 | # pylint: disable-next=unused-argument 50 | def find_spec(self, fullname, path=None, target=None): 51 | """Raise ImportError for the module in self.modules.""" 52 | print(fullname) 53 | if fullname in self.modules: 54 | raise ImportError(f"Debug import failure for {fullname}") 55 | 56 | 57 | @patch.object(CLIENT_FACTORY, "build_client", MagicMock(side_effect=mock_get_client)) 58 | @patch.object(CODE, "process_evaluations", MagicMock(side_effect=return_same_value)) 59 | class rdklibUtilEvaluationsTest(unittest.TestCase): 60 | event = { 61 | "invokingEvent": json.dumps({"notificationCreationTime": "some-date-time"}), 62 | "configRuleName": "some-role-name", 63 | } 64 | 65 | def test_import_succeed(self): 66 | """Importing evaluations should import internal when importing internal succeeds.""" 67 | code = importlib.import_module("rdklib.util.evaluations") 68 | self.assertEqual(code.process_evaluations, CODE.process_evaluations) 69 | 70 | @patch("rdklib.util.internal.process_evaluations", "internal") 71 | @patch("rdklib.util.external.process_evaluations", "external") 72 | def test_import_error(self): 73 | """Importing evaluations should import external when importing internal fails. 74 | 75 | - Answer: Mocking ImportError in Python 76 | https://stackoverflow.com/a/2481588/12721873 77 | """ 78 | from rdklib.util import evaluations # pylint: disable=import-outside-toplevel 79 | 80 | module_name = "rdklib.util.internal" 81 | sys.meta_path.insert(0, FailLoader([module_name])) 82 | del sys.modules[module_name] 83 | importlib.reload(evaluations) 84 | self.assertNotEqual(evaluations.process_evaluations, "internal") 85 | self.assertEqual(evaluations.process_evaluations, "external") 86 | 87 | def test_process_event_evaluations_list(self): 88 | eval_result = [Evaluation(ComplianceType.COMPLIANT, annotation="some-annotation")] 89 | eval_result_two = [Evaluation(ComplianceType.COMPLIANT)] 90 | eval_result_combine = eval_result + eval_result_two 91 | config_item = { 92 | "resourceType": "some-resource-type", 93 | "resourceId": "some-resource-id", 94 | "configurationItemCaptureTime": "some-date-time", 95 | } 96 | 97 | # Empty evaluation list 98 | response = CODE.process_event_evaluations_list({}, {}, [], {}) 99 | self.assertFalse(response) 100 | self.assertTrue(isinstance(response, list)) 101 | 102 | # Invalid compliance results 103 | with self.assertRaises(Exception) as context: 104 | CODE.process_event_evaluations_list({}, {}, "string", {}) 105 | self.assertTrue("The return statement from evaluate_change() is not a list." in str(context.exception)) 106 | 107 | with self.assertRaises(Exception) as context: 108 | CODE.process_event_evaluations_list({}, {}, ["string", "string"], {}) 109 | self.assertTrue( 110 | "The return statement from evaluate_change() is not a list of Evaluation() object." 
111 | in str(context.exception) 112 | ) 113 | 114 | # Valid compliance results with and without annotation 115 | response = CODE.process_event_evaluations_list({}, {}, eval_result_combine, config_item) 116 | resp_expected = [ 117 | { 118 | "ComplianceResourceId": "some-resource-id", 119 | "ComplianceResourceType": "some-resource-type", 120 | "ComplianceType": "COMPLIANT", 121 | "OrderingTimestamp": "some-date-time", 122 | "Annotation": "some-annotation", 123 | }, 124 | { 125 | "ComplianceResourceId": "some-resource-id", 126 | "ComplianceResourceType": "some-resource-type", 127 | "ComplianceType": "COMPLIANT", 128 | "OrderingTimestamp": "some-date-time", 129 | }, 130 | ] 131 | for i, resp in enumerate(response): 132 | self.assertDictEqual(resp, resp_expected[i]) 133 | 134 | @patch.object(CODE, "clean_up_old_evaluations", MagicMock(side_effect=return_first_item)) 135 | def test_process_periodic_evaluations_list(self): 136 | class SomeRuleClass(ConfigRule): 137 | pass 138 | 139 | rule = SomeRuleClass() 140 | rule.delete_old_evaluations_on_scheduled_notification = False 141 | 142 | eval_result = [Evaluation(ComplianceType.COMPLIANT)] 143 | eval_result_two = [Evaluation(ComplianceType.COMPLIANT, "some-resource-id")] 144 | eval_result_three = [Evaluation(ComplianceType.COMPLIANT, "some-resource-id", "some-resource-type")] 145 | eval_result_four = [ 146 | Evaluation(ComplianceType.NON_COMPLIANT, "some-resource-id", "some-resource-type", "some-annotation") 147 | ] 148 | eval_result_combine = eval_result_three + eval_result_four 149 | 150 | # Empty evaluation list 151 | response = CODE.process_periodic_evaluations_list({}, {}, [], rule) 152 | self.assertFalse(response) 153 | self.assertTrue(isinstance(response, list)) 154 | 155 | # Invalid compliance results 156 | with self.assertRaises(Exception) as context: 157 | CODE.process_periodic_evaluations_list({}, {}, "string", rule) 158 | self.assertTrue("The return statement from evaluate_periodic() is not a list." in str(context.exception)) 159 | 160 | with self.assertRaises(Exception) as context: 161 | CODE.process_periodic_evaluations_list({}, {}, ["string", "string"], rule) 162 | self.assertTrue( 163 | "The return statement from evaluate_periodic() is not a list of Evaluation() object." 164 | in str(context.exception) 165 | ) 166 | 167 | # Missing information in evaluation 168 | with self.assertRaises(Exception) as context: 169 | CODE.process_periodic_evaluations_list(self.event, {}, eval_result, rule) 170 | self.assertTrue("Missing complianceResourceId from an evaluation result." in str(context.exception)) 171 | 172 | with self.assertRaises(Exception) as context: 173 | CODE.process_periodic_evaluations_list(self.event, {}, eval_result_two, rule) 174 | self.assertTrue("Missing complianceResourceType from an evaluation result." 
in str(context.exception)) 175 | 176 | # Valid compliance results with and without annotation 177 | response = CODE.process_periodic_evaluations_list(self.event, {}, eval_result_combine, rule) 178 | resp_expected = [ 179 | { 180 | "ComplianceResourceId": "some-resource-id", 181 | "ComplianceResourceType": "some-resource-type", 182 | "ComplianceType": "COMPLIANT", 183 | "OrderingTimestamp": "some-date-time", 184 | }, 185 | { 186 | "ComplianceResourceId": "some-resource-id", 187 | "ComplianceResourceType": "some-resource-type", 188 | "ComplianceType": "NON_COMPLIANT", 189 | "OrderingTimestamp": "some-date-time", 190 | "Annotation": "some-annotation", 191 | }, 192 | ] 193 | for i, resp in enumerate(response): 194 | self.assertDictEqual(resp, resp_expected[i]) 195 | 196 | # Test execution of clean_up_old_evaluations() 197 | rule.delete_old_evaluations_on_scheduled_notification = True 198 | response = CODE.process_periodic_evaluations_list(self.event, {}, eval_result_combine, rule) 199 | resp_expected = { 200 | "ComplianceResourceId": "some-resource-id", 201 | "ComplianceResourceType": "some-resource-type", 202 | "ComplianceType": "COMPLIANT", 203 | "OrderingTimestamp": "some-date-time", 204 | } 205 | self.assertDictEqual(response, resp_expected) 206 | 207 | def test_clean_up_old_evaluations(self): 208 | new_eval = [Evaluation(ComplianceType.COMPLIANT, "some-resource-id", "some-resource-type").get_json()] 209 | old_eval_overlapping = { 210 | "EvaluationResultIdentifier": { 211 | "EvaluationResultQualifier": {"ResourceId": "some-resource-id", "ResourceType": "some-resource-type"} 212 | } 213 | } 214 | old_eval_not_overlapping = { 215 | "EvaluationResultIdentifier": { 216 | "EvaluationResultQualifier": { 217 | "ResourceId": "some-other-resource-id", 218 | "ResourceType": "some-resource-type", 219 | } 220 | } 221 | } 222 | old_eval_list = [old_eval_overlapping, old_eval_not_overlapping] 223 | 224 | # Empty eval, empty old eval 225 | CLIENT_MOCK.get_compliance_details_by_config_rule.return_value = {"EvaluationResults": []} 226 | response = CODE.clean_up_old_evaluations(self.event, CLIENT_FACTORY, []) 227 | self.assertFalse(response) 228 | self.assertTrue(isinstance(response, list)) 229 | 230 | # Some eval, empty old eval 231 | CLIENT_MOCK.get_compliance_details_by_config_rule.return_value = {"EvaluationResults": []} 232 | response = CODE.clean_up_old_evaluations(self.event, CLIENT_FACTORY, new_eval) 233 | resp_expected = { 234 | "ComplianceResourceId": "some-resource-id", 235 | "ComplianceResourceType": "some-resource-type", 236 | "ComplianceType": "COMPLIANT", 237 | "OrderingTimestamp": None, 238 | } 239 | self.assertDictEqual(response[0], resp_expected) 240 | 241 | # Some eval, Some old eval 242 | new_eval = [Evaluation(ComplianceType.COMPLIANT, "some-resource-id", "some-resource-type").get_json()] 243 | CLIENT_MOCK.get_compliance_details_by_config_rule.return_value = {"EvaluationResults": old_eval_list} 244 | response = CODE.clean_up_old_evaluations(self.event, CLIENT_FACTORY, new_eval) 245 | resp_expected = [ 246 | { 247 | "ComplianceResourceId": "some-other-resource-id", 248 | "ComplianceResourceType": "some-resource-type", 249 | "ComplianceType": "NOT_APPLICABLE", 250 | "OrderingTimestamp": "some-date-time", 251 | }, 252 | { 253 | "ComplianceResourceId": "some-resource-id", 254 | "ComplianceResourceType": "some-resource-type", 255 | "ComplianceType": "COMPLIANT", 256 | "OrderingTimestamp": None, 257 | }, 258 | ] 259 | for i, resp in enumerate(response): 260 | self.assertDictEqual(resp, 
resp_expected[i]) 261 | -------------------------------------------------------------------------------- /tst/test/rdklib_util_service_test.py: -------------------------------------------------------------------------------- 1 | import json 2 | import unittest 3 | from unittest.mock import patch, MagicMock 4 | import botocore 5 | 6 | import importlib 7 | 8 | import sys 9 | import os 10 | 11 | # Get the absolute path of the current script 12 | current_script_dir = os.path.dirname(os.path.abspath(__file__)) 13 | 14 | # Get the absolute path of the project directory 15 | project_dir = os.path.abspath(os.path.join(current_script_dir, "..", "..")) 16 | 17 | # Add the project directory to the Python path 18 | sys.path.append(project_dir) 19 | 20 | CODE = importlib.import_module("rdklib.util.service") 21 | 22 | RESOURCE_TYPE = "some-resource-type" 23 | 24 | CONFIG_CLIENT_MOCK = MagicMock() 25 | 26 | 27 | class rdklibUtilServiceTest(unittest.TestCase): 28 | def test_check_defined(self): 29 | ref = {} 30 | with self.assertRaises(Exception) as context: 31 | CODE.check_defined(ref, "ref") 32 | self.assertTrue("Error: ref is not defined." in str(context.exception)) 33 | 34 | ref = {"key": "value"} 35 | response = CODE.check_defined(ref, "ref") 36 | self.assertEqual(response, ref) 37 | 38 | def test_is_applicable_status(self): 39 | status_false = ["ResourceDeleted", "ResourceDeletedNotRecorded", "ResourceNotRecorded"] 40 | status_true = ["OK", "ResourceDiscovered"] 41 | status_flag = True 42 | 43 | for status in status_false: 44 | config_item = build_config_item(status) 45 | response = CODE.is_applicable_status(config_item, build_normal_event(False)) 46 | self.assertFalse(response) 47 | response = CODE.is_applicable_status(config_item, build_normal_event(True)) 48 | self.assertFalse(response) 49 | 50 | for status in status_true: 51 | config_item = build_config_item(status) 52 | response = CODE.is_applicable_status(config_item, build_normal_event(False)) 53 | self.assertTrue(response) 54 | response = CODE.is_applicable_status(config_item, build_normal_event(True)) 55 | self.assertFalse(response) 56 | 57 | for status in status_false: 58 | config_item = build_config_item(status) 59 | response = CODE.is_applicable_status(config_item, build_normal_event(False), is_applicable=status_flag) 60 | self.assertTrue(response) 61 | response = CODE.is_applicable_status(config_item, build_normal_event(True), is_applicable=status_flag) 62 | self.assertTrue(response) 63 | 64 | for status in status_true: 65 | config_item = build_config_item(status) 66 | response = CODE.is_applicable_status(config_item, build_normal_event(False), is_applicable=status_flag) 67 | self.assertTrue(response) 68 | response = CODE.is_applicable_status(config_item, build_normal_event(True), is_applicable=status_flag) 69 | self.assertTrue(response) 70 | 71 | def test_is_applicable_resource_type(self): 72 | config_item = build_config_item("OK") 73 | 74 | response = CODE.is_applicable_resource_type(config_item, ["other-resource-type"]) 75 | self.assertFalse(response) 76 | 77 | response = CODE.is_applicable_resource_type(config_item, [RESOURCE_TYPE]) 78 | self.assertTrue(response) 79 | 80 | response = CODE.is_applicable_resource_type(config_item, []) 81 | self.assertFalse(response) 82 | 83 | def test_get_configuration_item(self): 84 | invoke_event = {"configurationItem": "some-ci"} 85 | response = CODE.get_configuration_item(invoke_event) 86 | self.assertEqual(response, "some-ci") 87 | 88 | def test_is_internal_error(self): 89 | exception = 
Exception("some-error") 90 | response = CODE.is_internal_error(exception) 91 | self.assertTrue(response) 92 | 93 | exception = botocore.exceptions.ClientError({"Error": {"Code": "500"}}, "operation") 94 | response = CODE.is_internal_error(exception) 95 | self.assertTrue(response) 96 | 97 | exception = botocore.exceptions.ClientError({"Error": {"Code": "InternalError"}}, "operation") 98 | response = CODE.is_internal_error(exception) 99 | self.assertTrue(response) 100 | 101 | exception = botocore.exceptions.ClientError({"Error": {"Code": "ServiceError"}}, "operation") 102 | response = CODE.is_internal_error(exception) 103 | self.assertTrue(response) 104 | 105 | exception = botocore.exceptions.ClientError({"Error": {"Code": "600"}}, "operation") 106 | response = CODE.is_internal_error(exception) 107 | self.assertFalse(response) 108 | 109 | exception = botocore.exceptions.ClientError({"Error": {"Code": "some-other-code"}}, "operation") 110 | response = CODE.is_internal_error(exception) 111 | self.assertFalse(response) 112 | 113 | def test_build_error_response(self): 114 | response = CODE.build_error_response("int_msg", "int_detail", "ext_code", "ext_msg") 115 | resp_expected = { 116 | "internalErrorMessage": "int_msg", 117 | "internalErrorDetails": "int_detail", 118 | "customerErrorMessage": "ext_msg", 119 | "customerErrorCode": "ext_code", 120 | } 121 | self.assertDictEqual(response, resp_expected) 122 | 123 | def test_build_internal_error_response(self): 124 | response = CODE.build_internal_error_response("int_msg", "int_detail") 125 | resp_expected = { 126 | "internalErrorMessage": "int_msg", 127 | "internalErrorDetails": "int_detail", 128 | "customerErrorMessage": None, 129 | "customerErrorCode": None, 130 | } 131 | self.assertDictEqual(response, resp_expected) 132 | 133 | def test_build_parameters_value_error_response(self): 134 | response = CODE.build_parameters_value_error_response("msg") 135 | resp_expected = { 136 | "internalErrorMessage": "Parameter value is invalid", 137 | "internalErrorDetails": "A ValueError was raised during the validation of the Parameter value", 138 | "customerErrorMessage": "msg", 139 | "customerErrorCode": "InvalidParameterValueException", 140 | } 141 | self.assertDictEqual(response, resp_expected) 142 | 143 | def test_is_oversized_changed_notification(self): 144 | message_type = "OversizedConfigurationItemChangeNotification" 145 | response = CODE.is_oversized_changed_notification(message_type) 146 | self.assertTrue(response) 147 | 148 | message_type = "other" 149 | response = CODE.is_oversized_changed_notification(message_type) 150 | self.assertFalse(response) 151 | 152 | def test_is_scheduled_notification(self): 153 | message_type = "ScheduledNotification" 154 | response = CODE.is_scheduled_notification(message_type) 155 | self.assertTrue(response) 156 | 157 | message_type = "other" 158 | response = CODE.is_scheduled_notification(message_type) 159 | self.assertFalse(response) 160 | 161 | def test_inflate_oversized_notification(self): 162 | CODE.get_resource_config_history = MagicMock(return_value=build_grh_response()) 163 | CODE.convert_into_notification_config_item = MagicMock(return_value=build_config_item("some-type")) 164 | invoke_event = json.loads(build_normal_event(True)["invokingEvent"]) 165 | response = CODE.inflate_oversized_notification({}, invoke_event) 166 | resp_expected = { 167 | "configurationItem": {"configurationItemStatus": "some-type", "resourceType": "some-resource-type"}, 168 | "notificationCreationTime": "some-time", 169 | 
"messageType": "ConfigurationItemChangeNotification", 170 | "recordVersion": "some-version", 171 | } 172 | self.assertDictEqual(response, resp_expected) 173 | 174 | @patch.object(CONFIG_CLIENT_MOCK, "get_resource_config_history", MagicMock(return_value="invoked")) 175 | def test_get_resource_config_history(self): 176 | invoke_event = json.loads(build_normal_event(True)["invokingEvent"]) 177 | response = CODE.get_resource_config_history(CONFIG_CLIENT_MOCK, invoke_event) 178 | self.assertEqual(response, "invoked") 179 | 180 | def test_convert_into_notification_config_item(self): 181 | response = CODE.convert_into_notification_config_item(build_grh_response()["configurationItems"][0]) 182 | resp_expected = { 183 | "configurationItemCaptureTime": "configurationItemCaptureTime", 184 | "configurationStateId": "configurationStateId", 185 | "awsAccountId": "accountId", 186 | "configurationItemStatus": "configurationItemStatus", 187 | "resourceType": "AWS::ResourceType", 188 | "resourceId": "resourceId", 189 | "resourceName": "resourceName", 190 | "ARN": "arn", 191 | "awsRegion": "awsRegion", 192 | "availabilityZone": "availabilityZone", 193 | "configurationStateMd5Hash": "configurationItemMD5Hash", 194 | "resourceCreationTime": "resourceCreationTime", 195 | "relatedEvents": ["relatedEvent"], 196 | "tags": {"tag": "tag"}, 197 | "relationships": [ 198 | { 199 | "name": "relationshipName", 200 | "resourceId": "resourceId", 201 | "resourceName": "resourceName", 202 | "resourceType": "resourceType", 203 | } 204 | ], 205 | "configuration": {"configuration": "configuration"}, 206 | "supplementaryConfiguration": {"supplementaryAttribute": {"supplementaryKey": "supplementaryValue"}}, 207 | } 208 | self.assertDictEqual(response, resp_expected) 209 | 210 | 211 | def build_grh_response(): 212 | return { 213 | "configurationItems": [ 214 | { 215 | "version": "version", 216 | "accountId": "accountId", 217 | "configurationItemCaptureTime": "configurationItemCaptureTime", 218 | "configurationItemStatus": "configurationItemStatus", 219 | "configurationStateId": "configurationStateId", 220 | "configurationItemMD5Hash": "configurationItemMD5Hash", 221 | "arn": "arn", 222 | "resourceType": "AWS::ResourceType", 223 | "resourceId": "resourceId", 224 | "resourceName": "resourceName", 225 | "awsRegion": "awsRegion", 226 | "availabilityZone": "availabilityZone", 227 | "resourceCreationTime": "resourceCreationTime", 228 | "tags": {"tag": "tag"}, 229 | "relatedEvents": [ 230 | "relatedEvent", 231 | ], 232 | "relationships": [ 233 | { 234 | "resourceType": "resourceType", 235 | "resourceId": "resourceId", 236 | "resourceName": "resourceName", 237 | "relationshipName": "relationshipName", 238 | }, 239 | ], 240 | "configuration": '{"configuration": "configuration"}', 241 | "supplementaryConfiguration": {"supplementaryAttribute": '{"supplementaryKey":"supplementaryValue"}'}, 242 | } 243 | ] 244 | } 245 | 246 | 247 | def build_config_item(message_type): 248 | return {"configurationItemStatus": message_type, "resourceType": RESOURCE_TYPE} 249 | 250 | 251 | def build_normal_event(event_bool): 252 | return { 253 | "invokingEvent": json.dumps( 254 | { 255 | "configurationItemSummary": {"resourceType": "some-resource-type", "resourceId": "some-resource-id"}, 256 | "messageType": "ConfigurationItemChangeNotification", 257 | "notificationCreationTime": "some-time", 258 | "recordVersion": "some-version", 259 | "configurationItem": {}, 260 | } 261 | ), 262 | "executionRoleArn": "roleArn", 263 | "eventLeftScope": event_bool, 264 | } 265 | 
-------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | cached-property==1.5.2 ; python_full_version >= "3.7.2" and python_version < "3.8" \ 2 | --hash=sha256:9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130 \ 3 | --hash=sha256:df4f613cf7ad9a588cc381aaf4a512d26265ecebd5eb9e1ba12f1319eb85a6a0 4 | certifi==2022.12.7 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 5 | --hash=sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3 \ 6 | --hash=sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18 7 | charset-normalizer==2.1.1 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 8 | --hash=sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845 \ 9 | --hash=sha256:83e9a75d1911279afd89352c68b45348559d1fc0506b054b346651b5e7fee29f 10 | click==8.1.3 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 11 | --hash=sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e \ 12 | --hash=sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48 13 | colorama==0.4.6 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 14 | --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \ 15 | --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6 16 | ghp-import==2.1.0 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 17 | --hash=sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619 \ 18 | --hash=sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343 19 | griffe==0.28.2 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 20 | --hash=sha256:a471498b0b9505c721ea0e652fd77c97df1aeb56c4eb8c93d24bb1140da4216d \ 21 | --hash=sha256:bde3a3dfa301a4b113c7fac3b2be45e5723bc50cda4c9cfe13f43c447c9aa5d1 22 | idna==3.4 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 23 | --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \ 24 | --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2 25 | importlib-metadata==4.13.0 ; python_full_version >= "3.7.2" and python_version < "3.10" \ 26 | --hash=sha256:8a8a81bcf996e74fee46f0d16bd3eaa382a7eb20fd82445c3ad11f4090334116 \ 27 | --hash=sha256:dd0173e8f150d6815e098fd354f6414b0f079af4644ddfe90c71e2fc6174346d 28 | jinja2==3.1.2 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 29 | --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \ 30 | --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61 31 | markdown-include==0.8.1 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 32 | --hash=sha256:1d0623e0fc2757c38d35df53752768356162284259d259c486b4ab6285cdbbe3 \ 33 | --hash=sha256:32f0635b9cfef46997b307e2430022852529f7a5b87c0075c504283e7cc7db53 34 | markdown==3.3.7 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 35 | --hash=sha256:cbb516f16218e643d8e0a95b309f77eb118cb138d39a4f27851e6a63581db874 \ 36 | --hash=sha256:f5da449a6e1c989a4cea2631aa8ee67caa5a2ef855d551c88f9e309f4634c621 37 | markupsafe==2.1.2 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 38 | --hash=sha256:0576fe974b40a400449768941d5d0858cc624e3249dfd1e0c33674e5c7ca7aed \ 39 | 
--hash=sha256:085fd3201e7b12809f9e6e9bc1e5c96a368c8523fad5afb02afe3c051ae4afcc \ 40 | --hash=sha256:090376d812fb6ac5f171e5938e82e7f2d7adc2b629101cec0db8b267815c85e2 \ 41 | --hash=sha256:0b462104ba25f1ac006fdab8b6a01ebbfbce9ed37fd37fd4acd70c67c973e460 \ 42 | --hash=sha256:137678c63c977754abe9086a3ec011e8fd985ab90631145dfb9294ad09c102a7 \ 43 | --hash=sha256:1bea30e9bf331f3fef67e0a3877b2288593c98a21ccb2cf29b74c581a4eb3af0 \ 44 | --hash=sha256:22152d00bf4a9c7c83960521fc558f55a1adbc0631fbb00a9471e097b19d72e1 \ 45 | --hash=sha256:22731d79ed2eb25059ae3df1dfc9cb1546691cc41f4e3130fe6bfbc3ecbbecfa \ 46 | --hash=sha256:2298c859cfc5463f1b64bd55cb3e602528db6fa0f3cfd568d3605c50678f8f03 \ 47 | --hash=sha256:28057e985dace2f478e042eaa15606c7efccb700797660629da387eb289b9323 \ 48 | --hash=sha256:2e7821bffe00aa6bd07a23913b7f4e01328c3d5cc0b40b36c0bd81d362faeb65 \ 49 | --hash=sha256:2ec4f2d48ae59bbb9d1f9d7efb9236ab81429a764dedca114f5fdabbc3788013 \ 50 | --hash=sha256:340bea174e9761308703ae988e982005aedf427de816d1afe98147668cc03036 \ 51 | --hash=sha256:40627dcf047dadb22cd25ea7ecfe9cbf3bbbad0482ee5920b582f3809c97654f \ 52 | --hash=sha256:40dfd3fefbef579ee058f139733ac336312663c6706d1163b82b3003fb1925c4 \ 53 | --hash=sha256:4cf06cdc1dda95223e9d2d3c58d3b178aa5dacb35ee7e3bbac10e4e1faacb419 \ 54 | --hash=sha256:50c42830a633fa0cf9e7d27664637532791bfc31c731a87b202d2d8ac40c3ea2 \ 55 | --hash=sha256:55f44b440d491028addb3b88f72207d71eeebfb7b5dbf0643f7c023ae1fba619 \ 56 | --hash=sha256:608e7073dfa9e38a85d38474c082d4281f4ce276ac0010224eaba11e929dd53a \ 57 | --hash=sha256:63ba06c9941e46fa389d389644e2d8225e0e3e5ebcc4ff1ea8506dce646f8c8a \ 58 | --hash=sha256:65608c35bfb8a76763f37036547f7adfd09270fbdbf96608be2bead319728fcd \ 59 | --hash=sha256:665a36ae6f8f20a4676b53224e33d456a6f5a72657d9c83c2aa00765072f31f7 \ 60 | --hash=sha256:6d6607f98fcf17e534162f0709aaad3ab7a96032723d8ac8750ffe17ae5a0666 \ 61 | --hash=sha256:7313ce6a199651c4ed9d7e4cfb4aa56fe923b1adf9af3b420ee14e6d9a73df65 \ 62 | --hash=sha256:7668b52e102d0ed87cb082380a7e2e1e78737ddecdde129acadb0eccc5423859 \ 63 | --hash=sha256:7df70907e00c970c60b9ef2938d894a9381f38e6b9db73c5be35e59d92e06625 \ 64 | --hash=sha256:7e007132af78ea9df29495dbf7b5824cb71648d7133cf7848a2a5dd00d36f9ff \ 65 | --hash=sha256:835fb5e38fd89328e9c81067fd642b3593c33e1e17e2fdbf77f5676abb14a156 \ 66 | --hash=sha256:8bca7e26c1dd751236cfb0c6c72d4ad61d986e9a41bbf76cb445f69488b2a2bd \ 67 | --hash=sha256:8db032bf0ce9022a8e41a22598eefc802314e81b879ae093f36ce9ddf39ab1ba \ 68 | --hash=sha256:99625a92da8229df6d44335e6fcc558a5037dd0a760e11d84be2260e6f37002f \ 69 | --hash=sha256:9cad97ab29dfc3f0249b483412c85c8ef4766d96cdf9dcf5a1e3caa3f3661cf1 \ 70 | --hash=sha256:a4abaec6ca3ad8660690236d11bfe28dfd707778e2442b45addd2f086d6ef094 \ 71 | --hash=sha256:a6e40afa7f45939ca356f348c8e23048e02cb109ced1eb8420961b2f40fb373a \ 72 | --hash=sha256:a6f2fcca746e8d5910e18782f976489939d54a91f9411c32051b4aab2bd7c513 \ 73 | --hash=sha256:a806db027852538d2ad7555b203300173dd1b77ba116de92da9afbc3a3be3eed \ 74 | --hash=sha256:abcabc8c2b26036d62d4c746381a6f7cf60aafcc653198ad678306986b09450d \ 75 | --hash=sha256:b8526c6d437855442cdd3d87eede9c425c4445ea011ca38d937db299382e6fa3 \ 76 | --hash=sha256:bb06feb762bade6bf3c8b844462274db0c76acc95c52abe8dbed28ae3d44a147 \ 77 | --hash=sha256:c0a33bc9f02c2b17c3ea382f91b4db0e6cde90b63b296422a939886a7a80de1c \ 78 | --hash=sha256:c4a549890a45f57f1ebf99c067a4ad0cb423a05544accaf2b065246827ed9603 \ 79 | --hash=sha256:ca244fa73f50a800cf8c3ebf7fd93149ec37f5cb9596aa8873ae2c1d23498601 \ 80 | 
--hash=sha256:cf877ab4ed6e302ec1d04952ca358b381a882fbd9d1b07cccbfd61783561f98a \ 81 | --hash=sha256:d9d971ec1e79906046aa3ca266de79eac42f1dbf3612a05dc9368125952bd1a1 \ 82 | --hash=sha256:da25303d91526aac3672ee6d49a2f3db2d9502a4a60b55519feb1a4c7714e07d \ 83 | --hash=sha256:e55e40ff0cc8cc5c07996915ad367fa47da6b3fc091fdadca7f5403239c5fec3 \ 84 | --hash=sha256:f03a532d7dee1bed20bc4884194a16160a2de9ffc6354b3878ec9682bb623c54 \ 85 | --hash=sha256:f1cd098434e83e656abf198f103a8207a8187c0fc110306691a2e94a78d0abb2 \ 86 | --hash=sha256:f2bfb563d0211ce16b63c7cb9395d2c682a23187f54c3d79bfec33e6705473c6 \ 87 | --hash=sha256:f8ffb705ffcf5ddd0e80b65ddf7bed7ee4f5a441ea7d3419e861a12eaf41af58 88 | mergedeep==1.3.4 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 89 | --hash=sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8 \ 90 | --hash=sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307 91 | mkdocs-autorefs==0.4.1 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 92 | --hash=sha256:70748a7bd025f9ecd6d6feeba8ba63f8e891a1af55f48e366d6d6e78493aba84 \ 93 | --hash=sha256:a2248a9501b29dc0cc8ba4c09f4f47ff121945f6ce33d760f145d6f89d313f5b 94 | mkdocs-material-extensions==1.1.1 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 95 | --hash=sha256:9c003da71e2cc2493d910237448c672e00cefc800d3d6ae93d2fc69979e3bd93 \ 96 | --hash=sha256:e41d9f38e4798b6617ad98ca8f7f1157b1e4385ac1459ca1e4ea219b556df945 97 | mkdocs-material==9.1.14 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 98 | --hash=sha256:1ae74cc5464ef2f64574d4884512efed7f4db386fb9bc6af20fd427d7a702f49 \ 99 | --hash=sha256:b56a9f955ed32d38333715cbbf68ce38f683bf38610c65094fa4ef2db9f08bcd 100 | mkdocs==1.4.3 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 101 | --hash=sha256:5955093bbd4dd2e9403c5afaf57324ad8b04f16886512a3ee6ef828956481c57 \ 102 | --hash=sha256:6ee46d309bda331aac915cd24aab882c179a933bd9e77b80ce7d2eaaa3f689dd 103 | mkdocstrings-python==1.0.0 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 104 | --hash=sha256:b89d849df990204f909d5452548b6936a185f912da06208a93909bebe25d6e67 \ 105 | --hash=sha256:c59d67009a7a85172f4da990d8523e95606b6a1ff93a22a2351ad3b5f8cafed1 106 | mkdocstrings==0.21.2 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 107 | --hash=sha256:304e56a2e90595708a38a13a278e538a67ad82052dd5c8b71f77a604a4f3d911 \ 108 | --hash=sha256:949ef8da92df9d692ca07be50616459a6b536083a25520fd54b00e8814ce019b 109 | packaging==21.3 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 110 | --hash=sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb \ 111 | --hash=sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522 112 | pygments==2.15.1 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 113 | --hash=sha256:8ace4d3c1dd481894b2005f560ead0f9f19ee64fe983366be1a21e171d12775c \ 114 | --hash=sha256:db2db3deb4b4179f399a09054b023b6a586b76499d36965813c71aa8ed7b5fd1 115 | pymdown-extensions==10.0.1 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 116 | --hash=sha256:ae66d84013c5d027ce055693e09a4628b67e9dec5bce05727e45b0918e36f274 \ 117 | --hash=sha256:b44e1093a43b8a975eae17b03c3a77aad4681b3b56fce60ce746dbef1944c8cb 118 | pyparsing==3.0.9 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 119 | --hash=sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb \ 120 | 
--hash=sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc 121 | python-dateutil==2.8.2 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 122 | --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \ 123 | --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9 124 | pyyaml-env-tag==0.1 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 125 | --hash=sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb \ 126 | --hash=sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069 127 | pyyaml==6.0 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 128 | --hash=sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf \ 129 | --hash=sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293 \ 130 | --hash=sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b \ 131 | --hash=sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57 \ 132 | --hash=sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b \ 133 | --hash=sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4 \ 134 | --hash=sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07 \ 135 | --hash=sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba \ 136 | --hash=sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9 \ 137 | --hash=sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287 \ 138 | --hash=sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513 \ 139 | --hash=sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0 \ 140 | --hash=sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782 \ 141 | --hash=sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0 \ 142 | --hash=sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92 \ 143 | --hash=sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f \ 144 | --hash=sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2 \ 145 | --hash=sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc \ 146 | --hash=sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1 \ 147 | --hash=sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c \ 148 | --hash=sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86 \ 149 | --hash=sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4 \ 150 | --hash=sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c \ 151 | --hash=sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34 \ 152 | --hash=sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b \ 153 | --hash=sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d \ 154 | --hash=sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c \ 155 | --hash=sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb \ 156 | --hash=sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7 \ 157 | --hash=sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737 \ 158 | --hash=sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3 \ 159 | --hash=sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d \ 160 | 
--hash=sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358 \ 161 | --hash=sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53 \ 162 | --hash=sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78 \ 163 | --hash=sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803 \ 164 | --hash=sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a \ 165 | --hash=sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f \ 166 | --hash=sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174 \ 167 | --hash=sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5 168 | regex==2023.5.5 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 169 | --hash=sha256:02f4541550459c08fdd6f97aa4e24c6f1932eec780d58a2faa2068253df7d6ff \ 170 | --hash=sha256:0a69cf0c00c4d4a929c6c7717fd918414cab0d6132a49a6d8fc3ded1988ed2ea \ 171 | --hash=sha256:0bbd5dcb19603ab8d2781fac60114fb89aee8494f4505ae7ad141a3314abb1f9 \ 172 | --hash=sha256:10250a093741ec7bf74bcd2039e697f519b028518f605ff2aa7ac1e9c9f97423 \ 173 | --hash=sha256:10374c84ee58c44575b667310d5bbfa89fb2e64e52349720a0182c0017512f6c \ 174 | --hash=sha256:1189fbbb21e2c117fda5303653b61905aeeeea23de4a94d400b0487eb16d2d60 \ 175 | --hash=sha256:1307aa4daa1cbb23823d8238e1f61292fd07e4e5d8d38a6efff00b67a7cdb764 \ 176 | --hash=sha256:144b5b017646b5a9392a5554a1e5db0000ae637be4971c9747566775fc96e1b2 \ 177 | --hash=sha256:171c52e320fe29260da550d81c6b99f6f8402450dc7777ef5ced2e848f3b6f8f \ 178 | --hash=sha256:18196c16a584619c7c1d843497c069955d7629ad4a3fdee240eb347f4a2c9dbe \ 179 | --hash=sha256:18f05d14f14a812fe9723f13afafefe6b74ca042d99f8884e62dbd34dcccf3e2 \ 180 | --hash=sha256:1ecf3dcff71f0c0fe3e555201cbe749fa66aae8d18f80d2cc4de8e66df37390a \ 181 | --hash=sha256:21e90a288e6ba4bf44c25c6a946cb9b0f00b73044d74308b5e0afd190338297c \ 182 | --hash=sha256:23d86ad2121b3c4fc78c58f95e19173790e22ac05996df69b84e12da5816cb17 \ 183 | --hash=sha256:256f7f4c6ba145f62f7a441a003c94b8b1af78cee2cccacfc1e835f93bc09426 \ 184 | --hash=sha256:290fd35219486dfbc00b0de72f455ecdd63e59b528991a6aec9fdfc0ce85672e \ 185 | --hash=sha256:2e9c4f778514a560a9c9aa8e5538bee759b55f6c1dcd35613ad72523fd9175b8 \ 186 | --hash=sha256:338994d3d4ca4cf12f09822e025731a5bdd3a37aaa571fa52659e85ca793fb67 \ 187 | --hash=sha256:33d430a23b661629661f1fe8395be2004006bc792bb9fc7c53911d661b69dd7e \ 188 | --hash=sha256:385992d5ecf1a93cb85adff2f73e0402dd9ac29b71b7006d342cc920816e6f32 \ 189 | --hash=sha256:3d45864693351c15531f7e76f545ec35000d50848daa833cead96edae1665559 \ 190 | --hash=sha256:40005cbd383438aecf715a7b47fe1e3dcbc889a36461ed416bdec07e0ef1db66 \ 191 | --hash=sha256:4035d6945cb961c90c3e1c1ca2feb526175bcfed44dfb1cc77db4fdced060d3e \ 192 | --hash=sha256:445d6f4fc3bd9fc2bf0416164454f90acab8858cd5a041403d7a11e3356980e8 \ 193 | --hash=sha256:48c9ec56579d4ba1c88f42302194b8ae2350265cb60c64b7b9a88dcb7fbde309 \ 194 | --hash=sha256:4a5059bd585e9e9504ef9c07e4bc15b0a621ba20504388875d66b8b30a5c4d18 \ 195 | --hash=sha256:4a6e4b0e0531223f53bad07ddf733af490ba2b8367f62342b92b39b29f72735a \ 196 | --hash=sha256:4b870b6f632fc74941cadc2a0f3064ed8409e6f8ee226cdfd2a85ae50473aa94 \ 197 | --hash=sha256:50fd2d9b36938d4dcecbd684777dd12a407add4f9f934f235c66372e630772b0 \ 198 | --hash=sha256:53e22e4460f0245b468ee645156a4f84d0fc35a12d9ba79bd7d79bdcd2f9629d \ 199 | --hash=sha256:586a011f77f8a2da4b888774174cd266e69e917a67ba072c7fc0e91878178a80 \ 200 | 
--hash=sha256:59597cd6315d3439ed4b074febe84a439c33928dd34396941b4d377692eca810 \ 201 | --hash=sha256:59e4b729eae1a0919f9e4c0fc635fbcc9db59c74ad98d684f4877be3d2607dd6 \ 202 | --hash=sha256:5a0f874ee8c0bc820e649c900243c6d1e6dc435b81da1492046716f14f1a2a96 \ 203 | --hash=sha256:5ac2b7d341dc1bd102be849d6dd33b09701223a851105b2754339e390be0627a \ 204 | --hash=sha256:5e3f4468b8c6fd2fd33c218bbd0a1559e6a6fcf185af8bb0cc43f3b5bfb7d636 \ 205 | --hash=sha256:6164d4e2a82f9ebd7752a06bd6c504791bedc6418c0196cd0a23afb7f3e12b2d \ 206 | --hash=sha256:6893544e06bae009916a5658ce7207e26ed17385149f35a3125f5259951f1bbe \ 207 | --hash=sha256:690a17db524ee6ac4a27efc5406530dd90e7a7a69d8360235323d0e5dafb8f5b \ 208 | --hash=sha256:6b8d0c153f07a953636b9cdb3011b733cadd4178123ef728ccc4d5969e67f3c2 \ 209 | --hash=sha256:72a28979cc667e5f82ef433db009184e7ac277844eea0f7f4d254b789517941d \ 210 | --hash=sha256:72aa4746993a28c841e05889f3f1b1e5d14df8d3daa157d6001a34c98102b393 \ 211 | --hash=sha256:732176f5427e72fa2325b05c58ad0b45af341c459910d766f814b0584ac1f9ac \ 212 | --hash=sha256:7918a1b83dd70dc04ab5ed24c78ae833ae8ea228cef84e08597c408286edc926 \ 213 | --hash=sha256:7923470d6056a9590247ff729c05e8e0f06bbd4efa6569c916943cb2d9b68b91 \ 214 | --hash=sha256:7d76a8a1fc9da08296462a18f16620ba73bcbf5909e42383b253ef34d9d5141e \ 215 | --hash=sha256:811040d7f3dd9c55eb0d8b00b5dcb7fd9ae1761c454f444fd9f37fe5ec57143a \ 216 | --hash=sha256:821a88b878b6589c5068f4cc2cfeb2c64e343a196bc9d7ac68ea8c2a776acd46 \ 217 | --hash=sha256:84397d3f750d153ebd7f958efaa92b45fea170200e2df5e0e1fd4d85b7e3f58a \ 218 | --hash=sha256:844671c9c1150fcdac46d43198364034b961bd520f2c4fdaabfc7c7d7138a2dd \ 219 | --hash=sha256:890a09cb0a62198bff92eda98b2b507305dd3abf974778bae3287f98b48907d3 \ 220 | --hash=sha256:8f08276466fedb9e36e5193a96cb944928301152879ec20c2d723d1031cd4ddd \ 221 | --hash=sha256:8f5e06df94fff8c4c85f98c6487f6636848e1dc85ce17ab7d1931df4a081f657 \ 222 | --hash=sha256:921473a93bcea4d00295799ab929522fc650e85c6b9f27ae1e6bb32a790ea7d3 \ 223 | --hash=sha256:941b3f1b2392f0bcd6abf1bc7a322787d6db4e7457be6d1ffd3a693426a755f2 \ 224 | --hash=sha256:9b320677521aabf666cdd6e99baee4fb5ac3996349c3b7f8e7c4eee1c00dfe3a \ 225 | --hash=sha256:9c3efee9bb53cbe7b285760c81f28ac80dc15fa48b5fe7e58b52752e642553f1 \ 226 | --hash=sha256:9fda3e50abad8d0f48df621cf75adc73c63f7243cbe0e3b2171392b445401550 \ 227 | --hash=sha256:a4c5da39bca4f7979eefcbb36efea04471cd68db2d38fcbb4ee2c6d440699833 \ 228 | --hash=sha256:a56c18f21ac98209da9c54ae3ebb3b6f6e772038681d6cb43b8d53da3b09ee81 \ 229 | --hash=sha256:a623564d810e7a953ff1357f7799c14bc9beeab699aacc8b7ab7822da1e952b8 \ 230 | --hash=sha256:a8906669b03c63266b6a7693d1f487b02647beb12adea20f8840c1a087e2dfb5 \ 231 | --hash=sha256:a99757ad7fe5c8a2bb44829fc57ced11253e10f462233c1255fe03888e06bc19 \ 232 | --hash=sha256:aa7d032c1d84726aa9edeb6accf079b4caa87151ca9fabacef31fa028186c66d \ 233 | --hash=sha256:aad5524c2aedaf9aa14ef1bc9327f8abd915699dea457d339bebbe2f0d218f86 \ 234 | --hash=sha256:afb1c70ec1e594a547f38ad6bf5e3d60304ce7539e677c1429eebab115bce56e \ 235 | --hash=sha256:b6365703e8cf1644b82104cdd05270d1a9f043119a168d66c55684b1b557d008 \ 236 | --hash=sha256:b8b942d8b3ce765dbc3b1dad0a944712a89b5de290ce8f72681e22b3c55f3cc8 \ 237 | --hash=sha256:ba73a14e9c8f9ac409863543cde3290dba39098fc261f717dc337ea72d3ebad2 \ 238 | --hash=sha256:bd7b68fd2e79d59d86dcbc1ccd6e2ca09c505343445daaa4e07f43c8a9cc34da \ 239 | --hash=sha256:bd966475e963122ee0a7118ec9024388c602d12ac72860f6eea119a3928be053 \ 240 | 
--hash=sha256:c2ce65bdeaf0a386bb3b533a28de3994e8e13b464ac15e1e67e4603dd88787fa \ 241 | --hash=sha256:c64d5abe91a3dfe5ff250c6bb267ef00dbc01501518225b45a5f9def458f31fb \ 242 | --hash=sha256:c8c143a65ce3ca42e54d8e6fcaf465b6b672ed1c6c90022794a802fb93105d22 \ 243 | --hash=sha256:cd46f30e758629c3ee91713529cfbe107ac50d27110fdcc326a42ce2acf4dafc \ 244 | --hash=sha256:ced02e3bd55e16e89c08bbc8128cff0884d96e7f7a5633d3dc366b6d95fcd1d6 \ 245 | --hash=sha256:cf123225945aa58b3057d0fba67e8061c62d14cc8a4202630f8057df70189051 \ 246 | --hash=sha256:d19e57f888b00cd04fc38f5e18d0efbd91ccba2d45039453ab2236e6eec48d4d \ 247 | --hash=sha256:d1cbe6b5be3b9b698d8cc4ee4dee7e017ad655e83361cd0ea8e653d65e469468 \ 248 | --hash=sha256:db09e6c18977a33fea26fe67b7a842f706c67cf8bda1450974d0ae0dd63570df \ 249 | --hash=sha256:de2f780c3242ea114dd01f84848655356af4dd561501896c751d7b885ea6d3a1 \ 250 | --hash=sha256:e2205a81f815b5bb17e46e74cc946c575b484e5f0acfcb805fb252d67e22938d \ 251 | --hash=sha256:e645c757183ee0e13f0bbe56508598e2d9cd42b8abc6c0599d53b0d0b8dd1479 \ 252 | --hash=sha256:f2910502f718828cecc8beff004917dcf577fc5f8f5dd40ffb1ea7612124547b \ 253 | --hash=sha256:f764e4dfafa288e2eba21231f455d209f4709436baeebb05bdecfb5d8ddc3d35 \ 254 | --hash=sha256:f83fe9e10f9d0b6cf580564d4d23845b9d692e4c91bd8be57733958e4c602956 \ 255 | --hash=sha256:fb2b495dd94b02de8215625948132cc2ea360ae84fe6634cd19b6567709c8ae2 \ 256 | --hash=sha256:fee0016cc35a8a91e8cc9312ab26a6fe638d484131a7afa79e1ce6165328a135 257 | requests==2.31.0 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 258 | --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \ 259 | --hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1 260 | six==1.16.0 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 261 | --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ 262 | --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 263 | typing-extensions==4.4.0 ; python_full_version >= "3.7.2" and python_version < "3.10" \ 264 | --hash=sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa \ 265 | --hash=sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e 266 | urllib3==1.26.13 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 267 | --hash=sha256:47cc05d99aaa09c9e72ed5809b60e7ba354e64b59c9c173ac3018642d8bb41fc \ 268 | --hash=sha256:c083dd0dce68dbfbe1129d5271cb90f9447dea7d52097c6e0126120c521ddea8 269 | watchdog==3.0.0 ; python_full_version >= "3.7.2" and python_full_version < "4.0.0" \ 270 | --hash=sha256:0e06ab8858a76e1219e68c7573dfeba9dd1c0219476c5a44d5333b01d7e1743a \ 271 | --hash=sha256:13bbbb462ee42ec3c5723e1205be8ced776f05b100e4737518c67c8325cf6100 \ 272 | --hash=sha256:233b5817932685d39a7896b1090353fc8efc1ef99c9c054e46c8002561252fb8 \ 273 | --hash=sha256:25f70b4aa53bd743729c7475d7ec41093a580528b100e9a8c5b5efe8899592fc \ 274 | --hash=sha256:2b57a1e730af3156d13b7fdddfc23dea6487fceca29fc75c5a868beed29177ae \ 275 | --hash=sha256:336adfc6f5cc4e037d52db31194f7581ff744b67382eb6021c868322e32eef41 \ 276 | --hash=sha256:3aa7f6a12e831ddfe78cdd4f8996af9cf334fd6346531b16cec61c3b3c0d8da0 \ 277 | --hash=sha256:3ed7c71a9dccfe838c2f0b6314ed0d9b22e77d268c67e015450a29036a81f60f \ 278 | --hash=sha256:4c9956d27be0bb08fc5f30d9d0179a855436e655f046d288e2bcc11adfae893c \ 279 | --hash=sha256:4d98a320595da7a7c5a18fc48cb633c2e73cda78f93cac2ef42d42bf609a33f9 \ 280 | 
--hash=sha256:4f94069eb16657d2c6faada4624c39464f65c05606af50bb7902e036e3219be3 \ 281 | --hash=sha256:5113334cf8cf0ac8cd45e1f8309a603291b614191c9add34d33075727a967709 \ 282 | --hash=sha256:51f90f73b4697bac9c9a78394c3acbbd331ccd3655c11be1a15ae6fe289a8c83 \ 283 | --hash=sha256:5d9f3a10e02d7371cd929b5d8f11e87d4bad890212ed3901f9b4d68767bee759 \ 284 | --hash=sha256:7ade88d0d778b1b222adebcc0927428f883db07017618a5e684fd03b83342bd9 \ 285 | --hash=sha256:7c5f84b5194c24dd573fa6472685b2a27cc5a17fe5f7b6fd40345378ca6812e3 \ 286 | --hash=sha256:7e447d172af52ad204d19982739aa2346245cc5ba6f579d16dac4bfec226d2e7 \ 287 | --hash=sha256:8ae9cda41fa114e28faf86cb137d751a17ffd0316d1c34ccf2235e8a84365c7f \ 288 | --hash=sha256:8f3ceecd20d71067c7fd4c9e832d4e22584318983cabc013dbf3f70ea95de346 \ 289 | --hash=sha256:9fac43a7466eb73e64a9940ac9ed6369baa39b3bf221ae23493a9ec4d0022674 \ 290 | --hash=sha256:a70a8dcde91be523c35b2bf96196edc5730edb347e374c7de7cd20c43ed95397 \ 291 | --hash=sha256:adfdeab2da79ea2f76f87eb42a3ab1966a5313e5a69a0213a3cc06ef692b0e96 \ 292 | --hash=sha256:ba07e92756c97e3aca0912b5cbc4e5ad802f4557212788e72a72a47ff376950d \ 293 | --hash=sha256:c07253088265c363d1ddf4b3cdb808d59a0468ecd017770ed716991620b8f77a \ 294 | --hash=sha256:c9d8c8ec7efb887333cf71e328e39cffbf771d8f8f95d308ea4125bf5f90ba64 \ 295 | --hash=sha256:d00e6be486affb5781468457b21a6cbe848c33ef43f9ea4a73b4882e5f188a44 \ 296 | --hash=sha256:d429c2430c93b7903914e4db9a966c7f2b068dd2ebdd2fa9b9ce094c7d459f33 297 | zipp==3.11.0 ; python_full_version >= "3.7.2" and python_version < "3.10" \ 298 | --hash=sha256:83a28fcb75844b5c0cdaf5aa4003c2d728c77e05f5aeabe8e95e56727005fbaa \ 299 | --hash=sha256:a7a22e05929290a67401440b39690ae6563279bced5f314609d9d03798f56766 --------------------------------------------------------------------------------