├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Config ├── LICENSE ├── README.md ├── cli.py ├── createAutomaticFramework.py ├── createCustomStandard.py ├── createYamlAutomatedFramework.py ├── customFramework.py ├── frameworks ├── multi_framework.yaml ├── s3_config_framework.yaml ├── sampleData.json ├── sampleData.yaml └── sampleData.yml ├── generateYAMLFramework.py ├── images └── audit-manager-automation-diagram.png ├── mergeMultiFramework.py ├── requirements.txt └── utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Windows thumbnail cache files 2 | Thumbs.db 3 | Thumbs.db:encryptable 4 | ehthumbs.db 5 | ehthumbs_vista.db 6 | 7 | # Dump file 8 | *.stackdump 9 | 10 | # Folder config file 11 | [Dd]esktop.ini 12 | 13 | # Recycle Bin used on file shares 14 | $RECYCLE.BIN/ 15 | 16 | # Windows Installer files 17 | *.cab 18 | *.msi 19 | *.msix 20 | *.msm 21 | *.msp 22 | 23 | # Windows shortcuts 24 | *.lnk 25 | 26 | # Mac OS General 27 | .DS_Store 28 | .AppleDouble 29 | .LSOverride 30 | 31 | # Icon must end with two \r 32 | Icon 33 | 34 | # Thumbnails 35 | ._* 36 | 37 | # Files that might appear in the root of a volume 38 | .DocumentRevisions-V100 39 | .fseventsd 40 | .Spotlight-V100 41 | .TemporaryItems 42 | .Trashes 43 | .VolumeIcon.icns 44 | .com.apple.timemachine.donotpresent 45 | 46 | # Directories potentially created on remote AFP share 47 | .AppleDB 48 | .AppleDesktop 49 | Network Trash Folder 50 | Temporary Items 51 | .apdisk 52 | 53 | # Byte-compiled / optimized / DLL files 54 | __pycache__/ 55 | *.py[cod] 56 | *$py.class 57 | 58 | # C extensions 59 | *.so 60 | 61 | # Distribution / packaging 62 | .Python 63 | build/ 64 | develop-eggs/ 65 | dist/ 66 | downloads/ 67 | eggs/ 68 | .eggs/ 69 | lib/ 70 | lib64/ 71 | parts/ 72 | sdist/ 73 | var/ 74 | wheels/ 75 | pip-wheel-metadata/ 76 | share/python-wheels/ 77 | *.egg-info/ 78 | .installed.cfg 79 | *.egg 80 | MANIFEST 81 | 82 | # PyInstaller 83 | # Usually these files are written by a python script from a template 84 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 85 | *.manifest 86 | *.spec 87 | 88 | # Installer logs 89 | pip-log.txt 90 | pip-delete-this-directory.txt 91 | 92 | # Unit test / coverage reports 93 | htmlcov/ 94 | .tox/ 95 | .nox/ 96 | .coverage 97 | .coverage.* 98 | .cache 99 | nosetests.xml 100 | coverage.xml 101 | *.cover 102 | *.py,cover 103 | .hypothesis/ 104 | .pytest_cache/ 105 | 106 | # Translations 107 | *.mo 108 | *.pot 109 | 110 | # Django stuff: 111 | *.log 112 | local_settings.py 113 | db.sqlite3 114 | db.sqlite3-journal 115 | 116 | # Flask stuff: 117 | instance/ 118 | .webassets-cache 119 | 120 | # Scrapy stuff: 121 | .scrapy 122 | 123 | # Sphinx documentation 124 | docs/_build/ 125 | 126 | # PyBuilder 127 | target/ 128 | 129 | # Jupyter Notebook 130 | .ipynb_checkpoints 131 | 132 | # IPython 133 | profile_default/ 134 | ipython_config.py 135 | 136 | # pyenv 137 | .python-version 138 | 139 | # pipenv 140 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 141 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 142 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 143 | # install all needed dependencies. 144 | #Pipfile.lock 145 | 146 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 147 | __pypackages__/ 148 | 149 | # Celery stuff 150 | celerybeat-schedule 151 | celerybeat.pid 152 | 153 | # SageMath parsed files 154 | *.sage.py 155 | 156 | # Environments 157 | .env 158 | .venv 159 | env/ 160 | venv/ 161 | ENV/ 162 | env.bak/ 163 | venv.bak/ 164 | 165 | # Spyder project settings 166 | .spyderproject 167 | .spyproject 168 | 169 | # Rope project settings 170 | .ropeproject 171 | 172 | # mkdocs documentation 173 | /site 174 | 175 | # mypy 176 | .mypy_cache/ 177 | .dmypy.json 178 | dmypy.json 179 | 180 | # Pyre type checker 181 | .pyre/ -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 5 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *main* branch. 27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 38 | 39 | GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. 45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 51 | 52 | 53 | ## Security issue notifications 54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. 55 | 56 | 57 | ## Licensing 58 | 59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 60 | -------------------------------------------------------------------------------- /Config: -------------------------------------------------------------------------------- 1 | package.Reinforce-2021-audit-manager-automation = { 2 | interfaces = (1.0); 3 | 4 | # Use NoOpBuild. See https://w.amazon.com/index.php/BrazilBuildSystem/NoOpBuild 5 | build-system = no-op; 6 | build-tools = { 7 | 1.0 = { 8 | NoOpBuild = 1.0; 9 | }; 10 | }; 11 | 12 | # Use runtime-dependencies for when you want to bring in additional 13 | # packages when deploying. 14 | # Use dependencies instead if you intend for these dependencies to 15 | # be exported to other packages that build against you. 16 | dependencies = { 17 | 1.0 = { 18 | }; 19 | }; 20 | 21 | runtime-dependencies = { 22 | 1.0 = { 23 | }; 24 | }; 25 | 26 | }; 27 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this 4 | software and associated documentation files (the "Software"), to deal in the Software 5 | without restriction, including without limitation the rights to use, copy, modify, 6 | merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 7 | permit persons to whom the Software is furnished to do so. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 10 | INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 11 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 12 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 13 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 14 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Custom Audit Manager Controls and Frameworks 2 | 3 | ## Building custom frameworks on AWS Audit Manager 4 | The AWS Audit Manager framework library is the central place from which you can access and manage frameworks. You can create custom frameworks to organize controls into control sets in a way that suits your unique requirements. In this session, builders will get an opportunity to build a customized framework through Management Console. Additionally, they can learn from customer use cases to use automation using AWS SDK for Python to build custom frameworks at scale. 5 | 6 | 7 | ### Automation Architecture 8 | ![architecture_diagram](images/audit-manager-automation-diagram.png) 9 | 10 | ## customFramework.py Function 11 | ### customFramework.py Arguments 12 | 13 | | Argument | Purpose | Allowed Values | 14 | |-----------------------------------|---------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------| 15 | | [--jobName JOB_NAME] | Provide customFramework instructions for the type of automation job you want to perform. | Custom-Standard-Framework, Automated-Custom-Framework, Merge-Multiple-Framework, Generate-YAML-Framework | 16 | | [—customFrameworkName CUSTOM_REPORT_NAME] | The name of the custom framework created as a result of this job. | Must be unique | 17 | | [—existingFrameworkName REPORT_NAME] | The name of an existing framework. This is used for jobs that lookup a framework and extract control details. | | 18 | | [—description DESCRIPTION] | An optional description for the new custom framework | | 19 | | [—compliance-type COMPLIANCETYPE] | An optional description for the compliance type that the new custom framework supports, such as CIS or HIPAA. | | 20 | | [—template-path FILEPATH] | Path to the file containing the template body for the control sets in either JSON or YAML | | 21 | | --regions REGIONS | List of regions to deploy custom framework into separated by a single ',' | | 22 | | [-v] | enables verbose logging for troubleshooting | || 23 | 24 | 25 | 26 | ### CustomFramework Job Functionality 27 | #### Custom-Standard-Framework 28 | Description 29 | This job will create new custom controls and a new custom framework that consists of those custom controls. The controls are expressed via a YAML file and the repo includes an example if you wanted to build a framework for automated assessment of Amazon S3 30 | 31 | Example Test String 32 | ``` 33 | $ python3 customFramework.py --jobName Custom-Standard-Framework --customFrameworkName "S3 Controls Framework" --description "Automated AWS Config Controls for Amazon S3" --compliance-type "AWS Service" --template-path "frameworks/s3_config_framework.yaml" --regions "us-east-1" 34 | ``` 35 | 36 | #### Automated-Custom-Framework 37 | **Description** 38 | 39 | This job will accept Audit Manager Managed Framework names https://docs.aws.amazon.com/audit-manager/latest/userguide/framework-overviews.html and create a new custom framework that consists of only the automated controls from the managed framework. 40 | 41 | Example Test String 42 | ``` 43 | $ python3 customFramework.py --jobName Automated-Custom-Framework --regions "us-east-1" --existingFrameworkName "PCI DSS V3.2.1" --customFrameworkName "PCI DSS V3.2.1 - Automated Controls Only" 44 | ``` 45 | 46 | 47 | #### Merge-Multiple-Framework 48 | **Description** 49 | 50 | This job will create a custom framework in Audit Manager by merging multiple frameworks. The frameworks' control sets are identified in a provided YAML template. 51 | 52 | 53 | Example Test String 54 | ``` 55 | $ python3 customFramework.py --jobName Merge-Multiple-Framework --regions "us-east-1" --customFrameworkName "Custom Enterprise Controls" --template-path "frameworks/multi_framework.yaml" 56 | ``` 57 | #### Generate-YAML-Automatic-Framework 58 | **Description** 59 | This job will create a YAML file formatted for a custom framework template from one or more existing frameworks. You can then modify that YAML template to add or remove additional controls. Once the complete, you can use it to generate a custom framework based on this template. 60 | 61 | 62 | Example Test String 63 | ``` 64 | $ python3 customFramework.py --jobName Generate-YAML-Framework --existingFrameworkName "AWS License Manager" --template-path "frameworks/license_manager_controls.yaml" --regions "us-east-1" 65 | ``` 66 | 67 | 68 | 69 | 70 | # Builders Session Steps 71 | ## Prerequisites 72 | 73 | 1. Have an AWS account with Audit Manager enabled 74 | 2. Confirm you have an environment to make AWS API calls with. (Cloud9 or local) 75 | 3. Clone the provided sample code to your working environment (local computer or cloud9) (LINK) 76 | 4. Ensure you have updated Python (Specifically version 3.5 or higher), Boto3, pyyaml, and Botocore installed in the environment. 77 | 78 | 79 | ## Create a Custom Framework in the Audit Manager UI 80 | 81 | 1. Log into the AWS console and navigate to Audit Manager 82 | 2. Navigate to the framework library and create a custom framework. 83 | 3. Upon completion you should have made a custom framework either from scratch of using one of the prebuilt frameworks as a template. 84 | 85 | ## Create a Custom Framework using the automation provided 86 | 87 | 1. Use the "customFramework" function and the correct job name to begin creating a standard custom framework. 88 | 2. Look at example test string in code repo for command arguments. 89 | 3. After the Python script has completed check in the console to see if your new custom framework appears. 90 | 91 | ## Create a Custom Framework with only automated controls from an AWS Managed Framework 92 | 93 | 1. Use the "customFramework" function and the correct job name to begin creating a custom framework for automated controls. 94 | 2. Look at example test string in code repo for command arguments. 95 | 3. After the Python script has completed check in the console to see if your new custom framework appears. 96 | 97 | ## Merge any two frameworks into a new Custom Framework. 98 | 99 | 1. Use the "customFramework" function an the correct job name to begin creating a custom framework from multiple frameworks. 100 | 2. Look at example test string in code repo for command arguments. 101 | 3. After the Python script has completed check in the console to see if your new custom framework appears. 102 | 103 | ## Generate a YAML template from an existing framework, manually update it with additional controls, and create a new Custom Framework based on that. 104 | 105 | 1. Use the "customFramework" function and the Generate-YAML-Framework job name to create a YAML framework. 106 | 2. Once the Python script finishes find the name YAML file in the local copy of the repo. 107 | 3. Use the Python tool to generate a custom framework with your newly generated YAML file. 108 | 4. After the Python script has completed check in the console to see if your new custom framework appears. 109 | -------------------------------------------------------------------------------- /cli.py: -------------------------------------------------------------------------------- 1 | # Core python packages 2 | import argparse 3 | import ast 4 | 5 | def create_arg_parser() -> dict: 6 | # Define the parser variable to equal argparse.ArgumentParser() 7 | parser = argparse.ArgumentParser( 8 | description="Creates a custom audit framework", 9 | ) 10 | 11 | # Add each of the arguments using the parser.add_argument() method 12 | parser.add_argument( 13 | "--jobName", 14 | dest='job_name', 15 | type=str, 16 | required=False, 17 | help="The name of the job. Currently we are supporting merge-multi-framework") 18 | 19 | parser.add_argument( 20 | "--customFrameworkName", 21 | dest='custom_report_name', 22 | type=str, 23 | required=False, 24 | help="The name of the new custom framework") 25 | 26 | parser.add_argument( 27 | "--existingFrameworkName", 28 | dest='report_name', 29 | type=str, 30 | required=False, 31 | help="The name of an existing framework") 32 | 33 | # parser.add_argument( 34 | # "--report_list", 35 | # dest="report_list", 36 | # type=ast.literal_eval, 37 | # required=False, 38 | # help="The list of existing frameworks") 39 | 40 | parser.add_argument( 41 | '--description', 42 | dest='description', 43 | type=str, 44 | required=False, 45 | help='An optional description for the new custom framework',) 46 | 47 | parser.add_argument( 48 | "--compliance-type", 49 | dest="complianceType", 50 | type=str, 51 | required=False, 52 | help=( 53 | "The compliance type that the new custom framework supports," 54 | "such as CIS or HIPPA") 55 | ) 56 | 57 | parser.add_argument( 58 | "--template-path", 59 | dest="filepath", 60 | type=str, 61 | required=False, 62 | help=( 63 | "file containing the template body for the control sets," 64 | "in either json or yaml"), 65 | ) 66 | 67 | parser.add_argument( 68 | "--regions", 69 | dest="regions", 70 | type=str, 71 | required=True, 72 | help=( 73 | "List of regions to deploy custom framework into," 74 | "separated by a single ','"), 75 | ) 76 | 77 | parser.add_argument( 78 | "-v", 79 | "--verbose", 80 | dest="verbose", 81 | help=( 82 | """ 83 | Using -v or --verbose will set the logging level 84 | to DEBUG to return increased logging,\ 85 | CAUTION - This will generate a lot of logs! 86 | """), 87 | action='store_true', 88 | ) 89 | 90 | 91 | 92 | 93 | # This will inspect the command line, convert each argument 94 | # to the appropriate type and invoke the appropriate action. 95 | args = parser.parse_args() 96 | return args 97 | 98 | 99 | def main(): 100 | create_arg_parser() 101 | 102 | 103 | if __name__ == '__main__': 104 | main() 105 | -------------------------------------------------------------------------------- /createAutomaticFramework.py: -------------------------------------------------------------------------------- 1 | # core python packages 2 | import sys 3 | 4 | # 3rd party packages - see requirements.txt 5 | import boto3 6 | 7 | # /src 8 | from utils import * 9 | 10 | 11 | 12 | # Create custom assessment framework using automated controls 13 | 14 | def create_automated_custom_framework( 15 | report_name: str = None, 16 | custom_report_name: str = None, 17 | region_name=None, 18 | ) -> dict: 19 | """ 20 | Create a custom assessment framework using automated controls in Audit Manager 21 | 22 | Args: 23 | report_name (*string*)-- 24 | [**REQUIRED**] 25 | The name of existing framework. Defaults to **None**. 26 | 27 | custom_report_name (*string*)-- 28 | [**REQUIRED**] 29 | The name of the custom framework. Defaults to **None**. 30 | 31 | 32 | Raises: 33 | error: raises Boto3 ClientError 34 | 35 | Returns: 36 | [type]: [description] 37 | """ 38 | 39 | auditmanager_client = boto3.client('auditmanager', region_name=region_name) 40 | 41 | ## List all standard frameworks 42 | try: 43 | framework_list_response = auditmanager_client.list_assessment_frameworks(frameworkType='Standard') 44 | except botocore.exceptions.ClientError as error: 45 | raise error 46 | 47 | custom_exists = check_framework_existence(custom_report_name) 48 | 49 | if custom_exists: 50 | sys.exit(f"The customer framework {custom_report_name} already exists. Please note that Framework name within the AWS account should be unique") 51 | 52 | ## Extract framework id of given framework 53 | standard_exists = False 54 | for framework_list in framework_list_response['frameworkMetadataList']: 55 | if framework_list['name'] == report_name: 56 | id_report = framework_list['id'] 57 | standard_exists = True 58 | 59 | if not standard_exists: 60 | sys.exit(f"The framework name {report_name} doesn't exist") 61 | 62 | 63 | ## Returns complete information about given framework 64 | try: 65 | framework_controls_response = auditmanager_client.get_assessment_framework( 66 | frameworkId=id_report 67 | ) 68 | except botocore.exceptions.ClientError as error: 69 | raise error 70 | 71 | control_sets_list = [] 72 | 73 | ## Get control sets and controls of given framework 74 | for control_sets in framework_controls_response['framework']['controlSets']: 75 | control_sets_dict = {} 76 | 77 | control_list = [] 78 | for controls in control_sets['controls']: 79 | controls_dict = {} 80 | if controls['controlSources'] != 'Manual': ## Filters out manual controls 81 | controls_dict['id'] = controls['id'] 82 | control_list.append(controls_dict) 83 | if len(control_list) > 0: 84 | control_sets_dict['name'] = control_sets['name'] 85 | control_sets_dict['controls'] = control_list 86 | control_sets_list.append(control_sets_dict) ## create a list of dictionaries 87 | 88 | try: 89 | response = auditmanager_client.create_assessment_framework( 90 | name=custom_report_name, 91 | controlSets=control_sets_list 92 | ) 93 | logging.debug(response) 94 | if response['ResponseMetadata']['HTTPStatusCode'] == 200: 95 | logging.info(f"Custom Framework {custom_report_name} Created successfully") 96 | except botocore.exceptions.ClientError as error: 97 | raise error 98 | 99 | -------------------------------------------------------------------------------- /createCustomStandard.py: -------------------------------------------------------------------------------- 1 | # Core python packages 2 | import logging 3 | 4 | # 3rd party packages - see requirements.txt 5 | import boto3 6 | import botocore.exceptions 7 | 8 | # src python 9 | from utils import * 10 | 11 | # Create custom assessment framework in Audit Manager 12 | def create_custom_framework( 13 | custom_report_name: str = None, 14 | description: str = None, 15 | complianceType: str = None, 16 | control_sets: list = None, 17 | region_name=None, 18 | ) -> dict: 19 | """ 20 | Creates a custom assessment framework in Audit Manager 21 | 22 | Args: 23 | custom_report_name (*string*)-- 24 | [**REQUIRED**] 25 | The name of the custom framework. Defaults to **None**. 26 | description (*string*, optional): An . Defaults to **None**. 27 | complianceType (*string*, optional): [description]. Defaults to **None**. 28 | control_sets (*string*, optional): [description]. Defaults to **None**. 29 | auditmanager (*string*, optional): [description]. Defaults to **None**. 30 | 31 | Raises: 32 | error: raises Boto3 ClientError 33 | error: raises Boto3 ClientError 34 | 35 | Returns: 36 | [type]: [description] 37 | """ 38 | auditmanager_client = boto3.client('auditmanager', region_name=region_name) 39 | existing_frameworks = ( 40 | auditmanager_client.list_assessment_frameworks(frameworkType="Custom")) 41 | already_exists = False 42 | # Looping through all existing custom frameworks 43 | for existing in existing_frameworks["frameworkMetadataList"]: 44 | # Checking if the framework being created already exists and 45 | # updating it 46 | if existing["name"] == custom_report_name: 47 | already_exists = True 48 | try: 49 | response = auditmanager_client.update_assessment_framework( 50 | frameworkId=existing["id"], 51 | name=custom_report_name, 52 | description=description, 53 | complianceType=complianceType, 54 | controlSets=control_sets 55 | ) 56 | logging.debug(response) 57 | return response 58 | except botocore.exceptions.ClientError as error: 59 | raise error 60 | 61 | # Creating new framework if it does not already exist 62 | if already_exists is False: 63 | try: 64 | response = auditmanager_client.create_assessment_framework( 65 | name=custom_report_name, 66 | description=description, 67 | complianceType=complianceType, 68 | controlSets=control_sets 69 | ) 70 | logging.debug(response) 71 | return response 72 | except botocore.exceptions.ClientError as error: 73 | raise error 74 | 75 | 76 | def list_controls(controlType: str = None, region_name=None) -> dict: 77 | auditmanager_client = boto3.client('auditmanager', region_name=region_name) 78 | existing_controls = auditmanager_client.list_controls(controlType="Custom") 79 | nextToken = existing_controls.get('nextToken', None) 80 | while nextToken is not None: 81 | next_existing_controls = auditmanager_client.list_controls( 82 | controlType="Custom", 83 | nextToken=nextToken 84 | ) 85 | # Adding each control in the new list of controls to the existing list 86 | for item in next_existing_controls["controlMetadataList"]: 87 | existing_controls["controlMetadataList"].append(item) 88 | 89 | # Checking if there is a token in the new list of controls 90 | if "nextToken" in next_existing_controls: 91 | nextToken = next_existing_controls["nextToken"] 92 | else: 93 | nextToken = None 94 | logging.debug(existing_controls) 95 | return existing_controls 96 | 97 | 98 | # Create 99 | 100 | 101 | # Create custom controls in Audit Manager 102 | def create_custom_controls(input=None, controls=None, region_name=None): 103 | auditmanager_client = boto3.client('auditmanager', region_name=region_name) 104 | # Calls the list_controls() and returns a list of existing controls 105 | existing_controls = list_controls(region_name=region_name) 106 | control_sets = [] 107 | # Iterating through the control sets in the JSON file 108 | for control_set in input: 109 | # Creating a dictionary of control sets and list of control IDs 110 | # to pass them into the assessment framework in the correct format 111 | control_sets_dict = {} 112 | control_ids = [] 113 | control_sets_dict.setdefault("name", control_set) 114 | 115 | # Iterating through each control in the control set 116 | for control in input[control_set]: 117 | 118 | already_exists = False 119 | # Looping through all existing custom controls 120 | for existing in existing_controls["controlMetadataList"]: 121 | # Checking if the control being created already exists and 122 | # updating it if so 123 | if existing["name"] == input[control_set][control]["name"]: 124 | already_exists = True 125 | controlMappingSources = [] 126 | 127 | # Adding each data source to a 128 | # list to feed into the control creation 129 | for data_source in input[control_set][control]["controlMappingSources"]: 130 | # Converting keywords to uppercase if not already 131 | if "sourceKeyword" in data_source: 132 | keyword = data_source["sourceKeyword"]["keywordValue"] 133 | if keyword.isupper() is not True: 134 | uppercase_keyword = keyword.upper() 135 | data_source["sourceKeyword"]["keywordValue"] = uppercase_keyword 136 | controlMappingSources.append(data_source) 137 | # Create control 138 | try: 139 | response = auditmanager_client.update_control( 140 | controlId=existing["id"], 141 | name=input[control_set][control]["name"], 142 | description=( 143 | input[control_set][control]["description"]), 144 | testingInformation=( 145 | input[control_set] 146 | [control]["testingInformation"]), 147 | actionPlanTitle=( 148 | input[control_set] 149 | [control]["actionPlanTitle"]), 150 | actionPlanInstructions=( 151 | input[control_set] 152 | [control]["actionPlanInstructions"]), 153 | controlMappingSources=controlMappingSources 154 | ) 155 | except botocore.exceptions.ClientError as error: 156 | raise error 157 | 158 | control_ids.append({"id": response["control"]["id"]}) 159 | break 160 | 161 | # Creating new control if it does not already exist 162 | if already_exists is False: 163 | controlMappingSources = [] 164 | 165 | # Adding each data source to a 166 | # list to feed into the control creation 167 | for data_source in input[control_set][control]["controlMappingSources"]: 168 | # Converting keywords to uppercase if not already 169 | if "sourceKeyword" in data_source: 170 | keyword = data_source["sourceKeyword"]["keywordValue"] 171 | if keyword.isupper() is not True: 172 | uppercase_keyword = keyword.upper() 173 | data_source["sourceKeyword"]["keywordValue"] = uppercase_keyword 174 | controlMappingSources.append(data_source) 175 | # Create control 176 | try: 177 | response = auditmanager_client.create_control( 178 | name=input[control_set][control]["name"], 179 | description=input[control_set][control]["description"], 180 | testingInformation=( 181 | input[control_set][control]["testingInformation"]), 182 | actionPlanTitle=( 183 | input[control_set][control]["actionPlanTitle"]), 184 | actionPlanInstructions=( 185 | input[control_set] 186 | [control]["actionPlanInstructions"]), 187 | controlMappingSources=controlMappingSources 188 | ) 189 | except botocore.exceptions.ClientError as error: 190 | raise error 191 | 192 | control_ids.append({"id": response["control"]["id"]}) 193 | 194 | control_sets_dict["controls"] = control_ids 195 | control_sets.append(control_sets_dict) 196 | 197 | return control_sets 198 | 199 | 200 | 201 | 202 | 203 | 204 | 205 | -------------------------------------------------------------------------------- /createYamlAutomatedFramework.py: -------------------------------------------------------------------------------- 1 | # Core python packages 2 | import yaml 3 | 4 | # 3rd party packages 5 | import boto3 6 | 7 | # /src 8 | from mergeMultiFramework import merge_multiple_framework 9 | 10 | def create_generate_yaml_automated_controls( 11 | report_list: list, 12 | custom_report_name: str, 13 | filename: str, 14 | region_name = None, 15 | ): 16 | 17 | 18 | auditmanager_client = boto3.client('auditmanager', region_name=region_name) 19 | ## create a zero byte yaml file 20 | open(filename, 'w').close() 21 | 22 | for name_report in report_list: 23 | framework_list_response = auditmanager_client.list_assessment_frameworks(frameworkType='Standard') 24 | 25 | for framework_list in framework_list_response['frameworkMetadataList']: 26 | if framework_list['name'] == name_report: 27 | id_report = framework_list['id'] 28 | 29 | framework_controls_response = auditmanager_client.get_assessment_framework( 30 | frameworkId=id_report 31 | ) 32 | yaml_dict = {} 33 | control_sets_list = [] 34 | yaml_control_dict = {} 35 | for control_sets in framework_controls_response['framework']['controlSets']: 36 | control_sets_dict = {} 37 | control_list = [] 38 | yaml_control_list = [] 39 | for controls in control_sets['controls']: 40 | controls_dict = {} 41 | if controls['controlSources'] != 'Manual': 42 | yaml_control_list.append(controls['name']) 43 | controls_dict['id'] = controls['id'] 44 | control_list.append(controls_dict) 45 | 46 | if len(yaml_control_list) > 0: 47 | yaml_control_dict[control_sets['name']] = yaml_control_list 48 | control_sets_dict['name'] = control_sets['name'] 49 | control_sets_dict['controls'] = control_list 50 | control_sets_list.append(control_sets_dict) 51 | 52 | if len(yaml_control_dict) > 0: 53 | yaml_dict[name_report] = yaml_control_dict 54 | 55 | ## writes into yaml file 56 | with open(filename, 'a') as file: 57 | documents = yaml.dump(yaml_dict, file) 58 | 59 | merge_multiple_framework(custom_report_name, filename) -------------------------------------------------------------------------------- /customFramework.py: -------------------------------------------------------------------------------- 1 | # Core python packages 2 | import sys 3 | import logging 4 | 5 | # from src 6 | from utils import * 7 | from cli import create_arg_parser 8 | from createAutomaticFramework import create_automated_custom_framework 9 | from generateYAMLFramework import create_yaml_controls 10 | from mergeMultiFramework import merge_multiple_framework 11 | from createCustomStandard import * 12 | # Deploy the custom controls and framework to every region required 13 | 14 | def deploy_to_all_regions(regions: str = None): 15 | regions_list = regions.split(",") 16 | 17 | logging.info("-------- Beginning deployment --------") 18 | deployment_status = True 19 | # Iterating through the list of regions 20 | for region_name in regions_list: 21 | # Checking if Audit Manager has been registered in that region 22 | # and registering if not 23 | register_region = register_account(region_name=region_name) 24 | argument = (vars(create_arg_parser())) 25 | input_job_name = argument.get('job_name') 26 | 27 | deployment_region_status = False 28 | 29 | if input_job_name == 'Custom-Standard-Framework': 30 | if argument.get('custom_report_name') and argument.get('filepath') and argument.get('description') and argument.get('complianceType'): 31 | input = open_input(argument.get('filepath')) 32 | # Creating custom controls and passing to a custom framework 33 | 34 | # 👇 The function call to create_custom_controls👇 35 | control_sets = create_custom_controls( 36 | input, 37 | region_name=region_name 38 | ) 39 | #The function call to create_custom_framework👇 40 | create_custom_framework( 41 | argument.get('custom_report_name'), 42 | argument.get('description'), 43 | argument.get('complianceType'), 44 | control_sets, 45 | region_name=region_name 46 | ) 47 | deployment_region_status = True 48 | else: 49 | logging.error("--customFrameworkName, --description, --compliance-type and --template-path are required parameters.") 50 | print("python customFramework.py --jobName Custom-Standard-Framework --customFrameworkName \"S3 Controls Framework\" --description \"Automated AWS Config Controls for Amazon S3\" --compliance-type \"AWS Service\" --template-path \"frameworks/s3_config_framework.yaml\" --regions \"us-east-1\"") 51 | 52 | elif input_job_name == 'Automated-Custom-Framework': 53 | 54 | if argument.get('custom_report_name') and argument.get('report_name'): 55 | create_automated_custom_framework( 56 | argument.get('report_name'), 57 | argument.get('custom_report_name') 58 | ) 59 | deployment_region_status = True 60 | else: 61 | logging.error("--customFrameworkName and --existingFrameworkName are required parameters.") 62 | print("Example: python customFramework.py --jobName Automated-Custom-Framework --existingFrameworkName \"PCI DSS V3.2.1\" --customFrameworkName \"PCI DSS V3.2.1 - Automated Controls Only\" --regions \"us-east-1\"") 63 | 64 | elif input_job_name == 'Merge-Multiple-Framework': 65 | 66 | if argument.get('custom_report_name') and argument.get('filepath'): 67 | merge_multiple_framework( 68 | argument.get('custom_report_name'), 69 | argument.get('filepath'), 70 | argument.get('region_name') 71 | ) 72 | deployment_region_status = True 73 | else: 74 | logging.error("--customFrameworkName and --filepath are required parameters.") 75 | print("Example: python customFramework.py --jobName Merge-Multiple-Framework --customFrameworkName \"Custom Enterprise Controls\" --template-path \"frameworks/multi_framework.yaml\" --regions \"us-east-1\"") 76 | 77 | elif input_job_name == 'Generate-YAML-Framework': 78 | 79 | if argument.get('filepath') and argument.get('report_name'): 80 | create_yaml_controls( 81 | argument.get('report_name'), 82 | argument.get('filepath') 83 | 84 | ) 85 | deployment_region_status = True 86 | else: 87 | logging.error("--existingFrameworkName, --template-path and --filepath are required parameters.") 88 | print("Example: python customFramework.py --jobName Generate-YAML-Framework --existingFrameworkName \"AWS License Manager\" --template-path \"frameworks/license_manager_controls.yaml\" --regions \"us-east-1\"") 89 | else: 90 | logging.error(f"The job name {input_job_name} does not exist.") 91 | logging.error("Expected JobNames : Custom-Standard-Framework|Automated-Custom-Framework|Merge-Multiple-Framework|Generate-YAML-Framework") 92 | 93 | if deployment_region_status: 94 | logging.info(f"Deployment to the {region_name} region has been completed.") 95 | else: 96 | logging.error(f"Deployment to the {region_name} region has been failed.") 97 | deployment_status = False 98 | 99 | if deployment_status: 100 | logging.info("-------- All deployments have been completed --------") 101 | else: 102 | logging.error("-------- Issues reported while deploying custom frameworks --------") 103 | return register_region 104 | 105 | 106 | # Main function 107 | def main(): 108 | if not sys.version_info >= (3, 5): 109 | sys.exit('python version must be 3.5 or greater') 110 | else: 111 | argument = (vars(create_arg_parser())) 112 | if argument.get('verbose') is True: 113 | logging.basicConfig(level=logging.DEBUG) 114 | else: 115 | logging.basicConfig(level=logging.INFO) 116 | deploy_to_all_regions( 117 | (argument.get('regions'))) 118 | 119 | 120 | if __name__ == '__main__': 121 | main() 122 | -------------------------------------------------------------------------------- /frameworks/multi_framework.yaml: -------------------------------------------------------------------------------- 1 | AWS License Manager: 2 | Oracle Database Editions: 3 | - 3.0.1 - Customer managed license for Oracle Database Standard Edition 4 | - 3.0.2 - Customer managed license for Oracle Database Standard Edition One 5 | Oracle Database Options and Packs: 6 | - 3.1.1 - Customer managed license for Oracle Database Active Data Guard 7 | AWS Control Tower Guardrails: 8 | Disallow instances: 9 | - 5.0.1 - Disallow RDS database instances that are not storage encrypted 10 | -------------------------------------------------------------------------------- /frameworks/s3_config_framework.yaml: -------------------------------------------------------------------------------- 1 | A.1 - s3 Compliance: 2 | A.1.1 - s3-bucket-level-public-access-prohibited: 3 | name: A.1.1 - s3-bucket-level-public-access-prohibited 4 | description: Checks if Amazon Simple Storage Service (Amazon S3) buckets are publicly accessible. This rule is NON_COMPLIANT if an Amazon S3 bucket is not listed in the excludedPublicBuckets parameter and bucket level settings are public. 5 | testingInformation: "-" 6 | actionPlanTitle: "-" 7 | actionPlanInstructions: "-" 8 | controlMappingSources: 9 | - sourceName: S3 bucket level public access prohibited 10 | troubleshootingText: Checks if Amazon Simple Storage Service (Amazon S3) buckets are publicly accessible. 11 | sourceSetUpOption: System_Controls_Mapping 12 | sourceType: AWS_Config 13 | sourceKeyword: 14 | keywordValue: S3_BUCKET_LEVEL_PUBLIC_ACCESS_PROHIBITED 15 | A.1.2 - s3-bucket-logging-enabled: 16 | name: A.1.2 - s3-bucket-logging-enabled 17 | description: Checks whether logging is enabled for your S3 buckets. 18 | testingInformation: "-" 19 | actionPlanTitle: "-" 20 | actionPlanInstructions: "-" 21 | controlMappingSources: 22 | - sourceName: s3 bucket logging enabled 23 | troubleshootingText: Checks whether logging is enabled for your S3 buckets. 24 | sourceSetUpOption: System_Controls_Mapping 25 | sourceType: AWS_Config 26 | sourceKeyword: 27 | keywordValue: S3_BUCKET_LOGGING_ENABLED 28 | A.1.3 - s3-bucket-public-read-prohibited: 29 | name: A.1.3 - s3-bucket-public-read-prohibited 30 | description: Checks if your Amazon S3 buckets do not allow public read access. The rule checks the Block Public Access settings, the bucket policy, and the bucket access control list (ACL). 31 | testingInformation: "-" 32 | actionPlanTitle: "-" 33 | actionPlanInstructions: "-" 34 | controlMappingSources: 35 | - sourceName: S3 bucket level public read access prohibited 36 | troubleshootingText: Checks if your Amazon S3 buckets do not allow public read access. The rule checks the Block Public Access settings, the bucket policy, and the bucket access control list (ACL). 37 | sourceSetUpOption: System_Controls_Mapping 38 | sourceType: AWS_Config 39 | sourceKeyword: 40 | keywordValue: S3_BUCKET_PUBLIC_READ_PROHIBITED 41 | A.1.4 - s3-bucket-public-write-prohibited: 42 | name: A.1.4 - s3-bucket-public-write-prohibited 43 | description: Checks if your Amazon S3 buckets do not allow public write access. The rule checks the Block Public Access settings, the bucket policy, and the bucket access control list (ACL). 44 | testingInformation: "-" 45 | actionPlanTitle: "-" 46 | actionPlanInstructions: "-" 47 | controlMappingSources: 48 | - sourceName: S3 bucket level public write access prohibited 49 | troubleshootingText: Checks if your Amazon S3 buckets do not allow public write access. The rule checks the Block Public Access settings, the bucket policy, and the bucket access control list (ACL). 50 | sourceSetUpOption: System_Controls_Mapping 51 | sourceType: AWS_Config 52 | sourceKeyword: 53 | keywordValue: S3_BUCKET_PUBLIC_WRITE_PROHIBITED 54 | A.1.5 - s3-bucket-replication-enabled: 55 | name: A.1.5 - s3-bucket-replication-enabled 56 | description: Checks whether the Amazon S3 buckets have cross-region replication enabled. 57 | testingInformation: "-" 58 | actionPlanTitle: "-" 59 | actionPlanInstructions: "-" 60 | controlMappingSources: 61 | - sourceName: s3 bucket replication enabled 62 | troubleshootingText: Checks whether the Amazon S3 buckets have cross-region replication enabled. 63 | sourceSetUpOption: System_Controls_Mapping 64 | sourceType: AWS_Config 65 | sourceKeyword: 66 | keywordValue: S3_BUCKET_REPLICATION_ENABLED 67 | A.1.6 - s3-bucket-server-side-encryption-enabled: 68 | name: A.1.6 - s3-bucket-server-side-encryption-enabled 69 | description: Checks that your Amazon S3 bucket either has Amazon S3 default encryption enabled or that the S3 bucket policy explicitly denies put-object requests without server side encryption that uses AES-256 or AWS Key Management Service. 70 | testingInformation: "-" 71 | actionPlanTitle: "-" 72 | actionPlanInstructions: "-" 73 | controlMappingSources: 74 | - sourceName: s3 bucket server side encryption enabled 75 | troubleshootingText: Checks that your Amazon S3 bucket either has Amazon S3 default encryption enabled or that the S3 bucket policy explicitly denies put-object requests without server side encryption that uses AES-256 or AWS Key Management Service. 76 | sourceSetUpOption: System_Controls_Mapping 77 | sourceType: AWS_Config 78 | sourceKeyword: 79 | keywordValue: S3_BUCKET_SERVER_SIDE_ENCRYPTION_ENABLED 80 | -------------------------------------------------------------------------------- /frameworks/sampleData.json: -------------------------------------------------------------------------------- 1 | { 2 | "A.6 - Organisation of information security":{ 3 | "A.6.1.2 - Segregation of duties":{ 4 | "name":"A.6.1.2 - Segregation of duties", 5 | "description":"A.6.1.2 - Conflicting duties and areas of responsibility shall be segregated to reduce opportunities for unauthorized or unintentional modification or misuse of the organization's assets.", 6 | "testingInformation":"-", 7 | "actionPlanTitle":"-", 8 | "actionPlanInstructions":"-", 9 | "controlMappingSources":[ 10 | { 11 | "sourceName":"Data source 1", 12 | "troubleshootingText":"Checks whether at least one AWS CloudTrail trail is logging Amazon S3 data events for all S3 buckets. The rule is NON_COMPLIANT if trails that log data events for S3 buckets are not configured.", 13 | "sourceSetUpOption":"System_Controls_Mapping", 14 | "sourceType":"AWS_Config", 15 | "sourceKeyword":{ 16 | "keywordValue":"cloudtrail-s3-dataevents-enabled" 17 | } 18 | }, 19 | { 20 | "sourceName":"Data source 2", 21 | "troubleshootingText":"Checks whether AWS CloudTrail is enabled in your AWS account. Optionally, you can specify which S3 bucket, SNS topic, and Amazon CloudWatch Logs ARN to use.", 22 | "sourceSetUpOption":"System_Controls_Mapping", 23 | "sourceType":"AWS_Config", 24 | "sourceKeyword":{ 25 | "keywordValue":"cloudtrail-enabled" 26 | } 27 | }, 28 | { 29 | "sourceName":"Data source 3", 30 | "troubleshootingText":"Checks whether IAM groups have at least one IAM user.", 31 | "sourceSetUpOption":"System_Controls_Mapping", 32 | "sourceType":"AWS_Config", 33 | "sourceKeyword":{ 34 | "keywordValue":"iam-group-has-users-check" 35 | } 36 | }, 37 | { 38 | "sourceName":"Data source 4", 39 | "troubleshootingText":"Checks whether the Application Load Balancer and the Classic Load Balancer have logging enabled. The rule is NON_COMPLIANT if the access_logs.s3.enabled is false or access_logs.S3.bucket is not equal to the s3BucketName that you provided.", 40 | "sourceSetUpOption":"System_Controls_Mapping", 41 | "sourceType":"AWS_Config", 42 | "sourceKeyword":{ 43 | "keywordValue":"elb-logging-enabled" 44 | } 45 | }, 46 | { 47 | "sourceName":"Data source 5", 48 | "troubleshootingText":"Checks whether logging is enabled for your S3 buckets.", 49 | "sourceSetUpOption":"System_Controls_Mapping", 50 | "sourceType":"AWS_Config", 51 | "sourceKeyword":{ 52 | "keywordValue":"s3-bucket-logging-enabled" 53 | } 54 | }, 55 | { 56 | "sourceName":"Data source 6", 57 | "troubleshootingText":"Check whether the Amazon Relational Database Service instances are not publicly accessible. The rule is NON_COMPLIANT if the publiclyAccessible field is true in the instance configuration item.", 58 | "sourceSetUpOption":"System_Controls_Mapping", 59 | "sourceType":"AWS_Config", 60 | "sourceKeyword":{ 61 | "keywordValue":"rds-instance-public-access-check" 62 | } 63 | }, 64 | { 65 | "sourceName":"Data source 7", 66 | "troubleshootingText":"Checks if Amazon Relational Database Service (Amazon RDS) snapshots are public. The rule is NON_COMPLIANT if any existing and new Amazon RDS snapshots are public.", 67 | "sourceSetUpOption":"System_Controls_Mapping", 68 | "sourceType":"AWS_Config", 69 | "sourceKeyword":{ 70 | "keywordValue":"rds-snapshots-public-prohibited" 71 | } 72 | }, 73 | { 74 | "sourceName":"Data source 8", 75 | "troubleshootingText":"Checks whether Amazon Redshift clusters are not publicly accessible. The rule is NON_COMPLIANT if the publiclyAccessible field is true in the cluster configuration item.", 76 | "sourceSetUpOption":"System_Controls_Mapping", 77 | "sourceType":"AWS_Config", 78 | "sourceKeyword":{ 79 | "keywordValue":"redshift-cluster-public-access-check" 80 | } 81 | }, 82 | { 83 | "sourceName":"Data source 9", 84 | "troubleshootingText":"CloudTrail is a service which logs API calls made to the AWS platform. This provides information on what API call was made, who made the API call and when the call was made, as well as other information such as the IP address.", 85 | "sourceSetUpOption":"System_Controls_Mapping", 86 | "sourceType":"AWS_Config", 87 | "sourceKeyword":{ 88 | "keywordValue":"multi-region-cloudtrail-enabled" 89 | } 90 | }, 91 | { 92 | "sourceName":"Data source 10", 93 | "troubleshootingText":" Checks whether the root user access key is available. The rule is COMPLIANT if the user access key does not exist.", 94 | "sourceSetUpOption":"System_Controls_Mapping", 95 | "sourceType":"AWS_Config", 96 | "sourceKeyword":{ 97 | "keywordValue":"iam-root-access-key-check" 98 | } 99 | }, 100 | { 101 | "sourceName":"Data source 11", 102 | "troubleshootingText":"Checks that AWS Security Hub is enabled for an AWS account. The rule is NON_COMPLIANT if Security Hub is not enabled.", 103 | "sourceSetUpOption":"System_Controls_Mapping", 104 | "sourceType":"AWS_Config", 105 | "sourceKeyword":{ 106 | "keywordValue":"securityhub-enabled" 107 | } 108 | }, 109 | { 110 | "sourceName":"Data source 12", 111 | "troubleshootingText":"Checks whether IAM users are members of at least one IAM group.", 112 | "sourceSetUpOption":"System_Controls_Mapping", 113 | "sourceType":"AWS_Config", 114 | "sourceKeyword":{ 115 | "keywordValue":"iam-user-group-membership-check" 116 | } 117 | }, 118 | { 119 | "sourceName":"Data source 13", 120 | "troubleshootingText":"Checks that none of your IAM users have policies attached. IAM users must inherit permissions from IAM groups or roles.", 121 | "sourceSetUpOption":"System_Controls_Mapping", 122 | "sourceType":"AWS_Config", 123 | "sourceKeyword":{ 124 | "keywordValue":"iam-user-no-policies-check" 125 | } 126 | }, 127 | { 128 | "sourceName":"Data source 14", 129 | "troubleshootingText":"Checks whether AWS Database Migration Service replication instances are public. The rule is NON_COMPLIANT if PubliclyAccessible field is true.", 130 | "sourceSetUpOption":"System_Controls_Mapping", 131 | "sourceType":"AWS_Config", 132 | "sourceKeyword":{ 133 | "keywordValue":"dms-replication-not-public" 134 | } 135 | }, 136 | { 137 | "sourceName":"Data source 15", 138 | "troubleshootingText":"Checks whether Amazon GuardDuty is enabled in your AWS account and region. If you provide an AWS account for centralization, the rule evaluates the Amazon GuardDuty results in the centralized account. The rule is COMPLIANT when Amazon GuardDuty is enabled.", 139 | "sourceSetUpOption":"System_Controls_Mapping", 140 | "sourceType":"AWS_Config", 141 | "sourceKeyword":{ 142 | "keywordValue":"guardduty-enabled-centralized" 143 | } 144 | }, 145 | { 146 | "sourceName":"Data source 16", 147 | "troubleshootingText":"Checks the IAM policies that you create for Allow statements that grant permissions to all actions on all resources. The rule is NON_COMPLIANT if any policy statement includes 'Effect': 'Allow' with 'Action': '*' over 'Resource': '*'.", 148 | "sourceSetUpOption":"System_Controls_Mapping", 149 | "sourceType":"AWS_Config", 150 | "sourceKeyword":{ 151 | "keywordValue":"iam-policy-no-statements-with-admin-access" 152 | } 153 | }, 154 | { 155 | "sourceName":"Data source 17", 156 | "troubleshootingText":"Checks whether Amazon Virtual Private Cloud flow logs are found and enabled for Amazon VPC.", 157 | "sourceSetUpOption":"System_Controls_Mapping", 158 | "sourceType":"AWS_Config", 159 | "sourceKeyword":{ 160 | "keywordValue":"vpc-flow-logs-enabled" 161 | } 162 | }, 163 | { 164 | "sourceName":"Data source 18", 165 | "troubleshootingText":"Checks whether the project contains environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY. The rule is NON_COMPLIANT when the project environment variables contains plaintext credentials.", 166 | "sourceSetUpOption":"System_Controls_Mapping", 167 | "sourceType":"AWS_Config", 168 | "sourceKeyword":{ 169 | "keywordValue":"codebuild-project-envvar-awscred-check" 170 | } 171 | }, 172 | { 173 | "sourceName":"Data source 19", 174 | "troubleshootingText":"Checks whether the AWS Lambda function policy attached to the Lambda resource prohibits public access. If the Lambda function policy allows public access it is NON_COMPLIANT.", 175 | "sourceSetUpOption":"System_Controls_Mapping", 176 | "sourceType":"AWS_Config", 177 | "sourceKeyword":{ 178 | "keywordValue":"lambda-functions-public-access-prohibited" 179 | } 180 | }, 181 | { 182 | "sourceName":"Data source 20", 183 | "troubleshootingText":"Checks whether the required public access block settings are configured from account level. The rule is only NON_COMPLIANT when the fields set below do not match the corresponding fields in the configuration item.", 184 | "sourceSetUpOption":"System_Controls_Mapping", 185 | "sourceType":"AWS_Config", 186 | "sourceKeyword":{ 187 | "keywordValue":"s3-account-level-public-access-blocks" 188 | } 189 | }, 190 | { 191 | "sourceName":"Data source 21", 192 | "troubleshootingText":"Checks whether Amazon Elastic Block Store snapshots are not publicly restorable. The rule is NON_COMPLIANT if one or more snapshots with the RestorableByUserIds field is set to all. If this field is set to all, then Amazon EBS snapshots are public.", 193 | "sourceSetUpOption":"System_Controls_Mapping", 194 | "sourceType":"AWS_Config", 195 | "sourceKeyword":{ 196 | "keywordValue":"ebs-snapshot-public-restorable-check" 197 | } 198 | }, 199 | { 200 | "sourceName":"Data source 22", 201 | "troubleshootingText":"Checks whether direct internet access is disabled for an Amazon SageMaker notebook instance. The rule is NON_COMPLIANT if SageMaker notebook instances are internet-enabled.", 202 | "sourceSetUpOption":"System_Controls_Mapping", 203 | "sourceType":"AWS_Config", 204 | "sourceKeyword":{ 205 | "keywordValue":"sagemaker-notebook-no-direct-internet-access" 206 | } 207 | }, 208 | { 209 | "sourceName":"Data source 23", 210 | "troubleshootingText":"Checks that your Amazon S3 buckets do not allow public read access. The rule checks the Block Public Access settings, the bucket policy, and the bucket access control list (ACL).", 211 | "sourceSetUpOption":"System_Controls_Mapping", 212 | "sourceType":"AWS_Config", 213 | "sourceKeyword":{ 214 | "keywordValue":"s3-bucket-public-read-prohibited" 215 | } 216 | }, 217 | { 218 | "sourceName":"Data source 24", 219 | "troubleshootingText":"Checks that your Amazon S3 buckets do not allow public write access. The rule checks the Block Public Access settings, the bucket policy, and the bucket access control list (ACL).", 220 | "sourceSetUpOption":"System_Controls_Mapping", 221 | "sourceType":"AWS_Config", 222 | "sourceKeyword":{ 223 | "keywordValue":"s3-bucket-public-write-prohibited" 224 | } 225 | } 226 | ] 227 | }, 228 | "A.6.1.4 - Contact with special interest groups":{ 229 | "name":"A.6.1.4 - Contact with special interest groups", 230 | "description":"A.6.1.4 - Appropriate contacts with special interest groups or other specialist security forums and professional associations shall be maintained.", 231 | "testingInformation":"-", 232 | "actionPlanTitle":"-", 233 | "actionPlanInstructions":"-", 234 | "controlMappingSources":[ 235 | { 236 | "sourceName":"Data source 1", 237 | "troubleshootingText":"Checks that AWS Security Hub is enabled for an AWS account. The rule is NON_COMPLIANT if Security Hub is not enabled.", 238 | "sourceSetUpOption":"System_Controls_Mapping", 239 | "sourceType":"AWS_Config", 240 | "sourceKeyword":{ 241 | "keywordValue":"securityhub-enabled" 242 | } 243 | }, 244 | { 245 | "sourceName":"Data source 2", 246 | "troubleshootingText":"Checks whether Amazon GuardDuty is enabled in your AWS account and region. If you provide an AWS account for centralization, the rule evaluates the Amazon GuardDuty results in the centralized account. The rule is COMPLIANT when Amazon GuardDuty is enabled.", 247 | "sourceSetUpOption":"System_Controls_Mapping", 248 | "sourceType":"AWS_Config", 249 | "sourceKeyword":{ 250 | "keywordValue":"guardduty-enabled-centralized" 251 | } 252 | } 253 | ] 254 | }, 255 | "A.6.2.2 - Teleworking":{ 256 | "name":"A.6.2.2 - Teleworking", 257 | "description":"A.6.2.2 - A policy and supporting security measures shall be implemented to protect information accessed, processed or stored at teleworking sites.", 258 | "testingInformation":"-", 259 | "actionPlanTitle":"-", 260 | "actionPlanInstructions":"-", 261 | "controlMappingSources":[ 262 | { 263 | "sourceName":"Data source 1", 264 | "troubleshootingText":"Checks whether Amazon Elasticsearch Service (Amazon ES) domains are in Amazon Virtual Private Cloud (Amazon VPC). The rule is NON_COMPLIANT if the Amazon ES domain endpoint is public.", 265 | "sourceSetUpOption":"System_Controls_Mapping", 266 | "sourceType":"AWS_Config", 267 | "sourceKeyword":{ 268 | "keywordValue":"elasticsearch-in-vpc-only" 269 | } 270 | }, 271 | { 272 | "sourceName":"Data source 2", 273 | "troubleshootingText":"Checks whether Amazon Elastic MapReduce (EMR) clusters' master nodes have public IPs. The rule is NON_COMPLIANT if the master node has a public IP.", 274 | "sourceSetUpOption":"System_Controls_Mapping", 275 | "sourceType":"AWS_Config", 276 | "sourceKeyword":{ 277 | "keywordValue":"emr-master-no-public-ip" 278 | } 279 | }, 280 | { 281 | "sourceName":"Data source 3", 282 | "troubleshootingText":"Checks whether the AWS Identity and Access Management users have multi-factor authentication (MFA) enabled.", 283 | "sourceSetUpOption":"System_Controls_Mapping", 284 | "sourceType":"AWS_Config", 285 | "sourceKeyword":{ 286 | "keywordValue":"iam-user-mfa-enabled" 287 | } 288 | }, 289 | { 290 | "sourceName":"Data source 4", 291 | "troubleshootingText":"Checks whether Amazon Elastic Compute Cloud (Amazon EC2) instances have a public IP association. The rule is NON_COMPLIANT if the publicIp field is present in the Amazon EC2 instance configuration item. This rule applies only to IPv4.", 292 | "sourceSetUpOption":"System_Controls_Mapping", 293 | "sourceType":"AWS_Config", 294 | "sourceKeyword":{ 295 | "keywordValue":"ec2-instance-no-public-ip" 296 | } 297 | }, 298 | { 299 | "sourceName":"Data source 5", 300 | "troubleshootingText":"Checks whether Amazon Elastic Block Store snapshots are not publicly restorable. The rule is NON_COMPLIANT if one or more snapshots with the RestorableByUserIds field is set to all. If this field is set to all, then Amazon EBS snapshots are public.", 301 | "sourceSetUpOption":"System_Controls_Mapping", 302 | "sourceType":"AWS_Config", 303 | "sourceKeyword":{ 304 | "keywordValue":"ebs-snapshot-public-restorable-check" 305 | } 306 | } 307 | ] 308 | } 309 | }, 310 | "A.7 - Human resource security":{ 311 | "A.7.1.1 - Screening":{ 312 | "name":"A.7.1.1 - Screening", 313 | "description":"A.7.1.1 - Background verification checks on all candidates for employment shall be carried out in accordance with relevant laws, regulations and ethics and shall be proportional to the business requirements, the classification of the information to be accessed and the perceived risks.", 314 | "testingInformation":"-", 315 | "actionPlanTitle":"-", 316 | "actionPlanInstructions":"-", 317 | "controlMappingSources":[ 318 | { 319 | "sourceName":"Data source 1", 320 | "troubleshootingText":"Checks whether at least one AWS CloudTrail trail is logging Amazon S3 data events for all S3 buckets. The rule is NON_COMPLIANT if trails that log data events for S3 buckets are not configured. ", 321 | "sourceSetUpOption":"System_Controls_Mapping", 322 | "sourceType":"AWS_Config", 323 | "sourceKeyword":{ 324 | "keywordValue":"cloudtrail-s3-dataevents-enabled" 325 | } 326 | }, 327 | { 328 | "sourceName":"Data source 2", 329 | "troubleshootingText":"Checks whether AWS CloudTrail is enabled in your AWS account. Optionally, you can specify which S3 bucket, SNS topic, and Amazon CloudWatch Logs ARN to use.", 330 | "sourceSetUpOption":"System_Controls_Mapping", 331 | "sourceType":"AWS_Config", 332 | "sourceKeyword":{ 333 | "keywordValue":"cloudtrail-enabled" 334 | } 335 | }, 336 | { 337 | "sourceName":"Data source 3", 338 | "troubleshootingText":"Checks whether the Application Load Balancer and the Classic Load Balancer have logging enabled. The rule is NON_COMPLIANT if the access_logs.s3.enabled is false or access_logs.S3.bucket is not equal to the s3BucketName that you provided.", 339 | "sourceSetUpOption":"System_Controls_Mapping", 340 | "sourceType":"AWS_Config", 341 | "sourceKeyword":{ 342 | "keywordValue":"elb-logging-enabled" 343 | } 344 | }, 345 | { 346 | "sourceName":"Data source 4", 347 | "troubleshootingText":"Checks whether logging is enabled for your S3 buckets.", 348 | "sourceSetUpOption":"System_Controls_Mapping", 349 | "sourceType":"AWS_Config", 350 | "sourceKeyword":{ 351 | "keywordValue":"s3-bucket-logging-enabled" 352 | } 353 | }, 354 | { 355 | "sourceName":"Data source 5", 356 | "troubleshootingText":"Check whether the Amazon Relational Database Service instances are not publicly accessible. The rule is NON_COMPLIANT if the publiclyAccessible field is true in the instance configuration item.", 357 | "sourceSetUpOption":"System_Controls_Mapping", 358 | "sourceType":"AWS_Config", 359 | "sourceKeyword":{ 360 | "keywordValue":"rds-instance-public-access-check" 361 | } 362 | }, 363 | { 364 | "sourceName":"Data source 6", 365 | "troubleshootingText":"Checks if Amazon Relational Database Service (Amazon RDS) snapshots are public. The rule is NON_COMPLIANT if any existing and new Amazon RDS snapshots are public.", 366 | "sourceSetUpOption":"System_Controls_Mapping", 367 | "sourceType":"AWS_Config", 368 | "sourceKeyword":{ 369 | "keywordValue":"rds-snapshots-public-prohibited" 370 | } 371 | }, 372 | { 373 | "sourceName":"Data source 7", 374 | "troubleshootingText":"Checks whether Amazon Redshift clusters are not publicly accessible. The rule is NON_COMPLIANT if the publiclyAccessible field is true in the cluster configuration item.", 375 | "sourceSetUpOption":"System_Controls_Mapping", 376 | "sourceType":"AWS_Config", 377 | "sourceKeyword":{ 378 | "keywordValue":"redshift-cluster-public-access-check" 379 | } 380 | }, 381 | { 382 | "sourceName":"Data source 8", 383 | "troubleshootingText":"CloudTrail is a service which logs API calls made to the AWS platform. This provides information on what API call was made, who made the API call and when the call was made, as well as other information such as the IP address. ", 384 | "sourceSetUpOption":"System_Controls_Mapping", 385 | "sourceType":"AWS_Config", 386 | "sourceKeyword":{ 387 | "keywordValue":"multi-region-cloudtrail-enabled" 388 | } 389 | }, 390 | { 391 | "sourceName":"Data source 9", 392 | "troubleshootingText":"Checks that AWS Security Hub is enabled for an AWS account. The rule is NON_COMPLIANT if Security Hub is not enabled.", 393 | "sourceSetUpOption":"System_Controls_Mapping", 394 | "sourceType":"AWS_Config", 395 | "sourceKeyword":{ 396 | "keywordValue":"securityhub-enabled" 397 | } 398 | }, 399 | { 400 | "sourceName":"Data source 10", 401 | "troubleshootingText":"Checks whether AWS Database Migration Service replication instances are public. The rule is NON_COMPLIANT if PubliclyAccessible field is true.", 402 | "sourceSetUpOption":"System_Controls_Mapping", 403 | "sourceType":"AWS_Config", 404 | "sourceKeyword":{ 405 | "keywordValue":"dms-replication-not-public" 406 | } 407 | }, 408 | { 409 | "sourceName":"Data source 11", 410 | "troubleshootingText":"Checks whether Amazon GuardDuty is enabled in your AWS account and region. If you provide an AWS account for centralization, the rule evaluates the Amazon GuardDuty results in the centralized account. The rule is COMPLIANT when Amazon GuardDuty is enabled.", 411 | "sourceSetUpOption":"System_Controls_Mapping", 412 | "sourceType":"AWS_Config", 413 | "sourceKeyword":{ 414 | "keywordValue":"guardduty-enabled-centralized" 415 | } 416 | }, 417 | { 418 | "sourceName":"Data source 12", 419 | "troubleshootingText":"Checks whether the project contains environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY. The rule is NON_COMPLIANT when the project environment variables contains plaintext credentials.", 420 | "sourceSetUpOption":"System_Controls_Mapping", 421 | "sourceType":"AWS_Config", 422 | "sourceKeyword":{ 423 | "keywordValue":"codebuild-project-envvar-awscred-check" 424 | } 425 | }, 426 | { 427 | "sourceName":"Data source 13", 428 | "troubleshootingText":"Checks whether the AWS Lambda function policy attached to the Lambda resource prohibits public access. If the Lambda function policy allows public access it is NON_COMPLIANT.", 429 | "sourceSetUpOption":"System_Controls_Mapping", 430 | "sourceType":"AWS_Config", 431 | "sourceKeyword":{ 432 | "keywordValue":"lambda-functions-public-access-prohibited" 433 | } 434 | }, 435 | { 436 | "sourceName":"Data source 14", 437 | "troubleshootingText":"Checks whether Amazon Elastic Block Store snapshots are not publicly restorable. The rule is NON_COMPLIANT if one or more snapshots with the RestorableByUserIds field is set to all. If this field is set to all, then Amazon EBS snapshots are public.", 438 | "sourceSetUpOption":"System_Controls_Mapping", 439 | "sourceType":"AWS_Config", 440 | "sourceKeyword":{ 441 | "keywordValue":"ebs-snapshot-public-restorable-check" 442 | } 443 | }, 444 | { 445 | "sourceName":"Data source 15", 446 | "troubleshootingText":"Checks whether direct internet access is disabled for an Amazon SageMaker notebook instance. The rule is NON_COMPLIANT if SageMaker notebook instances are internet-enabled.", 447 | "sourceSetUpOption":"System_Controls_Mapping", 448 | "sourceType":"AWS_Config", 449 | "sourceKeyword":{ 450 | "keywordValue":"sagemaker-notebook-no-direct-internet-access" 451 | } 452 | }, 453 | { 454 | "sourceName":"Data source 16", 455 | "troubleshootingText":"Checks that your Amazon S3 buckets do not allow public read access. The rule checks the Block Public Access settings, the bucket policy, and the bucket access control list (ACL).", 456 | "sourceSetUpOption":"System_Controls_Mapping", 457 | "sourceType":"AWS_Config", 458 | "sourceKeyword":{ 459 | "keywordValue":"s3-bucket-public-read-prohibited" 460 | } 461 | }, 462 | { 463 | "sourceName":"Data source 17", 464 | "troubleshootingText":"Checks that your Amazon S3 buckets do not allow public write access. The rule checks the Block Public Access settings, the bucket policy, and the bucket access control list (ACL).", 465 | "sourceSetUpOption":"System_Controls_Mapping", 466 | "sourceType":"AWS_Config", 467 | "sourceKeyword":{ 468 | "keywordValue":"s3-bucket-public-write-prohibited" 469 | } 470 | } 471 | ] 472 | } 473 | } 474 | } 475 | -------------------------------------------------------------------------------- /frameworks/sampleData.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | A.6 - Organisation of information security: 3 | A.6.1.2 - Segregation of duties: 4 | name: A.6.1.2 - Segregation of duties 5 | description: A.6.1.2 - Conflicting duties and areas of responsibility shall be 6 | segregated to reduce opportunities for unauthorized or unintentional modification 7 | or misuse of the organization's assets. 8 | testingInformation: "-" 9 | actionPlanTitle: "-" 10 | actionPlanInstructions: "-" 11 | controlMappingSources: 12 | - sourceName: Data source 1 13 | troubleshootingText: Checks whether at least one AWS CloudTrail trail is logging 14 | Amazon S3 data events for all S3 buckets. The rule is NON_COMPLIANT if trails 15 | that log data events for S3 buckets are not configured. 16 | sourceSetUpOption: System_Controls_Mapping 17 | sourceType: AWS_Config 18 | sourceKeyword: 19 | keywordValue: cloudtrail-s3-dataevents-enabled 20 | - sourceName: Data source 2 21 | troubleshootingText: Checks whether AWS CloudTrail is enabled in your AWS account. 22 | Optionally, you can specify which S3 bucket, SNS topic, and Amazon CloudWatch 23 | Logs ARN to use. 24 | sourceSetUpOption: System_Controls_Mapping 25 | sourceType: AWS_Config 26 | sourceKeyword: 27 | keywordValue: cloudtrail-enabled 28 | - sourceName: Data source 3 29 | troubleshootingText: Checks whether IAM groups have at least one IAM user. 30 | sourceSetUpOption: System_Controls_Mapping 31 | sourceType: AWS_Config 32 | sourceKeyword: 33 | keywordValue: iam-group-has-users-check 34 | - sourceName: Data source 4 35 | troubleshootingText: Checks whether the Application Load Balancer and the Classic 36 | Load Balancer have logging enabled. The rule is NON_COMPLIANT if the access_logs.s3.enabled 37 | is false or access_logs.S3.bucket is not equal to the s3BucketName that you 38 | provided. 39 | sourceSetUpOption: System_Controls_Mapping 40 | sourceType: AWS_Config 41 | sourceKeyword: 42 | keywordValue: elb-logging-enabled 43 | - sourceName: Data source 5 44 | troubleshootingText: Checks whether logging is enabled for your S3 buckets. 45 | sourceSetUpOption: System_Controls_Mapping 46 | sourceType: AWS_Config 47 | sourceKeyword: 48 | keywordValue: s3-bucket-logging-enabled 49 | - sourceName: Data source 6 50 | troubleshootingText: Check whether the Amazon Relational Database Service instances 51 | are not publicly accessible. The rule is NON_COMPLIANT if the publiclyAccessible 52 | field is true in the instance configuration item. 53 | sourceSetUpOption: System_Controls_Mapping 54 | sourceType: AWS_Config 55 | sourceKeyword: 56 | keywordValue: rds-instance-public-access-check 57 | - sourceName: Data source 7 58 | troubleshootingText: Checks if Amazon Relational Database Service (Amazon RDS) 59 | snapshots are public. The rule is NON_COMPLIANT if any existing and new Amazon 60 | RDS snapshots are public. 61 | sourceSetUpOption: System_Controls_Mapping 62 | sourceType: AWS_Config 63 | sourceKeyword: 64 | keywordValue: rds-snapshots-public-prohibited 65 | - sourceName: Data source 8 66 | troubleshootingText: Checks whether Amazon Redshift clusters are not publicly 67 | accessible. The rule is NON_COMPLIANT if the publiclyAccessible field is true 68 | in the cluster configuration item. 69 | sourceSetUpOption: System_Controls_Mapping 70 | sourceType: AWS_Config 71 | sourceKeyword: 72 | keywordValue: redshift-cluster-public-access-check 73 | - sourceName: Data source 9 74 | troubleshootingText: CloudTrail is a service which logs API calls made to the 75 | AWS platform. This provides information on what API call was made, who made 76 | the API call and when the call was made, as well as other information such 77 | as the IP address. 78 | sourceSetUpOption: System_Controls_Mapping 79 | sourceType: AWS_Config 80 | sourceKeyword: 81 | keywordValue: multi-region-cloudtrail-enabled 82 | - sourceName: Data source 10 83 | troubleshootingText: " Checks whether the root user access key is available. 84 | The rule is COMPLIANT if the user access key does not exist." 85 | sourceSetUpOption: System_Controls_Mapping 86 | sourceType: AWS_Config 87 | sourceKeyword: 88 | keywordValue: iam-root-access-key-check 89 | - sourceName: Data source 11 90 | troubleshootingText: Checks that AWS Security Hub is enabled for an AWS account. 91 | The rule is NON_COMPLIANT if Security Hub is not enabled. 92 | sourceSetUpOption: System_Controls_Mapping 93 | sourceType: AWS_Config 94 | sourceKeyword: 95 | keywordValue: securityhub-enabled 96 | - sourceName: Data source 12 97 | troubleshootingText: Checks whether IAM users are members of at least one IAM 98 | group. 99 | sourceSetUpOption: System_Controls_Mapping 100 | sourceType: AWS_Config 101 | sourceKeyword: 102 | keywordValue: iam-user-group-membership-check 103 | - sourceName: Data source 13 104 | troubleshootingText: Checks that none of your IAM users have policies attached. 105 | IAM users must inherit permissions from IAM groups or roles. 106 | sourceSetUpOption: System_Controls_Mapping 107 | sourceType: AWS_Config 108 | sourceKeyword: 109 | keywordValue: iam-user-no-policies-check 110 | - sourceName: Data source 14 111 | troubleshootingText: Checks whether AWS Database Migration Service replication 112 | instances are public. The rule is NON_COMPLIANT if PubliclyAccessible field 113 | is true. 114 | sourceSetUpOption: System_Controls_Mapping 115 | sourceType: AWS_Config 116 | sourceKeyword: 117 | keywordValue: dms-replication-not-public 118 | - sourceName: Data source 15 119 | troubleshootingText: Checks whether Amazon GuardDuty is enabled in your AWS 120 | account and region. If you provide an AWS account for centralization, the 121 | rule evaluates the Amazon GuardDuty results in the centralized account. The 122 | rule is COMPLIANT when Amazon GuardDuty is enabled. 123 | sourceSetUpOption: System_Controls_Mapping 124 | sourceType: AWS_Config 125 | sourceKeyword: 126 | keywordValue: guardduty-enabled-centralized 127 | - sourceName: Data source 16 128 | troubleshootingText: 'Checks the IAM policies that you create for Allow statements 129 | that grant permissions to all actions on all resources. The rule is NON_COMPLIANT 130 | if any policy statement includes ''Effect'': ''Allow'' with ''Action'': ''*'' 131 | over ''Resource'': ''*''.' 132 | sourceSetUpOption: System_Controls_Mapping 133 | sourceType: AWS_Config 134 | sourceKeyword: 135 | keywordValue: iam-policy-no-statements-with-admin-access 136 | - sourceName: Data source 17 137 | troubleshootingText: Checks whether Amazon Virtual Private Cloud flow logs are 138 | found and enabled for Amazon VPC. 139 | sourceSetUpOption: System_Controls_Mapping 140 | sourceType: AWS_Config 141 | sourceKeyword: 142 | keywordValue: vpc-flow-logs-enabled 143 | - sourceName: Data source 18 144 | troubleshootingText: Checks whether the project contains environment variables 145 | AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY. The rule is NON_COMPLIANT when 146 | the project environment variables contains plaintext credentials. 147 | sourceSetUpOption: System_Controls_Mapping 148 | sourceType: AWS_Config 149 | sourceKeyword: 150 | keywordValue: codebuild-project-envvar-awscred-check 151 | - sourceName: Data source 19 152 | troubleshootingText: Checks whether the AWS Lambda function policy attached 153 | to the Lambda resource prohibits public access. If the Lambda function policy 154 | allows public access it is NON_COMPLIANT. 155 | sourceSetUpOption: System_Controls_Mapping 156 | sourceType: AWS_Config 157 | sourceKeyword: 158 | keywordValue: lambda-functions-public-access-prohibited 159 | - sourceName: Data source 20 160 | troubleshootingText: Checks whether the required public access block settings 161 | are configured from account level. The rule is only NON_COMPLIANT when the 162 | fields set below do not match the corresponding fields in the configuration 163 | item. 164 | sourceSetUpOption: System_Controls_Mapping 165 | sourceType: AWS_Config 166 | sourceKeyword: 167 | keywordValue: s3-account-level-public-access-blocks 168 | - sourceName: Data source 21 169 | troubleshootingText: Checks whether Amazon Elastic Block Store snapshots are 170 | not publicly restorable. The rule is NON_COMPLIANT if one or more snapshots 171 | with the RestorableByUserIds field is set to all. If this field is set to 172 | all, then Amazon EBS snapshots are public. 173 | sourceSetUpOption: System_Controls_Mapping 174 | sourceType: AWS_Config 175 | sourceKeyword: 176 | keywordValue: ebs-snapshot-public-restorable-check 177 | - sourceName: Data source 22 178 | troubleshootingText: Checks whether direct internet access is disabled for an 179 | Amazon SageMaker notebook instance. The rule is NON_COMPLIANT if SageMaker 180 | notebook instances are internet-enabled. 181 | sourceSetUpOption: System_Controls_Mapping 182 | sourceType: AWS_Config 183 | sourceKeyword: 184 | keywordValue: sagemaker-notebook-no-direct-internet-access 185 | - sourceName: Data source 23 186 | troubleshootingText: Checks that your Amazon S3 buckets do not allow public 187 | read access. The rule checks the Block Public Access settings, the bucket 188 | policy, and the bucket access control list (ACL). 189 | sourceSetUpOption: System_Controls_Mapping 190 | sourceType: AWS_Config 191 | sourceKeyword: 192 | keywordValue: s3-bucket-public-read-prohibited 193 | - sourceName: Data source 24 194 | troubleshootingText: Checks that your Amazon S3 buckets do not allow public 195 | write access. The rule checks the Block Public Access settings, the bucket 196 | policy, and the bucket access control list (ACL). 197 | sourceSetUpOption: System_Controls_Mapping 198 | sourceType: AWS_Config 199 | sourceKeyword: 200 | keywordValue: s3-bucket-public-write-prohibited 201 | A.6.1.4 - Contact with special interest groups: 202 | name: A.6.1.4 - Contact with special interest groups 203 | description: A.6.1.4 - Appropriate contacts with special interest groups or other 204 | specialist security forums and professional associations shall be maintained. 205 | testingInformation: "-" 206 | actionPlanTitle: "-" 207 | actionPlanInstructions: "-" 208 | controlMappingSources: 209 | - sourceName: Data source 1 210 | troubleshootingText: Checks that AWS Security Hub is enabled for an AWS account. 211 | The rule is NON_COMPLIANT if Security Hub is not enabled. 212 | sourceSetUpOption: System_Controls_Mapping 213 | sourceType: AWS_Config 214 | sourceKeyword: 215 | keywordValue: securityhub-enabled 216 | - sourceName: Data source 2 217 | troubleshootingText: Checks whether Amazon GuardDuty is enabled in your AWS 218 | account and region. If you provide an AWS account for centralization, the 219 | rule evaluates the Amazon GuardDuty results in the centralized account. The 220 | rule is COMPLIANT when Amazon GuardDuty is enabled. 221 | sourceSetUpOption: System_Controls_Mapping 222 | sourceType: AWS_Config 223 | sourceKeyword: 224 | keywordValue: guardduty-enabled-centralized 225 | A.6.2.2 - Teleworking: 226 | name: A.6.2.2 - Teleworking 227 | description: A.6.2.2 - A policy and supporting security measures shall be implemented 228 | to protect information accessed, processed or stored at teleworking sites. 229 | testingInformation: "-" 230 | actionPlanTitle: "-" 231 | actionPlanInstructions: "-" 232 | controlMappingSources: 233 | - sourceName: Data source 1 234 | troubleshootingText: Checks whether Amazon Elasticsearch Service (Amazon ES) 235 | domains are in Amazon Virtual Private Cloud (Amazon VPC). The rule is NON_COMPLIANT 236 | if the Amazon ES domain endpoint is public. 237 | sourceSetUpOption: System_Controls_Mapping 238 | sourceType: AWS_Config 239 | sourceKeyword: 240 | keywordValue: elasticsearch-in-vpc-only 241 | - sourceName: Data source 2 242 | troubleshootingText: Checks whether Amazon Elastic MapReduce (EMR) clusters' 243 | master nodes have public IPs. The rule is NON_COMPLIANT if the master node 244 | has a public IP. 245 | sourceSetUpOption: System_Controls_Mapping 246 | sourceType: AWS_Config 247 | sourceKeyword: 248 | keywordValue: emr-master-no-public-ip 249 | - sourceName: Data source 3 250 | troubleshootingText: Checks whether the AWS Identity and Access Management users 251 | have multi-factor authentication (MFA) enabled. 252 | sourceSetUpOption: System_Controls_Mapping 253 | sourceType: AWS_Config 254 | sourceKeyword: 255 | keywordValue: iam-user-mfa-enabled 256 | - sourceName: Data source 4 257 | troubleshootingText: Checks whether Amazon Elastic Compute Cloud (Amazon EC2) 258 | instances have a public IP association. The rule is NON_COMPLIANT if the publicIp 259 | field is present in the Amazon EC2 instance configuration item. This rule 260 | applies only to IPv4. 261 | sourceSetUpOption: System_Controls_Mapping 262 | sourceType: AWS_Config 263 | sourceKeyword: 264 | keywordValue: ec2-instance-no-public-ip 265 | - sourceName: Data source 5 266 | troubleshootingText: Checks whether Amazon Elastic Block Store snapshots are 267 | not publicly restorable. The rule is NON_COMPLIANT if one or more snapshots 268 | with the RestorableByUserIds field is set to all. If this field is set to 269 | all, then Amazon EBS snapshots are public. 270 | sourceSetUpOption: System_Controls_Mapping 271 | sourceType: AWS_Config 272 | sourceKeyword: 273 | keywordValue: ebs-snapshot-public-restorable-check 274 | A.7 - Human resource security: 275 | A.7.1.1 - Screening: 276 | name: A.7.1.1 - Screening 277 | description: A.7.1.1 - Background verification checks on all candidates for employment 278 | shall be carried out in accordance with relevant laws, regulations and ethics 279 | and shall be proportional to the business requirements, the classification of 280 | the information to be accessed and the perceived risks. 281 | testingInformation: "-" 282 | actionPlanTitle: "-" 283 | actionPlanInstructions: "-" 284 | controlMappingSources: 285 | - sourceName: Data source 1 286 | troubleshootingText: 'Checks whether at least one AWS CloudTrail trail is logging 287 | Amazon S3 data events for all S3 buckets. The rule is NON_COMPLIANT if trails 288 | that log data events for S3 buckets are not configured. ' 289 | sourceSetUpOption: System_Controls_Mapping 290 | sourceType: AWS_Config 291 | sourceKeyword: 292 | keywordValue: cloudtrail-s3-dataevents-enabled 293 | - sourceName: Data source 2 294 | troubleshootingText: Checks whether AWS CloudTrail is enabled in your AWS account. 295 | Optionally, you can specify which S3 bucket, SNS topic, and Amazon CloudWatch 296 | Logs ARN to use. 297 | sourceSetUpOption: System_Controls_Mapping 298 | sourceType: AWS_Config 299 | sourceKeyword: 300 | keywordValue: cloudtrail-enabled 301 | - sourceName: Data source 3 302 | troubleshootingText: Checks whether the Application Load Balancer and the Classic 303 | Load Balancer have logging enabled. The rule is NON_COMPLIANT if the access_logs.s3.enabled 304 | is false or access_logs.S3.bucket is not equal to the s3BucketName that you 305 | provided. 306 | sourceSetUpOption: System_Controls_Mapping 307 | sourceType: AWS_Config 308 | sourceKeyword: 309 | keywordValue: elb-logging-enabled 310 | - sourceName: Data source 4 311 | troubleshootingText: Checks whether logging is enabled for your S3 buckets. 312 | sourceSetUpOption: System_Controls_Mapping 313 | sourceType: AWS_Config 314 | sourceKeyword: 315 | keywordValue: s3-bucket-logging-enabled 316 | - sourceName: Data source 5 317 | troubleshootingText: Check whether the Amazon Relational Database Service instances 318 | are not publicly accessible. The rule is NON_COMPLIANT if the publiclyAccessible 319 | field is true in the instance configuration item. 320 | sourceSetUpOption: System_Controls_Mapping 321 | sourceType: AWS_Config 322 | sourceKeyword: 323 | keywordValue: rds-instance-public-access-check 324 | - sourceName: Data source 6 325 | troubleshootingText: Checks if Amazon Relational Database Service (Amazon RDS) 326 | snapshots are public. The rule is NON_COMPLIANT if any existing and new Amazon 327 | RDS snapshots are public. 328 | sourceSetUpOption: System_Controls_Mapping 329 | sourceType: AWS_Config 330 | sourceKeyword: 331 | keywordValue: rds-snapshots-public-prohibited 332 | - sourceName: Data source 7 333 | troubleshootingText: Checks whether Amazon Redshift clusters are not publicly 334 | accessible. The rule is NON_COMPLIANT if the publiclyAccessible field is true 335 | in the cluster configuration item. 336 | sourceSetUpOption: System_Controls_Mapping 337 | sourceType: AWS_Config 338 | sourceKeyword: 339 | keywordValue: redshift-cluster-public-access-check 340 | - sourceName: Data source 8 341 | troubleshootingText: 'CloudTrail is a service which logs API calls made to the 342 | AWS platform. This provides information on what API call was made, who made 343 | the API call and when the call was made, as well as other information such 344 | as the IP address. ' 345 | sourceSetUpOption: System_Controls_Mapping 346 | sourceType: AWS_Config 347 | sourceKeyword: 348 | keywordValue: multi-region-cloudtrail-enabled 349 | - sourceName: Data source 9 350 | troubleshootingText: Checks that AWS Security Hub is enabled for an AWS account. 351 | The rule is NON_COMPLIANT if Security Hub is not enabled. 352 | sourceSetUpOption: System_Controls_Mapping 353 | sourceType: AWS_Config 354 | sourceKeyword: 355 | keywordValue: securityhub-enabled 356 | - sourceName: Data source 10 357 | troubleshootingText: Checks whether AWS Database Migration Service replication 358 | instances are public. The rule is NON_COMPLIANT if PubliclyAccessible field 359 | is true. 360 | sourceSetUpOption: System_Controls_Mapping 361 | sourceType: AWS_Config 362 | sourceKeyword: 363 | keywordValue: dms-replication-not-public 364 | - sourceName: Data source 11 365 | troubleshootingText: Checks whether Amazon GuardDuty is enabled in your AWS 366 | account and region. If you provide an AWS account for centralization, the 367 | rule evaluates the Amazon GuardDuty results in the centralized account. The 368 | rule is COMPLIANT when Amazon GuardDuty is enabled. 369 | sourceSetUpOption: System_Controls_Mapping 370 | sourceType: AWS_Config 371 | sourceKeyword: 372 | keywordValue: guardduty-enabled-centralized 373 | - sourceName: Data source 12 374 | troubleshootingText: Checks whether the project contains environment variables 375 | AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY. The rule is NON_COMPLIANT when 376 | the project environment variables contains plaintext credentials. 377 | sourceSetUpOption: System_Controls_Mapping 378 | sourceType: AWS_Config 379 | sourceKeyword: 380 | keywordValue: codebuild-project-envvar-awscred-check 381 | - sourceName: Data source 13 382 | troubleshootingText: Checks whether the AWS Lambda function policy attached 383 | to the Lambda resource prohibits public access. If the Lambda function policy 384 | allows public access it is NON_COMPLIANT. 385 | sourceSetUpOption: System_Controls_Mapping 386 | sourceType: AWS_Config 387 | sourceKeyword: 388 | keywordValue: lambda-functions-public-access-prohibited 389 | - sourceName: Data source 14 390 | troubleshootingText: Checks whether Amazon Elastic Block Store snapshots are 391 | not publicly restorable. The rule is NON_COMPLIANT if one or more snapshots 392 | with the RestorableByUserIds field is set to all. If this field is set to 393 | all, then Amazon EBS snapshots are public. 394 | sourceSetUpOption: System_Controls_Mapping 395 | sourceType: AWS_Config 396 | sourceKeyword: 397 | keywordValue: ebs-snapshot-public-restorable-check 398 | - sourceName: Data source 15 399 | troubleshootingText: Checks whether direct internet access is disabled for an 400 | Amazon SageMaker notebook instance. The rule is NON_COMPLIANT if SageMaker 401 | notebook instances are internet-enabled. 402 | sourceSetUpOption: System_Controls_Mapping 403 | sourceType: AWS_Config 404 | sourceKeyword: 405 | keywordValue: sagemaker-notebook-no-direct-internet-access 406 | - sourceName: Data source 16 407 | troubleshootingText: Checks that your Amazon S3 buckets do not allow public 408 | read access. The rule checks the Block Public Access settings, the bucket 409 | policy, and the bucket access control list (ACL). 410 | sourceSetUpOption: System_Controls_Mapping 411 | sourceType: AWS_Config 412 | sourceKeyword: 413 | keywordValue: s3-bucket-public-read-prohibited 414 | - sourceName: Data source 17 415 | troubleshootingText: Checks that your Amazon S3 buckets do not allow public 416 | write access. The rule checks the Block Public Access settings, the bucket 417 | policy, and the bucket access control list (ACL). 418 | sourceSetUpOption: System_Controls_Mapping 419 | sourceType: AWS_Config 420 | sourceKeyword: 421 | keywordValue: s3-bucket-public-write-prohibited 422 | -------------------------------------------------------------------------------- /frameworks/sampleData.yml: -------------------------------------------------------------------------------- 1 | --- 2 | A.6 - Organisation of information security: 3 | A.6.1.2 - Segregation of duties: 4 | name: A.6.1.2 - Segregation of duties 5 | description: A.6.1.2 - Conflicting duties and areas of responsibility shall be 6 | segregated to reduce opportunities for unauthorized or unintentional modification 7 | or misuse of the organization's assets. 8 | testingInformation: "-" 9 | actionPlanTitle: "-" 10 | actionPlanInstructions: "-" 11 | controlMappingSources: 12 | - sourceName: Data source 1 13 | troubleshootingText: Checks whether at least one AWS CloudTrail trail is logging 14 | Amazon S3 data events for all S3 buckets. The rule is NON_COMPLIANT if trails 15 | that log data events for S3 buckets are not configured. 16 | sourceSetUpOption: System_Controls_Mapping 17 | sourceType: AWS_Config 18 | sourceKeyword: 19 | keywordValue: cloudtrail-s3-dataevents-enabled 20 | - sourceName: Data source 2 21 | troubleshootingText: Checks whether AWS CloudTrail is enabled in your AWS account. 22 | Optionally, you can specify which S3 bucket, SNS topic, and Amazon CloudWatch 23 | Logs ARN to use. 24 | sourceSetUpOption: System_Controls_Mapping 25 | sourceType: AWS_Config 26 | sourceKeyword: 27 | keywordValue: cloudtrail-enabled 28 | - sourceName: Data source 3 29 | troubleshootingText: Checks whether IAM groups have at least one IAM user. 30 | sourceSetUpOption: System_Controls_Mapping 31 | sourceType: AWS_Config 32 | sourceKeyword: 33 | keywordValue: iam-group-has-users-check 34 | - sourceName: Data source 4 35 | troubleshootingText: Checks whether the Application Load Balancer and the Classic 36 | Load Balancer have logging enabled. The rule is NON_COMPLIANT if the access_logs.s3.enabled 37 | is false or access_logs.S3.bucket is not equal to the s3BucketName that you 38 | provided. 39 | sourceSetUpOption: System_Controls_Mapping 40 | sourceType: AWS_Config 41 | sourceKeyword: 42 | keywordValue: elb-logging-enabled 43 | - sourceName: Data source 5 44 | troubleshootingText: Checks whether logging is enabled for your S3 buckets. 45 | sourceSetUpOption: System_Controls_Mapping 46 | sourceType: AWS_Config 47 | sourceKeyword: 48 | keywordValue: s3-bucket-logging-enabled 49 | - sourceName: Data source 6 50 | troubleshootingText: Check whether the Amazon Relational Database Service instances 51 | are not publicly accessible. The rule is NON_COMPLIANT if the publiclyAccessible 52 | field is true in the instance configuration item. 53 | sourceSetUpOption: System_Controls_Mapping 54 | sourceType: AWS_Config 55 | sourceKeyword: 56 | keywordValue: rds-instance-public-access-check 57 | - sourceName: Data source 7 58 | troubleshootingText: Checks if Amazon Relational Database Service (Amazon RDS) 59 | snapshots are public. The rule is NON_COMPLIANT if any existing and new Amazon 60 | RDS snapshots are public. 61 | sourceSetUpOption: System_Controls_Mapping 62 | sourceType: AWS_Config 63 | sourceKeyword: 64 | keywordValue: rds-snapshots-public-prohibited 65 | - sourceName: Data source 8 66 | troubleshootingText: Checks whether Amazon Redshift clusters are not publicly 67 | accessible. The rule is NON_COMPLIANT if the publiclyAccessible field is true 68 | in the cluster configuration item. 69 | sourceSetUpOption: System_Controls_Mapping 70 | sourceType: AWS_Config 71 | sourceKeyword: 72 | keywordValue: redshift-cluster-public-access-check 73 | - sourceName: Data source 9 74 | troubleshootingText: CloudTrail is a service which logs API calls made to the 75 | AWS platform. This provides information on what API call was made, who made 76 | the API call and when the call was made, as well as other information such 77 | as the IP address. 78 | sourceSetUpOption: System_Controls_Mapping 79 | sourceType: AWS_Config 80 | sourceKeyword: 81 | keywordValue: multi-region-cloudtrail-enabled 82 | - sourceName: Data source 10 83 | troubleshootingText: " Checks whether the root user access key is available. 84 | The rule is COMPLIANT if the user access key does not exist." 85 | sourceSetUpOption: System_Controls_Mapping 86 | sourceType: AWS_Config 87 | sourceKeyword: 88 | keywordValue: iam-root-access-key-check 89 | - sourceName: Data source 11 90 | troubleshootingText: Checks that AWS Security Hub is enabled for an AWS account. 91 | The rule is NON_COMPLIANT if Security Hub is not enabled. 92 | sourceSetUpOption: System_Controls_Mapping 93 | sourceType: AWS_Config 94 | sourceKeyword: 95 | keywordValue: securityhub-enabled 96 | - sourceName: Data source 12 97 | troubleshootingText: Checks whether IAM users are members of at least one IAM 98 | group. 99 | sourceSetUpOption: System_Controls_Mapping 100 | sourceType: AWS_Config 101 | sourceKeyword: 102 | keywordValue: iam-user-group-membership-check 103 | - sourceName: Data source 13 104 | troubleshootingText: Checks that none of your IAM users have policies attached. 105 | IAM users must inherit permissions from IAM groups or roles. 106 | sourceSetUpOption: System_Controls_Mapping 107 | sourceType: AWS_Config 108 | sourceKeyword: 109 | keywordValue: iam-user-no-policies-check 110 | - sourceName: Data source 14 111 | troubleshootingText: Checks whether AWS Database Migration Service replication 112 | instances are public. The rule is NON_COMPLIANT if PubliclyAccessible field 113 | is true. 114 | sourceSetUpOption: System_Controls_Mapping 115 | sourceType: AWS_Config 116 | sourceKeyword: 117 | keywordValue: dms-replication-not-public 118 | - sourceName: Data source 15 119 | troubleshootingText: Checks whether Amazon GuardDuty is enabled in your AWS 120 | account and region. If you provide an AWS account for centralization, the 121 | rule evaluates the Amazon GuardDuty results in the centralized account. The 122 | rule is COMPLIANT when Amazon GuardDuty is enabled. 123 | sourceSetUpOption: System_Controls_Mapping 124 | sourceType: AWS_Config 125 | sourceKeyword: 126 | keywordValue: guardduty-enabled-centralized 127 | - sourceName: Data source 16 128 | troubleshootingText: 'Checks the IAM policies that you create for Allow statements 129 | that grant permissions to all actions on all resources. The rule is NON_COMPLIANT 130 | if any policy statement includes ''Effect'': ''Allow'' with ''Action'': ''*'' 131 | over ''Resource'': ''*''.' 132 | sourceSetUpOption: System_Controls_Mapping 133 | sourceType: AWS_Config 134 | sourceKeyword: 135 | keywordValue: iam-policy-no-statements-with-admin-access 136 | - sourceName: Data source 17 137 | troubleshootingText: Checks whether Amazon Virtual Private Cloud flow logs are 138 | found and enabled for Amazon VPC. 139 | sourceSetUpOption: System_Controls_Mapping 140 | sourceType: AWS_Config 141 | sourceKeyword: 142 | keywordValue: vpc-flow-logs-enabled 143 | - sourceName: Data source 18 144 | troubleshootingText: Checks whether the project contains environment variables 145 | AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY. The rule is NON_COMPLIANT when 146 | the project environment variables contains plaintext credentials. 147 | sourceSetUpOption: System_Controls_Mapping 148 | sourceType: AWS_Config 149 | sourceKeyword: 150 | keywordValue: codebuild-project-envvar-awscred-check 151 | - sourceName: Data source 19 152 | troubleshootingText: Checks whether the AWS Lambda function policy attached 153 | to the Lambda resource prohibits public access. If the Lambda function policy 154 | allows public access it is NON_COMPLIANT. 155 | sourceSetUpOption: System_Controls_Mapping 156 | sourceType: AWS_Config 157 | sourceKeyword: 158 | keywordValue: lambda-functions-public-access-prohibited 159 | - sourceName: Data source 20 160 | troubleshootingText: Checks whether the required public access block settings 161 | are configured from account level. The rule is only NON_COMPLIANT when the 162 | fields set below do not match the corresponding fields in the configuration 163 | item. 164 | sourceSetUpOption: System_Controls_Mapping 165 | sourceType: AWS_Config 166 | sourceKeyword: 167 | keywordValue: s3-account-level-public-access-blocks 168 | - sourceName: Data source 21 169 | troubleshootingText: Checks whether Amazon Elastic Block Store snapshots are 170 | not publicly restorable. The rule is NON_COMPLIANT if one or more snapshots 171 | with the RestorableByUserIds field is set to all. If this field is set to 172 | all, then Amazon EBS snapshots are public. 173 | sourceSetUpOption: System_Controls_Mapping 174 | sourceType: AWS_Config 175 | sourceKeyword: 176 | keywordValue: ebs-snapshot-public-restorable-check 177 | - sourceName: Data source 22 178 | troubleshootingText: Checks whether direct internet access is disabled for an 179 | Amazon SageMaker notebook instance. The rule is NON_COMPLIANT if SageMaker 180 | notebook instances are internet-enabled. 181 | sourceSetUpOption: System_Controls_Mapping 182 | sourceType: AWS_Config 183 | sourceKeyword: 184 | keywordValue: sagemaker-notebook-no-direct-internet-access 185 | - sourceName: Data source 23 186 | troubleshootingText: Checks that your Amazon S3 buckets do not allow public 187 | read access. The rule checks the Block Public Access settings, the bucket 188 | policy, and the bucket access control list (ACL). 189 | sourceSetUpOption: System_Controls_Mapping 190 | sourceType: AWS_Config 191 | sourceKeyword: 192 | keywordValue: s3-bucket-public-read-prohibited 193 | - sourceName: Data source 24 194 | troubleshootingText: Checks that your Amazon S3 buckets do not allow public 195 | write access. The rule checks the Block Public Access settings, the bucket 196 | policy, and the bucket access control list (ACL). 197 | sourceSetUpOption: System_Controls_Mapping 198 | sourceType: AWS_Config 199 | sourceKeyword: 200 | keywordValue: s3-bucket-public-write-prohibited 201 | A.6.1.4 - Contact with special interest groups: 202 | name: A.6.1.4 - Contact with special interest groups 203 | description: A.6.1.4 - Appropriate contacts with special interest groups or other 204 | specialist security forums and professional associations shall be maintained. 205 | testingInformation: "-" 206 | actionPlanTitle: "-" 207 | actionPlanInstructions: "-" 208 | controlMappingSources: 209 | - sourceName: Data source 1 210 | troubleshootingText: Checks that AWS Security Hub is enabled for an AWS account. 211 | The rule is NON_COMPLIANT if Security Hub is not enabled. 212 | sourceSetUpOption: System_Controls_Mapping 213 | sourceType: AWS_Config 214 | sourceKeyword: 215 | keywordValue: securityhub-enabled 216 | - sourceName: Data source 2 217 | troubleshootingText: Checks whether Amazon GuardDuty is enabled in your AWS 218 | account and region. If you provide an AWS account for centralization, the 219 | rule evaluates the Amazon GuardDuty results in the centralized account. The 220 | rule is COMPLIANT when Amazon GuardDuty is enabled. 221 | sourceSetUpOption: System_Controls_Mapping 222 | sourceType: AWS_Config 223 | sourceKeyword: 224 | keywordValue: guardduty-enabled-centralized 225 | A.6.2.2 - Teleworking: 226 | name: A.6.2.2 - Teleworking 227 | description: A.6.2.2 - A policy and supporting security measures shall be implemented 228 | to protect information accessed, processed or stored at teleworking sites. 229 | testingInformation: "-" 230 | actionPlanTitle: "-" 231 | actionPlanInstructions: "-" 232 | controlMappingSources: 233 | - sourceName: Data source 1 234 | troubleshootingText: Checks whether Amazon Elasticsearch Service (Amazon ES) 235 | domains are in Amazon Virtual Private Cloud (Amazon VPC). The rule is NON_COMPLIANT 236 | if the Amazon ES domain endpoint is public. 237 | sourceSetUpOption: System_Controls_Mapping 238 | sourceType: AWS_Config 239 | sourceKeyword: 240 | keywordValue: elasticsearch-in-vpc-only 241 | - sourceName: Data source 2 242 | troubleshootingText: Checks whether Amazon Elastic MapReduce (EMR) clusters' 243 | master nodes have public IPs. The rule is NON_COMPLIANT if the master node 244 | has a public IP. 245 | sourceSetUpOption: System_Controls_Mapping 246 | sourceType: AWS_Config 247 | sourceKeyword: 248 | keywordValue: emr-master-no-public-ip 249 | - sourceName: Data source 3 250 | troubleshootingText: Checks whether the AWS Identity and Access Management users 251 | have multi-factor authentication (MFA) enabled. 252 | sourceSetUpOption: System_Controls_Mapping 253 | sourceType: AWS_Config 254 | sourceKeyword: 255 | keywordValue: iam-user-mfa-enabled 256 | - sourceName: Data source 4 257 | troubleshootingText: Checks whether Amazon Elastic Compute Cloud (Amazon EC2) 258 | instances have a public IP association. The rule is NON_COMPLIANT if the publicIp 259 | field is present in the Amazon EC2 instance configuration item. This rule 260 | applies only to IPv4. 261 | sourceSetUpOption: System_Controls_Mapping 262 | sourceType: AWS_Config 263 | sourceKeyword: 264 | keywordValue: ec2-instance-no-public-ip 265 | - sourceName: Data source 5 266 | troubleshootingText: Checks whether Amazon Elastic Block Store snapshots are 267 | not publicly restorable. The rule is NON_COMPLIANT if one or more snapshots 268 | with the RestorableByUserIds field is set to all. If this field is set to 269 | all, then Amazon EBS snapshots are public. 270 | sourceSetUpOption: System_Controls_Mapping 271 | sourceType: AWS_Config 272 | sourceKeyword: 273 | keywordValue: ebs-snapshot-public-restorable-check 274 | A.7 - Human resource security: 275 | A.7.1.1 - Screening: 276 | name: A.7.1.1 - Screening 277 | description: A.7.1.1 - Background verification checks on all candidates for employment 278 | shall be carried out in accordance with relevant laws, regulations and ethics 279 | and shall be proportional to the business requirements, the classification of 280 | the information to be accessed and the perceived risks. 281 | testingInformation: "-" 282 | actionPlanTitle: "-" 283 | actionPlanInstructions: "-" 284 | controlMappingSources: 285 | - sourceName: Data source 1 286 | troubleshootingText: 'Checks whether at least one AWS CloudTrail trail is logging 287 | Amazon S3 data events for all S3 buckets. The rule is NON_COMPLIANT if trails 288 | that log data events for S3 buckets are not configured. ' 289 | sourceSetUpOption: System_Controls_Mapping 290 | sourceType: AWS_Config 291 | sourceKeyword: 292 | keywordValue: cloudtrail-s3-dataevents-enabled 293 | - sourceName: Data source 2 294 | troubleshootingText: Checks whether AWS CloudTrail is enabled in your AWS account. 295 | Optionally, you can specify which S3 bucket, SNS topic, and Amazon CloudWatch 296 | Logs ARN to use. 297 | sourceSetUpOption: System_Controls_Mapping 298 | sourceType: AWS_Config 299 | sourceKeyword: 300 | keywordValue: cloudtrail-enabled 301 | - sourceName: Data source 3 302 | troubleshootingText: Checks whether the Application Load Balancer and the Classic 303 | Load Balancer have logging enabled. The rule is NON_COMPLIANT if the access_logs.s3.enabled 304 | is false or access_logs.S3.bucket is not equal to the s3BucketName that you 305 | provided. 306 | sourceSetUpOption: System_Controls_Mapping 307 | sourceType: AWS_Config 308 | sourceKeyword: 309 | keywordValue: elb-logging-enabled 310 | - sourceName: Data source 4 311 | troubleshootingText: Checks whether logging is enabled for your S3 buckets. 312 | sourceSetUpOption: System_Controls_Mapping 313 | sourceType: AWS_Config 314 | sourceKeyword: 315 | keywordValue: s3-bucket-logging-enabled 316 | - sourceName: Data source 5 317 | troubleshootingText: Check whether the Amazon Relational Database Service instances 318 | are not publicly accessible. The rule is NON_COMPLIANT if the publiclyAccessible 319 | field is true in the instance configuration item. 320 | sourceSetUpOption: System_Controls_Mapping 321 | sourceType: AWS_Config 322 | sourceKeyword: 323 | keywordValue: rds-instance-public-access-check 324 | - sourceName: Data source 6 325 | troubleshootingText: Checks if Amazon Relational Database Service (Amazon RDS) 326 | snapshots are public. The rule is NON_COMPLIANT if any existing and new Amazon 327 | RDS snapshots are public. 328 | sourceSetUpOption: System_Controls_Mapping 329 | sourceType: AWS_Config 330 | sourceKeyword: 331 | keywordValue: rds-snapshots-public-prohibited 332 | - sourceName: Data source 7 333 | troubleshootingText: Checks whether Amazon Redshift clusters are not publicly 334 | accessible. The rule is NON_COMPLIANT if the publiclyAccessible field is true 335 | in the cluster configuration item. 336 | sourceSetUpOption: System_Controls_Mapping 337 | sourceType: AWS_Config 338 | sourceKeyword: 339 | keywordValue: redshift-cluster-public-access-check 340 | - sourceName: Data source 8 341 | troubleshootingText: 'CloudTrail is a service which logs API calls made to the 342 | AWS platform. This provides information on what API call was made, who made 343 | the API call and when the call was made, as well as other information such 344 | as the IP address. ' 345 | sourceSetUpOption: System_Controls_Mapping 346 | sourceType: AWS_Config 347 | sourceKeyword: 348 | keywordValue: multi-region-cloudtrail-enabled 349 | - sourceName: Data source 9 350 | troubleshootingText: Checks that AWS Security Hub is enabled for an AWS account. 351 | The rule is NON_COMPLIANT if Security Hub is not enabled. 352 | sourceSetUpOption: System_Controls_Mapping 353 | sourceType: AWS_Config 354 | sourceKeyword: 355 | keywordValue: securityhub-enabled 356 | - sourceName: Data source 10 357 | troubleshootingText: Checks whether AWS Database Migration Service replication 358 | instances are public. The rule is NON_COMPLIANT if PubliclyAccessible field 359 | is true. 360 | sourceSetUpOption: System_Controls_Mapping 361 | sourceType: AWS_Config 362 | sourceKeyword: 363 | keywordValue: dms-replication-not-public 364 | - sourceName: Data source 11 365 | troubleshootingText: Checks whether Amazon GuardDuty is enabled in your AWS 366 | account and region. If you provide an AWS account for centralization, the 367 | rule evaluates the Amazon GuardDuty results in the centralized account. The 368 | rule is COMPLIANT when Amazon GuardDuty is enabled. 369 | sourceSetUpOption: System_Controls_Mapping 370 | sourceType: AWS_Config 371 | sourceKeyword: 372 | keywordValue: guardduty-enabled-centralized 373 | - sourceName: Data source 12 374 | troubleshootingText: Checks whether the project contains environment variables 375 | AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY. The rule is NON_COMPLIANT when 376 | the project environment variables contains plaintext credentials. 377 | sourceSetUpOption: System_Controls_Mapping 378 | sourceType: AWS_Config 379 | sourceKeyword: 380 | keywordValue: codebuild-project-envvar-awscred-check 381 | - sourceName: Data source 13 382 | troubleshootingText: Checks whether the AWS Lambda function policy attached 383 | to the Lambda resource prohibits public access. If the Lambda function policy 384 | allows public access it is NON_COMPLIANT. 385 | sourceSetUpOption: System_Controls_Mapping 386 | sourceType: AWS_Config 387 | sourceKeyword: 388 | keywordValue: lambda-functions-public-access-prohibited 389 | - sourceName: Data source 14 390 | troubleshootingText: Checks whether Amazon Elastic Block Store snapshots are 391 | not publicly restorable. The rule is NON_COMPLIANT if one or more snapshots 392 | with the RestorableByUserIds field is set to all. If this field is set to 393 | all, then Amazon EBS snapshots are public. 394 | sourceSetUpOption: System_Controls_Mapping 395 | sourceType: AWS_Config 396 | sourceKeyword: 397 | keywordValue: ebs-snapshot-public-restorable-check 398 | - sourceName: Data source 15 399 | troubleshootingText: Checks whether direct internet access is disabled for an 400 | Amazon SageMaker notebook instance. The rule is NON_COMPLIANT if SageMaker 401 | notebook instances are internet-enabled. 402 | sourceSetUpOption: System_Controls_Mapping 403 | sourceType: AWS_Config 404 | sourceKeyword: 405 | keywordValue: sagemaker-notebook-no-direct-internet-access 406 | - sourceName: Data source 16 407 | troubleshootingText: Checks that your Amazon S3 buckets do not allow public 408 | read access. The rule checks the Block Public Access settings, the bucket 409 | policy, and the bucket access control list (ACL). 410 | sourceSetUpOption: System_Controls_Mapping 411 | sourceType: AWS_Config 412 | sourceKeyword: 413 | keywordValue: s3-bucket-public-read-prohibited 414 | - sourceName: Data source 17 415 | troubleshootingText: Checks that your Amazon S3 buckets do not allow public 416 | write access. The rule checks the Block Public Access settings, the bucket 417 | policy, and the bucket access control list (ACL). 418 | sourceSetUpOption: System_Controls_Mapping 419 | sourceType: AWS_Config 420 | sourceKeyword: 421 | keywordValue: s3-bucket-public-write-prohibited 422 | -------------------------------------------------------------------------------- /generateYAMLFramework.py: -------------------------------------------------------------------------------- 1 | # Core python packages 2 | import yaml 3 | 4 | # 3rd party packages 5 | import boto3 6 | 7 | def create_yaml_controls( 8 | name_report: str, 9 | filename: str, 10 | region_name = None 11 | ): 12 | 13 | 14 | auditmanager_client = boto3.client('auditmanager', region_name=region_name) 15 | 16 | ## create a zero byte yaml file 17 | open(filename, 'w', encoding="utf-8" ).close() 18 | 19 | framework_list_response = auditmanager_client.list_assessment_frameworks(frameworkType='Standard') 20 | 21 | 22 | for framework_list in framework_list_response['frameworkMetadataList']: 23 | if framework_list['name'] == name_report: 24 | id_report = framework_list['id'] 25 | 26 | framework_controls_response = auditmanager_client.get_assessment_framework( 27 | frameworkId=id_report 28 | ) 29 | 30 | yaml_dict = {} 31 | 32 | for control_sets in framework_controls_response['framework']['controlSets']: 33 | 34 | control_sets_dict = {} 35 | 36 | for controls in control_sets['controls']: 37 | 38 | controls_dict = {} 39 | 40 | controls_dict['name'] = controls['name'].strip()[0:300] 41 | if 'description' in controls.keys(): 42 | controls_dict['description'] = controls['description'].strip()[0:1000] 43 | else: 44 | controls_dict['description'] = "Not Available" 45 | controls_dict['testingInformation'] = "-" 46 | controls_dict['actionPlanTitle'] = "-" 47 | controls_dict['actionPlanInstructions'] = "-" 48 | 49 | for control_map in controls['controlMappingSources']: 50 | control_map['sourceName'] = control_map['sourceId'] 51 | control_map.pop('sourceId') 52 | 53 | controls_dict['controlMappingSources'] = controls['controlMappingSources'] 54 | control_sets_dict[controls['name'].strip()] = controls_dict 55 | 56 | yaml_dict[control_sets['name']] = control_sets_dict 57 | 58 | with open(filename, 'a', encoding="utf-8") as file: 59 | documents = yaml.dump(yaml_dict, file, sort_keys=False) 60 | -------------------------------------------------------------------------------- /images/audit-manager-automation-diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-audit-manager-custom-frameworks/8aa6aaae941df23ebe651041800d9c93b3273b03/images/audit-manager-automation-diagram.png -------------------------------------------------------------------------------- /mergeMultiFramework.py: -------------------------------------------------------------------------------- 1 | # Core python packages 2 | import sys 3 | 4 | # 3rd party packages 5 | import boto3 6 | 7 | # /src 8 | from utils import * 9 | 10 | def merge_multiple_framework( 11 | custom_report_name: str, 12 | filename: str, 13 | region_name=None, 14 | ) -> dict: 15 | format_yaml = open_yaml(input_file=filename) 16 | """ 17 | Creates a custom assessment framework in Audit Manager by merging multiple frameworks using YAML 18 | 19 | Args: 20 | custom report_name (*string*)-- 21 | [**REQUIRED**] 22 | The name of existing framework. 23 | 24 | filename (*string*)-- 25 | [**REQUIRED**] 26 | The filepath of aml file 27 | 28 | 29 | Raises: 30 | error: raises Boto3 ClientError 31 | 32 | Returns: 33 | [type]: [description] 34 | """ 35 | 36 | auditmanager_client = boto3.client('auditmanager', region_name=region_name) 37 | 38 | ## List all standard frameworks 39 | try: 40 | framework_response = auditmanager_client.list_assessment_frameworks(frameworkType='Standard') 41 | except botocore.exceptions.ClientError as error: 42 | raise error 43 | 44 | already_exists = check_framework_existence(custom_report_name) 45 | 46 | if already_exists: 47 | sys.exit( 48 | f"The customer framework {custom_report_name} already exists. Please note that Framework name within the AWS account should be unique") 49 | 50 | ## replace framework name with framework id 51 | format_yaml_orig = format_yaml.copy() 52 | for yaml_keys in format_yaml_orig.keys(): 53 | for f_response in framework_response.get('frameworkMetadataList'): 54 | if f_response['name'] == yaml_keys: 55 | format_yaml[f_response['id']] = format_yaml[yaml_keys] 56 | del format_yaml[yaml_keys] 57 | 58 | ## creates an output list 59 | yaml_control_sets_list = [] 60 | 61 | for yaml_items in format_yaml.items(): ## iterates through yaml file 62 | for yaml_control_sets in yaml_items[1].keys(): ## iterates through control_sets 63 | for control_sets in auditmanager_client.get_assessment_framework(frameworkId=yaml_items[0])['framework']['controlSets']: ## fetches list of controlSets of single framework 64 | yaml_control_sets_dict = {} 65 | control_list = [] 66 | if control_sets['name'] == yaml_control_sets: ## compare name of control_sets with input_yaml_control sets ## Returns list of list as an output 67 | for yaml_control_list in yaml_items[1].values(): ## will fetch corresponding controls from control sets 68 | for yaml_controls in yaml_control_list: ## will fetch corresponding controls from control sets. We do it twice as we get list of list as an output 69 | for controls in control_sets['controls']: ## iterates through controls 70 | if controls['name'] == yaml_controls: ## compares with input yaml and convert into required format. 71 | controls_dict = {} 72 | controls_dict['id'] = controls['id'] ## we need control's Id as an input 73 | control_list.append(controls_dict) 74 | 75 | if len(control_list) > 0: 76 | yaml_control_sets_dict['name'] = controls['name'] 77 | yaml_control_sets_dict['controls'] = control_list 78 | yaml_control_sets_list.append(yaml_control_sets_dict) 79 | try: 80 | response = auditmanager_client.create_assessment_framework( 81 | name=custom_report_name, 82 | controlSets=yaml_control_sets_list 83 | ) 84 | logging.debug(response) 85 | if response['ResponseMetadata']['HTTPStatusCode'] == 200: 86 | logging.info(f"Custom Framework {custom_report_name} Created successfully") 87 | except botocore.exceptions.ClientError as error: 88 | raise error -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | boto3==1.17.24 2 | botocore==1.20.24 3 | pyyaml==5.4.1 -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | # Core python packages 2 | import json 3 | import logging 4 | import sys 5 | import os 6 | 7 | 8 | # 3rd party packages - see requirements.txt 9 | import boto3 10 | import botocore.exceptions 11 | import yaml 12 | 13 | # src python 14 | from utils import * 15 | def open_input(filepath: str = None) -> str: 16 | """ 17 | Performs a check on the --filepath argument to attempt to find the 18 | template file for the custom control and custom framework. 19 | 20 | It looks in three locations. 21 | 22 | * A complete path to the file 23 | * A file in /frameworks 24 | * An S3 URL 25 | 26 | Args: 27 | filepath (*string*, REQUIRED): 28 | 29 | A string which defined the path to the template file for the 30 | custom controls and custome framework 31 | 32 | Defaults to None. 33 | 34 | Raises: 35 | FileNotFoundError: The FileNotFoundError will be raised if it cannot 36 | locate the file 37 | """ 38 | script_dir = os.path.dirname(os.path.realpath(__file__)) 39 | framework_dir = os.path.join(script_dir, 'frameworks') 40 | try: 41 | if os.path.isfile(filepath) is True: 42 | logging.info( 43 | 'Input file found at {}'.format(filepath)) 44 | if filepath.endswith(".json") is True: 45 | contents = open_json(input_file=filepath) 46 | return contents 47 | elif ( 48 | filepath.endswith(".yaml") is True 49 | or 50 | filepath.endswith(".yml") is True 51 | ): 52 | contents = open_yaml(input_file=filepath) 53 | return contents 54 | else: 55 | logging.info("Incorrect file type. An input file should be yaml or json.") 56 | sys.exit(1) 57 | elif os.path.isfile(os.path.join(framework_dir, filepath)) is True: 58 | filepath = os.path.join(framework_dir, filepath) 59 | logging.info( 60 | 'Input file found in {} directory'.format( 61 | os.path.join(framework_dir, filepath))) 62 | if filepath.endswith('.json') is True: 63 | contents = open_json(input_file=filepath) 64 | return contents 65 | elif ( 66 | filepath.endswith(".yaml") is True 67 | or 68 | filepath.endswith(".yml") is True 69 | ): 70 | contents = open_yaml(input_file=filepath) 71 | return contents 72 | else: 73 | logging.info("Incorrect file type. An input file should be yaml or json.") 74 | sys.exit(1) 75 | elif filepath.startswith('s3://') is True: 76 | logging.info('Input file is in S3 bucket {}'.format(filepath)) 77 | if filepath.endswith('.json') is True: 78 | contents = json.load(get_object_from_s3(filepath)) 79 | return contents 80 | elif ( 81 | filepath.endswith(".yaml") is True 82 | or 83 | filepath.endswith(".yml") is True 84 | ): 85 | contents = yaml.safe_load(get_object_from_s3(filepath)) 86 | return contents 87 | else: 88 | logging.info("Incorrect file type. An input file should be yaml or json.") 89 | sys.exit(1) 90 | else: 91 | raise FileNotFoundError 92 | except FileNotFoundError as error: 93 | raise error 94 | 95 | 96 | def get_object_from_s3(s3_path: str = None) -> dict: 97 | s3 = boto3.client('s3') 98 | try: 99 | bucket_path = s3_path.split('/') 100 | bucket_name = bucket_path[2] 101 | key = bucket_path[(len(bucket_path) - 1)] 102 | response = s3.get_object( 103 | Bucket=bucket_name, 104 | Key=key 105 | ) 106 | file_contents = response['Body'].read().decode() 107 | return file_contents 108 | except botocore.exceptions.ClientError as error: 109 | raise error 110 | 111 | def open_yaml(input_file: str = None) -> dict: 112 | with open(input_file, 'r', encoding="utf8") as file_object: 113 | contents = yaml.safe_load(file_object) 114 | logging.debug(contents) 115 | return contents 116 | 117 | 118 | def open_json(input_file: str = None) -> dict: 119 | with open(input_file, 'r', encoding="utf8") as file_object: 120 | contents = json.load(file_object) 121 | logging.debug(contents) 122 | return contents 123 | 124 | 125 | def get_account_status(region_name=None): 126 | auditmanager_client = boto3.client('auditmanager', region_name=None) 127 | try: 128 | response_get_status = auditmanager_client.get_account_status() 129 | return response_get_status 130 | except botocore.exceptions.ClientError as error: 131 | raise error 132 | 133 | 134 | def register_account(region_name=None): 135 | auditmanager_client = boto3.client('auditmanager', region_name=region_name) 136 | try: 137 | get_status = get_account_status(region_name=region_name) 138 | if get_status["status"] != "ACTIVE": 139 | response = auditmanager_client.register_account() 140 | return response 141 | except botocore.exceptions.ClientError as error: 142 | raise error 143 | 144 | 145 | def check_framework_existence(custom_report_name, region_name=None): 146 | auditmanager_client = boto3.client('auditmanager', region_name=region_name) 147 | existing_frameworks = (auditmanager_client.list_assessment_frameworks(frameworkType="Custom")) 148 | already_exists = False 149 | for existing in existing_frameworks["frameworkMetadataList"]: 150 | # Checking if the framework being created already exists and 151 | # updating it 152 | if existing["name"] == custom_report_name: 153 | already_exists = True 154 | return already_exists 155 | --------------------------------------------------------------------------------