├── .coveragerc ├── .dockerignore ├── .github └── workflows │ ├── publish.yml │ └── test.yml ├── .gitignore ├── .pre-commit-config.yaml ├── CODE_OF_CONDUCT.md ├── Dockerfile ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.md ├── aws ├── __init__.py ├── autoscaling │ ├── __init__.py │ └── resources.py ├── client.py ├── cloudtrail │ ├── __init__.py │ ├── resources.py │ ├── test_cloudtrail_enabled_in_all_regions.py │ └── test_cloudtrail_log_validation_enabled.py ├── conftest.py ├── ec2 │ ├── __init__.py │ ├── helpers.py │ ├── resources.py │ ├── test_ec2_all_eips_bound.py │ ├── test_ec2_ebs_snapshot_not_too_old.py │ ├── test_ec2_ebs_snapshots_are_private.py │ ├── test_ec2_ebs_volume_attached_to_instance.py │ ├── test_ec2_ebs_volume_encrypted.py │ ├── test_ec2_ebs_volume_not_piops.py │ ├── test_ec2_instance_has_required_tags.py │ ├── test_ec2_instance_on_acceptable_ami.py │ ├── test_ec2_security_group_in_use.py │ ├── test_ec2_security_group_opens_all_ports.py │ ├── test_ec2_security_group_opens_all_ports_to_all.py │ ├── test_ec2_security_group_opens_all_ports_to_self.py │ ├── test_ec2_security_group_opens_specific_ports_to_all.py │ └── test_ec2_vpc_flow_log_enabled.py ├── elasticache │ ├── __init__.py │ └── resources.py ├── elasticsearch │ ├── __init__.py │ ├── resources.py │ └── test_elasticsearch_domains_have_logging_enabled.py ├── elb │ ├── resources.py │ ├── test_elb_desync_mode.py │ └── test_elb_instances_attached.py ├── iam │ ├── __init__.py │ ├── helpers.py │ ├── resources.py │ ├── test_iam_access_key_is_old.py │ ├── test_iam_admin_user_with_access_keys.py │ ├── test_iam_admin_user_without_mfa.py │ ├── test_iam_cross_account_admin_roles_require_mfa.py │ ├── test_iam_user_is_inactive.py │ └── test_iam_user_without_mfa.py ├── rds │ ├── __init__.py │ ├── helpers.py │ ├── resources.py │ ├── test_rds_db_instance_backup_enabled.py │ ├── test_rds_db_instance_encrypted.py │ ├── test_rds_db_instance_is_multiaz.py │ ├── test_rds_db_instance_is_postgres_with_invalid_certificate.py │ ├── test_rds_db_instance_minor_version_updates_enabled.py │ ├── test_rds_db_instance_not_publicly_accessible_by_vpc_sg.py │ ├── test_rds_db_instance_storage_type_not_piops.py │ ├── test_rds_db_security_group_does_not_grant_public_access.py │ ├── test_rds_db_snapshot_encrypted.py │ ├── test_rds_db_snapshot_not_publicly_accessible.py │ └── test_rds_db_snapshot_not_too_old.py ├── redshift │ ├── __init__.py │ ├── helpers.py │ ├── resources.py │ └── test_redshift_security_group_does_not_allow_all_ips_access.py ├── route53 │ ├── __init__.py │ ├── resources.py │ └── test_route53_cnames_minimum_ttl_or_greater.py ├── s3 │ ├── __init__.py │ ├── helpers.py │ ├── resources.py │ ├── test_s3_bucket_cors_disabled.py │ ├── test_s3_bucket_does_not_grant_all_principals_all_actions.py │ ├── test_s3_bucket_has_life_cycle_policy.py │ ├── test_s3_bucket_logging_enabled.py │ ├── test_s3_bucket_no_world_acl.py │ ├── test_s3_bucket_versioning_enabled.py │ ├── test_s3_bucket_versioning_mfa_delete_enabled.py │ └── test_s3_bucket_web_hosting_disabled.py └── sns │ ├── __init__.py │ ├── resources.py │ ├── test_sns_pending_verified.py │ ├── test_sns_subscriptions_without_topics.py │ └── test_sns_topics_without_subscriptions.py ├── cache.py ├── config.yaml.example ├── conftest.py ├── custom_config.py ├── docs ├── .nojekyll ├── Architecture.rst ├── CodingConventions.rst ├── ContributingDocumentation.rst ├── FAQ.rst ├── Makefile ├── MozillaDeployment.rst ├── NewServices.rst ├── Source.rst ├── UseCases.rst ├── conf.py ├── frost-snowman-logo.png ├── 
index.rst ├── readme-include.md └── requirements.txt ├── example_cache └── v │ ├── cache │ └── lastfailed │ ├── pytest_aws:example-account:us-east-1:iam:list_user_policies::UserName=spacemanspiff.json │ ├── pytest_aws:example-account:us-east-1:iam:list_user_policies::UserName=tigerone.json │ └── pytest_aws:example-account:us-east-1:iam:list_users::.json ├── exemptions.py ├── frost ├── __init__.py └── cli.py ├── gcp ├── __init__.py ├── bigquery │ ├── __init__.py │ ├── resources.py │ └── test_dataset_not_publicly_accessible.py ├── client.py ├── compute │ ├── __init__.py │ ├── helpers.py │ ├── resources.py │ ├── test_firewall_opens_all_ports_to_all.py │ ├── test_firewall_opens_all_ports_to_any.py │ ├── test_firewall_opens_any_ports_to_all.py │ ├── test_gke_version_up_to_date.py │ └── test_only_allowed_gke_versions.py ├── conftest.py ├── iam │ ├── __init__.py │ ├── helpers.py │ ├── resources.py │ ├── test_admin_service_accounts.py │ ├── test_only_allowed_org_accounts.py │ └── test_service_account_key_is_old.py └── sql │ ├── __init__.py │ ├── resources.py │ ├── test_sql_instance_automatic_backup_enabled.py │ ├── test_sql_instance_private_ip_required.py │ └── test_sql_instance_ssl_required.py ├── gsuite ├── README.md ├── __init__.py ├── admin │ ├── __init__.py │ ├── helpers.py │ ├── resources.py │ ├── test_admin_user_is_inactive.py │ └── test_groups_have_enough_owners.py ├── client.py └── conftest.py ├── helpers.py ├── meta_test_cache.py ├── renovate.json ├── requirements.txt ├── service_report_generator.py ├── setup.py └── severity.py /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | branch = True 3 | omit = venv/* 4 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | 7 | # C extensions 8 | *.so 9 | 10 | # Distribution / packaging 11 | .Python 12 | env/ 13 | build/ 14 | develop-eggs/ 15 | dist/ 16 | downloads/ 17 | eggs/ 18 | .eggs/ 19 | lib/ 20 | lib64/ 21 | parts/ 22 | sdist/ 23 | var/ 24 | wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .coverage 43 | .coverage.* 44 | .cache 45 | nosetests.xml 46 | coverage.xml 47 | *.cover 48 | .hypothesis/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | 58 | # Flask stuff: 59 | instance/ 60 | .webassets-cache 61 | 62 | # Scrapy stuff: 63 | .scrapy 64 | 65 | # Sphinx documentation 66 | docs/_build/ 67 | 68 | # PyBuilder 69 | target/ 70 | 71 | # Jupyter Notebook 72 | .ipynb_checkpoints 73 | 74 | # pyenv 75 | .python-version 76 | 77 | # celery beat schedule file 78 | celerybeat-schedule 79 | 80 | # SageMath parsed files 81 | *.sage.py 82 | 83 | # dotenv 84 | .env 85 | 86 | # virtualenv 87 | .venv 88 | venv/ 89 | ENV/ 90 | 91 | # Spyder project settings 92 | .spyderproject 93 | .spyproject 94 | 95 | # Rope project settings 96 | .ropeproject 97 | 98 | # mkdocs documentation 99 | /site 100 | 101 | # mypy 102 | .mypy_cache/ 103 | 104 | # VCS 105 | .git 106 | 107 | # CI 108 | .circleci 109 | .travis.yml 110 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | # Upload a Python Package using Twine when a semver tag is pushed to 2 | # the default branch 3 | # 4 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries 5 | 6 | name: Publish to PyPI and GH Pages 7 | 8 | on: 9 | push: 10 | # Sequence of patterns matched against refs/tags 11 | tags: 12 | - '[0-9]+\.[0-9]+\.[0-9]+' 13 | 14 | jobs: 15 | publish-gh-pages: 16 | runs-on: ubuntu-latest 17 | 18 | steps: 19 | - uses: actions/checkout@v2 20 | with: 21 | ssh-key: ${{ secrets.DEPLOY_KEY }} 22 | 23 | - name: Set up Python 24 | uses: actions/setup-python@v2 25 | with: 26 | python-version: '3.8' 27 | 28 | - name: Install doc dependencies in venv 29 | run: | 30 | make install install-docs 31 | 32 | - name: build docs 33 | shell: bash 34 | run: | 35 | (source venv/bin/activate && make doc-build) 36 | 37 | - name: publish docs to GH pages 38 | shell: bash 39 | run: | 40 | git config --local user.email "publish@gha" 41 | git config --local user.name "frost publish-gh-pages GHA" 42 | 43 | mv -f docs/_build/html/* . 44 | git add . 
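# commit the built site and force-push it to the gh-pages branch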
45 | git commit -vm "build docs for gh-page for commit ${{ github.sha }}" 46 | git push -f origin HEAD:gh-pages 47 | 48 | publish-package: 49 | runs-on: ubuntu-latest 50 | 51 | steps: 52 | - uses: actions/checkout@v2 53 | 54 | - name: Set up Python 55 | uses: actions/setup-python@v2 56 | with: 57 | python-version: '3.8' 58 | 59 | - name: Install publish dependencies 60 | run: | 61 | python -m pip install --upgrade pip 62 | pip install setuptools wheel twine 63 | 64 | - name: Install package dependencies 65 | run: | 66 | pip install -r requirements.txt 67 | 68 | - name: Build and publish 69 | env: 70 | TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} 71 | TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} 72 | run: | 73 | python setup.py sdist bdist_wheel 74 | twine upload dist/* 75 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | 3 | on: [push, pull_request] 4 | 5 | 6 | jobs: 7 | build: 8 | runs-on: ${{ matrix.os }} 9 | strategy: 10 | fail-fast: false 11 | matrix: 12 | python-version: [3.8] 13 | os: [ubuntu-latest, ubuntu-20.04] 14 | 15 | steps: 16 | - uses: actions/checkout@v2 17 | 18 | - name: Set up Python ${{ matrix.python-version }} 19 | uses: actions/setup-python@v2 20 | with: 21 | python-version: ${{ matrix.python-version }} 22 | 23 | - name: Get pip cache dir 24 | id: pip-cache 25 | run: | 26 | echo "::set-output name=dir::$(pip cache dir)" 27 | 28 | - name: Cache 29 | uses: actions/cache@v2 30 | with: 31 | path: ${{ steps.pip-cache.outputs.dir }} 32 | key: 33 | ${{ matrix.os }}-${{ matrix.python-version }}-${{ hashFiles('**/setup.py') }} 34 | restore-keys: | 35 | ${{ matrix.os }}-${{ matrix.python-version }}- 36 | 37 | - name: Install dependencies 38 | shell: bash 39 | run: | 40 | make install install-docs 41 | 42 | - name: Run Tests 43 | shell: bash 44 | run: | 45 | (source venv/bin/activate && make black check_conftest_imports doctest coverage doc-build) 46 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | 57 | # Flask stuff: 58 | instance/ 59 | .webassets-cache 60 | 61 | # Scrapy stuff: 62 | .scrapy 63 | 64 | # Sphinx generated documentation 65 | docs/_build/ 66 | docs/source/ 67 | 68 | # PyBuilder 69 | target/ 70 | 71 | # Jupyter Notebook 72 | .ipynb_checkpoints 73 | 74 | # pyenv 75 | .python-version 76 | 77 | # celery beat schedule file 78 | celerybeat-schedule 79 | 80 | # SageMath parsed files 81 | *.sage.py 82 | 83 | # dotenv 84 | .env 85 | 86 | # virtualenv 87 | .venv 88 | venv/ 89 | ENV/ 90 | 91 | # Spyder project settings 92 | .spyderproject 93 | .spyproject 94 | 95 | # Rope project settings 96 | .ropeproject 97 | 98 | # mkdocs documentation 99 | /site 100 | 101 | # mypy 102 | .mypy_cache/ 103 | 104 | # ide 105 | .vscode/ 106 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/python/black 3 | rev: 19.10b0 4 | hooks: 5 | - id: black 6 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Community Participation Guidelines 2 | 3 | This repository is governed by Mozilla's code of conduct and etiquette guidelines. 4 | For more details, please read the 5 | [Mozilla Community Participation Guidelines](https://www.mozilla.org/about/governance/policies/participation/). 6 | 7 | ## How to Report 8 | For more information on how to report violations of the Community Participation Guidelines, please read our '[How to Report](https://www.mozilla.org/about/governance/policies/participation/reporting/)' page. 9 | 10 | 16 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # frost 2 | 3 | FROM python:3.8-slim-buster 4 | 5 | ENV PYTHONPATH $PYTHONPATH:/app 6 | ENV PYTHONUNBUFFERED 1 7 | 8 | RUN groupadd --gid 10001 app && \ 9 | useradd --uid 10001 --gid 10001 --shell /usr/sbin/nologin app 10 | RUN install -o app -g app -d /var/run/depobs /var/log/depobs 11 | 12 | # git for herokuadmintools 13 | RUN DEBIAN_FRONTEND=noninteractive apt-get update && \ 14 | apt-get upgrade -y && \ 15 | apt-get install --no-install-recommends -y \ 16 | ca-certificates \ 17 | curl \ 18 | git \ 19 | jq 20 | 21 | COPY . 
/app 22 | WORKDIR /app 23 | 24 | RUN python setup.py install 25 | 26 | USER app 27 | ENTRYPOINT [ "frost" ] 28 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | include requirements.txt 3 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | 2 | TODAY := $(shell date '+%Y-%m-%d') 3 | 4 | .DEFAULT_GOAL := all 5 | 6 | AWS_PROFILE := default 7 | 8 | PYTEST_OPTS := '' 9 | 10 | AUTOBUILD_OPTS ?= --open-browser --port=0 --delay 5 11 | 12 | all: check_venv 13 | frost test 14 | 15 | awsci: check_venv 16 | frost test --continue-on-collection-errors -m aws aws/**/*.py \ 17 | -k "not test_ec2_security_group_in_use" \ 18 | --json=results-$(AWS_PROFILE)-$(TODAY).json $(PYTEST_OPTS) 19 | 20 | check_venv: 21 | ifeq ($(VIRTUAL_ENV),) 22 | $(error "Run frost from a virtualenv (try 'make install && source venv/bin/activate')") 23 | endif 24 | 25 | check_conftest_imports: 26 | # refs: https://github.com/mozilla/frost/issues/119 27 | grep --recursive --exclude-dir '*venv' --include '*.py' '^import\s+conftest|^from\s+conftest\s+import\s+pytest' ./ ; [ $$? -eq 1 ] 28 | 29 | clean: clean-cache clean-python 30 | rm -rf venv 31 | # remember to deactivate your active virtual env 32 | 33 | clean-cache: check_venv 34 | @# do as little work as possible to clear the cache, and guarantee success 35 | frost test --cache-clear --continue-on-collection-errors \ 36 | --collect-only -m "no_such_marker" \ 37 | --noconftest --tb=no --disable-warnings --quiet \ 38 | || true 39 | 40 | clean-python: 41 | find . -type d -name venv -prune -o -type d -name __pycache__ -print0 | xargs -0 rm -rf 42 | 43 | doc-build: check_venv 44 | type sphinx-build || { echo "please run 'make install-docs' to build docs"; false; } 45 | @# we regen the api docs every time -- they are not checked in. 46 | rm -rf docs/source 47 | sphinx-apidoc --no-toc -o docs/source . 48 | @# TODO: Add new service modules below also in docs/Source.rst 49 | for module in frost aws gcp gsuite; do \ 50 | sphinx-apidoc -f -o docs/source/$$module $$module ; \ 51 | done 52 | make -C docs clean html 53 | 54 | doc-preview: check_venv 55 | @#sphinx-autobuild "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 56 | sphinx-autobuild $(AUTOBUILD_OPTS) "docs/" "docs/_build/html/" $(SPHINXOPTS) $(O) 57 | 58 | doctest: check_venv 59 | frost test -vv --doctest-modules --doctest-glob='*.py' -s --offline --debug-calls \ 60 | $(shell find . -type f -name '*.py' | grep -v venv | grep -v .pyenv | grep -v setup.py) 61 | 62 | coverage: check_venv 63 | frost test --cov-config .coveragerc --cov=. \ 64 | --aws-profiles example-account \ 65 | -o python_files=meta_test*.py \ 66 | -o cache_dir=./example_cache/ \ 67 | --offline \ 68 | $(shell find . -type f -name '*.py' | grep -v venv | grep -v .pyenv | grep -v setup.py) 69 | coverage report -m 70 | coverage html 71 | 72 | flake8: check_venv 73 | flake8 --max-line-length 120 $(shell git ls-files | grep \.py$$) 74 | 75 | black: check_venv 76 | pre-commit run black --all-files 77 | 78 | install: venv 79 | ( . venv/bin/activate && pip install -U pip && pip install -r requirements.txt && python setup.py develop && pre-commit install ) 80 | 81 | install-docs: venv 82 | ( . 
venv/bin/activate && pip install -r docs/requirements.txt ) 83 | 84 | setup_gsuite: check_venv 85 | python -m bin.auth.setup_gsuite 86 | 87 | metatest: 88 | frost test --aws-profiles example-account \ 89 | -o python_files=meta_test*.py \ 90 | -o cache_dir=./example_cache/ 91 | 92 | venv: 93 | python3 -m venv venv 94 | 95 | build-image: 96 | docker build -t localhost/frost:latest . 97 | 98 | .PHONY: \ 99 | all \ 100 | awsci \ 101 | black \ 102 | build-image \ 103 | check_conftest_imports \ 104 | check_venv \ 105 | clean \ 106 | clean-cache \ 107 | clean-python \ 108 | coverage \ 109 | doc-build \ 110 | doc-preview \ 111 | doctest \ 112 | flake8 \ 113 | install \ 114 | install-docs \ 115 | metatest \ 116 | setup_gsuite \ 117 | venv 118 | -------------------------------------------------------------------------------- /aws/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/frost/5e4bce438a4c2b27cf97989d4fd3462b3db2610b/aws/__init__.py -------------------------------------------------------------------------------- /aws/autoscaling/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/frost/5e4bce438a4c2b27cf97989d4fd3462b3db2610b/aws/autoscaling/__init__.py -------------------------------------------------------------------------------- /aws/autoscaling/resources.py: -------------------------------------------------------------------------------- 1 | from conftest import botocore_client 2 | 3 | 4 | def autoscaling_launch_configurations(): 5 | """ 6 | http://botocore.readthedocs.io/en/latest/reference/services/autoscaling.html#AutoScaling.Client.describe_launch_configurations 7 | """ 8 | return ( 9 | botocore_client.get("autoscaling", "describe_launch_configurations", [], {}) 10 | .extract_key("LaunchConfigurations") 11 | .flatten() 12 | .values() 13 | ) 14 | -------------------------------------------------------------------------------- /aws/cloudtrail/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/frost/5e4bce438a4c2b27cf97989d4fd3462b3db2610b/aws/cloudtrail/__init__.py -------------------------------------------------------------------------------- /aws/cloudtrail/resources.py: -------------------------------------------------------------------------------- 1 | from conftest import botocore_client 2 | 3 | 4 | def cloudtrails(): 5 | "https://botocore.readthedocs.io/en/latest/reference/services/cloudtrail.html#CloudTrail.Client.describe_trails" 6 | trails = ( 7 | botocore_client.get("cloudtrail", "describe_trails", [], {}) 8 | .extract_key("trailList") 9 | .flatten() 10 | .values() 11 | ) 12 | 13 | # A multi-region trail is returned once per region, so deduplicate trails by ARN.
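# (A trail with IsMultiRegionTrail set appears in every region's describe_trails response, not just its home region.)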
14 | unique_trails = [] 15 | for trail in trails: 16 | if not any(t for t in unique_trails if t["TrailARN"] == trail["TrailARN"]): 17 | unique_trails.append(trail) 18 | 19 | return unique_trails 20 | -------------------------------------------------------------------------------- /aws/cloudtrail/test_cloudtrail_enabled_in_all_regions.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from conftest import botocore_client 4 | 5 | from aws.cloudtrail.resources import cloudtrails 6 | 7 | 8 | @pytest.fixture 9 | def all_cloudtrails(): 10 | return cloudtrails() 11 | 12 | 13 | @pytest.mark.cloudtrail 14 | @pytest.mark.parametrize("aws_region", botocore_client.get_regions()) 15 | def test_cloudtrail_enabled_in_all_regions(aws_region, all_cloudtrails): 16 | """ 17 | Tests that all regions have an associated cloudtrail or that there is a cloudtrail for all regions. 18 | """ 19 | assert any( 20 | trail 21 | for trail in all_cloudtrails 22 | if trail["HomeRegion"] == aws_region or trail["IsMultiRegionTrail"] 23 | ) 24 | -------------------------------------------------------------------------------- /aws/cloudtrail/test_cloudtrail_log_validation_enabled.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from helpers import get_param_id 4 | 5 | from aws.cloudtrail.resources import cloudtrails 6 | 7 | 8 | @pytest.mark.cloudtrail 9 | @pytest.mark.parametrize( 10 | "cloudtrail", cloudtrails(), ids=lambda trail: get_param_id(trail, "Name"), 11 | ) 12 | def test_cloudtrail_log_validation_enabled(cloudtrail): 13 | """ 14 | Tests that all Cloudtrails have log validation enabled. 15 | """ 16 | assert cloudtrail["LogFileValidationEnabled"] 17 | -------------------------------------------------------------------------------- /aws/conftest.py: -------------------------------------------------------------------------------- 1 | from _pytest.config import Config 2 | 3 | 4 | def pytest_configure(config: Config) -> None: 5 | # register custom marks for aws services 6 | for svc_name in [ 7 | "aws", 8 | "cloudtrail", 9 | "ec2", 10 | "elasticsearch", 11 | "elb", 12 | "iam", 13 | "rds", 14 | "redshift", 15 | "s3", 16 | "sns", 17 | ]: 18 | config.addinivalue_line( 19 | "markers", "{}: mark tests against {}".format(svc_name, svc_name) 20 | ) 21 | -------------------------------------------------------------------------------- /aws/ec2/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/frost/5e4bce438a4c2b27cf97989d4fd3462b3db2610b/aws/ec2/__init__.py -------------------------------------------------------------------------------- /aws/ec2/resources.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict 2 | 3 | from conftest import botocore_client 4 | 5 | from aws.autoscaling.resources import autoscaling_launch_configurations 6 | from aws.elasticache.resources import elasticache_clusters 7 | from aws.elb.resources import elbs, elbs_v2 8 | from aws.rds.resources import rds_db_instances 9 | from aws.redshift.resources import redshift_clusters 10 | from aws.elasticsearch.resources import elasticsearch_domains 11 | 12 | 13 | def ec2_instances(): 14 | "http://botocore.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.describe_instances" 15 | # Note: extracting Reservations.Instances drops EC2-Classic Groups at Reservations.Groups 16 | return ( 
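# only pending or running instances are returned; stopped and terminated ones are excluded by the Filters below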
17 | botocore_client.get( 18 | "ec2", 19 | "describe_instances", 20 | [], 21 | { 22 | "Filters": [ 23 | {"Name": "instance-state-name", "Values": ["pending", "running"]} 24 | ] 25 | }, 26 | ) 27 | .extract_key("Reservations") 28 | .flatten() 29 | .extract_key("Instances") 30 | .flatten() 31 | .values() 32 | ) 33 | 34 | 35 | def ec2_security_groups(): 36 | "http://botocore.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.describe_security_groups" 37 | return ( 38 | botocore_client.get("ec2", "describe_security_groups", [], {}) 39 | .extract_key("SecurityGroups") 40 | .flatten() 41 | .values() 42 | ) 43 | 44 | 45 | def ec2_ebs_volumes(): 46 | "http://botocore.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.describe_volumes" 47 | return ( 48 | botocore_client.get("ec2", "describe_volumes", [], {}) 49 | .extract_key("Volumes") 50 | .flatten() 51 | .values() 52 | ) 53 | 54 | 55 | def ec2_ebs_snapshots(): 56 | "http://botocore.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.describe_snapshots" 57 | return ( 58 | botocore_client.get("ec2", "describe_snapshots", [], {"OwnerIds": ["self"]}) 59 | .extract_key("Snapshots") 60 | .flatten() 61 | .values() 62 | ) 63 | 64 | 65 | def ec2_ebs_snapshots_create_permission(): 66 | "https://botocore.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.describe_snapshot_attribute" 67 | return sum( 68 | [ 69 | botocore_client.get( 70 | service_name="ec2", 71 | method_name="describe_snapshot_attribute", 72 | call_args=[], 73 | call_kwargs={ 74 | "Attribute": "createVolumePermission", 75 | "SnapshotId": snapshot["SnapshotId"], 76 | }, 77 | regions=[snapshot["__pytest_meta"]["region"]], 78 | ).values() 79 | for snapshot in ec2_ebs_snapshots() 80 | ], 81 | [], 82 | ) 83 | 84 | 85 | def ec2_flow_logs(): 86 | "https://botocore.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.describe_flow_logs" 87 | return ( 88 | botocore_client.get("ec2", "describe_flow_logs", [], {}) 89 | .extract_key("FlowLogs") 90 | .flatten() 91 | .values() 92 | ) 93 | 94 | 95 | def ec2_vpcs(): 96 | "https://botocore.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.describe_vpcs" 97 | return ( 98 | botocore_client.get("ec2", "describe_vpcs", [], {}) 99 | .extract_key("Vpcs") 100 | .flatten() 101 | .values() 102 | ) 103 | 104 | 105 | def ec2_addresses(): 106 | "https://botocore.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.describe_addresses" 107 | return ( 108 | botocore_client.get("ec2", "describe_addresses", [], {}) 109 | .extract_key("Addresses") 110 | .flatten() 111 | .values() 112 | ) 113 | 114 | 115 | def ec2_security_groups_with_in_use_flag(): 116 | """Returns security groups with an additional "InUse" key, 117 | which is True if it is associated with at least one resource. 118 | 119 | Possible resources: 120 | - EC2 121 | - ELBs (v1 and v2) 122 | - RDS 123 | - Redshift 124 | - ElastiCache 125 | - Elasticsearch Service 126 | - AutoScaling 127 | """ 128 | sec_groups = ec2_security_groups() 129 | in_use_sec_group_ids = defaultdict(int) 130 | 131 | # These resources have their security groups under 'SecurityGroups'. 132 | # Most of these are a list of dictionaries which include either SecurityGroupId 133 | # or GroupId, but some have just a list of group ids.
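# (e.g. an EC2 instance lists [{'GroupName': ..., 'GroupId': 'sg-...'}] while a launch configuration lists just ['sg-...'])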
134 | resources = sum( 135 | [ 136 | ec2_instances(), 137 | elbs(), 138 | elbs_v2(), 139 | elasticache_clusters(), 140 | autoscaling_launch_configurations(), 141 | ], 142 | [], 143 | ) 144 | for resource in resources: 145 | for attached_sec_group in resource.get("SecurityGroups", []): 146 | if isinstance(attached_sec_group, dict): 147 | for key in ["SecurityGroupId", "GroupId"]: 148 | if key in attached_sec_group: 149 | in_use_sec_group_ids[attached_sec_group[key]] += 1 150 | elif isinstance(attached_sec_group, str): 151 | in_use_sec_group_ids[attached_sec_group] += 1 152 | else: 153 | raise Exception( 154 | "Got security group value with a type of %s" 155 | % type(attached_sec_group) 156 | ) 157 | 158 | # These resources have two types of security groups, so 159 | # the VPC ones are namespaced under "VpcSecurityGroups" 160 | vpc_namespaced_resources = sum([rds_db_instances(), redshift_clusters()], []) 161 | for resource in vpc_namespaced_resources: 162 | for attached_sec_group in resource.get("VpcSecurityGroups", []): 163 | in_use_sec_group_ids[attached_sec_group["VpcSecurityGroupId"]] += 1 164 | 165 | # Elasticsearch domains expose theirs a little differently 166 | for domain in elasticsearch_domains(): 167 | if "VPCOptions" in domain: 168 | for attached_sec_group in domain["VPCOptions"]["SecurityGroupIds"]: 169 | in_use_sec_group_ids[attached_sec_group] += 1 170 | 171 | for sec_group in sec_groups: 172 | if sec_group["GroupId"] in in_use_sec_group_ids: 173 | sec_group["InUse"] = True 174 | else: 175 | sec_group["InUse"] = False 176 | 177 | return sec_groups 178 | 179 | 180 | def ec2_images_owned_by(account_ids): 181 | "Returns a list of EC2 images owned by a list of provided account ids" 182 | return ( 183 | botocore_client.get( 184 | "ec2", 185 | "describe_images", 186 | [], 187 | {"Filters": [{"Name": "owner-id", "Values": account_ids}]}, 188 | ) 189 | .extract_key("Images") 190 | .flatten() 191 | .values() 192 | ) 193 | -------------------------------------------------------------------------------- /aws/ec2/test_ec2_all_eips_bound.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aws.ec2.helpers import ec2_address_id 4 | from aws.ec2.resources import ec2_addresses 5 | 6 | 7 | @pytest.mark.ec2 8 | @pytest.mark.parametrize("ec2_address", ec2_addresses(), ids=ec2_address_id) 9 | def test_ec2_all_eips_bound(ec2_address): 10 | """Checks whether all EIPs are bound to instances.""" 11 | assert ec2_address.get("InstanceId"), "No associated instance." 12 | -------------------------------------------------------------------------------- /aws/ec2/test_ec2_ebs_snapshot_not_too_old.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from helpers import get_param_id 4 | 5 | from aws.ec2.resources import ec2_ebs_snapshots 6 | from aws.ec2.helpers import ebs_snapshot_not_too_old 7 | 8 | 9 | @pytest.mark.ec2 10 | @pytest.mark.parametrize( 11 | "ec2_ebs_snapshot", 12 | ec2_ebs_snapshots(), 13 | ids=lambda ebs: get_param_id(ebs, "SnapshotId"), 14 | ) 15 | def test_ec2_ebs_snapshot_not_too_old(ec2_ebs_snapshot): 16 | assert ebs_snapshot_not_too_old( 17 | ec2_ebs_snapshot 18 | ), f"{ec2_ebs_snapshot['SnapshotId']} was started at {ec2_ebs_snapshot['StartTime']} and is considered too old."
19 | -------------------------------------------------------------------------------- /aws/ec2/test_ec2_ebs_snapshots_are_private.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from helpers import get_param_id 4 | 5 | from aws.ec2.resources import ec2_ebs_snapshots_create_permission 6 | from aws.ec2.helpers import is_ebs_snapshot_public 7 | 8 | 9 | @pytest.mark.ec2 10 | @pytest.mark.parametrize( 11 | "ec2_ebs_snapshot", 12 | ec2_ebs_snapshots_create_permission(), 13 | ids=lambda ebs: get_param_id(ebs, "SnapshotId"), 14 | ) 15 | def test_ec2_ebs_snapshots_are_private(ec2_ebs_snapshot): 16 | assert not is_ebs_snapshot_public( 17 | ec2_ebs_snapshot 18 | ), "Snapshot {} is publicly accessible.".format(ec2_ebs_snapshot["SnapshotId"]) 19 | -------------------------------------------------------------------------------- /aws/ec2/test_ec2_ebs_volume_attached_to_instance.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from helpers import get_param_id 4 | 5 | from aws.ec2.resources import ec2_ebs_volumes 6 | from aws.ec2.helpers import ebs_volume_attached_to_instance 7 | 8 | 9 | @pytest.mark.ec2 10 | @pytest.mark.parametrize( 11 | "ec2_ebs_volume", ec2_ebs_volumes(), ids=lambda ebs: get_param_id(ebs, "VolumeId") 12 | ) 13 | def test_ec2_ebs_volume_attached_to_instance(ec2_ebs_volume): 14 | """CloudOps generally wants every EBS volume attached to an instance once 15 | it is created, to avoid paying for unused storage, hence this check. A 16 | default 90-day grace period applies, i.e. a volume may remain unattached 17 | to any instance for up to 90 days. 18 | """ 19 | assert ebs_volume_attached_to_instance( 20 | ec2_ebs_volume 21 | ), f"{ec2_ebs_volume['VolumeId']} was created at {ec2_ebs_volume['CreateTime']} and is not attached to an instance."
22 | -------------------------------------------------------------------------------- /aws/ec2/test_ec2_ebs_volume_encrypted.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from helpers import get_param_id 4 | 5 | from aws.ec2.resources import ec2_ebs_volumes 6 | from aws.ec2.helpers import is_ebs_volume_encrypted 7 | 8 | 9 | @pytest.mark.ec2 10 | @pytest.mark.parametrize( 11 | "ec2_ebs_volume", ec2_ebs_volumes(), ids=lambda ebs: get_param_id(ebs, "VolumeId"), 12 | ) 13 | def test_ec2_ebs_volume_encrypted(ec2_ebs_volume): 14 | assert is_ebs_volume_encrypted(ec2_ebs_volume) 15 | -------------------------------------------------------------------------------- /aws/ec2/test_ec2_ebs_volume_not_piops.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from helpers import get_param_id 4 | 5 | from aws.ec2.resources import ec2_ebs_volumes 6 | from aws.ec2.helpers import is_ebs_volume_piops 7 | 8 | 9 | @pytest.mark.ec2 10 | @pytest.mark.parametrize( 11 | "ec2_ebs_volume", ec2_ebs_volumes(), ids=lambda ebs: get_param_id(ebs, "VolumeId") 12 | ) 13 | def test_ec2_ebs_volume_not_piops(ec2_ebs_volume): 14 | assert not is_ebs_volume_piops(ec2_ebs_volume) 15 | -------------------------------------------------------------------------------- /aws/ec2/test_ec2_instance_has_required_tags.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aws.ec2.helpers import ec2_instance_test_id, ec2_instance_missing_tag_names 4 | from aws.ec2.resources import ec2_instances 5 | 6 | 7 | @pytest.fixture 8 | def required_tag_names(pytestconfig): 9 | return frozenset(pytestconfig.custom_config.aws.required_tags) 10 | 11 | 12 | @pytest.mark.ec2 13 | @pytest.mark.parametrize("ec2_instance", ec2_instances(), ids=ec2_instance_test_id) 14 | def test_ec2_instance_has_required_tags(ec2_instance, required_tag_names): 15 | """ 16 | Checks that all EC2 instances have the tags with the required names. 17 | 18 | Does not check tag values. 19 | """ 20 | if len(required_tag_names) == 0: 21 | pytest.skip("No required tag names were provided") 22 | missing_tag_names = ec2_instance_missing_tag_names(ec2_instance, required_tag_names) 23 | assert ( 24 | not missing_tag_names 25 | ), "EC2 Instance {0[InstanceId]} missing required tags {1!r}".format( 26 | ec2_instance, missing_tag_names 27 | ) 28 | -------------------------------------------------------------------------------- /aws/ec2/test_ec2_instance_on_acceptable_ami.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aws.ec2.helpers import ec2_instance_test_id 4 | from aws.ec2.resources import ec2_instances, ec2_images_owned_by 5 | 6 | from datetime import datetime, timedelta, timezone 7 | 8 | 9 | @pytest.fixture 10 | def owned_amis(pytestconfig): 11 | return ec2_images_owned_by(pytestconfig.custom_config.aws.owned_ami_account_ids) 12 | 13 | 14 | @pytest.fixture 15 | def max_ami_age(pytestconfig): 16 | return pytestconfig.custom_config.aws.max_ami_age_in_days 17 | 18 | 19 | @pytest.mark.ec2 20 | @pytest.mark.parametrize("ec2_instance", ec2_instances(), ids=ec2_instance_test_id) 21 | def test_ec2_instance_on_acceptable_ami(ec2_instance, owned_amis, max_ami_age): 22 | """ 23 | Checks that all EC2 instances are running on acceptable AMIs, meaning 24 | an AMI that is not older than X days and is owned by us. 25 | Default is 180 days. 
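The accepted AMI owners come from the aws.owned_ami_account_ids setting in the custom config, and the age limit from aws.max_ami_age_in_days.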
26 | """ 27 | for tag in ec2_instance["Tags"]: 28 | if tag["Key"] == "Name": 29 | instanceName = tag["Value"] 30 | 31 | minAge = datetime.now(timezone.utc) - timedelta(days=max_ami_age) 32 | foundAmi = False 33 | for ami in owned_amis: 34 | if ami["ImageId"] == ec2_instance["ImageId"]: 35 | assert ( 36 | ami["CreationDate"] > minAge 37 | ), "Instance {} {} is running on an AMI created on {} that's older than 180 days".format( 38 | instanceName, ec2_instance["InstanceId"], ami["CreationDate"] 39 | ) 40 | foundAmi = True 41 | break 42 | 43 | if not foundAmi: 44 | assert False, "Instance {} {} uses AMI {} not owned by us".format( 45 | instanceName, ec2_instance["InstanceId"], ec2_instance["ImageId"] 46 | ) 47 | -------------------------------------------------------------------------------- /aws/ec2/test_ec2_security_group_in_use.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aws.ec2.helpers import ec2_security_group_test_id 4 | from aws.ec2.resources import ec2_security_groups_with_in_use_flag 5 | 6 | 7 | @pytest.mark.ec2 8 | @pytest.mark.rationale( 9 | """ 10 | Having unused security groups adds cruft in an AWS account, increases the 11 | likelihood of a mistake, and makes security testing harder. 12 | """ 13 | ) 14 | @pytest.mark.parametrize( 15 | "ec2_security_group", 16 | ec2_security_groups_with_in_use_flag(), 17 | ids=ec2_security_group_test_id, 18 | ) 19 | def test_ec2_security_group_in_use(ec2_security_group): 20 | """Checks to make sure that the security group 21 | is currently attached to at least one EC2 instance 22 | """ 23 | assert ec2_security_group[ 24 | "InUse" 25 | ], "Security group is not currently attached to any instance." 26 | -------------------------------------------------------------------------------- /aws/ec2/test_ec2_security_group_opens_all_ports.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aws.ec2.helpers import ( 4 | ec2_security_group_test_id, 5 | ec2_security_group_opens_all_ports, 6 | ) 7 | from aws.ec2.resources import ec2_security_groups_with_in_use_flag 8 | 9 | 10 | @pytest.mark.ec2 11 | @pytest.mark.parametrize( 12 | "ec2_security_group", 13 | ec2_security_groups_with_in_use_flag(), 14 | ids=ec2_security_group_test_id, 15 | ) 16 | def test_ec2_security_group_opens_all_ports(ec2_security_group): 17 | """Checks whether an EC2 security group includes a permission 18 | allowing inbound access on all ports. 
19 | """ 20 | if ec2_security_group["InUse"]: 21 | assert not ec2_security_group_opens_all_ports(ec2_security_group) 22 | else: 23 | pytest.skip("Security group not in use") 24 | -------------------------------------------------------------------------------- /aws/ec2/test_ec2_security_group_opens_all_ports_to_all.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aws.ec2.helpers import ( 4 | ec2_security_group_test_id, 5 | ec2_security_group_opens_all_ports_to_all, 6 | ) 7 | from aws.ec2.resources import ec2_security_groups_with_in_use_flag 8 | 9 | 10 | @pytest.mark.ec2 11 | @pytest.mark.parametrize( 12 | "ec2_security_group", 13 | ec2_security_groups_with_in_use_flag(), 14 | ids=ec2_security_group_test_id, 15 | ) 16 | def test_ec2_security_group_opens_all_ports_to_all(ec2_security_group): 17 | """Checks whether an EC2 security group includes a permission 18 | allowing inbound access to all IPs on all ports.""" 19 | if ec2_security_group["InUse"]: 20 | assert not ec2_security_group_opens_all_ports_to_all(ec2_security_group) 21 | else: 22 | pytest.skip("Security group not in use") 23 | -------------------------------------------------------------------------------- /aws/ec2/test_ec2_security_group_opens_all_ports_to_self.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aws.ec2.helpers import ( 4 | ec2_security_group_test_id, 5 | ec2_security_group_opens_all_ports_to_self, 6 | ) 7 | from aws.ec2.resources import ec2_security_groups_with_in_use_flag 8 | 9 | 10 | @pytest.mark.ec2 11 | @pytest.mark.parametrize( 12 | "ec2_security_group", 13 | ec2_security_groups_with_in_use_flag(), 14 | ids=ec2_security_group_test_id, 15 | ) 16 | def test_ec2_security_group_opens_all_ports_to_self(ec2_security_group): 17 | """Checks whether an EC2 security group includes a permission 18 | allowing unrestricted inbound access within the security group.""" 19 | if ec2_security_group["InUse"]: 20 | assert not ec2_security_group_opens_all_ports_to_self(ec2_security_group) 21 | else: 22 | pytest.skip("Security group not in use") 23 | -------------------------------------------------------------------------------- /aws/ec2/test_ec2_security_group_opens_specific_ports_to_all.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aws.ec2.helpers import ( 4 | ec2_security_group_test_id, 5 | ec2_security_group_opens_specific_ports_to_all, 6 | ) 7 | from aws.ec2.resources import ec2_security_groups_with_in_use_flag 8 | 9 | 10 | @pytest.mark.ec2 11 | @pytest.mark.parametrize( 12 | "ec2_security_group", 13 | ec2_security_groups_with_in_use_flag(), 14 | ids=ec2_security_group_test_id, 15 | ) 16 | def test_ec2_security_group_opens_specific_ports_to_all(ec2_security_group, aws_config): 17 | """Checks whether an EC2 security group includes a permission allowing 18 | inbound access on specific ports. Excluded ports are 80 and 443. 
19 | """ 20 | if ec2_security_group["InUse"]: 21 | allowed_ports = aws_config.get_allowed_ports( 22 | ec2_security_group_test_id(ec2_security_group) 23 | ) 24 | assert not ec2_security_group_opens_specific_ports_to_all( 25 | ec2_security_group, allowed_ports 26 | ) 27 | else: 28 | pytest.skip("Security group not in use") 29 | -------------------------------------------------------------------------------- /aws/ec2/test_ec2_vpc_flow_log_enabled.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from helpers import get_param_id 4 | 5 | from aws.ec2.resources import ec2_flow_logs, ec2_vpcs 6 | 7 | 8 | @pytest.fixture 9 | def all_flow_logs(): 10 | return ec2_flow_logs() 11 | 12 | 13 | @pytest.mark.ec2 14 | @pytest.mark.parametrize( 15 | "ec2_vpc", ec2_vpcs(), ids=lambda vpc: get_param_id(vpc, "VpcId"), 16 | ) 17 | def test_ec2_vpc_flow_log_enabled(ec2_vpc, all_flow_logs): 18 | """ 19 | Checks that each VPC has VPC Flow Logs enabled. 20 | """ 21 | assert any( 22 | flow_log 23 | for flow_log in all_flow_logs 24 | if flow_log["ResourceId"] == ec2_vpc["VpcId"] 25 | ) 26 | -------------------------------------------------------------------------------- /aws/elasticache/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/frost/5e4bce438a4c2b27cf97989d4fd3462b3db2610b/aws/elasticache/__init__.py -------------------------------------------------------------------------------- /aws/elasticache/resources.py: -------------------------------------------------------------------------------- 1 | from conftest import botocore_client 2 | 3 | 4 | def elasticache_clusters(): 5 | """ 6 | http://botocore.readthedocs.io/en/latest/reference/services/elasticache.html#ElastiCache.Client.describe_cache_clusters 7 | """ 8 | return ( 9 | botocore_client.get("elasticache", "describe_cache_clusters", [], {}) 10 | .extract_key("CacheClusters") 11 | .flatten() 12 | .values() 13 | ) 14 | -------------------------------------------------------------------------------- /aws/elasticsearch/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/frost/5e4bce438a4c2b27cf97989d4fd3462b3db2610b/aws/elasticsearch/__init__.py -------------------------------------------------------------------------------- /aws/elasticsearch/resources.py: -------------------------------------------------------------------------------- 1 | from conftest import botocore_client 2 | 3 | 4 | def elasticsearch_domains(): 5 | """ 6 | http://botocore.readthedocs.io/en/latest/reference/services/es.html#ElasticsearchService.Client.describe_elasticsearch_domains 7 | """ 8 | # You can only get 5 at a time. 
9 | domains_list = list_elasticsearch_domains() 10 | domains = [] 11 | for i in range(0, len(domains_list), 5): 12 | domains += ( 13 | botocore_client.get( 14 | "es", 15 | "describe_elasticsearch_domains", 16 | [], 17 | {"DomainNames": domains_list[i : i + 5]}, 18 | ) 19 | .extract_key("DomainStatusList") 20 | .flatten() 21 | .values() 22 | ) 23 | return domains 24 | 25 | 26 | def list_elasticsearch_domains(): 27 | "http://botocore.readthedocs.io/en/latest/reference/services/es.html#ElasticsearchService.Client.list_domain_names" 28 | return [ 29 | domain["DomainName"] 30 | for domain in botocore_client.get("es", "list_domain_names", [], {}) 31 | .extract_key("DomainNames") 32 | .flatten() 33 | .values() 34 | ] 35 | -------------------------------------------------------------------------------- /aws/elasticsearch/test_elasticsearch_domains_have_logging_enabled.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from helpers import get_param_id 4 | 5 | from aws.elasticsearch.resources import elasticsearch_domains 6 | 7 | 8 | @pytest.mark.elasticsearch 9 | @pytest.mark.parametrize( 10 | "es_domain", 11 | elasticsearch_domains(), 12 | ids=lambda es_domain: get_param_id(es_domain, "ARN"), 13 | ) 14 | def test_elasticsearch_domains_have_logging_enabled(es_domain): 15 | """ 16 | Tests whether an Elasticsearch domain has logging enabled. 17 | """ 18 | assert ( 19 | es_domain.get("LogPublishingOptions") is not None 20 | ), "Logging is disabled for {}".format(es_domain["DomainName"]) 21 | assert es_domain["LogPublishingOptions"]["INDEX_SLOW_LOGS"][ 22 | "Enabled" 23 | ], "Index Slow Logs are disabled for {}".format(es_domain["DomainName"]) 24 | assert es_domain["LogPublishingOptions"]["SEARCH_SLOW_LOGS"][ 25 | "Enabled" 26 | ], "Search Slow Logs are disabled for {}".format(es_domain["DomainName"]) 27 | -------------------------------------------------------------------------------- /aws/elb/resources.py: -------------------------------------------------------------------------------- 1 | from conftest import botocore_client 2 | 3 | 4 | def elbs(with_tags=True): 5 | """ 6 | http://botocore.readthedocs.io/en/latest/reference/services/elb.html#ElasticLoadBalancing.Client.describe_load_balancers 7 | """ 8 | elbs = ( 9 | botocore_client.get("elb", "describe_load_balancers", [], {}) 10 | .extract_key("LoadBalancerDescriptions") 11 | .flatten() 12 | .values() 13 | ) 14 | 15 | if not with_tags: 16 | return elbs 17 | 18 | elbs_with_tags = [] 19 | for elb in elbs: 20 | tags = ( 21 | botocore_client.get( 22 | service_name="elb", 23 | method_name="describe_tags", 24 | call_args=[], 25 | call_kwargs={"LoadBalancerNames": [elb["LoadBalancerName"]]}, 26 | regions=[elb["__pytest_meta"]["region"]], 27 | ) 28 | .extract_key("TagDescriptions") 29 | .flatten() 30 | .values() 31 | ) 32 | # This check is probably unneeded 33 | if len(tags) >= 1: 34 | tags = tags[0] 35 | if "Tags" in tags: 36 | elb["Tags"] = tags["Tags"] 37 | elbs_with_tags.append(elb) 38 | 39 | return elbs_with_tags 40 | 41 | 42 | def elbs_v2(): 43 | """ 44 | http://botocore.readthedocs.io/en/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_load_balancers 45 | """ 46 | return ( 47 | botocore_client.get("elbv2", "describe_load_balancers", [], {}) 48 | .extract_key("LoadBalancers") 49 | .flatten() 50 | .values() 51 | ) 52 | 53 | 54 | def elb_attributes(elb): 55 | """ 56 | 
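Fetches the attributes of one ELB, scoped to that ELB's region: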
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/services/elb.html#ElasticLoadBalancing.Client.describe_load_balancer_attributes 57 | """ 58 | return ( 59 | botocore_client.get( 60 | "elb", 61 | "describe_load_balancer_attributes", 62 | [], 63 | call_kwargs={"LoadBalancerName": elb["LoadBalancerName"]}, 64 | regions=[elb["__pytest_meta"]["region"]], 65 | ) 66 | .extract_key("LoadBalancerAttributes") 67 | .debug() 68 | .values() 69 | )[0] 70 | 71 | 72 | def elbs_with_attributes(): 73 | return [(elb, elb_attributes(elb),) for elb in elbs(with_tags=False)] 74 | -------------------------------------------------------------------------------- /aws/elb/test_elb_desync_mode.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime, timezone 2 | 3 | from _pytest.compat import NOTSET 4 | import pytest 5 | 6 | from helpers import get_param_id 7 | 8 | from aws.elb.resources import elbs_with_attributes 9 | 10 | 11 | @pytest.mark.elb 12 | @pytest.mark.parametrize( 13 | "elb_with_attrs", 14 | elbs_with_attributes(), 15 | ids=lambda e: get_param_id(e[0], "LoadBalancerName") if e != NOTSET else None, 16 | ) 17 | def test_elb_instance_desync_mode(elb_with_attrs): 18 | """ 19 | Checks ELB HTTP desync mode: 20 | 21 | * is not 'monitor' mode 22 | 23 | >>> test_elb_instance_desync_mode(( 24 | ... {"LoadBalancerName": "old-doctest-elb", "CreatedTime": datetime(2020, 9, 1).astimezone(tz=timezone.utc), }, 25 | ... {"AdditionalAttributes": [{"Key": "elb.http.desyncmitigationmode", "Value": "monitor"}]}, 26 | ... )) 27 | Traceback (most recent call last): 28 | ... 29 | AssertionError: ELB old-doctest-elb using desync monitor mode 30 | assert 'monitor' != 'monitor' 31 | >>> test_elb_instance_desync_mode(( 32 | ... {"LoadBalancerName": "new-doctest-elb", "CreatedTime": datetime(2020, 10, 1).astimezone(tz=timezone.utc), }, 33 | ... {"AdditionalAttributes": [{"Key": "elb.http.desyncmitigationmode", "Value": "monitor"}]}, 34 | ... )) 35 | Traceback (most recent call last): 36 | ... 37 | AssertionError: ELB new-doctest-elb using desync monitor mode 38 | assert 'monitor' != 'monitor' 39 | 40 | * is 'defensive' or 'strictest' for ELBs created <= 2020-09-01 41 | 42 | >>> test_elb_instance_desync_mode(( 43 | ... {"LoadBalancerName": "old-doctest-elb", "CreatedTime": datetime(2020, 9, 1).astimezone(tz=timezone.utc), }, 44 | ... {"AdditionalAttributes": [{"Key": "elb.http.desyncmitigationmode", "Value": "strictest"}]}, 45 | ... )) 46 | >>> test_elb_instance_desync_mode(( 47 | ... {"LoadBalancerName": "old-doctest-elb", "CreatedTime": datetime(2020, 9, 1).astimezone(tz=timezone.utc), }, 48 | ... {"AdditionalAttributes": [{"Key": "elb.http.desyncmitigationmode", "Value": "defensive"}]}, 49 | ... )) 50 | 51 | * is 'strictest' for ELBs created > 2020-09-01 52 | 53 | >>> test_elb_instance_desync_mode(( 54 | ... {"LoadBalancerName": "new-doctest-elb", "CreatedTime": datetime(2020, 10, 1).astimezone(tz=timezone.utc), }, 55 | ... {"AdditionalAttributes": [{"Key": "elb.http.desyncmitigationmode", "Value": "strictest"}]}, 56 | ... )) 57 | >>> test_elb_instance_desync_mode(( 58 | ... {"LoadBalancerName": "new-doctest-elb", "CreatedTime": datetime(2020, 10, 1).astimezone(tz=timezone.utc), }, 59 | ... {"AdditionalAttributes": [{"Key": "elb.http.desyncmitigationmode", "Value": "defensive"}]}, 60 | ... )) 61 | Traceback (most recent call last): 62 | ... 
63 | AssertionError: ELB new-doctest-elb (created 2020-10-01) using desync mode defensive instead of {'strictest'} 64 | assert 'defensive' in {'strictest'} 65 | 66 | """ 67 | elb, attrs = elb_with_attrs 68 | elb_name = elb["LoadBalancerName"] 69 | 70 | # default to None so an ELB without the attribute fails the asserts below instead of raising NameError 71 | desync_mode = None 72 | for attr in attrs["AdditionalAttributes"]: 73 | if attr["Key"] == "elb.http.desyncmitigationmode": 74 | desync_mode = attr["Value"] 75 | break 76 | 77 | created_time = elb["CreatedTime"] 78 | assert desync_mode != "monitor", "ELB {} using desync monitor mode".format(elb_name) 79 | 80 | if created_time <= datetime(2020, 9, 1).astimezone(tz=timezone.utc): 81 | acceptable_modes = {"defensive", "strictest"} 82 | else: 83 | acceptable_modes = {"strictest"} 84 | 85 | assert ( 86 | desync_mode in acceptable_modes 87 | ), "ELB {} (created {}) using desync mode {} instead of {}".format( 88 | elb_name, created_time.date(), desync_mode, acceptable_modes 89 | ) 90 | -------------------------------------------------------------------------------- /aws/elb/test_elb_instances_attached.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from helpers import get_param_id 4 | 5 | from aws.elb.resources import elbs 6 | 7 | 8 | @pytest.mark.elb 9 | @pytest.mark.parametrize( 10 | "elb", elbs(), ids=lambda e: get_param_id(e, "LoadBalancerName"), 11 | ) 12 | def test_elb_instances_attached(elb): 13 | """ 14 | Checks to see that an ELB has attached instances and fails if 15 | there are 0 16 | """ 17 | assert len(elb["Instances"]) > 0, "ELB has zero attached instances" 18 | -------------------------------------------------------------------------------- /aws/iam/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/frost/5e4bce438a4c2b27cf97989d4fd3462b3db2610b/aws/iam/__init__.py -------------------------------------------------------------------------------- /aws/iam/helpers.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from dateutil.parser import parse 3 | 4 | from helpers import get_param_id 5 | 6 | 7 | def user_is_inactive(iam_user, no_activity_since, created_after): 8 | """ 9 | Returns False if any of these are true: 10 | - The user was created after the passed in "created_after" datetime. 11 | - The user has used either of their potentially active access keys 12 | since the "no_activity_since" date 13 | - The user has logged into the AWS console since the "no_activity_since" 14 | date 15 | otherwise it returns True. 16 | 17 | >>> from datetime import datetime 18 | >>> no_activity_since = datetime(2017, 1, 1) 19 | >>> created_after = datetime(2018, 1, 8) 20 | 21 | User considered active due to being created after the created_after datetime. 22 | >>> user_is_inactive({'user_creation_time': '2018-01-10'}, no_activity_since, created_after) 23 | False 24 | 25 | User considered active due to usage of access key 1 after no_activity_since 26 | >>> user_is_inactive({ 27 | ... 'user_creation_time': '2016-01-10', 28 | ... 'access_key_1_active': 'true', 29 | ... 'access_key_1_last_used_date': '2017-06-01', 30 | ... }, no_activity_since, created_after) 31 | False 32 | 33 | User considered active due to usage of access key 2 after no_activity_since 34 | >>> user_is_inactive({ 35 | ... 'user_creation_time': '2010-01-10', 36 | ... 'access_key_1_active': 'true', 37 | ... 'access_key_1_last_used_date': '2014-06-01', 38 | ... 
'access_key_2_active': 'true', 39 | ... 'access_key_2_last_used_date': '2017-02-01', 40 | ... }, no_activity_since, created_after) 41 | False 42 | 43 | User considered active due to usage of password after no_activity_since 44 | >>> user_is_inactive({ 45 | ... 'user_creation_time': '2010-01-10', 46 | ... 'access_key_1_active': 'true', 47 | ... 'access_key_1_last_used_date': '2014-06-01', 48 | ... 'access_key_2_active': 'false', 49 | ... 'access_key_2_last_used_date': 'N/A', 50 | ... 'password_enabled': 'true', 51 | ... 'password_last_used': '2017-09-01', 52 | ... }, no_activity_since, created_after) 53 | False 54 | 55 | User considered inactive due to the only usage (access key 1) being before no_activity_since 56 | and user being created before created_after 57 | >>> user_is_inactive({ 58 | ... 'user_creation_time': '2016-01-10', 59 | ... 'access_key_1_active': 'true', 60 | ... 'access_key_1_last_used_date': '2016-06-01', 61 | ... 'access_key_2_active': 'false', 62 | ... 'access_key_2_last_used_date': 'N/A', 63 | ... 'password_enabled': 'false', 64 | ... 'password_last_used': 'N/A', 65 | ... }, no_activity_since, created_after) 66 | True 67 | 68 | User considered inactive due to the only usage (password) being before no_activity_since 69 | and user being created before created_after 70 | >>> user_is_inactive({ 71 | ... 'user_creation_time': '2016-01-10', 72 | ... 'access_key_1_active': 'false', 73 | ... 'access_key_1_last_used_date': 'N/A', 74 | ... 'access_key_2_active': 'false', 75 | ... 'access_key_2_last_used_date': 'N/A', 76 | ... 'password_enabled': 'true', 77 | ... 'password_last_used': '2016-06-01', 78 | ... }, no_activity_since, created_after) 79 | True 80 | """ 81 | 82 | if parse(iam_user["user_creation_time"]) > created_after: 83 | return False 84 | 85 | if ( 86 | is_credential_active( 87 | iam_user["access_key_1_active"], iam_user["access_key_1_last_used_date"] 88 | ) 89 | and parse(iam_user["access_key_1_last_used_date"]) > no_activity_since 90 | ): 91 | return False 92 | 93 | if ( 94 | is_credential_active( 95 | iam_user["access_key_2_active"], iam_user["access_key_2_last_used_date"] 96 | ) 97 | and parse(iam_user["access_key_2_last_used_date"]) > no_activity_since 98 | ): 99 | return False 100 | 101 | if ( 102 | is_credential_active( 103 | iam_user["password_enabled"], iam_user["password_last_used"] 104 | ) 105 | and parse(iam_user["password_last_used"]) > no_activity_since 106 | ): 107 | return False 108 | 109 | return True 110 | 111 | 112 | def is_credential_active(credential_active, credential_last_used): 113 | return credential_active == "true" and credential_last_used not in [ 114 | "N/A", 115 | "no_information", 116 | ] 117 | 118 | 119 | def is_access_key_expired(iam_access_key, access_key_expiration_date): 120 | """ 121 | Compares the CreateDate of the access key with the datetime object passed 122 | in as `access_key_expiration_date` and returns True if the CreateDate is 123 | before the `access_key_expiration_date` datetime object. 124 | 125 | Returns False if the Status of the key is not `Active`, as though it may 126 | have expired, it cannot be used. 
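Put differently: an access key counts as expired only when its Status is Active and its CreateDate falls strictly before access_key_expiration_date.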
127 | 128 | >>> from datetime import datetime 129 | >>> access_key_expiration_date = datetime(2018, 1, 8) 130 | 131 | >>> is_access_key_expired({'Status': 'Inactive'}, access_key_expiration_date) 132 | False 133 | >>> is_access_key_expired({'Status': 'Active', 'CreateDate': datetime(2018, 1, 9)}, access_key_expiration_date) 134 | False 135 | >>> is_access_key_expired({'Status': 'Active', 'CreateDate': datetime(2020, 1, 9)}, access_key_expiration_date) 136 | False 137 | 138 | >>> is_access_key_expired({'Status': 'Active', 'CreateDate': datetime(2018, 1, 7)}, access_key_expiration_date) 139 | True 140 | >>> is_access_key_expired({'Status': 'Active', 'CreateDate': datetime(2000, 1, 9)}, access_key_expiration_date) 141 | True 142 | """ 143 | 144 | if iam_access_key["Status"] != "Active": 145 | return False 146 | 147 | assert isinstance(iam_access_key["CreateDate"], datetime) 148 | return access_key_expiration_date > iam_access_key["CreateDate"] 149 | 150 | 151 | def get_iam_user_name(login): 152 | return get_param_id(login, "UserName") 153 | 154 | 155 | def get_iam_resource_id(resource): 156 | if isinstance(resource, dict) and "UserName" in resource: 157 | return get_iam_user_name(resource) 158 | if isinstance(resource, list): 159 | if len(resource) == 0: 160 | return "empty" 161 | return None 162 | -------------------------------------------------------------------------------- /aws/iam/resources.py: -------------------------------------------------------------------------------- 1 | import csv 2 | import time 3 | 4 | import pytest 5 | 6 | from conftest import botocore_client, custom_config_global 7 | 8 | 9 | def iam_users(): 10 | "http://botocore.readthedocs.io/en/latest/reference/services/iam.html#IAM.Client.list_users" 11 | return ( 12 | botocore_client.get("iam", "list_users", [], {}) 13 | .extract_key("Users") 14 | .flatten() 15 | .values() 16 | ) 17 | 18 | 19 | def iam_admin_users(): 20 | return [ 21 | user for user in iam_users_with_policies_and_groups() if user_is_admin(user) 22 | ] 23 | 24 | 25 | def iam_inline_policies(username): 26 | "http://botocore.readthedocs.io/en/latest/reference/services/iam.html#IAM.Client.list_user_policies" 27 | return ( 28 | botocore_client.get("iam", "list_user_policies", [], {"UserName": username}) 29 | .extract_key("PolicyNames") 30 | .flatten() 31 | .values() 32 | ) 33 | 34 | 35 | def iam_managed_policies(username): 36 | "http://botocore.readthedocs.io/en/latest/reference/services/iam.html#IAM.Client.list_attached_user_policies" 37 | return ( 38 | botocore_client.get( 39 | "iam", "list_attached_user_policies", [], {"UserName": username} 40 | ) 41 | .extract_key("AttachedPolicies") 42 | .flatten() 43 | .values() 44 | ) 45 | 46 | 47 | def iam_user_groups(username): 48 | "http://botocore.readthedocs.io/en/latest/reference/services/iam.html#IAM.Client.list_groups_for_user" 49 | return ( 50 | botocore_client.get("iam", "list_groups_for_user", [], {"UserName": username}) 51 | .extract_key("Groups") 52 | .flatten() 53 | .values() 54 | ) 55 | 56 | 57 | def iam_user_group_inline_policies(username): 58 | "http://botocore.readthedocs.io/en/latest/reference/services/iam.html#IAM.Client.list_group_policies" 59 | return [ 60 | botocore_client.get( 61 | "iam", "list_group_policies", [], {"GroupName": group["GroupName"]} 62 | ) 63 | .extract_key("PolicyNames") 64 | .flatten() 65 | .values() 66 | for group in iam_user_groups(username) 67 | ] 68 | 69 | 70 | def iam_user_group_managed_policies(username): 71 | 
"http://botocore.readthedocs.io/en/latest/reference/services/iam.html#IAM.Client.list_attached_group_policies" 72 | return [ 73 | botocore_client.get( 74 | "iam", "list_attached_group_policies", [], {"GroupName": group["GroupName"]} 75 | ) 76 | .extract_key("AttachedPolicies") 77 | .flatten() 78 | .values() 79 | for group in iam_user_groups(username) 80 | ] 81 | 82 | 83 | def iam_all_user_policies(username): 84 | """ 85 | Gets all policies that can be attached to a user. This includes: 86 | - Inline policies on the user 87 | - Managed policies on the user 88 | - Inline policies on the group that the user is in 89 | - Managed policies on the group that the user is in 90 | 91 | Inline policy API calls just return the name of the policy, so we create a single key dictionary to 92 | allow for standard access to the policy name ({'PolicyName': policy_name}) 93 | """ 94 | inline = [] 95 | inline_policies = [ 96 | iam_inline_policies(username=username) 97 | + iam_user_group_inline_policies(username=username) 98 | ] 99 | for policies in inline_policies: 100 | for policy_name in policies: 101 | if isinstance(policy_name, str): 102 | inline += {"PolicyName": policy_name} 103 | 104 | managed = [ 105 | policy 106 | for policies in iam_managed_policies(username=username) 107 | + iam_user_group_managed_policies(username=username) 108 | for policy in policies 109 | ] 110 | 111 | return inline + managed 112 | 113 | 114 | def iam_users_with_policies(): 115 | return [ 116 | {**{"Policies": iam_all_user_policies(username=user["UserName"])}, **user} 117 | for user in iam_users() 118 | ] 119 | 120 | 121 | def iam_users_with_policies_and_groups(): 122 | """Users with their associated Policies and Groups""" 123 | return [ 124 | {**{"Groups": iam_user_groups(username=user["UserName"])}, **user} 125 | for user in iam_users_with_policies() 126 | ] 127 | 128 | 129 | def iam_admin_login_profiles(): 130 | "http://botocore.readthedocs.io/en/latest/reference/services/iam.html#IAM.Client.get_login_profile" 131 | return iam_login_profiles(iam_admin_users()) 132 | 133 | 134 | def iam_admin_mfa_devices(): 135 | "https://botocore.readthedocs.io/en/latest/reference/services/iam.html#IAM.Client.list_mfa_devices" 136 | return iam_mfa_devices(iam_admin_users()) 137 | 138 | 139 | def iam_user_login_profiles(): 140 | "http://botocore.readthedocs.io/en/latest/reference/services/iam.html#IAM.Client.get_login_profile" 141 | return iam_login_profiles(iam_users()) 142 | 143 | 144 | def iam_user_mfa_devices(): 145 | "https://botocore.readthedocs.io/en/latest/reference/services/iam.html#IAM.Client.list_mfa_devices" 146 | return iam_mfa_devices(iam_users()) 147 | 148 | 149 | def iam_login_profiles(users): 150 | "http://botocore.readthedocs.io/en/latest/reference/services/iam.html#IAM.Client.get_login_profile" 151 | return [ 152 | botocore_client.get( 153 | "iam", 154 | "get_login_profile", 155 | [], 156 | {"UserName": user["UserName"]}, 157 | result_from_error=lambda error, call: {"LoginProfile": None}, 158 | ) 159 | .extract_key("LoginProfile") 160 | .values()[0] 161 | for user in users 162 | ] 163 | 164 | 165 | def iam_mfa_devices(users): 166 | "https://botocore.readthedocs.io/en/latest/reference/services/iam.html#IAM.Client.list_mfa_devices" 167 | return [ 168 | botocore_client.get( 169 | "iam", "list_mfa_devices", [], {"UserName": user["UserName"]} 170 | ) 171 | .extract_key("MFADevices") 172 | .values()[0] 173 | for user in users 174 | ] 175 | 176 | 177 | def iam_roles(): 178 | 
"http://botocore.readthedocs.io/en/latest/reference/services/iam.html#IAM.Client.list_roles" 179 | return ( 180 | botocore_client.get("iam", "list_roles", [], {}) 181 | .extract_key("Roles") 182 | .flatten() 183 | .values() 184 | ) 185 | 186 | 187 | def iam_all_role_policies(rolename): 188 | return [ 189 | {"PolicyName": policy_name} 190 | for policy_name in iam_role_inline_policies(rolename=rolename) 191 | ] + iam_role_managed_policies(rolename=rolename) 192 | 193 | 194 | def iam_roles_with_policies(): 195 | return [ 196 | {**{"Policies": iam_all_role_policies(rolename=role["RoleName"])}, **role} 197 | for role in iam_roles() 198 | ] 199 | 200 | 201 | def iam_role_inline_policies(rolename): 202 | "http://botocore.readthedocs.io/en/latest/reference/services/iam.html#IAM.Client.list_role_policies" 203 | return ( 204 | botocore_client.get("iam", "list_role_policies", [], {"RoleName": rolename}) 205 | .extract_key("PolicyNames") 206 | .flatten() 207 | .values() 208 | ) 209 | 210 | 211 | def iam_role_managed_policies(rolename): 212 | "http://botocore.readthedocs.io/en/latest/reference/services/iam.html#IAM.Client.list_attached_role_policies" 213 | return ( 214 | botocore_client.get( 215 | "iam", "list_attached_role_policies", [], {"RoleName": rolename} 216 | ) 217 | .extract_key("AttachedPolicies") 218 | .flatten() 219 | .values() 220 | ) 221 | 222 | 223 | def iam_admin_roles(): 224 | return [role for role in iam_roles_with_policies() if user_is_admin(role)] 225 | 226 | 227 | def iam_access_keys_for_user(username): 228 | "https://botocore.readthedocs.io/en/latest/reference/services/iam.html#IAM.Client.list_access_keys" 229 | return ( 230 | botocore_client.get("iam", "list_access_keys", [], {"UserName": username}) 231 | .extract_key("AccessKeyMetadata") 232 | .flatten() 233 | .values() 234 | ) 235 | 236 | 237 | def iam_get_all_access_keys(): 238 | return sum( 239 | [iam_access_keys_for_user(username=user["UserName"]) for user in iam_users()], 240 | [], 241 | ) 242 | 243 | 244 | def iam_generate_credential_report(): 245 | "http://botocore.readthedocs.io/en/latest/reference/services/iam.html#IAM.Client.generate_credential_report" 246 | results = botocore_client.get( 247 | "iam", "generate_credential_report", [], {}, do_not_cache=True 248 | ).results 249 | if len(results): 250 | return results[0].get("State") 251 | return "" 252 | 253 | 254 | def iam_get_credential_report(): 255 | "http://botocore.readthedocs.io/en/latest/reference/services/iam.html#IAM.Client.get_credential_report" 256 | # the story of the wack api 257 | while True: 258 | cred_report_state = iam_generate_credential_report() 259 | if cred_report_state not in ["STARTED", "INPROGRESS"]: 260 | break 261 | time.sleep(2) 262 | 263 | # We want this to blow up if it can't get the "Content" 264 | results = botocore_client.get( 265 | "iam", "get_credential_report", [], {}, do_not_cache=True 266 | ).results 267 | if not len(results): 268 | return [] 269 | content = results[0]["Content"] 270 | decoded_content = content.decode("utf-8") 271 | return list(csv.DictReader(decoded_content.split("\n"))) 272 | 273 | 274 | # (ajvb) I'm not a big fan of this, but it seems to be the easiest way to 275 | # only have to call `get_credential_report` once since it is not easily cacheable. 276 | def iam_admin_users_with_credential_report(): 277 | """Returns all "admin" users with an additional "CredentialReport" key, 278 | which is a dict containing their row in the Credentials Report. 
279 | """ 280 | admins = iam_admin_users() 281 | credential_report = iam_get_credential_report() 282 | 283 | for admin in admins: 284 | for user in credential_report: 285 | if admin["UserName"] == user["user"]: 286 | admin["CredentialReport"] = user 287 | break 288 | 289 | return admins 290 | 291 | 292 | def user_is_admin(user): 293 | for policy in user["Policies"]: 294 | if isinstance(policy, dict): 295 | if policy.get("PolicyName", "") in custom_config_global.aws.admin_policies: 296 | return True 297 | 298 | for group in user.get("Groups", []): 299 | if isinstance(group, dict): 300 | if group.get("GroupName", "") in custom_config_global.aws.admin_groups: 301 | return True 302 | 303 | return False 304 | 305 | 306 | def get_all_users_that_can_access_aws_account(): 307 | """ 308 | Returns users with console or API access to an AWS account. 309 | """ 310 | profile_usernames = [ 311 | profile["UserName"] 312 | for profile in iam_user_login_profiles() 313 | if profile is not None 314 | ] 315 | access_key_usernames = [ 316 | akey["UserName"] 317 | for akey in iam_get_all_access_keys() 318 | if akey["Status"] == "Active" 319 | ] 320 | return set(profile_usernames + access_key_usernames) 321 | -------------------------------------------------------------------------------- /aws/iam/test_iam_access_key_is_old.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aws.iam.resources import iam_get_all_access_keys 4 | from aws.iam.helpers import get_iam_user_name, is_access_key_expired 5 | 6 | 7 | @pytest.fixture 8 | def access_key_expiration_date(pytestconfig): 9 | return pytestconfig.custom_config.aws.get_access_key_expiration_date() 10 | 11 | 12 | @pytest.mark.iam 13 | @pytest.mark.parametrize( 14 | "iam_access_key", iam_get_all_access_keys(), ids=get_iam_user_name, 15 | ) 16 | def test_iam_access_key_is_old(iam_access_key, access_key_expiration_date): 17 | assert not is_access_key_expired(iam_access_key, access_key_expiration_date) 18 | -------------------------------------------------------------------------------- /aws/iam/test_iam_admin_user_with_access_keys.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aws.iam.helpers import get_iam_user_name 4 | from aws.iam.resources import iam_admin_users_with_credential_report 5 | 6 | 7 | @pytest.mark.iam 8 | @pytest.mark.parametrize( 9 | "iam_admin_user", iam_admin_users_with_credential_report(), ids=get_iam_user_name, 10 | ) 11 | def test_iam_admin_user_with_access_key(iam_admin_user): 12 | """Test that all "admin" users do not have access keys 13 | associated to their user. 14 | 15 | Note: Due to the naive mechanism for determing what an "admin" is, this test 16 | can easily have both false positives and (more likely) false negatives. 
17 | """ 18 | assert ( 19 | iam_admin_user["CredentialReport"]["access_key_1_active"] != "true" 20 | ), "Access key found for admin user: {}".format(iam_admin_user["UserName"]) 21 | assert ( 22 | iam_admin_user["CredentialReport"]["access_key_2_active"] != "true" 23 | ), "Access key found for admin user: {}".format(iam_admin_user["UserName"]) 24 | -------------------------------------------------------------------------------- /aws/iam/test_iam_admin_user_without_mfa.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aws.iam.helpers import get_iam_resource_id 4 | from aws.iam.resources import iam_admin_login_profiles, iam_admin_mfa_devices 5 | 6 | 7 | @pytest.mark.iam 8 | @pytest.mark.parametrize( 9 | ["iam_login_profile", "iam_user_mfa_devices"], 10 | zip(iam_admin_login_profiles(), iam_admin_mfa_devices()), 11 | ids=get_iam_resource_id, 12 | ) 13 | def test_iam_admin_user_without_mfa(iam_login_profile, iam_user_mfa_devices): 14 | """Test that all "admin" users with console access also have an MFA device. 15 | 16 | Note: Due to the naive mechanism for determing what an "admin" is, this test 17 | can easily have both false positives and (more likely) false negatives. 18 | """ 19 | if bool(iam_login_profile): 20 | assert len(iam_user_mfa_devices) > 0, "No MFA Device found for {}".format( 21 | iam_login_profile["UserName"] 22 | ) 23 | -------------------------------------------------------------------------------- /aws/iam/test_iam_cross_account_admin_roles_require_mfa.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from helpers import get_param_id 4 | 5 | from aws.iam.resources import iam_admin_roles 6 | 7 | 8 | @pytest.mark.iam 9 | @pytest.mark.parametrize( 10 | "iam_admin_role", 11 | iam_admin_roles(), 12 | ids=lambda role: get_param_id(role, "RoleName"), 13 | ) 14 | def test_iam_cross_account_admin_roles_require_mfa(iam_admin_role): 15 | """Test that all IAM Roles that include admin policies and have cross account 16 | trust relationships require MFA. 17 | 18 | Note: Due to the naive mechanism for determing what an "admin" is, this test 19 | can easily have both false positives and (more likely) false negatives. 
20 | """ 21 | for statement in iam_admin_role["AssumeRolePolicyDocument"]["Statement"]: 22 | if statement["Action"].startswith("sts") and "AWS" in statement["Principal"]: 23 | assert "Condition" in statement 24 | assert "aws:MultiFactorAuthPresent" in statement["Condition"]["Bool"] 25 | assert ( 26 | statement["Condition"]["Bool"]["aws:MultiFactorAuthPresent"] == "true" 27 | ) 28 | -------------------------------------------------------------------------------- /aws/iam/test_iam_user_is_inactive.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from helpers import get_param_id 4 | 5 | from aws.iam.resources import iam_get_credential_report 6 | from aws.iam.helpers import user_is_inactive 7 | 8 | 9 | @pytest.fixture 10 | def no_activity_since(pytestconfig): 11 | return pytestconfig.custom_config.aws.no_activity_since() 12 | 13 | 14 | @pytest.fixture 15 | def created_after(pytestconfig): 16 | return pytestconfig.custom_config.aws.created_after() 17 | 18 | 19 | @pytest.mark.iam 20 | @pytest.mark.parametrize( 21 | "iam_user_row", 22 | iam_get_credential_report(), 23 | ids=lambda user: get_param_id(user, "user"), 24 | ) 25 | def test_iam_user_is_inactive(iam_user_row, no_activity_since, created_after): 26 | """Tests if a user is inactive. This is done by checking the last time 27 | an access key was used or the user logged into the console. If the config 28 | settings are not present, it defaults to considering a year ago as inactive 29 | and giving a user a 1 week grace period (created_after). 30 | """ 31 | assert not user_is_inactive( 32 | iam_user_row, no_activity_since, created_after 33 | ), "User {} hasn't been used since {} and is out of the grace period.".format( 34 | iam_user_row["user"], no_activity_since.date() 35 | ) 36 | -------------------------------------------------------------------------------- /aws/iam/test_iam_user_without_mfa.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aws.iam.helpers import get_iam_resource_id 4 | from aws.iam.resources import iam_user_login_profiles, iam_user_mfa_devices 5 | 6 | 7 | @pytest.mark.iam 8 | @pytest.mark.parametrize( 9 | ["iam_login_profile", "iam_user_mfa_devices"], 10 | zip(iam_user_login_profiles(), iam_user_mfa_devices()), 11 | ids=get_iam_resource_id, 12 | ) 13 | def test_iam_user_without_mfa(iam_login_profile, iam_user_mfa_devices): 14 | """Test that all users with console access also have an MFA device.""" 15 | if bool(iam_login_profile): 16 | assert len(iam_user_mfa_devices) > 0, "No MFA Device found for {}".format( 17 | iam_login_profile["UserName"] 18 | ) 19 | -------------------------------------------------------------------------------- /aws/rds/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/frost/5e4bce438a4c2b27cf97989d4fd3462b3db2610b/aws/rds/__init__.py -------------------------------------------------------------------------------- /aws/rds/helpers.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from helpers import get_param_id 3 | 4 | 5 | def is_rds_db_snapshot_attr_public_access(rds_db_snapshot_attribute): 6 | """ 7 | Checks whether a RDS snapshot attribute is: 8 | 9 | { 10 | "AttributeName": "restore", 11 | "AttributeValues": ["random_aws_account_id", "any"] 12 | } 13 | 14 | >>> 
is_rds_db_snapshot_attr_public_access({"AttributeName": "restore", "AttributeValues": ["any"]}) 15 | True 16 | >>> is_rds_db_snapshot_attr_public_access({"AttributeName": "restore", "AttributeValues": ["aws_account_id"]}) 17 | False 18 | >>> is_rds_db_snapshot_attr_public_access({"AttributeName": "restore", "AttributeValues": []}) 19 | False 20 | >>> is_rds_db_snapshot_attr_public_access({"AttributeName": "blorg", "AttributeValues": ["any"]}) 21 | False 22 | >>> is_rds_db_snapshot_attr_public_access([]) 23 | Traceback (most recent call last): 24 | ... 25 | TypeError: list indices must be integers or slices, not str 26 | >>> is_rds_db_snapshot_attr_public_access(0) 27 | Traceback (most recent call last): 28 | ... 29 | TypeError: 'int' object is not subscriptable 30 | >>> is_rds_db_snapshot_attr_public_access(None) 31 | Traceback (most recent call last): 32 | ... 33 | TypeError: 'NoneType' object is not subscriptable 34 | """ 35 | return ( 36 | rds_db_snapshot_attribute["AttributeName"] == "restore" 37 | and "any" in rds_db_snapshot_attribute["AttributeValues"] 38 | ) 39 | 40 | 41 | def does_rds_db_security_group_grant_public_access(sg): 42 | """ 43 | Checks an RDS instance for a DB security group with CIDRIP 0.0.0.0/0 44 | 45 | >>> does_rds_db_security_group_grant_public_access( 46 | ... {"IPRanges": [{"CIDRIP": "127.0.0.1/32", "Status": "authorized"}, 47 | ... {"CIDRIP": "0.0.0.0/0", "Status": "authorized"}]}) 48 | True 49 | >>> does_rds_db_security_group_grant_public_access({"IPRanges": []}) 50 | False 51 | """ 52 | return any( 53 | ipr["CIDRIP"] == "0.0.0.0/0" and ipr["Status"] == "authorized" 54 | for ipr in sg["IPRanges"] 55 | ) 56 | 57 | 58 | def does_vpc_security_group_grant_public_access(sg): 59 | """ 60 | Checks an RDS instance for a VPC security groups with ingress permission ipv4 range 0.0.0.0/0 or ipv6 range :::/0 61 | 62 | >>> does_vpc_security_group_grant_public_access( 63 | ... {'IpPermissions': [{'Ipv6Ranges': [], 'IpRanges': [{'CidrIp': '0.0.0.0/0'}]}]}) 64 | True 65 | >>> does_vpc_security_group_grant_public_access( 66 | ... {'IpPermissions': [{'Ipv6Ranges': [], 'IpRanges': []}]}) 67 | False 68 | >>> does_vpc_security_group_grant_public_access( 69 | ... {'IpPermissions': [{'Ipv6Ranges': [], 'IpRanges': [{'CidrIp': '192.168.1.0/0'}]}]}) 70 | False 71 | """ 72 | public_ipv4 = any( 73 | ipr["CidrIp"] == "0.0.0.0/0" 74 | for ipp in sg["IpPermissions"] 75 | for ipr in ipp["IpRanges"] 76 | ) 77 | public_ipv6 = any( 78 | ipr["CidrIpv6"] == "::/0" 79 | for ipp in sg["IpPermissions"] 80 | for ipr in ipp["Ipv6Ranges"] 81 | ) 82 | return public_ipv4 or public_ipv6 83 | 84 | 85 | def is_rds_db_instance_encrypted(rds_db_instance): 86 | """ 87 | Checks the RDS instance 'StorageEncrypted' value. 88 | 89 | >>> is_rds_db_instance_encrypted({'StorageEncrypted': True}) 90 | True 91 | >>> is_rds_db_instance_encrypted({'StorageEncrypted': False}) 92 | False 93 | >>> is_rds_db_instance_encrypted({}) 94 | Traceback (most recent call last): 95 | ... 96 | KeyError: 'StorageEncrypted' 97 | >>> is_rds_db_instance_encrypted(0) 98 | Traceback (most recent call last): 99 | ... 100 | TypeError: 'int' object is not subscriptable 101 | >>> is_rds_db_instance_encrypted(None) 102 | Traceback (most recent call last): 103 | ... 104 | TypeError: 'NoneType' object is not subscriptable 105 | """ 106 | return bool(rds_db_instance["StorageEncrypted"]) 107 | 108 | 109 | def is_rds_db_snapshot_encrypted(rds_db_snapshot): 110 | """ 111 | Checks the RDS snapshot 'Encrypted' value. 
112 | 
113 |     >>> is_rds_db_snapshot_encrypted({'Encrypted': True})
114 |     True
115 |     >>> is_rds_db_snapshot_encrypted({'Encrypted': False})
116 |     False
117 |     >>> is_rds_db_snapshot_encrypted({})
118 |     Traceback (most recent call last):
119 |     ...
120 |     KeyError: 'Encrypted'
121 |     >>> is_rds_db_snapshot_encrypted(0)
122 |     Traceback (most recent call last):
123 |     ...
124 |     TypeError: 'int' object is not subscriptable
125 |     >>> is_rds_db_snapshot_encrypted(None)
126 |     Traceback (most recent call last):
127 |     ...
128 |     TypeError: 'NoneType' object is not subscriptable
129 |     """
130 |     return bool(rds_db_snapshot["Encrypted"])
131 | 
132 | 
133 | def get_db_instance_id(db_instance):
134 |     return get_param_id(db_instance, "DBInstanceIdentifier")
135 | 
136 | 
137 | def get_db_snapshot_arn(snapshot):
138 |     return get_param_id(snapshot, "DBSnapshotArn")
139 | 
140 | 
141 | def get_db_security_group_arn(sg):
142 |     return get_param_id(sg, "DBSecurityGroupArn")
143 | 
144 | 
145 | def get_rds_resource_id(resource):
146 |     if isinstance(resource, dict) and "DBInstanceIdentifier" in resource:
147 |         return get_db_instance_id(resource)
148 |     if isinstance(resource, dict) and "DBSnapshotArn" in resource:
149 |         return get_db_snapshot_arn(resource)
150 |     if isinstance(resource, dict) and "DBSecurityGroupArn" in resource:
151 |         return get_db_security_group_arn(resource)
152 |     if isinstance(resource, dict) and "AttributeName" in resource:
153 |         return get_param_id(resource, "AttributeName")
154 | 
155 |     if isinstance(resource, list):
156 |         if len(resource) == 0:
157 |             return "empty"
158 |         return get_rds_resource_id(resource[0])
159 | 
160 |     return None
161 | 
162 | 
163 | def rds_db_snapshot_not_too_old(snapshot, snapshot_created_days_ago=365):
164 |     """
165 |     Check that an RDS snapshot was created within the last "snapshot_created_days_ago" days.
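    The cutoff is parameterizable; a sketch with a 10-day-old snapshot
    (values illustrative):

    >>> from datetime import timedelta, timezone
    >>> ten_days_ago = datetime.now(timezone.utc) - timedelta(days=10)
    >>> rds_db_snapshot_not_too_old({"SnapshotCreateTime": ten_days_ago}, snapshot_created_days_ago=30)
    True
    >>> rds_db_snapshot_not_too_old({"SnapshotCreateTime": ten_days_ago}, snapshot_created_days_ago=7)
    False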
166 | 167 | >>> from datetime import datetime 168 | >>> from datetime import timezone 169 | 170 | >>> rds_db_snapshot_not_too_old({"SnapshotCreateTime": datetime.now(timezone.utc)}) 171 | True 172 | >>> rds_db_snapshot_not_too_old({"SnapshotCreateTime": datetime.fromisoformat("2019-09-11T19:45:22.116+00:00")}) 173 | False 174 | """ 175 | create_time = snapshot["SnapshotCreateTime"] 176 | now = datetime.now(tz=create_time.tzinfo) 177 | 178 | if (now - create_time).days < snapshot_created_days_ago: 179 | return True 180 | else: 181 | return False 182 | -------------------------------------------------------------------------------- /aws/rds/resources.py: -------------------------------------------------------------------------------- 1 | from conftest import botocore_client 2 | 3 | 4 | def rds_db_instances(): 5 | "http://botocore.readthedocs.io/en/latest/reference/services/rds.html#RDS.Client.describe_db_instances" 6 | return ( 7 | botocore_client.get("rds", "describe_db_instances", [], {}) 8 | .extract_key("DBInstances") 9 | .flatten() 10 | .values() 11 | ) 12 | 13 | 14 | def rds_db_instance_tags(db): 15 | "http://botocore.readthedocs.io/en/latest/reference/services/rds.html#RDS.Client.list_tags_for_resource" 16 | return ( 17 | botocore_client.get( 18 | service_name="rds", 19 | method_name="list_tags_for_resource", 20 | call_args=[], 21 | call_kwargs={"ResourceName": db["DBInstanceArn"]}, 22 | profiles=[db["__pytest_meta"]["profile"]], 23 | regions=[db["__pytest_meta"]["region"]], 24 | result_from_error=lambda e, call: [], 25 | ) 26 | .extract_key("TagList") 27 | .flatten() 28 | .values() 29 | ) 30 | 31 | 32 | def rds_db_instances_with_tags(): 33 | return [ 34 | {**{"TagList": rds_db_instance_tags(db=db)}, **db} for db in rds_db_instances() 35 | ] 36 | 37 | 38 | def rds_db_instances_vpc_security_groups(): 39 | "http://botocore.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.describe_security_groups" 40 | return [ 41 | botocore_client.get( 42 | service_name="ec2", 43 | method_name="describe_security_groups", 44 | call_args=[], 45 | call_kwargs={ 46 | "Filters": [ 47 | { 48 | "Name": "group-id", 49 | "Values": [ 50 | sg["VpcSecurityGroupId"] 51 | for sg in instance["VpcSecurityGroups"] 52 | if sg["Status"] == "active" 53 | ], 54 | } 55 | ] 56 | }, 57 | profiles=[instance["__pytest_meta"]["profile"]], 58 | regions=[instance["__pytest_meta"]["region"]], 59 | result_from_error=lambda e, call: {"SecurityGroups": []}, 60 | ) # treat not found as empty list 61 | .extract_key("SecurityGroups") 62 | .flatten() 63 | .values() 64 | for instance in rds_db_instances() 65 | ] 66 | 67 | 68 | def rds_db_snapshots(): 69 | "http://botocore.readthedocs.io/en/latest/reference/services/rds.html#RDS.Client.describe_db_snapshots" 70 | return ( 71 | botocore_client.get("rds", "describe_db_snapshots", [], {}) 72 | .extract_key("DBSnapshots") 73 | .flatten() 74 | .values() 75 | ) 76 | 77 | 78 | def rds_db_snapshot_attributes(): 79 | "http://botocore.readthedocs.io/en/latest/reference/services/rds.html#RDS.Client.describe_db_snapshot_attributes" 80 | empty_attrs = {"DBSnapshotAttributesResult": {"DBSnapshotAttributes": []}} 81 | return [ 82 | botocore_client.get( 83 | service_name="rds", 84 | method_name="describe_db_snapshot_attributes", 85 | call_args=[], 86 | call_kwargs={"DBSnapshotIdentifier": snapshot["DBSnapshotIdentifier"]}, 87 | profiles=[snapshot["__pytest_meta"]["profile"]], 88 | regions=[snapshot["__pytest_meta"]["region"]], 89 | result_from_error=lambda e, call: empty_attrs, # treat not 
found as empty list
90 |         )
91 |         .extract_key("DBSnapshotAttributesResult")
92 |         .extract_key("DBSnapshotAttributes")
93 |         .values()[0]
94 |         for snapshot in rds_db_snapshots()
95 |     ]
96 | 
97 | 
98 | def rds_db_security_groups():
99 |     "http://botocore.readthedocs.io/en/latest/reference/services/rds.html#RDS.Client.describe_db_security_groups"
100 |     sgs = []
101 | 
102 |     for response in botocore_client.get(
103 |         "rds", "describe_db_security_groups", [], {}
104 |     ).values():
105 |         sgs.extend(response["DBSecurityGroups"])
106 | 
107 |     return sgs
108 | 
--------------------------------------------------------------------------------
/aws/rds/test_rds_db_instance_backup_enabled.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | from aws.rds.helpers import get_db_instance_id
4 | from aws.rds.resources import rds_db_instances_with_tags
5 | 
6 | 
7 | @pytest.mark.rds
8 | @pytest.mark.parametrize(
9 |     "rds_db_instance", rds_db_instances_with_tags(), ids=get_db_instance_id,
10 | )
11 | def test_rds_db_instance_backup_enabled(rds_db_instance):
12 |     assert (
13 |         rds_db_instance["BackupRetentionPeriod"] > 0
14 |     ), "Backups disabled for {}".format(rds_db_instance["DBInstanceIdentifier"])
--------------------------------------------------------------------------------
/aws/rds/test_rds_db_instance_encrypted.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | from aws.rds.resources import rds_db_instances_with_tags
4 | from aws.rds.helpers import get_db_instance_id, is_rds_db_instance_encrypted
5 | 
6 | 
7 | @pytest.mark.rds
8 | @pytest.mark.parametrize(
9 |     "rds_db_instance", rds_db_instances_with_tags(), ids=get_db_instance_id,
10 | )
11 | def test_rds_db_instance_encrypted(rds_db_instance):
12 |     assert is_rds_db_instance_encrypted(rds_db_instance)
--------------------------------------------------------------------------------
/aws/rds/test_rds_db_instance_is_multiaz.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | from aws.rds.helpers import get_db_instance_id
4 | from aws.rds.resources import rds_db_instances_with_tags
5 | 
6 | 
7 | @pytest.mark.rds
8 | @pytest.mark.parametrize(
9 |     "rds_db_instance", rds_db_instances_with_tags(), ids=get_db_instance_id,
10 | )
11 | def test_rds_db_instance_is_multiaz(rds_db_instance):
12 |     assert (
13 |         rds_db_instance["MultiAZ"]
14 |     ), "{} is in a single availability zone".format(
15 |         rds_db_instance["DBInstanceIdentifier"]
16 |     )
--------------------------------------------------------------------------------
/aws/rds/test_rds_db_instance_is_postgres_with_invalid_certificate.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | from botocore.utils import parse_timestamp
3 | 
4 | import pytest
5 | 
6 | from aws.rds.helpers import get_db_instance_id
7 | from aws.rds.resources import rds_db_instances_with_tags
8 | 
9 | 
10 | @pytest.mark.rds
11 | @pytest.mark.parametrize(
12 |     "rds_db_instance", rds_db_instances_with_tags(), ids=get_db_instance_id,
13 | )
14 | def test_rds_db_instance_is_postgres_with_invalid_certificate(rds_db_instance):
15 |     if rds_db_instance["Engine"] != "postgres":
16 |         pytest.skip("RDS DB instance engine is not Postgres.")
17 | 
18 |     if rds_db_instance["DBInstanceStatus"] == "creating":
19 |         pytest.skip('RDS DB instance status is still "creating".')
20 | 
21 |     ict = 
rds_db_instance["InstanceCreateTime"] 22 | if isinstance(ict, str): 23 | ict = parse_timestamp(ict) 24 | 25 | assert ict > datetime.datetime( 26 | 2014, 8, 5, tzinfo=datetime.timezone(datetime.timedelta(), "utc") 27 | ) 28 | -------------------------------------------------------------------------------- /aws/rds/test_rds_db_instance_minor_version_updates_enabled.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aws.rds.helpers import get_db_instance_id 4 | from aws.rds.resources import rds_db_instances_with_tags 5 | 6 | 7 | @pytest.mark.rds 8 | @pytest.mark.parametrize( 9 | "rds_db_instance", rds_db_instances_with_tags(), ids=get_db_instance_id, 10 | ) 11 | def test_rds_db_instance_minor_version_updates_enabled(rds_db_instance): 12 | """ 13 | Enable automatic minor version updates (e.g. 5.6.26 to 5.6.27) 14 | during maintenance windows to receive security patches. 15 | 16 | Only checked for maria, mysql, and postgres dbs. 17 | 18 | http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Upgrading.html 19 | """ 20 | if rds_db_instance["Engine"] not in ["mariadb", "mysql", "postgres"]: 21 | pytest.skip( 22 | "Engine type %s does not support minor version updates." 23 | % rds_db_instance["Engine"] 24 | ) 25 | 26 | assert rds_db_instance[ 27 | "AutoMinorVersionUpgrade" 28 | ], "Minor version automatic upgrades disabled for {}".format( 29 | rds_db_instance["DBInstanceIdentifier"] 30 | ) 31 | -------------------------------------------------------------------------------- /aws/rds/test_rds_db_instance_not_publicly_accessible_by_vpc_sg.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aws.rds.resources import ( 4 | rds_db_instances_with_tags, 5 | rds_db_instances_vpc_security_groups, 6 | ) 7 | from aws.rds.helpers import ( 8 | does_vpc_security_group_grant_public_access, 9 | get_db_instance_id, 10 | ) 11 | 12 | 13 | @pytest.mark.rds 14 | @pytest.mark.parametrize( 15 | ["rds_db_instance", "ec2_security_groups"], 16 | zip(rds_db_instances_with_tags(), rds_db_instances_vpc_security_groups()), 17 | ids=lambda db: get_db_instance_id(db) 18 | if isinstance(db, dict) and "DBInstanceIdentifier" in db 19 | else "secgroups", 20 | ) 21 | def test_rds_db_instance_not_publicly_accessible_by_vpc_security_group( 22 | rds_db_instance, ec2_security_groups 23 | ): 24 | """ 25 | Checks whether any VPC/EC2 security groups that are attached to an RDS instance 26 | allow for access from the public internet. 
27 | """ 28 | if not ec2_security_groups: 29 | assert not rds_db_instance["VpcSecurityGroups"] 30 | else: 31 | assert set(sg["GroupId"] for sg in ec2_security_groups) == set( 32 | sg["VpcSecurityGroupId"] for sg in rds_db_instance["VpcSecurityGroups"] 33 | ) 34 | 35 | assert not any( 36 | does_vpc_security_group_grant_public_access(sg) 37 | for sg in ec2_security_groups 38 | ) 39 | -------------------------------------------------------------------------------- /aws/rds/test_rds_db_instance_storage_type_not_piops.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aws.rds.helpers import get_db_instance_id 4 | from aws.rds.resources import rds_db_instances_with_tags 5 | 6 | 7 | @pytest.mark.rds 8 | @pytest.mark.parametrize( 9 | "rds_db_instance", rds_db_instances_with_tags(), ids=get_db_instance_id, 10 | ) 11 | def test_rds_db_instance_storage_type_not_piops(rds_db_instance): 12 | """PIOPs storage type is expensive. Cloudops recommends using gp2 type with 13 | a volume size that offers the same IOPs as the desired PIOPs type. 14 | """ 15 | assert not rds_db_instance["StorageType"].startswith( 16 | "io" 17 | ), f"{rds_db_instance['DBInstanceIdentifier']} uses PIOPs storage type with IOPs of {rds_db_instance['Iops']}" 18 | -------------------------------------------------------------------------------- /aws/rds/test_rds_db_security_group_does_not_grant_public_access.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aws.rds.resources import rds_db_security_groups 4 | from aws.rds.helpers import ( 5 | does_rds_db_security_group_grant_public_access, 6 | get_db_security_group_arn, 7 | ) 8 | 9 | 10 | @pytest.mark.rds 11 | @pytest.mark.parametrize( 12 | "rds_db_security_group", rds_db_security_groups(), ids=get_db_security_group_arn, 13 | ) 14 | def test_rds_db_security_group_does_not_grant_public_access(rds_db_security_group): 15 | """ 16 | Checks whether any RDS security group allows for inbound 17 | access from the public internet 18 | """ 19 | assert not does_rds_db_security_group_grant_public_access(rds_db_security_group) 20 | -------------------------------------------------------------------------------- /aws/rds/test_rds_db_snapshot_encrypted.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aws.rds.resources import rds_db_snapshots 4 | from aws.rds.helpers import is_rds_db_snapshot_encrypted, get_db_snapshot_arn 5 | 6 | 7 | @pytest.mark.rds 8 | @pytest.mark.parametrize( 9 | "rds_db_snapshot", rds_db_snapshots(), ids=get_db_snapshot_arn, 10 | ) 11 | def test_rds_db_snapshot_encrypted(rds_db_snapshot): 12 | assert is_rds_db_snapshot_encrypted(rds_db_snapshot) 13 | -------------------------------------------------------------------------------- /aws/rds/test_rds_db_snapshot_not_publicly_accessible.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aws.rds.resources import rds_db_snapshots, rds_db_snapshot_attributes 4 | from aws.rds.helpers import is_rds_db_snapshot_attr_public_access, get_rds_resource_id 5 | 6 | 7 | @pytest.mark.rds 8 | @pytest.mark.parametrize( 9 | ["rds_db_snapshot", "rds_db_snapshot_attributes"], 10 | zip(rds_db_snapshots(), rds_db_snapshot_attributes()), 11 | ids=get_rds_resource_id, 12 | ) 13 | def test_rds_db_snapshot_not_publicly_accessible( 14 | rds_db_snapshot, rds_db_snapshot_attributes 15 | ): 16 | for attr 
in rds_db_snapshot_attributes: 17 | assert not is_rds_db_snapshot_attr_public_access(attr) 18 | -------------------------------------------------------------------------------- /aws/rds/test_rds_db_snapshot_not_too_old.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aws.rds.resources import rds_db_snapshots 4 | from aws.rds.helpers import get_db_snapshot_arn, rds_db_snapshot_not_too_old 5 | 6 | 7 | @pytest.mark.rds 8 | @pytest.mark.parametrize( 9 | "rds_db_snapshot", rds_db_snapshots(), ids=get_db_snapshot_arn, 10 | ) 11 | def test_rds_db_snapshot_not_too_old(rds_db_snapshot): 12 | assert rds_db_snapshot_not_too_old( 13 | rds_db_snapshot 14 | ), f"{rds_db_snapshot['DBSnapshotIdentifier']} is created at {rds_db_snapshot['SnapshotCreateTime']}, and is considered too old." 15 | -------------------------------------------------------------------------------- /aws/redshift/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/frost/5e4bce438a4c2b27cf97989d4fd3462b3db2610b/aws/redshift/__init__.py -------------------------------------------------------------------------------- /aws/redshift/helpers.py: -------------------------------------------------------------------------------- 1 | from helpers import get_param_id 2 | 3 | 4 | def redshift_cluster_security_group_test_id(security_group): 5 | return get_param_id(security_group, "ClusterSecurityGroupName") 6 | 7 | 8 | def redshift_cluster_security_group_is_open_to_all_ips(security_group): 9 | """ 10 | Returns True if the security group grants access to all IPs. 11 | 12 | Does not check EC2 Security groups. 13 | 14 | 15 | >>> redshift_cluster_security_group_is_open_to_all_ips({'IPRanges': [{'CIDRIP': '0.0.0.0/0'}]}) 16 | True 17 | >>> redshift_cluster_security_group_is_open_to_all_ips({'IPRanges': [{'CIDRIP': '::/0'}]}) 18 | True 19 | 20 | >>> redshift_cluster_security_group_is_open_to_all_ips({'IPRanges': [{'CIDRIP': '192.168.1.1'}]}) 21 | False 22 | >>> redshift_cluster_security_group_is_open_to_all_ips({'IPRanges': []}) 23 | False 24 | >>> redshift_cluster_security_group_is_open_to_all_ips({}) 25 | False 26 | 27 | """ 28 | for ipr in security_group.get("IPRanges", []): 29 | if ipr.get("CIDRIP", None) in ["0.0.0.0/0", "::/0"]: 30 | return True 31 | 32 | return False 33 | -------------------------------------------------------------------------------- /aws/redshift/resources.py: -------------------------------------------------------------------------------- 1 | from conftest import botocore_client 2 | 3 | 4 | def redshift_clusters(): 5 | "botocore.readthedocs.io/en/latest/reference/services/redshift.html#Redshift.Client.describe_clusters" 6 | return ( 7 | botocore_client.get("redshift", "describe_clusters", [], {}) 8 | .extract_key("Clusters") 9 | .flatten() 10 | .values() 11 | ) 12 | 13 | 14 | def redshift_cluster_security_groups(): 15 | "http://botocore.readthedocs.io/en/latest/reference/services/redshift.html#Redshift.Client.describe_cluster_security_groups" # NOQA 16 | return ( 17 | botocore_client.get( 18 | "redshift", 19 | "describe_cluster_security_groups", 20 | [], 21 | {}, 22 | result_from_error=lambda error, call: {"ClusterSecurityGroups": []}, 23 | ) 24 | .extract_key("ClusterSecurityGroups") 25 | .flatten() 26 | .values() 27 | ) 28 | -------------------------------------------------------------------------------- 
/aws/redshift/test_redshift_security_group_does_not_allow_all_ips_access.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aws.redshift.resources import redshift_cluster_security_groups 4 | from aws.redshift.helpers import ( 5 | redshift_cluster_security_group_is_open_to_all_ips, 6 | redshift_cluster_security_group_test_id, 7 | ) 8 | 9 | 10 | @pytest.mark.redshift 11 | @pytest.mark.parametrize( 12 | "security_group", 13 | redshift_cluster_security_groups(), 14 | ids=redshift_cluster_security_group_test_id, 15 | ) 16 | def test_redshift_security_group_does_not_allow_all_ips_access(security_group): 17 | """Checks whether a redshift cluster grants public access via 18 | cluster security group. 19 | """ 20 | assert not redshift_cluster_security_group_is_open_to_all_ips(security_group) 21 | -------------------------------------------------------------------------------- /aws/route53/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/frost/5e4bce438a4c2b27cf97989d4fd3462b3db2610b/aws/route53/__init__.py -------------------------------------------------------------------------------- /aws/route53/resources.py: -------------------------------------------------------------------------------- 1 | from conftest import botocore_client 2 | 3 | 4 | def zones(): 5 | """ 6 | https://botocore.amazonaws.com/v1/documentation/api/latest/reference/services/route53.html#Route53.Client.list_hosted_zones 7 | """ 8 | return ( 9 | botocore_client.get("route53", "list_hosted_zones", [], {}) 10 | .extract_key("HostedZones") 11 | .flatten() 12 | .values() 13 | ) 14 | 15 | 16 | def cnames(): 17 | records = [] 18 | 19 | for zone in zones(): 20 | zone_id = zone["Id"].split("/")[2] 21 | zone_records = ( 22 | botocore_client.get( 23 | "route53", "list_resource_record_sets", [], {"HostedZoneId": zone_id} 24 | ) 25 | .extract_key("ResourceRecordSets") 26 | .flatten() 27 | .values() 28 | ) 29 | records.extend([record for record in zone_records if record["Type"] == "CNAME"]) 30 | 31 | return records 32 | -------------------------------------------------------------------------------- /aws/route53/test_route53_cnames_minimum_ttl_or_greater.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aws.route53.resources import zones, cnames 4 | from helpers import get_param_id 5 | 6 | 7 | MINIMUM_TTL = 600 8 | 9 | 10 | @pytest.mark.parametrize( 11 | "cnames", cnames(), ids=lambda record: get_param_id(record, "Name") 12 | ) 13 | def test_route53_cnames_minimum_ttl_or_greater(cnames): 14 | """ 15 | Tests that CNAMEs in Route53 have a TTL of 600 seconds or more. 
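    Very low TTLs mostly increase resolver query volume against Route53 for
    records that rarely change. A record that fails this check looks roughly
    like (name and TTL illustrative):

        {"Name": "app.example.com.", "Type": "CNAME", "TTL": 300, ...}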
16 | """ 17 | assert ( 18 | int(cnames["TTL"]) >= MINIMUM_TTL 19 | ), f"TTL is below the minimum of {MINIMUM_TTL}, it is currently set to {cnames['TTL']}" 20 | -------------------------------------------------------------------------------- /aws/s3/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/frost/5e4bce438a4c2b27cf97989d4fd3462b3db2610b/aws/s3/__init__.py -------------------------------------------------------------------------------- /aws/s3/helpers.py: -------------------------------------------------------------------------------- 1 | from helpers import get_param_id 2 | 3 | 4 | def get_s3_bucket_name(bucket): 5 | return get_param_id(bucket, "Name") 6 | 7 | 8 | def get_s3_resource_id(resource): 9 | if isinstance(resource, dict) and "Name" in resource: 10 | return get_s3_bucket_name(resource) 11 | if isinstance(resource, dict) and "ID" in resource: 12 | return get_param_id(resource, "ID") 13 | if isinstance(resource, dict) and "Owner" in resource: # ACL 14 | return get_param_id(resource["Owner"], "DisplayName") 15 | if isinstance(resource, dict) and "Status" in resource: # Versioning 16 | return get_param_id(resource, "Status") 17 | if isinstance(resource, dict) and "AllowedHeaders" in resource: # CORS 18 | return "cors-rules" 19 | 20 | if isinstance(resource, dict) and "ResponseMetadata" in resource: 21 | return "empty" 22 | 23 | if isinstance(resource, dict) and not resource: 24 | return "empty" 25 | 26 | if isinstance(resource, list): 27 | if len(resource) == 0: 28 | return "empty" 29 | else: 30 | return get_s3_resource_id(resource[0]) 31 | 32 | if resource is None: 33 | return "none" 34 | 35 | return None 36 | -------------------------------------------------------------------------------- /aws/s3/resources.py: -------------------------------------------------------------------------------- 1 | from conftest import botocore_client 2 | 3 | 4 | def s3_buckets(): 5 | "http://botocore.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.list_buckets" 6 | return ( 7 | botocore_client.get("s3", "list_buckets", [], {}) 8 | .extract_key("Buckets") 9 | .flatten() 10 | .values() 11 | ) 12 | 13 | 14 | def s3_buckets_cors_rules(): 15 | "http://botocore.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.get_bucket_cors" 16 | return [ 17 | botocore_client.get( 18 | "s3", 19 | "get_bucket_cors", 20 | [], 21 | {"Bucket": bucket["Name"]}, 22 | profiles=[bucket["__pytest_meta"]["profile"]], 23 | regions=[bucket["__pytest_meta"]["region"]], 24 | result_from_error=lambda error, call: {"CORSRules": None}, 25 | ) 26 | .extract_key("CORSRules") 27 | .values()[0] 28 | for bucket in s3_buckets() 29 | ] 30 | 31 | 32 | def s3_buckets_logging(): 33 | "http://botocore.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.get_bucket_logging" 34 | return [ 35 | botocore_client.get( 36 | "s3", 37 | "get_bucket_logging", 38 | [], 39 | {"Bucket": bucket["Name"]}, 40 | profiles=[bucket["__pytest_meta"]["profile"]], 41 | regions=[bucket["__pytest_meta"]["region"]], 42 | ) 43 | .extract_key("LoggingEnabled", default=False) 44 | .values()[0] 45 | for bucket in s3_buckets() 46 | ] 47 | 48 | 49 | def s3_buckets_acls(): 50 | "http://botocore.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.get_bucket_acl" 51 | return [ 52 | botocore_client.get( 53 | "s3", 54 | "get_bucket_acl", 55 | [], 56 | {"Bucket": bucket["Name"]}, 57 | profiles=[bucket["__pytest_meta"]["profile"]], 58 | 
regions=[bucket["__pytest_meta"]["region"]], 59 | ).values()[0] 60 | for bucket in s3_buckets() 61 | ] 62 | 63 | 64 | def s3_buckets_versioning(): 65 | "http://botocore.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.get_bucket_versioning" 66 | return [ 67 | botocore_client.get( 68 | "s3", 69 | "get_bucket_versioning", 70 | [], 71 | {"Bucket": bucket["Name"]}, 72 | profiles=[bucket["__pytest_meta"]["profile"]], 73 | regions=[bucket["__pytest_meta"]["region"]], 74 | ).values()[0] 75 | for bucket in s3_buckets() 76 | ] 77 | 78 | 79 | def s3_buckets_website(): 80 | "http://botocore.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.get_bucket_website" 81 | empty_response = { 82 | "IndexDocument": None, 83 | "ErrorDocument": None, 84 | "RedirectAllRequestsTo": None, 85 | } 86 | return [ 87 | website 88 | for bucket in s3_buckets() 89 | for website in botocore_client.get( 90 | "s3", 91 | "get_bucket_website", 92 | [], 93 | {"Bucket": bucket["Name"]}, 94 | profiles=[bucket["__pytest_meta"]["profile"]], 95 | regions=[bucket["__pytest_meta"]["region"]], 96 | result_from_error=lambda e, call: empty_response, 97 | ).values() 98 | ] 99 | 100 | 101 | def s3_buckets_policy(): 102 | "http://botocore.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.get_bucket_policy" 103 | return [ 104 | botocore_client.get( 105 | "s3", 106 | "get_bucket_policy", 107 | [], 108 | {"Bucket": bucket["Name"]}, 109 | profiles=[bucket["__pytest_meta"]["profile"]], 110 | regions=[bucket["__pytest_meta"]["region"]], 111 | result_from_error=lambda e, call: {"Policy": ""}, 112 | ) 113 | .extract_key("Policy") 114 | .values()[0] 115 | for bucket in s3_buckets() 116 | ] 117 | 118 | 119 | def s3_bucket_lifecycle_configuration(): 120 | "https://botocore.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.get_bucket_lifecycle_configuration" 121 | return [ 122 | botocore_client.get( 123 | "s3", 124 | "get_bucket_lifecycle_configuration", 125 | [], 126 | {"Bucket": bucket["Name"]}, 127 | profiles=[bucket["__pytest_meta"]["profile"]], 128 | regions=[bucket["__pytest_meta"]["region"]], 129 | result_from_error=lambda e, call: [], 130 | ) 131 | .extract_key("Rules") 132 | .values() 133 | for bucket in s3_buckets() 134 | ] 135 | -------------------------------------------------------------------------------- /aws/s3/test_s3_bucket_cors_disabled.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aws.s3.helpers import get_s3_resource_id 4 | from aws.s3.resources import s3_buckets, s3_buckets_cors_rules 5 | 6 | 7 | @pytest.mark.s3 8 | @pytest.mark.parametrize( 9 | ["s3_bucket", "s3_bucket_cors_rules"], 10 | zip(s3_buckets(), s3_buckets_cors_rules()), 11 | ids=get_s3_resource_id, 12 | ) 13 | def test_s3_bucket_cors_disabled(s3_bucket, s3_bucket_cors_rules): 14 | """ 15 | Disable sharing S3 bucket contents cross origin with CORS headers. 
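    When CORS is configured, get_bucket_cors returns rules roughly like
    (illustrative): [{"AllowedHeaders": ["*"], "AllowedMethods": ["GET"],
    "AllowedOrigins": ["*"]}]. For unconfigured buckets the resource layer
    maps the client error to None, which is what this test expects.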
16 | 
17 |     http://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html
18 |     """
19 |     assert s3_bucket_cors_rules is None, "CORS enabled for {0[Name]}".format(s3_bucket)
--------------------------------------------------------------------------------
/aws/s3/test_s3_bucket_does_not_grant_all_principals_all_actions.py:
--------------------------------------------------------------------------------
1 | import json
2 | 
3 | import pytest
4 | 
5 | from aws.s3.helpers import get_s3_resource_id
6 | from aws.s3.resources import s3_buckets, s3_buckets_policy
7 | 
8 | 
9 | STAR_ACTIONS = ["*", "s3:*", "s3:delete*", "s3:put*", "s3:get*", "s3:list*"]
10 | 
11 | 
12 | @pytest.mark.s3
13 | @pytest.mark.parametrize(
14 |     ["s3_bucket", "s3_bucket_policy"],
15 |     zip(s3_buckets(), s3_buckets_policy()),
16 |     ids=get_s3_resource_id,
17 | )
18 | def test_s3_bucket_does_not_grant_all_principals_all_actions(
19 |     s3_bucket, s3_bucket_policy
20 | ):
21 |     """
22 |     Check policy does not allow all principals all actions on the S3 Bucket.
23 | 
24 |     Mitigations:
25 | 
26 |     * limit actions instead of using * or S3:* http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
27 |     * limit principals to specific IAMs
28 |       http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-bucket-user-policy-specifying-principal-intro.html
29 |     * add conditions http://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html
30 |     """
31 |     if not s3_bucket_policy:
32 |         pytest.skip("Bucket has no policy, which means it defaults to private.")
33 |         # https://docs.aws.amazon.com/config/latest/developerguide/s3-bucket-policy.html
34 | 
35 |     policy = json.loads(s3_bucket_policy)
36 | 
37 |     for statement in policy["Statement"]:
38 |         if "Condition" in statement:
39 |             continue
40 | 
41 |         actions = (
42 |             [statement["Action"]]
43 |             if isinstance(statement["Action"], str)
44 |             else statement["Action"]
45 |         )
46 |         actions = [action.lower() for action in actions]
47 | 
48 |         assert not (
49 |             statement["Effect"] == "Allow"
50 |             and any(action in STAR_ACTIONS for action in actions)
51 |             and statement["Principal"] == "*"
52 |         )
53 | 
--------------------------------------------------------------------------------
/aws/s3/test_s3_bucket_has_life_cycle_policy.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | from aws.s3.helpers import get_s3_resource_id
4 | from aws.s3.resources import s3_buckets, s3_bucket_lifecycle_configuration
5 | 
6 | 
7 | @pytest.mark.s3
8 | @pytest.mark.parametrize(
9 |     ["s3_bucket", "lifecycle_configuration"],
10 |     zip(s3_buckets(), s3_bucket_lifecycle_configuration()),
11 |     ids=get_s3_resource_id,
12 | )
13 | def test_s3_bucket_has_life_cycle_policy(s3_bucket, lifecycle_configuration):
14 |     """
15 |     Check that a bucket has a life cycle policy.
16 |     """
17 |     assert (
18 |         None not in lifecycle_configuration
19 |     ), f"{s3_bucket['Name']} has no life cycle policy."
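# For context, a passing bucket returns at least one lifecycle rule from
# get_bucket_lifecycle_configuration, roughly like this (values illustrative):
# {"ID": "expire-old-logs", "Status": "Enabled",
#  "Filter": {"Prefix": "logs/"}, "Expiration": {"Days": 365}}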
20 | 
--------------------------------------------------------------------------------
/aws/s3/test_s3_bucket_logging_enabled.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | from aws.s3.helpers import get_s3_resource_id
4 | from aws.s3.resources import s3_buckets, s3_buckets_logging
5 | 
6 | 
7 | @pytest.mark.s3
8 | @pytest.mark.parametrize(
9 |     ["s3_bucket", "s3_bucket_logging_enabled"],
10 |     zip(s3_buckets(), s3_buckets_logging()),
11 |     ids=get_s3_resource_id,
12 | )
13 | def test_s3_bucket_logging_enabled(s3_bucket, s3_bucket_logging_enabled):
14 |     """
15 |     Enable access logs for S3 buckets.
16 |     """
17 |     assert s3_bucket_logging_enabled, "Logging not enabled for {0[Name]}".format(
18 |         s3_bucket
19 |     )
20 | 
--------------------------------------------------------------------------------
/aws/s3/test_s3_bucket_no_world_acl.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | from aws.s3.helpers import get_s3_resource_id
4 | from aws.s3.resources import s3_buckets, s3_buckets_acls
5 | 
6 | 
7 | AWS_PREDEFINED_GROUPS = [
8 |     # allow any AWS account to access the resource with a signed/authed request
9 |     "http://acs.amazonaws.com/groups/global/AuthenticatedUsers",
10 |     # allows anyone in the world access to the resource
11 |     "http://acs.amazonaws.com/groups/global/AllUsers",
12 | ]
13 | 
14 | 
15 | @pytest.mark.s3
16 | @pytest.mark.parametrize(
17 |     ["s3_bucket", "s3_bucket_acl"],
18 |     zip(s3_buckets(), s3_buckets_acls()),
19 |     ids=get_s3_resource_id,
20 | )
21 | def test_s3_bucket_no_world_acl(s3_bucket, s3_bucket_acl):
22 |     """
23 |     Check S3 bucket does not allow global predefined AWS groups access.
24 | 
25 |     http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
26 |     """
27 |     for grant in s3_bucket_acl["Grants"]:
28 |         grantee = grant["Grantee"]
29 |         if "URI" not in grantee:
30 |             continue  # e.g. the bucket owner's canonical-user grant has no URI
31 | 
32 |         grantee_uri = grantee["URI"]
33 |         assert not any(grantee_uri.startswith(group) for group in AWS_PREDEFINED_GROUPS)
34 | 
--------------------------------------------------------------------------------
/aws/s3/test_s3_bucket_versioning_enabled.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | from aws.s3.helpers import get_s3_resource_id
4 | from aws.s3.resources import s3_buckets, s3_buckets_versioning
5 | 
6 | 
7 | @pytest.mark.s3
8 | @pytest.mark.parametrize(
9 |     ["s3_bucket", "s3_bucket_versioning"],
10 |     zip(s3_buckets(), s3_buckets_versioning()),
11 |     ids=get_s3_resource_id,
12 | )
13 | def test_s3_bucket_versioning_enabled(s3_bucket, s3_bucket_versioning):
14 |     """
15 |     Enable restoring every version of every object in the S3 bucket to easily recover data.
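    A versioned bucket's get_bucket_versioning response looks roughly like
    {"Status": "Enabled", "MFADelete": "Disabled"} (illustrative); a bucket
    that has never enabled versioning returns no "Status" key at all, which
    is why the assertion below uses .get().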
16 | 17 | http://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html 18 | """ 19 | assert s3_bucket_versioning.get("Status", None) == "Enabled" 20 | -------------------------------------------------------------------------------- /aws/s3/test_s3_bucket_versioning_mfa_delete_enabled.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aws.s3.helpers import get_s3_resource_id 4 | from aws.s3.resources import s3_buckets, s3_buckets_versioning 5 | 6 | 7 | @pytest.mark.s3 8 | @pytest.mark.parametrize( 9 | ["s3_bucket", "s3_bucket_versioning"], 10 | zip(s3_buckets(), s3_buckets_versioning()), 11 | ids=get_s3_resource_id, 12 | ) 13 | def test_s3_bucket_versioning_mfa_delete_enabled(s3_bucket, s3_bucket_versioning): 14 | """ 15 | Enable MFA delete for versioned S3 buckets to prevent their accidental deletion. 16 | """ 17 | if s3_bucket_versioning.get("Status", None) != "Enabled": 18 | return pytest.skip() 19 | 20 | assert s3_bucket_versioning.get("MFADelete", None) != "Disabled" 21 | -------------------------------------------------------------------------------- /aws/s3/test_s3_bucket_web_hosting_disabled.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from aws.s3.helpers import get_s3_resource_id 4 | from aws.s3.resources import s3_buckets, s3_buckets_website 5 | 6 | 7 | @pytest.mark.s3 8 | @pytest.mark.parametrize( 9 | ["s3_bucket", "s3_bucket_website"], 10 | zip(s3_buckets(), s3_buckets_website()), 11 | ids=get_s3_resource_id, 12 | ) 13 | def test_s3_bucket_web_hosting_disabled(s3_bucket, s3_bucket_website): 14 | """ 15 | Disable hosting static site in the S3 bucket. 16 | """ 17 | assert not s3_bucket_website["IndexDocument"] 18 | assert not s3_bucket_website["ErrorDocument"] 19 | assert not s3_bucket_website["RedirectAllRequestsTo"] 20 | -------------------------------------------------------------------------------- /aws/sns/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/frost/5e4bce438a4c2b27cf97989d4fd3462b3db2610b/aws/sns/__init__.py -------------------------------------------------------------------------------- /aws/sns/resources.py: -------------------------------------------------------------------------------- 1 | from conftest import botocore_client 2 | 3 | 4 | def sns_subscriptions(): 5 | "https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sns.html#subscription" 6 | return ( 7 | botocore_client.get("sns", "list_subscriptions", [], {}) 8 | .extract_key("Subscriptions") 9 | .flatten() 10 | .values() 11 | ) 12 | 13 | 14 | def sns_topics(): 15 | "https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sns.html#topic" 16 | return ( 17 | botocore_client.get("sns", "list_topics", [], {}) 18 | .extract_key("Topics") 19 | .flatten() 20 | .values() 21 | ) 22 | 23 | 24 | def sns_topic_arns(): 25 | "https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sns.html#topic" 26 | return {x["TopicArn"] for x in sns_topics()} 27 | 28 | 29 | def sns_subscriptions_by_topic(): 30 | "https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sns.html#topic" 31 | return [ 32 | { 33 | **{ 34 | "Subscriptions": botocore_client.get( 35 | service_name="sns", 36 | method_name="list_subscriptions_by_topic", 37 | call_args=[], 38 | call_kwargs={"TopicArn": topic["TopicArn"]}, 39 | 
profiles=[topic["__pytest_meta"]["profile"]], 40 | regions=[topic["__pytest_meta"]["region"]], 41 | ) 42 | .extract_key("Subscriptions") 43 | .values()[0] 44 | }, 45 | **topic, 46 | } 47 | for topic in sns_topics() 48 | ] 49 | -------------------------------------------------------------------------------- /aws/sns/test_sns_pending_verified.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from helpers import get_param_id 3 | 4 | from aws.sns.resources import sns_subscriptions_by_topic 5 | 6 | 7 | @pytest.mark.sns 8 | @pytest.mark.rationale( 9 | """ 10 | SNS Subscriptions in PendingConfirmation status cannot receive 11 | notifications. They are good candidates for removal, or confirmation. 12 | """ 13 | ) 14 | @pytest.mark.parametrize( 15 | "topic", 16 | sns_subscriptions_by_topic(), 17 | ids=lambda topic: get_param_id(topic, "TopicArn"), 18 | ) 19 | def test_sns_topics_with_subscriptions_pending_confirmation(topic): 20 | for subscription in topic["Subscriptions"]: 21 | assert subscription["SubscriptionArn"] != "PendingConfirmation" 22 | -------------------------------------------------------------------------------- /aws/sns/test_sns_subscriptions_without_topics.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from helpers import get_param_id 3 | 4 | from aws.sns.resources import sns_subscriptions 5 | from aws.sns.resources import sns_topic_arns 6 | 7 | 8 | @pytest.fixture 9 | def topics(): 10 | return sns_topic_arns() 11 | 12 | 13 | @pytest.mark.sns 14 | @pytest.mark.rationale( 15 | """ 16 | SNS subscriptions subscribed to non-existent topics cannot 17 | receive messages. They are good candidates for removal. 18 | """ 19 | ) 20 | @pytest.mark.parametrize( 21 | "subscription", 22 | sns_subscriptions(), 23 | ids=lambda subscription: get_param_id(subscription, "SubscriptionArn"), 24 | ) 25 | def test_sns_subscriptions_without_parent_topics(subscription, topics): 26 | assert subscription["TopicArn"] in topics 27 | -------------------------------------------------------------------------------- /aws/sns/test_sns_topics_without_subscriptions.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from helpers import get_param_id 3 | 4 | from aws.sns.resources import sns_subscriptions_by_topic 5 | 6 | 7 | @pytest.mark.sns 8 | @pytest.mark.rationale( 9 | """ 10 | SNS Topics without subscriptions have no place to deliver messages. 11 | They are good candidates for removal, or appropriate subscriptions. 
12 | """ 13 | ) 14 | @pytest.mark.parametrize( 15 | "topic", 16 | sns_subscriptions_by_topic(), 17 | ids=lambda topic: get_param_id(topic, "TopicArn"), 18 | ) 19 | def test_sns_topics_without_subscriptions(topic): 20 | assert len(topic["Subscriptions"]) > 0 21 | -------------------------------------------------------------------------------- /cache.py: -------------------------------------------------------------------------------- 1 | """ 2 | Patch for pytest cache to serialize datetime.datetime 3 | """ 4 | 5 | import datetime 6 | import functools 7 | import json 8 | from typing import Any, Dict, Union, Tuple 9 | 10 | from dateutil.parser import isoparse 11 | import _pytest 12 | import _pytest.cacheprovider 13 | 14 | 15 | def json_iso_datetimes(obj: Any) -> str: 16 | """JSON serializer for objects not serializable by default json 17 | module.""" 18 | if isinstance(obj, datetime.datetime): 19 | return obj.isoformat() 20 | 21 | raise TypeError(f"Unserializable type {type(obj)}") 22 | 23 | 24 | def json_iso_datetime_string_to_datetime(obj: Dict[Any, Any]) -> Dict[Any, Any]: 25 | """JSON object hook that converts object vals from ISO datetime 26 | strings to python datetime.datetime`s if possible.""" 27 | 28 | for k, v in obj.items(): 29 | if not isinstance(v, str): 30 | continue 31 | 32 | try: 33 | obj[k] = isoparse(v) 34 | except (OverflowError, ValueError): 35 | pass 36 | 37 | return obj 38 | 39 | 40 | def datetime_encode_set( 41 | self: _pytest.cacheprovider.Cache, 42 | key: str, 43 | value: Union[str, int, float, Dict[Any, Any], Tuple[Any]], 44 | ) -> None: 45 | """save value for the given key. 46 | 47 | :param key: must be a ``/`` separated value. Usually the first 48 | name is the name of your plugin or your application. 49 | :param value: must be of any combination of basic 50 | python types, including nested types 51 | like e. g. lists of dictionaries. 52 | """ 53 | path = self._getvaluepath(key) 54 | try: 55 | path.parent.mkdir(exist_ok=True, parents=True) 56 | except (IOError, OSError): 57 | self.warn("could not create cache path {path}", path=path) 58 | return 59 | try: 60 | f = path.open("w") 61 | except (IOError, OSError): 62 | self.warn("cache could not write path {path}", path=path) 63 | else: 64 | with f: 65 | json.dump(value, f, indent=2, sort_keys=True, default=json_iso_datetimes) 66 | self._ensure_supporting_files() 67 | 68 | 69 | def datetime_encode_get( 70 | self: _pytest.cacheprovider.Cache, key: str, default: Any 71 | ) -> Any: 72 | """return cached value for the given key. If no value 73 | was yet cached or the value cannot be read, the specified 74 | default is returned. 75 | 76 | :param key: must be a ``/`` separated value. Usually the first 77 | name is the name of your plugin or your application. 78 | :param default: must be provided in case of a cache-miss or 79 | invalid cache values. 
80 | """ 81 | path = self._getvaluepath(key) 82 | try: 83 | with path.open("r") as f: 84 | return json.load(f, object_hook=json_iso_datetime_string_to_datetime) 85 | except (ValueError, IOError, OSError): 86 | return default 87 | 88 | 89 | def patch_cache_set(config: _pytest.config.Config) -> None: 90 | assert config.cache, "pytest does not have a cache configured to patch" 91 | # types ignored due to https://github.com/python/mypy/issues/2427 92 | config.cache.set = functools.partial(datetime_encode_set, config.cache) # type: ignore 93 | config.cache.get = functools.partial(datetime_encode_get, config.cache) # type: ignore 94 | -------------------------------------------------------------------------------- /config.yaml.example: -------------------------------------------------------------------------------- 1 | # 2 | # frost config file 3 | # 4 | # Documentation on config file found in README.md 5 | # 6 | exemptions: 7 | - test_name: test_ec2_instance_has_required_tags 8 | test_param_id: i-0123456789f014c162 9 | expiration_day: 2019-01-01 10 | reason: ec2 instance has no owner 11 | - test_name: test_ec2_security_group_opens_specific_ports_to_all 12 | test_param_id: '*HoneyPot' 13 | expiration_day: 2020-01-01 14 | reason: purposefully insecure security group 15 | severities: 16 | - test_name: test_ec2_instance_has_required_tags 17 | severity: INFO 18 | - test_name: '*' 19 | severity: ERROR 20 | aws: 21 | admin_groups: 22 | - "Administrators" 23 | admin_policies: 24 | - "AWSAdminRequireMFA" 25 | user_is_inactive: 26 | no_activity_since: 27 | years: 1 28 | months: 0 29 | created_after: 30 | weeks: 1 31 | access_key_expires_after: 32 | years: 1 33 | months: 0 34 | required_tags: 35 | - Name 36 | - Type 37 | - App 38 | - Env 39 | # Allowed ports for the test_ec2_security_group_opens_specific_ports_to_all 40 | # test for all instances 41 | allowed_ports_global: 42 | - 25 43 | # Allowed ports for the test_ec2_security_group_opens_specific_ports_to_all 44 | # test for specific instances. In this example, we are allowing ports 22 45 | # and 2222 for all security groups that include the word 'bastion' in them. 46 | allowed_ports: 47 | - test_param_id: '*bastion' 48 | ports: 49 | - 22 50 | - 2222 51 | max_ami_age_in_days: 90 52 | owned_ami_account_ids: 53 | - 1234567890 54 | gcp: 55 | allowed_org_domains: 56 | - mygsuiteorg.com 57 | allowed_gke_versions: 58 | - 1.15.12-gke.20 59 | - 1.16.13-gke.401 60 | - 1.17.9-gke.1504 61 | - 1.18.6-gke.3504 62 | # Allowed ports for the test_firewall_opens_any_ports_to_all 63 | # test for all firewalls 64 | allowed_ports_global: 65 | - 25 66 | # Allowed ports for the test_firewall_opens_any_ports_to_all 67 | # test for specific firewalls. In this example, we are allowing ports 22 68 | # and 2222 for all firewalls that include the word 'bastion' in them. 
69 | allowed_ports: 70 | - test_param_id: '*bastion' 71 | ports: 72 | - 22 73 | - 2222 74 | gsuite: 75 | domain: 'mygsuiteorg.com' 76 | min_number_of_owners: 2 77 | user_is_inactive: 78 | no_activity_since: 79 | years: 1 80 | months: 0 81 | -------------------------------------------------------------------------------- /custom_config.py: -------------------------------------------------------------------------------- 1 | import re 2 | from datetime import datetime, timezone 3 | 4 | from ruamel.yaml import YAML 5 | from dateutil.relativedelta import relativedelta 6 | 7 | import exemptions 8 | import severity 9 | 10 | 11 | class CustomConfig: 12 | def __init__(self, config_fd): 13 | parsed_config = {} 14 | if config_fd is not None: 15 | yaml = YAML() 16 | parsed_config = yaml.load(config_fd) 17 | self.aws = AWSConfig(parsed_config.get("aws", {})) 18 | self.gcp = GCPConfig(parsed_config.get("gcp", {})) 19 | self.gsuite = GSuiteConfig(parsed_config.get("gsuite", {})) 20 | 21 | self.exemptions = exemptions.load(parsed_config.get("exemptions")) 22 | self.severities = severity.load(parsed_config.get("severities")) 23 | 24 | def add_markers(self, item): 25 | severity.add_severity_marker(item) 26 | exemptions.add_xfail_marker(item) 27 | 28 | 29 | class CustomConfigMixin: 30 | def __init__(self, config): 31 | self.user_is_inactive = config.get("user_is_inactive", {}) 32 | self.allowed_ports_global = set(config.get("allowed_ports_global", [])) 33 | self.allowed_ports = config.get("allowed_ports", []) 34 | 35 | def no_activity_since(self): 36 | no_activity_since = self._parse_user_is_inactive_relative_time( 37 | "no_activity_since" 38 | ) 39 | if no_activity_since is None: 40 | return datetime.now(timezone.utc) - relativedelta(years=+1) 41 | return no_activity_since 42 | 43 | def created_after(self): 44 | created_after = self._parse_user_is_inactive_relative_time("created_after") 45 | if created_after is None: 46 | return datetime.now(timezone.utc) - relativedelta(weeks=+1) 47 | return created_after 48 | 49 | def _parse_user_is_inactive_relative_time(self, key): 50 | if self.user_is_inactive.get(key) is None: 51 | return None 52 | 53 | return datetime.now(timezone.utc) - relativedelta( 54 | years=+self.user_is_inactive[key].get("years", 0), 55 | months=+self.user_is_inactive[key].get("months", 0), 56 | weeks=+self.user_is_inactive[key].get("weeks", 0), 57 | ) 58 | 59 | def get_allowed_ports(self, test_id): 60 | return self.get_allowed_ports_from_test_id(test_id) | self.allowed_ports_global 61 | 62 | def get_allowed_ports_from_test_id(self, test_id): 63 | for rule in self.allowed_ports: 64 | if rule["test_param_id"].startswith("*"): 65 | substring = rule["test_param_id"][1:] 66 | if re.search(substring, test_id): 67 | return set(rule["ports"]) 68 | 69 | if test_id == rule["test_param_id"]: 70 | return set(rule["ports"]) 71 | 72 | return set([]) 73 | 74 | 75 | class AWSConfig(CustomConfigMixin): 76 | def __init__(self, config): 77 | self.required_tags = frozenset(config.get("required_tags", [])) 78 | self.required_amis = frozenset(config.get("required_amis", [])) 79 | self.access_key_expires_after = config.get("access_key_expires_after", None) 80 | self.admin_policies = frozenset(config.get("admin_policies", [])) 81 | self.admin_groups = frozenset(config.get("admin_groups", [])) 82 | self.owned_ami_account_ids = [ 83 | str(x) for x in config.get("owned_ami_account_ids", []) 84 | ] 85 | self.max_ami_age_in_days = config.get("max_ami_age_in_days", 180) 86 | super().__init__(config) 87 | 88 | def 
get_access_key_expiration_date(self): 89 | if self.access_key_expires_after is None: 90 | return datetime.now(timezone.utc) - relativedelta(years=+1) 91 | 92 | return datetime.now(timezone.utc) - relativedelta( 93 | years=+self.access_key_expires_after.get("years", 0), 94 | months=+self.access_key_expires_after.get("months", 0), 95 | weeks=+self.access_key_expires_after.get("weeks", 0), 96 | ) 97 | 98 | 99 | class GCPConfig(CustomConfigMixin): 100 | def __init__(self, config): 101 | self.allowed_org_domains = config.get("allowed_org_domains", []) 102 | self.allowed_gke_versions = config.get("allowed_gke_versions", []) 103 | super().__init__(config) 104 | 105 | 106 | class GSuiteConfig(CustomConfigMixin): 107 | def __init__(self, config): 108 | self.domain = config.get("domain", "") 109 | self.min_number_of_owners = int(config.get("min_number_of_owners", "2")) 110 | super().__init__(config) 111 | 112 | 113 | class PagerdutyConfig(CustomConfigMixin): 114 | def __init__(self, config): 115 | self.users_with_remote_access_monitoring = config.get( 116 | "users_with_remote_access_monitoring", "" 117 | ) 118 | self.bastion_users = config.get("bastion_users", "") 119 | self.alternate_usernames = config.get("alternate_usernames", "") 120 | super().__init__(config) 121 | -------------------------------------------------------------------------------- /docs/.nojekyll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/frost/5e4bce438a4c2b27cf97989d4fd3462b3db2610b/docs/.nojekyll -------------------------------------------------------------------------------- /docs/Architecture.rst: -------------------------------------------------------------------------------- 1 | .. raw:: html 2 | 3 | 6 | 7 | .. _architecture: 8 | 9 | ============ 10 | Architecture 11 | ============ 12 | 13 | PyTest supports several different ways of organizing tests. For frost, we use a 14 | mixture of class-based and declarative tests. 15 | 16 | In general, the class holds session information, as PyTest treats class 17 | ``__init__`` functions as session-scoped fixtures. The class methods provide raw 18 | access to the service, and cache the result. 19 | 20 | Traditional PyTest fixtures (in ``conftest.py``) or "cache access functions" (in 21 | ``resources.py``) are used to supply the data to tests. The tests are 22 | conventionally written in ``test_*.py`` files, with a single function of the 23 | same name as the file. *(With "Black_", we stopped the tabs-vs-spaces debate, 24 | so redirected that energy to the one-or-many-tests-per-file debate.)* 25 | 26 | A recommended way to organize your code is to create a directory per type of 27 | resource you test. E.g. ``aws/{elb,ec2,iam}/`` or 28 | ``github/{orgs,branches,users}``. Whether it makes sense to have ``conftest.py`` 29 | files at each level is up to the developer. There should only be one session 30 | client per service though. 31 | 32 | Caching 33 | ======= 34 | 35 | .. note:: 36 | The caching operation is under consideration for deprecation. If you 37 | intend to rely on caching, you should check with the active developers first. 38 | 39 | To implement caching: 40 | 41 | #. Your class ``__init__`` method must accept and store a cache object. 42 | 43 | #. Your data retrieval functions should be written to try the cache first 44 | before fetching data from the service under test. 45 | 46 | #. 
A global ``cache_key`` function is recommended as a means to ensure consistent 47 | and non-conflicting keys to store data in the cache. (The existing 48 | functions tend to marshal the full data location path and arguments 49 | into a string.) 50 | 51 | Expected Output flow 52 | ==================== 53 | 54 | Every test that fails needs to output sufficient information to allow downstream 55 | processes to take action on the failure (open an issue, or bug, or email the 56 | team or ...). All that information must be contained in the test id. Use the 57 | ``ids`` argument to the ``pytest.mark.parametrize`` decorator to generate rich 58 | ids as needed. (See `PyTest docs`_.) 59 | 60 | A PyTest plugin in frost adds the option ``--json`` which outputs test failures 61 | as JSON objects that include the test's id, in addition to other context about 62 | the failed test. Using the ``--json`` option is the recommended way to provide 63 | actionable data to processes further down the pipeline. 64 | 65 | The output flow will be installation-specific. 66 | 67 | .. _PyTest docs: https://docs.pytest.org/en/stable/example/parametrize.html#paramexamples 68 | 69 | .. _Black: https://black.readthedocs.io/ 70 | -------------------------------------------------------------------------------- /docs/CodingConventions.rst: -------------------------------------------------------------------------------- 1 | .. raw:: html 2 | 3 | 6 | 7 | =========== 8 | Conventions 9 | =========== 10 | 11 | - As mentioned elsewhere, all functions *not* within a ``test_*.py`` file should 12 | have doctest_ tests. 13 | 14 | 15 | - Frost tests are expected to support the ``--json`` option by ensuring ids used 16 | in ``pytest.mark.parametrize`` contain sufficient information for downstream 17 | processing. 18 | 19 | .. _offline: 20 | 21 | - All data access routines should respect the ``--offline`` command line option 22 | that is a ``frost`` extension. The value of ``--offline`` is passed to 23 | the "client" constructor. 24 | 25 | If you do not properly implement this option, you will break "``make doctest``" for 26 | everything. (I.e. you're advised to run "``make doctest``" early & often while 27 | implementing a new service.) 28 | 29 | .. _doctest: https://docs.python.org/3.6/library/doctest.html 30 | 31 | -------------------------------------------------------------------------------- /docs/ContributingDocumentation.rst: -------------------------------------------------------------------------------- 1 | ========================== 2 | Contributing Documentation 3 | ========================== 4 | 5 | Frost documentation may be written in either `restructured text `_ or `Markdown`__ (`Common Mark`__ syntax). All input is rendered into HTML with `Sphinx `_. 6 | 7 | __ https://en.wikipedia.org/wiki/Markdown 8 | __ https://en.wikipedia.org/wiki/Markdown#CommonMark 9 | 10 | Create a new documentation branch 11 | --------------------------------- 12 | 13 | Use a descriptive branch name, such as: **adding_new_toaster_doc_section**. 14 | 15 | Create a new branch (assumes you already have configured SSH keys for your GitHub account): 16 | 17 | .. code:: console 18 | 19 | git clone git@github.com:mozilla/frost.git 20 | cd frost 21 | git checkout -b my_descriptive_documentation_branch 22 | make install-docs 23 | 24 | Make documentation changes in branch 25 | ------------------------------------ 26 | 27 | While editing the documentation, you may find it useful to run an "autobuilder".
That will allow you to review the completed documentation as you save your work. To activate the autobuilder, open a new terminal window, then:: 28 | 29 | cd /path/to/frost 30 | source venv/bin/activate 31 | make doc-preview 32 | 33 | You will want to terminate the autobuilder prior to doing the final checks below. Just enter ctrl-C in the terminal window running the autobuilder. (A number of parameters can be used to adjust the behavior of the autobuilder. Run ``sphinx-autobuild --help`` for a list, and set the environment variable ``AUTOBUILD_OPTS`` to override.) 34 | 35 | Remember that any new sections will need to be listed in the `table of contents `_ in the `index.rst file `_. 36 | 37 | Build docs and read over changes 38 | -------------------------------- 39 | 40 | The following steps will give you a clean build of the final version of all the docs. You can view those by opening ``/path/to/frost/docs/_build/html/index.html`` in a browser. 41 | 42 | build:: 43 | 44 | pwd # Should be the main frost directory. 45 | make doc-build 46 | 47 | Read over your changes using your `favorite web browser `_ by going to a `file:// URL `_ for the **index.html** file. 48 | 49 | URL:: 50 | 51 | echo "file://$(pwd)/docs/_build/html/index.html" 52 | 53 | Commit and review 54 | ----------------- 55 | 56 | Commit:: 57 | 58 | git add . 59 | git commit -m 'Adding new documentation section on video toasters' 60 | git push 61 | 62 | The first time you push, the URL for creating a PR will be shown in the terminal. If you missed that, display your branch on the web, and create the PR. 63 | 64 | File a `PR `_ and request a review from a `code owner `_. 65 | -------------------------------------------------------------------------------- /docs/FAQ.rst: -------------------------------------------------------------------------------- 1 | .. raw:: html 2 | 3 | 6 | 7 | ========================== 8 | Frequently Asked Questions 9 | ========================== 10 | 11 | Overview 12 | -------- 13 | 14 | **What's the general flow of a "test"?** 15 | 16 | When you invoke a test, frost_ uses features of 17 | pytest_ to execute the test. Most commonly, 18 | the test will validate certain relationships about data files 19 | representing configuration data of some external service. 20 | 21 | If the data-under-test is already cached (and fresh enough), the cached 22 | data will be used. If the data is not available locally, pytest_ 23 | fixtures are used to obtain or refresh the data required by that test. 24 | Any freshly retrieved data is cached for use by subsequent tests. 25 | 26 | This "lazy evaluation" of supplying data ensures the quickest possible 27 | turnaround time for ad-hoc queries. 28 | 29 | .. _pytest: https://pytest.org/ 30 | .. _frost: https://github.com/mozilla/frost 31 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option.
$(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/MozillaDeployment.rst: -------------------------------------------------------------------------------- 1 | .. raw:: html 2 | 3 | 6 | 7 | ================== 8 | Mozilla Deployment 9 | ================== 10 | 11 | Some details of the Mozilla deployment of Frost are listed here as an example of 12 | how it can be done. 13 | 14 | Frost jobs are run via Jenkins. Jobs are organized for both convenience and to 15 | accommodate different reporting intervals. Usually only a single service is 16 | queried in any particular job. 17 | 18 | The actual job runs in a docker container, which has the frost repository 19 | already checked out. Separate configuration repositories are also checked out at 20 | runtime, based on job parameters. 21 | 22 | Jobs have a common entry script, which performs any job-specific tasks before 23 | and after the main frost run. PyTest is always invoked with the ``--json`` 24 | option supported by the frost extensions, and post-processing steps are 25 | expected to use the JSON as input. 26 | 27 | [The deployment is under revision. A rough "as is" doc may be found `here`__.] 28 | 29 | __ https://docs.google.com/document/d/1ePUkJPcHEj9XxaVYr2TSABOxRjhDBKr2KSQ2EzgHJm4 30 | 31 | Adjacent Tools at Mozilla 32 | ========================= 33 | 34 | We feed some of the output of Frost tests into our metrics system. While the output of Frost is JSON, it's not in a format 35 | that is easily consumable by our ingestion engine: AWS Athena reading from S3. For that, more traditional formats, 36 | such as CSV and un-nested JSON are easier to work with. To support that, our jobs typically post-process the Frost JSON 37 | into reasonable morsels for our metrics. Our conventions__ may be of interest. 38 | 39 | __ https://github.com/mozilla-services/foxsec-tools/tree/master/metrics/utils/Conventions.md 40 | -------------------------------------------------------------------------------- /docs/NewServices.rst: -------------------------------------------------------------------------------- 1 | .. raw:: html 2 | 3 | 6 | 7 | ============================ 8 | Adding a New Service 9 | ============================ 10 | Here are the steps to add a new service to the Frost framework. 11 | 12 | Claim a name 13 | ============ 14 | 15 | Like 'heroku' ;) 16 | 17 | Create a new directory by that name 18 | ----------------------------------- 19 | 20 | Clone the repo:: 21 | 22 | git clone git@github.com:mozilla/frost.git 23 | git checkout -b new_service 24 | 25 | Setup for new service:: 26 | 27 | mkdir heroku 28 | cd heroku 29 | 30 | Create Default Files 31 | -------------------- 32 | 33 | The new service should be a Python Package:: 34 | 35 | touch __init__.py client.py resources.py conftest.py 36 | 37 | Commit shell:: 38 | 39 | git add . 40 | git commit -m 'Adding new service Heroku' && git push 41 | 42 | Add Service Specific Content 43 | ============================ 44 | 45 | ``client.py``: responsible for obtaining data from the service, and 46 | placing it into the PyTest cache. The client module typically exposes the data via a 47 | "{service}_client" object. The PyTest framework will instantiate the client 48 | before any tests are run with all the configuration & credential 49 | information provided by the configuration files and command line 50 | options. (See :ref:`architecture` for status of cache functionality.) (See 51 | :ref:`doctest offline support ` for other requirements.) 52 |
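A minimal, hypothetical sketch of such a module follows (the ``HerokuClient``
class and ``list_apps`` method are illustrative only, not part of frost):

.. code-block:: python

    # heroku/client.py (illustrative sketch, assuming the conventions above)
    class HerokuClient:
        def __init__(self, cache, debug_calls, offline):
            self.cache = cache  # the pytest cache object
            self.debug_calls = debug_calls
            self.offline = offline  # honor frost's --offline option

        def list_apps(self):
            if self.offline:
                return []  # never hit the network when --offline is set
            ...  # fetch from the service API and store the results in self.cache


    # in frost, the framework constructs this object with real config/credentials;
    # only resources.py should import it
    heroku_client = HerokuClient(cache=None, debug_calls=False, offline=True)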
53 | ``resources.py``: holds mapping functions which convert the data from 54 | the cache into the format expected by the tests. This should be the only 55 | file which imports the instantiation of the client. (Future best 56 | practices may pre-populate the cache outside of the PyTest execution.) 57 | 58 | ``conftest.py`` (optional): As much as possible, put service specific 59 | options, etc. in this local file. (Some things may not work c.f. BUG.) 60 | In conventional ``PyTest`` usage, ``conftest.py`` would contain fixture 61 | routines which do the combined steps of fetching the data and providing it to the 62 | tests. If caching is not important, the traditional approach may be used. 63 | 64 | Tests for these support files should be included as doc tests whenever 65 | practical. If possible, the default of executing the module should be to run 66 | the doc tests. 67 | 68 | Conventions for Parametrization 69 | -------------------------------------------------- 70 | 71 | One of the enhancements Frost makes to pytest is simplifying the task of getting test-specific metadata into the JSON output file for downstream processing. To access this feature, you need to follow a few conventions that are unique to Frost. 72 | 73 | When you use the ``pytest.mark.parametrize`` function, you supply two key arguments (see the `pytest documentation`_ for more details): 74 | 75 | - ``argvalues`` (2nd argument) - an iterable where each item contains information for one execution of the test. 76 | - ``ids`` (keyword argument) - an iterable which results in a text string displayed, in addition to the test name, to uniquely identify one execution of the test. Caution: the string value is used for lookup during exemption processing. The mapping must be stable for this to work as expected -- you cannot let pytest generate a default value. 77 | 78 | For any values you want to appear in the JSON output, ``argvalues`` should supply a dictionary with a unique-to-context key value. To actually insert the key, value pair into the output JSON, you must also specify the key in the global set ``METADATA_KEYS``. Presence of the key in that set is what triggers the frost additions to put the key, value pair into the output JSON. One way to do that is: 79 | 80 | .. code-block:: python 81 | 82 | from conftest import METADATA_KEYS 83 | 84 | METADATA_KEYS.update({"key_1", "key_2"}) 85 | 86 | 87 | .. _pytest documentation: https://docs.pytest.org/en/stable/reference.html#pytest.python.Metafunc.parametrize 88 | 89 | Add Service Specific Tests 90 | -------------------------- 91 | 92 | ``test_*.py``: normal PyTest tests, which each import the resources they 93 | need to test. 94 | -------------------------------------------------------------------------------- /docs/Source.rst: -------------------------------------------------------------------------------- 1 | .. raw:: html 2 | 3 | 6 | 7 | =========== 8 | Source Code 9 | =========== 10 | 11 | Service Specific Code 12 | ===================== 13 | 14 | .. 15 | TODO: Add new modules to the table below, also in ../Makefile 16 | 17 | .. toctree:: 18 | :maxdepth: 2 19 | 20 | source/aws/modules.rst 21 | source/gcp/modules.rst 22 | source/gsuite/modules.rst 23 | 24 | Frost and support code 25 | ====================== 26 | 27 | ..
toctree:: 28 | :maxdepth: 2 29 | 30 | source/frost/modules.rst 31 | -------------------------------------------------------------------------------- /docs/UseCases.rst: -------------------------------------------------------------------------------- 1 | ====================== 2 | Use Cases & Philosophy 3 | ====================== 4 | 5 | .. warning:: Incorrect Information below 6 | 7 | Until this document is reviewed and corrected, it is known to contain errors, especially w.r.t. philosophy. 8 | 9 | Philosophy 10 | ========== 11 | 12 | Frost is intended to build upon the pytest framework to provide extra features to support "infrastructure auditing". 13 | 14 | The added features include: 15 | 16 | - providing a standard way to inject metadata into the JSON test result file. 17 | - providing a CLI wrapper (the ``frost`` program) to simplify execution of tests within the fully supported frost environment. 18 | 19 | Note that the added features are all extensions to the normal pytest operations. Therefore, all tests can be executed directly from pytest. This can be highly useful for ad hoc queries, and the development of new tests. 20 | 21 | Target Domain 22 | ------------- 23 | 24 | Frost is intended to simplify the process of auditing existing system configurations, when the configuration data is accessible via an API. While any pytest-compliant test *can* be driven via Frost, Frost is optimized for processing auditing controls and providing a report of the current state. 25 | 26 | 27 | Since, in general, the number of configuration resources to check is not statically known when the test is written or executed, some lesser-used features of pytest are needed. In particular, the test collection phase often requires API calls to the service to discover the complete list of resources to be checked. While uncommon in internet pytest examples, dynamic test discovery is fully supported by pytest. 28 | 29 | In practice, many of the same calls needed to determine the list of resources may need to be repeated to collect the additional information needed by one or more tests. This leads to a tension between collecting all potentially needed data in the first (discovery) pass, or making many duplicate calls to the service as each test collects the additional information it requires. This results in two common data collection paths: 30 | 31 | - Path 1: Frost service API clients collect all needed resources during the test collection phase, and cache the responses for use by other tests within the same run of Frost and (optionally) by subsequent runs of Frost. All test output is delivered in a JSON file for subsequent processing as appropriate. 32 | 33 | - Path 2: the Frost service client builds the list of resources to be audited during the test collection phase. The current system state is collected via API calls during the test. 34 | 35 | .. mermaid:: 36 | 37 | sequenceDiagram 38 | 39 | participant p1 as Path 1 40 | participant s as Service 41 | participant c as Pytest Managed Data 42 | participant p2 as Path 2 43 | 44 | note over p1, p2: Test Collection Phase 45 | note over p1: AWS, gSuite, ...
46 | p1->>+s: enumerate resources and data 47 | s->>-p1: all data 48 | p1 ->> c: resources & data 49 | 50 | note over c: Pytest Data Caching 51 | note over p2: GitHub (new) 52 | p2->>+s: enumerate resources 53 | s->>-p2: all resources 54 | p2->>c: resources 55 | 56 | note over p1, p2: Test Execution Phase 57 | 58 | c-->>p1: data for test 59 | 60 | c-->>p2: resources for test 61 | p2->>+s: request data for resource 62 | s->>-p2: data for test 63 | 64 | note over c: Results caching 65 | 66 | p1->>c: test results 67 | p2->>c: test results 68 | 69 | note over p1, p2: Paths rejoin here for test output processing 70 | 71 | In both cases, all test output is delivered in a JSON file for subsequent processing as appropriate. 72 | 73 | Frost Supported Use Cases 74 | ========================= 75 | 76 | While Frost could be considered a wrapper around pytest, it is optimized to support the following use cases: 77 | 78 | - as part of an automated system, Frost will execute robustly, and without user intervention, so folks can trust my reporting. 79 | - as an SRE, I want to be able to easily create new suites of tests, so I can pay special attention to something for a while. (Maybe new service, service migration, etc) 80 | - as an SRE, I want to be able to easily add additional tests to a service, so new cloud features are monitored as we start to use them. (Or to apply lessons learned from an incident) 81 | - as a Frost Developer, I want to easily be able to develop support for a new service, so I don't have to re-implement the framework. 82 | - as a SecOps incident responder, I want to quickly obtain easy-to-understand status of a specific account or subsystems, so I can eliminate, or focus in on, aspects of the system that appear abnormal. 83 | 84 | Unsupported Use Cases 85 | --------------------- 86 | 87 | .. admonition:: Add away 88 | 89 | Fair game to add out of scope use cases here, including moving down from above. 90 | 91 | The following use cases are explicitly out of scope for Frost. 92 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. For a full 4 | # list see the documentation: 5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | # If extensions (or modules to document with autodoc) are in another directory, 10 | # add these directories to sys.path here. If the directory is relative to the 11 | # documentation root, use os.path.abspath to make it absolute, like shown here. 12 | # 13 | import os 14 | import sys 15 | 16 | sys.path.insert(0, os.path.abspath("..")) 17 | 18 | 19 | # -- Project information ----------------------------------------------------- 20 | 21 | project = "Frost" 22 | copyright = "2020" 23 | author = "Firefox Operations Security team" 24 | 25 | # If extensions (or modules to document with autodoc) are in another 26 | # directory, 27 | # add these directories to sys.path here. If the directory is relative 28 | # to the 29 | # documentation root, use os.path.abspath to make it absolute, like 30 | # shown here. 
31 | # sys.path.insert(0, os.path.abspath('.')) 32 | 33 | autodoc_member_order = "bysource" 34 | 35 | # -- General configuration --------------------------------------------------- 36 | 37 | # Add any Sphinx extension module names here, as strings. They can be 38 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 39 | # ones. 40 | extensions = [ 41 | "pallets_sphinx_themes", 42 | "sphinx.ext.githubpages", 43 | "sphinx.ext.napoleon", 44 | "sphinx.ext.autodoc", 45 | "sphinx.ext.viewcode", 46 | "sphinxcontrib.mermaid", 47 | "myst_parser", 48 | ] 49 | 50 | # autodoc extension config 51 | # from https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html 52 | autodoc_mock_imports = ["pytest", "_pytest", "py", "sqladmin", "dateutil"] 53 | ##autodoc_default_options = { 54 | ## ##'members': 'var1, var2', 55 | ## ##'member-order': 'bysource', 56 | ## ##'special-members': '__init__', 57 | ## ##'undoc-members': True, 58 | ## ##'exclude-members': '__weakref__' 59 | ##} 60 | 61 | # Add any paths that contain templates here, relative to this directory. 62 | templates_path = ["_templates"] 63 | 64 | # List of patterns, relative to source directory, that match files and 65 | # directories to ignore when looking for source files. 66 | # This pattern also affects html_static_path and html_extra_path. 67 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] 68 | 69 | 70 | # -- Options for HTML output ------------------------------------------------- 71 | 72 | # The theme to use for HTML and HTML Help pages. See the documentation for 73 | # a list of builtin themes. 74 | # 75 | # html_theme = "alabaster" 76 | 77 | # Taken from 78 | # https://github.com/pytest-dev/pytest/blob/master/doc/en/conf.py 79 | sys.path.append(os.path.abspath("_themes")) 80 | html_theme_path = ["_themes"] 81 | 82 | # The theme to use for HTML and HTML Help pages. See the documentation 83 | # for 84 | # a list of builtin themes. 85 | html_theme = "flask" 86 | 87 | # Theme options are theme-specific and customize the look and feel of a 88 | # theme 89 | # further. For a list of options available for each theme, see the 90 | # documentation. 91 | # html_theme_options = {"index_logo": None} 92 | 93 | # Add any paths that contain custom themes here, relative to this 94 | # directory. 95 | # html_theme_path = [] 96 | 97 | # Add any paths that contain custom static files (such as style sheets) here, 98 | # relative to this directory. They are copied after the builtin static files, 99 | # so a file named "default.css" will overwrite the builtin "default.css". 100 | # html_static_path = ["_static"] 101 | -------------------------------------------------------------------------------- /docs/frost-snowman-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/frost/5e4bce438a4c2b27cf97989d4fd3462b3db2610b/docs/frost-snowman-logo.png -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. raw:: html 2 | 3 | 6 | 7 | Frost's documentation! 8 | ================================= 9 | 10 | .. toctree:: 11 | :maxdepth: 2 12 | :caption: Using Frost: 13 | 14 | Overview 15 | FAQ.rst 16 | 17 | .. toctree:: 18 | :maxdepth: 2 19 | :caption: Contributing: 20 | 21 | Architecture.rst 22 | CodingConventions.rst 23 | MozillaDeployment.rst 24 | NewServices.rst 25 | ContributingDocumentation.rst 26 | UseCases.rst 27 | 28 | .. 
toctree:: 29 | :maxdepth: 2 30 | :caption: API: 31 | 32 | Source 33 | 34 | 35 | Indices and tables 36 | ================== 37 | 38 | * :ref:`genindex` 39 | * :ref:`modindex` 40 | * :ref:`search` 41 | -------------------------------------------------------------------------------- /docs/readme-include.md: -------------------------------------------------------------------------------- 1 | ```{include} ../README.md 2 | :relative-docs: docs/ 3 | :relative-images: 4 | ``` 5 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | myst_parser==0.12.10 2 | sphinx-autobuild==2020.9.1 3 | sphinx==3.2.1 4 | pallets-sphinx-themes==1.2.3 5 | sphinxcontrib-mermaid==0.5.0 6 | -------------------------------------------------------------------------------- /example_cache/v/cache/lastfailed: -------------------------------------------------------------------------------- 1 | {} -------------------------------------------------------------------------------- /example_cache/v/pytest_aws:example-account:us-east-1:iam:list_user_policies::UserName=spacemanspiff.json: -------------------------------------------------------------------------------- 1 | { 2 | "PolicyNames": [], 3 | "__pytest_meta": { 4 | "profile": "example-account", 5 | "region": "us-east-1" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /example_cache/v/pytest_aws:example-account:us-east-1:iam:list_user_policies::UserName=tigerone.json: -------------------------------------------------------------------------------- 1 | { 2 | "PolicyNames": [], 3 | "__pytest_meta": { 4 | "profile": "example-account", 5 | "region": "us-east-1" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /example_cache/v/pytest_aws:example-account:us-east-1:iam:list_users::.json: -------------------------------------------------------------------------------- 1 | { 2 | "Users": [ 3 | { 4 | "Arn": "arn:aws:iam::123456789012:user/hobbes", 5 | "CreateDate": "1985-11-18T00:01:10+00:00", 6 | "PasswordLastUsed": "2018-01-09T20:43:00+00:00", 7 | "Path": "/", 8 | "UserId": "H0BBIHMA0CZ0R0K0MN00C", 9 | "UserName": "tigerone" 10 | }, 11 | { 12 | "Arn": "arn:aws:iam::123456789012:user/calvin", 13 | "CreateDate": "1985-11-18T00:01:10+00:00", 14 | "PasswordLastUsed": "2008-01-09T20:43:00+00:00", 15 | "Path": "/", 16 | "UserId": "CALCIHMA0CZ0R0K0MN00C", 17 | "UserName": "spacemanspiff" 18 | } 19 | ], 20 | "__pytest_meta": { 21 | "profile": "example-account", 22 | "region": "us-east-1" 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /exemptions.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict 2 | from datetime import date 3 | import re 4 | import warnings 5 | 6 | import pytest 7 | 8 | 9 | def load(rules): 10 | """Marks tests as xfail based on test name and ID. 11 | 12 | Parses the exemptions section of the conf file and returns a two-level dict with format: 13 | 14 | {<test_name>: 15 | {<test_id>: (<expiration>, <reason>) 16 | ... 17 | } 18 | ... 19 | } 20 | 21 | Examples: 22 | 23 | >>> load([ 24 | ... { 25 | ... 'test_name': 'test_foo', 26 | ... 'test_param_id': 'foo-id', 27 | ... 'expiration_day': date(2050, 1, 1), 28 | ... 'reason': 'in prod never allow foo' 29 | ... } 30 | ... 
]) == {'test_foo': {'foo-id': ('2050-01-01', 'in prod never allow foo')}} 31 | True 32 | 33 | >>> load([ 34 | ... { 35 | ... 'test_name': '*test_foo', 36 | ... 'test_param_id': 'foo-id', 37 | ... 'expiration_day': date(2050, 1, 1), 38 | ... 'reason': 'in prod never allow foo' 39 | ... } 40 | ... ]) == {'*test_foo': {'foo-id': ('2050-01-01', 'in prod never allow foo')}} 41 | True 42 | 43 | >>> load([ 44 | ... { 45 | ... 'test_name': 'test_foo', 46 | ... 'test_param_id': 'foo-id', 47 | ... 'expiration_day': date(2050, 1, 1), 48 | ... 'reason': 'in prod never allow foo' 49 | ... }, 50 | ... { 51 | ... 'test_name': 'test_foo', 52 | ... 'test_param_id': 'bar-id', 53 | ... 'expiration_day': date(2050, 1, 1), 54 | ... 'reason': 'in prod never allow bar' 55 | ... } 56 | ... ]) == { 57 | ... 'test_foo': { 58 | ... 'foo-id': ('2050-01-01', 'in prod never allow foo'), 59 | ... 'bar-id': ('2050-01-01', 'in prod never allow bar')}} 60 | True 61 | 62 | 63 | Duplicate test name and IDs are ignored with a warning: 64 | 65 | >>> load([ 66 | ... { 67 | ... 'test_name': 'test_foo', 68 | ... 'test_param_id': 'foo-id', 69 | ... 'expiration_day': date(2050, 1, 1), 70 | ... 'reason': 'in prod never allow foo' 71 | ... }, 72 | ... { 73 | ... 'test_name': 'test_foo', 74 | ... 'test_param_id': 'foo-id', 75 | ... 'expiration_day': date(2051, 1, 1), 76 | ... 'reason': 'in prod never allow foo another' 77 | ... } 78 | ... ]) == {'test_foo': {'foo-id': ('2050-01-01', 'in prod never allow foo')}} 79 | True 80 | >>> # UserWarning: Exemptions: test_name: test_foo | test_id: foo-id | Skipping duplicate test name and ID 81 | 82 | Does not check that test name and IDs exist (since names might not 83 | be collected and IDs can require an HTTP call). 84 | """ 85 | processed_rules = defaultdict(dict) 86 | 87 | if not rules: 88 | return processed_rules 89 | 90 | for rule in rules: 91 | test_name, test_id = rule["test_name"], rule["test_param_id"] 92 | expiration, reason = rule["expiration_day"], rule["reason"] 93 | 94 | if expiration < date.today(): 95 | warnings.warn( 96 | "Exemptions: test_name: {} | test_id: {} | Skipping rule with expiration day in the past {!r}".format( 97 | test_name, test_id, expiration 98 | ) 99 | ) 100 | continue 101 | 102 | if test_id in processed_rules[test_name]: 103 | warnings.warn( 104 | "Exemptions: test_name: {} | test_id: {} | Skipping duplicate test name and ID".format( 105 | test_name, test_id 106 | ) 107 | ) 108 | continue 109 | 110 | processed_rules[test_name][test_id] = (str(expiration), reason) 111 | 112 | return processed_rules 113 | 114 | 115 | def add_xfail_marker(item): 116 | """ 117 | Adds xfail markers for test names and ids specified in the exemptions conf. 
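    For example (illustrative, reusing the names from the load() doctests
    above): an exemption with test_name 'test_foo' and test_param_id 'foo-id'
    causes the collected item 'test_foo[foo-id]' to be marked
    pytest.mark.xfail(reason=..., strict=True, expiration='2050-01-01').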
118 | """ 119 | if not item.get_closest_marker("parametrize"): 120 | warnings.warn( 121 | "Skipping exemption checks for test without resource name {!r}".format( 122 | item.name 123 | ) 124 | ) 125 | return 126 | 127 | test_exemptions = item.config.custom_config.exemptions.get(item.originalname, None) 128 | 129 | test_id = item.name 130 | try: 131 | # test_admin_user_is_inactive[gsuiteuser@example.com] 132 | test_id = item.name.split("[")[1][:-1] 133 | except IndexError: 134 | warnings.warn( 135 | "Exemption check failed: was unable to parse parametrized test name:", 136 | item.name, 137 | ) 138 | return 139 | 140 | if test_exemptions: 141 | if test_id in test_exemptions: 142 | expiration, reason = test_exemptions[test_id] 143 | item.add_marker( 144 | pytest.mark.xfail(reason=reason, strict=True, expiration=expiration) 145 | ) 146 | return 147 | 148 | # Check for any substring matchers 149 | for exemption_test_id in test_exemptions: 150 | if exemption_test_id.startswith("*"): 151 | substring = exemption_test_id[1:] 152 | if re.search(substring, test_id): 153 | expiration, reason = test_exemptions[exemption_test_id] 154 | item.add_marker( 155 | pytest.mark.xfail( 156 | reason=reason, strict=True, expiration=expiration 157 | ) 158 | ) 159 | return 160 | -------------------------------------------------------------------------------- /frost/__init__.py: -------------------------------------------------------------------------------- 1 | SOURCE_URL = "https://github.com/mozilla/frost" 2 | VERSION = "0.4.7" 3 | -------------------------------------------------------------------------------- /frost/cli.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import glob 4 | 5 | import click 6 | import pytest 7 | 8 | 9 | FROST_PARENT_DIRECTORY = os.path.dirname(os.path.dirname(__file__)) 10 | 11 | 12 | def switch_to_frost_parent_directory(): 13 | """ 14 | Changes to the frost CLI parent directory 15 | 16 | This shouldn't be necessary once tests move to frost/ 17 | """ 18 | # look up frost/.. to get the repo root dir 19 | os.chdir(FROST_PARENT_DIRECTORY) 20 | 21 | 22 | @click.group() 23 | @click.version_option() 24 | def cli(): 25 | """ 26 | FiRefox Operations Security Testing API clients and tests 27 | """ 28 | pass 29 | 30 | 31 | @cli.command( 32 | "list", context_settings=dict(ignore_unknown_options=True,), 33 | ) 34 | def list_tests(): 35 | """ 36 | Lists available test filenames packaged with frost. 37 | """ 38 | switch_to_frost_parent_directory() 39 | sys.stdout.writelines( 40 | f"{test_file_path}\n" 41 | for test_file_path in glob.glob("./**/test*.py", recursive=True) 42 | if not ("/venv" in test_file_path or "/build" in test_file_path) 43 | ) 44 | 45 | 46 | @cli.command( 47 | "test", context_settings=dict(ignore_unknown_options=True,), 48 | ) 49 | @click.argument("pytest_args", nargs=-1, type=click.UNPROCESSED) 50 | @click.pass_context 51 | def run_pytest(ctx, pytest_args): 52 | """ 53 | Run pytest tests passing all trailing args to pytest. 
54 | 55 | Adds the pytest args: 56 | 57 | -s to disable capturing stdout https://docs.pytest.org/en/latest/capture.html 58 | 59 | and frost-specific args: 60 | 61 | --debug-calls to print AWS API calls 62 | --ignore-glob='*.py' to require explicit test specification 63 | """ 64 | switch_to_frost_parent_directory() 65 | sys.exit( 66 | pytest.main(["-s", "--debug-calls", "--ignore-glob='*.py'"] + list(pytest_args)) 67 | ) 68 | 69 | 70 | if __name__ == "__main__": 71 | cli() 72 | -------------------------------------------------------------------------------- /gcp/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/frost/5e4bce438a4c2b27cf97989d4fd3462b3db2610b/gcp/__init__.py -------------------------------------------------------------------------------- /gcp/bigquery/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/frost/5e4bce438a4c2b27cf97989d4fd3462b3db2610b/gcp/bigquery/__init__.py -------------------------------------------------------------------------------- /gcp/bigquery/resources.py: -------------------------------------------------------------------------------- 1 | from conftest import gcp_client 2 | 3 | 4 | def datasets(): 5 | results = [] 6 | for project_id in gcp_client.project_list: 7 | datasets = gcp_client.list( 8 | "bigquery", 9 | "datasets", 10 | version="v2", 11 | results_key="datasets", 12 | call_kwargs={"projectId": project_id}, 13 | ) 14 | results += [ 15 | get_dataset(d["datasetReference"]["datasetId"], project_id) 16 | for d in datasets 17 | ] 18 | return results 19 | 20 | 21 | def get_dataset(dataset_id, project_id): 22 | return gcp_client.get( 23 | project_id, "bigquery", "datasets", "datasetId", dataset_id, version="v2" 24 | ) 25 | -------------------------------------------------------------------------------- /gcp/bigquery/test_dataset_not_publicly_accessible.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from helpers import get_param_id 4 | 5 | from gcp.bigquery.resources import datasets 6 | 7 | 8 | @pytest.mark.parametrize( 9 | "dataset", datasets(), ids=lambda d: get_param_id(d, "friendlyName"), 10 | ) 11 | def test_dataset_not_publicly_accessible(dataset): 12 | """ 13 | Tests whether a dataset is publicly accessible. 14 | """ 15 | for access in dataset["access"]: 16 | assert ( 17 | access.get("specialGroup", "") != "allAuthenticatedUsers" 18 | ), "BigQuery Dataset {0[id]}'s IAM policy allows anyone to access it.".format( 19 | dataset 20 | ) 21 | -------------------------------------------------------------------------------- /gcp/client.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | import logging 3 | 4 | from apiclient.discovery import build as build_service 5 | from apiclient.errors import HttpError 6 | 7 | 8 | # Filters out the warning about using end user credentials. 9 | warnings.filterwarnings( 10 | "ignore", "Your application has authenticated using end user credentials" 11 | ) 12 | 13 | # Filters out a warning around a feature we don't use - https://github.com/googleapis/google-api-python-client/issues/299 14 | logging.getLogger("googleapiclient.discovery_cache").setLevel(logging.ERROR) 15 | 16 | # Filters out a warning about not having a default GCP Project ID. Not required for Frost, no need to display.
17 | logging.getLogger("google.auth._default").setLevel(logging.ERROR) 18 | 19 | 20 | def cache_key(project_id, version, product, subproduct, call="list", id_value="na"): 21 | """Returns the fullname (directory and filename) for a cached GCP API call. 22 | 23 | >>> cache_key("123", "v1", "compute", "firewalls") 24 | 'pytest_gcp/123/v1/compute/firewalls/list:na.json' 25 | >>> cache_key("123", "v1", "compute", "firewalls", "get", "321") 26 | 'pytest_gcp/123/v1/compute/firewalls/get:321.json' 27 | """ 28 | path = "/".join(["pytest_gcp", project_id, version, product, subproduct]) 29 | filename = ":".join([call, id_value]) + ".json" 30 | return f"{path}/{filename}" 31 | 32 | 33 | def get_all_projects_in_folder(folder_id=None): 34 | if folder_id is None: 35 | return 36 | 37 | if not folder_id.startswith("folders/"): 38 | folder_id = "folders/" + folder_id 39 | 40 | crm = build_service("cloudresourcemanager", "v2") 41 | allFolders = get_all_folders_in_folder(crm, folder_id) 42 | crm.close() 43 | allFolders.append(folder_id) 44 | 45 | project_crm = build_service("cloudresourcemanager", "v1") 46 | allProjects = [] 47 | for folder in allFolders: 48 | projects = ( 49 | project_crm.projects().list(filter="parent.id:" + folder[8:]).execute() 50 | ) 51 | if projects: 52 | allProjects.append(projects) 53 | project_crm.close() 54 | 55 | # flatten and return 56 | return sum([p["projects"] for p in allProjects], []) 57 | 58 | 59 | def get_all_folders_in_folder(crm, folder_id=None): 60 | allFolders = [] 61 | if folder_id is None: 62 | return allFolders 63 | folders = crm.folders().list(parent=folder_id).execute() 64 | if folders: 65 | for folder in folders["folders"]: 66 | allFolders.extend(get_all_folders_in_folder(crm, folder["name"])) 67 | else: 68 | allFolders = [folder_id] 69 | 70 | return allFolders 71 | 72 | 73 | class GCPClient: 74 | def __init__(self, project_id, folder_id, cache, debug_calls, debug_cache, offline): 75 | self.cache = cache 76 | self.debug_calls = debug_calls 77 | self.debug_cache = debug_cache 78 | self.offline = offline 79 | 80 | self.project_list = [] 81 | if project_id is not None: 82 | self.project_list = [project_id] 83 | 84 | if folder_id is not None: 85 | self.project_list = [ 86 | p["projectId"] for p in get_all_projects_in_folder(folder_id) 87 | ] 88 | 89 | def get_project_iam_policies(self): 90 | if self.offline: 91 | return [] 92 | 93 | service = self._service("cloudresourcemanager") 94 | policies = [] 95 | for project_id in self.project_list: 96 | try: 97 | resp = ( 98 | service.projects() 99 | .getIamPolicy(resource=project_id, body={}) 100 | .execute() 101 | ) 102 | policies.append(resp) 103 | except HttpError as e: 104 | if "has not been used in project" in e._get_reason(): 105 | continue 106 | raise e 107 | return policies 108 | 109 | def get_project_container_config(self): 110 | if self.offline: 111 | return {} 112 | 113 | service = self._service("container") 114 | for project_id in self.project_list: 115 | try: 116 | # TODO : We may want to correlate the zone here with the corresponding clusters zone. 
117 | request = ( 118 | service.projects() 119 | .locations() 120 | .getServerConfig( 121 | name="projects/{}/locations/us-west1".format(project_id) 122 | ) 123 | ) 124 | resp = request.execute() 125 | except HttpError as e: 126 | # This will be thrown if an API is disabled, so we will try the next project id 127 | if "has not been used in project" in e._get_reason(): 128 | continue 129 | raise e 130 | return resp 131 | 132 | return {} 133 | 134 | def get( 135 | self, 136 | project_id, 137 | product, 138 | subproduct, 139 | id_key, 140 | id_value, 141 | version="v1", 142 | call_kwargs=None, 143 | ): 144 | if self.offline: 145 | result = {} 146 | else: 147 | result = self._get( 148 | project_id, product, subproduct, id_key, id_value, version 149 | ) 150 | return result 151 | 152 | def _get( 153 | self, 154 | project_id, 155 | product, 156 | subproduct, 157 | id_key, 158 | id_value, 159 | version="v1", 160 | call_kwargs=None, 161 | ): 162 | ckey = cache_key(project_id, version, product, subproduct, "get", id_value) 163 | cached_result = self.cache.get(ckey, None) 164 | if cached_result is not None: 165 | print("found cached value for", ckey) 166 | return cached_result 167 | 168 | if call_kwargs is None: 169 | call_kwargs = {} 170 | call_kwargs["projectId"] = project_id 171 | call_kwargs[id_key] = id_value 172 | 173 | service = self._service(product, version) 174 | 175 | api_entity = getattr(service, subproduct.split(".")[0])() 176 | for entity in subproduct.split(".")[1:]: 177 | api_entity = getattr(api_entity, entity)() 178 | 179 | try: 180 | result = api_entity.get(**call_kwargs).execute() 181 | except HttpError as e: 182 | # This will be thrown if an API is disabled. 183 | if "has not been used in project" in e._get_reason(): 184 | return {} 185 | raise e 186 | 187 | result["projectId"] = project_id 188 | 189 | if self.debug_cache: 190 | print("setting cache value for", ckey) 191 | 192 | self.cache.set(ckey, result) 193 | 194 | return result 195 | 196 | def list( 197 | self, product, subproduct, version="v1", results_key="items", call_kwargs=None 198 | ): 199 | """Public list func. See _list func docstring for more info""" 200 | if self.offline: 201 | results = [] 202 | else: 203 | if call_kwargs is not None: 204 | return list( 205 | self._list(product, subproduct, version, results_key, call_kwargs) 206 | ) 207 | 208 | results = [] 209 | for project_id in self.project_list: 210 | call_kwargs = {"project": project_id} 211 | results += list( 212 | self._list(product, subproduct, version, results_key, call_kwargs) 213 | ) 214 | 215 | return results 216 | 217 | def _list( 218 | self, product, subproduct, version="v1", results_key="items", call_kwargs=None 219 | ): 220 | """ 221 | Internal function for calling .list() on some service's resource. Supports debug printing 222 | and caching of the response. 223 | 224 | If a service supports "zones", then loop through each zone to collect all resources from 225 | that service. An example of this is collecting all compute instances (compute.instances().list()). 
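        A typical call through the public wrapper, mirroring the usage in
        gcp/bigquery/resources.py earlier in this repository (the shape of
        the returned dicts depends on the API queried):

            gcp_client.list("bigquery", "datasets", version="v2",
                            results_key="datasets",
                            call_kwargs={"projectId": project_id})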
226 | """ 227 | 228 | project_id = "-" 229 | if "project" in call_kwargs: 230 | project_id = call_kwargs["project"] 231 | if "projectId" in call_kwargs: 232 | project_id = call_kwargs["projectId"] 233 | 234 | call_id = project_id 235 | if call_kwargs is not None: 236 | call_id = "-".join(sum([x for x in call_kwargs.items()], ())) 237 | 238 | ckey = cache_key(call_id, version, product, subproduct) 239 | cached_result = self.cache.get(ckey, None) 240 | if cached_result is not None: 241 | print("found cached value for", ckey) 242 | return cached_result 243 | 244 | service = self._service(product, version) 245 | 246 | api_entity = getattr(service, subproduct.split(".")[0])() 247 | for entity in subproduct.split(".")[1:]: 248 | api_entity = getattr(api_entity, entity)() 249 | 250 | if self.debug_calls: 251 | print( 252 | "calling {}.{} for project {}".format(product, subproduct, project_id) 253 | ) 254 | 255 | if self._zone_aware(product, subproduct): 256 | results = [] 257 | for zone in self._list_zones(project_id): 258 | results = sum( 259 | [results], 260 | self._list_all_items( 261 | api_entity, {**call_kwargs, "zone": zone}, results_key 262 | ), 263 | ) 264 | else: 265 | results = self._list_all_items(api_entity, call_kwargs, results_key) 266 | 267 | # Append the project id to each resource for use in test metadata 268 | results = [{"projectId": project_id, **result} for result in results] 269 | 270 | if self.debug_cache: 271 | print("setting cache value for", ckey) 272 | 273 | self.cache.set(ckey, results) 274 | 275 | return results 276 | 277 | def _list_all_items(self, api_entity, call_kwargs, results_key): 278 | """Internal helper for dealing with pagination""" 279 | request = api_entity.list(**call_kwargs) 280 | items = [] 281 | while request is not None: 282 | try: 283 | resp = request.execute() 284 | except HttpError as e: 285 | # This will be thrown if an API is disabled. 286 | if "has not been used in project" in e._get_reason(): 287 | return [] 288 | if "has not enabled" in e._get_reason(): 289 | return [] 290 | raise e 291 | items = sum([items], resp.get(results_key, [])) 292 | try: 293 | request = api_entity.list_next(request, resp) 294 | except AttributeError: 295 | request = None 296 | 297 | return items 298 | 299 | def _service(self, product, version="v1"): 300 | """Internal helper around google client lib's build service func""" 301 | return build_service(product, version) 302 | 303 | def _zone_aware(self, product, subproduct): 304 | """ 305 | Internal helper for whether or not a product and subproduct take zones into account. 306 | 307 | Differing heavily from AWS, most GCP services do not take zones into account with API 308 | calls. 309 | """ 310 | if product == "compute" and subproduct == "instances": 311 | return True 312 | return False 313 | 314 | def _list_zones(self, project_id): 315 | """Internal helper for listing all zones""" 316 | try: 317 | response = ( 318 | self._service("compute") 319 | .zones() 320 | .list(project=project_id) 321 | .execute()["items"] 322 | ) 323 | except HttpError as e: 324 | # This will be thrown if an API is disabled. 
325 | if "has not been used in project" in e._get_reason(): 326 | return [] 327 | raise e 328 | return [result["name"] for result in response] 329 | -------------------------------------------------------------------------------- /gcp/compute/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/frost/5e4bce438a4c2b27cf97989d4fd3462b3db2610b/gcp/compute/__init__.py -------------------------------------------------------------------------------- /gcp/compute/helpers.py: -------------------------------------------------------------------------------- 1 | def does_firewall_open_all_ports_to_any(firewall): 2 | """ 3 | Returns True if firewall has a rule to open all ports to any source. Excludes ICMP. 4 | 5 | >>> does_firewall_open_all_ports_to_any({}) 6 | False 7 | >>> does_firewall_open_all_ports_to_any({'sourceRanges': ['1.1.1.1/1'], 'allowed': [{'ports': ['1', '2', '300']}]}) 8 | False 9 | >>> does_firewall_open_all_ports_to_any({'sourceRanges': ['1.1.1.1/1'], 'allowed': [{'ports': ['0-65535']}]}) 10 | True 11 | >>> does_firewall_open_all_ports_to_any({'sourceRanges': ['0.0.0.0/0'], 'allowed': [{'ports': ['0-65535']}]}) 12 | True 13 | >>> does_firewall_open_all_ports_to_any({'sourceRanges': ['10.0.0.5/32'], 'allowed': [{'ports': ['0-65535']}]}) 14 | True 15 | """ 16 | if does_firewall_open_all_ports_to_all(firewall): 17 | return True 18 | 19 | if firewall.get("sourceRanges") is None: 20 | return False 21 | 22 | for rule in firewall.get("allowed"): 23 | if rule.get("IPProtocol", "") == "icmp": 24 | continue 25 | if not rule.get("ports"): 26 | return True 27 | for port_rule in rule.get("ports"): 28 | if port_rule == "0-65535": 29 | return True 30 | 31 | return False 32 | 33 | 34 | def does_firewall_open_all_ports_to_all(firewall): 35 | """ 36 | Returns True if firewall has a rule to open all ports to all. Excludes ICMP. 37 | 38 | >>> does_firewall_open_all_ports_to_all({}) 39 | False 40 | >>> does_firewall_open_all_ports_to_all({'sourceRanges': ['1.1.1.1/1']}) 41 | False 42 | >>> does_firewall_open_all_ports_to_all({'sourceRanges': ['1.1.1.1/1'], 'allowed': [{'ports': ['0-65535']}]}) 43 | False 44 | >>> does_firewall_open_all_ports_to_all({'sourceRanges': ['0.0.0.0/0'], 'allowed': [{'ports': ['0-65535']}]}) 45 | True 46 | """ 47 | if ( 48 | firewall.get("sourceRanges") is None 49 | or "0.0.0.0/0" not in firewall["sourceRanges"] 50 | ): 51 | return False 52 | 53 | for rule in firewall.get("allowed"): 54 | if rule.get("IPProtocol", "") == "icmp": 55 | continue 56 | if not rule.get("ports"): 57 | return True 58 | for port_rule in rule.get("ports"): 59 | if port_rule == "0-65535": 60 | return True 61 | 62 | return False 63 | 64 | 65 | def does_firewall_open_any_ports_to_all(firewall, allowed_ports=None): 66 | """ 67 | Returns True if firewall has a rule to open any ports (except 80/443) to all. Excludes ICMP. 
68 | 
69 |     >>> does_firewall_open_any_ports_to_all({})
70 |     False
71 |     >>> does_firewall_open_any_ports_to_all({'sourceRanges': ['1.1.1.1/1']})
72 |     False
73 |     >>> does_firewall_open_any_ports_to_all({'sourceRanges': ['1.1.1.1/1'], 'allowed': [{'ports': ['0-65535']}]})
74 |     False
75 |     >>> does_firewall_open_any_ports_to_all({'sourceRanges': ['0.0.0.0/0'], 'allowed': [{'ports': ['0-65535']}]})
76 |     True
77 |     >>> does_firewall_open_any_ports_to_all({'sourceRanges': ['1.1.1.1/1'], 'allowed': [{'ports': ['123']}]})
78 |     False
79 |     >>> does_firewall_open_any_ports_to_all({'sourceRanges': ['0.0.0.0/0'], 'allowed': [{'ports': ['123']}]})
80 |     True
81 |     >>> does_firewall_open_any_ports_to_all({'sourceRanges': ['0.0.0.0/0'], 'allowed': [{'ports': ['80']}]})
82 |     False
83 |     >>> does_firewall_open_any_ports_to_all({'sourceRanges': ['0.0.0.0/0'], 'allowed': [{'ports': ['443']}]})
84 |     False
85 |     >>> does_firewall_open_any_ports_to_all({'sourceRanges': ['0.0.0.0/0'], 'allowed': [{'ports': ['22', '80', '443']}]})
86 |     True
87 |     """
88 |     if allowed_ports is None:
89 |         allowed_ports = []
90 | 
91 |     if does_firewall_open_all_ports_to_all(firewall):
92 |         return True
93 | 
94 |     if (
95 |         firewall.get("sourceRanges") is None
96 |         or "0.0.0.0/0" not in firewall["sourceRanges"]
97 |     ):
98 |         return False
99 | 
100 |     for rule in firewall.get("allowed", []):
101 |         if rule.get("IPProtocol", "") == "icmp":
102 |             continue
103 |         for port_rule in rule.get("ports", []):
104 |             try:
105 |                 port_rule = int(port_rule)
106 |             except ValueError:
107 |                 return True
108 | 
109 |             if port_rule in allowed_ports:
110 |                 continue
111 | 
112 |             if port_rule not in [80, 443]:
113 |                 return True
114 | 
115 |     return False
116 | 
117 | 
118 | def firewall_id(firewall):
119 |     """A getter fn for test ids for Firewalls"""
120 |     return (
121 |         "{}-{}".format(firewall["id"], firewall["name"])
122 |         if hasattr(firewall, "__getitem__")
123 |         else None
124 |     )
--------------------------------------------------------------------------------
/gcp/compute/resources.py:
--------------------------------------------------------------------------------
1 | from conftest import gcp_client
2 | 
3 | 
4 | def firewalls():
5 |     return gcp_client.list("compute", "firewalls")
6 | 
7 | 
8 | def networks():
9 |     return gcp_client.list("compute", "networks")
10 | 
11 | 
12 | def instances():
13 |     return gcp_client.list("compute", "instances")
14 | 
15 | 
16 | def clusters():
17 |     results = []
18 |     for project_id in gcp_client.project_list:
19 |         results += gcp_client.list(
20 |             "container",
21 |             "projects.locations.clusters",
22 |             results_key="clusters",
23 |             call_kwargs={"parent": "projects/{}/locations/-".format(project_id)},
24 |         )
25 |     return results
26 | 
27 | 
28 | def networks_with_instances():
29 |     allInstances = instances()
30 |     in_use_networks = []
31 |     for network in networks():
32 |         network["instances"] = []
33 |         for instance in allInstances:
34 |             if network["selfLink"] in [
35 |                 interface["network"] for interface in instance["networkInterfaces"]
36 |             ]:
37 |                 network["instances"].append(instance)
38 |         if len(network["instances"]):
39 |             in_use_networks.append(network)
40 | 
41 |     return in_use_networks
42 | 
43 | 
44 | def in_use_firewalls():
45 |     all_networks = networks_with_instances()
46 |     results = []
47 |     for firewall in firewalls():
48 |         if firewall.get("disabled"):
49 |             continue
50 |         for network in all_networks:
51 |             if (
52 |                 network["selfLink"] == firewall["network"]
53 |                 and len(network["instances"]) > 0
54 |             ):
55 |                 results.append(firewall)
56 |     return results
57 | 
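
# Note: a GCP firewall references exactly one network, so each enabled
# firewall is appended at most once by the loop above.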
--------------------------------------------------------------------------------
/gcp/compute/test_firewall_opens_all_ports_to_all.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | from gcp.compute.helpers import does_firewall_open_all_ports_to_all, firewall_id
4 | from gcp.compute.resources import in_use_firewalls
5 | 
6 | 
7 | @pytest.mark.gcp_compute
8 | @pytest.mark.parametrize("firewall", in_use_firewalls(), ids=firewall_id)
9 | def test_firewall_opens_all_ports_to_all(firewall):
10 |     """Checks that the firewall does not open all ports to all IPs"""
11 |     assert not does_firewall_open_all_ports_to_all(firewall)
--------------------------------------------------------------------------------
/gcp/compute/test_firewall_opens_all_ports_to_any.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | from gcp.compute.helpers import does_firewall_open_all_ports_to_any, firewall_id
4 | from gcp.compute.resources import in_use_firewalls
5 | 
6 | 
7 | @pytest.mark.gcp_compute
8 | @pytest.mark.parametrize("firewall", in_use_firewalls(), ids=firewall_id)
9 | def test_firewall_opens_all_ports_to_any(firewall):
10 |     """
11 |     This test confirms that there are no firewall
12 |     rules that allow ingress access to all ports from anywhere.
13 | 
14 |     A part of CIS 3.1
15 |     """
16 |     assert not does_firewall_open_all_ports_to_any(firewall)
--------------------------------------------------------------------------------
/gcp/compute/test_firewall_opens_any_ports_to_all.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | from gcp.compute.helpers import does_firewall_open_any_ports_to_all, firewall_id
4 | from gcp.compute.resources import in_use_firewalls
5 | 
6 | 
7 | @pytest.mark.gcp_compute
8 | @pytest.mark.parametrize("firewall", in_use_firewalls(), ids=firewall_id)
9 | def test_firewall_opens_any_ports_to_all(firewall, gcp_config):
10 |     """
11 |     This test confirms that no ports are open to 0.0.0.0/0 (except
12 |     80 and 443) on any VPC.
13 | 
14 |     CIS 3.6, 3.7
15 |     """
16 |     allowed_ports = gcp_config.get_allowed_ports(firewall_id(firewall))
17 |     assert not does_firewall_open_any_ports_to_all(firewall, allowed_ports)
--------------------------------------------------------------------------------
/gcp/compute/test_gke_version_up_to_date.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | from helpers import get_param_id
4 | 
5 | from gcp.compute.resources import clusters
6 | from conftest import gcp_client
7 | 
8 | 
9 | @pytest.fixture
10 | def server_config():
11 |     return gcp_client.get_project_container_config()
12 | 
13 | 
14 | @pytest.mark.gcp_compute
15 | @pytest.mark.parametrize(
16 |     "cluster", clusters(), ids=lambda c: get_param_id(c, "name"),
17 | )
18 | def test_gke_version_up_to_date(cluster, server_config):
19 |     """
20 |     Tests if the GKE version is up to date by comparing the
21 |     list of valid master versions to what is
22 |     currently running on the cluster.
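
    The server config comes from get_project_container_config(), which
    currently queries a single hard-coded location (us-west1).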
23 | """ 24 | assert ( 25 | cluster["currentMasterVersion"] in server_config["validMasterVersions"] 26 | ), "Current GKE master version ({}) is not in the list of valid master versions.".format( 27 | cluster["currentMasterVersion"] 28 | ) 29 | assert ( 30 | cluster["currentNodeVersion"] in server_config["validMasterVersions"] 31 | ), "Current GKE node version ({}) is not in the list of valid master versions.".format( 32 | cluster["currentNodeVersion"] 33 | ) 34 | -------------------------------------------------------------------------------- /gcp/compute/test_only_allowed_gke_versions.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from gcp.compute.resources import clusters 4 | from conftest import gcp_client 5 | 6 | 7 | @pytest.fixture 8 | def allowed_gke_versions(pytestconfig): 9 | return pytestconfig.custom_config.gcp.allowed_gke_versions 10 | 11 | 12 | @pytest.mark.gcp_compute 13 | @pytest.mark.parametrize( 14 | "cluster", clusters(), ids=lambda c: c["name"] if isinstance(c, dict) else None 15 | ) 16 | def test_only_allowed_gke_versions(cluster, allowed_gke_versions): 17 | """ 18 | Tests if GKE version is within allowed list of GKE versions. 19 | 20 | Useful for checking upgrade status after a vulnerability is released, as in: 21 | - https://cloud.google.com/kubernetes-engine/docs/security-bulletins#gcp-2020-012 22 | """ 23 | assert ( 24 | cluster["currentMasterVersion"] in allowed_gke_versions 25 | ), "Current GKE master version ({}) is not in the list of allowed GKE versions.".format( 26 | cluster["currentMasterVersion"] 27 | ) 28 | assert ( 29 | cluster["currentNodeVersion"] in allowed_gke_versions 30 | ), "Current GKE node version ({}) is not in the list of allowed GKE versions.".format( 31 | cluster["currentNodeVersion"] 32 | ) 33 | -------------------------------------------------------------------------------- /gcp/conftest.py: -------------------------------------------------------------------------------- 1 | def pytest_configure(config): 2 | # register custom marks for gcp services 3 | for svc_name in [ 4 | "gcp", 5 | "gcp_compute", 6 | "gcp_iam", 7 | "gcp_sql", 8 | ]: 9 | config.addinivalue_line( 10 | "markers", "{}: mark tests against {}".format(svc_name, svc_name) 11 | ) 12 | -------------------------------------------------------------------------------- /gcp/iam/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/frost/5e4bce438a4c2b27cf97989d4fd3462b3db2610b/gcp/iam/__init__.py -------------------------------------------------------------------------------- /gcp/iam/helpers.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | 3 | 4 | def is_service_account_key_old(service_account_key): 5 | """ 6 | Tests whether a service account key is older than 90 days. 
7 | """ 8 | creation_date = datetime.datetime.strptime( 9 | service_account_key["validAfterTime"][:10], "%Y-%m-%d" 10 | ) 11 | # TODO: Make configurable 12 | return creation_date > datetime.datetime.now( 13 | datetime.timezone.utc 14 | ) - datetime.timedelta(days=90) 15 | -------------------------------------------------------------------------------- /gcp/iam/resources.py: -------------------------------------------------------------------------------- 1 | from conftest import gcp_client 2 | 3 | 4 | def service_accounts(): 5 | results = [] 6 | for project_id in gcp_client.project_list: 7 | results += gcp_client.list( 8 | "iam", 9 | "projects.serviceAccounts", 10 | results_key="accounts", 11 | call_kwargs={"name": "projects/" + project_id}, 12 | ) 13 | return results 14 | 15 | 16 | def service_account_keys(service_account): 17 | return gcp_client.list( 18 | "iam", 19 | "projects.serviceAccounts.keys", 20 | results_key="keys", 21 | call_kwargs={"name": service_account["name"]}, 22 | ) 23 | 24 | 25 | def all_service_account_keys(): 26 | keys = [] 27 | for sa in service_accounts(): 28 | for key in service_account_keys(sa): 29 | keys.append(key) 30 | return keys 31 | 32 | 33 | def project_iam_bindings(): 34 | bindings = [] 35 | policies = gcp_client.get_project_iam_policies() 36 | for policy in policies: 37 | for binding in policy.get("bindings", []): 38 | bindings.append(binding) 39 | return bindings 40 | -------------------------------------------------------------------------------- /gcp/iam/test_admin_service_accounts.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from helpers import get_param_id 4 | 5 | from gcp.iam.resources import project_iam_bindings 6 | 7 | 8 | @pytest.mark.gcp_iam 9 | @pytest.mark.parametrize( 10 | "iam_binding", project_iam_bindings(), ids=lambda r: get_param_id(r, "role"), 11 | ) 12 | def test_admin_service_accounts(iam_binding): 13 | """ 14 | No Service Account should have the `role/editor` 15 | or `role/owner` roles attached or any roles matching `*Admin` 16 | 17 | CIS 1.4 18 | """ 19 | if (iam_binding["role"] in ["role/editor", "role/owner"]) or ( 20 | iam_binding["role"].endswith("Admin") 21 | ): 22 | for member in iam_binding["members"]: 23 | assert not member.startswith("serviceAccount") 24 | -------------------------------------------------------------------------------- /gcp/iam/test_only_allowed_org_accounts.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from helpers import get_param_id 4 | 5 | from gcp.iam.resources import project_iam_bindings 6 | 7 | 8 | @pytest.fixture 9 | def allowed_org_domains(pytestconfig): 10 | return pytestconfig.custom_config.gcp.allowed_org_domains 11 | 12 | 13 | EXCLUDED_ROLES = ["roles/logging.viewer"] 14 | 15 | 16 | @pytest.mark.gcp_iam 17 | @pytest.mark.parametrize( 18 | "iam_binding", project_iam_bindings(), ids=lambda r: get_param_id(r, "role"), 19 | ) 20 | def test_only_allowed_org_accounts(iam_binding, allowed_org_domains): 21 | """ 22 | Only allow specified org domains as members within this project, with a few exceptions. 
23 | * Service Accounts are excluded 24 | * The following roles are excluded: 25 | - roles/logging.viewer 26 | """ 27 | if len(allowed_org_domains) == 0: 28 | assert False, "No allowed org domains specified" 29 | 30 | if iam_binding["role"] not in EXCLUDED_ROLES: 31 | for member in iam_binding["members"]: 32 | if not member.startswith("serviceAccount") and not member.startswith( 33 | "deleted:serviceAccount" 34 | ): 35 | assert ( 36 | member.split("@")[-1] in allowed_org_domains 37 | ), "{} was found and is not in the allowed_org_domains".format(member) 38 | -------------------------------------------------------------------------------- /gcp/iam/test_service_account_key_is_old.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from helpers import get_param_id 4 | 5 | from gcp.iam.resources import all_service_account_keys 6 | from gcp.iam.helpers import is_service_account_key_old 7 | 8 | 9 | @pytest.mark.gcp_iam 10 | @pytest.mark.parametrize( 11 | "service_account_key", 12 | all_service_account_keys(), 13 | ids=lambda key: get_param_id(key, "name"), 14 | ) 15 | def test_service_account_key_is_old(service_account_key): 16 | """Tests if the Service Account Key is older than 90 days""" 17 | assert is_service_account_key_old(service_account_key) 18 | -------------------------------------------------------------------------------- /gcp/sql/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/frost/5e4bce438a4c2b27cf97989d4fd3462b3db2610b/gcp/sql/__init__.py -------------------------------------------------------------------------------- /gcp/sql/resources.py: -------------------------------------------------------------------------------- 1 | from conftest import gcp_client 2 | 3 | 4 | def instances(): 5 | return gcp_client.list("sqladmin", "instances", version="v1beta4") 6 | -------------------------------------------------------------------------------- /gcp/sql/test_sql_instance_automatic_backup_enabled.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from helpers import get_param_id 4 | 5 | from gcp.sql.resources import instances 6 | 7 | 8 | @pytest.mark.gcp_sql 9 | @pytest.mark.parametrize( 10 | "sql_instance", instances(), ids=lambda instance: get_param_id(instance, "name"), 11 | ) 12 | def test_sql_instance_automatic_backup_enabled(sql_instance): 13 | """Test CloudSQL Instance has Automatic Backup Enabled""" 14 | assert sql_instance.get("settings").get("backupConfiguration").get("enabled") 15 | if "MYSQL" in sql_instance.get("databaseVersion"): 16 | assert ( 17 | sql_instance.get("settings") 18 | .get("backupConfiguration") 19 | .get("binaryLogEnabled") 20 | ) 21 | -------------------------------------------------------------------------------- /gcp/sql/test_sql_instance_private_ip_required.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from helpers import get_param_id 4 | 5 | from gcp.sql.resources import instances 6 | 7 | 8 | @pytest.mark.gcp_sql 9 | @pytest.mark.parametrize( 10 | "sql_instance", instances(), ids=lambda instance: get_param_id(instance, "name"), 11 | ) 12 | def test_sql_instance_private_ip_required(sql_instance): 13 | """ 14 | Test CloudSQL Instance requires Private IP to connect 15 | 16 | CIS 6.2 17 | """ 18 | assert ( 19 | sql_instance.get("settings").get("ipConfiguration").get("privateNetwork", None) 20 
| ), "CloudSQL Instance {0[name]} does not have a private network configured.".format( 21 | sql_instance 22 | ) 23 | 24 | assert ( 25 | sql_instance.get("settings").get("ipConfiguration").get("ipv4Enabled", None) 26 | == False 27 | ), "CloudSQL Instance {0[name]} has a public IPv4 enabled.".format(sql_instance) 28 | -------------------------------------------------------------------------------- /gcp/sql/test_sql_instance_ssl_required.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from helpers import get_param_id 4 | 5 | from gcp.sql.resources import instances 6 | 7 | 8 | @pytest.mark.gcp_sql 9 | @pytest.mark.parametrize( 10 | "sql_instance", instances(), ids=lambda instance: get_param_id(instance, "name"), 11 | ) 12 | def test_sql_instance_ssl_required(sql_instance): 13 | """Test CloudSQL Instance requires SSL""" 14 | assert ( 15 | sql_instance.get("settings").get("ipConfiguration").get("requireSsl", False) 16 | ), "CloudSQL Instance {0[name]} does not require SSL".format(sql_instance) 17 | -------------------------------------------------------------------------------- /gsuite/README.md: -------------------------------------------------------------------------------- 1 | ### EXPERIMENTAL - GSuite tests 2 | 3 | These tests and the gsuite client are experimental. The client currently only supports the "application default" google auth credentials. 4 | -------------------------------------------------------------------------------- /gsuite/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/frost/5e4bce438a4c2b27cf97989d4fd3462b3db2610b/gsuite/__init__.py -------------------------------------------------------------------------------- /gsuite/admin/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla/frost/5e4bce438a4c2b27cf97989d4fd3462b3db2610b/gsuite/admin/__init__.py -------------------------------------------------------------------------------- /gsuite/admin/helpers.py: -------------------------------------------------------------------------------- 1 | from dateutil.parser import parse 2 | 3 | 4 | def user_is_inactive(user, no_activity_since): 5 | """ 6 | Compares the lastLoginTime with no_activity_since. 7 | """ 8 | return parse(user["lastLoginTime"]) > no_activity_since 9 | 10 | 11 | def owners_of_a_group(members): 12 | """ 13 | Returns a list of owners from a list of group members 14 | """ 15 | return [member for member in members if is_owner_of_group(member)] 16 | 17 | 18 | def is_owner_of_group(member): 19 | """ 20 | Check whether a member of a group is an owner with a status of 'ACTIVE'. 
21 | """ 22 | return ( 23 | member["type"] == "USER" 24 | and member["role"] == "OWNER" 25 | and member["status"] == "ACTIVE" 26 | ) 27 | -------------------------------------------------------------------------------- /gsuite/admin/resources.py: -------------------------------------------------------------------------------- 1 | from conftest import gsuite_client 2 | 3 | 4 | def list_users(): 5 | return gsuite_client.list_users() 6 | 7 | 8 | def list_groups(): 9 | return gsuite_client.list_groups() 10 | 11 | 12 | def list_members_of_group(group): 13 | return gsuite_client.list_members_of_group(group) 14 | 15 | 16 | def list_groups_and_members(): 17 | return [ 18 | {**group, "members": list_members_of_group(group["email"])} 19 | for group in list_groups() 20 | ] 21 | -------------------------------------------------------------------------------- /gsuite/admin/test_admin_user_is_inactive.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from helpers import get_param_id 4 | 5 | from gsuite.admin.resources import list_users 6 | from gsuite.admin.helpers import user_is_inactive 7 | 8 | 9 | @pytest.fixture 10 | def no_activity_since(pytestconfig): 11 | return pytestconfig.custom_config.gsuite.no_activity_since() 12 | 13 | 14 | @pytest.mark.gsuite_admin 15 | @pytest.mark.parametrize( 16 | "user", list_users(), ids=lambda u: get_param_id(u, "primaryEmail"), 17 | ) 18 | def test_admin_user_is_inactive(user, no_activity_since): 19 | """Tests whether user is active by checking lastLoginTime""" 20 | assert user_is_inactive(user, no_activity_since) 21 | -------------------------------------------------------------------------------- /gsuite/admin/test_groups_have_enough_owners.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from helpers import get_param_id 4 | 5 | from gsuite.admin.resources import list_groups_and_members 6 | from gsuite.admin.helpers import owners_of_a_group 7 | 8 | 9 | @pytest.fixture 10 | def min_number_of_owners(pytestconfig): 11 | return pytestconfig.custom_config.gsuite.min_number_of_owners 12 | 13 | 14 | @pytest.mark.gsuite_admin 15 | @pytest.mark.parametrize( 16 | "group", list_groups_and_members(), ids=lambda g: get_param_id(g, "email"), 17 | ) 18 | def test_groups_have_enough_owners(group, min_number_of_owners): 19 | assert len(owners_of_a_group(group["members"])) >= min_number_of_owners 20 | -------------------------------------------------------------------------------- /gsuite/client.py: -------------------------------------------------------------------------------- 1 | import os 2 | import httplib2 3 | 4 | from apiclient import discovery 5 | import google.auth 6 | 7 | SCOPES = [ 8 | "https://www.googleapis.com/auth/admin.directory.user.readonly", 9 | "https://www.googleapis.com/auth/admin.directory.group.readonly", 10 | ] 11 | 12 | 13 | class GsuiteClient: 14 | def __init__(self, domain, offline): 15 | self.domain = domain 16 | self.offline = offline 17 | 18 | if not self.offline: 19 | self.directory_client = self.build_directory_client() 20 | 21 | def build_directory_client(self): 22 | # TODO: Support service accounts: 23 | # https://googleapis.github.io/google-api-python-client/docs/oauth-server.html#examples 24 | credentials, _ = google.auth.default(scopes=SCOPES) 25 | return discovery.build("admin", "directory_v1", credentials=credentials) 26 | 27 | def list_users(self): 28 | """ 29 | 
https://developers.google.com/admin-sdk/directory/v1/reference/users#resource 30 | """ 31 | if self.offline: 32 | return [] 33 | 34 | req = self.directory_client.users().list(domain=self.domain) 35 | users = [] 36 | while req is not None: 37 | resp = req.execute() 38 | users += resp.get("users", []) 39 | req = self.directory_client.users().list_next(req, resp) 40 | return users 41 | 42 | def list_groups(self): 43 | """ 44 | https://developers.google.com/admin-sdk/directory/v1/reference/groups 45 | """ 46 | if self.offline: 47 | return [] 48 | 49 | req = self.directory_client.groups().list(domain=self.domain) 50 | groups = [] 51 | while req is not None: 52 | resp = req.execute() 53 | groups += resp.get("groups", []) 54 | req = self.directory_client.groups().list_next(req, resp) 55 | return groups 56 | 57 | def list_members_of_group(self, group): 58 | if self.offline: 59 | return [] 60 | 61 | req = self.directory_client.members().list(groupKey=group) 62 | members = [] 63 | while req is not None: 64 | resp = req.execute() 65 | members += resp.get("members", []) 66 | req = self.directory_client.members().list_next(req, resp) 67 | return members 68 | -------------------------------------------------------------------------------- /gsuite/conftest.py: -------------------------------------------------------------------------------- 1 | def pytest_configure(config): 2 | # register custom marks for gsuite services 3 | for svc_name in [ 4 | "gsuite_admin", 5 | ]: 6 | config.addinivalue_line( 7 | "markers", "{}: mark tests against {}".format(svc_name, svc_name) 8 | ) 9 | -------------------------------------------------------------------------------- /helpers.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Optional 2 | 3 | 4 | def get_param_id(obj: Any, key: str) -> Optional[str]: 5 | """ 6 | Returns the params test parameter ID or None 7 | """ 8 | # avoid confusing "TypeError: 'NotSetType' object is not subscriptable" errors 9 | if not hasattr(obj, "__getitem__"): 10 | return None 11 | 12 | try: 13 | return obj[key] 14 | except KeyError: 15 | return None 16 | -------------------------------------------------------------------------------- /meta_test_cache.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from dateutil.parser import parse 3 | 4 | import pytest 5 | 6 | 7 | TEST_IAM_USERS = [ 8 | { 9 | "Arn": "arn:aws:iam::123456789012:user/hobbes", 10 | "CreateDate": parse("1985-11-18T00:01:10+00:00", ignoretz=True), 11 | "PasswordLastUsed": parse("2018-01-09T20:43:00+00:00", ignoretz=True), 12 | "NotARealField": datetime.utcnow(), 13 | "Path": "/", 14 | "UserId": "H0BBIHMA0CZ0R0K0MN00C", 15 | "UserName": "tigerone", 16 | "__pytest_meta": {"profile": "example-account", "region": "us-east-1"}, 17 | } 18 | ] 19 | 20 | 21 | @pytest.fixture 22 | def uncached_iam_users(): 23 | return TEST_IAM_USERS 24 | 25 | 26 | @pytest.fixture 27 | def cached_iam_users(request): 28 | request.config.cache.set("cached_iam_users", TEST_IAM_USERS) 29 | return request.config.cache.get("cached_iam_users", None) 30 | 31 | 32 | def test_cache_serializes_and_deserializes_datetimes( 33 | cached_iam_users, uncached_iam_users 34 | ): 35 | assert cached_iam_users == uncached_iam_users 36 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": [ 3 | 
"config:base" 4 | ] 5 | } 6 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | botocore==1.12.75 2 | click==7.1.2 3 | coverage==4.5.3 4 | google-api-python-client==1.12.8 5 | google-auth==1.24.0 6 | google-auth-httplib2==0.0.4 7 | pre-commit==1.17.0 8 | pytest-cov==2.10.0 9 | pytest-json==0.4.0 10 | pytest-metadata==1.10.0 11 | pytest==6.0.2 12 | python-dateutil==2.7.5 13 | ruamel.yaml==0.15.85 14 | wheel==0.33.1 15 | -------------------------------------------------------------------------------- /service_report_generator.py: -------------------------------------------------------------------------------- 1 | """ 2 | Transform results and metadata from a pytest report JSON output and writes Service JSON reports. 3 | 4 | Pytest Service JSON format: 5 | 6 | { 7 | 'name': 'pytest', 8 | 'tool_url': 'https://github.com/mozilla/frost', 9 | 'version': 1, 10 | 'created_at': '2000-01-01 15:50:00.123123', 11 | 'meanings': { 12 | 'pass': { 13 | 'short': 'pass', // text that _could_ be used in a badge 14 | 'long': 'Test passed / no issues found.' 15 | }, 16 | 'warn': { 17 | 'short': 'warn', 18 | 'long': 'Expected test failures, either due to test-level xfail/xpass markers or exemptions.' 19 | }, 20 | 'fail': { 21 | 'short': 'fail', 22 | 'long': 'Critical test failure that should never happen e.g. publicly accessible DB snapshot,' 23 | ' user without MFA in prod.' 24 | }, 25 | 'err': { 26 | 'short': 'err', 27 | 'long': 'Error fetching an resource from AWS.' 28 | } 29 | }, 30 | 'results': [ 31 | { 32 | 'test_name': # unparametrized pytest test name 33 | 'resource_name': # best effort at resource name 34 | 'name': 35 | 'status': 36 | 'value': 37 | 'reason': # pytest test outcome reason if any (e.g. resource fetch failed) 38 | 'markers': # pytest markers on the test e.g. aws service, ruleset 39 | 'metadata': # additional metadata on the resource being tested 40 | 'rationale': # (optional) rationale behind the test. (null if not set) 41 | 'description': # (optional) description of the test (null if not set) 42 | 'severity': # (optional) severity of the test (null if not set) 43 | 'regression': # (optional) regression comment (null if not set) 44 | }, 45 | ... 46 | ] 47 | } 48 | 49 | """ 50 | 51 | import json 52 | import argparse 53 | from collections import defaultdict 54 | 55 | STATUSES_TO_LIST = ["fail", "warn", "err"] 56 | 57 | service_json_template = { 58 | "name": "frost", 59 | "tool_url": "https://github.com/mozilla/frost", 60 | "version": 1, 61 | "created_at": "", 62 | "meanings": { 63 | "pass": {"short": "Pass", "long": "Test passed / no issues found."}, 64 | "warn": { 65 | "short": "Warn", 66 | "long": "Expected test failures, either due to test-level " 67 | "xfail/xpass markers or exemptions.", 68 | }, 69 | "fail": { 70 | "short": "FAIL", 71 | "long": "Critical test failure that should never happen " 72 | "e.g. 
publicly accessible DB snapshot, user without MFA in prod.",
73 |         },
74 |         "err": {"short": "Err", "long": "Error fetching a resource from AWS."},
75 |     },
76 |     "results": [],
77 | }
78 | 
79 | 
80 | def parse_args():
81 |     parser = argparse.ArgumentParser(description=__doc__)
82 | 
83 |     parser.add_argument(
84 |         "--jo",
85 |         "--json-out",
86 |         default="service-report.json",
87 |         dest="json_out",
88 |         help="Service json output filename.",
89 |     )
90 | 
91 |     parser.add_argument(
92 |         "pytest_json",
93 |         metavar="",
94 |         help="Pytest results output in JSON format.",
95 |     )
96 | 
97 |     return parser.parse_args()
98 | 
99 | 
100 | def get_test_status(outcome):
101 |     if outcome == "errored":
102 |         return "err"
103 |     elif outcome in ["xfailed", "xpassed"]:
104 |         return "warn"
105 |     elif outcome in ["passed", "skipped"]:
106 |         return "pass"
107 |     elif outcome == "failed":
108 |         return "fail"
109 |     else:
110 |         raise Exception("Unexpected test outcome %s" % outcome)
111 | 
112 | 
113 | def get_resource_name(name):
114 |     try:
115 |         # e.g. test_elb_instances_attached[elb-name] -> elb-name
116 |         rname = name.split("[")[1][:-1]
117 |         return rname
118 |     except IndexError:
119 |         return name
120 | 
121 | 
122 | def get_result_for_test(test):
123 |     meta = test["metadata"][0]
124 |     return {
125 |         "test_name": meta["unparametrized_name"],
126 |         "resource_name": get_resource_name(meta["parametrized_name"]),
127 |         "name": meta["parametrized_name"],
128 |         "status": get_test_status(meta["outcome"]),
129 |         "value": meta["outcome"],
130 |         "reason": meta["reason"],
131 |         "markers": meta["markers"],
132 |         "metadata": meta["metadata"],
133 |         "rationale": meta["rationale"],
134 |         "description": meta["description"],
135 |         "severity": meta["severity"],
136 |         "regression": meta["regression"],
137 |     }
138 | 
139 | 
140 | def pytest_json_to_service_json(pytest_json):
141 |     service_json_template["created_at"] = pytest_json["report"]["created_at"]
142 |     service_json_template["results"] = []
143 | 
144 |     for test in pytest_json["report"]["tests"]:
145 |         try:
146 |             service_json_template["results"].append(get_result_for_test(test))
147 |         except KeyError:
148 |             pass
149 | 
150 |     return service_json_template
151 | 
152 | 
153 | if __name__ == "__main__":
154 |     args = parse_args()
155 | 
156 |     pytest_json = json.load(open(args.pytest_json, "r"))
157 | 
158 |     service_json = pytest_json_to_service_json(pytest_json)
159 | 
160 |     with open(args.json_out, "w") as fout:
161 |         json.dump(service_json, fout, sort_keys=True, indent=4)
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | import setuptools
4 | from frost import SOURCE_URL, VERSION
5 | 
6 | with open("README.md", "r") as fh:
7 |     long_description = fh.read()
8 | 
9 | install_requires = [
10 |     line.split("==")[0] for line in open("requirements.txt", "r") if ".git" not in line
11 | ]
12 | 
13 | setuptools.setup(
14 |     name="frost",
15 |     version=VERSION,
16 |     author="Firefox Operations Security Team (foxsec)",
17 |     author_email="foxsec+frost@mozilla.com",
18 |     description="tests for checking that third party services the Firefox Operations Security or foxsec team uses are configured correctly",
19 |     long_description=long_description,
20 |     long_description_content_type="text/markdown",
21 |     url=SOURCE_URL,
22 |     license="MPL2",
23 |     packages=setuptools.find_packages(),
24 |     install_requires=install_requires,
25 |     classifiers=[
26 |         "Natural Language :: English",
27 |         "License :: 
OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
28 |         "Operating System :: OS Independent",
29 |         "Programming Language :: Python :: 3.8",
30 |     ],
31 |     python_requires=">=3.8",
32 |     entry_points={"console_scripts": ["frost=frost.cli:cli"],},
33 |     include_package_data=True,
34 | )
--------------------------------------------------------------------------------
/severity.py:
--------------------------------------------------------------------------------
1 | from collections import defaultdict
2 | import warnings
3 | 
4 | import pytest
5 | 
6 | 
7 | # parseable severity levels in order of increasing badness
8 | SEVERITY_LEVELS = ["INFO", "WARN", "ERROR"]
9 | 
10 | 
11 | def load(rules):
12 |     """
13 |     Parses the severities section of the conf file and returns a dict of test name to severity level.
14 | 
15 |     >>> load([{'test_name': 'test_foo', 'severity': 'ERROR'}])
16 |     {'test_foo': 'ERROR'}
17 |     >>> load([{'test_name': '*', 'severity': 'INFO'}])  # doctest:+ELLIPSIS
18 |     defaultdict(<function load.<locals>.<lambda> at 0x...>, {})
19 |     >>> load([
20 |     ... {'test_name': 'test_foo', 'severity': 'ERROR'}, {'test_name': 'test_bar', 'severity': 'INFO'}
21 |     ... ]) == {'test_foo': 'ERROR', 'test_bar': 'INFO'}
22 |     True
23 | 
24 |     Invalid severity levels are skipped with a warning:
25 | 
26 |     >>> load([{'test_name': 'test_foo', 'severity': 'AHHH!'}])
27 |     {}
28 |     >>> # UserWarning: test_name: test_foo | Skipping line with invalid severity level 'AHHH!'
29 | 
30 | 
31 |     Duplicate test names are ignored with a warning:
32 | 
33 |     >>> load([
34 |     ... {'test_name': 'test_foo', 'severity': 'ERROR'}, {'test_name': 'test_foo', 'severity': 'INFO'}
35 |     ... ]) == {'test_foo': 'ERROR'}
36 |     True
37 |     >>> # UserWarning: test_name: test_foo | Skipping line with duplicate test name
38 | 
39 |     Does not check that test names exist (since they might not be collected).
40 |     """
41 |     # dict of test name to severity level
42 |     processed_rules = {}
43 | 
44 |     if not rules:
45 |         return processed_rules
46 | 
47 |     for rule in rules:
48 |         test_name, severity = rule["test_name"], rule["severity"]
49 | 
50 |         if severity not in SEVERITY_LEVELS:
51 |             warnings.warn(
52 |                 "test_name: {} | Skipping line with invalid severity level {!r}".format(
53 |                     test_name, severity
54 |                 )
55 |             )
56 |             continue
57 | 
58 |         if test_name in processed_rules:
59 |             warnings.warn(
60 |                 "test_name: {} | Skipping line with duplicate test name".format(
61 |                     test_name
62 |                 )
63 |             )
64 |             continue
65 | 
66 |         processed_rules[test_name] = severity
67 | 
68 |     if "*" in processed_rules:
69 |         rules_with_default = defaultdict(
70 |             lambda: processed_rules["*"], **processed_rules
71 |         )
72 |         del rules_with_default["*"]
73 |         return rules_with_default
74 |     else:
75 |         return processed_rules
76 | 
77 | 
78 | def add_severity_marker(item):
79 |     """
80 |     Adds severity markers as specified in the severity conf.
81 | 
82 |     Warns when overriding an existing test severity.
83 |     """
84 |     test_name_for_matching = item.originalname or item.name
85 | 
86 |     if test_name_for_matching in item.config.custom_config.severities:
87 |         conf_severity = item.config.custom_config.severities[test_name_for_matching]
88 |         test_severity = item.get_closest_marker("severity")
89 |         if test_severity and test_severity.args[0] != conf_severity:
90 |             warnings.warn(
91 |                 "Overriding existing severity {} for test {}".format(
92 |                     test_severity, test_name_for_matching
93 |                 )
94 |             )
95 | 
96 |         item.add_marker(pytest.mark.severity(conf_severity))
97 | 
--------------------------------------------------------------------------------