├── infra └── cloudcustodian │ ├── cloudcustodian │ ├── __init__.py │ ├── common.py │ ├── cloudcustodian_mailer.py │ ├── cloudcustodian_stack.py │ └── cloudcustodian_sub_stack.py │ ├── requirements-dev.txt │ ├── requirements.txt │ ├── lambda-code │ └── main.py │ ├── .gitignore │ ├── source.bat │ ├── lambda-mailer-code │ ├── requirements-mailer.txt │ ├── config.json │ ├── main-modified.py │ ├── handle-modified.py │ ├── target-modified.py │ ├── mongodb_delivery.py │ ├── sqs_queue_processor-modified.py │ ├── slack_delivery-modified.py │ └── email_delivery-modified.py │ ├── docker │ ├── entrypoint.sh │ └── Dockerfile │ ├── app.py │ ├── tests │ └── eventbridge │ │ ├── trail_event.json │ │ └── send_cloudtrail_event.py │ ├── transform.py │ └── cdk.json ├── .github ├── ISSUE_TEMPLATE │ ├── config.yml │ ├── feature.yml │ └── bug.yml ├── PULL_REQUEST_TEMPLATE.yml └── workflows │ ├── issue_projects_labeler.yml │ ├── docker.yml │ ├── pr.yml │ └── infra.yml ├── docs └── img │ ├── arch.png │ ├── slack-example.png │ ├── li10-logo-light-bg.svg │ └── li10-logo-dark-bg.svg ├── CODEOWNERS ├── .gitignore ├── policies ├── schedule-based │ ├── all │ │ ├── accounts.yml │ │ ├── s3-public-access-audit.yml │ │ ├── rds-deprecated.yml │ │ ├── service-quota.yml │ │ ├── acm-certificate-audit.yml │ │ ├── ec2-public-instance-audit.yml │ │ ├── sg-remove-ingress-rule.yml │ │ ├── ebs-copy-tags.yml │ │ ├── rds-unused-30-days.yml │ │ └── ec2-unused-eip.yml │ └── staging │ │ └── accounts.yml ├── event-based │ ├── sg_ingress.yml │ └── tagging.yml └── templates │ ├── default.slack.j2 │ └── default.html.j2 ├── .pre-commit-config.yaml ├── .gitlab └── .gitlab-ci.yml ├── README.md └── LICENSE /infra/cloudcustodian/cloudcustodian/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /infra/cloudcustodian/requirements-dev.txt: 
-------------------------------------------------------------------------------- 1 | pytest==6.2.5 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | contact_links: [] 3 | -------------------------------------------------------------------------------- /docs/img/arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/li10labs/li10-governance/HEAD/docs/img/arch.png -------------------------------------------------------------------------------- /infra/cloudcustodian/requirements.txt: -------------------------------------------------------------------------------- 1 | aws-cdk-lib==2.132.1 2 | constructs>=10.0.0,<11.0.0 3 | c7n==0.9.35 -------------------------------------------------------------------------------- /docs/img/slack-example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/li10labs/li10-governance/HEAD/docs/img/slack-example.png -------------------------------------------------------------------------------- /infra/cloudcustodian/lambda-code/main.py: -------------------------------------------------------------------------------- 1 | from c7n import handler 2 | 3 | def run(event, context): 4 | return handler.dispatch_event(event, context) -------------------------------------------------------------------------------- /infra/cloudcustodian/.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | package-lock.json 3 | __pycache__ 4 | .pytest_cache 5 | .venv 6 | *.egg-info 7 | 8 | # CDK asset staging directory 9 | .cdk.staging 10 | cdk.out 11 | -------------------------------------------------------------------------------- /CODEOWNERS: 
-------------------------------------------------------------------------------- 1 | # See syntax guidelines for owners file 2 | # https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners 3 | 4 | # These owners will be the default owners for everything in the repo. 5 | * @matthiasbuchner 6 | * @ntmggr 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .tfcache 2 | .env 3 | .venv 4 | .python-version 5 | .venv/** 6 | infra/cloudcustodian/tmp/** 7 | out/ 8 | .DS_STORE 9 | .tox/ 10 | .idea/ 11 | .vscode/ 12 | !.vscode/*.sample 13 | .pytest_cache/ 14 | pip-wheel-metadata/ 15 | pyvenv.cfg 16 | junit 17 | /.vs 18 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.yml: -------------------------------------------------------------------------------- 1 | ## Problem 2 | 3 | ## Solution 4 | 5 | 10 | 11 | ## License 12 | 13 | By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. 
14 | -------------------------------------------------------------------------------- /policies/schedule-based/all/accounts.yml: -------------------------------------------------------------------------------- 1 | accounts: 2 | - account_id: '123456789123' 3 | name: stage 4 | regions: 5 | - us-east-1 6 | role: arn:aws:iam::123456789123:role/governance_security_level_1_role 7 | vars: 8 | NOTIFY_EMAIL: "groumail@test.com" 9 | NOTIFY_SLACK: "slack://#alerts" 10 | INSTANT_SQS_QUEUE: "https://sqs.us-east-1.amazonaws.com/123456789123/CloudcustodianStackMailer-mailerqueue123456-232qsdqd123" -------------------------------------------------------------------------------- /infra/cloudcustodian/source.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | 3 | rem The sole purpose of this script is to make the command 4 | rem 5 | rem source .venv/bin/activate 6 | rem 7 | rem (which activates a Python virtualenv on Linux or Mac OS X) work on Windows. 8 | rem On Windows, this command just runs this batch file (the argument is ignored). 9 | rem 10 | rem Now we don't need to document a Windows command for activating a virtualenv. 
11 | 12 | echo Executing .venv\Scripts\activate.bat for you 13 | .venv\Scripts\activate.bat 14 | -------------------------------------------------------------------------------- /policies/schedule-based/staging/accounts.yml: -------------------------------------------------------------------------------- 1 | accounts: 2 | - account_id: '123456789123' 3 | name: stage 4 | regions: 5 | - us-east-1 6 | role: arn:aws:iam::123456789123:role/governance_security_level_1_role 7 | vars: 8 | NOTIFY_EMAIL: "groumail@test.com" 9 | NOTIFY_SLACK: "slack://#alerts" 10 | INSTANT_SQS_QUEUE: "https://sqs.us-east-1.amazonaws.com/123456789123/CloudcustodianStackMailer-mailerqueue123456-232qsdqd123" 11 | ################### 12 | # BILLING ACCOUNT # 13 | ################### -------------------------------------------------------------------------------- /.github/workflows/issue_projects_labeler.yml: -------------------------------------------------------------------------------- 1 | name: Label issues 2 | on: 3 | issues: 4 | types: 5 | - reopened 6 | - opened 7 | jobs: 8 | label_issues: 9 | runs-on: ubuntu-latest 10 | permissions: 11 | issues: write 12 | steps: 13 | - run: gh issue edit "$NUMBER" --add-label "$LABELS" 14 | env: 15 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 16 | GH_REPO: ${{ github.repository }} 17 | NUMBER: ${{ github.event.issue.number }} 18 | ## Labels need to exist 19 | LABELS: Unassigned 20 | -------------------------------------------------------------------------------- /infra/cloudcustodian/lambda-mailer-code/requirements-mailer.txt: -------------------------------------------------------------------------------- 1 | c7n-mailer 2 | # core deps 3 | jinja2 4 | markupsafe 5 | pyyaml 6 | # yaml 7 | ldap3 8 | pyasn1 9 | redis 10 | jmespath 11 | # for other dependencies 12 | # pkg_resources 13 | # transport datadog - recursive deps 14 | # datadog 15 | # decorator 16 | # requests (recursive deps) needed by datadog slackclient splunk 17 | requests 18 | urllib3 19 | idna 20 | 
charset_normalizer 21 | certifi 22 | # used by splunk mailer transport 23 | # jsonpointer 24 | # jsonpatch 25 | # sendgrid dependencies 26 | # sendgrid 27 | # python_http_client 28 | # ellipticcurve 29 | pymongo[tls,srv,gssapi]==4.6.3 30 | dnspython 31 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature.yml: -------------------------------------------------------------------------------- 1 | name: Feature Request 2 | description: Suggest an idea for Li10 Governance 3 | labels: [enhancement] 4 | 5 | body: 6 | - type: markdown 7 | attributes: 8 | value: | 9 | Thank you for taking the time to fill out this feature enhancement! 10 | - type: textarea 11 | id: describe-feature 12 | attributes: 13 | label: Describe the feature 14 | description: Is your feature request related to a problem? 15 | placeholder: Please describe 16 | value: "It always irritates me when ..." 17 | validations: 18 | required: true 19 | - type: textarea 20 | id: extra-context 21 | attributes: 22 | label: Extra information or context 23 | description: Please feel free to suggest any workarounds or alternatives you might have. 
24 | -------------------------------------------------------------------------------- /policies/schedule-based/all/s3-public-access-audit.yml: -------------------------------------------------------------------------------- 1 | vars: 2 | run_mode: &run_mode 3 | type: pull 4 | notify: ¬ify 5 | type: notify 6 | slack_template: default.slack 7 | to: 8 | - "{NOTIFY_EMAIL}" 9 | - "{NOTIFY_SLACK}" 10 | transport: 11 | type: sqs 12 | queue: "{INSTANT_SQS_QUEUE}" 13 | 14 | policies: 15 | - name: s3-public-access-audit 16 | resource: s3 17 | description: | 18 | Cloud Custodian Public Access S3 Audit 19 | comments: | 20 | Retrieve public S3 buckets 21 | filters: 22 | - or: 23 | - type: global-grants 24 | allow_website: false 25 | actions: 26 | - <<: *notify 27 | action_desc: "Notify only - action is required :eyes:" 28 | violation_desc: "Unauthorized Public S3 Bucket created" 29 | level: "danger" 30 | -------------------------------------------------------------------------------- /policies/schedule-based/all/rds-deprecated.yml: -------------------------------------------------------------------------------- 1 | vars: 2 | run_mode: &run_mode 3 | type: pull 4 | notify: ¬ify 5 | type: notify 6 | slack_template: default.slack 7 | to: 8 | - "{NOTIFY_EMAIL}" 9 | - "{NOTIFY_SLACK}" 10 | subject: "RDS - Deprecated Engine found = [custodian {{ account }} - {{ region }}]" 11 | transport: 12 | type: sqs 13 | queue: "{INSTANT_SQS_QUEUE}" 14 | filters: &filters 15 | - type: engine 16 | key: Status 17 | value: deprecated 18 | 19 | policies: 20 | - name: find-deprecated-versions 21 | resource: aws.rds 22 | filters: 23 | - <<: *filters 24 | actions: 25 | - <<: *notify 26 | action_desc: "Notify only - RDS deprecated version :eyes:" 27 | violation_desc: "RDS RDS deprecated versions" 28 | level: "warning" 29 | -------------------------------------------------------------------------------- /infra/cloudcustodian/lambda-mailer-code/config.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "queue_url": "will be automatically set by the mailer at runtime using the Lambda environment variable", 3 | "from_address": "will be automatically set by the mailer at runtime using the Lambda environment variable", 4 | "role": "will be automatically set by the mailer at runtime using the Lambda environment variable", 5 | "contact_tags": [ 6 | "OwnerContact", 7 | "OwnerEmail", 8 | "SNSTopicARN" 9 | ], 10 | "slack_token": "will be overwriten by the mailer at runtime with value read from secrets manager", 11 | "templates_folders": [ 12 | "c7n_mailer/msg-templates/" 13 | ], 14 | "mongodb_uri": "will be overwriten by the mailer at runtime with value read from secrets manager", 15 | "mongodb_database": "CloudCustodian", 16 | "mongodb_collection": "Events" 17 | } 18 | -------------------------------------------------------------------------------- /policies/schedule-based/all/service-quota.yml: -------------------------------------------------------------------------------- 1 | vars: 2 | run_mode: &run_mode 3 | type: pull 4 | notify: ¬ify 5 | type: notify 6 | slack_template: default.slack 7 | to: 8 | - "{NOTIFY_EMAIL}" 9 | - "{NOTIFY_SLACK}" 10 | subject: "Service Quota - Increased by 25% = [custodian {{ account }} - {{ region }}]" 11 | transport: 12 | type: sqs 13 | queue: "{INSTANT_SQS_QUEUE}" 14 | filters: &filters 15 | - type: service-limit 16 | threshold: 80 17 | 18 | policies: 19 | - name: service-quota-usage 20 | resource: aws.service-quota 21 | filters: 22 | - <<: *filters 23 | actions: 24 | - type: request-limit-increase 25 | percent-increase: 25 26 | - actions: 27 | - <<: *notify 28 | action_desc: "Notying only - Automated remediation has been performed :eyes:" 29 | violation_desc: "Service Quota - Increased by 25%" 30 | level: "good" 31 | -------------------------------------------------------------------------------- 
/policies/schedule-based/all/acm-certificate-audit.yml: -------------------------------------------------------------------------------- 1 | vars: 2 | run_mode: &run_mode 3 | type: pull 4 | notify: ¬ify 5 | type: notify 6 | slack_template: default.slack 7 | to: 8 | - "{NOTIFY_EMAIL}" 9 | - "{NOTIFY_SLACK}" 10 | transport: 11 | type: sqs 12 | queue: "{INSTANT_SQS_QUEUE}" 13 | 14 | 15 | policies: 16 | - name: acm-certificate-audit 17 | resource: acm-certificate 18 | filters: 19 | - or: 20 | - "tag:Audit": absent 21 | - "tag:Audit": empty 22 | - type: value 23 | key: Name 24 | op: regex 25 | value: ".*" 26 | - type: value 27 | key: NotAfter 28 | op: less-than 29 | value_type: expiration 30 | value: 60 31 | mode: 32 | <<: *run_mode 33 | actions: 34 | - <<: *notify 35 | action_desc: "Notify only - action is required :eyes:" 36 | violation_desc: "ACM certs are expiring within 60 days" 37 | level: "warning" 38 | -------------------------------------------------------------------------------- /policies/schedule-based/all/ec2-public-instance-audit.yml: -------------------------------------------------------------------------------- 1 | vars: 2 | run_mode: &run_mode 3 | type: pull 4 | notify: ¬ify 5 | type: notify 6 | template: default.html 7 | slack_template: default.slack 8 | to: 9 | - "{NOTIFY_EMAIL}" 10 | - "{NOTIFY_SLACK}" 11 | subject: "[custodian {{ account }} - {{ region }}] Public EC2 instance found 👀🚨" 12 | transport: 13 | type: sqs 14 | queue: "{INSTANT_SQS_QUEUE}" 15 | eip_filters: &eip_filters 16 | - InstanceId: absent 17 | - AssociationId: absent 18 | 19 | policies: 20 | - name: ec2-public-instance-audit 21 | resource: ec2 22 | mode: 23 | <<: *run_mode 24 | filters: 25 | - or: 26 | - type: subnet 27 | key: "tag:Name" 28 | op: regex 29 | value: ".*pub" 30 | - type: value 31 | key: "PublicIpAddress" 32 | value: not-null 33 | actions: 34 | - <<: *notify 35 | action_desc: "Notify only - action is required 👀🚨" 36 | violation_desc: "Public EC2 instance found" 37 | 
level: "danger" -------------------------------------------------------------------------------- /policies/schedule-based/all/sg-remove-ingress-rule.yml: -------------------------------------------------------------------------------- 1 | vars: 2 | run_mode: &run_mode 3 | type: pull 4 | notify: ¬ify 5 | type: notify 6 | slack_template: default.slack 7 | to: 8 | - "{NOTIFY_EMAIL}" 9 | - "{NOTIFY_SLACK}" 10 | subject: "SG - Ingress Rule open to public = [custodian {{ account }} - {{ region }}]" 11 | transport: 12 | type: sqs 13 | queue: "{INSTANT_SQS_QUEUE}" 14 | filters: &filters 15 | - or: 16 | - type: ingress 17 | Cidr: 18 | value: "0.0.0.0/0" 19 | - type: ingress 20 | CidrV6: 21 | value: "::/0" 22 | 23 | policies: 24 | - name: sg-open-ingress-rule-tag 25 | resource: security-group 26 | filters: 27 | - "tag:c7n_keep_open_sg_rule": absent 28 | - and: *filters 29 | actions: 30 | - type: remove-permissions 31 | ingress: matched 32 | - <<: *notify 33 | action_desc: "Notying only - Automated remediation has been performed :eyes:" 34 | violation_desc: "SG - Ingress Rule open to public" 35 | level: "warning" 36 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v4.5.0 4 | hooks: 5 | # Git style 6 | - id: check-added-large-files 7 | - id: check-merge-conflict 8 | - id: check-vcs-permalinks 9 | - id: forbid-new-submodules 10 | #- id: no-commit-to-branch 11 | # Common errors 12 | - id: end-of-file-fixer 13 | - id: trailing-whitespace 14 | args: [--markdown-linebreak-ext=md] 15 | exclude: CHANGELOG.md 16 | - id: check-yaml 17 | exclude: aws-cf/.*\.(yml|yaml)$|.*\.(yml|yaml) 18 | - id: check-merge-conflict 19 | - id: check-executables-have-shebangs 20 | - id: check-ast 21 | 22 | # Cross platform 23 | - id: check-case-conflict 24 | - id: mixed-line-ending 25 | 
args: [--fix=lf] 26 | 27 | # Security 28 | - id: detect-aws-credentials 29 | args: ["--allow-missing-credentials"] 30 | - id: detect-private-key 31 | 32 | - repo: https://github.com/awslabs/cfn-python-lint 33 | rev: v0.72.10 34 | hooks: 35 | - id: cfn-python-lint 36 | files: aws-cf/.*\.(yml|yaml)$|sam/.*\.(yml|yaml) 37 | -------------------------------------------------------------------------------- /infra/cloudcustodian/docker/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd /governance 4 | 5 | aws sts get-caller-identity 6 | 7 | source .venv/bin/activate 8 | 9 | mkdir .cache 10 | 11 | for dir in /governance/policy/*/ 12 | do 13 | echo "working with policies in directory: $dir" 14 | echo "test policy: $POLICY_FILTER" 15 | 16 | for policy_file in $dir*.yml 17 | do 18 | echo "processing $policy_file" 19 | 20 | if [[ $policy_file != *accounts.yml ]] 21 | then 22 | if [ "$dir$POLICY_FILTER" = $policy_file ]; then 23 | echo "execute (1).$policy_file." 24 | c7n-org run --cache-period 0 --cache-path ./.cache/ -c $dir/accounts.yml -s out -u $policy_file --verbose --debug 25 | elif [ -z "$POLICY_FILTER" ]; then 26 | echo "execute (2) .$policy_file." 27 | c7n-org run --cache-period 0 --cache-path ./.cache/ -c $dir/accounts.yml -s out -u $policy_file --verbose --debug 28 | else 29 | echo "skip .$policy_file." 
30 | fi 31 | else 32 | echo "skipping $policy_file" 33 | fi 34 | done 35 | done 36 | 37 | ls -als /governance/out 38 | echo "Done processing all policies" -------------------------------------------------------------------------------- /.github/workflows/docker.yml: -------------------------------------------------------------------------------- 1 | name: Build Docker Container 2 | 3 | on: 4 | push: 5 | branches: 6 | - "yourbranch" 7 | paths: 8 | - infra/cloudcustodian/docker/** 9 | - policies/schedule-based/** 10 | - .github/workflows/docker.yml 11 | 12 | jobs: 13 | docker: 14 | runs-on: [ self-hosted, ubuntu ] 15 | steps: 16 | - uses: actions/checkout@v3 17 | 18 | - name: configure aws credentials 19 | uses: aws-actions/configure-aws-credentials@v3 20 | with: 21 | role-skip-session-tagging: true 22 | role-to-assume: <> 23 | aws-region: us-east-1 24 | 25 | - name: Login to Amazon ECR 26 | id: login-ecr 27 | uses: aws-actions/amazon-ecr-login@v1 28 | with: 29 | mask-password: "true" # see: https://github.com/aws-actions/amazon-ecr-login#docker-credentials 30 | 31 | - name: Build, tag, and push docker image to Amazon ECR 32 | env: 33 | REGISTRY: ${{ steps.login-ecr.outputs.registry }} 34 | REPOSITORY: governance 35 | IMAGE_TAG: latest 36 | run: | 37 | docker build --platform=linux/amd64 -t $REGISTRY/$REPOSITORY:$IMAGE_TAG -f infra/cloudcustodian/docker/Dockerfile . 
38 | docker push $REGISTRY/$REPOSITORY:$IMAGE_TAG 39 | -------------------------------------------------------------------------------- /infra/cloudcustodian/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM --platform=linux/amd64 amazonlinux:2023 2 | 3 | RUN dnf update expat --releasever 2023.4.20240401 -y 4 | RUN dnf update rpm --releasever 2023.4.20240319 -y 5 | 6 | RUN yum upgrade 7 | RUN yum update 8 | 9 | RUN yum install -y python pip aws-cli 10 | RUN python --version && pip --version && aws --version 11 | 12 | USER 1000 13 | WORKDIR /governance 14 | RUN python -m venv .venv && source .venv/bin/activate && pip install c7n c7n-org && pip list 15 | 16 | RUN sed -i 's/logging.getLogger(\x27botocore\x27).setLevel(logging.ERROR)/logging.getLogger("botocore").setLevel(logging.INFO)/g' .venv/lib/python3.9/site-packages/c7n_org/cli.py 17 | RUN sed -i 's/logging.getLogger(\x27s3transfer\x27).setLevel(logging.WARNING)/logging.getLogger("s3transfer").setLevel(logging.INFO)/g' .venv/lib/python3.9/site-packages/c7n_org/cli.py 18 | RUN sed -i 's/logging.getLogger(\x27custodian.s3\x27).setLevel(logging.ERROR)/logging.getLogger("custodian.s3").setLevel(logging.INFO)/g' .venv/lib/python3.9/site-packages/c7n_org/cli.py 19 | 20 | RUN source .venv/bin/activate && custodian version 21 | RUN aws --version 22 | 23 | COPY --chown=1000 ./policies/schedule-based/ /governance/policy/ 24 | COPY --chown=1000 ./infra/cloudcustodian/docker/entrypoint.sh /governance/entrypoint.sh 25 | RUN chmod +x /governance/entrypoint.sh 26 | 27 | ENTRYPOINT ["/governance/entrypoint.sh"] -------------------------------------------------------------------------------- /infra/cloudcustodian/lambda-mailer-code/main-modified.py: -------------------------------------------------------------------------------- 1 | # Copyright The Cloud Custodian Authors. 
2 | # SPDX-License-Identifier: Apache-2.0 3 | import logging 4 | from c7n_mailer import handle 5 | 6 | import boto3 7 | import os 8 | import json 9 | 10 | if len(logging.getLogger().handlers) > 0: 11 | print("pre-configured") 12 | logging.getLogger().setLevel(logging.DEBUG) 13 | else: 14 | logging.basicConfig(level=logging.DEBUG) 15 | 16 | logger = logging.getLogger('custodian.mailer') 17 | log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' 18 | logging.basicConfig(level=logging.DEBUG, format=log_format) 19 | logging.getLogger('botocore').setLevel(logging.INFO) 20 | 21 | secret_arn = os.environ['SECRET_ARN'] 22 | print("loading secret: ", secret_arn) 23 | 24 | # load once, then reuse cached value until Lambda reloads 25 | sm_client = boto3.client('secretsmanager') 26 | kwargs = {'SecretId': secret_arn} 27 | response = sm_client.get_secret_value(**kwargs) 28 | secrets = json.loads(response['SecretString']) 29 | 30 | def dispatch(event, context): 31 | logger.debug(event) 32 | 33 | if event and "Records" in event: 34 | batch_item_failures = [] 35 | sqs_batch_response = {} 36 | 37 | return handle.start_c7n_mailer(logger, sqs_trigger_messages=event["Records"], secrets=secrets) 38 | 39 | return handle.start_c7n_mailer(logger, secrets=secrets) 40 | -------------------------------------------------------------------------------- /.github/workflows/pr.yml: -------------------------------------------------------------------------------- 1 | # This workflow will be verify that all PRs have at 2 | # least on the label: 'bugs', 'enhancement' before 3 | # they can be merged. 
4 | 5 | name: Verify PR labels 6 | on: 7 | pull_request: 8 | types: [opened, labeled, unlabeled, synchronize] 9 | 10 | jobs: 11 | check_pr_labels: 12 | runs-on: ubuntu-latest 13 | permissions: 14 | contents: write 15 | pull-requests: write 16 | repository-projects: write 17 | name: Verify that the PR has a valid label 18 | steps: 19 | - name: Checkout 20 | uses: actions/checkout@v4 21 | 22 | - name: Change Reason label exists 23 | if: github.base_ref == 'main' 24 | uses: jesusvasquez333/verify-pr-label-action@v1.4.0 25 | id: verify-change-label 26 | with: 27 | github-token: "${{ secrets.GITHUB_TOKEN }}" 28 | valid-labels: 'bug, enhancement, chore' 29 | pull-request-number: '${{ github.event.pull_request.number }}' 30 | disable-reviews: true 31 | 32 | - name: Verify PR label 33 | if: github.base_ref == 'main' 34 | uses: jesusvasquez333/verify-pr-label-action@v1.4.0 35 | id: verify-pr-label 36 | with: 37 | github-token: "${{ secrets.GITHUB_TOKEN }}" 38 | valid-labels: 'major, minor, patch' 39 | pull-request-number: '${{ github.event.pull_request.number }}' 40 | disable-reviews: true 41 | -------------------------------------------------------------------------------- /policies/event-based/sg_ingress.yml: -------------------------------------------------------------------------------- 1 | --- 2 | vars: 3 | mode: &mode 4 | type: cloudtrail 5 | member-role: "{MEMBER_ROLE}" 6 | filters_actions: &filters_actions 7 | filters: 8 | - "tag:c7n_keep_open_sg_rule": absent 9 | - or: 10 | - type: ingress 11 | Cidr: 12 | value: "0.0.0.0/0" 13 | - type: ingress 14 | CidrV6: 15 | value: "::/0" 16 | actions: 17 | - type: remove-permissions 18 | ingress: matched 19 | - type: notify 20 | template: default.html 21 | slack_template: default.slack 22 | to: 23 | - "{NOTIFY_EMAIL}" 24 | - "{NOTIFY_SLACK}" 25 | subject: "SG - Rule Open to Public = [custodian {{ account }} - {{ region }}]" 26 | transport: 27 | type: sqs 28 | queue: "{INSTANT_SQS_QUEUE}" 29 | action_desc: "Rule Removed 
:magic_wand:" 30 | violation_desc: "SG Open rule detected - Removed" 31 | 32 | policies: 33 | - name: sg-open-ingress-rule-tag 34 | resource: security-group 35 | mode: 36 | <<: *mode 37 | events: 38 | - source: ec2.amazonaws.com 39 | event: AuthorizeSecurityGroupIngress 40 | ids: "responseElements.securityGroupRuleSet.items[].groupId" 41 | - source: ec2.amazonaws.com 42 | event: RevokeSecurityGroupIngress 43 | ids: "requestParameters.groupId" 44 | <<: *filters_actions 45 | -------------------------------------------------------------------------------- /policies/schedule-based/all/ebs-copy-tags.yml: -------------------------------------------------------------------------------- 1 | vars: 2 | run_mode: &run_mode 3 | type: pull 4 | notify: ¬ify 5 | type: notify 6 | template: default.html 7 | subject: "[custodian {{ account }} - {{ region }}] EBS volume or snapshop tagged" 8 | slack_template: default.slack 9 | to: 10 | - "{NOTIFY_EMAIL}" 11 | - "{NOTIFY_SLACK}" 12 | transport: 13 | type: sqs 14 | queue: "{INSTANT_SQS_QUEUE}" 15 | 16 | policies: 17 | - name: aws-copy-tags-from-instance-to-ebs-volume 18 | resource: ebs 19 | mode: 20 | <<: *run_mode 21 | filters: 22 | - type: value 23 | key: Attachments[0].Device 24 | value: not-null 25 | actions: 26 | - type: copy-related-tag 27 | resource: ec2 28 | skip_missing: True 29 | key: Attachments[].InstanceId | [0] 30 | tags: '*' 31 | - <<: *notify 32 | action_desc: "Resource tag auto-propagated" 33 | violation_desc: "Resource was missing tags" 34 | level: "good" 35 | 36 | - name: ebs-to-snapshot 37 | resource: ebs-snapshot 38 | mode: 39 | <<: *run_mode 40 | actions: 41 | - type: copy-related-tag 42 | resource: ebs 43 | skip_missing: True 44 | key: VolumeId 45 | tags: '*' 46 | - <<: *notify 47 | action_desc: "Resource tag auto-propagated" 48 | violation_desc: "Resource was missing tags" 49 | level: "good" 50 | -------------------------------------------------------------------------------- /infra/cloudcustodian/app.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright Li10 3 | # SPDX-License-Identifier: Apache-2.0 4 | import os 5 | import aws_cdk as cdk 6 | from cloudcustodian.cloudcustodian_stack import CloudcustodianStack 7 | from cloudcustodian.cloudcustodian_sub_stack import CloudcustodianSubStack 8 | from cloudcustodian.cloudcustodian_mailer import CloudcustodianStackMailer 9 | 10 | import cloudcustodian.common as common 11 | 12 | app = cdk.App() 13 | 14 | tags = { 15 | # "git.commit": os.environ["SHA"], 16 | # "git.job_id": os.environ["JOB"] 17 | } 18 | 19 | 20 | main_stack = CloudcustodianStack( 21 | app, 22 | "CloudcustodianStack", 23 | env=cdk.Environment(account=common.main_stack_account_settings["account"], region=common.main_stack_account_settings["region"]), 24 | tags=tags) 25 | 26 | CloudcustodianStackMailer( 27 | app, 28 | "CloudcustodianStackMailer", 29 | env=cdk.Environment(account=common.main_stack_account_settings["account"], region=common.main_stack_account_settings["region"]), 30 | org_id=main_stack.org_id, 31 | secret_arn=main_stack.secret_arn) 32 | 33 | for account in common.subStackTargets: 34 | for region in common.subStackTargets[account]: 35 | CloudcustodianSubStack( 36 | app, 37 | f"CloudcustodianSubStack-{account}-{region}", 38 | env=cdk.Environment(account=account, region=region), 39 | bus_arn=main_stack.bus_arn, 40 | central_role_arn=main_stack.role_arn) 41 | 42 | cdk.Tags.of(app).add("git.repo_url", os.environ["GIT"]) 43 | 44 | app.synth() 45 | -------------------------------------------------------------------------------- /policies/schedule-based/all/rds-unused-30-days.yml: -------------------------------------------------------------------------------- 1 | vars: 2 | run_mode: &run_mode 3 | type: pull 4 | notify: ¬ify 5 | type: notify 6 | slack_template: default.slack 7 | to: 8 | - "{NOTIFY_EMAIL}" 9 | - "{NOTIFY_SLACK}" 10 | subject: "RDS - Unused database found = 
[custodian {{ account }} - {{ region }}]" 11 | transport: 12 | type: sqs 13 | queue: "{INSTANT_SQS_QUEUE}" 14 | metrics-filters: &metrics-filter 15 | - type: metrics 16 | name: DatabaseConnections 17 | days: 30 18 | value: 0 19 | op: equal 20 | 21 | policies: 22 | # Mark any RDS with no connections over the last 30 days 23 | - name: rds-unused-databases-stop-and-delete-in-30-days 24 | resource: rds 25 | filters: 26 | - type: value 27 | value_type: age 28 | key: InstanceCreateTime 29 | value: 14 30 | op: greater-than 31 | - <<: *metrics-filter 32 | mode: 33 | <<: *run_mode 34 | actions: 35 | - type: mark-for-op 36 | tag: c7n_rds_unused 37 | op: delete 38 | days: 30 39 | - <<: *notify 40 | action_desc: "Notify only - Automated remediation is scheduled :alarm_clock:" 41 | violation_desc: "RDS with no connections over the last 30 days. Marking for deletion in 14 days" 42 | level: "warning" 43 | 44 | # Remove the c7n_rds_unused tag if rds started to receive traffic 45 | - name: unused-c7n-rds-unused-if-in-use 46 | resource: rds 47 | filters: 48 | - "tag:c7n_rds_unused": not-null 49 | - not: 50 | - or: *eip_filters 51 | mode: 52 | <<: *run_mode 53 | actions: 54 | - type: remove-tag 55 | tags: [c7n_rds_unused] 56 | - <<: *notify 57 | action_desc: "Notifying only - RDS won't be removed due to new connections :magic_wand:" 58 | violation_desc: "RDS has finally new connections. 
# Deployment settings shared by the CDK stacks and the mailer.

# Account/region that hosts the central (main) Cloud Custodian stack.
main_stack_account_settings = {
    "account": "123123456789",
    "region": "us-east-1"
}

# Member accounts targeted by the sub-stacks, mapped to the list of
# regions each account is deployed into.
# NOTE(review): camelCase name is inconsistent with the rest of this
# module but is referenced by importers, so it is kept as-is.
subStackTargets = {
    "123456789123" : ["us-east-1", "eu-west-1"], # dev
}

# "detail" section of the EventBridge event pattern used by the
# event-based policies: any of the listed resource-creating API calls
# (plus Health-style eventScopeCode events) triggers the custodian Lambda.
event_pattern_detail = {
    "$or": [
        {
            "eventSource": ["sagemaker.amazonaws.com"],
            "eventName": ["CreateModel", "CreateEndpointConfig", "CreateEndpoint", "CreateTrainingJob", "CreateNotebookInstance", "CreateTransformJob"]
        },
        {
            "eventSource": ["ec2.amazonaws.com"],
            "eventName": ["RunInstances", "CreateSnapshot", "CreateVolume","AuthorizeSecurityGroupIngress","CreateSecurityGroup"],
            # Only match successful EC2 calls (no errorCode present).
            "errorCode": [{ "exists": False }]
        },
        {
            "eventScopeCode": ["ACCOUNT_SPECIFIC", "PUBLIC"]
        },
        {
            "eventSource": ["s3.amazonaws.com"],
            "eventName": ["CreateBucket"]
        },
        {
            "eventSource": ["rds.amazonaws.com"],
            "eventName": ["CreateDBInstance", "CreateDBCluster", "CreateDBSubnetGroup"]
        },
        {
            "eventSource": ["lambda.amazonaws.com"],
            # CloudTrail records Lambda CreateFunction with an API-version suffix.
            "eventName": ["CreateFunction20150331"]
        },
        {
            "eventSource": ["elasticloadbalancing.amazonaws.com"],
            "eventName": ["CreateLoadBalancer", "CreateTargetGroup"]
        },
        {
            "eventSource": ["glue.amazonaws.com"],
            "eventName": ["CreateJob"]
        },
        {
            "eventSource": ["aoss.amazonaws.com"],
            "eventName": ["CreateCollection"]
        },
        {
            "eventSource": ["dynamodb.amazonaws.com"],
            "eventName": ["CreateTable"]
        }
    ]
}


# IAM role names: the central role in the main account, and the roles
# assumed in member accounts for tagging / level-1 security actions.
central_governance_role_name = "central_governance_role"
target_governance_role_name = "governance_tagging_role"
# Sender address verified in SES by the mailer stack.
# NOTE(review): "groumail" looks like a typo for "groupmail" -- confirm
# against the SES identity before changing (runtime value, left as-is).
from_address = "groumail@test.com"

target_governance_security_level_1_role = "governance_security_level_1_role"
violation_desc: "EIP was left unassociated" 65 | level: "good" 66 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug.yml: -------------------------------------------------------------------------------- 1 | name: Bug Report 2 | description: Report an issue about using Li10 Governance 3 | labels: [bug] 4 | 5 | body: 6 | - type: markdown 7 | attributes: 8 | value: | 9 | Thank you for taking the time to fill out this bug report! 10 | - type: textarea 11 | id: describe-bug 12 | attributes: 13 | label: Describe the bug 14 | description: A clear and concise description of what the bug is. 15 | placeholder: Tell us what happened! 16 | value: "When I ordered a coffee, I received a tea instead." 17 | validations: 18 | required: true 19 | - type: textarea 20 | id: expected-bahavior 21 | attributes: 22 | label: What did you expect to happen? 23 | description: A clear and concise description of what you expected to happen. 24 | placeholder: What were you expecting to happen? 25 | value: "I expected to find my keys in my pocket, but instead, I found a banana!" 26 | validations: 27 | required: true 28 | - type: dropdown 29 | id: cloud-provider 30 | attributes: 31 | label: Cloud Provider 32 | description: Which cloud provider are you using? 33 | default: 0 34 | options: 35 | - Amazon Web Services (AWS) 36 | validations: 37 | required: false 38 | - type: textarea 39 | id: version 40 | attributes: 41 | label: Cloud Custodian version and dependency information 42 | description: Please run `custodian version --debug` and paste the output here. 43 | render: shell 44 | validations: 45 | required: true 46 | - type: textarea 47 | id: policy 48 | attributes: 49 | label: Policy 50 | description: Please copy and paste the policy you are trying to run if applicable. Please exclude any account/sensitive information. Your response will be automatically formatted into YAML, so code-formatting markdown is not necessary. 
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
"""
Lambda entry point for the customized c7n-mailer.

``config_setup`` builds the mailer configuration from the bundled
config.json, the Lambda environment and Secrets Manager values;
``start_c7n_mailer`` feeds that configuration into the SQS queue
processor.
"""
import boto3
import json
import os

from .sqs_queue_processor import MailerSqsQueueProcessor


def _apply_override(config, key, fetch, logger, label=None):
    """Best-effort override of ``config[key]``.

    ``fetch`` is a zero-argument callable returning the new value.  A
    missing source key (KeyError) or an absent mapping (TypeError, e.g.
    ``secrets`` is None) leaves the existing value in place and is only
    logged -- these overrides are optional by design.  Previously each
    override used a bare ``except:``, which also swallowed SystemExit /
    KeyboardInterrupt and hid real bugs; narrowed to the failures that
    can actually occur during the lookup.
    """
    label = label or key
    try:
        config[key] = fetch()
        logger.debug("%s updated" % label)
    except (KeyError, TypeError):
        logger.warning("%s not updated" % label)


def config_setup(config=None, logger=None, secrets=None):
    """Load and augment the mailer configuration.

    :param config: pre-built configuration dict; when falsy, config.json
        is loaded from the Lambda task root instead.
    :param logger: logger used for debug/warning output (required).
    :param secrets: mapping of Secrets Manager values; may be None.
    :returns: the (possibly newly loaded) configuration dict.
    """
    task_dir = os.environ.get("LAMBDA_TASK_ROOT")
    # Make the Lambda bundle importable by code spawned from here.
    os.environ["PYTHONPATH"] = "%s:%s" % (task_dir, os.environ.get("PYTHONPATH", ""))
    if not config:
        with open(os.path.join(task_dir, "config.json")) as fh:
            config = json.load(fh)
    if "http_proxy" in config:
        os.environ["http_proxy"] = config["http_proxy"]
    if "https_proxy" in config:
        os.environ["https_proxy"] = config["https_proxy"]

    # Overwrite selected settings with values from Secrets Manager and
    # the Lambda environment; each is optional.
    _apply_override(config, "slack_token",
                    lambda: secrets["MAILER_SLACK_TOKEN"], logger, "slack token")
    _apply_override(config, "mongodb_uri",
                    lambda: secrets["MAILER_MONGODB_URI"], logger)
    _apply_override(config, "queue_url",
                    lambda: os.environ["QUEUE_URL"], logger, "queue url")
    _apply_override(config, "from_address",
                    lambda: os.environ["FROM_ADDRESS"], logger)
    _apply_override(config, "role",
                    lambda: os.environ["MAILER_ROLE_ARN"], logger)
    _apply_override(config, "mongodb_collection",
                    lambda: os.environ["MAILER_MONGODB_COLLECTION"], logger)

    return config


def start_c7n_mailer(logger, config=None, parallel=False, sqs_trigger_messages=None, secrets=None):
    """Start the mailer SQS queue processor.

    :param logger: logger instance (also passed into the processor).
    :param config: optional pre-built config; built via config_setup otherwise.
    :param parallel: process messages in parallel when True.
    :param sqs_trigger_messages: messages delivered by the Lambda SQS trigger.
    :param secrets: Secrets Manager values forwarded to config_setup.
    """
    try:
        session = boto3.Session()
        if not config:
            config = config_setup(logger=logger, secrets=secrets)
        logger.info("c7n_mailer starting...")
        mailer_sqs_queue_processor = MailerSqsQueueProcessor(config, session, logger)
        mailer_sqs_queue_processor.run(parallel, sqs_trigger_messages=sqs_trigger_messages)
    except Exception as e:
        logger.exception("Error starting mailer MailerSqsQueueProcessor(). \n Error: %s \n" % (e))
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0

import traceback

from .email_delivery import EmailDelivery
from .utils import decrypt


class MessageTargetMixin(object):
    def handle_targets(self, message, sent_timestamp, email_delivery=True, sns_delivery=False):
        """Dispatch one decoded SQS message to every delivery backend
        selected by the message's targets (email/SES, SNS, Slack, MongoDB).

        NOTE(review): the ``email_delivery`` flag parameter is reassigned
        below to the EmailDelivery instance and later handed to
        SlackDelivery; when email delivery is disabled, SlackDelivery
        receives the original boolean instead -- confirm that is intended.
        """
        # get the map of email_to_addresses to mimetext messages (with resources baked in)
        # and send any emails (to SES or SMTP) if there are email addresses found
        if email_delivery:
            email_delivery = EmailDelivery(self.config, self.session, self.logger)
            email_delivery.send_c7n_email(message)

        # this sections gets the map of sns_to_addresses to rendered_jinja messages
        # (with resources baked in) and delivers the message to each sns topic
        if sns_delivery:
            from .sns_delivery import SnsDelivery

            sns_delivery = SnsDelivery(self.config, self.session, self.logger)
            sns_message_packages = sns_delivery.get_sns_message_packages(message)
            sns_delivery.deliver_sns_messages(sns_message_packages, message)

        # this section sends a notification to the resource owner via Slack,
        # when any "to" (or owner_absent_contact) address is a slack:// URI
        # or a Slack webhook URL
        if any(
            e.startswith("slack") or e.startswith("https://hooks.slack.com/")
            for e in message.get("action", {}).get("to", [])
            + message.get("action", {}).get("owner_absent_contact", [])
        ):
            from .slack_delivery import SlackDelivery

            # Token decryption intentionally disabled here: the slack token
            # is injected in plain text via Secrets Manager in config_setup.
            # if self.config.get("slack_token"):
            #     self.config["slack_token"] = decrypt(
            #         self.config, self.logger, self.session, "slack_token"
            #     ).strip()

            slack_delivery = SlackDelivery(self.config, self.logger, email_delivery)
            slack_messages = slack_delivery.get_to_addrs_slack_messages_map(message)
            try:
                slack_delivery.slack_handler(message, slack_messages)
            except Exception:
                # Slack failures are logged (stack trace) but never abort
                # the remaining deliveries.
                traceback.print_exc()
                pass

        # MongoDB delivery, selected by any "mongodb..." target address
        if any(
            e.startswith("mongodb") for e in message.get("action", {}).get("to", [])
        ):
            from .mongodb_delivery import MongoDBDelivery

            # Instantiate MongoDBDelivery class
            mongodb_delivery = MongoDBDelivery(self.config, self.session, self.logger)

            # Process MongoDB message packages and deliver to MongoDB
            mongodb_message_packages = mongodb_delivery.get_mongodb_message_packages(message)
            mongodb_delivery.deliver_mongodb_messages(mongodb_message_packages, message)
"principalId": "AROA:ABC123DEF456", 15 | "arn": "arn:aws:sts::123456789012:assumed-role/MyRole/MySessionName", 16 | "accountId": "123456789012", 17 | "accessKeyId": "ABC123DEF456", 18 | "sessionContext": { 19 | "sessionIssuer": { 20 | "type": "Role", 21 | "principalId": "ABC123DEF456", 22 | "arn": "arn:aws:sts::123456789012:assumed-role/MyRole", 23 | "accountId": "123456789012", 24 | "userName": "MyRole" 25 | }, 26 | "webIdFederationData": {}, 27 | "attributes": { 28 | "creationDate": "2023-04-20T09:00:00Z", 29 | "mfaAuthenticated": "false" 30 | } 31 | } 32 | }, 33 | "eventTime": "2023-04-20T10:00:00Z", 34 | "eventSource": "ec2.amazonaws.com", 35 | "eventName": "AuthorizeSecurityGroupIngress", 36 | "awsRegion": "us-east-1", 37 | "sourceIPAddress": "1.2.3.4", 38 | "userAgent": "aws-cli/1.20.0 Python/3.8.8 Darwin/20.3.0 botocore/1.21.0", 39 | "requestParameters": { 40 | "groupId": "sg-0123456789abcdef0", 41 | "ipPermissions": { 42 | "items": [{ 43 | "ipProtocol": "tcp", 44 | "fromPort": 22, 45 | "toPort": 22, 46 | "groups": [], 47 | "ipRanges": { 48 | "items": [{ 49 | "cidrIp": "0.0.0.0/0" 50 | }] 51 | }, 52 | "ipv6Ranges": [], 53 | "prefixListIds": [] 54 | }] 55 | } 56 | }, 57 | "responseElements": { 58 | "requestId": "12345678-1234-1234-1234-123456789012", 59 | "_return": true, 60 | "securityGroupRuleSet": { 61 | "items": [{ 62 | "groupOwnerId": "123456789012", 63 | "groupId": "sg-0123456789abcdef0", 64 | "securityGroupRuleId": "sgr-0123456789abcdef0", 65 | "isEgress": false, 66 | "ipProtocol": "tcp", 67 | "fromPort": 22, 68 | "toPort": 22, 69 | "cidrIpv4": "0.0.0.0/0" 70 | }] 71 | } 72 | }, 73 | "requestID": "12345678-1234-1234-1234-123456789012", 74 | "eventID": "12345678-1234-1234-1234-123456789012", 75 | "readOnly": false, 76 | "eventType": "AwsApiCall", 77 | "managementEvent": true, 78 | "recipientAccountId": "123456789012", 79 | "eventCategory": "Management", 80 | "sessionCredentialFromConsole": "true" 81 | } 82 | } 83 | 
#!/usr/bin/env python3
# Copyright Li10
# SPDX-License-Identifier: Apache-2.0
"""Combine Cloud Custodian policy files from a directory into a single
JSON document printed on stdout (captured as the Lambda's config.json)."""
import argparse
import logging
import os
import json
import yaml
import c7n.policy
from c7n.config import Config


def setup_logging(log_level):
    """Configure the root logger (when *log_level* is truthy) and return
    a logger for this module."""
    if log_level:
        logging.basicConfig(level=log_level)
    return logging.getLogger(__name__)


def parse_arguments():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(description="Combine Cloud Custodian policy files.")
    parser.add_argument("--policy-dir", default="../../policies/event-based",
                        help="Path to directory containing policy files")
    parser.add_argument("--log-level", default="INFO",
                        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
                        help="Logging level")
    return parser.parse_args()


def load_policy_file(policy_file, vars):
    """Load (and validate) one policy file, interpolating *vars*."""
    config = Config.empty()
    return c7n.policy.load(options=config, path=policy_file, validate=True, vars=vars)


def merge_policies(policy_files, vars):
    """Merge policies from multiple files into one list.

    The shared ``filters_actions`` vars entry is renamed per source file
    so identically named YAML anchors from different files cannot collide
    in the combined output.
    """
    combined_policies = []
    for policy_file in policy_files:
        policy = load_policy_file(policy_file, vars)
        renamed_filters_actions = f"filters_actions_{os.path.splitext(os.path.basename(policy_file))[0]}"
        for p in policy:
            if 'vars' in p.data:
                p.data['vars'][renamed_filters_actions] = p.data['vars'].pop('filters_actions', {})
        combined_policies.extend(policy)
    return combined_policies


def transform(policy_dir, vars, log_level=None):
    """Combine policy files under *policy_dir*, rename anchors, and print
    the result as JSON on stdout.

    :param policy_dir: directory whose plain files are loaded as policies.
    :param vars: interpolation variables for the policy loader.
    :param log_level: optional level passed to :func:`setup_logging`.
    """
    # FIX: this local was previously named ``logging``, shadowing the
    # stdlib module within this function.
    logger = setup_logging(log_level)

    policy_files = [os.path.join(policy_dir, f) for f in os.listdir(policy_dir)
                    if os.path.isfile(os.path.join(policy_dir, f))]
    if not policy_files:
        logger.error("No policy files found in the directory.")
        return

    combined_policies = merge_policies(policy_files, vars)
    json_policies = json.dumps(
        {'execution-options': {},
         'policies': [p.data for p in combined_policies]}, indent=2)

    logger.debug("Combined policies:")
    # stdout is the delivery channel: the CI pipeline redirects it into
    # lambda-code/config.json.
    print(json_policies)


if __name__ == "__main__":
    args = parse_arguments()

    external_policy_vars = {
        "NOTIFY_EMAIL": "groupmail@test.com",
        "NOTIFY_SLACK": "slack://#alerts",
        "INSTANT_SQS_QUEUE": "https://sqs.us-east-1.amazonaws.com/123456789123/CloudcustodianStackMailer-mailerqueue723rw-X9WSmXGNE3cb",
        "MEMBER_ROLE": "arn:aws:iam::{account_id}:role/governance_tagging_role",
        "MONGODB": "mongodb://"
    }

    transform(args.policy_dir, external_policy_vars, log_level=args.log_level)
class MongoDBDelivery:
    """Delivers c7n-mailer policy-violation summaries to MongoDB.

    Connection settings come from the mailer config keys ``mongodb_uri``,
    ``mongodb_database`` and ``mongodb_collection``.
    """

    def __init__(self, config, session, logger):
        """Capture config/session/logger and connect when a URI is set.

        :param config: mailer configuration mapping.
        :param session: boto3 session (kept for interface parity with the
            other delivery backends; unused here).
        :param logger: logger for debug/warning output.
        """
        self.config = config
        self.logger = logger
        self.session = session
        self.mongodb_uri = self.config.get("mongodb_uri")
        self.mongodb_database = self.config.get("mongodb_database")
        self.mongodb_collection = self.config.get("mongodb_collection")

        # Default the handles to None so delivery can detect a missing
        # configuration explicitly (previously they were simply never
        # assigned, and delivery crashed with AttributeError).
        self.client = None
        self.db = None
        self.collection = None
        if self.mongodb_uri:
            self.logger.debug("Connecting to MongoDB URI: %s", self.mongodb_uri)
            self.client = MongoClient(self.mongodb_uri)
            self.logger.debug("Connection successful")
            self.db = self.client[self.mongodb_database]
            self.logger.debug("Using database: %s", self.mongodb_database)
            self.collection = self.db[self.mongodb_collection]
            self.logger.debug("Using collection: %s", self.mongodb_collection)

    def deliver_mongodb_messages(self, mongodb_message_packages, sqs_message):
        """Insert each prepared document into the configured collection.

        :param mongodb_message_packages: documents from
            :meth:`get_mongodb_message_packages`.
        :param sqs_message: original decoded SQS message (for logging).
        """
        if not mongodb_message_packages:
            return
        if self.collection is None:
            # BUG FIX: without a configured mongodb_uri this used to raise
            # AttributeError; now it is logged and skipped.
            self.logger.warning(
                "mongodb_uri not configured; dropping %d MongoDB message(s)",
                len(mongodb_message_packages))
            return
        self.logger.info(
            "Sending account:{account} policy:{policy} {resource}:{quantity} to MongoDB".format(
                account=sqs_message.get("account", ""),
                policy=sqs_message["policy"]["name"],
                resource=sqs_message["policy"]["resource"],
                quantity=len(sqs_message["resources"]),
            )
        )
        for message in mongodb_message_packages:
            self.logger.debug("Inserting message into MongoDB: %s", message)
            self.collection.insert_one(message)
            self.logger.debug("Message inserted successfully")

    def get_mongodb_message_packages(self, sqs_message):
        """Build the MongoDB document(s) for one decoded SQS message.

        :param sqs_message: decoded c7n notify message.
        :returns: list with a single document summarizing the violation.
        """
        timestamp = time.time()
        mongodb_rendered_messages = []

        decoded_message = json.dumps(sqs_message)
        self.logger.debug("Decoded SQS Message: %s", decoded_message)

        mongodb_message = {
            # Composite key: insertion timestamp + custodian execution id.
            "_id": {
                "ts": timestamp,
                "execution_id": sqs_message["execution_id"]
            },
            "account_name": sqs_message["account"],
            "account_id": sqs_message["account_id"],
            "region": sqs_message["region"],
            "source": sqs_message["policy"]["resource"],
            "policy": sqs_message["policy"]["filters"],
        }
        if sqs_message.get("event", None) is not None:
            # Event-driven policy: record the triggering API call and mode.
            self.logger.debug("SQS message contains event")
            mongodb_message["event"] = sqs_message["event"]["detail"]["eventName"]
            mongodb_message["mode_type"] = sqs_message["policy"]["mode"]["type"]
        else:
            self.logger.debug("SQS message does not contain event")
            mongodb_message["mode_type"] = "scheduled"

        mongodb_rendered_messages.append(mongodb_message)

        return mongodb_rendered_messages
"@aws-cdk/aws-ecs:disableExplicitDeploymentControllerForCircuitBreaker": true, 37 | "@aws-cdk/aws-iam:importedRoleStackSafeDefaultPolicyName": true, 38 | "@aws-cdk/aws-s3:serverAccessLogsUseBucketPolicy": true, 39 | "@aws-cdk/aws-route53-patters:useCertificate": true, 40 | "@aws-cdk/customresources:installLatestAwsSdkDefault": false, 41 | "@aws-cdk/aws-rds:databaseProxyUniqueResourceName": true, 42 | "@aws-cdk/aws-codedeploy:removeAlarmsFromDeploymentGroup": true, 43 | "@aws-cdk/aws-apigateway:authorizerChangeDeploymentLogicalId": true, 44 | "@aws-cdk/aws-ec2:launchTemplateDefaultUserData": true, 45 | "@aws-cdk/aws-secretsmanager:useAttachedSecretResourcePolicyForSecretTargetAttachments": true, 46 | "@aws-cdk/aws-redshift:columnId": true, 47 | "@aws-cdk/aws-stepfunctions-tasks:enableEmrServicePolicyV2": true, 48 | "@aws-cdk/aws-ec2:restrictDefaultSecurityGroup": true, 49 | "@aws-cdk/aws-apigateway:requestValidatorUniqueId": true, 50 | "@aws-cdk/aws-kms:aliasNameRef": true, 51 | "@aws-cdk/aws-autoscaling:generateLaunchTemplateInsteadOfLaunchConfig": true, 52 | "@aws-cdk/core:includePrefixInUniqueNameGeneration": true, 53 | "@aws-cdk/aws-efs:denyAnonymousAccess": true, 54 | "@aws-cdk/aws-opensearchservice:enableOpensearchMultiAzWithStandby": true, 55 | "@aws-cdk/aws-lambda-nodejs:useLatestRuntimeVersion": true, 56 | "@aws-cdk/aws-efs:mountTargetOrderInsensitiveLogicalId": true, 57 | "@aws-cdk/aws-rds:auroraClusterChangeScopeOfInstanceParameterGroupWithEachParameters": true, 58 | "@aws-cdk/aws-appsync:useArnForSourceApiAssociationIdentifier": true, 59 | "@aws-cdk/aws-rds:preventRenderingDeprecatedCredentials": true, 60 | "@aws-cdk/aws-codepipeline-actions:useNewDefaultBranchForCodeCommitSource": true, 61 | "@aws-cdk/aws-cloudwatch-actions:changeLambdaPermissionLogicalIdForLambdaAction": true, 62 | "@aws-cdk/aws-codepipeline:crossAccountKeysDefaultValueToFalse": true 63 | } 64 | } 65 | -------------------------------------------------------------------------------- 
"""Send a canned CloudTrail-style test event to an EventBridge bus."""
import argparse
import json
import logging
from datetime import datetime


def setup_logging(log_level):
    """Configure root logging at *log_level* and return a module logger."""
    logging.basicConfig(level=log_level)
    return logging.getLogger(__name__)


def parse_arguments():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(description="Send JSON payload to EventBridge.")
    parser.add_argument("--trail-event", default="./trail_event.json",
                        help="Path to JSON file containing event details.")
    parser.add_argument("--event-bus", default="cloud-custodian-bus",
                        help="Name of the EventBridge bus to send the event to")
    parser.add_argument("--region", default="us-east-1", help="AWS region to use")
    parser.add_argument("--profile", default=None, help="AWS profile to use")
    parser.add_argument("--log-level", default="INFO",
                        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
                        help="Logging level")
    return parser.parse_args()


def read_event_from_file(file_path):
    """Return the event detail parsed from the JSON file at *file_path*.

    :raises FileNotFoundError: when the file does not exist.
    :raises json.JSONDecodeError: when the file is not valid JSON.

    BUG FIX: the original logged the error, then fell through to
    ``return event_detail`` with the variable unbound, raising
    ``UnboundLocalError`` (and referenced a global ``logger`` that only
    exists when run as a script).  Errors are now logged and re-raised.
    """
    try:
        with open(file_path, 'r') as file:
            return json.load(file)
    except FileNotFoundError:
        logging.error("File not found. Please provide a valid JSON file path.")
        raise
    except json.JSONDecodeError:
        logging.error("Invalid JSON format in the provided file.")
        raise


def send_eventbridge_event(event_detail, event_bus_name, region, profile):
    """Wrap *event_detail* as an 'AWS API Call via CloudTrail' event and
    put it on the named EventBridge bus.

    :param event_detail: dict placed (JSON-encoded) in the event Detail.
    :param event_bus_name: EventBridge bus name to resolve and target.
    :param region: AWS region for the boto3 session.
    :param profile: AWS CLI profile name, or None for the default chain.
    """
    # Deferred import so the JSON helpers above remain usable (and
    # testable) without boto3 installed.
    import boto3

    # Initialize the EventBridge client with the specified event bus name
    session = boto3.Session(region_name=region, profile_name=profile)
    client = session.client('events')

    try:
        # Get the ARN of the EventBridge bus
        event_bus_arn = client.describe_event_bus(Name=event_bus_name)['Arn']
        logging.debug(f"EventBridge bus '{event_bus_name}' has arn of: {event_bus_arn}")
    except client.exceptions.ResourceNotFoundException as e:
        logging.error(f"EventBridge bus '{event_bus_name}' not found: {e}")
        return

    # Define the event details
    event = {
        'Entries': [{
            'Time': datetime.now().isoformat(),
            'Source': 'local.test',
            'DetailType': 'AWS API Call via CloudTrail',
            'Detail': json.dumps(event_detail),
            'EventBusName': event_bus_arn
        }]
    }

    # Send the event to the specified EventBridge bus
    response = client.put_events(Entries=event['Entries'])

    # Check the response
    if response['FailedEntryCount'] == 0:
        logging.info(f"Event sent successfully to EventBridge bus '{event_bus_name}'.")
    else:
        logging.error(f"Failed to send event to EventBridge bus '{event_bus_name}': {response['Entries']}")


if __name__ == "__main__":
    args = parse_arguments()
    logger = setup_logging(args.log_level)
    event_detail = read_event_from_file(args.trail_event)

    send_eventbridge_event(event_detail, args.event_bus, args.region, args.profile)
from aws_cdk import (
    Stack,
    aws_sqs as sqs,
    aws_lambda as _lambda,
    aws_iam as iam,
    Duration,
    aws_lambda_event_sources as event_sources,
    aws_ses as ses
)

from constructs import Construct

from cloudcustodian import common

class CloudcustodianStackMailer(Stack):
    """CDK stack for the c7n-mailer: an SES sender identity, an SQS
    notification queue writable from the whole AWS Organization, and the
    mailer Lambda consuming that queue."""

    def __init__(self, scope: Construct, construct_id: str, org_id: str, secret_arn: str, **kwargs) -> None:
        """
        :param org_id: AWS Organization id allowed to send to the queue.
        :param secret_arn: Secrets Manager secret holding the Slack token
            and MongoDB URI read by the mailer at runtime.
        """
        super().__init__(scope, construct_id, **kwargs)

        # SES identity for the notification sender address (requires the
        # address to be verified before emails are delivered).
        ses_identity = ses.EmailIdentity(self, "ses_email_notification", identity=ses.Identity.email(common.from_address))

        inline_policies = {
            'read_sqs': iam.PolicyDocument(
                statements=[
                    # NOTE(review): ReceiveMessage on "*" is broader than
                    # the single mailer queue -- consider scoping it down.
                    iam.PolicyStatement(
                        sid="sqs",
                        effect=iam.Effect.ALLOW,
                        actions=["sqs:ReceiveMessage"],
                        resources=["*"]
                    ),
                    iam.PolicyStatement(
                        sid="secret",
                        effect=iam.Effect.ALLOW,
                        actions=["secretsmanager:GetSecretValue"],
                        resources=[secret_arn]
                    ),
                    iam.PolicyStatement(
                        sid="ses",
                        effect=iam.Effect.ALLOW,
                        actions=["ses:SendRawEmail"],
                        resources=[ses_identity.email_identity_arn]
                    )
                ]
            )
        }

        # Execution role for the mailer Lambda (basic logging + the
        # inline SQS/Secrets/SES permissions above).
        role = iam.Role(self,
            id="cloud_custodian_mailer_lambda",
            description="give permissions to the cloud custodian lambda to send notifications",
            managed_policies=[iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole")],
            inline_policies=inline_policies,
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com")
        )

        queue = sqs.Queue(self, "mailer_queue", enforce_ssl=True)

        # Let the mailer role itself use the queue.
        # NOTE(review): "sqs:*" on resources "*" in both queue policies is
        # very broad -- confirm before tightening, since c7n notify
        # actions in member accounts must be able to SendMessage here.
        queue.add_to_resource_policy(iam.PolicyStatement(
            actions=["sqs:*"],
            effect=iam.Effect.ALLOW,
            resources=["*"],
            principals=[role] ))

        # Allow any principal inside the Organization to publish
        # notifications to the queue.
        queue.add_to_resource_policy(iam.PolicyStatement(
            actions=["sqs:*"],
            effect=iam.Effect.ALLOW,
            resources=["*"],
            principals=[iam.AnyPrincipal()],
            conditions={
                "StringEquals": {
                    "aws:PrincipalOrgID": org_id
                }
            }
        ))

        # The mailer itself; entry point is dispatch() in
        # lambda-mailer-code/main.py (the CI-renamed main-modified.py).
        mailer_lambda = _lambda.Function(self,
            id="mailer_lambda",
            runtime=_lambda.Runtime.PYTHON_3_12,
            handler="main.dispatch",
            code=_lambda.Code.from_asset("lambda-mailer-code"),
            timeout=Duration.seconds(30),
            role=role,
            environment={
                "SECRET_ARN": secret_arn,
                "QUEUE_URL": queue.queue_url,
                "FROM_ADDRESS": common.from_address,
                "MAILER_ROLE_ARN":role.role_arn
            }
        )

        # Trigger the mailer whenever messages arrive on the queue.
        mailer_lambda.add_event_source(event_sources.SqsEventSource(queue))
stage: deploy-infra 26 | image: amazonlinux:2023 27 | extends: .common 28 | variables: 29 | MyEnv: dev 30 | rules: 31 | - if: $CI_COMMIT_BRANCH == "main" 32 | #when: manual 33 | allow_failure: true 34 | # changes: 35 | # - template.yaml 36 | # - .gitlab.yml 37 | script: 38 | - cd infra/cloudcustodian 39 | - echo sha= ${CI_COMMIT_SHA} 40 | - echo url= ${CI_PROJECT_ROOT_NAMESPACE} ${CI_PROJECT_NAME} 41 | - echo id= ${CI_JOB_ID} 42 | 43 | - python -m pip install -r requirements.txt 44 | - python transform.py > lambda-code/config.json 45 | - pip install c7n==0.9.35 -t ./tmp/lambda-code/ --no-deps --no-compile 46 | - cp -r ./tmp/lambda-code/c7n ./lambda-code/c7n/ 47 | 48 | - pip install -r ./lambda-mailer-code/requirements-mailer.txt -t ./lambda-mailer-code/ --no-deps --no-compile 49 | - mv ./lambda-mailer-code/main-modified.py ./lambda-mailer-code/main.py 50 | - mv ./lambda-mailer-code/handle-modified.py ./lambda-mailer-code/c7n_mailer/handle.py 51 | - mv ./lambda-mailer-code/slack_delivery-modified.py ./lambda-mailer-code/c7n_mailer/slack_delivery.py 52 | - mv ./lambda-mailer-code/sqs_queue_processor-modified.py ./lambda-mailer-code/c7n_mailer/sqs_queue_processor.py 53 | - mv ./lambda-mailer-code/target-modified.py ./lambda-mailer-code/c7n_mailer/target.py 54 | - mv ./lambda-mailer-code/email_delivery-modified.py ./lambda-mailer-code/c7n_mailer/email_delivery.py 55 | - mv ./lambda-mailer-code/mongodb_delivery.py ./lambda-mailer-code/c7n_mailer/mongodb_delivery.py 56 | - mv ../../policies/templates/*.j2 ./lambda-mailer-code/c7n_mailer/msg-templates/ 57 | 58 | - export SHA=${CI_COMMIT_SHA} 59 | - export GIT=${CI_PROJECT_ROOT_NAMESPACE}/${CI_PROJECT_NAME} 60 | - export JOB=${CI_JOB_ID} 61 | 62 | - cdk deploy --require-approval=never --all 63 | 64 | docker: 65 | stage: docker 66 | image: docker 67 | services: 68 | - docker:dind 69 | # extends: .common 70 | variables: 71 | MyEnv: dev 72 | REGISTRY: 123456789123.dkr.ecr.us-east-1.amazonaws.com 73 | REPOSITORY: governance 74 
| IMAGE_TAG: latest 75 | rules: 76 | - if: $CI_COMMIT_BRANCH == "main" 77 | #when: manual 78 | allow_failure: true 79 | # changes: 80 | # - template.yaml 81 | # - .gitlab.yml 82 | script: 83 | # - yum install -y docker 84 | # - systemctl start docker 85 | - apk add --no-cache aws-cli 86 | - eval aws configure set aws_access_key_id \$${MyEnv}_AWS_ACCESS_KEY_ID 87 | - eval aws configure set aws_secret_access_key \$${MyEnv}_AWS_SECRET_ACCESS_KEY 88 | - aws sts get-caller-identity 89 | - docker --help 90 | - whoami 91 | # - apt-get update && apt-get install -y aws-cli 92 | - aws --version 93 | - aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin 123456789123.dkr.ecr.us-east-1.amazonaws.com 94 | - docker build --platform=linux/amd64 -t $REGISTRY/$REPOSITORY:$IMAGE_TAG -f infra/cloudcustodian/docker/Dockerfile . 95 | - docker push $REGISTRY/$REPOSITORY:$IMAGE_TAG 96 | -------------------------------------------------------------------------------- /policies/templates/default.slack.j2: -------------------------------------------------------------------------------- 1 | {%- macro getTag(resource, tagKey) -%} 2 | {%- if resource.get('Tags') -%} 3 | {%- for t in resource.get('Tags') -%} 4 | {%- if t.get('Key') == tagKey -%} 5 | {{ t.get('Value') }} 6 | {%- endif -%} 7 | {%- endfor -%} 8 | {%- endif -%} 9 | {%- endmacro -%} 10 | 11 | {% set ids = [] %} 12 | {% set _ = ids.append(resources[0].InstanceId) %} 13 | {% if event %} 14 | {% set _ = ids.append(event.detail.requestParameters.bucketName) %} 15 | {% set _ = ids.append(event.detail.responseElements.dBInstanceArn) %} 16 | {% set _ = ids.append(event.detail.responseElements.snapshotId) %} 17 | {% set _ = ids.append(event.detail.requestParameters.functionName) %} 18 | {% set _ = ids.append(event.detail.responseElements.volumeId) %} 19 | {% endif %} 20 | 21 | {% set sagemaker_ids = [] %} 22 | {% if event %} 23 | {% set _ = sagemaker_ids.append(event.detail.responseElements.modelArn) 
%} 24 | {% set _ = sagemaker_ids.append(event.detail.responseElements.endpointConfigArn) %} 25 | {% set _ = sagemaker_ids.append(event.detail.responseElements.endpointArn) %} 26 | {% set _ = sagemaker_ids.append(event.detail.responseElements.transformJobArn) %} 27 | {% set _ = sagemaker_ids.append(event.detail.responseElements.trainingJobArn) %} 28 | {% set _ = sagemaker_ids.append(event.detail.responseElements.notebookInstanceArn) %} 29 | {% endif %} 30 | 31 | {%- set nameTag = getTag(resources[0],"Name") -%} 32 | 33 | { 34 | "attachments":[ 35 | { 36 | "fallback":"{{ policy['resource'] }} - {{ action.violation_desc }}", 37 | "color":"{{ action.level }}", 38 | "fields":[ 39 | { 40 | "title":"Finding", 41 | "value":"{{ action.violation_desc }}", 42 | "short": false 43 | } 44 | ,{ 45 | "title":"Account", 46 | "value":"{{ account }} {{ account_id }}", 47 | "short": true 48 | } 49 | ,{ 50 | "title":"Region", 51 | "value":"{{ region }}", 52 | "short": true 53 | } 54 | ,{ 55 | "title":"Action Taken", 56 | "value":"{{ action.action_desc }}" 57 | } 58 | {% if policy['resource'] == "acm-certificate" %} 59 | ,{ 60 | "title":"Domain", 61 | "value":"```{{ resources[0].DomainName }}```", 62 | } 63 | {% endif %} 64 | {%- if nameTag -%} 65 | ,{ 66 | "title":"Name Tag", 67 | "value":"{{ nameTag }}", 68 | "short": true 69 | } 70 | {%- endif -%} 71 | ,{ 72 | "title":"Identifier", 73 | {% if policy['resource'] == "app-elb-target-group" and event.detail.eventName == "CreateTargetGroup" %} 74 | "value":"```{{ event.detail.responseElements.targetGroups[0].targetGroupArn }}```", 75 | 76 | {% elif policy['resource'] == "app-elb" and event.detail.eventName == "CreateLoadBalancer" %} 77 | "value":"```{{ event.detail.responseElements.loadBalancers[0].loadBalancerArn }}```", 78 | 79 | {% elif policy['resource'] == "elastic-ip" %} 80 | "value":"```{{ resources | selectattr('PublicIp') | map(attribute='PublicIp') | list }}```", 81 | 82 | {% elif policy['resource'] == "s3" %} 83 | 
"value":"```{{ resources[0].Name }}```", 84 | 85 | {% elif policy['resource'] == "acm-certificate" %} 86 | "value":"```{{ resources[0].CertificateArn }}```", 87 | 88 | {% elif policy['resource'].startswith("sagemaker") %} 89 | "value":"```{{ sagemaker_ids | join('') }}```", 90 | 91 | {% elif policy['resource'] == "dynamodb-table" %} 92 | "value":"```{{event.detail.responseElements.tableDescription.tableArn}}```", 93 | 94 | {% elif policy['resource'] == "security-group" and event.detail.eventName == "CreateSecurityGroup" %} 95 | "value":"```groupName: {{event.detail.requestParameters.groupName}} groupId: {{event.detail.responseElements.groupid}} ```", 96 | 97 | {% elif policy['resource'] == "security-group" and event.detail.eventName == "AuthorizeSecurityGroupIngress" %} 98 | "value":"```groupId: {{event.detail.requestParameters.groupId}}\nRemoved: {{event.detail.requestParameters.ipPermissions}}```", 99 | 100 | {% else %} 101 | "value":"```{{ ids | join('') }}```", 102 | {% endif %} 103 | } 104 | ] 105 | } 106 | ], 107 | "channel":"{{ recipient }}", 108 | "username":"{{ action.violation_desc }}" 109 | } 110 | -------------------------------------------------------------------------------- /policies/event-based/tagging.yml: -------------------------------------------------------------------------------- 1 | --- 2 | vars: 3 | mode: &mode 4 | type: cloudtrail 5 | member-role: "{MEMBER_ROLE}" 6 | filters_actions: &filters_actions 7 | filters: 8 | - tag:li10.autotag: absent 9 | actions: 10 | - type: auto-tag-user 11 | tag: li10.autotag 12 | - type: notify 13 | to: 14 | - "{NOTIFY_EMAIL}" 15 | - "{NOTIFY_SLACK}" 16 | template: default.html 17 | slack_template: default.slack 18 | subject: "[custodian {{ account }}] Automated tagging - {{ region }}" 19 | transport: 20 | type: sqs 21 | queue: "{INSTANT_SQS_QUEUE}" 22 | action_desc: "Resource Auto-Tagged :magic_wand:" 23 | violation_desc: "Resource was missing Owner tag" 24 | policies: 25 | - name: sagemaker-model-tag 26 | # 
must match the matching resource name in the CC doc 27 | # if it does not match, actions won't be executed 28 | resource: sagemaker-model 29 | mode: 30 | <<: *mode 31 | events: 32 | # configure EventBridge Rule 33 | - source: sagemaker.amazonaws.com 34 | event: "CreateModel" 35 | # allow Lambda to extract information about resource to tag 36 | ids: "responseElements.modelArn" 37 | <<: *filters_actions 38 | 39 | - name: sagemaker-endpoint-config-tag 40 | resource: sagemaker-endpoint-config 41 | mode: 42 | <<: *mode 43 | events: 44 | - source: sagemaker.amazonaws.com 45 | event: "CreateEndpointConfig" 46 | ids: "responseElements.endpointConfigArn" 47 | <<: *filters_actions 48 | 49 | - name: sagemaker-endpoint-tag 50 | resource: sagemaker-endpoint 51 | mode: 52 | <<: *mode 53 | events: 54 | - source: sagemaker.amazonaws.com 55 | event: "CreateEndpoint" 56 | ids: "responseElements.endpointArn" 57 | <<: *filters_actions 58 | 59 | - name: sagemaker-training-job-tag 60 | resource: sagemaker-job 61 | mode: 62 | <<: *mode 63 | events: 64 | - source: sagemaker.amazonaws.com 65 | event: "CreateTrainingJob" 66 | ids: "responseElements.trainingJobArn" 67 | <<: *filters_actions 68 | 69 | - name: sagemaker-notebook-tag 70 | resource: sagemaker-notebook 71 | mode: 72 | <<: *mode 73 | events: 74 | - source: sagemaker.amazonaws.com 75 | event: "CreateNotebookInstance" 76 | ids: "responseElements.notebookInstanceArn" 77 | <<: *filters_actions 78 | 79 | - name: sagemaker-transform-job 80 | resource: sagemaker-transform-job 81 | mode: 82 | <<: *mode 83 | events: 84 | - source: sagemaker.amazonaws.com 85 | event: "CreateTransformJob" 86 | ids: "requestParameters.transformJobName" 87 | <<: *filters_actions 88 | 89 | - name: ec2-instance 90 | resource: ec2 91 | mode: 92 | <<: *mode 93 | events: 94 | - RunInstances 95 | <<: *filters_actions 96 | 97 | - name: s3-bucket 98 | resource: s3 99 | mode: 100 | <<: *mode 101 | events: 102 | - CreateBucket 103 | <<: *filters_actions 104 | 105 | - 
name: rds 106 | resource: rds 107 | mode: 108 | <<: *mode 109 | events: 110 | - CreateDBInstance 111 | <<: *filters_actions 112 | 113 | - name: lambda 114 | resource: lambda 115 | mode: 116 | <<: *mode 117 | events: 118 | - source: lambda.amazonaws.com 119 | event: CreateFunction20150331 120 | ids: responseElements.functionArn 121 | <<: *filters_actions 122 | 123 | - name: ebs-snapshot 124 | resource: ebs-snapshot 125 | mode: 126 | <<: *mode 127 | events: 128 | - source: ec2.amazonaws.com 129 | event: CreateSnapshot 130 | ids: "responseElements.snapshotId" 131 | <<: *filters_actions 132 | 133 | - name: ebs-volume 134 | resource: ebs 135 | mode: 136 | <<: *mode 137 | events: 138 | - CreateVolume 139 | <<: *filters_actions 140 | 141 | - name: app-elb 142 | resource: app-elb 143 | mode: 144 | <<: *mode 145 | events: 146 | - source: elasticloadbalancing.amazonaws.com 147 | event: CreateLoadBalancer 148 | ids: responseElements.loadBalancers[0].loadBalancerArn 149 | <<: *filters_actions 150 | 151 | - name: app-elb-target-group 152 | resource: app-elb-target-group 153 | mode: 154 | <<: *mode 155 | events: 156 | - source: elasticloadbalancing.amazonaws.com 157 | event: CreateTargetGroup 158 | ids: responseElements.targetGroups[0].targetGroupArn 159 | <<: *filters_actions 160 | 161 | - name: glue-job 162 | resource: glue-job 163 | mode: 164 | <<: *mode 165 | events: 166 | - source: glue.amazonaws.com 167 | event: CreateJob 168 | ids: responseElements.name 169 | <<: *filters_actions 170 | 171 | - name: opensearch-serverless 172 | resource: opensearch-serverless 173 | mode: 174 | <<: *mode 175 | events: 176 | - source: aoss.amazonaws.com 177 | event: CreateCollection 178 | ids: responseElements.createCollectionDetail.id 179 | <<: *filters_actions 180 | 181 | - name: dynamodb 182 | resource: dynamodb-table 183 | mode: 184 | <<: *mode 185 | events: 186 | - source: dynamodb.amazonaws.com 187 | event: CreateTable 188 | ids: requestParameters.tableName 189 | <<: *filters_actions 
190 | 191 | - name: eip-allocate 192 | resource: ec2 193 | mode: 194 | <<: *mode 195 | events: 196 | - source: ec2.amazonaws.com 197 | event: AllocateAddress 198 | ids: responseElements.publicIp 199 | <<: *filters_actions 200 | 201 | - name: ec2-securitygroup 202 | resource: ec2 203 | mode: 204 | <<: *mode 205 | events: 206 | - source: ec2.amazonaws.com 207 | event: CreateSecurityGroup 208 | ids: responseElements.groupId 209 | <<: *filters_actions -------------------------------------------------------------------------------- /infra/cloudcustodian/lambda-mailer-code/sqs_queue_processor-modified.py: -------------------------------------------------------------------------------- 1 | # Copyright The Cloud Custodian Authors. 2 | # SPDX-License-Identifier: Apache-2.0 3 | """ 4 | SQS Message Processing 5 | =============== 6 | 7 | """ 8 | import base64 9 | import json 10 | import logging 11 | import zlib 12 | 13 | from c7n_mailer.target import MessageTargetMixin 14 | 15 | DATA_MESSAGE = "maidmsg/1.0" 16 | 17 | 18 | def getDictionaryValue(dictionary, key): 19 | return dictionary.get(key, dictionary.get(key[0].lower() + key[1:], "")) 20 | 21 | 22 | 23 | class MailerSqsQueueIterator: 24 | # Copied from custodian to avoid runtime library dependency 25 | msg_attributes = ["sequence_id", "op", "ser"] 26 | 27 | def __init__(self, aws_sqs, queue_url, logger, limit=0, timeout=10): 28 | self.aws_sqs = aws_sqs 29 | self.queue_url = queue_url 30 | self.limit = limit 31 | self.logger = logger 32 | self.timeout = timeout 33 | self.messages = [] 34 | 35 | # this and the next function make this object iterable with a for loop 36 | def __iter__(self): 37 | return self 38 | 39 | def __next__(self): 40 | if self.messages: 41 | return self.messages.pop(0) 42 | 43 | if self.queue_url == None: 44 | raise StopIteration() 45 | 46 | response = self.aws_sqs.receive_message( 47 | QueueUrl=self.queue_url, 48 | WaitTimeSeconds=self.timeout, 49 | MaxNumberOfMessages=3, 50 | 
MessageAttributeNames=self.msg_attributes, 51 | AttributeNames=["SentTimestamp"], 52 | ) 53 | 54 | msgs = response.get("Messages", []) 55 | self.logger.debug("Messages received %d", len(msgs)) 56 | for m in msgs: 57 | self.messages.append(m) 58 | if self.messages: 59 | return self.messages.pop(0) 60 | raise StopIteration() 61 | 62 | next = __next__ # python2.7 63 | 64 | def ack(self, m): 65 | if self.queue_url != None: 66 | self.aws_sqs.delete_message(QueueUrl=self.queue_url, ReceiptHandle=getDictionaryValue(m, "ReceiptHandle")) 67 | 68 | 69 | class MailerSqsQueueProcessor(MessageTargetMixin): 70 | def __init__(self, config, session, logger, max_num_processes=16): 71 | self.config = config 72 | self.logger = logger 73 | self.session = session 74 | self.max_num_processes = max_num_processes 75 | self.receive_queue = self.config["queue_url"] 76 | self.endpoint_url = self.config.get("endpoint_url", None) 77 | if self.config.get("debug", False): 78 | self.logger.debug("debug logging is turned on from mailer config file.") 79 | logger.setLevel(logging.DEBUG) 80 | 81 | """ 82 | Cases 83 | - aws resource is tagged CreatorName: 'milton', ldap_tag_uids has CreatorName, 84 | we do an ldap lookup, get milton's email and send him an email 85 | - you put an email in the to: field of the notify of your policy, we send an email 86 | for all resources enforce by that policy 87 | - you put an sns topic in the to: field of the notify of your policy, we send an sns 88 | message for all resources enforce by that policy 89 | - an lambda enforces a policy based on an event, we lookup the event aws username, get their 90 | ldap email and send them an email about a policy enforcement (from lambda) for the event 91 | - resource-owners has a list of tags, SupportEmail, OwnerEmail, if your resources 92 | include those tags with valid emails, we'll send an email for those resources 93 | any others 94 | - resource-owners has a list of tags, SnSTopic, we'll deliver an sns message for 95 | any 
resources with SnSTopic set with a value that is a valid sns topic. 96 | """ 97 | 98 | def run(self, parallel=False, sqs_trigger_messages=None): 99 | if sqs_trigger_messages == None: 100 | self.logger.info("Downloading messages from the SQS queue.") 101 | aws_sqs = self.session.client("sqs", endpoint_url=self.endpoint_url) 102 | sqs_messages = MailerSqsQueueIterator(aws_sqs, self.receive_queue, self.logger) 103 | else: 104 | sqs_messages = MailerSqsQueueIterator(None, None, None) 105 | sqs_messages.messages = sqs_trigger_messages 106 | 107 | sqs_messages.msg_attributes = ["mtype", "recipient"] 108 | print(f"{sqs_messages=}") 109 | # lambda doesn't support multiprocessing, so we don't instantiate any mp stuff 110 | # unless it's being run from CLI on a normal system with SHM 111 | if parallel: 112 | import multiprocessing 113 | process_pool = multiprocessing.Pool(processes=self.max_num_processes) 114 | 115 | for sqs_message in sqs_messages: 116 | self.logger.debug( 117 | "Message id: %s received %s" 118 | % (sqs_message.get("MessageId", sqs_message.get("messageId", "")), sqs_message.get("MessageAttributes", sqs_message.get("messageAttributes", ""))) 119 | ) 120 | 121 | self.logger.debug(f"{json.dumps(sqs_message)=}") 122 | 123 | msg_kind = sqs_message.get("MessageAttributes", {}).get("mtype") 124 | if msg_kind: 125 | msg_kind = msg_kind["StringValue"] 126 | if not msg_kind == DATA_MESSAGE: 127 | warning_msg = "Unknown sqs_message or sns format %s" % (sqs_message.get("Body", sqs_message.get("body", ""))[:50]) 128 | self.logger.warning(warning_msg) 129 | if parallel: 130 | process_pool.apply_async(self.process_sqs_message, args=sqs_message) 131 | else: 132 | self.process_sqs_message(sqs_message) 133 | self.logger.debug("Processed sqs_message") 134 | sqs_messages.ack(sqs_message) 135 | if parallel: 136 | process_pool.close() 137 | process_pool.join() 138 | self.logger.info("No sqs_messages left on the queue, exiting c7n_mailer.") 139 | return 140 | 141 | # This 
function when processing sqs messages will only deliver messages over email or sns 142 | # If you explicitly declare which tags are aws_usernames (synonymous with ldap uids) 143 | # in the ldap_uid_tags section of your mailer.yml, we'll do a lookup of those emails 144 | # (and their manager if that option is on) and also send emails there. 145 | def process_sqs_message(self, encoded_sqs_message): 146 | body = encoded_sqs_message.get("Body", encoded_sqs_message.get("body", "")) 147 | try: 148 | body = json.dumps(json.loads(body)["Message"]) 149 | except ValueError: 150 | pass 151 | sqs_message = json.loads(zlib.decompress(base64.b64decode(body))) 152 | 153 | print(f"decoded: {json.dumps(sqs_message)=}") 154 | self.logger.debug( 155 | "Got account:%s message:%s %s:%d policy:%s recipients:%s" 156 | % ( 157 | sqs_message.get("account", "na"), 158 | getDictionaryValue(encoded_sqs_message, "MessageId"), 159 | sqs_message["policy"]["resource"], 160 | len(sqs_message["resources"]), 161 | sqs_message["policy"]["name"], 162 | ", ".join(sqs_message["action"].get("to", [])), 163 | ) 164 | ) 165 | 166 | self.handle_targets( 167 | sqs_message, 168 | getDictionaryValue(encoded_sqs_message, "Attributes")["SentTimestamp"], 169 | email_delivery=True, 170 | sns_delivery=True, 171 | ) 172 | -------------------------------------------------------------------------------- /infra/cloudcustodian/cloudcustodian/cloudcustodian_stack.py: -------------------------------------------------------------------------------- 1 | from aws_cdk import ( 2 | Size, 3 | Stack, 4 | # aws_ec2 as ec2, 5 | aws_events as events, 6 | aws_events_targets as targets, 7 | aws_lambda as _lambda, 8 | aws_iam as iam, 9 | Duration, 10 | custom_resources, 11 | aws_secretsmanager as secretsmanager, 12 | SecretValue, 13 | aws_ecr as ecr, 14 | aws_ecs as ecs, 15 | aws_logs as logs, 16 | aws_ec2 as ec2, 17 | aws_ecs_patterns as ecs_patterns, 18 | aws_applicationautoscaling as aascaling 19 | ) 20 | 21 | from constructs 
import Construct 22 | 23 | from cloudcustodian import common 24 | 25 | class CloudcustodianStack(Stack): 26 | 27 | 28 | def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: 29 | super().__init__(scope, construct_id, **kwargs) 30 | 31 | # TODO remove hardcoded vpc id 32 | vpc = ec2.Vpc.from_lookup(self, 33 | id="vpc", 34 | # is_default=True, 35 | vpc_id="vpc-0de496dd67345739e", 36 | 37 | region=self.region) 38 | 39 | # TODO fix email subjet 40 | # TODO automate jinja template validation in pipeline 41 | # TODO process the policies so that they have variables from the ecs task definition 42 | # TODO tag resources Team, Git hash etc... 43 | # TODO pipeline to use less permissions 44 | # TODO pass vars to policies? 45 | # TODO add resource policy to SES 46 | # TODO enforce PR review 47 | 48 | inline_policies = { 49 | 'tagging_policies': iam.PolicyDocument( 50 | statements=[ 51 | iam.PolicyStatement( 52 | sid="assumeRemoteGovernanceRole", 53 | effect=iam.Effect.ALLOW, 54 | actions=["sts:AssumeRole"], 55 | resources=[f"arn:aws:iam::*:role/{common.target_governance_role_name}", 56 | f"arn:aws:iam::*:role/{common.target_governance_security_level_1_role}"] 57 | ), 58 | iam.PolicyStatement( 59 | sid="lambdaExecution", 60 | effect=iam.Effect.ALLOW, 61 | actions=["logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents"], 62 | resources=["*"] 63 | ), 64 | ] 65 | ) 66 | } 67 | 68 | role = iam.Role(self, 69 | id="central_governance_role", 70 | description="role used by the governance lambda", 71 | role_name=common.central_governance_role_name, 72 | inline_policies=inline_policies, 73 | assumed_by= iam.CompositePrincipal( 74 | iam.ServicePrincipal("lambda.amazonaws.com"), 75 | iam.ServicePrincipal("ecs-tasks.amazonaws.com")) 76 | ) 77 | 78 | # to be accessed by sub stacks 79 | self.role_arn = role.role_arn 80 | 81 | cloud_custodian_lambda = _lambda.Function(self, "cloud_custodian_tagger", 82 | runtime=_lambda.Runtime.PYTHON_3_12, 83 | 
handler="main.run", 84 | code=_lambda.Code.from_asset("lambda-code"), 85 | timeout=Duration.seconds(30), 86 | role=role 87 | ) 88 | 89 | bus = events.EventBus( 90 | self, 91 | id="cloud-custodian-bus", 92 | event_bus_name="cloud-custodian-bus" 93 | ) 94 | 95 | # to be accessed by the substack 96 | self.bus_arn = bus.event_bus_arn 97 | 98 | self.org_id = custom_resources.AwsCustomResource(self, "DescribeOrganizationCustomResource", 99 | install_latest_aws_sdk=True, 100 | on_update=custom_resources.AwsSdkCall( 101 | service='organizations', 102 | action= 'describeOrganization', 103 | physical_resource_id= custom_resources.PhysicalResourceId.of('Organization') 104 | ), 105 | policy=custom_resources.AwsCustomResourcePolicy.from_statements([ 106 | iam.PolicyStatement(resources=["*"], actions=["organizations:DescribeOrganization"], effect=iam.Effect.ALLOW) 107 | ])).get_response_field("Organization.Id") 108 | 109 | bus_policy = { 110 | "Sid": "AllowAllAccountsFromOrganizationToPutEvents", 111 | "Effect": "Allow", 112 | "Principal": "*", 113 | "Action": "events:PutEvents", 114 | "Resource": bus.event_bus_arn, 115 | "Condition": { 116 | "StringEquals": { 117 | "aws:PrincipalOrgID": self.org_id 118 | } 119 | } 120 | } 121 | 122 | bus.add_to_resource_policy(iam.PolicyStatement.from_json(bus_policy)) 123 | 124 | _cross_account_rule = events.Rule(self, 125 | id="cross_account_rule", 126 | description="catch remote cloudtrail events from other account to be analyzed by Cloud Custodian", 127 | event_pattern=events.EventPattern( 128 | detail=common.event_pattern_detail, 129 | detail_type=events.Match.equals_ignore_case("AWS API Call via CloudTrail"), 130 | ), 131 | event_bus=bus 132 | ).add_target(targets.LambdaFunction(handler=cloud_custodian_lambda)) 133 | 134 | # add secret TODO move to mailer stack? 
135 | secret = secretsmanager.Secret(self, "governance", 136 | description="store configuration and secrets for Cloud Custodian" 137 | ) 138 | self.secret_arn = secret.secret_arn 139 | 140 | # container 141 | repo = ecr.Repository(self, id="ecr-repo", 142 | image_scan_on_push=True, 143 | repository_name="governance", 144 | ) 145 | 146 | 147 | ecs_role = iam.Role(self, 148 | id="central_governance_role_ecs", 149 | description="role used by the governance ecs", 150 | role_name=f"{common.central_governance_role_name}_ecs", 151 | managed_policies=[iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AmazonECSTaskExecutionRolePolicy")], 152 | assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com") 153 | ) 154 | 155 | task_def = ecs.FargateTaskDefinition(self, id="ecs-td", 156 | runtime_platform=ecs.RuntimePlatform( 157 | operating_system_family=ecs.OperatingSystemFamily.LINUX, 158 | cpu_architecture=ecs.CpuArchitecture.X86_64), 159 | cpu=256, memory_limit_mib=512, 160 | task_role=role, 161 | execution_role=ecs_role) 162 | 163 | task_def.add_container( 164 | id="td-container", 165 | image=ecs.ContainerImage.from_registry(repo.repository_uri), 166 | logging=ecs.LogDriver.aws_logs( 167 | stream_prefix="governance", 168 | log_retention=logs.RetentionDays.ONE_MONTH 169 | )) 170 | 171 | cluster = ecs.Cluster(self, "FargateCPCluster", 172 | vpc=vpc, 173 | enable_fargate_capacity_providers=True 174 | ) 175 | 176 | ecs_patterns.ScheduledFargateTask(self, 177 | id="scheduled_task", 178 | cluster=cluster, 179 | schedule=aascaling.Schedule.expression("rate(1 day)"), 180 | scheduled_fargate_task_definition_options=ecs_patterns.ScheduledFargateTaskDefinitionOptions( 181 | task_definition=task_def), 182 | rule_name="daily_scan" 183 | ) -------------------------------------------------------------------------------- /infra/cloudcustodian/cloudcustodian/cloudcustodian_sub_stack.py: -------------------------------------------------------------------------------- 1 | from 
aws_cdk import ( 2 | Stack, 3 | aws_events as events, 4 | aws_events_targets as targets, 5 | aws_iam as iam, 6 | CfnCondition, 7 | Fn, 8 | Stack 9 | ) 10 | 11 | from constructs import Construct 12 | from cloudcustodian import common 13 | 14 | class CloudcustodianSubStack(Stack): 15 | 16 | def add_region_condition(self, role: iam.Role): 17 | cdk_child_role = role.node.default_child 18 | 19 | if not self.regionCondition: 20 | self.regionCondition = CfnCondition(self, 21 | id="region_condition", 22 | expression=Fn.condition_equals( Stack.of(self).region, "us-east-1")) 23 | 24 | cdk_child_role.cfn_options.condition = self.regionCondition 25 | 26 | def __init__(self, scope: Construct, construct_id: str, bus_arn: str, central_role_arn: str, **kwargs) -> None: 27 | super().__init__(scope, construct_id, **kwargs) 28 | 29 | self.regionCondition = None 30 | 31 | central_role = iam.Role.from_role_arn(self, "central_governance_role", central_role_arn) 32 | 33 | bus = events.EventBus.from_event_bus_arn(self, id="central_bus", event_bus_arn=bus_arn) 34 | 35 | 36 | _rule = events.Rule(self, 37 | id="rule", 38 | description="catch local events to be forwarded to the central governance", 39 | event_pattern=events.EventPattern( 40 | detail=common.event_pattern_detail, 41 | detail_type=events.Match.any_of( 42 | events.Match.equals_ignore_case("AWS API Call via CloudTrail"), 43 | events.Match.equals_ignore_case("AWS Health Event") 44 | ), 45 | ) 46 | ).add_target(targets.EventBus(bus)) 47 | 48 | inline_policies = { 49 | 'tagging_policies': iam.PolicyDocument( 50 | statements=[ 51 | iam.PolicyStatement( 52 | sid="sagemaker", 53 | effect=iam.Effect.ALLOW, 54 | actions=["sagemaker:ListTags", "sagemaker:AddTags", "sagemaker:List*", "sagemaker:Describe*"], 55 | resources=["*"] 56 | ), 57 | iam.PolicyStatement( 58 | sid="ec2", 59 | effect=iam.Effect.ALLOW, 60 | actions=["ec2:CreateTags", "ec2:Describe*","ec2:RevokeSecurityGroupIngress"], 61 | resources=["*"] 62 | ), 63 | 
iam.PolicyStatement( 64 | sid="s3", 65 | effect=iam.Effect.ALLOW, 66 | actions=["s3:ListBucket", "s3:TagResource", "s3:PutJobTagging", 67 | "s3:GetBucketTagging", "s3:GetLifecycleConfiguration", 68 | "s3:GetBucketNotification", "s3:GetBucketLogging", 69 | "s3:GetBucketWebsite", "s3:GetBucketVersioning", "s3:GetReplicationConfiguration", 70 | "s3:GetBucketAcl", "s3:GetBucketPolicy", "s3:GetBucketLocation", 71 | "s3:PutBucketTagging" ], 72 | resources=["*"] 73 | ), 74 | iam.PolicyStatement( 75 | sid="lambda", 76 | effect=iam.Effect.ALLOW, 77 | actions=["lambda:GetFunction","lambda:TagResource", "lambda:ListTags"], 78 | resources=["*"] 79 | ), 80 | iam.PolicyStatement( 81 | sid="rds", 82 | effect=iam.Effect.ALLOW, 83 | actions=["rds:AddTagsToResource", "rds:ListTagsForResource", "rds:Describe*" ], 84 | resources=["*"] 85 | ), 86 | iam.PolicyStatement( 87 | sid="resourcegroup", 88 | effect=iam.Effect.ALLOW, 89 | actions=["tag:TagResources","tag:GetResources"], 90 | resources=["*"] 91 | ), 92 | iam.PolicyStatement( 93 | sid="notify", 94 | effect=iam.Effect.ALLOW, 95 | actions=["iam:ListAccountAliases", "sqs:sendmessage"], 96 | resources=["*"] 97 | ), 98 | iam.PolicyStatement( 99 | sid="elbv2", 100 | effect=iam.Effect.ALLOW, 101 | actions=['elasticloadbalancing:DescribeTargetGroups', 'elasticloadbalancing:AddTags', 'elasticloadbalancing:DescribeListeners', 'elasticloadbalancing:DescribeLoadBalancers', 'elasticloadbalancing:DescribeLoadBalancerAttributes', 'elasticloadbalancing:DescribeTags', 'elasticloadbalancing:DescribeTargetHealth'], 102 | resources=["*"] 103 | ), 104 | iam.PolicyStatement( 105 | sid="glue", 106 | effect=iam.Effect.ALLOW, 107 | actions=['glue:GetTags', 'glue:TagResource', 'glue:GetJobs'], 108 | resources=["*"] 109 | ), 110 | iam.PolicyStatement( 111 | sid="aoss", 112 | effect=iam.Effect.ALLOW, 113 | actions=['glue:GetTags', 'glue:TagResource', 'glue:GetJobs'], 114 | resources=["*"] 115 | ), 116 | iam.PolicyStatement( 117 | sid="dynamo", 118 | 
effect=iam.Effect.ALLOW, 119 | actions=["dynamodb:DescribeTable","dynamodb:ListTables","dynamodb:ListGlobalTables","dynamodb:TagResource", "dynamodb:ListTagsOfResource"], 120 | resources=["*"] 121 | ), 122 | iam.PolicyStatement( 123 | sid="health", 124 | effect=iam.Effect.ALLOW, 125 | actions=['aoss:ListTagsForResource', 'aoss:TagResource', 'aoss:ListCollections'], 126 | resources=["*"] 127 | ), 128 | ] 129 | ) 130 | } 131 | 132 | role = iam.Role(self, 133 | id="remote_scan_role", 134 | description="role used by cloud custodian", 135 | role_name=common.target_governance_role_name, 136 | inline_policies=inline_policies, 137 | # managed_policies=iam.ManagedPolicy. 138 | assumed_by=central_role, 139 | ) 140 | self.add_region_condition(role) 141 | 142 | security_level_1_inline_policies = { 143 | 'governance_level_1_policy': iam.PolicyDocument( 144 | statements=[ 145 | iam.PolicyStatement( 146 | sid="ec2Tag", 147 | effect=iam.Effect.ALLOW, 148 | actions=["ec2:CreateTags", "ec2:Describe*"], 149 | resources=["*"] 150 | ), 151 | iam.PolicyStatement( 152 | sid="notify", 153 | effect=iam.Effect.ALLOW, 154 | actions=["iam:ListAccountAliases", "sqs:sendmessage"], 155 | resources=["*"] 156 | ), 157 | iam.PolicyStatement( 158 | sid="acm", 159 | effect=iam.Effect.ALLOW, 160 | actions=["acm:List*", "acm:Describe*", "acm:ListTagsForCertificate", "acm:AddTagsToCertificate"], 161 | resources=["*"] 162 | ), 163 | iam.PolicyStatement( 164 | sid="s3", 165 | effect=iam.Effect.ALLOW, 166 | actions=["s3:List*", "s3:Describe*", "s3:GetBucket*"], 167 | resources=["arn:aws:s3:::*"] 168 | ), 169 | iam.PolicyStatement( 170 | sid="tag", 171 | effect=iam.Effect.ALLOW, 172 | actions=['tag:TagResources', 'tag:GetResources'], 173 | resources=["*"] 174 | ), 175 | iam.PolicyStatement( 176 | sid="health", 177 | effect=iam.Effect.ALLOW, 178 | actions=['health:DescribeEvents'], 179 | resources=["*"] 180 | ), 181 | ] 182 | ) 183 | } 184 | 185 | security_level_1_role = iam.Role(self, 186 | 
id="security_level_1_role", 187 | description="role used by cloud custodian for low risk actions", 188 | role_name=common.target_governance_security_level_1_role, 189 | inline_policies=security_level_1_inline_policies, 190 | assumed_by=central_role, 191 | ) 192 | 193 | self.add_region_condition(security_level_1_role) 194 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
2 | 3 | 4 | 5 | Li10 logo 6 | 7 | 8 | This repository is supported by Li10. 9 |
10 | Check out www.li10.com to learn more about our private repository and SaaS options. 11 |
12 | 13 |
14 | 15 | # Li10 Governance 16 | Li10 Governance is an advanced cloud governance solution designed for enterprises seeking real-time compliance and auditing capabilities for cloud infrastructure. Our solution can be deployed quickly (as a SaaS or self-hosted), following industry best-practices, and delivers immediate benefits such as cost reduction, heightened security, and a reduced environmental footprint. Li10 Governance occupies a unique position in the market, balancing flexibility, ease of operation, and cost-effectiveness. 17 | 18 | Built upon the robust open source project Cloud Custodian, Li10 Governance utilizes its versatile policy-as-code engine to automate: 19 | * the audit of the infrastructure in your AWS environment 20 | * the remediation of non-compliant resources 21 | 22 | In addition, Li10 Governance provides: 23 | * a suite of field-tested policies 24 | * infrastructure-as-code and pipelines allowing for predictable, auditable and customizable deployment of Li10 Governance 25 | * a centralized processing for AWS EventBridge Events 26 | * defined least privilege AWS permissions to run Li10 Governance 27 | * a well-formatted notification template 28 | 29 | You can find more details about how this solution compares with others on our website. 30 | 31 | # Architecture 32 | The following diagram shows the infrastructure of Li10 Governance which can be deployed in your AWS environment. 33 | 34 | ![architecture diagram](./docs/img/arch.png) 35 | 36 | 1. Events from remote AWS accounts are forwarded to a central EventBridge Event Bus 37 | 2. Events are fed to a central Lambda function for audit and remediation according to your governance policies 38 | 3. Notifications are send to an SQS queue. They can be sent to an email or Slack. 39 | 4. ECS (Elastic Container Service) pulls a container image with your policies 40 | 5. an ECS Fargate Task is triggered daily and assumes an IAM Role in each account to be audited and remediated. 
41 | 42 | # CloudFormation Stacks 43 | The infrastructure-as-code deploys 3 stacks: 44 | 1. A central stack for audit and remediation 45 | 2. A stack for the notification processing 46 | 3. A stack for the deployment of necessary IAM Roles and EventBridge Rules in remote accounts 47 | 48 | # Repo structure 49 | ``` 50 | .github/ Contains the GitHub Actions used to deploy this solution 51 | .gitlab/ Contains the GitLab Jobs used to deploy this solution 52 | infra/ Contains the AWS CDK code 53 | ├─ cloudcustodian/ 54 | │ ├─ cloudcustodian/ 55 | │ │ ├─ cloudcustodian_stack.py Central stack containing the Lambda 56 | │ │ ├─ cloudcustodian_mailer.py Stack for the Cloud Custodian Mailer deployed along the main stack 57 | │ │ ├─ cloudcustodian_sub_stack.py Sub stack deployed in all accounts and regions 58 | │ │ ├─ common.py Shared configuration 59 | | ├─ docker/ Dockerfile used to generate the image deployed to ECS 60 | │ ├─ lambda-code/ contains the code to customize the Cloud Custodian Lambda for Event mode policies 61 | │ ├─ lambda-mailer-code/ contains the code to customize the Cloud Custodian Mailer Lambda 62 | │ ├─ transform.py combine multiple policies into one to eliminate infrastructure duplication 63 | │ ├─ app.py CDK app where we control which account and regions are monitored 64 | policies/ Cloud Custodian policies to deploy 65 | ├─ event-based/ Event mode policies processed in Lambda 66 | ├─ schedule-based/ Pull mode policies triggered by an EventBridge Schedule and running in an ECS Task 67 | ├─ templates/ Email and Slack messages templates 68 | ``` 69 | 70 | # Setup 71 | 1/ Each region monitored by this solution must have CloudTrail enabled. This is required by event-mode policies such as automated tagging.
72 | 73 | 2/ Each region monitored by this solution must be CDK-bootstrapped with the command `cdk bootstrap aws:/// --profile ` 74 | 75 | 3/ Each CDK IAM Roles (per region bootstrapped) must have their trust policy configured to trust the role used by the pipeline to deploy the infrastructure. 76 | For example, the IAM Role `arn:aws:iam::111122223333:role/cdk-hnb659fds-cfn-exec-role-111122223333-us-east-1` should be configured to trust `arn:aws:iam::555566667777:role/gh-actions-ssvc-governance`. So the trust policy looks like this 77 | ``` 78 | { 79 | "Version": "2012-10-17", 80 | "Statement": [ 81 | { 82 | "Effect": "Allow", 83 | "Principal": { 84 | "Service": "cloudformation.amazonaws.com", 85 | "AWS": "arn:aws:iam::555566667777:role/gh-actions-ssvc-governance" 86 | }, 87 | "Action": "sts:AssumeRole" 88 | } 89 | ] 90 | } 91 | ``` 92 | 93 | 4/ The AWS Accounts and Regions to monitor should be added to the variable `subStackTargets` in `governance\infra\cloudcustodian\cloudcustodian\common.py` as in the example below. 94 | 95 | ``` 96 | subStackTargets = { 97 | "555566667777" : ["us-east-1", "us-west-2"], 98 | "999988887777" : ["us-east-1", "us-west-2"], 99 | "111122223333" : ["us-east-1", "us-west-2"], 100 | } 101 | ``` 102 | 103 | 5/ Push your code to the `main` branch to trigger the pipeline and deploy the infrastructure in the relevant accounts. 104 | 105 | 6/ The pipeline creates an empty secret in Secrets Manager. To send Slack notifications, create a key `MAILER_SLACK_TOKEN` and set the value to a token, eg: `xoxb-359...` 106 | 107 | 7/ Update the following settings in `governance\infra\cloudcustodian\transform.py`: 108 | 1. `NOTIFY_EMAIL` with the email used to send notifications 109 | 2. `NOTIFY_SLACK` with the name of the slack channel where to send notifications 110 | 3. `INSTANT_SQS_QUEUE` with the SQS URL of the queue deployed 111 | 112 | 8/ By default, under `./policies` there are 2 directories: `all` and `staging`. 
Each of these folders must contain an `accounts.yml` file which contains the details about the AWS accounts and Regions to be audited and remediated. You can add more folders to target specific policies to a different group of accounts and regions. 113 | 114 | In addition to `NOTIFY_EMAIL`, `NOTIFY_SLACK` and `INSTANT_SQS_QUEUE`, `role` need to be configured with the role arn of the role created by the IaC. It has this format: `arn:aws:iam::555566667777:role/governance_security_level_1_role` 115 | 116 | 9/ To receive Slack notifications, you can install the Li10 Alerts Slack app in a channel in your Slack Workspace. 117 | 118 | Li10 Alerts Slack notification example 119 | 120 | # Policies 121 | The following policies are available by default. You can customize them and add your own. 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | 137 | 138 | 139 | 140 | 141 | 142 | 143 | 144 | 145 | 146 | 147 | 148 | 149 | 150 | 151 | 152 | 153 | 154 | 155 | 156 | 157 | 158 | 159 | 160 | 161 | 162 | 163 | 164 | 165 | 166 | 167 | 168 | 169 | 170 | 171 | 172 | 173 | 174 | 175 | 176 | 177 | 178 | 179 | 180 | 181 | 182 | 183 | 184 | 185 | 186 | 187 | 188 | 189 | 190 | 191 | 192 | 193 | 194 | 195 | 196 |
Policy FileRuns InDescription
sg_ingress.ymlLambdaRemediate unauthorized Security Groups
tagging.ymlLambdaAuto-tag EC2 and Sagemaker resources
acm-certificate-audit.ymlECSNotify if ACM certificates are expiring soon
ebs-copy-tags.ymlECSCopy EC2 tags to EBS volules and snapshots
ec2-public-instance-audit.ymlECSReport public-facing EC2 instances
ec2_unused_eip.ymlECSRemoves unused Elastic IPs
rds-deprecated.ymlECSMotify when an RDS cluster uses a deprecated engine
rds-unused-30-days.ymlECSRemediate unused RDS clusters
s3-public-access-audit.ymlECSNotify if a public S3 bucket is found
service-quota.ymlECSAutomate service quota increase request
sg-remove-ingress-rule.ymlECSRemediate non-compliant Security Groups
197 | 198 | # How To 199 | ## Add a policy 200 | To add or update a policy, we recommend creating a policy file in `./policies/staging/` where it can be executed against a test account. Once the policy is ready, you can move it a different folder where will be applied to more AWS accounts. 201 | 202 | ## Test locally 203 | 204 | To test a policy locally, you can run this command (after replacing the variables with the expected values in the policy file): 205 | ```custodian run --profile profile-name iam-policy-has-all.yml -s out --verbose --cache-period 0 --region us-east-1``` 206 | 207 | ## Launch ECS Task using the AWS CLI 208 | ``` 209 | aws ecs run-task \ 210 | --region us-east-1 \ 211 | --cluster CloudcustodianStack-FargateCPCluster668E71F2-nSqu5xLMqiqa \ 212 | --task-definition CloudcustodianStackecstdCF984091:2 \ 213 | --profile profile-name \ 214 | --network-configuration 'awsvpcConfiguration={subnets=["subnet-0938213728b33a3e6","subnet-0f4184e56927a53b1"],assignPublicIp="DISABLED"}' \ 215 | --launch-type="FARGATE" 216 | ``` 217 | 218 | To run a single policy, add the following with the name of the policy to execute. Others will be ignore: 219 | ``` 220 | --overrides '{ "containerOverrides": [ { "name": "td-container", "environment": [ { "name": "POLICY_FILTER", "value": "ec2_unused_eip.yml" } ] } ] }' 221 | ``` 222 | -------------------------------------------------------------------------------- /policies/templates/default.html.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | {# 5 | Sample Policy that can be used with this template: 6 | 7 | Additional parameters can be passed in from the policy - i.e. 
action_desc, violation_desc 8 | 9 | - name: delete-unencrypted-ec2 10 | resource: ec2 11 | filters: 12 | - type: ebs 13 | key: Encrypted 14 | value: false 15 | actions: 16 | - terminate 17 | - type: notify 18 | template: default.html 19 | subject: "[custodian {{ account }}] Delete Unencrypted EC2 - {{ region }}" 20 | violation_desc: "The following EC2(s) are not encrypted:" 21 | action_desc: "The EC2(s) have been terminated" 22 | from: custodian@domain.com 23 | to: 24 | - owner@domain.com 25 | transport: 26 | type: sqs 27 | queue: https://sqs.us-east-1.amazonaws.com/12345678910/custodian-sqs-queue 28 | #} 29 | 30 | 31 | {# You can set any mandatory tags here, and they will be formatted/outputted in the message #} 32 | {% set requiredTags = ['Application','Owner'] %} 33 | 34 | {# The macros below format some resource attributes for better presentation #} 35 | {% macro getTag(resource, tagKey) -%} 36 | {% if resource.get('Tags') %} 37 | {% for t in resource.get('Tags') %} 38 | {% if t.get('Key') == tagKey %} 39 | {{ t.get('Value') }} 40 | {% endif %} 41 | {% endfor %} 42 | {% endif %} 43 | {%- endmacro %} 44 | 45 | {% macro extractList(resource, column) -%} 46 | {% for p in resource.get(column) %} 47 | {{ p }}, 48 | {% endfor %} 49 | {%- endmacro %} 50 | 51 | {% macro columnHeading(columnNames, tableWidth) -%} 52 | 53 | {% for columnName in columnNames %} 54 | 55 | {% endfor %} 56 | {%- endmacro %} 57 | 58 | {# The macro below creates the table: 59 | Formatting can be dependent on the column names that are passed in 60 | #} 61 | {% macro columnData(resources, columnNames) -%} 62 | {% for resource in resources %} 63 | 64 | {% for columnName in columnNames %} 65 | {% if columnName in requiredTags %} 66 | 67 | {% elif columnName == 'tag.Name' %} 68 | 69 | {% elif columnName == 'InstanceCount' %} 70 | 71 | {% elif columnName == 'VolumeConsumedReadWriteOps' %} 72 | 73 | {% elif columnName == 'PublicIp' %} 74 | 75 | {% else %} 76 | 77 | {% endif %} 78 | {% endfor %} 79 | 
80 | {% endfor %} 81 |
{{ columnName }}
{{ getTag(resource,columnName) }}{{ getTag(resource,'Name') }}{{ resource['Instances'] | length }}{{ resource['c7n.metrics']['AWS/EBS.VolumeConsumedReadWriteOps.Maximum'][0]['Maximum'] }}{{ resource['NetworkInterfaces'][0].get('Association')['PublicIp'] }}{{ resource[columnName] }}
82 | {%- endmacro %} 83 | 84 | {# Main #} 85 | {% macro createTable(columnNames, resources, tableWidth) %} 86 | {{ columnHeading(columnNames, tableWidth) }} 87 | {{ columnData(resources, columnNames) }} 88 | {%- endmacro %} 89 | 90 | 91 | Custodian Notification - {{ account }} 92 | 93 | 94 | 131 | 132 | 133 |
    134 |

    {{ "%s - %s" | format(account,region) }}

    135 |

    {{ action['violation_desc'] }}

    136 | 137 | {# Below, notifications for any resource-type can be formatted with specific columns #} 138 | {% if policy['resource'] == "ami" %} 139 | {% set columnNames = ['Name','ImageId','CreationDate'] %} 140 | {{ createTable(columnNames, resources, '60') }} 141 | 142 | {% elif policy['resource'] == "app-elb" %} 143 | {% set columnNames = ['LoadBalancerName','CreatedTime','Application','Owner'] %} 144 | {{ createTable(columnNames, resources, '80') }} 145 | 146 | {% elif policy['resource'] == "asg" %} 147 | {% if resources[0]['Invalid'] is defined %} 148 | {% set columnNames = ['AutoScalingGroupName','InstanceCount','Invalid'] %} 149 | {% else %} 150 | {% set columnNames = ['AutoScalingGroupName','InstanceCount','Application','Owner'] %} 151 | {% endif %} 152 | {{ createTable(columnNames, resources, '60') }} 153 | 154 | {% elif policy['resource'] == "cache-cluster" %} 155 | {% set columnNames = ['CacheClusterId','CacheClusterCreateTime','CacheClusterStatus','Application','Owner'] %} 156 | {{ createTable(columnNames, resources, '80') }} 157 | 158 | {% elif policy['resource'] == "cache-snapshot" %} 159 | {% set columnNames = ['SnapshotName','CacheClusterId','SnapshotSource','Application','Owner'] %} 160 | {{ createTable(columnNames, resources, '80') }} 161 | 162 | {% elif policy['resource'] == "cfn" %} 163 | {% set columnNames = ['StackName'] %} 164 | {{ createTable(columnNames, resources, '50') }} 165 | 166 | {% elif policy['resource'] == "cloudsearch" %} 167 | {% set columnNames = ['DomainName'] %} 168 | {{ createTable(columnNames, resources, '50') }} 169 | 170 | {% elif policy['resource'] == "ebs" %} 171 | {% set columnNames = ['VolumeId','CreateTime','State','Application','Owner'] %} 172 | {{ createTable(columnNames, resources, '50') }} 173 | 174 | {% elif policy['resource'] == "ebs-snapshot" %} 175 | {% set columnNames = ['SnapshotId','StartTime','Application','Owner'] %} 176 | {{ createTable(columnNames, resources, '80') }} 177 | 178 | {% elif 
policy['resource'] == "ec2" %} 179 | {% if resources[0]['MatchedFilters'] == ['PublicIpAddress'] %} 180 | {% set columnNames = ['tag.Name','PublicIp','InstanceId'] %} 181 | {% else %} 182 | {% set columnNames = ['tag.Name','PrivateIpAddress','InstanceId','ImageId','Application','Owner'] %} 183 | {% endif %} 184 | {{ createTable(columnNames, resources, '80') }} 185 | 186 | {% elif policy['resource'] == "efs" %} 187 | {% set columnNames = ['CreationToken','CreationTime','FileSystemId','OwnerId'] %} 188 | {{ createTable(columnNames, resources, '50') }} 189 | 190 | {% elif policy['resource'] == "elasticsearch" %} 191 | {% set columnNames = ['DomainName','Endpoint'] %} 192 | {{ createTable(columnNames, resources, '50') }} 193 | 194 | {% elif policy['resource'] == "elb" %} 195 | {% set columnNames = ['LoadBalancerName','InstanceCount','AvailabilityZones','Application','Owner'] %} 196 | {{ createTable(columnNames, resources, '80') }} 197 | 198 | {% elif policy['resource'] == "emr" %} 199 | {% set columnNames = ['Id','EmrState'] %} 200 | {{ createTable(columnNames, resources, '50') }} 201 | 202 | {% elif policy['resource'] == "kinesis" %} 203 | {% set columnNames = ['StreamName'] %} 204 | {{ createTable(columnNames, resources, '50') }} 205 | 206 | {% elif policy['resource'] == "launch-config" %} 207 | {% set columnNames = ['LaunchConfigurationName'] %} 208 | {{ createTable(columnNames, resources, '30') }} 209 | 210 | {% elif policy['resource'] == "log-group" %} 211 | {% set columnNames = ['logGroupName'] %} 212 | {{ createTable(columnNames, resources, '30') }} 213 | 214 | {% elif policy['resource'] == "rds" %} 215 | {% if resources[0]['PubliclyAccessible'] == true or resources[0]['StorageEncrypted'] == false %} 216 | {% set columnNames = ['DBInstanceIdentifier','PubliclyAccessible','StorageEncrypted','DBSubnetGroup'] %} 217 | {% else %} 218 | {% set columnNames = ['DBInstanceIdentifier','Application','Owner'] %} 219 | {% endif %} 220 | {{ createTable(columnNames, 
resources, '80') }} 221 | 222 | {% elif policy['resource'] == "rds-snapshot" %} 223 | {% set columnNames = ['DBSnapshotIdentifier','SnapshotCreateTime','DBInstanceIdentifier','SnapshotType','Application','Owner'] %} 224 | {{ createTable(columnNames, resources, '80') }} 225 | 226 | {% elif policy['resource'] == "redshift" %} 227 | {% if resources[0]['PubliclyAccessible'] == true or resources[0]['Encrypted'] == false %} 228 | {% set columnNames = ['ClusterIdentifier','NodeCount','PubliclyAccessible','Encrypted'] %} 229 | {% else %} 230 | {% set columnNames = ['ClusterIdentifier','NodeCount','Application','Owner'] %} 231 | {% endif %} 232 | {{ createTable(columnNames, resources, '80') }} 233 | 234 | {% elif policy['resource'] == "redshift-snapshot" %} 235 | {% set columnNames = ['SnapshotIdentifier','DBName','Application','Owner'] %} 236 | {{ createTable(columnNames, resources, '80') }} 237 | 238 | {% elif policy['resource'] == "s3" %} 239 | {% if resources[0]['GlobalPermissions'] is defined %} 240 | {% set columnNames = ['Name','GlobalPermissions'] %} 241 | {% else %} 242 | {% set columnNames = ['Name','Application','Owner'] %} 243 | {% endif %} 244 | {{ createTable(columnNames, resources, '80') }} 245 | 246 | {% elif policy['resource'] == "security-group" %} 247 | {% set columnNames = ['GroupName','tag.Name','GroupId','VpcId'] %} 248 | {{ createTable(columnNames, resources, '80') }} 249 | {% elif policy['resource'] == "iam-role" %} 250 | {% set columnNames = ['RoleName','CreateDate','Arn'] %} 251 | {{ createTable(columnNames, resources, '80') }} 252 | 253 | {% elif policy['resource'] == "simpledb" %} 254 | {% set columnNames = ['DomainName'] %} 255 | {{ createTable(columnNames, resources, '60') }} 256 | 257 | {# If no special formatting is defined for a resource type, all attributes will be formatted in the email #} 258 | {% else %} 259 | 260 | 261 | {% for column in resources[0] %} 262 | 263 | {% endfor %} 264 | 265 | {% set columnlen = resources[0]|length|int %} 
266 | {% for resource in resources %} 267 | 268 | {% for column in resource %} 269 | 270 | {% endfor %} 271 | 272 | {% endfor %} 273 |
    {{ column }}
    {{ resource[column] }}
    274 | {% endif %} 275 | 276 |

    {{ action['action_desc'] }}

    277 | 278 | {# 279 |

    280 | For any other questions, 281 | email us 282 |

    283 | #} 284 |
285 | 286 | 287 | -------------------------------------------------------------------------------- /infra/cloudcustodian/lambda-mailer-code/slack_delivery-modified.py: -------------------------------------------------------------------------------- 1 | # Copyright The Cloud Custodian Authors. 2 | # SPDX-License-Identifier: Apache-2.0 3 | import copy 4 | import time 5 | 6 | import requests 7 | # from c7n_mailer.ldap_lookup import Redis 8 | from c7n_mailer.utils import get_rendered_jinja 9 | from c7n_mailer.utils_email import is_email 10 | import json 11 | 12 | class SlackDelivery: 13 | def __init__(self, config, logger, email_handler): 14 | self.caching = self.cache_factory(config, config.get("cache_engine", None)) 15 | self.config = config 16 | self.logger = logger 17 | self.email_handler = email_handler # adding back to use slack://owners 18 | 19 | def cache_factory(self, config, type): 20 | if type == "redis": 21 | return Redis( 22 | redis_host=config.get("redis_host"), 23 | redis_port=int(config.get("redis_port", 6379)), 24 | db=0, 25 | ) 26 | else: 27 | return None 28 | 29 | def get_to_addrs_slack_messages_map(self, sqs_message): 30 | resource_list = copy.deepcopy(sqs_message["resources"]) 31 | 32 | slack_messages = {} 33 | 34 | # Check for Slack targets in 'to' action and render appropriate template. 
35 | for target in sqs_message.get("action", ()).get("to", []): 36 | if target == "slack://owners": 37 | self.logger.debug("target has an owner") 38 | to_addrs_to_resources_map = self.email_handler.get_emails_to_resources_map( 39 | sqs_message 40 | ) 41 | self.logger.debug(f"{to_addrs_to_resources_map=}") 42 | for to_addrs, resources in to_addrs_to_resources_map.items(): 43 | self.logger.debug(f"{to_addrs=}") 44 | resolved_addrs = self.retrieve_user_im(list(to_addrs)) 45 | self.logger.debug(f"{resolved_addrs=}") 46 | if not resolved_addrs: 47 | continue 48 | 49 | for address, slack_target in resolved_addrs.items(): 50 | slack_messages[address] = get_rendered_jinja( 51 | slack_target, 52 | sqs_message, 53 | resources, 54 | self.logger, 55 | "slack_template", 56 | "slack_default", 57 | self.config["templates_folders"], 58 | ) 59 | self.logger.debug( 60 | "Generating messages for recipient list produced by resource owner resolution." 61 | ) 62 | elif target.startswith("https://hooks.slack.com/"): 63 | slack_messages[target] = get_rendered_jinja( 64 | target, 65 | sqs_message, 66 | resource_list, 67 | self.logger, 68 | "slack_template", 69 | "slack_default", 70 | self.config["templates_folders"], 71 | ) 72 | elif target.startswith("slack://webhook/#") and self.config.get("slack_webhook"): 73 | webhook_target = self.config.get("slack_webhook") 74 | slack_messages[webhook_target] = get_rendered_jinja( 75 | target.split("slack://webhook/#", 1)[1], 76 | sqs_message, 77 | resource_list, 78 | self.logger, 79 | "slack_template", 80 | "slack_default", 81 | self.config["templates_folders"], 82 | ) 83 | self.logger.debug( 84 | "Generating message for webhook %s." 
% self.config.get("slack_webhook") 85 | ) 86 | elif target.startswith("slack://") and is_email(target.split("slack://", 1)[1]): 87 | resolved_addrs = self.retrieve_user_im([target.split("slack://", 1)[1]]) 88 | for address, slack_target in resolved_addrs.items(): 89 | slack_messages[address] = get_rendered_jinja( 90 | slack_target, 91 | sqs_message, 92 | resource_list, 93 | self.logger, 94 | "slack_template", 95 | "slack_default", 96 | self.config["templates_folders"], 97 | ) 98 | elif target.startswith("slack://#"): 99 | resolved_addrs = target.split("slack://#", 1)[1] 100 | slack_messages[resolved_addrs] = get_rendered_jinja( 101 | resolved_addrs, 102 | sqs_message, 103 | resource_list, 104 | self.logger, 105 | "slack_template", 106 | "slack_default", 107 | self.config["templates_folders"], 108 | ) 109 | elif target.startswith("slack://tag/") and "Tags" in resource_list[0]: 110 | tag_name = target.split("tag/", 1)[1] 111 | result = next( 112 | (item for item in resource_list[0].get("Tags", []) if item["Key"] == tag_name), 113 | None, 114 | ) 115 | if not result: 116 | self.logger.debug("No %s tag found in resource." 
% tag_name) 117 | continue 118 | 119 | resolved_addr = slack_target = result["Value"] 120 | 121 | if is_email(resolved_addr): 122 | ims = self.retrieve_user_im([resolved_addr]) 123 | slack_target = ims[resolved_addr] 124 | elif not resolved_addr.startswith("#"): 125 | resolved_addr = "#" + resolved_addr 126 | slack_target = resolved_addr 127 | 128 | slack_messages[resolved_addr] = get_rendered_jinja( 129 | slack_target, 130 | sqs_message, 131 | resource_list, 132 | self.logger, 133 | "slack_template", 134 | "slack_default", 135 | self.config["templates_folders"], 136 | ) 137 | self.logger.debug("Generating message for specified Slack channel.") 138 | return slack_messages 139 | 140 | def slack_handler(self, sqs_message, slack_messages): 141 | for key, payload in slack_messages.items(): 142 | self.logger.info( 143 | "Sending account:%s policy:%s %s:%s slack:%s to %s" 144 | % ( 145 | sqs_message.get("account", ""), 146 | sqs_message["policy"]["name"], 147 | sqs_message["policy"]["resource"], 148 | str(len(sqs_message["resources"])), 149 | sqs_message["action"].get("slack_template", "slack_default"), 150 | key, 151 | ) 152 | ) 153 | 154 | self.send_slack_msg(key, payload.encode("utf-8")) 155 | 156 | def retrieve_user_im(self, email_addresses): 157 | list = {} 158 | 159 | if not self.config["slack_token"]: 160 | self.logger.info("No Slack token found.") 161 | 162 | for address in email_addresses: 163 | if self.caching and self.caching.get(address): 164 | self.logger.debug("Got Slack metadata from cache for: %s" % address) 165 | list[address] = self.caching.get(address) 166 | continue 167 | 168 | response = requests.post( 169 | url="https://slack.com/api/users.lookupByEmail", 170 | data={"email": address}, 171 | headers={ 172 | "Content-Type": "application/x-www-form-urlencoded", 173 | "Authorization": "Bearer %s" % self.config.get("slack_token"), 174 | }, 175 | timeout=60, 176 | ).json() 177 | 178 | if not response["ok"]: 179 | if "headers" in response.keys() and 
"Retry-After" in response["headers"]: 180 | self.logger.info( 181 | "Slack API rate limiting. Waiting %d seconds", 182 | int(response.headers["retry-after"]), 183 | ) 184 | time.sleep(int(response.headers["Retry-After"])) 185 | continue 186 | elif response["error"] == "invalid_auth": 187 | raise Exception("Invalid Slack token.") 188 | elif response["error"] == "users_not_found": 189 | self.logger.info("Slack user ID for email address %s not found.", address) 190 | if self.caching: 191 | self.caching.set(address, {}) 192 | continue 193 | else: 194 | self.logger.warning("Slack Response: {}".format(response)) 195 | else: 196 | slack_user_id = response["user"]["id"] 197 | if "enterprise_user" in response["user"].keys(): 198 | slack_user_id = response["user"]["enterprise_user"]["id"] 199 | self.logger.debug("Slack account %s found for user %s", slack_user_id, address) 200 | if self.caching: 201 | self.logger.debug("Writing user: %s metadata to cache.", address) 202 | self.caching.set(address, slack_user_id) 203 | 204 | list[address] = slack_user_id 205 | 206 | return list 207 | 208 | def send_slack_msg(self, key, message_payload): 209 | 210 | print(key, message_payload) 211 | if key.startswith("https://hooks.slack.com/"): 212 | response = requests.post( 213 | url=key, 214 | data=message_payload, 215 | headers={"Content-Type": "application/json;charset=utf-8"}, 216 | timeout=60, 217 | ) 218 | else: 219 | response = requests.post( 220 | url="https://slack.com/api/chat.postMessage", 221 | data=message_payload, 222 | headers={ 223 | "Content-Type": "application/json;charset=utf-8", 224 | "Authorization": "Bearer %s" % self.config.get("slack_token"), 225 | }, 226 | timeout=60, 227 | ) 228 | 229 | if response.status_code == 429 and "Retry-After" in response.headers: 230 | self.logger.info( 231 | "Slack API rate limiting. 
Waiting %d seconds", int(response.headers["Retry-After"]) 232 | ) 233 | time.sleep(int(response.headers["Retry-After"])) 234 | return 235 | 236 | elif response.status_code != 200: 237 | self.logger.info( 238 | "Error in sending Slack message status:%s response: %s", 239 | response.status_code, 240 | response.text, 241 | ) 242 | return 243 | 244 | if "text/html" in response.headers["content-type"]: 245 | if response.text != "ok": 246 | self.logger.info( 247 | "Error in sending Slack message. Status:%s, response:%s", 248 | response.status_code, 249 | response.text, 250 | ) 251 | return 252 | 253 | else: 254 | response_json = response.json() 255 | if not response_json["ok"]: 256 | self.logger.info( 257 | "Error in sending Slack message. Status:%s, response:%s", 258 | response.status_code, 259 | response_json["error"], 260 | ) 261 | return 262 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 
22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2024 Li10 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /infra/cloudcustodian/lambda-mailer-code/email_delivery-modified.py: -------------------------------------------------------------------------------- 1 | # Copyright The Cloud Custodian Authors. 
# SPDX-License-Identifier: Apache-2.0
"""Resolve notification recipients and deliver c7n-mailer emails.

Vendored, modified copy of c7n_mailer's email delivery module: the LDAP,
SMTP, SendGrid and Graph imports are commented out below, so only the
AWS SES delivery path is fully wired up in this copy.
"""
from itertools import chain

# from c7n_mailer.azure_mailer.sendgrid_delivery import SendGridDelivery
# from c7n_mailer.graph_delivery import GraphDelivery
# from c7n_mailer.smtp_delivery import SmtpDelivery

# from .ldap_lookup import LdapLookup
from .utils import (
    Providers,
    decrypt,
    get_aws_username_from_event,
    get_provider,
    get_resource_tag_targets,
    unique,
)
from .utils_email import get_mimetext_message, is_email


class EmailDelivery:
    """Map c7n ``notify`` targets to email addresses and send via SES."""

    def __init__(self, config, session, logger):
        """Store config/session/logger and build an SES client on AWS.

        :param config: mailer configuration mapping (ses_region, ses_role,
            contact_tags, org_domain, ...).
        :param session: boto3 Session used to create STS/SES clients.
        :param logger: logger for diagnostics.
        """
        self.config = config
        self.logger = logger
        self.session = session
        self.provider = get_provider(self.config)
        if self.provider == Providers.AWS:
            self.aws_ses = self.get_ses_session()
        # NOTE(review): ldap_lookup is deliberately not initialized in this
        # modified copy (the LdapLookup import is commented out above); any
        # code path below that touches self.ldap_lookup will raise
        # AttributeError — confirm LDAP options are never set in config.
        # self.ldap_lookup = self.get_ldap_connection()

    def get_ses_session(self):
        """Return a boto3 SES client, assuming ``ses_role`` first if set.

        When ``ses_role`` is configured, STS temporary credentials are used
        so mail can be sent from another account; otherwise the default
        session credentials are used directly.
        """
        if self.config.get("ses_role", False):
            creds = self.session.client("sts").assume_role(
                RoleArn=self.config.get("ses_role"), RoleSessionName="CustodianNotification"
            )["Credentials"]

            return self.session.client(
                "ses",
                region_name=self.config.get("ses_region"),
                aws_access_key_id=creds["AccessKeyId"],
                aws_secret_access_key=creds["SecretAccessKey"],
                aws_session_token=creds["SessionToken"],
            )

        return self.session.client("ses", region_name=self.config.get("ses_region"))

    def get_ldap_connection(self):
        """Return an LDAP lookup helper from config, or None when unset.

        NOTE(review): LdapLookup's import is commented out in this modified
        copy, so calling this with ``ldap_uri`` configured raises NameError.
        """
        if self.config.get("ldap_uri"):
            credential = decrypt(self.config, self.logger, self.session, "ldap_bind_password")
            self.config["ldap_bind_password"] = credential
            return LdapLookup(self.config, self.logger)
        return None

    def get_valid_emails_from_list(self, targets):
        """Extract valid, de-duplicated email addresses from notify targets.

        Each target may be a colon-separated list of addresses. The special
        'resource-owner' / 'event-owner' targets are resolved elsewhere and
        skipped here. Non-email values are optionally completed as
        ``<value>@<email_base_url>`` when ``email_base_url`` is configured.
        """
        emails = []
        for target in targets:
            if target in ("resource-owner", "event-owner"):
                continue
            for email in target.split(":"):
                email = email.strip()
                if not email:
                    continue
                # BUG FIX: this previously validated and appended the whole
                # (possibly colon-joined) `target`, which dropped every
                # address inside a colon-separated target; validate each
                # split value instead, consistent with the branch below.
                if is_email(email):
                    emails.append(email)
                # gcp doesn't support the '@' character in their label values so we
                # allow users to specify an email_base_url to append to the end of their
                # owner contact tags
                if not is_email(email) and self.config.get("email_base_url"):
                    full_email = "%s@%s" % (email, self.config["email_base_url"])
                    if is_email(full_email):
                        emails.append(full_email)
        return unique(emails)

    def get_event_owner_email(self, targets, event):  # TODO: GCP-friendly
        """Resolve 'event-owner' to the email of the user behind the event.

        Returns a (possibly empty) list of addresses: the username itself if
        it already looks like an email (SSO), an LDAP lookup when
        ``ldap_uri`` is set, or ``<username>@<org_domain>`` when configured.
        """
        if "event-owner" in targets:
            aws_username = get_aws_username_from_event(self.logger, event)
            if aws_username:
                # is using SSO, the target might already be an email
                if is_email(aws_username):
                    return [aws_username]
                # if the LDAP config is set, lookup in ldap
                # NOTE(review): self.ldap_lookup is never set in this modified
                # copy — this branch would raise AttributeError; verify
                # ldap_uri is absent from the deployed config.
                elif self.config.get("ldap_uri", False):
                    return self.ldap_lookup.get_email_to_addrs_from_uid(aws_username)
                # the org_domain setting is configured, append the org_domain
                # to the username from AWS
                elif self.config.get("org_domain", False):
                    org_domain = self.config.get("org_domain", False)
                    self.logger.info("adding email %s to targets.", aws_username + "@" + org_domain)
                    return [aws_username + "@" + org_domain]
                else:
                    self.logger.warning(
                        "unable to lookup owner email. \
                        Please configure LDAP or org_domain"
                    )
            else:
                self.logger.info("no aws username in event")
        return []

    def get_ldap_emails_from_resource(self, sqs_message, resource):
        """Resolve LDAP uids found in resource tags (and optionally their
        managers) to a flat list of email addresses.

        NOTE(review): depends on self.ldap_lookup, which is not initialized
        in this modified copy; only safe while ``ldap_uid_tags``/``ldap_uri``
        are unset (the early return below then applies).
        """
        ldap_uid_tag_keys = self.config.get("ldap_uid_tags", [])
        ldap_uri = self.config.get("ldap_uri", False)
        if not ldap_uid_tag_keys or not ldap_uri:
            return []
        # this whole section grabs any ldap uids (including manager emails if option is on)
        # and gets the emails for them and returns an array with all the emails
        ldap_uid_tag_values = get_resource_tag_targets(resource, ldap_uid_tag_keys)
        email_manager = sqs_message["action"].get("email_ldap_username_manager", False)
        ldap_uid_emails = []
        # some types of resources, like iam-user have 'Username' in the resource, if the policy
        # opted in to resource_ldap_lookup_username: true, we'll do a lookup and send an email
        if sqs_message["action"].get("resource_ldap_lookup_username"):
            ldap_uid_emails = ldap_uid_emails + self.ldap_lookup.get_email_to_addrs_from_uid(
                resource.get("UserName"), manager=email_manager
            )
        for ldap_uid_tag_value in ldap_uid_tag_values:
            ldap_emails_set = self.ldap_lookup.get_email_to_addrs_from_uid(
                ldap_uid_tag_value, manager=email_manager
            )
            ldap_uid_emails = ldap_uid_emails + ldap_emails_set
        return ldap_uid_emails

    def get_resource_owner_emails_from_resource(self, sqs_message, resource):
        """Resolve 'resource-owner' to emails taken from the resource's
        contact tags, completing non-email tag values via LDAP or
        ``org_domain`` when configured.
        """
        if "resource-owner" not in sqs_message["action"].get("to", []):
            return []
        resource_owner_tag_keys = self.config.get("contact_tags", [])
        resource_owner_tag_values = get_resource_tag_targets(resource, resource_owner_tag_keys)
        explicit_emails = self.get_valid_emails_from_list(resource_owner_tag_values)

        # resolve the contact info from ldap
        ldap_emails = []
        org_emails = []
        # tag values that did not parse as emails; candidates for lookup
        non_email_ids = list(set(resource_owner_tag_values).difference(explicit_emails))
        if self.config.get("ldap_uri", False):
            ldap_emails = list(
                chain.from_iterable(
                    [self.ldap_lookup.get_email_to_addrs_from_uid(uid) for uid in non_email_ids]
                )
            )

        elif self.config.get("org_domain", False):
            self.logger.debug(
                "Using org_domain to reconstruct email addresses from contact_tags values"
            )
            org_domain = self.config.get("org_domain")
            org_emails = [uid + "@" + org_domain for uid in non_email_ids]

        return list(chain(explicit_emails, ldap_emails, org_emails))

    def get_account_emails(self, sqs_message):  # TODO: GCP-friendly
        """Resolve 'account-emails' via the config's account_id -> emails map."""
        email_list = []

        if "account-emails" not in sqs_message["action"].get("to", []):
            return []

        account_id = sqs_message.get("account_id", None)
        self.logger.debug("get_account_emails for account_id: %s.", account_id)

        if account_id is not None:
            account_email_mapping = self.config.get("account_emails", {})
            self.logger.debug(
                "get_account_emails account_email_mapping: %s.", account_email_mapping
            )
            email_list = account_email_mapping.get(account_id, [])
            self.logger.debug("get_account_emails email_list: %s.", email_list)

        return self.get_valid_emails_from_list(email_list)

    # this function returns a dictionary with a tuple of emails as the key
    # and the list of resources as the value. This helps ensure minimal emails
    # are sent, while only ever sending emails to the respective parties.
    def get_emails_to_resources_map(self, sqs_message):
        """Group resources by their full recipient set.

        Returns ``{(email, ...): [resource, ...]}`` so one message is sent
        per distinct recipient tuple rather than per resource.
        """
        # policy_to_emails always get sent to any email msg that goes out
        # these were manually set by the policy writer in notify to section
        # or it's an email from an aws event username from an ldap_lookup
        emails_to_resources_map = {}
        targets = sqs_message["action"].get("to", []) + (
            sqs_message["action"]["cc"] if "cc" in sqs_message["action"] else []
        )
        no_owner_targets = self.get_valid_emails_from_list(
            sqs_message["action"].get("owner_absent_contact", [])
        )
        # policy_to_emails includes event-owner if that's set in the policy notify to section
        policy_to_emails = self.get_valid_emails_from_list(targets)
        # if event-owner is set, and the aws_username has an ldap_lookup email
        # we add that email to the policy emails for these resource(s) on this sqs_message
        event_owner_email = self.get_event_owner_email(targets, sqs_message["event"])

        account_emails = self.get_account_emails(sqs_message)

        policy_to_emails = policy_to_emails + event_owner_email + account_emails
        for resource in sqs_message["resources"]:
            # this is the list of emails that will be sent for this resource
            resource_emails = []
            # add in any ldap emails to resource_emails
            resource_emails = resource_emails + self.get_ldap_emails_from_resource(
                sqs_message, resource
            )
            resource_emails = resource_emails + policy_to_emails
            # add in any emails from resource-owners to resource_owners
            ro_emails = self.get_resource_owner_emails_from_resource(sqs_message, resource)

            resource_emails = resource_emails + ro_emails
            # if 'owner_absent_contact' was specified in the policy and no resource
            # owner emails were found, add those addresses
            if len(ro_emails) < 1 and len(no_owner_targets) > 0:
                resource_emails = resource_emails + no_owner_targets
            # we allow multiple emails from various places, we'll unique with set to not have any
            # duplicates, and we'll also sort it so we always have the same key for other resources
            # and finally we'll make it a tuple, since that is hashable and can be a key in a dict
            resource_emails = tuple(sorted(set(resource_emails)))
            # only if there are valid emails available, add it to the map
            if resource_emails:
                emails_to_resources_map.setdefault(resource_emails, []).append(resource)
        if emails_to_resources_map == {}:
            self.logger.debug("Found no email addresses, sending no emails.")
        # eg: { ('milton@initech.com', 'peter@initech.com'): [resource1, resource2, etc] }
        return emails_to_resources_map

    def get_emails_to_mimetext_map(self, sqs_message):
        """Render one MIME message per distinct recipient tuple."""
        emails_to_resources_map = self.get_emails_to_resources_map(sqs_message)
        emails_to_mimetext_map = {}
        for to_addrs, resources in emails_to_resources_map.items():
            emails_to_mimetext_map[to_addrs] = get_mimetext_message(
                self.config, self.logger, sqs_message, resources, list(to_addrs)
            )
        # eg: { ('milton@initech.com', 'peter@initech.com'): mimetext_message }
        return emails_to_mimetext_map

    def send_c7n_email(self, sqs_message):
        """Render and send all emails for one SQS notify message.

        Delivery errors are logged (including the full mailer config for
        debugging) but deliberately not re-raised, so one bad message does
        not stop the rest of the notify pipeline.
        """
        emails_to_mimetext_map = self.get_emails_to_mimetext_map(sqs_message)
        email_to_addrs = list(emails_to_mimetext_map.keys())
        try:
            # NOTE(review): the SmtpDelivery/SendGridDelivery/GraphDelivery
            # imports are commented out in this modified copy, so the three
            # branches below would raise NameError (caught and logged by the
            # except clause). Only the SES path is expected to be used.
            # if smtp_server is set in mailer.yml, send through smtp
            if "smtp_server" in self.config:
                delivery = SmtpDelivery(self.config, self.session, self.logger)
                for emails, mimetext_msg in emails_to_mimetext_map.items():
                    delivery.send_message(message=mimetext_msg, to_addrs=list(emails))
            elif "sendgrid_api_key" in self.config:
                delivery = SendGridDelivery(self.config, self.session, self.logger)
                delivery.sendgrid_handler(sqs_message, emails_to_mimetext_map)
            elif "graph_sendmail_endpoint" in self.config:
                delivery = GraphDelivery(self.config, self.session, self.logger)
                delivery.send_message(emails_to_mimetext_map)
            # use aws ses normally.
            else:
                for emails, mimetext_msg in emails_to_mimetext_map.items():
                    self.aws_ses.send_raw_email(RawMessage={"Data": mimetext_msg.as_string()})
        except Exception as error:
            self.logger.error(
                "policy:%s account:%s sending to:%s \n\n error: %s\n\n mailer.yml: %s"
                % (
                    sqs_message["policy"],
                    sqs_message.get("account", ""),
                    email_to_addrs,
                    error,
                    self.config,
                )
            )
            # TODO shall we raise the exception and interrupt the entire notify process?
            return
        self.logger.info(
            "Sent account:%s policy:%s %s:%s email:%s to %s"
            % (
                sqs_message.get("account", ""),
                sqs_message["policy"]["name"],
                sqs_message["policy"]["resource"],
                str(len(sqs_message["resources"])),
                sqs_message["action"].get("template", "default"),
                email_to_addrs,
            )
        )