├── .github ├── ISSUE_TEMPLATE │ ├── bug-report.md │ └── general-request.md ├── labels.yaml ├── pull_request_template.md ├── release-drafter-config.yaml └── workflows │ ├── build-lambda.yaml │ ├── label-synchronization.yaml │ ├── pr-validation.yaml │ ├── release-drafter.yaml │ ├── terraform-validation.yaml │ └── update-changelog.yaml ├── .gitignore ├── .pre-commit-config.yaml ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── UPGRADING.md ├── data.tf ├── examples ├── basic-separate-file │ ├── main.tf │ └── versions.tf ├── basic │ ├── main.tf │ └── versions.tf ├── jira-integration │ ├── main.tf │ └── versions.tf ├── kms.json ├── rules.yaml └── servicenow-integration │ ├── main.tf │ └── versions.tf ├── files ├── lambda-artifacts │ ├── findings-manager-jira │ │ ├── findings_manager_jira.py │ │ ├── helpers.py │ │ ├── requirements-dev.txt │ │ └── requirements.txt │ └── securityhub-findings-manager │ │ ├── requirements-dev.txt │ │ ├── requirements.txt │ │ ├── securityhub_events.py │ │ ├── securityhub_trigger.py │ │ ├── securityhub_trigger_worker.py │ │ └── strategize_findings_manager.py ├── pkg │ ├── lambda_findings-manager-jira_python3.11.zip │ ├── lambda_findings-manager-jira_python3.12.zip │ ├── lambda_securityhub-findings-manager_python3.11.zip │ └── lambda_securityhub-findings-manager_python3.12.zip └── step-function-artifacts │ ├── securityhub-findings-manager-orchestrator-graph.png │ └── securityhub-findings-manager-orchestrator.json.tpl ├── findings_manager.tf ├── jira_lambda.tf ├── jira_step_function.tf ├── modules └── servicenow │ ├── README.md │ ├── cloudwatch.tf │ ├── eventbridge.tf │ ├── iam.tf │ ├── main.tf │ ├── sqs.tf │ ├── templates │ └── findings_filter.json.tftpl │ ├── variables.tf │ └── versions.tf ├── moved.tf ├── outputs.tf ├── s3_bucket.tf ├── servicenow.tf ├── variables.tf └── versions.tf /.github/ISSUE_TEMPLATE/bug-report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: 
Template to report a bug 4 | title: 'bug: ' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **💡 Problem description** 11 | Enter summary of the problem here. 12 | 13 | **☹️ Current Behavior** 14 | Describe what is happening. More detail is better. When code is pasted, use correct formatting. 15 | 16 | **😀 Expected Behavior** 17 | Enter any other details such as examples, links to requirements, etc. Any criteria that might help with fixing the problem. Attach screenshots if possible. More detail is better. 18 | 19 | **❓Steps to Reproduce** 20 | Enter detailed steps to reproduce here. More detail is better. 21 | 22 | **🚧 Workaround** 23 | If there is a way to work around the problem, place that information here. 24 | 25 | **💻 Environment** 26 | Anything that will help triage the bug will help. For example: 27 | - Terraform version 28 | - Module version 29 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/general-request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: General Request 3 | about: A template for a general request on this repository 4 | title: '' 5 | labels: documentation, enhancement, chore 6 | assignees: '' 7 | 8 | --- 9 | 10 | **:thought_balloon: Description of the request or enhancement** 11 | A clear and concise description of what the request is about. Please add the fitting label to this issue: 12 | - Documentation 13 | - Enhancement 14 | - Chore (not covered by something else / question) 15 | 16 | **:bookmark: Additional context** 17 | Add any other context or screenshots about the feature request here. 18 | 19 | **:100: Acceptance criteria** 20 | Enter the conditions of satisfaction here. That is, the conditions that will satisfy the user/persona that the goal/benefit/value has been achieved. 
21 | -------------------------------------------------------------------------------- /.github/labels.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: breaking 3 | color: "b60205" 4 | description: This change is not backwards compatible 5 | - name: bug 6 | color: "d93f0b" 7 | description: Something isn't working 8 | - name: documentation 9 | color: "0075ca" 10 | description: Improvements or additions to documentation 11 | - name: enhancement 12 | color: "0e8a16" 13 | description: New feature or request 14 | - name: feature 15 | color: "0e8a16" 16 | description: New feature or request 17 | - name: fix 18 | color: "d93f0b" 19 | description: Fixes a bug 20 | - name: chore 21 | color: "6b93d3" 22 | description: Task not covered by something else (e.g. refactor, CI changes, tests) 23 | - name: no-changelog 24 | color: "cccccc" 25 | description: No entry should be added to the release notes and changelog 26 | - name: security 27 | color: "5319e7" 28 | description: Solving a security issue 29 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | **:hammer_and_wrench: Summary** 2 | 3 | 4 | 5 | **:rocket: Motivation** 6 | 7 | 8 | **:pencil: Additional Information** 9 | 10 | -------------------------------------------------------------------------------- /.github/release-drafter-config.yaml: -------------------------------------------------------------------------------- 1 | name-template: "v$RESOLVED_VERSION" 2 | tag-template: "v$RESOLVED_VERSION" 3 | version-template: "$MAJOR.$MINOR.$PATCH" 4 | change-title-escapes: '\<*_&' 5 | 6 | categories: 7 | - title: "🚀 Features" 8 | labels: 9 | - "breaking" 10 | - "enhancement" 11 | - "feature" 12 | - title: "🐛 Bug Fixes" 13 | labels: 14 | - "bug" 15 | - "fix" 16 | - "security" 17 | - title: "📖 Documentation" 18 | labels: 19 | - 
"documentation" 20 | - title: "🧺 Miscellaneous" 21 | labels: 22 | - "chore" 23 | 24 | version-resolver: 25 | major: 26 | labels: 27 | - "breaking" 28 | minor: 29 | labels: 30 | - "enhancement" 31 | - "feature" 32 | patch: 33 | labels: 34 | - "bug" 35 | - "chore" 36 | - "documentation" 37 | - "fix" 38 | - "security" 39 | default: "minor" 40 | 41 | autolabeler: 42 | - label: "documentation" 43 | body: 44 | - "/documentation/" 45 | branch: 46 | - '/docs\/.+/' 47 | title: 48 | - "/documentation/i" 49 | - "/docs/i" 50 | - label: "bug" 51 | body: 52 | - "/bug/" 53 | branch: 54 | - '/bug\/.+/' 55 | - '/fix\/.+/' 56 | title: 57 | - "/bug/i" 58 | - "/fix/i" 59 | - label: "feature" 60 | branch: 61 | - '/feature\/.+/' 62 | - '/enhancement\/.+/' 63 | title: 64 | - "/feature/i" 65 | - "/feat/i" 66 | - "/enhancement/i" 67 | - label: "breaking" 68 | body: 69 | - "/breaking change/i" 70 | branch: 71 | - '/breaking\/.+/' 72 | title: 73 | - "/!:/" 74 | - "/breaking/i" 75 | - "/major/i" 76 | - label: "chore" 77 | branch: 78 | - '/chore\/.+/' 79 | title: 80 | - "/chore/i" 81 | 82 | exclude-contributors: 83 | - "github-actions[bot]" 84 | 85 | exclude-labels: 86 | - "no-changelog" 87 | 88 | template: | 89 | # What's Changed 90 | 91 | $CHANGES 92 | 93 | **Full Changelog**: https://github.com/$OWNER/$REPOSITORY/compare/$PREVIOUS_TAG...v$RESOLVED_VERSION 94 | -------------------------------------------------------------------------------- /.github/workflows/build-lambda.yaml: -------------------------------------------------------------------------------- 1 | name: Build and Package Lambda(s) 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request: 6 | branches: 7 | - main 8 | - master 9 | paths: 10 | - files/lambda-artifacts/** 11 | 12 | permissions: 13 | contents: write 14 | pull-requests: write 15 | 16 | env: 17 | LAMBDA_DIR: "files/lambda-artifacts" 18 | PKG_DIR: "files/pkg" 19 | 20 | jobs: 21 | pkg: 22 | runs-on: ubuntu-latest 23 | 24 | strategy: 25 | matrix: 26 | python-version: ["3.11", 
"3.12"] 27 | lambda-name: ["securityhub-findings-manager", "findings-manager-jira"] 28 | 29 | steps: 30 | - name: Checkout repository 31 | uses: actions/checkout@v4 32 | 33 | - name: Set up Python 34 | uses: actions/setup-python@v5 35 | with: 36 | python-version: ${{ matrix.python-version }} 37 | 38 | - name: Install dependencies 39 | run: | 40 | cd $LAMBDA_DIR/${{ matrix.lambda-name }} 41 | python -m venv venv 42 | source venv/bin/activate 43 | pip install --upgrade pip 44 | pip install -r requirements.txt 45 | 46 | - name: Create Lambda deployment package 47 | run: | 48 | mkdir -p $PKG_DIR 49 | 50 | # Navigate to site-packages 51 | cd $LAMBDA_DIR/${{ matrix.lambda-name }}/venv/lib/python${{ matrix.python-version }}/site-packages 52 | 53 | # Removing nonessential files 'https://github.com/aws-powertools/powertools-lambda-layer-cdk/blob/d24716744f7d1f37617b4998c992c4c067e19e64/layer/Python/Dockerfile' 54 | rm -rf boto* s3transfer* *dateutil* urllib3* six* jmespath* 55 | find . -name '*.so' -type f -exec strip "{}" \; 56 | find . -wholename "*/tests/*" -type f -delete 57 | find . -regex '^.*\(__pycache__\|\.py[co]\)$' -delete 58 | 59 | # Package the lambda function. Package the dependencies and then add the source code to the created zip to ensure a flat archive structure. 60 | zip -r ../../../../../../../$PKG_DIR/lambda_${{ matrix.lambda-name }}_python${{ matrix.python-version }}.zip . 61 | cd ../../../../ 62 | zip -g ../../../$PKG_DIR/lambda_${{ matrix.lambda-name }}_python${{ matrix.python-version }}.zip -r * --exclude venv/\* 63 | 64 | - name: Upload artifact 65 | uses: actions/upload-artifact@v4 66 | with: 67 | name: lambda_${{ matrix.lambda-name }}_python${{ matrix.python-version }}.zip 68 | path: files/pkg/lambda_${{ matrix.lambda-name }}_python${{ matrix.python-version }}.zip 69 | 70 | # Download all artifacts and commit them to the repository. This seperate job prevents a push to the repository per zip file due to the matrix. 
71 | push: 72 | runs-on: ubuntu-latest 73 | needs: pkg 74 | 75 | steps: 76 | - name: Checkout repository 77 | uses: actions/checkout@v4 78 | 79 | - name: Remove old pkg artifacts 80 | run: rm -rf files/pkg/ 81 | 82 | - name: Download all artifacts 83 | uses: actions/download-artifact@v4 84 | with: 85 | path: files/pkg/ 86 | merge-multiple: true 87 | 88 | - name: Commit deployment packages 89 | uses: stefanzweifel/git-auto-commit-action@v5 90 | with: 91 | commit_message: "Add all Lambda deployment packages" 92 | file_pattern: "files/pkg/*.zip" 93 | -------------------------------------------------------------------------------- /.github/workflows/label-synchronization.yaml: -------------------------------------------------------------------------------- 1 | # DO NOT CHANGE THIS FILE DIRECTLY 2 | # Source: https://github.com/schubergphilis/mcaf-github-workflows 3 | 4 | name: label-synchronization 5 | on: 6 | workflow_dispatch: 7 | push: 8 | branches: 9 | - main 10 | - master 11 | paths: 12 | - .github/labels.yaml 13 | - .github/workflows/label-synchronization.yaml 14 | 15 | permissions: 16 | # write permission is required to edit issue labels 17 | issues: write 18 | 19 | jobs: 20 | build: 21 | runs-on: ubuntu-latest 22 | steps: 23 | - name: Checkout code 24 | uses: actions/checkout@v4 25 | 26 | - name: Synchronize labels 27 | uses: crazy-max/ghaction-github-labeler@v5 28 | with: 29 | dry-run: false 30 | github-token: ${{ secrets.GITHUB_TOKEN }} 31 | skip-delete: false 32 | yaml-file: .github/labels.yaml 33 | -------------------------------------------------------------------------------- /.github/workflows/pr-validation.yaml: -------------------------------------------------------------------------------- 1 | # DO NOT CHANGE THIS FILE DIRECTLY 2 | # Source: https://github.com/schubergphilis/mcaf-github-workflows 3 | 4 | name: "pr-validation" 5 | 6 | on: 7 | pull_request: 8 | 9 | permissions: 10 | checks: write 11 | contents: read 12 | pull-requests: write 13 | 14 | concurrency:
15 | group: ${{ github.workflow }}-${{ github.event.pull_request.number }} 16 | cancel-in-progress: true 17 | 18 | jobs: 19 | autolabeler: 20 | runs-on: ubuntu-latest 21 | steps: 22 | - uses: release-drafter/release-drafter@v6 23 | with: 24 | config-name: release-drafter-config.yaml 25 | env: 26 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 27 | 28 | title-checker: 29 | runs-on: ubuntu-latest 30 | steps: 31 | - uses: amannn/action-semantic-pull-request@v5 32 | id: lint_pr_title 33 | with: 34 | types: | 35 | breaking 36 | bug 37 | chore 38 | docs 39 | documentation 40 | enhancement 41 | feat 42 | feature 43 | fix 44 | security 45 | requireScope: false 46 | ignoreLabels: | 47 | skip-changelog 48 | env: 49 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 50 | 51 | - uses: marocchino/sticky-pull-request-comment@v2 52 | # When the previous steps fails, the workflow would stop. By adding this 53 | # condition you can continue the execution with the populated error message. 54 | if: always() && (steps.lint_pr_title.outputs.error_message != null) 55 | with: 56 | header: pr-title-lint-error 57 | message: | 58 | Hey there and thank you for opening this pull request! 👋🏼 59 | 60 | We require pull request titles to follow the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/) and it looks like your proposed title needs to be adjusted. 61 | 62 | Examples for valid PR titles: 63 | feat(ui): Add button component. 64 | fix: Correct typo. 
65 | _type(scope): subject._ 66 | 67 | Adding a scope is optional 68 | 69 | Details: 70 | ``` 71 | ${{ steps.lint_pr_title.outputs.error_message }} 72 | ``` 73 | 74 | # Delete a previous comment when the issue has been resolved 75 | - if: ${{ steps.lint_pr_title.outputs.error_message == null }} 76 | uses: marocchino/sticky-pull-request-comment@v2 77 | with: 78 | header: pr-title-lint-error 79 | delete: true 80 | 81 | label-checker: 82 | needs: autolabeler 83 | runs-on: ubuntu-latest 84 | steps: 85 | - uses: docker://agilepathway/pull-request-label-checker:v1.6.55 86 | id: lint_pr_labels 87 | with: 88 | any_of: breaking,bug,chore,documentation,enhancement,feature,fix,security 89 | repo_token: ${{ secrets.GITHUB_TOKEN }} 90 | 91 | - uses: marocchino/sticky-pull-request-comment@v2 92 | # When the previous steps fails, the workflow would stop. By adding this 93 | # condition you can continue the execution with the populated error message. 94 | if: always() && (steps.lint_pr_labels.outputs.label_check == 'failure') 95 | with: 96 | header: pr-labels-lint-error 97 | message: | 98 | Hey there and thank you for opening this pull request! 
👋🏼 99 | 100 | The PR needs to have at least one of the following labels: 101 | 102 | - breaking 103 | - bug 104 | - chore 105 | - documentation 106 | - enhancement 107 | - feature 108 | - fix 109 | - security 110 | 111 | # Delete a previous comment when the issue has been resolved 112 | - if: ${{ steps.lint_pr_labels.outputs.label_check == 'success' }} 113 | uses: marocchino/sticky-pull-request-comment@v2 114 | with: 115 | header: pr-labels-lint-error 116 | delete: true 117 | -------------------------------------------------------------------------------- /.github/workflows/release-drafter.yaml: -------------------------------------------------------------------------------- 1 | # DO NOT CHANGE THIS FILE DIRECTLY 2 | # Source: https://github.com/schubergphilis/mcaf-github-workflows 3 | 4 | name: "release-drafter" 5 | 6 | on: 7 | push: 8 | branches: 9 | - main 10 | - master 11 | paths-ignore: 12 | - .github/** 13 | - .gitignore 14 | - .pre-commit-config.yaml 15 | - CHANGELOG.md 16 | - CONTRIBUTING.md 17 | - LICENSE 18 | 19 | permissions: 20 | # write permission is required to create a github release 21 | contents: write 22 | 23 | jobs: 24 | draft: 25 | runs-on: ubuntu-latest 26 | steps: 27 | - uses: release-drafter/release-drafter@v6 28 | with: 29 | publish: false 30 | prerelease: false 31 | config-name: release-drafter-config.yaml 32 | env: 33 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 34 | -------------------------------------------------------------------------------- /.github/workflows/terraform-validation.yaml: -------------------------------------------------------------------------------- 1 | # DO NOT CHANGE THIS FILE DIRECTLY 2 | # Source: https://github.com/schubergphilis/mcaf-github-workflows 3 | 4 | name: "terraform" 5 | 6 | on: 7 | pull_request: 8 | 9 | permissions: 10 | contents: write 11 | pull-requests: write 12 | 13 | env: 14 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 15 | TF_IN_AUTOMATION: 1 16 | 17 | jobs: 18 | fmt-lint-validate: 19 | runs-on: 
ubuntu-latest 20 | steps: 21 | - name: Checkout code 22 | uses: actions/checkout@v4 23 | 24 | - name: Setup Terraform 25 | uses: hashicorp/setup-terraform@v3 26 | 27 | - name: Setup Terraform Linters 28 | uses: terraform-linters/setup-tflint@v4 29 | with: 30 | github_token: ${{ github.token }} 31 | 32 | - name: Terraform Format 33 | id: fmt 34 | run: terraform fmt -check -recursive 35 | 36 | - name: Terraform Lint 37 | id: lint 38 | run: | 39 | echo "Checking ." 40 | tflint --format compact 41 | 42 | for d in examples/*/; do 43 | echo "Checking ${d} ..." 44 | tflint --chdir=$d --format compact 45 | done 46 | 47 | - name: Terraform Validate 48 | id: validate 49 | if: ${{ !vars.SKIP_TERRAFORM_VALIDATE }} 50 | run: | 51 | for d in examples/*/; do 52 | echo "Checking ${d} ..." 53 | terraform -chdir=$d init 54 | terraform -chdir=$d validate -no-color 55 | done 56 | env: 57 | AWS_DEFAULT_REGION: eu-west-1 58 | 59 | - name: Terraform Test 60 | id: test 61 | if: ${{ !vars.SKIP_TERRAFORM_TESTS }} 62 | run: | 63 | terraform init 64 | terraform test 65 | 66 | - uses: actions/github-script@v7 67 | if: github.event_name == 'pull_request' || always() 68 | with: 69 | github-token: ${{ secrets.GITHUB_TOKEN }} 70 | script: | 71 | // 1. Retrieve existing bot comments for the PR 72 | const { data: comments } = await github.rest.issues.listComments({ 73 | owner: context.repo.owner, 74 | repo: context.repo.repo, 75 | issue_number: context.issue.number, 76 | }) 77 | const botComment = comments.find(comment => { 78 | return comment.user.type === 'Bot' && comment.body.includes('Terraform Format and Style') 79 | }) 80 | 81 | // 2. Prepare format of the comment 82 | const output = `#### Terraform Format and Style 🖌\`${{ steps.fmt.outcome }}\` 83 | #### Terraform Initialization ⚙️\`${{ steps.init.outcome }}\` 84 | #### Terraform Lint 📖\`${{ steps.lint.outcome }}\` 85 | #### Terraform Validation 🤖\`${{ steps.validate.outcome }}\` 86 |
Validation Output 87 | 88 | \`\`\`\n 89 | ${{ steps.validate.outputs.stdout }} 90 | \`\`\` 91 | 92 |
`; 93 | 94 | // 3. If we have a comment, update it, otherwise create a new one 95 | if (botComment) { 96 | github.rest.issues.updateComment({ 97 | owner: context.repo.owner, 98 | repo: context.repo.repo, 99 | comment_id: botComment.id, 100 | body: output 101 | }) 102 | } else { 103 | github.rest.issues.createComment({ 104 | issue_number: context.issue.number, 105 | owner: context.repo.owner, 106 | repo: context.repo.repo, 107 | body: output 108 | }) 109 | } 110 | 111 | docs: 112 | runs-on: ubuntu-latest 113 | steps: 114 | - name: Checkout code 115 | uses: actions/checkout@v4 116 | with: 117 | ref: ${{ github.event.pull_request.head.ref }} 118 | 119 | - name: Render terraform docs inside the README.md and push changes back to PR branch 120 | uses: terraform-docs/gh-actions@v1.3.0 121 | with: 122 | args: --sort-by required 123 | git-commit-message: "docs(readme): update module usage" 124 | git-push: true 125 | output-file: README.md 126 | output-method: inject 127 | working-dir: . 128 | continue-on-error: true # added this to prevent a PR from a remote fork failing the workflow 129 | 130 | # If the recursive flag is set to true, the action will not update the main README.md file. 131 | # Therefore we need to run the action twice, once for the root module and once for the modules directory 132 | docs-modules: 133 | runs-on: ubuntu-latest 134 | steps: 135 | - name: Checkout code 136 | uses: actions/checkout@v4 137 | with: 138 | ref: ${{ github.event.pull_request.head.ref }} 139 | 140 | - name: Render terraform docs inside the README.md and push changes back to PR branch 141 | uses: terraform-docs/gh-actions@v1.3.0 142 | with: 143 | args: --sort-by required 144 | git-commit-message: "docs(readme): update module usage" 145 | git-push: true 146 | output-file: README.md 147 | output-method: inject 148 | recursive-path: modules 149 | recursive: true 150 | working-dir: . 
151 | continue-on-error: true # added this to prevent a PR from a remote fork failing the workflow 152 | 153 | checkov: 154 | runs-on: ubuntu-latest 155 | steps: 156 | - name: Check out code 157 | uses: actions/checkout@v4 158 | 159 | - name: Run Checkov 160 | uses: bridgecrewio/checkov-action@v12 161 | with: 162 | container_user: 1000 163 | directory: "/" 164 | download_external_modules: false 165 | framework: terraform 166 | output_format: sarif 167 | quiet: true 168 | skip_check: "CKV_GIT_5,CKV_GLB_1,CKV_TF_1" 169 | soft_fail: false 170 | skip_path: "examples/" 171 | 172 | ### SKIP REASON ### 173 | # Check | Description | Reason 174 | 175 | # CKV_GIT_5 | Ensure GitHub pull requests have at least 2 approvals | We strive for at least 1 approval 176 | # CKV_GLB_1 | Ensure at least two approving reviews are required to merge a GitLab MR | We strive for at least 1 approval 177 | # CKV_TF_1 | Ensure Terraform module sources use a commit hash | We think this check is too restrictive and that versioning should be preferred over commit hash 178 | -------------------------------------------------------------------------------- /.github/workflows/update-changelog.yaml: -------------------------------------------------------------------------------- 1 | # DO NOT CHANGE THIS FILE DIRECTLY 2 | # Source: https://github.com/schubergphilis/mcaf-github-workflows 3 | 4 | name: "update-changelog" 5 | 6 | on: 7 | release: 8 | types: 9 | - published 10 | 11 | permissions: 12 | contents: write 13 | 14 | jobs: 15 | update: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout code 19 | uses: actions/checkout@v4 20 | with: 21 | token: ${{ secrets.MCAF_GITHUB_TOKEN }} 22 | 23 | - name: Update Changelog 24 | uses: stefanzweifel/changelog-updater-action@v1 25 | with: 26 | latest-version: ${{ github.event.release.tag_name }} 27 | release-notes: ${{ github.event.release.body }} 28 | 29 | - name: Commit updated Changelog 30 | uses: stefanzweifel/git-auto-commit-action@v5 31 | with: 32 
| branch: ${{ github.event.repository.default_branch }} 33 | commit_message: "docs(changelog): update changelog" 34 | file_pattern: CHANGELOG.md 35 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # This file is managed by mcaf-github-workflows 2 | 3 | # CheckOv pre-commit external modules path 4 | **/.external_modules/* 5 | 6 | # Local .terraform directories 7 | **/.terraform/* 8 | 9 | # Local Lambda function package directories 10 | **/builds/* 11 | 12 | # Terraform locks 13 | # This is a module, not a stand-alone deployment 14 | .terraform.lock.hcl 15 | 16 | # Ignore CLI configuration files 17 | .terraformrc 18 | terraform.rc 19 | 20 | # Crash log files 21 | crash.log 22 | crash.*.log 23 | 24 | # .tfstate files 25 | *.tfstate 26 | *.tfstate.* 27 | 28 | # .tfvars files 29 | # These should not be part of version control as they are data points which are potentially sensitive and subject to change depending on the environment. 
30 | *.tfvars 31 | *.tfvars.json 32 | 33 | # override files 34 | # These should not be part of version control as they are usually used to override resources locally 35 | override.tf 36 | override.tf.json 37 | *_override.tf 38 | *_override.tf.json 39 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | # DO NOT CHANGE THIS FILE DIRECTLY 2 | # Source: https://github.com/schubergphilis/mcaf-github-workflows 3 | default_stages: [pre-commit] 4 | repos: 5 | - repo: https://github.com/pre-commit/pre-commit-hooks 6 | rev: v5.0.0 7 | hooks: 8 | - id: check-json 9 | - id: check-merge-conflict 10 | - id: trailing-whitespace 11 | - id: end-of-file-fixer 12 | - id: check-yaml 13 | - id: check-added-large-files 14 | - id: pretty-format-json 15 | args: 16 | - --autofix 17 | - id: detect-aws-credentials 18 | args: 19 | - --allow-missing-credentials 20 | - id: detect-private-key 21 | - repo: https://github.com/antonbabenko/pre-commit-terraform 22 | rev: v1.98.1 23 | hooks: 24 | - id: terraform_fmt 25 | - id: terraform_tflint 26 | - id: terraform_docs 27 | - id: terraform_validate 28 | - repo: https://github.com/bridgecrewio/checkov.git 29 | rev: 3.2.388 30 | hooks: 31 | - id: checkov 32 | verbose: false 33 | args: 34 | - --download-external-modules 35 | - "true" 36 | - --quiet 37 | - --compact 38 | - --skip-check 39 | - CKV_GIT_5,CKV_GLB_1,CKV_TF_1 40 | - --skip-path 41 | - examples/* 42 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | ## Coding Guidelines 4 | 5 | - The terraform language has some [style conventions](https://developer.hashicorp.com/terraform/language/syntax/style) which must be followed for consistency between files and modules written by different teams. 
6 | 7 | ## Opening a pull request 8 | 9 | - We require pull request titles to follow the [conventional commits specification](https://www.conventionalcommits.org/en/v1.0.0/) 10 | 11 | - Labels are automatically added to your PR based on certain keywords in the `title`, `body`, and `branch` . You are able to manually add or remove labels from your PR, the following labels are allowed: `breaking`, `enhancement`, `feature`, `bug`, `fix`, `security`, `documentation`. 12 | 13 | ## Release flow 14 | 15 | 1. Every time a PR is merged, a draft release note is created or updated to add an entry for this PR. The release version is automatically incremented based on the labels specified. 16 | 17 | 2. When you are ready to publish the release, you can use the drafted release note to do so. `MCAF Contributors` are able to publish releases. If you are an `MCAF Contributor` and want to publish a drafted release: 18 | - Browse to the release page 19 | - Edit the release you want to publish (click on the pencil) 20 | - Click `Update release` (the green button at the bottom of the page) 21 | 22 | If a PR should not be added to the release notes and changelog, add the label `no-changelog` to your PR. 23 | 24 | ## Local Development 25 | 26 | To ease local development, [pre-commit](https://pre-commit.com/) configuration has been added to the repository. Pre-commit is useful for identifying simple issues before creating a PR: 27 | 28 | To use it, follow these steps: 29 | 30 | 1. Installation: 31 | - Using Brew: `brew install pre-commit` 32 | - Using Python: `pip3 install pre-commit --upgrade` 33 | - Using Conda: `conda install -c conda-forge pre-commit` 34 | 35 | 2. Run the pre-commit hooks against all the files (the first time run might take a few minutes): 36 | `pre-commit run -a` 37 | 38 | 3.
(optional) Install the pre-commit hooks to run before each commit: 39 | `pre-commit install` 40 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Security Hub Findings Manager 2 | Automated scanning and finding consolidation are essential for evaluating your security posture. AWS Security Hub is the native solution for this task within AWS. The number of findings it generates can initially be overwhelming. Additionally, some findings may be irrelevant or have less urgency for your specific situation. 3 | 4 | The Security Hub Findings Manager is a framework designed to automatically manage findings recorded by AWS Security Hub, including its [AWS service integrations](https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-internal-providers.html#internal-integrations-summary), based on a configurable rules list. Its primary aim is to reduce noise and help you prioritize genuine security issues. 5 | 6 | ### Supported Functionality: 7 | * Suppressing findings to manage irrelevant or less urgent issues effectively. 
8 | * Automated ticket creation in Jira and ServiceNow for non-suppressed findings exceeding a customizable severity threshold. 9 | 10 | > [!TIP] 11 | > Deploy this module in the Audit/Security Account of an AWS reference multi-account setup. This setup receives events from all child accounts, providing a comprehensive overview of the organization's security posture. 12 | 13 | > [!IMPORTANT] 14 | > This module relies heavily on [awsfindingsmanagerlib](https://github.com/schubergphilis/awsfindingsmanagerlib/tree/main). For detailed suppression logic information, refer to the library's [documentation](https://awsfindingsmanagerlib.readthedocs.io/en/latest/). 15 | 16 | ## Components 17 | Here's a high-level overview of the components. For more details, see [Resources](#resources) and [Modules](#modules). 18 | 19 | * Rules backend (currently only S3 supported). 20 | * Three Lambda Functions: 21 | * Security Hub Findings Manager Events: triggered by EventBridge for new Security Hub findings. 22 | * Security Hub Findings Manager Triggers: responds to changes in the S3 backend rules list, putting suppression rules on SQS. 23 | * Security Hub Findings Manager Trigger Worker: activated by suppression rules on SQS. 24 | * Infrastructure supporting Lambda functions (IAM role, EventBridge integration, S3 Trigger Notifications, SQS queue). 25 | * Optional Jira integration components. 26 | * Optional ServiceNow integration components. 27 | 28 | ## Deployment Modes 29 | Three deployment modes are available: 30 | 31 | > [!IMPORTANT] 32 | > During the first deployment, be aware that S3 triggers might take time to become fully functional. Re-create the rules object later to apply rules to your findings history. 33 | 34 | ### Default (Without Jira & ServiceNow Integration) 35 | Deploys two Lambda functions: 36 | * `securityhub-findings-manager-events`: target for the EventBridge rule `Security Hub Findings - Imported` events. 
37 | * `securityhub-findings-manager-trigger`: target for the S3 PutObject trigger. 38 | 39 | ### With Jira Integration 40 | * Enable by setting the variable `jira_integration` to `true` (default = false). 41 | * Deploys an additional Jira lambda function and a Step function for orchestration, triggered by an EventBridge rule. 42 | * Non-suppressed findings with severity above a threshold result in ticket creation and workflow status update from `NEW` to `NOTIFIED`. 43 | * Auto-closing can be activated with `jira_integration.autoclose_enabled` (default = false). Using the issue number in the finding note, the function transitions issues using `jira_integration.autoclose_transition_name` and `jira_integration.autoclose_comment`. Criteria for being forwarded for automatic ticket closure are: 44 | * Workflow Status "RESOLVED" 45 | * Workflow Status "NOTIFIED" and one of: 46 | * Record State "ARCHIVED" 47 | * Compliance State "PASSED" or "NOT_AVAILABLE" 48 | 49 | Only findings with a normalized severity level above the threshold (default `70`) initiate Jira integration. 50 | 51 | [Normalized severity levels](https://docs.aws.amazon.com/securityhub/1.0/APIReference/API_Severity.html): 52 | * 0 - INFORMATIONAL 53 | * 1–39 - LOW 54 | * 40–69 - MEDIUM 55 | * 70–89 - HIGH 56 | * 90–100 - CRITICAL 57 | 58 | ![Step Function Graph](files/step-function-artifacts/securityhub-findings-manager-orchestrator-graph.png) 59 | 60 | ### With ServiceNow Integration 61 | [Reference design](https://aws.amazon.com/blogs/security/how-to-set-up-two-way-integration-between-aws-security-hub-and-servicenow) 62 | * Enable by setting the variable `servicenow_integration` to `true` (default = false). 63 | * Deploys resources supporting ServiceNow integration, including an SQS Queue, EventBridge Rule, and required IAM user. 64 | * EventBridge triggers events for Security Hub, placing them on an SQS Queue. 65 | * Configure which findings are forwarded via `severity_label_filter`. 
66 | * ServiceNow retrieves events from the SQS queue using `SCSyncUser` credentials. 67 | 68 | > [!WARNING] 69 | > Generate the `access_key` & `secret_access_key` in the AWS Console. If you prefer Terraform to handle this (and output them), set the variable `create_servicenow_access_keys` to `true` (default = false). 70 | 71 | ## Formatting the `rules.yaml` File 72 | An example file is available under `examples/rules.yaml`. For detailed information, see the Rule Syntax section in the [awsfindingsmanagerlib documentation](https://awsfindingsmanagerlib.readthedocs.io/en/latest/#rule-syntax). 73 | 74 | ## Local Development on Python Code 75 | A lambda layer provides aws-lambda-powertools. To have these dependencies locally, use `requirements-dev.txt` from the source code. 76 | 77 | 78 | ## Requirements 79 | 80 | | Name | Version | 81 | |------|---------| 82 | | [terraform](#requirement\_terraform) | >= 1.3.0 | 83 | | [archive](#requirement\_archive) | >= 2.0 | 84 | | [aws](#requirement\_aws) | >= 4.9 | 85 | | [external](#requirement\_external) | >= 2.0 | 86 | | [local](#requirement\_local) | >= 1.0 | 87 | | [null](#requirement\_null) | >= 2.0 | 88 | 89 | ## Providers 90 | 91 | | Name | Version | 92 | |------|---------| 93 | | [aws](#provider\_aws) | >= 4.9 | 94 | 95 | ## Modules 96 | 97 | | Name | Source | Version | 98 | |------|--------|---------| 99 | | [findings\_manager\_bucket](#module\_findings\_manager\_bucket) | schubergphilis/mcaf-s3/aws | ~> 0.14.1 | 100 | | [findings\_manager\_events\_lambda](#module\_findings\_manager\_events\_lambda) | schubergphilis/mcaf-lambda/aws | ~> 1.4.1 | 101 | | [findings\_manager\_trigger\_lambda](#module\_findings\_manager\_trigger\_lambda) | schubergphilis/mcaf-lambda/aws | ~> 1.4.1 | 102 | | [findings\_manager\_worker\_lambda](#module\_findings\_manager\_worker\_lambda) | schubergphilis/mcaf-lambda/aws | ~> 1.4.1 | 103 | | [jira\_eventbridge\_iam\_role](#module\_jira\_eventbridge\_iam\_role) | schubergphilis/mcaf-role/aws | 
~> 0.3.2 | 104 | | [jira\_lambda](#module\_jira\_lambda) | schubergphilis/mcaf-lambda/aws | ~> 1.4.1 | 105 | | [jira\_step\_function\_iam\_role](#module\_jira\_step\_function\_iam\_role) | schubergphilis/mcaf-role/aws | ~> 0.3.2 | 106 | | [servicenow\_integration](#module\_servicenow\_integration) | ./modules/servicenow/ | n/a | 107 | 108 | ## Resources 109 | 110 | | Name | Type | 111 | |------|------| 112 | | [aws_cloudwatch_event_rule.securityhub_findings_events](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource | 113 | | [aws_cloudwatch_event_target.findings_manager_events_lambda](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource | 114 | | [aws_cloudwatch_event_target.jira_orchestrator](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource | 115 | | [aws_cloudwatch_log_group.log_group_jira_orchestrator_sfn](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group) | resource | 116 | | [aws_lambda_event_source_mapping.sqs_to_worker](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_event_source_mapping) | resource | 117 | | [aws_lambda_permission.eventbridge_invoke_findings_manager_events_lambda](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_permission) | resource | 118 | | [aws_lambda_permission.s3_invoke_findings_manager_trigger_lambda](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/lambda_permission) | resource | 119 | | [aws_s3_bucket_notification.findings_manager_trigger](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_notification) | resource | 120 | | [aws_s3_object.findings_manager_lambdas_deployment_package](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_object) | resource 
| 121 | | [aws_s3_object.jira_lambda_deployment_package](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_object) | resource | 122 | | [aws_s3_object.rules](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_object) | resource | 123 | | [aws_sfn_state_machine.jira_orchestrator](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sfn_state_machine) | resource | 124 | | [aws_sqs_queue.dlq_for_findings_manager_rule_q](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue) | resource | 125 | | [aws_sqs_queue.findings_manager_rule_q](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue) | resource | 126 | | [aws_sqs_queue_policy.findings_manager_rule_sqs_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue_policy) | resource | 127 | | [aws_sqs_queue_redrive_allow_policy.dead_letter_allow_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue_redrive_allow_policy) | resource | 128 | | [aws_sqs_queue_redrive_policy.redrive_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue_redrive_policy) | resource | 129 | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | 130 | | [aws_iam_policy_document.findings_manager_lambda_iam_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | 131 | | [aws_iam_policy_document.findings_manager_rule_sqs_policy_doc](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | 132 | | [aws_iam_policy_document.jira_eventbridge_iam_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | 133 | | 
[aws_iam_policy_document.jira_lambda_iam_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | 134 | | [aws_iam_policy_document.jira_step_function_iam_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | 135 | | [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | 136 | 137 | ## Inputs 138 | 139 | | Name | Description | Type | Default | Required | 140 | |------|-------------|------|---------|:--------:| 141 | | [kms\_key\_arn](#input\_kms\_key\_arn) | The ARN of the KMS key used to encrypt the resources | `string` | n/a | yes | 142 | | [s3\_bucket\_name](#input\_s3\_bucket\_name) | The name for the S3 bucket which will be created for storing the function's deployment package | `string` | n/a | yes | 143 | | [findings\_manager\_events\_lambda](#input\_findings\_manager\_events\_lambda) | Findings Manager Lambda settings - Manage Security Hub findings in response to EventBridge events |
object({
name = optional(string, "securityhub-findings-manager-events")
log_level = optional(string, "ERROR")
memory_size = optional(number, 256)
timeout = optional(number, 300)

security_group_egress_rules = optional(list(object({
cidr_ipv4 = optional(string)
cidr_ipv6 = optional(string)
description = string
from_port = optional(number, 0)
ip_protocol = optional(string, "-1")
prefix_list_id = optional(string)
referenced_security_group_id = optional(string)
to_port = optional(number, 0)
})), [])
})
| `{}` | no | 144 | | [findings\_manager\_trigger\_lambda](#input\_findings\_manager\_trigger\_lambda) | Findings Manager Lambda settings - Manage Security Hub findings in response to S3 file upload triggers |
object({
name = optional(string, "securityhub-findings-manager-trigger")
log_level = optional(string, "ERROR")
memory_size = optional(number, 256)
timeout = optional(number, 300)

security_group_egress_rules = optional(list(object({
cidr_ipv4 = optional(string)
cidr_ipv6 = optional(string)
description = string
from_port = optional(number, 0)
ip_protocol = optional(string, "-1")
prefix_list_id = optional(string)
referenced_security_group_id = optional(string)
to_port = optional(number, 0)
})), [])
})
| `{}` | no | 145 | | [findings\_manager\_worker\_lambda](#input\_findings\_manager\_worker\_lambda) | Findings Manager Lambda settings - Manage Security Hub findings in response to SQS trigger |
object({
name = optional(string, "securityhub-findings-manager-worker")
log_level = optional(string, "ERROR")
memory_size = optional(number, 256)
timeout = optional(number, 900)

security_group_egress_rules = optional(list(object({
cidr_ipv4 = optional(string)
cidr_ipv6 = optional(string)
description = string
from_port = optional(number, 0)
ip_protocol = optional(string, "-1")
prefix_list_id = optional(string)
referenced_security_group_id = optional(string)
to_port = optional(number, 0)
})), [])
})
| `{}` | no | 146 | | [jira\_eventbridge\_iam\_role\_name](#input\_jira\_eventbridge\_iam\_role\_name) | The name of the role which will be assumed by EventBridge rules for Jira integration | `string` | `"SecurityHubFindingsManagerJiraEventBridge"` | no | 147 | | [jira\_integration](#input\_jira\_integration) | Findings Manager - Jira integration settings |
object({
enabled = optional(bool, false)
autoclose_enabled = optional(bool, false)
autoclose_comment = optional(string, "Security Hub finding has been resolved. Autoclosing the issue.")
autoclose_transition_name = optional(string, "Close Issue")
credentials_secret_arn = string
exclude_account_ids = optional(list(string), [])
finding_severity_normalized_threshold = optional(number, 70)
issue_custom_fields = optional(map(string), {})
issue_type = optional(string, "Security Advisory")
project_key = string

security_group_egress_rules = optional(list(object({
cidr_ipv4 = optional(string)
cidr_ipv6 = optional(string)
description = string
from_port = optional(number, 0)
ip_protocol = optional(string, "-1")
prefix_list_id = optional(string)
referenced_security_group_id = optional(string)
to_port = optional(number, 0)
})), [])

lambda_settings = optional(object({
name = optional(string, "securityhub-findings-manager-jira")
log_level = optional(string, "INFO")
memory_size = optional(number, 256)
timeout = optional(number, 60)
}), {
name = "securityhub-findings-manager-jira"
iam_role_name = "SecurityHubFindingsManagerJiraLambda"
log_level = "INFO"
memory_size = 256
timeout = 60
security_group_egress_rules = []
})

step_function_settings = optional(object({
log_level = optional(string, "ERROR")
retention = optional(number, 90)
}), {
log_level = "ERROR"
retention = 90
})

})
|
{
"credentials_secret_arn": null,
"enabled": false,
"project_key": null
}
| no | 148 | | [jira\_step\_function\_iam\_role\_name](#input\_jira\_step\_function\_iam\_role\_name) | The name of the role which will be assumed by AWS Step Function for Jira integration | `string` | `"SecurityHubFindingsManagerJiraStepFunction"` | no | 149 | | [lambda\_runtime](#input\_lambda\_runtime) | The version of Python to use for the Lambda functions | `string` | `"python3.12"` | no | 150 | | [rules\_filepath](#input\_rules\_filepath) | Pathname to the file that stores the manager rules | `string` | `""` | no | 151 | | [rules\_s3\_object\_name](#input\_rules\_s3\_object\_name) | The S3 object containing the rules to be applied to Security Hub findings manager | `string` | `"rules.yaml"` | no | 152 | | [servicenow\_integration](#input\_servicenow\_integration) | ServiceNow integration settings |
object({
enabled = optional(bool, false)
create_access_keys = optional(bool, false)
cloudwatch_retention_days = optional(number, 365)
severity_label_filter = optional(list(string), [])
})
|
{
"enabled": false
}
| no | 153 | | [subnet\_ids](#input\_subnet\_ids) | The subnet ids where the Lambda functions needs to run | `list(string)` | `null` | no | 154 | | [tags](#input\_tags) | A mapping of tags to assign to the resources | `map(string)` | `{}` | no | 155 | 156 | ## Outputs 157 | 158 | | Name | Description | 159 | |------|-------------| 160 | | [findings\_manager\_events\_lambda\_sg\_id](#output\_findings\_manager\_events\_lambda\_sg\_id) | This will output the security group id attached to the lambda\_findings\_manager\_events Lambda. This can be used to tune ingress and egress rules. | 161 | | [findings\_manager\_trigger\_lambda\_sg\_id](#output\_findings\_manager\_trigger\_lambda\_sg\_id) | This will output the security group id attached to the lambda\_findings\_manager\_trigger Lambda. This can be used to tune ingress and egress rules. | 162 | | [findings\_manager\_worker\_lambda\_sg\_id](#output\_findings\_manager\_worker\_lambda\_sg\_id) | This will output the security group id attached to the lambda\_findings\_manager\_worker Lambda. This can be used to tune ingress and egress rules. | 163 | | [jira\_lambda\_sg\_id](#output\_jira\_lambda\_sg\_id) | This will output the security group id attached to the jira\_lambda Lambda. This can be used to tune ingress and egress rules. | 164 | 165 | -------------------------------------------------------------------------------- /UPGRADING.md: -------------------------------------------------------------------------------- 1 | # Upgrading Notes 2 | 3 | This document captures required refactoring on your part when upgrading to a module version that contains breaking changes. 4 | 5 | ## Upgrading to v4.0.0 6 | 7 | We are introducing a new worker Lambda function and an SQS queue, enabling the Lambda to run within the 15-minute timeout, which is especially relevant for larger environments. 
8 | 9 | The following variable defaults have been modified: 10 | - `findings_manager_events_lambda.log_level` -> default: `ERROR` (previous default: `INFO`). The logging configuration has been updated, and `ERROR` is now more logical as the default level. 11 | - `findings_manager_trigger_lambda.log_level` -> default: `ERROR` (previous default: `INFO`). The logging configuration has been updated, and `ERROR` is now more logical as the default level. 12 | - `findings_manager_trigger_lambda.memory_size` -> default: `256` (previous default: `1024`). With the new setup, the trigger Lambda requires less memory. 13 | - `findings_manager_trigger_lambda.timeout` -> default: `300` (previous default: `900`). With the new setup, the trigger Lambda completes tasks in less time. 14 | 15 | The following variables have been introduced: 16 | - `findings_manager_worker_lambda` 17 | 18 | The following output has been introduced: 19 | - `findings_manager_worker_lambda_sg_id` 20 | 21 | Note: 22 | - Ensure your KMS key is available for SQS access. 23 | 24 | ## Upgrading to v3.0.0 25 | 26 | ### Variables (v3.0.0) 27 | 28 | The following variables have been removed: 29 | 30 | - `dynamodb_table` 31 | - `dynamodb_deletion_protection` 32 | 33 | The following variables have been introduced: 34 | - `rules_filepath` 35 | - `rules_s3_object_name` 36 | 37 | The following variables have been renamed: 38 | - `lambda_events_suppressor` -> `findings_manager_events_lambda` 39 | - `lambda_streams_suppressor` -> `findings_manager_trigger_lambda` 40 | - `lambda_suppressor_iam_role_name` -> `findings_manager_lambda_iam_role_name` 41 | - `eventbridge_suppressor_iam_role_name` -> `jira_eventbridge_iam_role_name` 42 | - `step_function_suppressor_iam_role_name` -> `jira_step_function_iam_role_name` 43 | 44 | A Lambda function now triggers on S3 Object Creation Trigger Events. 45 | By default it is triggered by putting a new (version of) an object called `rules.yaml` in the bucket created by this module. 
46 | This filename can be customized with the `rules_s3_object_name` variable. 47 | 48 | You can add the `rules.yaml` file to the bucket in any way you like after deploying this module, for instance with an `aws_s3_object` resource. 49 | This way you can separate management of your infrastructure and security. 50 | If this separation is not necessary in your case, you can also let this module directly upload the file for you by setting the `rules_filepath` variable to the filepath of your `rules.yaml` file. 51 | In either case, be mindful that there can be a delay between creating S3 triggers and those being fully functional. 52 | Re-create the rules object later to have rules run on your findings history in that case. 53 | 54 | ### Outputs (v3.0.0) 55 | 56 | The following output has been removed: 57 | 58 | - `dynamodb_arn` 59 | 60 | The following output has been renamed: 61 | 62 | - `lambda_jira_security_hub_sg_id` -> `jira_lambda_sg_id` 63 | - `lambda_securityhub_events_suppressor_sg_id` -> `findings_manager_events_lambda_sg_id` 64 | - `lambda_securityhub_streams_suppressor_sg_id` -> `findings_manager_trigger_lambda_sg_id` 65 | 66 | ### Behaviour (v3.0.0) 67 | 68 | New functionality: 69 | 70 | - Managing consolidated control findings is now supported 71 | - Managing based on tags is now supported 72 | 73 | See the README, section `## How to format the rules.yaml file?` for more information on the keys you need to use to control this. 74 | 75 | The `rules.yaml` file needs to be written in a different syntax. The script below can be used to easily convert your current `suppressions.yml` file to the new format. 
76 | 77 | ```python 78 | import yaml 79 | 80 | suppressions = yaml.safe_load(open('suppressions.yml'))['Suppressions'] 81 | 82 | rules = { 83 | 'Rules': [ 84 | { 85 | 'note': content['notes'], 86 | 'action': content['action'], 87 | 'match_on': { 88 | 'rule_or_control_id': rule_or_control_id, 89 | 'resource_id_regexps': content['rules'] 90 | } 91 | } 92 | for rule_or_control_id, contents in suppressions.items() 93 | for content in contents 94 | ] 95 | } 96 | 97 | print(yaml.dump(rules, indent=2)) 98 | ``` 99 | 100 | If you do not want to rename your file from `suppressions.yml` to `rules.yaml` you can override the name using the `rules_s3_object_name` variable. 101 | 102 | ## Upgrading to v2.0.0 103 | 104 | ### Variables (v2.0.0) 105 | 106 | The following variable has been replaced: 107 | 108 | - `create_allow_all_egress_rule` -> `jira_integration.security_group_egress_rules`, `lambda_streams_suppressor.security_group_egress_rules`, `lambda_events_suppressor.security_group_egress_rules` 109 | 110 | Instead of only being able to allow all egress or block all egress and having to rely on resources outside this module to create specific egress rules this is now supported natively by the module. 111 | 112 | The following variable defaults have been modified: 113 | 114 | - `servicenow_integration.cloudwatch_retention_days` -> default: `365` (previous hardcoded: `14`). In order to comply with AWS Security Hub control CloudWatch.16. 115 | 116 | ### Behaviour (v2.0.0) 117 | 118 | The need to provide a `providers = { aws = aws }` argument has been removed, but is still allowed. E.g. when deploying this module in the audit account typically `providers = { aws = aws.audit }` is passed. 119 | 120 | ## Upgrading to v1.0.0 121 | 122 | ### Behaviour (v1.0.0) 123 | 124 | - Timeouts of the suppressor lambdas have been increased to 120 seconds. The current timeout of 60 seconds is not always enough to process 100 records of findings. 
125 | - The `create_servicenow_access_keys` variable, now called `servicenow_integration.create_access_keys`, was not used in the code and therefore the default behaviour was that access keys would be created. This issue has been resolved. 126 | - The `create_allow_all_egress_rule` variable has been set to `false`. 127 | - The `tags` variable is now optional. 128 | 129 | ### Variables (v1.0.0) 130 | 131 | The following variables have been replaced by a new variable `jira_integration`: 132 | 133 | - `jira_exclude_account_filter` -> `jira_integration.exclude_account_ids` 134 | - `jira_finding_severity_normalized` -> `jira_integration.finding_severity_normalized_threshold` 135 | - `jira_integration` -> `jira_integration.enabled` 136 | - `jira_issue_type` -> `jira_integration.issue_type` 137 | - `jira_project_key` -> `jira_integration.project_key` 138 | - `jira_secret_arn` -> `jira_integration.credentials_secret_arn` 139 | - `lambda_jira_name` -> `jira_integration.lambda_settings.name` 140 | - `lambda_jira_iam_role_name` -> `jira_integration.lambda_settings.iam_role_name` 141 | - Additionally you are now able to specify the `log_level`, `memory_size`, and `timeout` of the lambda. 142 | 143 | The following variables have been replaced by a new variable `servicenow_integration`: 144 | 145 | - `servicenow_integration` -> `servicenow_integration.enabled` 146 | - `create_servicenow_access_keys` -> `servicenow_integration.create_access_keys` 147 | 148 | The following variables have been replaced by a new variable `lambda_events_suppressor`: 149 | 150 | - `lambda_events_suppressor_name` -> `lambda_events_suppressor.name` 151 | - Additionally you are now able to specify the `log_level`, `memory_size`, and `timeout` of the lambda. 
152 | 153 | The following variables have been replaced by a new variable `lambda_streams_suppressor`: 154 | 155 | - `lambda_streams_suppressor_name` -> `lambda_streams_suppressor.name` 156 | - Additionally you are now able to specify the `log_level`, `memory_size,` and `timeout` of the lambda. 157 | -------------------------------------------------------------------------------- /data.tf: -------------------------------------------------------------------------------- 1 | # Data Source to get the access to Account ID in which Terraform is authorized and the region configured on the provider 2 | data "aws_caller_identity" "current" {} 3 | 4 | data "aws_region" "current" {} 5 | -------------------------------------------------------------------------------- /examples/basic-separate-file/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # Replace with a globally unique bucket name 3 | s3_bucket_name = "securityhub-findings-manager" 4 | } 5 | 6 | provider "aws" { 7 | region = "eu-west-1" 8 | } 9 | 10 | data "aws_caller_identity" "current" {} 11 | 12 | module "kms" { 13 | source = "schubergphilis/mcaf-kms/aws" 14 | version = "~> 0.3.0" 15 | 16 | name = "securityhub-findings-manager" 17 | 18 | policy = templatefile( 19 | "${path.module}/../kms.json", 20 | { account_id = data.aws_caller_identity.current.account_id } 21 | ) 22 | } 23 | 24 | module "aws_securityhub_findings_manager" { 25 | source = "../../" 26 | 27 | kms_key_arn = module.kms.arn 28 | s3_bucket_name = local.s3_bucket_name 29 | 30 | tags = { Terraform = true } 31 | } 32 | 33 | # It can take a long time before S3 notifications become active 34 | # You may want to deploy this resource a few minutes after those above 35 | resource "aws_s3_object" "rules" { 36 | bucket = local.s3_bucket_name 37 | key = "rules.yaml" 38 | content_type = "application/x-yaml" 39 | content = file("${path.module}/../rules.yaml") 40 | source_hash = 
filemd5("${path.module}/../rules.yaml") 41 | 42 | depends_on = [module.aws_securityhub_findings_manager] 43 | } 44 | -------------------------------------------------------------------------------- /examples/basic-separate-file/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 4.9" 8 | } 9 | local = { 10 | source = "hashicorp/local" 11 | version = ">= 1.0" 12 | } 13 | null = { 14 | source = "hashicorp/null" 15 | version = ">= 2.0" 16 | } 17 | random = { 18 | source = "hashicorp/random" 19 | version = ">= 3.0" 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /examples/basic/main.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = "eu-west-1" 3 | } 4 | 5 | data "aws_caller_identity" "current" {} 6 | 7 | module "kms" { 8 | source = "schubergphilis/mcaf-kms/aws" 9 | version = "~> 0.3.0" 10 | 11 | name = "securityhub-findings-manager" 12 | 13 | policy = templatefile( 14 | "${path.module}/../kms.json", 15 | { account_id = data.aws_caller_identity.current.account_id } 16 | ) 17 | } 18 | 19 | # It can take a long time before S3 notifications become active 20 | # You may want to deploy an empty set of rules before the actual ones or do a trick with yaml comments 21 | module "aws_securityhub_findings_manager" { 22 | source = "../../" 23 | 24 | kms_key_arn = module.kms.arn 25 | s3_bucket_name = "securityhub-findings-manager-artifacts" # Replace with a globally unique bucket name 26 | rules_filepath = "${path.module}/../rules.yaml" 27 | 28 | tags = { Terraform = true } 29 | } 30 | -------------------------------------------------------------------------------- /examples/basic/versions.tf: -------------------------------------------------------------------------------- 1 | 
terraform { 2 | required_version = ">= 1.3.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 4.9" 8 | } 9 | local = { 10 | source = "hashicorp/local" 11 | version = ">= 1.0" 12 | } 13 | null = { 14 | source = "hashicorp/null" 15 | version = ">= 2.0" 16 | } 17 | random = { 18 | source = "hashicorp/random" 19 | version = ">= 3.0" 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /examples/jira-integration/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # Replace with a globally unique bucket name 3 | s3_bucket_name = "securityhub-findings-manager" 4 | } 5 | 6 | provider "aws" { 7 | region = "eu-west-1" 8 | } 9 | 10 | data "aws_caller_identity" "current" {} 11 | 12 | module "kms" { 13 | source = "schubergphilis/mcaf-kms/aws" 14 | version = "~> 0.3.0" 15 | 16 | name = "securityhub-findings-manager" 17 | 18 | policy = templatefile( 19 | "${path.module}/../kms.json", 20 | { account_id = data.aws_caller_identity.current.account_id } 21 | ) 22 | } 23 | 24 | resource "aws_secretsmanager_secret" "jira_credentials" { 25 | #checkov:skip=CKV2_AWS_57: automatic rotation of the jira credentials is recommended. 
26 | description = "Security Hub Findings Manager Jira Credentials Secret" 27 | kms_key_id = module.kms.arn 28 | name = "lambda/jira_credentials_secret" 29 | } 30 | 31 | // tfsec:ignore:GEN003 32 | resource "aws_secretsmanager_secret_version" "jira_credentials" { 33 | secret_id = aws_secretsmanager_secret.jira_credentials.id 34 | secret_string = jsonencode({ 35 | "url" = "https://jira.mycompany.com" 36 | "apiuser" = "username" 37 | "apikey" = "apikey" 38 | }) 39 | } 40 | 41 | module "aws_securityhub_findings_manager" { 42 | source = "../../" 43 | 44 | kms_key_arn = module.kms.arn 45 | s3_bucket_name = local.s3_bucket_name 46 | 47 | jira_integration = { 48 | enabled = true 49 | credentials_secret_arn = aws_secretsmanager_secret.jira_credentials.arn 50 | project_key = "PROJECT" 51 | 52 | security_group_egress_rules = [{ 53 | cidr_ipv4 = "1.1.1.1/32" 54 | description = "Allow access from lambda_jira_securityhub to Jira" 55 | from_port = 443 56 | ip_protocol = "tcp" 57 | to_port = 443 58 | }] 59 | } 60 | 61 | tags = { Terraform = true } 62 | } 63 | 64 | # It can take a long time before S3 notifications become active 65 | # You may want to deploy this resource a few minutes after those above 66 | resource "aws_s3_object" "rules" { 67 | bucket = local.s3_bucket_name 68 | key = "rules.yaml" 69 | content_type = "application/x-yaml" 70 | content = file("${path.module}/../rules.yaml") 71 | source_hash = filemd5("${path.module}/../rules.yaml") 72 | 73 | depends_on = [module.aws_securityhub_findings_manager] 74 | } 75 | -------------------------------------------------------------------------------- /examples/jira-integration/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 4.9" 8 | } 9 | local = { 10 | source = "hashicorp/local" 11 | version = ">= 1.0" 12 | } 13 | null = { 14 | source = 
"hashicorp/null" 15 | version = ">= 2.0" 16 | } 17 | random = { 18 | source = "hashicorp/random" 19 | version = ">= 3.0" 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /examples/kms.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Sid": "Enable IAM User Permissions", 6 | "Effect": "Allow", 7 | "Principal": { 8 | "AWS": "arn:aws:iam::${account_id}:root" 9 | }, 10 | "Action": "kms:*", 11 | "Resource": "*" 12 | }, 13 | { 14 | "Sid": "Allow Encrypt and Decrypt permissions for Cloudwatch Logs", 15 | "Effect": "Allow", 16 | "Principal": { 17 | "Service": "logs.eu-west-1.amazonaws.com" 18 | }, 19 | "Action": [ 20 | "kms:ReEncrypt*", 21 | "kms:GenerateDataKey*", 22 | "kms:Encrypt", 23 | "kms:DescribeKey", 24 | "kms:Decrypt" 25 | ], 26 | "Resource": "arn:aws:kms:eu-west-1:${account_id}:key/*" 27 | } 28 | ] 29 | } -------------------------------------------------------------------------------- /examples/rules.yaml: -------------------------------------------------------------------------------- 1 | # Comments 2 | Rules: 3 | - note: 'MF-Neigh' 4 | action: 'SUPPRESSED' 5 | match_on: 6 | security_control_id: 'S3.20' 7 | - note: 'Config as code' 8 | action: 'SUPPRESSED' 9 | match_on: 10 | security_control_id: 'S3.14' 11 | tags: 12 | - key: 'ManagedBy' 13 | value: 'Terraform' 14 | - key: 'ManagedBy' 15 | value: 'CFN' 16 | - note: 'Too expensive on non-prod' 17 | action: 'SUPPRESSED' 18 | match_on: 19 | security_control_id: 'S3.9' 20 | resource_id_regexps: 21 | - '^arn:aws:s3:::.*-dev$' 22 | - '^arn:aws:s3:::.*-acc$' 23 | - note: 'Suppress EC2.172 on us-east-1 and eu-central-1' 24 | action: 'SUPPRESSED' 25 | match_on: 26 | security_control_id: 'EC2.172' 27 | regions: 28 | - 'us-east-1' 29 | - 'eu-central-1' 30 | -------------------------------------------------------------------------------- 
/examples/servicenow-integration/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # Replace with a globally unique bucket name 3 | s3_bucket_name = "securityhub-findings-manager" 4 | } 5 | 6 | provider "aws" { 7 | region = "eu-west-1" 8 | } 9 | 10 | data "aws_caller_identity" "current" {} 11 | 12 | module "kms" { 13 | source = "schubergphilis/mcaf-kms/aws" 14 | version = "~> 0.3.0" 15 | 16 | name = "securityhub-findings-manager" 17 | 18 | policy = templatefile( 19 | "${path.module}/../kms.json", 20 | { account_id = data.aws_caller_identity.current.account_id } 21 | ) 22 | } 23 | 24 | module "aws_securityhub_findings_manager" { 25 | source = "../../" 26 | 27 | kms_key_arn = module.kms.arn 28 | s3_bucket_name = local.s3_bucket_name 29 | 30 | servicenow_integration = { 31 | enabled = true 32 | } 33 | 34 | tags = { Terraform = true } 35 | } 36 | 37 | # It can take a long time before S3 notifications become active 38 | # You may want to deploy this resource a few minutes after those above 39 | resource "aws_s3_object" "rules" { 40 | bucket = local.s3_bucket_name 41 | key = "rules.yaml" 42 | content_type = "application/x-yaml" 43 | content = file("${path.module}/../rules.yaml") 44 | source_hash = filemd5("${path.module}/../rules.yaml") 45 | 46 | depends_on = [module.aws_securityhub_findings_manager] 47 | } 48 | -------------------------------------------------------------------------------- /examples/servicenow-integration/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 4.9" 8 | } 9 | local = { 10 | source = "hashicorp/local" 11 | version = ">= 1.0" 12 | } 13 | null = { 14 | source = "hashicorp/null" 15 | version = ">= 2.0" 16 | } 17 | random = { 18 | source = "hashicorp/random" 19 | version = ">= 3.0" 20 | } 21 | } 22 | } 23 | 
import json
import os

import boto3
from aws_lambda_powertools import Logger
from aws_lambda_powertools.utilities.typing import LambdaContext
from jira.exceptions import JIRAError

import helpers

logger = Logger()
securityhub = boto3.client('securityhub')
secretsmanager = boto3.client('secretsmanager')

# Environment variables that must be set before any processing starts.
REQUIRED_ENV_VARS = [
    'EXCLUDE_ACCOUNT_FILTER', 'JIRA_ISSUE_CUSTOM_FIELDS', 'JIRA_ISSUE_TYPE', 'JIRA_PROJECT_KEY', 'JIRA_SECRET_ARN'
]

DEFAULT_JIRA_AUTOCLOSE_COMMENT = 'Security Hub finding has been resolved. Autoclosing the issue.'
DEFAULT_JIRA_AUTOCLOSE_TRANSITION = 'Done'

# Security Hub workflow statuses.
STATUS_NEW = 'NEW'
STATUS_NOTIFIED = 'NOTIFIED'
STATUS_RESOLVED = 'RESOLVED'
# Security Hub compliance statuses; MISSING is a local sentinel for findings
# that carry no Compliance block at all (e.g. Inspector findings).
COMPLIANCE_STATUS_FAILED = 'FAILED'
COMPLIANCE_STATUS_NOT_AVAILABLE = 'NOT_AVAILABLE'
COMPLIANCE_STATUS_PASSED = 'PASSED'
COMPLIANCE_STATUS_WARNING = 'WARNING'
COMPLIANCE_STATUS_MISSING = 'MISSING'
# Security Hub record states.
RECORD_STATE_ACTIVE = 'ACTIVE'
RECORD_STATE_ARCHIVED = 'ARCHIVED'


@logger.inject_lambda_context
def lambda_handler(event: dict, context: LambdaContext):
    """Create or autoclose Jira issues for Security Hub findings delivered via EventBridge.

    New actionable findings get a Jira issue; the finding is set to NOTIFIED and the
    issue key is stored in the finding note as JSON ({"jiraIssue": "KEY-1"}).
    Resolved findings get their referenced Jira issue transitioned closed; findings
    closed from NOTIFIED are additionally set to RESOLVED in Security Hub.

    Args:
        event: EventBridge event; the Security Hub finding is at event['detail']['findings'][0].
        context: Lambda context (unused beyond powertools injection).

    Raises:
        RuntimeError: if env validation, Jira client setup, or issue creation fails.
        ValueError: if JIRA_ISSUE_CUSTOM_FIELDS or the finding note contain invalid JSON.
    """
    # Validate required environment variables up front so failures are explicit.
    try:
        helpers.validate_env_vars(REQUIRED_ENV_VARS)
    except Exception as e:
        logger.error(f"Environment variable validation failed: {e}")
        raise RuntimeError("Required environment variables are missing.") from e

    # Retrieve environment variables.
    exclude_account_filter = os.environ['EXCLUDE_ACCOUNT_FILTER']
    jira_autoclose_comment = os.getenv(
        'JIRA_AUTOCLOSE_COMMENT', DEFAULT_JIRA_AUTOCLOSE_COMMENT)
    jira_autoclose_transition = os.getenv(
        'JIRA_AUTOCLOSE_TRANSITION', DEFAULT_JIRA_AUTOCLOSE_TRANSITION)
    jira_issue_custom_fields = os.environ['JIRA_ISSUE_CUSTOM_FIELDS']
    jira_issue_type = os.environ['JIRA_ISSUE_TYPE']
    jira_project_key = os.environ['JIRA_PROJECT_KEY']
    jira_secret_arn = os.environ['JIRA_SECRET_ARN']

    # Parse custom fields; Jira's API expects {"field": {"value": ...}} wrappers.
    try:
        jira_issue_custom_fields = json.loads(jira_issue_custom_fields)
        jira_issue_custom_fields = {k: {"value": v}
                                    for k, v in jira_issue_custom_fields.items()}
    except json.JSONDecodeError as e:
        logger.error(f"Failed to parse JSON for custom fields: {e}.")
        raise ValueError(f"Invalid JSON in JIRA_ISSUE_CUSTOM_FIELDS: {e}") from e

    # Build an authenticated Jira client from the Secrets Manager secret.
    try:
        jira_secret = helpers.get_secret(secretsmanager, jira_secret_arn)
        jira_client = helpers.get_jira_client(jira_secret)
    except Exception as e:
        logger.error(f"Failed to retrieve Jira client: {e}")
        raise RuntimeError("Could not initialize Jira client.") from e

    # Get Security Hub event details; each event carries exactly one finding.
    event_detail = event['detail']
    finding = event_detail['findings'][0]
    finding_account_id = finding['AwsAccountId']
    workflow_status = finding['Workflow']['Status']
    compliance_status = finding['Compliance']['Status'] if 'Compliance' in finding else COMPLIANCE_STATUS_MISSING
    record_state = finding['RecordState']

    # Only process the finding if the account is not excluded.
    # NOTE(review): this is a substring check against the raw env var; account id
    # "123456789012" would also match a filter containing "1234567890123". Confirm
    # the filter format and consider splitting it into an explicit list of ids.
    if finding_account_id in exclude_account_filter:
        logger.info(
            f"Account {finding_account_id} is excluded from Jira ticket creation.")
        return

    # Handle new findings.
    # A ticket is created when Workflow Status is NEW and Compliance Status is
    # FAILED, WARNING, or missing from the finding (e.g. Inspector findings).
    # The compliance check is necessary because some AWS Config findings arrive
    # with Workflow Status NEW but Compliance Status NOT_AVAILABLE; those are not
    # actionable, so no ticket is created for them.
    if (workflow_status == STATUS_NEW
            and compliance_status in [COMPLIANCE_STATUS_FAILED,
                                      COMPLIANCE_STATUS_WARNING,
                                      COMPLIANCE_STATUS_MISSING]
            and record_state == RECORD_STATE_ACTIVE):
        # Create the Jira issue, set the finding to NOTIFIED, and store the
        # issue key in the finding note (JSON format).
        try:
            issue = helpers.create_jira_issue(
                jira_client, jira_project_key, jira_issue_type, event_detail, jira_issue_custom_fields)
            note = json.dumps({'jiraIssue': issue.key})
            helpers.update_security_hub(
                securityhub, finding["Id"], finding["ProductArn"], STATUS_NOTIFIED, note)
        except Exception as e:
            logger.error(
                f"Error processing new finding for findingID {finding['Id']}: {e}")
            raise RuntimeError(f"Failed to create Jira issue or update Security Hub for finding ID {finding['Id']}.") from e

    # Handle resolved findings.
    # Close the Jira issue when the finding is RESOLVED, or when it is NOTIFIED and
    # compliance is PASSED (finding resolved) or NOT_AVAILABLE (e.g. the resource was
    # deleted), or its Record State is ARCHIVED.
    # When closed from NOTIFIED, also resolve the finding in Security Hub; if it
    # becomes relevant again Security Hub reopens it and a new ticket is created.
    elif (workflow_status == STATUS_RESOLVED
            or (workflow_status == STATUS_NOTIFIED
                and (compliance_status in [COMPLIANCE_STATUS_PASSED,
                                           COMPLIANCE_STATUS_NOT_AVAILABLE]
                     or record_state == RECORD_STATE_ARCHIVED))):
        # The note text should contain the Jira issue key in JSON format.
        # A missing note simply means no ticket was ever created for this finding;
        # that is a normal situation, not an error worth logging at error level.
        note_text = finding.get('Note', {}).get('Text')
        if not note_text:
            logger.info(
                f"Finding {finding['Id']} carries no note with a Jira issue reference. Nothing to autoclose.")
            return
        try:
            note_text_json = json.loads(note_text)
            jira_issue_id = note_text_json.get('jiraIssue')
            if jira_issue_id:
                try:
                    issue = jira_client.issue(jira_issue_id)
                except JIRAError as e:
                    logger.error(
                        f"Failed to retrieve Jira issue {jira_issue_id}: {e}. Cannot autoclose.")
                    return  # Skip further processing for this finding
                helpers.close_jira_issue(
                    jira_client, issue, jira_autoclose_transition, jira_autoclose_comment)
                if workflow_status == STATUS_NOTIFIED:
                    # Resolve the finding; Security Hub reopens it anyway if compliance
                    # fails again. Also replace the note to prevent a second run with
                    # RESOLVED status.
                    helpers.update_security_hub(
                        securityhub, finding["Id"], finding["ProductArn"], STATUS_RESOLVED, f"Closed Jira issue {jira_issue_id}")
        except json.JSONDecodeError as e:
            logger.error(
                f"Failed to decode JSON from note text: {e}. Cannot autoclose.")
            raise ValueError(f"Invalid JSON in note text for finding ID {finding['Id']}.") from e
        except Exception as e:
            logger.error(
                f"Error processing resolved finding for findingId {finding['Id']}: {e}. Cannot autoclose.")
            return

    else:
        logger.info(
            f"Finding {finding['Id']} is not in a state to be processed. Workflow status: {workflow_status}, Compliance status: {compliance_status}, Record state: {record_state}")
import base64
import json
import os
from typing import List, Dict

from aws_lambda_powertools import Logger
from botocore.client import BaseClient
from botocore.exceptions import ClientError
from jira import JIRA
from jira.resources import Issue

logger = Logger()


def validate_env_vars(env_vars: List[str]) -> None:
    """
    Validate that all specified environment variables are set.

    Args:
        env_vars (List[str]): A list of environment variable names to check.

    Raises:
        ValueError: If any of the specified environment variables are not set.
    """

    missing_vars = [var for var in env_vars if var not in os.environ]

    for var in missing_vars:
        logger.error(f"Environment variable {var} is not set!")

    if missing_vars:
        missing_str = ', '.join(missing_vars)
        logger.error(f"Missing environment variables: {missing_str}")
        raise ValueError(f"Missing environment variables: {missing_str}")


def get_jira_client(jira_secret: Dict[str, str]) -> JIRA:
    """
    Create a Jira client instance using the specified secret.

    Args:
        jira_secret (Dict[str, str]): A dictionary with 'url', 'apiuser' and 'apikey' keys.

    Returns:
        JIRA: A Jira client instance.

    Raises:
        ValueError: If the Jira connection details are not valid.
    """

    jira_url = jira_secret.get('url')
    jira_user = jira_secret.get('apiuser')
    jira_password = jira_secret.get('apikey')

    if not jira_url or not jira_user or not jira_password:
        raise ValueError("Jira connection details are not valid!")

    return JIRA(server=jira_url, basic_auth=(jira_user, jira_password))


def get_secret(client: BaseClient, secret_arn: str) -> Dict[str, str]:
    """
    Retrieve a secret from AWS Secrets Manager.

    Args:
        client (BaseClient): A boto3 client instance for Secrets Manager.
        secret_arn (str): The ARN of the secret to retrieve.

    Returns:
        Dict[str, str]: The secret value as a dictionary.

    Raises:
        ValueError: If the client is not a Secrets Manager client.
        ClientError: If there is an error retrieving the secret.
    """

    # Validate that the client is a Secrets Manager client.
    # Keep the f-string expression on a single line: a newline inside an f-string
    # replacement field is a SyntaxError before Python 3.12 (PEP 701), and this
    # code ships in python3.11 lambda packages.
    if client.meta.service_model.service_name != 'secretsmanager':
        raise ValueError(f"Client must be an instance of botocore.client.SecretsManager. Got {type(client)} instead.")

    try:
        response = client.get_secret_value(SecretId=secret_arn)
        secret = response.get('SecretString')

        # Binary secrets arrive base64-encoded in 'SecretBinary' instead.
        if secret is None:
            secret = base64.b64decode(response['SecretBinary']).decode('utf-8')

        logger.info(f"Secret fetched from ARN {secret_arn}")
        return json.loads(secret)
    except Exception as e:
        logger.error(f"Error retrieving secret from ARN {secret_arn}: {e}")
        raise e


def create_jira_issue(jira_client: JIRA, project_key: str, issue_type: str, event: dict, custom_fields: dict) -> Issue:
    """
    Create a Jira issue based on a Security Hub event.

    Args:
        jira_client (JIRA): An authenticated Jira client instance.
        project_key (str): The key of the Jira project.
        issue_type (str): The type of the Jira issue.
        event (dict): The Security Hub event detail; the finding is at event['findings'][0].
        custom_fields (dict): The custom fields to include in the Jira issue.

    Returns:
        Issue: The created Jira issue.

    Raises:
        Exception: If there is an error creating the Jira issue.
    """

    finding = event['findings'][0]
    finding_account_id = finding['AwsAccountId']
    finding_title = finding['Title']

    # Single-line replacement field: multiline f-string expressions are a
    # SyntaxError before Python 3.12.
    issue_title = f"Security Hub ({finding_title}) detected in {finding_account_id}"

    # {code} markers render the raw finding as a code block in Jira.
    issue_description = f"""
    {finding['Description']}

    A Security Hub finding has been detected:
    {{code}}{json.dumps(event, indent=2, sort_keys=True)}{{code}}
    """

    # Labels: region, account, severity, plus any rule/control/product identifiers
    # present in ProductFields (spaces stripped because Jira labels cannot contain them).
    issue_labels = [
        finding["Region"],
        finding_account_id,
        finding['Severity']['Label'].lower(),
        *[finding['ProductFields'][key].replace(" ", "")
          for key in ["RuleId", "ControlId", "aws/securityhub/ProductName"]
          if key in finding['ProductFields']]
    ]

    issue_dict = {
        **custom_fields,
        'project': {'key': project_key},
        'issuetype': {'name': issue_type},
        'summary': issue_title,
        'description': issue_description,
        'labels': issue_labels,
    }

    try:
        issue = jira_client.create_issue(fields=issue_dict)
        logger.info(f"Created Jira issue: {issue.key}")
        return issue
    except Exception as e:
        logger.error(f"Failed to create Jira issue for finding {finding['Id']}: {e}")
        raise e


def close_jira_issue(jira_client: JIRA, issue: Issue, transition_name: str, comment: str) -> None:
    """
    Close a Jira issue by applying the named transition with a comment.

    Args:
        jira_client (JIRA): An authenticated Jira client instance.
        issue (Issue): The Jira issue to close.
        transition_name (str): Name of the workflow transition to apply.
        comment (str): Comment added to the issue before transitioning.

    Raises:
        Exception: If there is an error closing the Jira issue.
    """

    try:
        transition_id = jira_client.find_transitionid_by_name(issue, transition_name)
        if transition_id is None:
            # Plain string: the previous f-string had no placeholders.
            logger.warning("Failed to close Jira issue: Invalid transition.")
            return
        jira_client.add_comment(issue, comment)
        jira_client.transition_issue(issue, transition_id, comment=comment)
        logger.info(f"Closed Jira issue: {issue.key}")
    except Exception as e:
        logger.error(f"Failed to close Jira issue {issue.key}: {e}")
        raise e


def update_security_hub(client: BaseClient, finding_id: str,
                        product_arn: str, status: str, note: str = "") -> None:
    """
    Update a Security Hub finding with the given status and note.

    Args:
        client (BaseClient): A boto3 client instance for Security Hub.
        finding_id (str): The ID of the finding to update.
        product_arn (str): The ARN of the product associated with the finding.
        status (str): The new workflow status for the finding.
        note (str): A note to add to the finding (omitted when empty).

    Raises:
        ValueError: If the client is not a Security Hub client.
        ClientError: If there is an error updating the finding.
    """

    # Validate that the client is a Security Hub client (single-line f-string
    # expression; multiline replacement fields break Python < 3.12).
    if client.meta.service_model.service_name != 'securityhub':
        raise ValueError(f"Client must be an instance of botocore.client.SecurityHub. Got {type(client)} instead.")

    try:
        kwargs = {}
        if note:
            kwargs['Note'] = {
                'Text': note,
                'UpdatedBy': 'securityhub-findings-manager-jira'
            }
        logger.info(f"Updating SecurityHub finding {finding_id} to status {status} with note '{note}'.")
        response = client.batch_update_findings(
            FindingIdentifiers=[
                {
                    'Id': finding_id,
                    'ProductArn': product_arn
                }
            ],
            Workflow={'Status': status},
            **kwargs
        )

        # batch_update_findings reports partial failures in 'FailedFindings'
        # rather than raising, so surface them explicitly.
        if response.get('FailedFindings'):
            for element in response['FailedFindings']:
                logger.error(f"Updating SecurityHub finding failed: FindingId {element['Id']}, ErrorCode {element['ErrorCode']}, ErrorMessage {element['ErrorMessage']}")
        else:
            logger.info("SecurityHub finding updated successfully.")

    except Exception as e:
        logger.exception(f"Updating SecurityHub finding failed: {e}")
        raise e
from boto3 import client
from json import dumps
from os import environ
from aws_lambda_powertools import Logger
from strategize_findings_manager import get_rules

# Target queue for per-rule fan-out processing by the worker lambda.
SQS_QUEUE_NAME = environ.get("SQS_QUEUE_NAME")
LOGGER = Logger()


@LOGGER.inject_lambda_context(log_event=True)
def lambda_handler(event, context):
    """Publish every configured findings-manager rule to SQS, one message per rule.

    Each rule's raw data is serialized to JSON and sent individually so the
    worker lambda can process rules independently.

    Raises:
        Exception: re-raises the original failure so the invocation is marked failed.
    """
    try:
        sqs = client("sqs")
        for rule in get_rules(LOGGER):
            message_body = dumps(rule.data)
            LOGGER.info(f"Putting rule on SQS. Rule details: {message_body}")
            sqs.send_message(
                QueueUrl=SQS_QUEUE_NAME,
                MessageBody=message_body
            )
    except Exception as e:
        # Plain string: the previous f-string had no placeholders.
        LOGGER.error("Failed putting rule(s) on SQS.")
        LOGGER.error(f"Original error: {e}", exc_info=True)
        # Bare `raise` re-raises the original exception with its traceback;
        # the previous `raise Exception` replaced it with an empty Exception.
        raise
from os import environ
from aws_lambda_powertools import Logger
from awsfindingsmanagerlib import S3, FindingsManager

# Location of the rules file the findings manager loads its suppression rules from.
S3_BUCKET_NAME = environ.get("S3_BUCKET_NAME")
S3_OBJECT_NAME = environ.get("S3_OBJECT_NAME")


def _initialize_findings_manager(logger: Logger) -> FindingsManager:
    """Build a FindingsManager with the rules loaded from the configured S3 object."""
    s3_backend = S3(S3_BUCKET_NAME, S3_OBJECT_NAME)
    rules = s3_backend.get_rules()
    logger.info(rules)
    findings_manager = FindingsManager()
    findings_manager.register_rules(rules)
    return findings_manager


def manage(func, args, logger: Logger):
    """Initialize the findings manager and invoke the given FindingsManager method.

    Args:
        func: An unbound FindingsManager method; looked up by name on the instance.
        args: Positional arguments tuple passed to the method.
        logger: Logger for progress and error reporting.

    Returns:
        dict: {"finding_state": "suppressed"} or {"finding_state": "skipped"}.
    """
    try:
        findings_manager = _initialize_findings_manager(logger)
    except Exception as e:
        logger.error("Findings manager failed to initialize, please investigate.")
        logger.error(f"Original error: {e}", exc_info=True)
        return {"finding_state": "skipped"}

    try:
        # Resolve the method by name on the freshly-built instance.
        success, suppressed_payload = getattr(findings_manager, func.__name__)(*args)
    except Exception as e:
        logger.error("Findings manager failed to apply findings management rules, please investigate.")
        logger.error(f"Original error: {e}", exc_info=True)
        return {"finding_state": "skipped"}

    if success:
        logger.info("Successfully applied all findings management rules.")
        return suppression_logging(logger, suppressed_payload)
    else:
        logger.error(
            "No explicit error was raised, but not all findings management rules were applied successfully, please investigate."
        )
        return {"finding_state": "skipped"}


def manager_per_rule(rule: list, logger: Logger):
    """Apply a single findings-management rule against matching findings.

    Args:
        rule: A single rule payload (as delivered from SQS).
        logger: Logger for progress and error reporting.

    Returns:
        dict: {"finding_state": "suppressed"} or {"finding_state": "skipped"}.
    """
    try:
        logger.info(f"Processing rule: {rule}")
        findings_manager_per_rule = FindingsManager()
        findings_manager_per_rule.register_rules([rule])
        success, suppressed_payload = findings_manager_per_rule.suppress_matching_findings()
    except Exception as e:
        logger.error("Findings manager failed to apply findings management rules, please investigate.")
        logger.error(f"Original error: {e}", exc_info=True)
        return {"finding_state": "skipped"}

    if success:
        logger.info("Successfully applied all findings management rules.")
        return suppression_logging(logger, suppressed_payload)
    else:
        logger.error(
            "No explicit error was raised, but not all findings management rules were applied successfully, please investigate."
        )
        return {"finding_state": "skipped"}


def get_rules(logger: Logger):
    """Return the registered rules, or an empty list when initialization fails.

    Returning [] (instead of the previous {"finding_state": "skipped"} dict) keeps
    callers that iterate the result — e.g. the trigger lambda's
    `for rule in get_rules(...)` — safe: iterating the dict yielded its string
    keys and then crashed on `rule.data`.
    """
    try:
        findings_manager = _initialize_findings_manager(logger)
    except Exception as e:
        logger.error("Findings manager failed to initialize, please investigate.")
        logger.error(f"Original error: {e}", exc_info=True)
        return []
    return findings_manager.rules


def suppression_logging(logger: Logger, suppressed_payload: list):
    """Log a summary of each suppression chunk and report the overall finding state.

    Args:
        logger: Logger for the summary lines.
        suppressed_payload: List of batch_update_findings-shaped chunks, each with
            'Note', 'Workflow' and 'FindingIdentifiers' keys.

    Returns:
        dict: {"finding_state": "suppressed"} when anything was suppressed,
        otherwise {"finding_state": "skipped"}.
    """
    if len(suppressed_payload) > 0:
        for chunk in suppressed_payload:
            note_text = chunk["Note"]["Text"]
            workflow_status = chunk["Workflow"]["Status"]
            count = len(chunk["FindingIdentifiers"])
            logger.info(f"{count} finding(s) {workflow_status} with note: {note_text}.")
        return {"finding_state": "suppressed"}
    else:
        logger.info("No findings were suppressed.")
        return {"finding_state": "skipped"}
https://raw.githubusercontent.com/schubergphilis/terraform-aws-mcaf-securityhub-findings-manager/5ba8fa40d83be2735f9727bf97edd537f8430125/files/pkg/lambda_securityhub-findings-manager_python3.11.zip -------------------------------------------------------------------------------- /files/pkg/lambda_securityhub-findings-manager_python3.12.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/schubergphilis/terraform-aws-mcaf-securityhub-findings-manager/5ba8fa40d83be2735f9727bf97edd537f8430125/files/pkg/lambda_securityhub-findings-manager_python3.12.zip -------------------------------------------------------------------------------- /files/step-function-artifacts/securityhub-findings-manager-orchestrator-graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/schubergphilis/terraform-aws-mcaf-securityhub-findings-manager/5ba8fa40d83be2735f9727bf97edd537f8430125/files/step-function-artifacts/securityhub-findings-manager-orchestrator-graph.png -------------------------------------------------------------------------------- /files/step-function-artifacts/securityhub-findings-manager-orchestrator.json.tpl: -------------------------------------------------------------------------------- 1 | { 2 | "Comment": "Step Function to orchestrate Security Hub findings manager Lambda functions", 3 | "StartAt": "ChoiceSuppressor", 4 | "States": { 5 | "ChoiceSuppressor": { 6 | "Type": "Choice", 7 | "Choices": [ 8 | { 9 | "Or": [ 10 | { 11 | "Variable": "$.detail.findings[0].Workflow.Status", 12 | "StringEquals": "NEW" 13 | }, 14 | { 15 | "Variable": "$.detail.findings[0].Workflow.Status", 16 | "StringEquals": "NOTIFIED" 17 | } 18 | ], 19 | "Next": "invoke-securityhub-findings-manager-events" 20 | } 21 | ], 22 | "Default": "ChoiceJiraIntegration" 23 | }, 24 | "invoke-securityhub-findings-manager-events": { 25 | "Type": "Task", 26 | "Resource": 
"arn:aws:states:::lambda:invoke", 27 | "Parameters": { 28 | "Payload.$": "$", 29 | "FunctionName": "${findings_manager_events_lambda}" 30 | }, 31 | "Retry": [ 32 | { 33 | "ErrorEquals": [ 34 | "Lambda.ServiceException", 35 | "Lambda.AWSLambdaException", 36 | "Lambda.SdkClientException" 37 | ], 38 | "IntervalSeconds": 2, 39 | "MaxAttempts": 6, 40 | "BackoffRate": 2 41 | } 42 | ], 43 | "Catch": [ 44 | { 45 | "ErrorEquals": [ 46 | "States.TaskFailed" 47 | ], 48 | "Comment": "Catch all task failures", 49 | "Next": "ChoiceJiraIntegration", 50 | "ResultPath": "$.error" 51 | } 52 | ], 53 | "Next": "ChoiceJiraIntegration", 54 | "ResultPath": "$.TaskResult" 55 | }, 56 | "ChoiceJiraIntegration": { 57 | "Type": "Choice", 58 | "Choices": [ 59 | { 60 | "And": [ 61 | { 62 | "Or": [ 63 | { 64 | "Variable": "$.TaskResult.Payload.finding_state", 65 | "IsPresent": false 66 | }, 67 | { 68 | "And": [ 69 | { 70 | "Variable": "$.TaskResult.Payload.finding_state", 71 | "IsPresent": true 72 | }, 73 | { 74 | "Variable": "$.TaskResult.Payload.finding_state", 75 | "StringEquals": "skipped" 76 | } 77 | ] 78 | } 79 | ] 80 | }, 81 | { 82 | "Variable": "$.detail.findings[0].Severity.Normalized", 83 | "NumericGreaterThanEquals": ${finding_severity_normalized} 84 | }, 85 | %{~ if jira_autoclose_enabled } 86 | { 87 | "Or": [ 88 | { 89 | "And": [ 90 | { 91 | "Variable": "$.detail.findings[0].Workflow.Status", 92 | "StringEquals": "NEW" 93 | }, 94 | { 95 | "Variable": "$.detail.findings[0].RecordState", 96 | "StringEquals": "ACTIVE" 97 | }, 98 | { 99 | "Or": [ 100 | { 101 | "Variable": "$.detail.findings[0].Compliance.Status", 102 | "IsPresent": false 103 | }, 104 | { 105 | "And": [ 106 | { 107 | "Variable": "$.detail.findings[0].Compliance.Status", 108 | "IsPresent": true 109 | }, 110 | { 111 | "Or": [ 112 | { 113 | "Variable": "$.detail.findings[0].Compliance.Status", 114 | "StringEquals": "FAILED" 115 | }, 116 | { 117 | "Variable": "$.detail.findings[0].Compliance.Status", 118 | "StringEquals": 
"WARNING" 119 | } 120 | ] 121 | } 122 | ] 123 | } 124 | ] 125 | } 126 | ] 127 | }, 128 | { 129 | "And": [ 130 | { 131 | "Or": [ 132 | { 133 | "Variable": "$.detail.findings[0].Workflow.Status", 134 | "StringEquals": "RESOLVED" 135 | }, 136 | { 137 | "And": [ 138 | { 139 | "Variable": "$.detail.findings[0].Workflow.Status", 140 | "StringEquals": "NOTIFIED" 141 | }, 142 | { 143 | "Or": [ 144 | { 145 | "Variable": "$.detail.findings[0].RecordState", 146 | "StringEquals": "ARCHIVED" 147 | }, 148 | { 149 | "And": [ 150 | { 151 | "Variable": "$.detail.findings[0].Compliance.Status", 152 | "IsPresent": true 153 | }, 154 | { 155 | "Or": [ 156 | { 157 | "Variable": "$.detail.findings[0].Compliance.Status", 158 | "StringEquals": "PASSED" 159 | }, 160 | { 161 | "Variable": "$.detail.findings[0].Compliance.Status", 162 | "StringEquals": "NOT_AVAILABLE" 163 | } 164 | ] 165 | } 166 | ] 167 | } 168 | ] 169 | } 170 | ] 171 | } 172 | ] 173 | }, 174 | { 175 | "Variable": "$.detail.findings[0].Note.Text", 176 | "IsPresent": true 177 | }, 178 | { 179 | "Variable": "$.detail.findings[0].Note.Text", 180 | "StringMatches": "*jiraIssue*" 181 | } 182 | ] 183 | } 184 | ] 185 | } 186 | %{ else } 187 | { 188 | "Variable": "$.detail.findings[0].Workflow.Status", 189 | "StringEquals": "NEW" 190 | } 191 | %{ endif ~} 192 | ], 193 | "Next": "invoke-securityhub-jira" 194 | } 195 | ], 196 | "Default": "Success" 197 | }, 198 | "Success": { 199 | "Type": "Succeed" 200 | }, 201 | "invoke-securityhub-jira": { 202 | "Type": "Task", 203 | "Resource": "arn:aws:states:::lambda:invoke", 204 | "OutputPath": "$.Payload", 205 | "Parameters": { 206 | "Payload.$": "$", 207 | "FunctionName": "${jira_lambda}" 208 | }, 209 | "Retry": [ 210 | { 211 | "ErrorEquals": [ 212 | "Lambda.ServiceException", 213 | "Lambda.AWSLambdaException", 214 | "Lambda.SdkClientException" 215 | ], 216 | "IntervalSeconds": 2, 217 | "MaxAttempts": 6, 218 | "BackoffRate": 2 219 | } 220 | ], 221 | "End": true 222 | } 223 | } 224 | } 225 | 
-------------------------------------------------------------------------------- /findings_manager.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | workflow_status_filter = var.jira_integration.autoclose_enabled ? ["NEW", "NOTIFIED", "RESOLVED"] : ["NEW", "NOTIFIED"] 3 | } 4 | 5 | data "aws_iam_policy_document" "findings_manager_lambda_iam_role" { 6 | statement { 7 | sid = "TrustEventsToStoreLogEvent" 8 | actions = [ 9 | "logs:CreateLogGroup", 10 | "logs:CreateLogStream", 11 | "logs:DescribeLogStreams", 12 | "logs:PutLogEvents" 13 | ] 14 | resources = [ 15 | "arn:aws:logs:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:*" 16 | ] 17 | } 18 | 19 | statement { 20 | sid = "S3GetObjectAccess" 21 | actions = ["s3:GetObject"] 22 | resources = ["${module.findings_manager_bucket.arn}/*"] 23 | } 24 | 25 | # s3:ListBucket is a bucket-level action: the resource must be the bucket 26 | # ARN itself, not "<arn>/*" (object paths never match ListBucket). 27 | statement { 28 | sid = "S3ListBucketObjects" 29 | actions = ["s3:ListBucket"] 30 | resources = [module.findings_manager_bucket.arn] 31 | } 32 | 33 | statement { 34 | sid = "EC2DescribeRegionsAccess" 35 | actions = ["ec2:DescribeRegions"] 36 | resources = ["*"] 37 | } 38 | 39 | statement { 40 | sid = "SecurityHubAccess" 41 | actions = [ 42 | "securityhub:BatchUpdateFindings", 43 | "securityhub:GetFindings" 44 | ] 45 | resources = [ 46 | "arn:aws:securityhub:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:hub/default" 47 | ] 48 | } 49 | 50 | statement { 51 | sid = "SecurityHubAccessList" 52 | actions = [ 53 | "securityhub:ListFindingAggregators" 54 | ] 55 | resources = ["*"] 56 | } 57 | 58 | statement { 59 | sid = "LambdaKMSAccess" 60 | actions = [ 61 | "kms:Decrypt", 62 | "kms:Encrypt", 63 | "kms:GenerateDataKey*", 64 | "kms:ReEncrypt*" 65 | ] 66 | effect = "Allow" 67 | resources = [ 68 | var.kms_key_arn 69 | ] 70 | } 71 | 72 | statement { 73 | sid = "LambdaSQSAllow" 74 | actions = [ 75 | "sqs:SendMessage", 76 | "sqs:ReceiveMessage", 77 | 
"sqs:DeleteMessage", 76 | "sqs:GetQueueAttributes" 77 | ] 78 | effect = "Allow" 79 | resources = [aws_sqs_queue.findings_manager_rule_q.arn] 80 | } 81 | 82 | } 83 | 84 | # Push the Lambda code zip deployment package to s3 85 | resource "aws_s3_object" "findings_manager_lambdas_deployment_package" { 86 | bucket = module.findings_manager_bucket.id 87 | key = "lambda_securityhub-findings-manager_${var.lambda_runtime}.zip" 88 | kms_key_id = var.kms_key_arn 89 | source = "${path.module}/files/pkg/lambda_securityhub-findings-manager_${var.lambda_runtime}.zip" 90 | source_hash = filemd5("${path.module}/files/pkg/lambda_securityhub-findings-manager_${var.lambda_runtime}.zip") 91 | tags = var.tags 92 | } 93 | 94 | ################################################################################ 95 | # Events Lambda 96 | ################################################################################ 97 | 98 | # Lambda function to manage Security Hub findings in response to an EventBridge event 99 | module "findings_manager_events_lambda" { 100 | #checkov:skip=CKV_AWS_272:Code signing not used for now 101 | source = "schubergphilis/mcaf-lambda/aws" 102 | version = "~> 1.4.1" 103 | 104 | name = var.findings_manager_events_lambda.name 105 | create_policy = true 106 | create_s3_dummy_object = false 107 | description = "Lambda to manage Security Hub findings in response to an EventBridge event" 108 | handler = "securityhub_events.lambda_handler" 109 | kms_key_arn = var.kms_key_arn 110 | layers = ["arn:aws:lambda:${data.aws_region.current.name}:017000801446:layer:AWSLambdaPowertoolsPythonV2:79"] 111 | log_retention = 365 112 | memory_size = var.findings_manager_events_lambda.memory_size 113 | policy = data.aws_iam_policy_document.findings_manager_lambda_iam_role.json 114 | runtime = var.lambda_runtime 115 | s3_bucket = var.s3_bucket_name 116 | s3_key = aws_s3_object.findings_manager_lambdas_deployment_package.key 117 | s3_object_version = 
aws_s3_object.findings_manager_lambdas_deployment_package.version_id 118 | security_group_egress_rules = var.findings_manager_events_lambda.security_group_egress_rules 119 | source_code_hash = aws_s3_object.findings_manager_lambdas_deployment_package.checksum_sha256 120 | subnet_ids = var.subnet_ids 121 | tags = var.tags 122 | timeout = var.findings_manager_events_lambda.timeout 123 | 124 | environment = { 125 | S3_BUCKET_NAME = var.s3_bucket_name 126 | S3_OBJECT_NAME = var.rules_s3_object_name 127 | LOG_LEVEL = var.findings_manager_events_lambda.log_level 128 | POWERTOOLS_LOGGER_LOG_EVENT = "false" 129 | POWERTOOLS_SERVICE_NAME = "securityhub-findings-manager-events" 130 | } 131 | } 132 | 133 | # EventBridge Rule that detect Security Hub events 134 | resource "aws_cloudwatch_event_rule" "securityhub_findings_events" { 135 | name = "rule-${var.findings_manager_events_lambda.name}" 136 | description = "EventBridge rule for detecting Security Hub findings events, triggering the findings manager events lambda." 
137 | tags = var.tags 138 | 139 | event_pattern = <= Function timeout 293 | } 294 | 295 | resource "aws_sqs_queue_policy" "findings_manager_rule_sqs_policy" { 296 | policy = data.aws_iam_policy_document.findings_manager_rule_sqs_policy_doc.json 297 | queue_url = aws_sqs_queue.findings_manager_rule_q.id 298 | } 299 | 300 | resource "aws_sqs_queue" "dlq_for_findings_manager_rule_q" { 301 | name = "DlqForSecurityHubFindingsManagerRuleQueue" 302 | kms_master_key_id = var.kms_key_arn 303 | } 304 | 305 | resource "aws_sqs_queue_redrive_policy" "redrive_policy" { 306 | queue_url = aws_sqs_queue.findings_manager_rule_q.id 307 | redrive_policy = jsonencode({ 308 | deadLetterTargetArn = aws_sqs_queue.dlq_for_findings_manager_rule_q.arn 309 | maxReceiveCount = 10 310 | }) 311 | } 312 | 313 | resource "aws_sqs_queue_redrive_allow_policy" "dead_letter_allow_policy" { 314 | queue_url = aws_sqs_queue.dlq_for_findings_manager_rule_q.id 315 | 316 | redrive_allow_policy = jsonencode({ 317 | redrivePermission = "byQueue", 318 | sourceQueueArns = [aws_sqs_queue.findings_manager_rule_q.arn] 319 | }) 320 | } 321 | 322 | data "aws_iam_policy_document" "findings_manager_rule_sqs_policy_doc" { 323 | statement { 324 | actions = [ 325 | "SQS:SendMessage" 326 | ] 327 | resources = [aws_sqs_queue.findings_manager_rule_q.arn] 328 | principals { 329 | identifiers = ["lambda.amazonaws.com"] 330 | type = "Service" 331 | } 332 | condition { 333 | test = "ArnEquals" 334 | # aws:SourceArn carries the calling function's ARN; comparing against 335 | # the function *name* could never match, denying SendMessage in practice. 336 | values = [module.findings_manager_trigger_lambda.arn] 337 | variable = "aws:SourceArn" 338 | } 339 | } 340 | } 341 | 342 | # The SQS queue with rules triggers the worker lambda 343 | resource "aws_lambda_event_source_mapping" "sqs_to_worker" { 344 | event_source_arn = aws_sqs_queue.findings_manager_rule_q.arn 345 | function_name = module.findings_manager_worker_lambda.name 346 | # assumes a rule processing time of 30 sec average (which is high) 347 | batch_size = var.findings_manager_worker_lambda.timeout / 30 348 | 
maximum_batching_window_in_seconds = 60 347 | scaling_config { 348 | maximum_concurrency = 4 # to prevent Security Hub API rate limits 349 | } 350 | enabled = true 351 | } 352 | -------------------------------------------------------------------------------- /jira_lambda.tf: -------------------------------------------------------------------------------- 1 | data "aws_iam_policy_document" "jira_lambda_iam_role" { 2 | count = var.jira_integration.enabled ? 1 : 0 3 | 4 | statement { 5 | sid = "TrustEventsToStoreLogEvent" 6 | actions = [ 7 | "logs:CreateLogGroup", 8 | "logs:CreateLogStream", 9 | "logs:DescribeLogStreams", 10 | "logs:PutLogEvents" 11 | ] 12 | resources = [ 13 | "arn:aws:logs:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:*" 14 | ] 15 | } 16 | 17 | statement { 18 | sid = "SecretManagerAccess" 19 | actions = [ 20 | "secretsmanager:GetSecretValue" 21 | ] 22 | resources = [ 23 | var.jira_integration.credentials_secret_arn 24 | ] 25 | } 26 | 27 | statement { 28 | sid = "SecurityHubAccess" 29 | actions = [ 30 | "securityhub:BatchUpdateFindings" 31 | ] 32 | resources = [ 33 | "arn:aws:securityhub:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:hub/default" 34 | ] 35 | condition { 36 | test = "ForAnyValue:StringEquals" 37 | variable = "securityhub:ASFFSyntaxPath/Workflow.Status" 38 | values = var.jira_integration.autoclose_enabled ? ["NOTIFIED", "RESOLVED"] : ["NOTIFIED"] 39 | } 40 | } 41 | 42 | statement { 43 | sid = "LambdaKMSAccess" 44 | actions = [ 45 | "kms:Decrypt", 46 | "kms:Encrypt", 47 | "kms:GenerateDataKey*", 48 | "kms:ReEncrypt*" 49 | ] 50 | effect = "Allow" 51 | resources = [ 52 | var.kms_key_arn 53 | ] 54 | } 55 | } 56 | 57 | # Upload the zip archive to S3 58 | resource "aws_s3_object" "jira_lambda_deployment_package" { 59 | count = var.jira_integration.enabled ? 
1 : 0 60 | 61 | bucket = module.findings_manager_bucket.id 62 | key = "lambda_${var.jira_integration.lambda_settings.name}_${var.lambda_runtime}.zip" 63 | kms_key_id = var.kms_key_arn 64 | source = "${path.module}/files/pkg/lambda_findings-manager-jira_${var.lambda_runtime}.zip" 65 | source_hash = filemd5("${path.module}/files/pkg/lambda_findings-manager-jira_${var.lambda_runtime}.zip") 66 | tags = var.tags 67 | } 68 | 69 | # Lambda function to create Jira ticket for Security Hub findings and set the workflow state to NOTIFIED 70 | module "jira_lambda" { 71 | #checkov:skip=CKV_AWS_272:Code signing not used for now 72 | count = var.jira_integration.enabled ? 1 : 0 73 | 74 | source = "schubergphilis/mcaf-lambda/aws" 75 | version = "~> 1.4.1" 76 | 77 | name = var.jira_integration.lambda_settings.name 78 | create_policy = true 79 | create_s3_dummy_object = false 80 | description = "Lambda to create jira ticket and set the Security Hub workflow status to notified" 81 | handler = "findings_manager_jira.lambda_handler" 82 | kms_key_arn = var.kms_key_arn 83 | layers = ["arn:aws:lambda:${data.aws_region.current.name}:017000801446:layer:AWSLambdaPowertoolsPythonV2:79"] 84 | log_retention = 365 85 | memory_size = var.jira_integration.lambda_settings.memory_size 86 | policy = data.aws_iam_policy_document.jira_lambda_iam_role[0].json 87 | runtime = var.lambda_runtime 88 | s3_bucket = var.s3_bucket_name 89 | s3_key = aws_s3_object.jira_lambda_deployment_package[0].key 90 | s3_object_version = aws_s3_object.jira_lambda_deployment_package[0].version_id 91 | security_group_egress_rules = var.jira_integration.security_group_egress_rules 92 | source_code_hash = aws_s3_object.jira_lambda_deployment_package[0].checksum_sha256 93 | subnet_ids = var.subnet_ids 94 | tags = var.tags 95 | timeout = var.jira_integration.lambda_settings.timeout 96 | 97 | environment = { 98 | EXCLUDE_ACCOUNT_FILTER = jsonencode(var.jira_integration.exclude_account_ids) 99 | JIRA_AUTOCLOSE_COMMENT = 
var.jira_integration.autoclose_comment 100 | JIRA_AUTOCLOSE_TRANSITION = var.jira_integration.autoclose_transition_name 101 | JIRA_ISSUE_CUSTOM_FIELDS = jsonencode(var.jira_integration.issue_custom_fields) 102 | JIRA_ISSUE_TYPE = var.jira_integration.issue_type 103 | JIRA_PROJECT_KEY = var.jira_integration.project_key 104 | JIRA_SECRET_ARN = var.jira_integration.credentials_secret_arn 105 | LOG_LEVEL = var.jira_integration.lambda_settings.log_level 106 | POWERTOOLS_LOGGER_LOG_EVENT = "false" 107 | POWERTOOLS_SERVICE_NAME = "securityhub-findings-manager-jira" 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /jira_step_function.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | sfn_jira_orchestrator_name = "securityhub-findings-manager-orchestrator" 3 | } 4 | 5 | # IAM role to be assumed by Step Function 6 | module "jira_step_function_iam_role" { 7 | count = var.jira_integration.enabled ? 1 : 0 8 | 9 | source = "schubergphilis/mcaf-role/aws" 10 | version = "~> 0.3.2" 11 | 12 | name = var.jira_step_function_iam_role_name 13 | create_policy = true 14 | principal_identifiers = ["states.amazonaws.com"] 15 | principal_type = "Service" 16 | role_policy = data.aws_iam_policy_document.jira_step_function_iam_role[0].json 17 | tags = var.tags 18 | } 19 | 20 | data "aws_iam_policy_document" "jira_step_function_iam_role" { 21 | count = var.jira_integration.enabled ? 
1 : 0 22 | 23 | statement { 24 | sid = "LambdaInvokeAccess" 25 | actions = [ 26 | "lambda:InvokeFunction" 27 | ] 28 | resources = [ 29 | module.findings_manager_events_lambda.arn, 30 | module.jira_lambda[0].arn 31 | ] 32 | } 33 | 34 | statement { 35 | sid = "CloudWatchLogDeliveryResourcePolicyAccess" 36 | actions = [ 37 | "logs:CreateLogDelivery", 38 | "logs:DeleteLogDelivery", 39 | "logs:DescribeLogGroups", 40 | "logs:DescribeResourcePolicies", 41 | "logs:GetLogDelivery", 42 | "logs:ListLogDeliveries", 43 | "logs:PutResourcePolicy", 44 | "logs:UpdateLogDelivery" 45 | ] 46 | resources = [ 47 | "*" 48 | ] 49 | } 50 | 51 | statement { 52 | sid = "TrustEventsToStoreLogEvent" 53 | actions = [ 54 | "logs:CreateLogStream", 55 | "logs:PutLogEvents" 56 | ] 57 | resources = [ 58 | "arn:aws:logs:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:*" 59 | ] 60 | } 61 | } 62 | 63 | resource "aws_cloudwatch_log_group" "log_group_jira_orchestrator_sfn" { 64 | #checkov:skip=CKV_AWS_338:Ensure CloudWatch log groups retains logs for at least 1 year 65 | count = var.jira_integration.enabled ? 1 : 0 66 | 67 | name = "/aws/sfn/${local.sfn_jira_orchestrator_name}" 68 | retention_in_days = var.jira_integration.step_function_settings.retention 69 | kms_key_id = var.kms_key_arn 70 | } 71 | 72 | # Step Function to orchestrate findings manager lambda functions 73 | resource "aws_sfn_state_machine" "jira_orchestrator" { 74 | #checkov:skip=CKV_AWS_284:x-ray is not enabled due to the simplicity of this state machine and the costs involved with enabling this feature. 75 | #checkov:skip=CKV_AWS_285:logging configuration is only supported for SFN type 'EXPRESS'. 76 | count = var.jira_integration.enabled ? 
1 : 0 77 | 78 | name = local.sfn_jira_orchestrator_name 79 | role_arn = module.jira_step_function_iam_role[0].arn 80 | tags = var.tags 81 | 82 | definition = templatefile("${path.module}/files/step-function-artifacts/${local.sfn_jira_orchestrator_name}.json.tpl", { 83 | finding_severity_normalized = var.jira_integration.finding_severity_normalized_threshold 84 | findings_manager_events_lambda = module.findings_manager_events_lambda.arn 85 | jira_autoclose_enabled = var.jira_integration.autoclose_enabled 86 | jira_lambda = module.jira_lambda[0].arn 87 | }) 88 | 89 | logging_configuration { 90 | include_execution_data = true 91 | level = var.jira_integration.step_function_settings.log_level 92 | log_destination = "${aws_cloudwatch_log_group.log_group_jira_orchestrator_sfn[0].arn}:*" 93 | } 94 | } 95 | 96 | # IAM role to be assumed by EventBridge 97 | module "jira_eventbridge_iam_role" { 98 | count = var.jira_integration.enabled ? 1 : 0 99 | 100 | source = "schubergphilis/mcaf-role/aws" 101 | version = "~> 0.3.2" 102 | 103 | name = var.jira_eventbridge_iam_role_name 104 | create_policy = true 105 | principal_identifiers = ["events.amazonaws.com"] 106 | principal_type = "Service" 107 | role_policy = data.aws_iam_policy_document.jira_eventbridge_iam_role[0].json 108 | tags = var.tags 109 | } 110 | 111 | data "aws_iam_policy_document" "jira_eventbridge_iam_role" { 112 | count = var.jira_integration.enabled ? 1 : 0 113 | 114 | statement { 115 | sid = "StepFunctionExecutionAccess" 116 | actions = [ 117 | "states:StartExecution" 118 | ] 119 | resources = [ 120 | aws_sfn_state_machine.jira_orchestrator[0].arn 121 | ] 122 | } 123 | } 124 | 125 | resource "aws_cloudwatch_event_target" "jira_orchestrator" { 126 | count = var.jira_integration.enabled ? 
1 : 0 127 | 128 | arn = aws_sfn_state_machine.jira_orchestrator[0].arn 129 | role_arn = module.jira_eventbridge_iam_role[0].arn 130 | rule = aws_cloudwatch_event_rule.securityhub_findings_events.name 131 | } 132 | -------------------------------------------------------------------------------- /modules/servicenow/README.md: -------------------------------------------------------------------------------- 1 | # Usage 2 | 3 | ## Requirements 4 | 5 | | Name | Version | 6 | |------|---------| 7 | | [terraform](#requirement\_terraform) | >= 1.3.0 | 8 | | [aws](#requirement\_aws) | >= 4.9 | 9 | 10 | ## Providers 11 | 12 | | Name | Version | 13 | |------|---------| 14 | | [aws](#provider\_aws) | >= 4.9 | 15 | 16 | ## Modules 17 | 18 | | Name | Source | Version | 19 | |------|--------|---------| 20 | | [sync-user](#module\_sync-user) | github.com/schubergphilis/terraform-aws-mcaf-user | v0.4.0 | 21 | 22 | ## Resources 23 | 24 | | Name | Type | 25 | |------|------| 26 | | [aws_cloudwatch_event_rule.securityhub](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource | 27 | | [aws_cloudwatch_event_target.log_group_target](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource | 28 | | [aws_cloudwatch_event_target.securityhub](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource | 29 | | [aws_cloudwatch_log_group.servicenow](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_group) | resource | 30 | | [aws_cloudwatch_log_resource_policy.servicenow](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_log_resource_policy) | resource | 31 | | [aws_iam_policy.sqs_sechub](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | 32 | | 
[aws_sqs_queue.servicenow_queue](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue) | resource | 33 | | [aws_sqs_queue_policy.servicenow](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/sqs_queue_policy) | resource | 34 | | [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source | 35 | | [aws_iam_policy_document.servicenow](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | 36 | | [aws_iam_policy_document.servicenow_sqs_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | 37 | | [aws_iam_policy_document.sqs_sechub](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | 38 | | [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source | 39 | 40 | ## Inputs 41 | 42 | | Name | Description | Type | Default | Required | 43 | |------|-------------|------|---------|:--------:| 44 | | [kms\_key\_arn](#input\_kms\_key\_arn) | The ARN of the KMS key used to encrypt the resources | `string` | n/a | yes | 45 | | [tags](#input\_tags) | A mapping of tags to assign to the resources | `map(string)` | n/a | yes | 46 | | [cloudwatch\_retention\_days](#input\_cloudwatch\_retention\_days) | Time to retain the CloudWatch Logs for the ServiceNow integration | `number` | `365` | no | 47 | | [create\_access\_keys](#input\_create\_access\_keys) | Whether to create an access\_key and secret\_access key for the ServiceNow user | `bool` | `false` | no | 48 | | [severity\_label\_filter](#input\_severity\_label\_filter) | Only forward findings to ServiceNow with severity labels from this list (by default all severity labels are forwarded) | `list(string)` | `[]` | no | 49 | 50 | ## Outputs 51 | 
52 | No outputs. 53 | 54 | -------------------------------------------------------------------------------- /modules/servicenow/cloudwatch.tf: -------------------------------------------------------------------------------- 1 | resource "aws_cloudwatch_log_group" "servicenow" { 2 | name = "/aws/events/servicenow-integration" 3 | retention_in_days = var.cloudwatch_retention_days 4 | kms_key_id = var.kms_key_arn 5 | } 6 | 7 | data "aws_iam_policy_document" "servicenow" { 8 | statement { 9 | actions = [ 10 | "logs:CreateLogStream", 11 | "logs:PutLogEvents", 12 | ] 13 | 14 | resources = [ 15 | "${aws_cloudwatch_log_group.servicenow.arn}:*", 16 | "${aws_cloudwatch_log_group.servicenow.arn}:log-stream:*" 17 | ] 18 | 19 | principals { 20 | identifiers = ["events.amazonaws.com", "delivery.logs.amazonaws.com"] 21 | type = "Service" 22 | } 23 | 24 | condition { 25 | test = "ArnEquals" 26 | values = [aws_cloudwatch_event_rule.securityhub.arn] 27 | variable = "aws:SourceArn" 28 | } 29 | } 30 | } 31 | 32 | resource "aws_cloudwatch_log_resource_policy" "servicenow" { 33 | policy_document = data.aws_iam_policy_document.servicenow.json 34 | policy_name = "log-delivery-servicenow" 35 | } 36 | -------------------------------------------------------------------------------- /modules/servicenow/eventbridge.tf: -------------------------------------------------------------------------------- 1 | resource "aws_cloudwatch_event_rule" "securityhub" { 2 | name = "snow-RuleLifeCycleEvents" 3 | description = "Send Security Hub imported findings to the AwsServiceManagementConnectorForSecurityHubQueue SQS." 
4 | event_pattern = templatefile("${path.module}/templates/findings_filter.json.tftpl", { 5 | severity_label_filter = jsonencode(var.severity_label_filter) }) 6 | } 7 | 8 | resource "aws_cloudwatch_event_target" "securityhub" { 9 | arn = aws_sqs_queue.servicenow_queue.arn 10 | rule = aws_cloudwatch_event_rule.securityhub.name 11 | target_id = "SendToSQS" 12 | } 13 | 14 | resource "aws_cloudwatch_event_target" "log_group_target" { 15 | arn = aws_cloudwatch_log_group.servicenow.arn 16 | rule = aws_cloudwatch_event_rule.securityhub.name 17 | } 18 | -------------------------------------------------------------------------------- /modules/servicenow/iam.tf: -------------------------------------------------------------------------------- 1 | module "sync-user" { 2 | #checkov:skip=CKV_AWS_273:We really need a user for this setup 3 | name = "SCSyncUser" 4 | source = "github.com/schubergphilis/terraform-aws-mcaf-user?ref=v0.4.0" 5 | create_iam_access_key = var.create_access_keys 6 | create_policy = true 7 | kms_key_id = var.kms_key_arn 8 | policy = aws_iam_policy.sqs_sechub.policy 9 | tags = var.tags 10 | 11 | policy_arns = [ 12 | "arn:aws:iam::aws:policy/service-role/AWSConfigRoleForOrganizations", 13 | "arn:aws:iam::aws:policy/AmazonSSMReadOnlyAccess", 14 | "arn:aws:iam::aws:policy/AWSConfigUserAccess", 15 | "arn:aws:iam::aws:policy/AWSServiceCatalogAdminReadOnlyAccess" 16 | ] 17 | } 18 | 19 | //Create custom policies 20 | resource "aws_iam_policy" "sqs_sechub" { 21 | name = "sqs_sechub" 22 | description = "sqs_sechub" 23 | policy = data.aws_iam_policy_document.sqs_sechub.json 24 | } 25 | 26 | data "aws_iam_policy_document" "sqs_sechub" { 27 | statement { 28 | sid = "SqsMessages" 29 | actions = [ 30 | "sqs:ReceiveMessage", 31 | "sqs:DeleteMessage", 32 | "sqs:DeleteMessageBatch" 33 | ] 34 | resources = [aws_sqs_queue.servicenow_queue.arn] 35 | } 36 | 37 | statement { 38 | sid = "SecurityHubAccess" 39 | actions = [ 40 | "securityhub:BatchUpdateFindings", 41 | 
"securityhub:GetFindings" 42 | ] 43 | resources = ["arn:aws:securityhub:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:hub/default"] 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /modules/servicenow/main.tf: -------------------------------------------------------------------------------- 1 | # Data Source to get the access to Account ID in which Terraform is authorized and the region configured on the provider 2 | data "aws_caller_identity" "current" {} 3 | data "aws_region" "current" {} 4 | -------------------------------------------------------------------------------- /modules/servicenow/sqs.tf: -------------------------------------------------------------------------------- 1 | resource "aws_sqs_queue" "servicenow_queue" { 2 | name = "AwsServiceManagementConnectorForSecurityHubQueue" 3 | kms_master_key_id = var.kms_key_arn 4 | } 5 | 6 | resource "aws_sqs_queue_policy" "servicenow" { 7 | policy = data.aws_iam_policy_document.servicenow_sqs_policy.json 8 | queue_url = aws_sqs_queue.servicenow_queue.id 9 | } 10 | 11 | data "aws_iam_policy_document" "servicenow_sqs_policy" { 12 | statement { 13 | actions = [ 14 | "SQS:SendMessage" 15 | ] 16 | 17 | resources = [aws_sqs_queue.servicenow_queue.arn] 18 | 19 | principals { 20 | identifiers = ["events.amazonaws.com"] 21 | type = "Service" 22 | } 23 | 24 | condition { 25 | test = "ArnEquals" 26 | values = [aws_cloudwatch_event_rule.securityhub.arn] 27 | variable = "aws:SourceArn" 28 | } 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /modules/servicenow/templates/findings_filter.json.tftpl: -------------------------------------------------------------------------------- 1 | { 2 | "detail-type" : ["Security Hub Findings - Imported"], 3 | "source" : ["aws.securityhub"] 4 | %{ if length(jsondecode(severity_label_filter)) > 0 ~} 5 | , 6 | "detail": { 7 | "findings": { 8 | "Severity": { 9 | 
"Label": ${severity_label_filter} 10 | } 11 | } 12 | } 13 | %{ endif ~} 14 | } 15 | -------------------------------------------------------------------------------- /modules/servicenow/variables.tf: -------------------------------------------------------------------------------- 1 | variable "cloudwatch_retention_days" { 2 | type = number 3 | default = 365 4 | description = "Time to retain the CloudWatch Logs for the ServiceNow integration" 5 | } 6 | 7 | variable "create_access_keys" { 8 | type = bool 9 | default = false 10 | description = "Whether to create an access_key and secret_access key for the ServiceNow user" 11 | } 12 | 13 | variable "severity_label_filter" { 14 | type = list(string) 15 | default = [] 16 | description = "Only forward findings to ServiceNow with severity labels from this list (by default all severity labels are forwarded)" 17 | } 18 | 19 | variable "kms_key_arn" { 20 | type = string 21 | description = "The ARN of the KMS key used to encrypt the resources" 22 | } 23 | 24 | variable "tags" { 25 | type = map(string) 26 | description = "A mapping of tags to assign to the resources" 27 | } 28 | -------------------------------------------------------------------------------- /modules/servicenow/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 4.9" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /moved.tf: -------------------------------------------------------------------------------- 1 | moved { 2 | from = module.lambda_artifacts_bucket 3 | to = module.findings_manager_bucket 4 | } 5 | 6 | -------------------------------------------------------------------------------- /outputs.tf: -------------------------------------------------------------------------------- 1 | output "jira_lambda_sg_id" { 2 | value = 
one(module.jira_lambda[*].security_group_id) # single sg id when the jira module is enabled (count = 1), null otherwise 3 |   description = "This will output the security group id attached to the jira_lambda Lambda. This can be used to tune ingress and egress rules." 4 | } 5 | 6 | output "findings_manager_events_lambda_sg_id" { 7 |   value       = module.findings_manager_events_lambda.security_group_id 8 |   description = "This will output the security group id attached to the lambda_findings_manager_events Lambda. This can be used to tune ingress and egress rules." 9 | } 10 | 11 | output "findings_manager_trigger_lambda_sg_id" { 12 |   value       = module.findings_manager_trigger_lambda.security_group_id 13 |   description = "This will output the security group id attached to the lambda_findings_manager_trigger Lambda. This can be used to tune ingress and egress rules." 14 | } 15 | 16 | output "findings_manager_worker_lambda_sg_id" { 17 |   value       = module.findings_manager_worker_lambda.security_group_id 18 |   description = "This will output the security group id attached to the lambda_findings_manager_worker Lambda. This can be used to tune ingress and egress rules."
19 | } 20 | -------------------------------------------------------------------------------- /s3_bucket.tf: -------------------------------------------------------------------------------- 1 | # S3 bucket to store Lambda artifacts and the rules list 2 | module "findings_manager_bucket" { 3 | #checkov:skip=CKV_AWS_145:Bug in CheckOV https://github.com/bridgecrewio/checkov/issues/3847 4 | #checkov:skip=CKV_AWS_19:Bug in CheckOV https://github.com/bridgecrewio/checkov/issues/3847 5 | source = "schubergphilis/mcaf-s3/aws" 6 | version = "~> 0.14.1" 7 | 8 | name = var.s3_bucket_name 9 | kms_key_arn = var.kms_key_arn 10 | logging = null 11 | tags = var.tags 12 | versioning = true 13 | 14 | lifecycle_rule = [ 15 | { 16 | id = "default" 17 | enabled = true 18 | 19 | abort_incomplete_multipart_upload = { 20 | days_after_initiation = 7 21 | } 22 | 23 | expiration = { 24 | expired_object_delete_marker = true 25 | } 26 | 27 | noncurrent_version_expiration = { 28 | noncurrent_days = 7 29 | } 30 | } 31 | ] 32 | } 33 | -------------------------------------------------------------------------------- /servicenow.tf: -------------------------------------------------------------------------------- 1 | module "servicenow_integration" { 2 | #checkov:skip=CKV_AWS_273:We really need a user for this setup 3 | count = var.servicenow_integration.enabled ? 
1 : 0 4 | 5 | source = "./modules/servicenow/" 6 | 7 | cloudwatch_retention_days = var.servicenow_integration.cloudwatch_retention_days 8 | create_access_keys = var.servicenow_integration.create_access_keys 9 | severity_label_filter = var.servicenow_integration.severity_label_filter 10 | kms_key_arn = var.kms_key_arn 11 | tags = var.tags 12 | } 13 | -------------------------------------------------------------------------------- /variables.tf: -------------------------------------------------------------------------------- 1 | variable "findings_manager_events_lambda" { 2 | type = object({ 3 | name = optional(string, "securityhub-findings-manager-events") 4 | log_level = optional(string, "ERROR") 5 | memory_size = optional(number, 256) 6 | timeout = optional(number, 300) 7 | 8 | security_group_egress_rules = optional(list(object({ 9 | cidr_ipv4 = optional(string) 10 | cidr_ipv6 = optional(string) 11 | description = string 12 | from_port = optional(number, 0) 13 | ip_protocol = optional(string, "-1") 14 | prefix_list_id = optional(string) 15 | referenced_security_group_id = optional(string) 16 | to_port = optional(number, 0) 17 | })), []) 18 | }) 19 | default = {} 20 | description = "Findings Manager Lambda settings - Manage Security Hub findings in response to EventBridge events" 21 | 22 | validation { 23 | condition = alltrue([for o in var.findings_manager_events_lambda.security_group_egress_rules : (o.cidr_ipv4 != null || o.cidr_ipv6 != null || o.prefix_list_id != null || o.referenced_security_group_id != null)]) 24 | error_message = "Although \"cidr_ipv4\", \"cidr_ipv6\", \"prefix_list_id\", and \"referenced_security_group_id\" are all marked as optional, you must provide one of them in order to configure the destination of the traffic." 
25 | } 26 | } 27 | 28 | variable "findings_manager_trigger_lambda" { 29 | type = object({ 30 | name = optional(string, "securityhub-findings-manager-trigger") 31 | log_level = optional(string, "ERROR") 32 | memory_size = optional(number, 256) 33 | timeout = optional(number, 300) 34 | 35 | security_group_egress_rules = optional(list(object({ 36 | cidr_ipv4 = optional(string) 37 | cidr_ipv6 = optional(string) 38 | description = string 39 | from_port = optional(number, 0) 40 | ip_protocol = optional(string, "-1") 41 | prefix_list_id = optional(string) 42 | referenced_security_group_id = optional(string) 43 | to_port = optional(number, 0) 44 | })), []) 45 | }) 46 | default = {} 47 | description = "Findings Manager Lambda settings - Manage Security Hub findings in response to S3 file upload triggers" 48 | 49 | validation { 50 | condition = alltrue([for o in var.findings_manager_trigger_lambda.security_group_egress_rules : (o.cidr_ipv4 != null || o.cidr_ipv6 != null || o.prefix_list_id != null || o.referenced_security_group_id != null)]) 51 | error_message = "Although \"cidr_ipv4\", \"cidr_ipv6\", \"prefix_list_id\", and \"referenced_security_group_id\" are all marked as optional, you must provide one of them in order to configure the destination of the traffic." 
52 | } 53 | } 54 | 55 | variable "findings_manager_worker_lambda" { 56 | type = object({ 57 | name = optional(string, "securityhub-findings-manager-worker") 58 | log_level = optional(string, "ERROR") 59 | memory_size = optional(number, 256) 60 | timeout = optional(number, 900) 61 | 62 | security_group_egress_rules = optional(list(object({ 63 | cidr_ipv4 = optional(string) 64 | cidr_ipv6 = optional(string) 65 | description = string 66 | from_port = optional(number, 0) 67 | ip_protocol = optional(string, "-1") 68 | prefix_list_id = optional(string) 69 | referenced_security_group_id = optional(string) 70 | to_port = optional(number, 0) 71 | })), []) 72 | }) 73 | default = {} 74 | description = "Findings Manager Lambda settings - Manage Security Hub findings in response to SQS trigger" 75 | 76 | validation { 77 | condition = alltrue([for o in var.findings_manager_worker_lambda.security_group_egress_rules : (o.cidr_ipv4 != null || o.cidr_ipv6 != null || o.prefix_list_id != null || o.referenced_security_group_id != null)]) 78 | error_message = "Although \"cidr_ipv4\", \"cidr_ipv6\", \"prefix_list_id\", and \"referenced_security_group_id\" are all marked as optional, you must provide one of them in order to configure the destination of the traffic." 79 | } 80 | } 81 | 82 | variable "jira_eventbridge_iam_role_name" { 83 | type = string 84 | default = "SecurityHubFindingsManagerJiraEventBridge" 85 | description = "The name of the role which will be assumed by EventBridge rules for Jira integration" 86 | } 87 | 88 | variable "jira_integration" { 89 | type = object({ 90 | enabled = optional(bool, false) 91 | autoclose_enabled = optional(bool, false) 92 | autoclose_comment = optional(string, "Security Hub finding has been resolved. 
Autoclosing the issue.") 93 | autoclose_transition_name = optional(string, "Close Issue") 94 | credentials_secret_arn = string 95 | exclude_account_ids = optional(list(string), []) 96 | finding_severity_normalized_threshold = optional(number, 70) 97 | issue_custom_fields = optional(map(string), {}) 98 | issue_type = optional(string, "Security Advisory") 99 | project_key = string 100 | 101 | security_group_egress_rules = optional(list(object({ 102 | cidr_ipv4 = optional(string) 103 | cidr_ipv6 = optional(string) 104 | description = string 105 | from_port = optional(number, 0) 106 | ip_protocol = optional(string, "-1") 107 | prefix_list_id = optional(string) 108 | referenced_security_group_id = optional(string) 109 | to_port = optional(number, 0) 110 | })), []) 111 | 112 | lambda_settings = optional(object({ 113 | name = optional(string, "securityhub-findings-manager-jira") 114 | log_level = optional(string, "INFO") 115 | memory_size = optional(number, 256) 116 | timeout = optional(number, 60) 117 | }), { 118 | name = "securityhub-findings-manager-jira" 119 | iam_role_name = "SecurityHubFindingsManagerJiraLambda" 120 | log_level = "INFO" 121 | memory_size = 256 122 | timeout = 60 123 | security_group_egress_rules = [] 124 | }) 125 | 126 | step_function_settings = optional(object({ 127 | log_level = optional(string, "ERROR") 128 | retention = optional(number, 90) 129 | }), { 130 | log_level = "ERROR" 131 | retention = 90 132 | }) 133 | 134 | }) 135 | default = { 136 | enabled = false 137 | credentials_secret_arn = null 138 | project_key = null 139 | } 140 | description = "Findings Manager - Jira integration settings" 141 | 142 | validation { 143 | condition = alltrue([for o in var.jira_integration.security_group_egress_rules : (o.cidr_ipv4 != null || o.cidr_ipv6 != null || o.prefix_list_id != null || o.referenced_security_group_id != null)]) 144 | error_message = "Although \"cidr_ipv4\", \"cidr_ipv6\", \"prefix_list_id\", and \"referenced_security_group_id\" are all 
marked as optional, you must provide one of them in order to configure the destination of the traffic." 145 | } 146 | } 147 | 148 | variable "jira_step_function_iam_role_name" { 149 | type = string 150 | default = "SecurityHubFindingsManagerJiraStepFunction" 151 | description = "The name of the role which will be assumed by AWS Step Function for Jira integration" 152 | } 153 | 154 | variable "kms_key_arn" { 155 | type = string 156 | description = "The ARN of the KMS key used to encrypt the resources" 157 | } 158 | 159 | # Modify the build-lambda.yaml GitHub action if you modify the allowed versions to ensure a proper zip is created. 160 | variable "lambda_runtime" { 161 | type = string 162 | default = "python3.12" 163 | description = "The version of Python to use for the Lambda functions" 164 | validation { 165 | condition = contains(["python3.11", "python3.12"], var.lambda_runtime) 166 | error_message = "The runtime must be one of the following: python3.11, python3.12." 167 | } 168 | } 169 | 170 | variable "rules_filepath" { 171 | type = string 172 | default = "" 173 | description = "Pathname to the file that stores the manager rules" 174 | } 175 | 176 | variable "rules_s3_object_name" { 177 | type = string 178 | default = "rules.yaml" 179 | description = "The S3 object containing the rules to be applied to Security Hub findings manager" 180 | } 181 | 182 | variable "servicenow_integration" { 183 | type = object({ 184 | enabled = optional(bool, false) 185 | create_access_keys = optional(bool, false) 186 | cloudwatch_retention_days = optional(number, 365) 187 | severity_label_filter = optional(list(string), []) 188 | }) 189 | default = { 190 | enabled = false 191 | } 192 | description = "ServiceNow integration settings" 193 | } 194 | 195 | variable "subnet_ids" { 196 | type = list(string) 197 | default = null 198 | description = "The subnet ids where the Lambda functions needs to run" 199 | } 200 | 201 | variable "s3_bucket_name" { 202 | type = string 203 | 
description = "The name for the S3 bucket which will be created for storing the function's deployment package" 204 | } 205 | 206 | variable "tags" { 207 | type = map(string) 208 | default = {} 209 | description = "A mapping of tags to assign to the resources" 210 | } 211 | -------------------------------------------------------------------------------- /versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3.0" 3 | 4 | required_providers { 5 | archive = { 6 | source = "hashicorp/archive" 7 | version = ">= 2.0" 8 | } 9 | aws = { 10 | source = "hashicorp/aws" 11 | version = ">= 4.9" 12 | } 13 | external = { 14 | source = "hashicorp/external" 15 | version = ">= 2.0" 16 | } 17 | local = { 18 | source = "hashicorp/local" 19 | version = ">= 1.0" 20 | } 21 | null = { 22 | source = "hashicorp/null" 23 | version = ">= 2.0" 24 | } 25 | } 26 | } 27 | --------------------------------------------------------------------------------