├── .github
├── ISSUE_TEMPLATE
│ ├── bug-report.md
│ └── general-request.md
├── labels.yaml
├── pull_request_template.md
├── release-drafter-config.yaml
└── workflows
│ ├── build-lambda.yaml
│ ├── label-synchronization.yaml
│ ├── pr-validation.yaml
│ ├── release-drafter.yaml
│ ├── terraform-validation.yaml
│ └── update-changelog.yaml
├── .gitignore
├── .pre-commit-config.yaml
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── UPGRADING.md
├── data.tf
├── examples
├── basic-separate-file
│ ├── main.tf
│ └── versions.tf
├── basic
│ ├── main.tf
│ └── versions.tf
├── jira-integration
│ ├── main.tf
│ └── versions.tf
├── kms.json
├── rules.yaml
└── servicenow-integration
│ ├── main.tf
│ └── versions.tf
├── files
├── lambda-artifacts
│ ├── findings-manager-jira
│ │ ├── findings_manager_jira.py
│ │ ├── helpers.py
│ │ ├── requirements-dev.txt
│ │ └── requirements.txt
│ └── securityhub-findings-manager
│ │ ├── requirements-dev.txt
│ │ ├── requirements.txt
│ │ ├── securityhub_events.py
│ │ ├── securityhub_trigger.py
│ │ ├── securityhub_trigger_worker.py
│ │ └── strategize_findings_manager.py
├── pkg
│ ├── lambda_findings-manager-jira_python3.11.zip
│ ├── lambda_findings-manager-jira_python3.12.zip
│ ├── lambda_securityhub-findings-manager_python3.11.zip
│ └── lambda_securityhub-findings-manager_python3.12.zip
└── step-function-artifacts
│ ├── securityhub-findings-manager-orchestrator-graph.png
│ └── securityhub-findings-manager-orchestrator.json.tpl
├── findings_manager.tf
├── jira_lambda.tf
├── jira_step_function.tf
├── modules
└── servicenow
│ ├── README.md
│ ├── cloudwatch.tf
│ ├── eventbridge.tf
│ ├── iam.tf
│ ├── main.tf
│ ├── sqs.tf
│ ├── templates
│ └── findings_filter.json.tftpl
│ ├── variables.tf
│ └── versions.tf
├── moved.tf
├── outputs.tf
├── s3_bucket.tf
├── servicenow.tf
├── variables.tf
└── versions.tf
/.github/ISSUE_TEMPLATE/bug-report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Template to report a bug
4 | title: 'bug: '
5 | labels: bug
6 | assignees: ''
7 |
8 | ---
9 |
10 | **💡 Problem description**
11 | Enter summary of the problem here.
12 |
13 | **☹️ Current Behavior**
14 | Describe what is happening. More detail is better. When code is pasted, use correct formatting.
15 |
16 | **😀 Expected Behavior**
17 | Enter any other details such as examples, links to requirements, etc. Any criteria that might help with fixing the problem. Attach screenshots if possible. More detail is better.
18 |
 19 | **❓ Steps to Reproduce**
20 | Enter detailed steps to reproduce here. More detail is better.
21 |
22 | **🚧 Workaround**
23 | If there is a way to work around the problem, place that information here.
24 |
25 | **💻 Environment**
26 | Anything that will help triage the bug will help. For example:
27 | - Terraform version
28 | - Module version
29 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/general-request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: General Request
3 | about: A template for a general request on this repository
4 | title: ''
5 | labels: documentation, enhancement, chore
6 | assignees: ''
7 |
8 | ---
9 |
10 | **:thought_balloon: Description of the request or enhancement**
11 | A clear and concise description of what the request is about. Please add the fitting label to this issue:
12 | - Documentation
13 | - Enhancement
14 | - Chore (not covered by something else / question)
15 |
16 | **:bookmark: Additional context**
17 | Add any other context or screenshots about the feature request here.
18 |
19 | **:100: Acceptance criteria**
20 | Enter the conditions of satisfaction here. That is, the conditions that will satisfy the user/persona that the goal/benefit/value has been achieved.
21 |
--------------------------------------------------------------------------------
/.github/labels.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: breaking
3 | color: "b60205"
4 | description: This change is not backwards compatible
5 | - name: bug
6 | color: "d93f0b"
7 | description: Something isn't working
8 | - name: documentation
9 | color: "0075ca"
10 | description: Improvements or additions to documentation
11 | - name: enhancement
12 | color: "0e8a16"
13 | description: New feature or request
14 | - name: feature
15 | color: "0e8a16"
16 | description: New feature or request
17 | - name: fix
18 | color: "d93f0b"
19 | description: Fixes a bug
20 | - name: chore
21 | color: "6b93d3"
22 | description: Task not covered by something else (e.g. refactor, CI changes, tests)
23 | - name: no-changelog
24 | color: "cccccc"
25 | description: No entry should be added to the release notes and changelog
26 | - name: security
27 | color: "5319e7"
28 | description: Solving a security issue
29 |
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 | **:hammer_and_wrench: Summary**
2 |
3 |
4 |
5 | **:rocket: Motivation**
6 |
7 |
8 | **:pencil: Additional Information**
9 |
10 |
--------------------------------------------------------------------------------
/.github/release-drafter-config.yaml:
--------------------------------------------------------------------------------
1 | name-template: "v$RESOLVED_VERSION"
2 | tag-template: "v$RESOLVED_VERSION"
3 | version-template: "$MAJOR.$MINOR.$PATCH"
4 | change-title-escapes: '\<*_&'
5 |
6 | categories:
7 | - title: "🚀 Features"
8 | labels:
9 | - "breaking"
10 | - "enhancement"
11 | - "feature"
12 | - title: "🐛 Bug Fixes"
13 | labels:
14 | - "bug"
15 | - "fix"
16 | - "security"
17 | - title: "📖 Documentation"
18 | labels:
19 | - "documentation"
20 | - title: "🧺 Miscellaneous"
21 | labels:
22 | - "chore"
23 |
24 | version-resolver:
25 | major:
26 | labels:
27 | - "breaking"
28 | minor:
29 | labels:
30 | - "enhancement"
31 | - "feature"
32 | patch:
33 | labels:
34 | - "bug"
35 | - "chore"
36 | - "documentation"
37 | - "fix"
38 | - "security"
39 | default: "minor"
40 |
41 | autolabeler:
42 | - label: "documentation"
43 | body:
44 | - "/documentation/"
45 | branch:
46 | - '/docs\/.+/'
47 | title:
48 | - "/documentation/i"
49 | - "/docs/i"
50 | - label: "bug"
51 | body:
52 | - "/bug/"
53 | branch:
54 | - '/bug\/.+/'
55 | - '/fix\/.+/'
56 | title:
57 | - "/bug/i"
58 | - "/fix/i"
59 | - label: "feature"
60 | branch:
61 | - '/feature\/.+/'
62 | - '/enhancement\/.+/'
63 | title:
64 | - "/feature/i"
65 | - "/feat/i"
66 | - "/enhancement/i"
67 | - label: "breaking"
68 | body:
69 | - "/breaking change/i"
70 | branch:
71 | - '/breaking\/.+/'
72 | title:
73 | - "/!:/"
74 | - "/breaking/i"
75 | - "/major/i"
76 | - label: "chore"
77 | branch:
78 | - '/chore\/.+/'
79 | title:
80 | - "/chore/i"
81 |
82 | exclude-contributors:
83 | - "github-actions[bot]"
84 |
85 | exclude-labels:
86 | - "no-changelog"
87 |
88 | template: |
89 | # What's Changed
90 |
91 | $CHANGES
92 |
93 | **Full Changelog**: https://github.com/$OWNER/$REPOSITORY/compare/$PREVIOUS_TAG...v$RESOLVED_VERSION
94 |
--------------------------------------------------------------------------------
/.github/workflows/build-lambda.yaml:
--------------------------------------------------------------------------------
1 | name: Build and Package Lambda(s)
2 |
3 | on:
4 | workflow_dispatch:
5 | pull_request:
6 | branches:
7 | - main
8 | - master
9 | paths:
10 | - files/lambda-artifacts/**
11 |
12 | permissions:
13 | contents: write
14 | pull-requests: write
15 |
16 | env:
17 | LAMBDA_DIR: "files/lambda-artifacts"
18 | PKG_DIR: "files/pkg"
19 |
20 | jobs:
21 | pkg:
22 | runs-on: ubuntu-latest
23 |
24 | strategy:
25 | matrix:
26 | python-version: ["3.11", "3.12"]
27 | lambda-name: ["securityhub-findings-manager", "findings-manager-jira"]
28 |
29 | steps:
30 | - name: Checkout repository
31 | uses: actions/checkout@v4
32 |
33 | - name: Set up Python
34 | uses: actions/setup-python@v5
35 | with:
36 | python-version: ${{ matrix.python-version }}
37 |
38 | - name: Install dependencies
39 | run: |
40 | cd $LAMBDA_DIR/${{ matrix.lambda-name }}
41 | python -m venv venv
42 | source venv/bin/activate
43 | pip install --upgrade pip
44 | pip install -r requirements.txt
45 |
46 | - name: Create Lambda deployment package
47 | run: |
48 | mkdir -p $PKG_DIR
49 |
50 | # Navigate to site-packages
51 | cd $LAMBDA_DIR/${{ matrix.lambda-name }}/venv/lib/python${{ matrix.python-version }}/site-packages
52 |
53 | # Removing nonessential files 'https://github.com/aws-powertools/powertools-lambda-layer-cdk/blob/d24716744f7d1f37617b4998c992c4c067e19e64/layer/Python/Dockerfile'
54 | rm -rf boto* s3transfer* *dateutil* urllib3* six* jmespath*
55 | find . -name '*.so' -type f -exec strip "{}" \;
56 | find . -wholename "*/tests/*" -type f -delete
57 | find . -regex '^.*\(__pycache__\|\.py[co]\)$' -delete
58 |
59 | # Package the lambda function. Package the dependencies and then add the source code to the created zip to ensure a flat archive structure.
60 | zip -r ../../../../../../../$PKG_DIR/lambda_${{ matrix.lambda-name }}_python${{ matrix.python-version }}.zip .
61 | cd ../../../../
62 | zip -g ../../../$PKG_DIR/lambda_${{ matrix.lambda-name }}_python${{ matrix.python-version }}.zip -r * --exclude venv/\*
63 |
64 | - name: Upload artifact
65 | uses: actions/upload-artifact@v4
66 | with:
67 | name: lambda_${{ matrix.lambda-name }}_python${{ matrix.python-version }}.zip
68 | path: files/pkg/lambda_${{ matrix.lambda-name }}_python${{ matrix.python-version }}.zip
69 |
 70 | # Download all artifacts and commit them to the repository. This separate job prevents a push to the repository per zip file due to the matrix.
71 | push:
72 | runs-on: ubuntu-latest
73 | needs: pkg
74 |
75 | steps:
76 | - name: Checkout repository
77 | uses: actions/checkout@v4
78 |
79 | - name: Remove old pkg artifacts
80 | run: rm -rf files/pkg/
81 |
82 | - name: Download all artifacts
83 | uses: actions/download-artifact@v4
84 | with:
85 | path: files/pkg/
86 | merge-multiple: true
87 |
88 | - name: Commit deployment packages
89 | uses: stefanzweifel/git-auto-commit-action@v5
90 | with:
91 | commit_message: "Add all Lambda deployment packages"
92 | file_pattern: "files/pkg/*.zip"
93 |
--------------------------------------------------------------------------------
/.github/workflows/label-synchronization.yaml:
--------------------------------------------------------------------------------
1 | # DO NOT CHANGE THIS FILE DIRECTLY
2 | # Source: https://github.com/schubergphilis/mcaf-github-workflows
3 |
4 | name: label-synchronization
5 | on:
6 | workflow_dispatch:
7 | push:
8 | branches:
9 | - main
10 | - master
11 | paths:
12 | - .github/labels.yaml
 13 | - .github/workflows/label-synchronization.yaml
14 |
15 | permissions:
16 | # write permission is required to edit issue labels
17 | issues: write
18 |
19 | jobs:
20 | build:
21 | runs-on: ubuntu-latest
22 | steps:
23 | - name: Checkout code
24 | uses: actions/checkout@v4
25 |
26 | - name: Synchronize labels
27 | uses: crazy-max/ghaction-github-labeler@v5
28 | with:
29 | dry-run: false
30 | github-token: ${{ secrets.GITHUB_TOKEN }}
31 | skip-delete: false
32 | yaml-file: .github/labels.yaml
33 |
--------------------------------------------------------------------------------
/.github/workflows/pr-validation.yaml:
--------------------------------------------------------------------------------
1 | # DO NOT CHANGE THIS FILE DIRECTLY
2 | # Source: https://github.com/schubergphilis/mcaf-github-workflows
3 |
4 | name: "pr-validation"
5 |
6 | on:
7 | pull_request:
8 |
9 | permissions:
10 | checks: write
11 | contents: read
12 | pull-requests: write
13 |
14 | concurrency:
15 | group: ${{ github.workflow }}-${{ github.event.pull_request.number }}
16 | cancel-in-progress: true
17 |
18 | jobs:
19 | autolabeler:
20 | runs-on: ubuntu-latest
21 | steps:
22 | - uses: release-drafter/release-drafter@v6
23 | with:
24 | config-name: release-drafter-config.yaml
25 | env:
26 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
27 |
28 | title-checker:
29 | runs-on: ubuntu-latest
30 | steps:
31 | - uses: amannn/action-semantic-pull-request@v5
32 | id: lint_pr_title
33 | with:
34 | types: |
35 | breaking
36 | bug
37 | chore
38 | docs
39 | documentation
40 | enhancement
41 | feat
42 | feature
43 | fix
44 | security
45 | requireScope: false
46 | ignoreLabels: |
47 | skip-changelog
48 | env:
49 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
50 |
51 | - uses: marocchino/sticky-pull-request-comment@v2
 52 | # When the previous step fails, the workflow would stop. By adding this
53 | # condition you can continue the execution with the populated error message.
54 | if: always() && (steps.lint_pr_title.outputs.error_message != null)
55 | with:
56 | header: pr-title-lint-error
57 | message: |
58 | Hey there and thank you for opening this pull request! 👋🏼
59 |
60 | We require pull request titles to follow the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/) and it looks like your proposed title needs to be adjusted.
61 |
62 | Examples for valid PR titles:
63 | feat(ui): Add button component.
64 | fix: Correct typo.
65 | _type(scope): subject._
66 |
67 | Adding a scope is optional
68 |
69 | Details:
70 | ```
71 | ${{ steps.lint_pr_title.outputs.error_message }}
72 | ```
73 |
74 | # Delete a previous comment when the issue has been resolved
75 | - if: ${{ steps.lint_pr_title.outputs.error_message == null }}
76 | uses: marocchino/sticky-pull-request-comment@v2
77 | with:
78 | header: pr-title-lint-error
79 | delete: true
80 |
81 | label-checker:
82 | needs: autolabeler
83 | runs-on: ubuntu-latest
84 | steps:
85 | - uses: docker://agilepathway/pull-request-label-checker:v1.6.55
86 | id: lint_pr_labels
87 | with:
88 | any_of: breaking,bug,chore,documentation,enhancement,feature,fix,security
89 | repo_token: ${{ secrets.GITHUB_TOKEN }}
90 |
91 | - uses: marocchino/sticky-pull-request-comment@v2
 92 | # When the previous step fails, the workflow would stop. By adding this
93 | # condition you can continue the execution with the populated error message.
94 | if: always() && (steps.lint_pr_labels.outputs.label_check == 'failure')
95 | with:
96 | header: pr-labels-lint-error
97 | message: |
98 | Hey there and thank you for opening this pull request! 👋🏼
99 |
100 | The PR needs to have at least one of the following labels:
101 |
102 | - breaking
103 | - bug
104 | - chore
105 | - documentation
106 | - enhancement
107 | - feature
108 | - fix
109 | - security
110 |
111 | # Delete a previous comment when the issue has been resolved
112 | - if: ${{ steps.lint_pr_labels.outputs.label_check == 'success' }}
113 | uses: marocchino/sticky-pull-request-comment@v2
114 | with:
115 | header: pr-labels-lint-error
116 | delete: true
117 |
--------------------------------------------------------------------------------
/.github/workflows/release-drafter.yaml:
--------------------------------------------------------------------------------
1 | # DO NOT CHANGE THIS FILE DIRECTLY
2 | # Source: https://github.com/schubergphilis/mcaf-github-workflows
3 |
4 | name: "release-drafter"
5 |
6 | on:
7 | push:
8 | branches:
9 | - main
10 | - master
11 | paths-ignore:
12 | - .github/**
13 | - .gitignore
14 | - .pre-commit-config.yaml
15 | - CHANGELOG.md
16 | - CONTRIBUTING.md
17 | - LICENSE
18 |
19 | permissions:
20 | # write permission is required to create a github release
21 | contents: write
22 |
23 | jobs:
24 | draft:
25 | runs-on: ubuntu-latest
26 | steps:
27 | - uses: release-drafter/release-drafter@v6
28 | with:
29 | publish: false
30 | prerelease: false
31 | config-name: release-drafter-config.yaml
32 | env:
33 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
34 |
--------------------------------------------------------------------------------
/.github/workflows/terraform-validation.yaml:
--------------------------------------------------------------------------------
1 | # DO NOT CHANGE THIS FILE DIRECTLY
2 | # Source: https://github.com/schubergphilis/mcaf-github-workflows
3 |
4 | name: "terraform"
5 |
6 | on:
7 | pull_request:
8 |
9 | permissions:
10 | contents: write
11 | pull-requests: write
12 |
13 | env:
14 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
15 | TF_IN_AUTOMATION: 1
16 |
17 | jobs:
18 | fmt-lint-validate:
19 | runs-on: ubuntu-latest
20 | steps:
21 | - name: Checkout code
22 | uses: actions/checkout@v4
23 |
24 | - name: Setup Terraform
25 | uses: hashicorp/setup-terraform@v3
26 |
27 | - name: Setup Terraform Linters
28 | uses: terraform-linters/setup-tflint@v4
29 | with:
30 | github_token: ${{ github.token }}
31 |
32 | - name: Terraform Format
33 | id: fmt
34 | run: terraform fmt -check -recursive
35 |
36 | - name: Terraform Lint
37 | id: lint
38 | run: |
39 | echo "Checking ."
40 | tflint --format compact
41 |
42 | for d in examples/*/; do
43 | echo "Checking ${d} ..."
44 | tflint --chdir=$d --format compact
45 | done
46 |
47 | - name: Terraform Validate
48 | id: validate
49 | if: ${{ !vars.SKIP_TERRAFORM_VALIDATE }}
50 | run: |
51 | for d in examples/*/; do
52 | echo "Checking ${d} ..."
53 | terraform -chdir=$d init
54 | terraform -chdir=$d validate -no-color
55 | done
56 | env:
57 | AWS_DEFAULT_REGION: eu-west-1
58 |
59 | - name: Terraform Test
60 | id: test
61 | if: ${{ !vars.SKIP_TERRAFORM_TESTS }}
62 | run: |
63 | terraform init
64 | terraform test
65 |
66 | - uses: actions/github-script@v7
67 | if: github.event_name == 'pull_request' || always()
68 | with:
69 | github-token: ${{ secrets.GITHUB_TOKEN }}
70 | script: |
71 | // 1. Retrieve existing bot comments for the PR
72 | const { data: comments } = await github.rest.issues.listComments({
73 | owner: context.repo.owner,
74 | repo: context.repo.repo,
75 | issue_number: context.issue.number,
76 | })
77 | const botComment = comments.find(comment => {
78 | return comment.user.type === 'Bot' && comment.body.includes('Terraform Format and Style')
79 | })
80 |
81 | // 2. Prepare format of the comment
82 | const output = `#### Terraform Format and Style 🖌\`${{ steps.fmt.outcome }}\`
83 | #### Terraform Initialization ⚙️\`${{ steps.init.outcome }}\`
84 | #### Terraform Lint 📖\`${{ steps.lint.outcome }}\`
85 | #### Terraform Validation 🤖\`${{ steps.validate.outcome }}\`
86 | Validation Output
87 |
88 | \`\`\`\n
89 | ${{ steps.validate.outputs.stdout }}
90 | \`\`\`
91 |
92 |
object({| `{}` | no | 144 | | [findings\_manager\_trigger\_lambda](#input\_findings\_manager\_trigger\_lambda) | Findings Manager Lambda settings - Manage Security Hub findings in response to S3 file upload triggers |
name = optional(string, "securityhub-findings-manager-events")
log_level = optional(string, "ERROR")
memory_size = optional(number, 256)
timeout = optional(number, 300)
security_group_egress_rules = optional(list(object({
cidr_ipv4 = optional(string)
cidr_ipv6 = optional(string)
description = string
from_port = optional(number, 0)
ip_protocol = optional(string, "-1")
prefix_list_id = optional(string)
referenced_security_group_id = optional(string)
to_port = optional(number, 0)
})), [])
})
object({| `{}` | no | 145 | | [findings\_manager\_worker\_lambda](#input\_findings\_manager\_worker\_lambda) | Findings Manager Lambda settings - Manage Security Hub findings in response to SQS trigger |
name = optional(string, "securityhub-findings-manager-trigger")
log_level = optional(string, "ERROR")
memory_size = optional(number, 256)
timeout = optional(number, 300)
security_group_egress_rules = optional(list(object({
cidr_ipv4 = optional(string)
cidr_ipv6 = optional(string)
description = string
from_port = optional(number, 0)
ip_protocol = optional(string, "-1")
prefix_list_id = optional(string)
referenced_security_group_id = optional(string)
to_port = optional(number, 0)
})), [])
})
object({| `{}` | no | 146 | | [jira\_eventbridge\_iam\_role\_name](#input\_jira\_eventbridge\_iam\_role\_name) | The name of the role which will be assumed by EventBridge rules for Jira integration | `string` | `"SecurityHubFindingsManagerJiraEventBridge"` | no | 147 | | [jira\_integration](#input\_jira\_integration) | Findings Manager - Jira integration settings |
name = optional(string, "securityhub-findings-manager-worker")
log_level = optional(string, "ERROR")
memory_size = optional(number, 256)
timeout = optional(number, 900)
security_group_egress_rules = optional(list(object({
cidr_ipv4 = optional(string)
cidr_ipv6 = optional(string)
description = string
from_port = optional(number, 0)
ip_protocol = optional(string, "-1")
prefix_list_id = optional(string)
referenced_security_group_id = optional(string)
to_port = optional(number, 0)
})), [])
})
object({|
enabled = optional(bool, false)
autoclose_enabled = optional(bool, false)
autoclose_comment = optional(string, "Security Hub finding has been resolved. Autoclosing the issue.")
autoclose_transition_name = optional(string, "Close Issue")
credentials_secret_arn = string
exclude_account_ids = optional(list(string), [])
finding_severity_normalized_threshold = optional(number, 70)
issue_custom_fields = optional(map(string), {})
issue_type = optional(string, "Security Advisory")
project_key = string
security_group_egress_rules = optional(list(object({
cidr_ipv4 = optional(string)
cidr_ipv6 = optional(string)
description = string
from_port = optional(number, 0)
ip_protocol = optional(string, "-1")
prefix_list_id = optional(string)
referenced_security_group_id = optional(string)
to_port = optional(number, 0)
})), [])
lambda_settings = optional(object({
name = optional(string, "securityhub-findings-manager-jira")
log_level = optional(string, "INFO")
memory_size = optional(number, 256)
timeout = optional(number, 60)
}), {
name = "securityhub-findings-manager-jira"
iam_role_name = "SecurityHubFindingsManagerJiraLambda"
log_level = "INFO"
memory_size = 256
timeout = 60
security_group_egress_rules = []
})
step_function_settings = optional(object({
log_level = optional(string, "ERROR")
retention = optional(number, 90)
}), {
log_level = "ERROR"
retention = 90
})
})
{| no | 148 | | [jira\_step\_function\_iam\_role\_name](#input\_jira\_step\_function\_iam\_role\_name) | The name of the role which will be assumed by AWS Step Function for Jira integration | `string` | `"SecurityHubFindingsManagerJiraStepFunction"` | no | 149 | | [lambda\_runtime](#input\_lambda\_runtime) | The version of Python to use for the Lambda functions | `string` | `"python3.12"` | no | 150 | | [rules\_filepath](#input\_rules\_filepath) | Pathname to the file that stores the manager rules | `string` | `""` | no | 151 | | [rules\_s3\_object\_name](#input\_rules\_s3\_object\_name) | The S3 object containing the rules to be applied to Security Hub findings manager | `string` | `"rules.yaml"` | no | 152 | | [servicenow\_integration](#input\_servicenow\_integration) | ServiceNow integration settings |
"credentials_secret_arn": null,
"enabled": false,
"project_key": null
}
object({|
enabled = optional(bool, false)
create_access_keys = optional(bool, false)
cloudwatch_retention_days = optional(number, 365)
severity_label_filter = optional(list(string), [])
})
{| no | 153 | | [subnet\_ids](#input\_subnet\_ids) | The subnet ids where the Lambda functions needs to run | `list(string)` | `null` | no | 154 | | [tags](#input\_tags) | A mapping of tags to assign to the resources | `map(string)` | `{}` | no | 155 | 156 | ## Outputs 157 | 158 | | Name | Description | 159 | |------|-------------| 160 | | [findings\_manager\_events\_lambda\_sg\_id](#output\_findings\_manager\_events\_lambda\_sg\_id) | This will output the security group id attached to the lambda\_findings\_manager\_events Lambda. This can be used to tune ingress and egress rules. | 161 | | [findings\_manager\_trigger\_lambda\_sg\_id](#output\_findings\_manager\_trigger\_lambda\_sg\_id) | This will output the security group id attached to the lambda\_findings\_manager\_trigger Lambda. This can be used to tune ingress and egress rules. | 162 | | [findings\_manager\_worker\_lambda\_sg\_id](#output\_findings\_manager\_worker\_lambda\_sg\_id) | This will output the security group id attached to the lambda\_findings\_manager\_worker Lambda. This can be used to tune ingress and egress rules. | 163 | | [jira\_lambda\_sg\_id](#output\_jira\_lambda\_sg\_id) | This will output the security group id attached to the jira\_lambda Lambda. This can be used to tune ingress and egress rules. | 164 | 165 | -------------------------------------------------------------------------------- /UPGRADING.md: -------------------------------------------------------------------------------- 1 | # Upgrading Notes 2 | 3 | This document captures required refactoring on your part when upgrading to a module version that contains breaking changes. 4 | 5 | ## Upgrading to v4.0.0 6 | 7 | We are introducing a new worker Lambda function and an SQS queue, enabling the Lambda to run within the 15-minute timeout, which is especially relevant for larger environments. 
8 | 9 | The following variable defaults have been modified: 10 | - `findings_manager_events_lambda.log_level` -> default: `ERROR` (previous default: `INFO`). The logging configuration has been updated, and `ERROR` is now more logical as the default level. 11 | - `findings_manager_trigger_lambda.log_level` -> default: `ERROR` (previous default: `INFO`). The logging configuration has been updated, and `ERROR` is now more logical as the default level. 12 | - `findings_manager_trigger_lambda.memory_size` -> default: `256` (previous default: `1024`). With the new setup, the trigger Lambda requires less memory. 13 | - `findings_manager_trigger_lambda.timeout` -> default: `300` (previous default: `900`). With the new setup, the trigger Lambda completes tasks in less time. 14 | 15 | The following variables have been introduced: 16 | - `findings_manager_worker_lambda` 17 | 18 | The following output has been introduced: 19 | - `findings_manager_worker_lambda_sg_id` 20 | 21 | Note: 22 | - Ensure your KMS key is available for SQS access. 23 | 24 | ## Upgrading to v3.0.0 25 | 26 | ### Variables (v3.0.0) 27 | 28 | The following variables have been removed: 29 | 30 | - `dynamodb_table` 31 | - `dynamodb_deletion_protection` 32 | 33 | The following variables have been introduced: 34 | - `rules_filepath` 35 | - `rules_s3_object_name` 36 | 37 | The following variables have been renamed: 38 | - `lambda_events_suppressor` -> `findings_manager_events_lambda` 39 | - `lambda_streams_suppressor` -> `findings_manager_trigger_lambda` 40 | - `lambda_suppressor_iam_role_name` -> `findings_manager_lambda_iam_role_name` 41 | - `eventbridge_suppressor_iam_role_name` -> `jira_eventbridge_iam_role_name` 42 | - `step_function_suppressor_iam_role_name` -> `jira_step_function_iam_role_name` 43 | 44 | A Lambda function now triggers on S3 Object Creation Trigger Events. 45 | By default it is triggered by putting a new (version of) an object called `rules.yaml` in the bucket created by this module. 
46 | This filename can be customized with the `rules_s3_object_name` variable. 47 | 48 | You can add the `rules.yaml` file to the bucket in any way you like after deploying this module, for instance with an `aws_s3_object` resource. 49 | This way you can separate management of your infrastructure and security. 50 | If this separation is not necessary in your case you also let this module directly upload the file for you by setting the `rules_filepath` variable to a filepath to your `rules.yaml` file. 51 | In either case, be mindful that there can be a delay between creating S3 triggers and those being fully functional. 52 | Re-create the rules object later to have rules run on your findings history in that case. 53 | 54 | ### Outputs (v3.0.0) 55 | 56 | The following output has been removed: 57 | 58 | - `dynamodb_arn` 59 | 60 | The following output has been renamed: 61 | 62 | - `lambda_jira_security_hub_sg_id` -> `jira_lambda_sg_id` 63 | - `lambda_securityhub_events_suppressor_sg_id` -> `findings_manager_events_lambda_sg_id` 64 | - `lambda_securityhub_streams_suppressor_sg_id` -> `findings_manager_trigger_lambda_sg_id` 65 | 66 | ### Behaviour (v3.0.0) 67 | 68 | New functionality: 69 | 70 | - Managing consolidated control findings is now supported 71 | - Managing based on tags is now supported 72 | 73 | See the README, section `## How to format the rules.yaml file?` for more information on the keys you need to use to control this. 74 | 75 | The `rules.yaml` file needs to be written in a different syntax. The script below can be used to easily convert your current `suppressions.yml` file to the new format. 
76 | 77 | ```python 78 | import yaml 79 | 80 | suppressions = yaml.safe_load(open('suppressions.yml'))['Suppressions'] 81 | 82 | rules = { 83 | 'Rules': [ 84 | { 85 | 'note': content['notes'], 86 | 'action': content['action'], 87 | 'match_on': { 88 | 'rule_or_control_id': rule_or_control_id, 89 | 'resource_id_regexps': content['rules'] 90 | } 91 | } 92 | for rule_or_control_id, contents in suppressions.items() 93 | for content in contents 94 | ] 95 | } 96 | 97 | print(yaml.dump(rules, indent=2)) 98 | ``` 99 | 100 | If you do not want to rename your file from `suppressions.yml` to `rules.yaml` you can override the name using the `rules_s3_object_name` variable. 101 | 102 | ## Upgrading to v2.0.0 103 | 104 | ### Variables (v2.0.0) 105 | 106 | The following variable has been replaced: 107 | 108 | - `create_allow_all_egress_rule` -> `jira_integration.security_group_egress_rules`, `lambda_streams_suppressor.security_group_egress_rules`, `lambda_events_suppressor.security_group_egress_rules` 109 | 110 | Instead of only being able to allow all egress or block all egress and having to rely on resources outside this module to create specific egress rules this is now supported natively by the module. 111 | 112 | The following variable defaults have been modified: 113 | 114 | - `servicenow_integration.cloudwatch_retention_days` -> default: `365` (previous hardcoded: `14`). In order to comply with AWS Security Hub control CloudWatch.16. 115 | 116 | ### Behaviour (v2.0.0) 117 | 118 | The need to provide a `providers = { aws = aws }` argument has been removed, but is still allowed. E.g. when deploying this module in the audit account typically `providers = { aws = aws.audit }` is passed. 119 | 120 | ## Upgrading to v1.0.0 121 | 122 | ### Behaviour (v1.0.0) 123 | 124 | - Timeouts of the suppressor lambdas have been increased to 120 seconds. The current timeout of 60 seconds is not always enough to process 100 records of findings. 
125 | - The `create_servicenow_access_keys` variable, now called `servicenow_integration.create_access_keys` was not used in the code and therefore the default behaviour was that access keys would be created. This issue has been resolved. 126 | - The `create_allow_all_egress_rule` variable has been set to `false`. 127 | - The `tags` variable is now optional. 128 | 129 | ### Variables (v1.0.0) 130 | 131 | The following variables have been replaced by a new variable `jira_integration`: 132 | 133 | - `jira_exclude_account_filter` -> `jira_integration.exclude_account_ids` 134 | - `jira_finding_severity_normalized` -> `jira_integration.finding_severity_normalized_threshold` 135 | - `jira_integration` -> `jira_integration.enabled` 136 | - `jira_issue_type` -> `jira_integration.issue_type` 137 | - `jira_project_key` -> `jira_integration.project_key` 138 | - `jira_secret_arn` -> `jira_integration.credentials_secret_arn` 139 | - `lambda_jira_name` -> `jira_integration.lambda_settings.name` 140 | - `lambda_jira_iam_role_name` -> `jira_integration.lambda_settings.iam_role_name` 141 | - Additionally you are now able to specify the `log_level`, `memory_size,` and `timeout` of the lambda. 142 | 143 | The following variables have been replaced by a new variable `servicenow_integration`: 144 | 145 | - `servicenow_integration` -> `servicenow_integration.enabled` 146 | - `create_servicenow_access_keys` -> `servicenow_integration.create_access_keys` 147 | 148 | The following variables have been replaced by a new variable `lambda_events_suppressor`: 149 | 150 | - `lambda_events_suppressor_name` -> `lambda_events_suppressor.name` 151 | - Additionally you are now able to specify the `log_level`, `memory_size,` and `timeout` of the lambda. 
152 | 153 | The following variables have been replaced by a new variable `lambda_streams_suppressor`: 154 | 155 | - `lambda_streams_suppressor_name` -> `lambda_streams_suppressor.name` 156 | - Additionally you are now able to specify the `log_level`, `memory_size,` and `timeout` of the lambda. 157 | -------------------------------------------------------------------------------- /data.tf: -------------------------------------------------------------------------------- 1 | # Data Source to get the access to Account ID in which Terraform is authorized and the region configured on the provider 2 | data "aws_caller_identity" "current" {} 3 | 4 | data "aws_region" "current" {} 5 | -------------------------------------------------------------------------------- /examples/basic-separate-file/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # Replace with a globally unique bucket name 3 | s3_bucket_name = "securityhub-findings-manager" 4 | } 5 | 6 | provider "aws" { 7 | region = "eu-west-1" 8 | } 9 | 10 | data "aws_caller_identity" "current" {} 11 | 12 | module "kms" { 13 | source = "schubergphilis/mcaf-kms/aws" 14 | version = "~> 0.3.0" 15 | 16 | name = "securityhub-findings-manager" 17 | 18 | policy = templatefile( 19 | "${path.module}/../kms.json", 20 | { account_id = data.aws_caller_identity.current.account_id } 21 | ) 22 | } 23 | 24 | module "aws_securityhub_findings_manager" { 25 | source = "../../" 26 | 27 | kms_key_arn = module.kms.arn 28 | s3_bucket_name = local.s3_bucket_name 29 | 30 | tags = { Terraform = true } 31 | } 32 | 33 | # It can take a long time before S3 notifications become active 34 | # You may want to deploy this resource a few minutes after those above 35 | resource "aws_s3_object" "rules" { 36 | bucket = local.s3_bucket_name 37 | key = "rules.yaml" 38 | content_type = "application/x-yaml" 39 | content = file("${path.module}/../rules.yaml") 40 | source_hash = 
filemd5("${path.module}/../rules.yaml") 41 | 42 | depends_on = [module.aws_securityhub_findings_manager] 43 | } 44 | -------------------------------------------------------------------------------- /examples/basic-separate-file/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 4.9" 8 | } 9 | local = { 10 | source = "hashicorp/local" 11 | version = ">= 1.0" 12 | } 13 | null = { 14 | source = "hashicorp/null" 15 | version = ">= 2.0" 16 | } 17 | random = { 18 | source = "hashicorp/random" 19 | version = ">= 3.0" 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /examples/basic/main.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = "eu-west-1" 3 | } 4 | 5 | data "aws_caller_identity" "current" {} 6 | 7 | module "kms" { 8 | source = "schubergphilis/mcaf-kms/aws" 9 | version = "~> 0.3.0" 10 | 11 | name = "securityhub-findings-manager" 12 | 13 | policy = templatefile( 14 | "${path.module}/../kms.json", 15 | { account_id = data.aws_caller_identity.current.account_id } 16 | ) 17 | } 18 | 19 | # It can take a long time before S3 notifications become active 20 | # You may want to deploy an empty set of rules before the actual ones or do a trick with yaml comments 21 | module "aws_securityhub_findings_manager" { 22 | source = "../../" 23 | 24 | kms_key_arn = module.kms.arn 25 | s3_bucket_name = "securityhub-findings-manager-artifacts" # Replace with a globally unique bucket name 26 | rules_filepath = "${path.module}/../rules.yaml" 27 | 28 | tags = { Terraform = true } 29 | } 30 | -------------------------------------------------------------------------------- /examples/basic/versions.tf: -------------------------------------------------------------------------------- 1 | 
terraform { 2 | required_version = ">= 1.3.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 4.9" 8 | } 9 | local = { 10 | source = "hashicorp/local" 11 | version = ">= 1.0" 12 | } 13 | null = { 14 | source = "hashicorp/null" 15 | version = ">= 2.0" 16 | } 17 | random = { 18 | source = "hashicorp/random" 19 | version = ">= 3.0" 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /examples/jira-integration/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # Replace with a globally unique bucket name 3 | s3_bucket_name = "securityhub-findings-manager" 4 | } 5 | 6 | provider "aws" { 7 | region = "eu-west-1" 8 | } 9 | 10 | data "aws_caller_identity" "current" {} 11 | 12 | module "kms" { 13 | source = "schubergphilis/mcaf-kms/aws" 14 | version = "~> 0.3.0" 15 | 16 | name = "securityhub-findings-manager" 17 | 18 | policy = templatefile( 19 | "${path.module}/../kms.json", 20 | { account_id = data.aws_caller_identity.current.account_id } 21 | ) 22 | } 23 | 24 | resource "aws_secretsmanager_secret" "jira_credentials" { 25 | #checkov:skip=CKV2_AWS_57: automatic rotation of the jira credentials is recommended. 
26 | description = "Security Hub Findings Manager Jira Credentials Secret" 27 | kms_key_id = module.kms.arn 28 | name = "lambda/jira_credentials_secret" 29 | } 30 | 31 | // tfsec:ignore:GEN003 32 | resource "aws_secretsmanager_secret_version" "jira_credentials" { 33 | secret_id = aws_secretsmanager_secret.jira_credentials.id 34 | secret_string = jsonencode({ 35 | "url" = "https://jira.mycompany.com" 36 | "apiuser" = "username" 37 | "apikey" = "apikey" 38 | }) 39 | } 40 | 41 | module "aws_securityhub_findings_manager" { 42 | source = "../../" 43 | 44 | kms_key_arn = module.kms.arn 45 | s3_bucket_name = local.s3_bucket_name 46 | 47 | jira_integration = { 48 | enabled = true 49 | credentials_secret_arn = aws_secretsmanager_secret.jira_credentials.arn 50 | project_key = "PROJECT" 51 | 52 | security_group_egress_rules = [{ 53 | cidr_ipv4 = "1.1.1.1/32" 54 | description = "Allow access from lambda_jira_securityhub to Jira" 55 | from_port = 443 56 | ip_protocol = "tcp" 57 | to_port = 443 58 | }] 59 | } 60 | 61 | tags = { Terraform = true } 62 | } 63 | 64 | # It can take a long time before S3 notifications become active 65 | # You may want to deploy this resource a few minutes after those above 66 | resource "aws_s3_object" "rules" { 67 | bucket = local.s3_bucket_name 68 | key = "rules.yaml" 69 | content_type = "application/x-yaml" 70 | content = file("${path.module}/../rules.yaml") 71 | source_hash = filemd5("${path.module}/../rules.yaml") 72 | 73 | depends_on = [module.aws_securityhub_findings_manager] 74 | } 75 | -------------------------------------------------------------------------------- /examples/jira-integration/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.3.0" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = ">= 4.9" 8 | } 9 | local = { 10 | source = "hashicorp/local" 11 | version = ">= 1.0" 12 | } 13 | null = { 14 | source = 
"hashicorp/null" 15 | version = ">= 2.0" 16 | } 17 | random = { 18 | source = "hashicorp/random" 19 | version = ">= 3.0" 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /examples/kms.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Sid": "Enable IAM User Permissions", 6 | "Effect": "Allow", 7 | "Principal": { 8 | "AWS": "arn:aws:iam::${account_id}:root" 9 | }, 10 | "Action": "kms:*", 11 | "Resource": "*" 12 | }, 13 | { 14 | "Sid": "Allow Encrypt and Decrypt permissions for Cloudwatch Logs", 15 | "Effect": "Allow", 16 | "Principal": { 17 | "Service": "logs.eu-west-1.amazonaws.com" 18 | }, 19 | "Action": [ 20 | "kms:ReEncrypt*", 21 | "kms:GenerateDataKey*", 22 | "kms:Encrypt", 23 | "kms:DescribeKey", 24 | "kms:Decrypt" 25 | ], 26 | "Resource": "arn:aws:kms:eu-west-1:${account_id}:key/*" 27 | } 28 | ] 29 | } -------------------------------------------------------------------------------- /examples/rules.yaml: -------------------------------------------------------------------------------- 1 | # Comments 2 | Rules: 3 | - note: 'MF-Neigh' 4 | action: 'SUPPRESSED' 5 | match_on: 6 | security_control_id: 'S3.20' 7 | - note: 'Config as code' 8 | action: 'SUPPRESSED' 9 | match_on: 10 | security_control_id: 'S3.14' 11 | tags: 12 | - key: 'ManagedBy' 13 | value: 'Terraform' 14 | - key: 'ManagedBy' 15 | value: 'CFN' 16 | - note: 'Too expensive on non-prod' 17 | action: 'SUPPRESSED' 18 | match_on: 19 | security_control_id: 'S3.9' 20 | resource_id_regexps: 21 | - '^arn:aws:s3:::.*-dev$' 22 | - '^arn:aws:s3:::.*-acc$' 23 | - note: 'Suppress EC2.172 on us-east-1 and eu-central-1' 24 | action: 'SUPPRESSED' 25 | match_on: 26 | security_control_id: 'EC2.172' 27 | regions: 28 | - 'us-east-1' 29 | - 'eu-central-1' 30 | -------------------------------------------------------------------------------- 
/examples/servicenow-integration/main.tf:
--------------------------------------------------------------------------------
locals {
  # Replace with a globally unique bucket name
  s3_bucket_name = "securityhub-findings-manager"
}

provider "aws" {
  region = "eu-west-1"
}

# Identity of the account Terraform is authorized against; used to scope the KMS key policy.
data "aws_caller_identity" "current" {}

# KMS key used to encrypt the findings-manager resources (S3, logs, SQS).
module "kms" {
  source  = "schubergphilis/mcaf-kms/aws"
  version = "~> 0.3.0"

  name = "securityhub-findings-manager"

  policy = templatefile(
    "${path.module}/../kms.json",
    { account_id = data.aws_caller_identity.current.account_id }
  )
}

module "aws_securityhub_findings_manager" {
  source = "../../"

  kms_key_arn    = module.kms.arn
  s3_bucket_name = local.s3_bucket_name

  # Enable the ServiceNow integration with its default settings.
  servicenow_integration = {
    enabled = true
  }

  tags = { Terraform = true }
}

# It can take a long time before S3 notifications become active
# You may want to deploy this resource a few minutes after those above
resource "aws_s3_object" "rules" {
  bucket       = local.s3_bucket_name
  key          = "rules.yaml"
  content_type = "application/x-yaml"
  content      = file("${path.module}/../rules.yaml")
  # source_hash triggers re-upload whenever the shared rules file changes.
  source_hash = filemd5("${path.module}/../rules.yaml")

  # The bucket (and its notification configuration) is created by the module.
  depends_on = [module.aws_securityhub_findings_manager]
}
--------------------------------------------------------------------------------
/examples/servicenow-integration/versions.tf:
--------------------------------------------------------------------------------
# Pins the Terraform and provider versions this example was written against.
terraform {
  required_version = ">= 1.3.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 4.9"
    }
    local = {
      source  = "hashicorp/local"
      version = ">= 1.0"
    }
    null = {
      source  = "hashicorp/null"
      version = ">= 2.0"
    }
    random = {
      source  = "hashicorp/random"
      version = ">= 3.0"
    }
  }
}
-------------------------------------------------------------------------------- /files/lambda-artifacts/findings-manager-jira/findings_manager_jira.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import boto3 4 | from aws_lambda_powertools import Logger 5 | from aws_lambda_powertools.utilities.typing import LambdaContext 6 | from jira.exceptions import JIRAError 7 | import helpers 8 | 9 | logger = Logger() 10 | securityhub = boto3.client('securityhub') 11 | secretsmanager = boto3.client('secretsmanager') 12 | 13 | REQUIRED_ENV_VARS = [ 14 | 'EXCLUDE_ACCOUNT_FILTER', 'JIRA_ISSUE_CUSTOM_FIELDS', 'JIRA_ISSUE_TYPE', 'JIRA_PROJECT_KEY', 'JIRA_SECRET_ARN' 15 | ] 16 | 17 | DEFAULT_JIRA_AUTOCLOSE_COMMENT = 'Security Hub finding has been resolved. Autoclosing the issue.' 18 | DEFAULT_JIRA_AUTOCLOSE_TRANSITION = 'Done' 19 | 20 | STATUS_NEW = 'NEW' 21 | STATUS_NOTIFIED = 'NOTIFIED' 22 | STATUS_RESOLVED = 'RESOLVED' 23 | COMPLIANCE_STATUS_FAILED = 'FAILED' 24 | COMPLIANCE_STATUS_NOT_AVAILABLE = 'NOT_AVAILABLE' 25 | COMPLIANCE_STATUS_PASSED = 'PASSED' 26 | COMPLIANCE_STATUS_WARNING = 'WARNING' 27 | COMPLIANCE_STATUS_MISSING = 'MISSING' 28 | RECORD_STATE_ACTIVE = 'ACTIVE' 29 | RECORD_STATE_ARCHIVED = 'ARCHIVED' 30 | 31 | 32 | @logger.inject_lambda_context 33 | def lambda_handler(event: dict, context: LambdaContext): 34 | # Validate required environment variables 35 | try: 36 | helpers.validate_env_vars(REQUIRED_ENV_VARS) 37 | except Exception as e: 38 | logger.error(f"Environment variable validation failed: {e}") 39 | raise RuntimeError("Required environment variables are missing.") from e 40 | 41 | # Retrieve environment variables 42 | exclude_account_filter = os.environ['EXCLUDE_ACCOUNT_FILTER'] 43 | jira_autoclose_comment = os.getenv( 44 | 'JIRA_AUTOCLOSE_COMMENT', DEFAULT_JIRA_AUTOCLOSE_COMMENT) 45 | jira_autoclose_transition = os.getenv( 46 | 'JIRA_AUTOCLOSE_TRANSITION', 
DEFAULT_JIRA_AUTOCLOSE_TRANSITION) 47 | jira_issue_custom_fields = os.environ['JIRA_ISSUE_CUSTOM_FIELDS'] 48 | jira_issue_type = os.environ['JIRA_ISSUE_TYPE'] 49 | jira_project_key = os.environ['JIRA_PROJECT_KEY'] 50 | jira_secret_arn = os.environ['JIRA_SECRET_ARN'] 51 | 52 | # Parse custom fields 53 | try: 54 | jira_issue_custom_fields = json.loads(jira_issue_custom_fields) 55 | jira_issue_custom_fields = {k: {"value": v} 56 | for k, v in jira_issue_custom_fields.items()} 57 | except json.JSONDecodeError as e: 58 | logger.error(f"Failed to parse JSON for custom fields: {e}.") 59 | raise ValueError(f"Invalid JSON in JIRA_ISSUE_CUSTOM_FIELDS: {e}") from e 60 | 61 | # Retrieve Jira client 62 | try: 63 | jira_secret = helpers.get_secret(secretsmanager, jira_secret_arn) 64 | jira_client = helpers.get_jira_client(jira_secret) 65 | except Exception as e: 66 | logger.error(f"Failed to retrieve Jira client: {e}") 67 | raise RuntimeError("Could not initialize Jira client.") from e 68 | 69 | # Get Sechub event details 70 | event_detail = event['detail'] 71 | finding = event_detail['findings'][0] 72 | finding_account_id = finding['AwsAccountId'] 73 | workflow_status = finding['Workflow']['Status'] 74 | compliance_status = finding['Compliance']['Status'] if 'Compliance' in finding else COMPLIANCE_STATUS_MISSING 75 | record_state = finding['RecordState'] 76 | 77 | # Only process finding if account is not excluded 78 | if finding_account_id in exclude_account_filter: 79 | logger.info( 80 | f"Account {finding_account_id} is excluded from Jira ticket creation.") 81 | return 82 | 83 | # Handle new findings 84 | # Ticket is created when Workflow Status is NEW and Compliance Status is FAILED, WARNING or is missing from the finding (case with e.g. 
Inspector findings) 85 | # Compliance status check is necessary because some findings from AWS Config can have Workflow Status NEW but Compliance Status NOT_AVAILABLE 86 | # In such case, we don't want to create a Jira ticket, because the finding is not actionable 87 | if (workflow_status == STATUS_NEW 88 | and compliance_status in [COMPLIANCE_STATUS_FAILED, 89 | COMPLIANCE_STATUS_WARNING, 90 | COMPLIANCE_STATUS_MISSING] 91 | and record_state == RECORD_STATE_ACTIVE): 92 | # Create Jira issue and updates Security Hub status to NOTIFIED 93 | # and adds Jira issue key to note (in JSON format) 94 | try: 95 | issue = helpers.create_jira_issue( 96 | jira_client, jira_project_key, jira_issue_type, event_detail, jira_issue_custom_fields) 97 | note = json.dumps({'jiraIssue': issue.key}) 98 | helpers.update_security_hub( 99 | securityhub, finding["Id"], finding["ProductArn"], STATUS_NOTIFIED, note) 100 | except Exception as e: 101 | logger.error( 102 | f"Error processing new finding for findingID {finding['Id']}: {e}") 103 | raise RuntimeError(f"Failed to create Jira issue or update Security Hub for finding ID {finding['Id']}.") from e 104 | 105 | # Handle resolved findings 106 | # Close Jira issue if finding in SecurityHub has Workflow Status RESOLVED 107 | # or if the finding is in NOTIFIED status and compliance is PASSED (finding resoloved) or NOT_AVAILABLE (when the resource is deleted, for example) or the finding's Record State is ARCHIVED 108 | # If closed from NOTIFIED status, also resolve the finding in SecurityHub. If the finding becomes relevant again, Security Hub will reopen it and new ticket will be created. 109 | elif (workflow_status == STATUS_RESOLVED 110 | or (workflow_status == STATUS_NOTIFIED 111 | and (compliance_status in [COMPLIANCE_STATUS_PASSED, 112 | COMPLIANCE_STATUS_NOT_AVAILABLE] 113 | or record_state == RECORD_STATE_ARCHIVED))): 114 | # Close Jira issue if finding is resolved. 
115 | # Note text should contain Jira issue key in JSON format 116 | try: 117 | note_text = finding['Note']['Text'] 118 | note_text_json = json.loads(note_text) 119 | jira_issue_id = note_text_json.get('jiraIssue') 120 | if jira_issue_id: 121 | try: 122 | issue = jira_client.issue(jira_issue_id) 123 | except JIRAError as e: 124 | logger.error( 125 | f"Failed to retrieve Jira issue {jira_issue_id}: {e}. Cannot autoclose.") 126 | return # Skip further processing for this finding 127 | helpers.close_jira_issue( 128 | jira_client, issue, jira_autoclose_transition, jira_autoclose_comment) 129 | if workflow_status == STATUS_NOTIFIED: 130 | # Resolve SecHub finding as it will be reopened anyway in case the compliance fails 131 | # Also change the note to prevent a second run with RESOLVED status. 132 | helpers.update_security_hub( 133 | securityhub, finding["Id"], finding["ProductArn"], STATUS_RESOLVED, f"Closed Jira issue {jira_issue_id}") 134 | except json.JSONDecodeError as e: 135 | logger.error( 136 | f"Failed to decode JSON from note text: {e}. Cannot autoclose.") 137 | raise ValueError(f"Invalid JSON in note text for finding ID {finding['Id']}.") from e 138 | except Exception as e: 139 | logger.error( 140 | f"Error processing resolved finding for findingId {finding['Id']}: {e}. Cannot autoclose.") 141 | return 142 | 143 | else: 144 | logger.info( 145 | f"Finding {finding['Id']} is not in a state to be processed. 
Workflow status: {workflow_status}, Compliance status: {compliance_status}, Record state: {record_state}") 146 | -------------------------------------------------------------------------------- /files/lambda-artifacts/findings-manager-jira/helpers.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import json 3 | import os 4 | from typing import List, Dict 5 | 6 | from aws_lambda_powertools import Logger 7 | from botocore.client import BaseClient 8 | from botocore.exceptions import ClientError 9 | from jira import JIRA 10 | from jira.resources import Issue 11 | 12 | logger = Logger() 13 | 14 | 15 | def validate_env_vars(env_vars: List[str]) -> None: 16 | """ 17 | Validate that all specified environment variables are set. 18 | 19 | Args: 20 | env_vars (List[str]): A list of environment variable names to check. 21 | 22 | Raises: 23 | ValueError: If any of the specified environment variables are not set. 24 | """ 25 | 26 | missing_vars = [var for var in env_vars if var not in os.environ] 27 | 28 | for var in missing_vars: 29 | logger.error(f"Environment variable {var} is not set!") 30 | 31 | if missing_vars: 32 | missing_str = ', '.join(missing_vars) 33 | logger.error(f"Missing environment variables: {missing_str}") 34 | raise ValueError(f"Missing environment variables: {missing_str}") 35 | 36 | 37 | def get_jira_client(jira_secret: Dict[str, str]) -> JIRA: 38 | """ 39 | Create a Jira client instance using the specified secret. 40 | 41 | Args: 42 | jira_secret (Dict[str, str]): A dictionary containing the Jira connection details. 43 | 44 | Returns: 45 | JIRA: A Jira client instance. 46 | 47 | Raises: 48 | ValueError: If the Jira connection details are not valid. 
49 | """ 50 | 51 | jira_url = jira_secret.get('url') 52 | jira_user = jira_secret.get('apiuser') 53 | jira_password = jira_secret.get('apikey') 54 | 55 | if not jira_url or not jira_user or not jira_password: 56 | raise ValueError("Jira connection details are not valid!") 57 | 58 | return JIRA(server=jira_url, basic_auth=(jira_user, jira_password)) 59 | 60 | 61 | def get_secret(client: BaseClient, secret_arn: str) -> Dict[str, str]: 62 | """ 63 | Retrieve a secret from AWS Secrets Manager. 64 | 65 | Args: 66 | client (BaseClient): A boto3 client instance for Secrets Manager. 67 | secret_arn (str): The ARN of the secret to retrieve. 68 | 69 | Returns: 70 | Dict[str, str]: The secret value as a dictionary. 71 | 72 | Raises: 73 | ValueError: If the client is not an instance of Secrets Manager. 74 | ClientError: If there is an error retrieving the secret. 75 | """ 76 | 77 | # Validate that the client is an instance of botocore.client.SecretsManager 78 | if client.meta.service_model.service_name != 'secretsmanager': 79 | raise ValueError(f"Client must be an instance of botocore.client.SecretsManager. Got { 80 | type(client)} instead.") 81 | 82 | try: 83 | response = client.get_secret_value(SecretId=secret_arn) 84 | secret = response.get('SecretString') 85 | 86 | if secret is None: 87 | secret = base64.b64decode(response['SecretBinary']).decode('utf-8') 88 | 89 | logger.info(f"Secret fetched from ARN {secret_arn}") 90 | return json.loads(secret) 91 | except Exception as e: 92 | logger.error(f"Error retrieving secret from ARN {secret_arn}: {e}") 93 | raise e 94 | 95 | 96 | def create_jira_issue(jira_client: JIRA, project_key: str, issue_type: str, event: dict, custom_fields: dict) -> Issue: 97 | """ 98 | Create a Jira issue based on a Security Hub event. 99 | 100 | Args: 101 | jira_client (JIRA): An authenticated Jira client instance. 102 | project_key (str): The key of the Jira project. 103 | issue_type (str): The type of the Jira issue. 
104 | event (Dict): The Security Hub event data. 105 | custom_fields (Dict): The custom fields to include in the Jira issue. 106 | 107 | Returns: 108 | Issue: The created Jira issue. 109 | 110 | Raises: 111 | Exception: If there is an error creating the Jira issue. 112 | """ 113 | 114 | finding = event['findings'][0] 115 | finding_account_id = finding['AwsAccountId'] 116 | finding_title = finding['Title'] 117 | 118 | issue_title = f"Security Hub ({finding_title}) detected in { 119 | finding_account_id}" 120 | 121 | issue_description = f""" 122 | {finding['Description']} 123 | 124 | A Security Hub finding has been detected: 125 | {{code}}{json.dumps(event, indent=2, sort_keys=True)}{{code}} 126 | """ 127 | 128 | issue_labels = [ 129 | finding["Region"], 130 | finding_account_id, 131 | finding['Severity']['Label'].lower(), 132 | *[finding['ProductFields'][key].replace(" ", "") 133 | for key in ["RuleId", "ControlId", "aws/securityhub/ProductName"] 134 | if key in finding['ProductFields']] 135 | ] 136 | 137 | issue_dict = { 138 | **custom_fields, 139 | 'project': {'key': project_key}, 140 | 'issuetype': {'name': issue_type}, 141 | 'summary': issue_title, 142 | 'description': issue_description, 143 | 'labels': issue_labels, 144 | } 145 | 146 | try: 147 | issue = jira_client.create_issue(fields=issue_dict) 148 | logger.info(f"Created Jira issue: {issue.key}") 149 | return issue 150 | except Exception as e: 151 | logger.error(f"Failed to create Jira issue for finding {finding['Id']}: {e}") 152 | raise e 153 | 154 | 155 | def close_jira_issue(jira_client: JIRA, issue: Issue, transition_name: str, comment: str) -> None: 156 | """ 157 | Close a Jira issue. 158 | 159 | Args: 160 | jira_client (JIRA): An authenticated Jira client instance. 161 | issue (Issue): The Jira issue to close. 162 | 163 | Raises: 164 | Exception: If there is an error closing the Jira issue. 
165 | """ 166 | 167 | try: 168 | transition_id = jira_client.find_transitionid_by_name(issue, transition_name) 169 | if transition_id is None: 170 | logger.warning(f"Failed to close Jira issue: Invalid transition.") 171 | return 172 | jira_client.add_comment(issue, comment) 173 | jira_client.transition_issue(issue, transition_id, comment=comment) 174 | logger.info(f"Closed Jira issue: {issue.key}") 175 | except Exception as e: 176 | logger.error(f"Failed to close Jira issue {issue.key}: {e}") 177 | raise e 178 | 179 | 180 | def update_security_hub(client: BaseClient, finding_id: str, 181 | product_arn: str, status: str, note: str = "") -> None: 182 | """ 183 | Update a Security Hub finding with the given status and note. 184 | 185 | Args: 186 | client (BaseClient): A boto3 client instance for Security Hub. 187 | finding_id (str): The ID of the finding to update. 188 | product_arn (str): The ARN of the product associated with the finding. 189 | status (str): The new status for the finding. 190 | note (str): A note to add to the finding. 191 | 192 | Raises: 193 | ValueError: If the client is not an instance of Security Hub. 194 | ClientError: If there is an error updating the finding. 195 | """ 196 | 197 | # Validate that the client is an instance of botocore.client.SecurityHub 198 | if client.meta.service_model.service_name != 'securityhub': 199 | raise ValueError(f"Client must be an instance of botocore.client.SecurityHub. 
Got { 200 | type(client)} instead.") 201 | 202 | try: 203 | kwargs = {} 204 | if note: 205 | kwargs['Note'] = { 206 | 'Text': note, 207 | 'UpdatedBy': 'securityhub-findings-manager-jira' 208 | } 209 | logger.info(f"Updating SecurityHub finding {finding_id} to status {status} with note '{note}'.") 210 | response = client.batch_update_findings( 211 | FindingIdentifiers=[ 212 | { 213 | 'Id': finding_id, 214 | 'ProductArn': product_arn 215 | } 216 | ], 217 | Workflow={'Status': status}, 218 | **kwargs 219 | ) 220 | 221 | if response.get('FailedFindings'): 222 | for element in response['FailedFindings']: 223 | logger.error(f"Updating SecurityHub finding failed: FindingId { 224 | element['Id']}, ErrorCode {element['ErrorCode']}, ErrorMessage { 225 | element['ErrorMessage']}") 226 | else: 227 | logger.info("SecurityHub finding updated successfully.") 228 | 229 | except Exception as e: 230 | logger.exception(f"Updating SecurityHub finding failed: {e}") 231 | raise e 232 | -------------------------------------------------------------------------------- /files/lambda-artifacts/findings-manager-jira/requirements-dev.txt: -------------------------------------------------------------------------------- 1 | aws-lambda-powertools 2 | -------------------------------------------------------------------------------- /files/lambda-artifacts/findings-manager-jira/requirements.txt: -------------------------------------------------------------------------------- 1 | jira==3.8.0 2 | -------------------------------------------------------------------------------- /files/lambda-artifacts/securityhub-findings-manager/requirements-dev.txt: -------------------------------------------------------------------------------- 1 | aws-lambda-powertools 2 | -------------------------------------------------------------------------------- /files/lambda-artifacts/securityhub-findings-manager/requirements.txt: -------------------------------------------------------------------------------- 1 | 
urllib3==1.26.19 2 | awsfindingsmanagerlib==1.3.0 3 | -------------------------------------------------------------------------------- /files/lambda-artifacts/securityhub-findings-manager/securityhub_events.py: -------------------------------------------------------------------------------- 1 | from aws_lambda_powertools import Logger 2 | from awsfindingsmanagerlib import FindingsManager 3 | from strategize_findings_manager import manage 4 | 5 | LOGGER = Logger() 6 | 7 | 8 | @LOGGER.inject_lambda_context(log_event=True) 9 | def lambda_handler(event, context): 10 | return manage( 11 | FindingsManager.suppress_findings_on_matching_rules, 12 | (event["detail"]["findings"],), 13 | LOGGER 14 | ) 15 | -------------------------------------------------------------------------------- /files/lambda-artifacts/securityhub-findings-manager/securityhub_trigger.py: -------------------------------------------------------------------------------- 1 | from boto3 import client 2 | from json import dumps 3 | from os import environ 4 | from aws_lambda_powertools import Logger 5 | from strategize_findings_manager import get_rules 6 | 7 | SQS_QUEUE_NAME = environ.get("SQS_QUEUE_NAME") 8 | LOGGER = Logger() 9 | 10 | 11 | @LOGGER.inject_lambda_context(log_event=True) 12 | def lambda_handler(event, context): 13 | try: 14 | sqs = client("sqs") 15 | for rule in get_rules(LOGGER): 16 | message_body = dumps(rule.data) 17 | LOGGER.info(f"Putting rule on SQS. 
Rule details: {message_body}") 18 | sqs.send_message( 19 | QueueUrl=SQS_QUEUE_NAME, 20 | MessageBody=message_body 21 | ) 22 | except Exception as e: 23 | LOGGER.error(f"Failed putting rule(s) on SQS.") 24 | LOGGER.error(f"Original error: {e}", exc_info=True) 25 | raise Exception 26 | -------------------------------------------------------------------------------- /files/lambda-artifacts/securityhub-findings-manager/securityhub_trigger_worker.py: -------------------------------------------------------------------------------- 1 | from json import loads 2 | from aws_lambda_powertools import Logger 3 | from strategize_findings_manager import manager_per_rule 4 | 5 | LOGGER = Logger() 6 | 7 | 8 | @LOGGER.inject_lambda_context(log_event=True) 9 | def lambda_handler(event, context): 10 | for record in event["Records"]: 11 | rule = loads(record["body"]) 12 | try: 13 | manager_per_rule(rule, LOGGER) 14 | except Exception as e: 15 | LOGGER.error(f"Failed to process rule. Rule details; {rule}") 16 | LOGGER.error(f"Original error: {e}", exc_info=True) 17 | -------------------------------------------------------------------------------- /files/lambda-artifacts/securityhub-findings-manager/strategize_findings_manager.py: -------------------------------------------------------------------------------- 1 | from os import environ 2 | from aws_lambda_powertools import Logger 3 | from awsfindingsmanagerlib import S3, FindingsManager 4 | 5 | S3_BUCKET_NAME = environ.get("S3_BUCKET_NAME") 6 | S3_OBJECT_NAME = environ.get("S3_OBJECT_NAME") 7 | 8 | 9 | def _initialize_findings_manager(logger: Logger) -> FindingsManager: 10 | s3_backend = S3(S3_BUCKET_NAME, S3_OBJECT_NAME) 11 | rules = s3_backend.get_rules() 12 | logger.info(rules) 13 | findings_manager = FindingsManager() 14 | findings_manager.register_rules(rules) 15 | return findings_manager 16 | 17 | 18 | def manage(func, args, logger: Logger): 19 | try: 20 | findings_manager = _initialize_findings_manager(logger) 21 | except 
Exception as e: 22 | logger.error("Findings manager failed to initialize, please investigate.") 23 | logger.error(f"Original error: {e}", exc_info=True) 24 | return {"finding_state": "skipped"} 25 | 26 | try: 27 | success, suppressed_payload = getattr(findings_manager, func.__name__)(*args) 28 | except Exception as e: 29 | logger.error("Findings manager failed to apply findings management rules, please investigate.") 30 | logger.error(f"Original error: {e}", exc_info=True) 31 | return {"finding_state": "skipped"} 32 | 33 | if success: 34 | logger.info("Successfully applied all findings management rules.") 35 | return suppression_logging(logger, suppressed_payload) 36 | else: 37 | logger.error( 38 | "No explicit error was raised, but not all findings management rules were applied successfully, please investigate." 39 | ) 40 | return {"finding_state": "skipped"} 41 | 42 | 43 | def manager_per_rule(rule: list, logger: Logger): 44 | try: 45 | logger.info(f"Processing rule: {rule}") 46 | findings_manager_per_rule = FindingsManager() 47 | findings_manager_per_rule.register_rules([rule]) 48 | success, suppressed_payload = findings_manager_per_rule.suppress_matching_findings() 49 | except Exception as e: 50 | logger.error("Findings manager failed to apply findings management rules, please investigate.") 51 | logger.error(f"Original error: {e}", exc_info=True) 52 | return {"finding_state": "skipped"} 53 | 54 | if success: 55 | logger.info("Successfully applied all findings management rules.") 56 | return suppression_logging(logger, suppressed_payload) 57 | else: 58 | logger.error( 59 | "No explicit error was raised, but not all findings management rules were applied successfully, please investigate." 
60 | ) 61 | return {"finding_state": "skipped"} 62 | 63 | 64 | def get_rules(logger: Logger): 65 | try: 66 | findings_manager = _initialize_findings_manager(logger) 67 | except Exception as e: 68 | logger.error("Findings manager failed to initialize, please investigate.") 69 | logger.error(f"Original error: {e}", exc_info=True) 70 | return {"finding_state": "skipped"} 71 | return findings_manager.rules 72 | 73 | 74 | def suppression_logging(logger: Logger, suppressed_payload: list): 75 | if len(suppressed_payload) > 0: 76 | for chunk in suppressed_payload: 77 | note_text = chunk["Note"]["Text"] 78 | workflow_status = chunk["Workflow"]["Status"] 79 | count = len(chunk["FindingIdentifiers"]) 80 | logger.info(f"{count} finding(s) {workflow_status} with note: {note_text}.") 81 | return {"finding_state": "suppressed"} 82 | else: 83 | logger.info("No findings were suppressed.") 84 | return {"finding_state": "skipped"} 85 | -------------------------------------------------------------------------------- /files/pkg/lambda_findings-manager-jira_python3.11.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/schubergphilis/terraform-aws-mcaf-securityhub-findings-manager/5ba8fa40d83be2735f9727bf97edd537f8430125/files/pkg/lambda_findings-manager-jira_python3.11.zip -------------------------------------------------------------------------------- /files/pkg/lambda_findings-manager-jira_python3.12.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/schubergphilis/terraform-aws-mcaf-securityhub-findings-manager/5ba8fa40d83be2735f9727bf97edd537f8430125/files/pkg/lambda_findings-manager-jira_python3.12.zip -------------------------------------------------------------------------------- /files/pkg/lambda_securityhub-findings-manager_python3.11.zip: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/schubergphilis/terraform-aws-mcaf-securityhub-findings-manager/5ba8fa40d83be2735f9727bf97edd537f8430125/files/pkg/lambda_securityhub-findings-manager_python3.11.zip -------------------------------------------------------------------------------- /files/pkg/lambda_securityhub-findings-manager_python3.12.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/schubergphilis/terraform-aws-mcaf-securityhub-findings-manager/5ba8fa40d83be2735f9727bf97edd537f8430125/files/pkg/lambda_securityhub-findings-manager_python3.12.zip -------------------------------------------------------------------------------- /files/step-function-artifacts/securityhub-findings-manager-orchestrator-graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/schubergphilis/terraform-aws-mcaf-securityhub-findings-manager/5ba8fa40d83be2735f9727bf97edd537f8430125/files/step-function-artifacts/securityhub-findings-manager-orchestrator-graph.png -------------------------------------------------------------------------------- /files/step-function-artifacts/securityhub-findings-manager-orchestrator.json.tpl: -------------------------------------------------------------------------------- 1 | { 2 | "Comment": "Step Function to orchestrate Security Hub findings manager Lambda functions", 3 | "StartAt": "ChoiceSuppressor", 4 | "States": { 5 | "ChoiceSuppressor": { 6 | "Type": "Choice", 7 | "Choices": [ 8 | { 9 | "Or": [ 10 | { 11 | "Variable": "$.detail.findings[0].Workflow.Status", 12 | "StringEquals": "NEW" 13 | }, 14 | { 15 | "Variable": "$.detail.findings[0].Workflow.Status", 16 | "StringEquals": "NOTIFIED" 17 | } 18 | ], 19 | "Next": "invoke-securityhub-findings-manager-events" 20 | } 21 | ], 22 | "Default": "ChoiceJiraIntegration" 23 | }, 24 | "invoke-securityhub-findings-manager-events": { 25 | "Type": "Task", 26 | "Resource": 
"arn:aws:states:::lambda:invoke", 27 | "Parameters": { 28 | "Payload.$": "$", 29 | "FunctionName": "${findings_manager_events_lambda}" 30 | }, 31 | "Retry": [ 32 | { 33 | "ErrorEquals": [ 34 | "Lambda.ServiceException", 35 | "Lambda.AWSLambdaException", 36 | "Lambda.SdkClientException" 37 | ], 38 | "IntervalSeconds": 2, 39 | "MaxAttempts": 6, 40 | "BackoffRate": 2 41 | } 42 | ], 43 | "Catch": [ 44 | { 45 | "ErrorEquals": [ 46 | "States.TaskFailed" 47 | ], 48 | "Comment": "Catch all task failures", 49 | "Next": "ChoiceJiraIntegration", 50 | "ResultPath": "$.error" 51 | } 52 | ], 53 | "Next": "ChoiceJiraIntegration", 54 | "ResultPath": "$.TaskResult" 55 | }, 56 | "ChoiceJiraIntegration": { 57 | "Type": "Choice", 58 | "Choices": [ 59 | { 60 | "And": [ 61 | { 62 | "Or": [ 63 | { 64 | "Variable": "$.TaskResult.Payload.finding_state", 65 | "IsPresent": false 66 | }, 67 | { 68 | "And": [ 69 | { 70 | "Variable": "$.TaskResult.Payload.finding_state", 71 | "IsPresent": true 72 | }, 73 | { 74 | "Variable": "$.TaskResult.Payload.finding_state", 75 | "StringEquals": "skipped" 76 | } 77 | ] 78 | } 79 | ] 80 | }, 81 | { 82 | "Variable": "$.detail.findings[0].Severity.Normalized", 83 | "NumericGreaterThanEquals": ${finding_severity_normalized} 84 | }, 85 | %{~ if jira_autoclose_enabled } 86 | { 87 | "Or": [ 88 | { 89 | "And": [ 90 | { 91 | "Variable": "$.detail.findings[0].Workflow.Status", 92 | "StringEquals": "NEW" 93 | }, 94 | { 95 | "Variable": "$.detail.findings[0].RecordState", 96 | "StringEquals": "ACTIVE" 97 | }, 98 | { 99 | "Or": [ 100 | { 101 | "Variable": "$.detail.findings[0].Compliance.Status", 102 | "IsPresent": false 103 | }, 104 | { 105 | "And": [ 106 | { 107 | "Variable": "$.detail.findings[0].Compliance.Status", 108 | "IsPresent": true 109 | }, 110 | { 111 | "Or": [ 112 | { 113 | "Variable": "$.detail.findings[0].Compliance.Status", 114 | "StringEquals": "FAILED" 115 | }, 116 | { 117 | "Variable": "$.detail.findings[0].Compliance.Status", 118 | "StringEquals": 
"WARNING" 119 | } 120 | ] 121 | } 122 | ] 123 | } 124 | ] 125 | } 126 | ] 127 | }, 128 | { 129 | "And": [ 130 | { 131 | "Or": [ 132 | { 133 | "Variable": "$.detail.findings[0].Workflow.Status", 134 | "StringEquals": "RESOLVED" 135 | }, 136 | { 137 | "And": [ 138 | { 139 | "Variable": "$.detail.findings[0].Workflow.Status", 140 | "StringEquals": "NOTIFIED" 141 | }, 142 | { 143 | "Or": [ 144 | { 145 | "Variable": "$.detail.findings[0].RecordState", 146 | "StringEquals": "ARCHIVED" 147 | }, 148 | { 149 | "And": [ 150 | { 151 | "Variable": "$.detail.findings[0].Compliance.Status", 152 | "IsPresent": true 153 | }, 154 | { 155 | "Or": [ 156 | { 157 | "Variable": "$.detail.findings[0].Compliance.Status", 158 | "StringEquals": "PASSED" 159 | }, 160 | { 161 | "Variable": "$.detail.findings[0].Compliance.Status", 162 | "StringEquals": "NOT_AVAILABLE" 163 | } 164 | ] 165 | } 166 | ] 167 | } 168 | ] 169 | } 170 | ] 171 | } 172 | ] 173 | }, 174 | { 175 | "Variable": "$.detail.findings[0].Note.Text", 176 | "IsPresent": true 177 | }, 178 | { 179 | "Variable": "$.detail.findings[0].Note.Text", 180 | "StringMatches": "*jiraIssue*" 181 | } 182 | ] 183 | } 184 | ] 185 | } 186 | %{ else } 187 | { 188 | "Variable": "$.detail.findings[0].Workflow.Status", 189 | "StringEquals": "NEW" 190 | } 191 | %{ endif ~} 192 | ], 193 | "Next": "invoke-securityhub-jira" 194 | } 195 | ], 196 | "Default": "Success" 197 | }, 198 | "Success": { 199 | "Type": "Succeed" 200 | }, 201 | "invoke-securityhub-jira": { 202 | "Type": "Task", 203 | "Resource": "arn:aws:states:::lambda:invoke", 204 | "OutputPath": "$.Payload", 205 | "Parameters": { 206 | "Payload.$": "$", 207 | "FunctionName": "${jira_lambda}" 208 | }, 209 | "Retry": [ 210 | { 211 | "ErrorEquals": [ 212 | "Lambda.ServiceException", 213 | "Lambda.AWSLambdaException", 214 | "Lambda.SdkClientException" 215 | ], 216 | "IntervalSeconds": 2, 217 | "MaxAttempts": 6, 218 | "BackoffRate": 2 219 | } 220 | ], 221 | "End": true 222 | } 223 | } 224 | } 225 | 
-------------------------------------------------------------------------------- /findings_manager.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | workflow_status_filter = var.jira_integration.autoclose_enabled ? ["NEW", "NOTIFIED", "RESOLVED"] : ["NEW", "NOTIFIED"] 3 | } 4 | 5 | data "aws_iam_policy_document" "findings_manager_lambda_iam_role" { 6 | statement { 7 | sid = "TrustEventsToStoreLogEvent" 8 | actions = [ 9 | "logs:CreateLogGroup", 10 | "logs:CreateLogStream", 11 | "logs:DescribeLogStreams", 12 | "logs:PutLogEvents" 13 | ] 14 | resources = [ 15 | "arn:aws:logs:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:*" 16 | ] 17 | } 18 | 19 | statement { 20 | sid = "S3GetObjectAccess" 21 | actions = ["s3:GetObject"] 22 | resources = ["${module.findings_manager_bucket.arn}/*"] 23 | } 24 | 25 | statement { 26 | sid = "S3ListBucketObjects" 27 | actions = ["s3:ListBucket"] 28 | resources = ["${module.findings_manager_bucket.arn}/*"] 29 | } 30 | 31 | statement { 32 | sid = "EC2DescribeRegionsAccess" 33 | actions = ["ec2:DescribeRegions"] 34 | resources = ["*"] 35 | } 36 | 37 | statement { 38 | sid = "SecurityHubAccess" 39 | actions = [ 40 | "securityhub:BatchUpdateFindings", 41 | "securityhub:GetFindings" 42 | ] 43 | resources = [ 44 | "arn:aws:securityhub:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:hub/default" 45 | ] 46 | } 47 | 48 | statement { 49 | sid = "SecurityHubAccessList" 50 | actions = [ 51 | "securityhub:ListFindingAggregators" 52 | ] 53 | resources = ["*"] 54 | } 55 | 56 | statement { 57 | sid = "LambdaKMSAccess" 58 | actions = [ 59 | "kms:Decrypt", 60 | "kms:Encrypt", 61 | "kms:GenerateDataKey*", 62 | "kms:ReEncrypt*" 63 | ] 64 | effect = "Allow" 65 | resources = [ 66 | var.kms_key_arn 67 | ] 68 | } 69 | 70 | statement { 71 | sid = "LambdaSQSAllow" 72 | actions = [ 73 | "sqs:SendMessage", 74 | "sqs:ReceiveMessage", 75 | 
"sqs:DeleteMessage", 76 | "sqs:GetQueueAttributes" 77 | ] 78 | effect = "Allow" 79 | resources = [aws_sqs_queue.findings_manager_rule_q.arn] 80 | } 81 | 82 | } 83 | 84 | # Push the Lambda code zip deployment package to s3 85 | resource "aws_s3_object" "findings_manager_lambdas_deployment_package" { 86 | bucket = module.findings_manager_bucket.id 87 | key = "lambda_securityhub-findings-manager_${var.lambda_runtime}.zip" 88 | kms_key_id = var.kms_key_arn 89 | source = "${path.module}/files/pkg/lambda_securityhub-findings-manager_${var.lambda_runtime}.zip" 90 | source_hash = filemd5("${path.module}/files/pkg/lambda_securityhub-findings-manager_${var.lambda_runtime}.zip") 91 | tags = var.tags 92 | } 93 | 94 | ################################################################################ 95 | # Events Lambda 96 | ################################################################################ 97 | 98 | # Lambda function to manage Security Hub findings in response to an EventBridge event 99 | module "findings_manager_events_lambda" { 100 | #checkov:skip=CKV_AWS_272:Code signing not used for now 101 | source = "schubergphilis/mcaf-lambda/aws" 102 | version = "~> 1.4.1" 103 | 104 | name = var.findings_manager_events_lambda.name 105 | create_policy = true 106 | create_s3_dummy_object = false 107 | description = "Lambda to manage Security Hub findings in response to an EventBridge event" 108 | handler = "securityhub_events.lambda_handler" 109 | kms_key_arn = var.kms_key_arn 110 | layers = ["arn:aws:lambda:${data.aws_region.current.name}:017000801446:layer:AWSLambdaPowertoolsPythonV2:79"] 111 | log_retention = 365 112 | memory_size = var.findings_manager_events_lambda.memory_size 113 | policy = data.aws_iam_policy_document.findings_manager_lambda_iam_role.json 114 | runtime = var.lambda_runtime 115 | s3_bucket = var.s3_bucket_name 116 | s3_key = aws_s3_object.findings_manager_lambdas_deployment_package.key 117 | s3_object_version = 
aws_s3_object.findings_manager_lambdas_deployment_package.version_id 118 | security_group_egress_rules = var.findings_manager_events_lambda.security_group_egress_rules 119 | source_code_hash = aws_s3_object.findings_manager_lambdas_deployment_package.checksum_sha256 120 | subnet_ids = var.subnet_ids 121 | tags = var.tags 122 | timeout = var.findings_manager_events_lambda.timeout 123 | 124 | environment = { 125 | S3_BUCKET_NAME = var.s3_bucket_name 126 | S3_OBJECT_NAME = var.rules_s3_object_name 127 | LOG_LEVEL = var.findings_manager_events_lambda.log_level 128 | POWERTOOLS_LOGGER_LOG_EVENT = "false" 129 | POWERTOOLS_SERVICE_NAME = "securityhub-findings-manager-events" 130 | } 131 | } 132 | 133 | # EventBridge Rule that detect Security Hub events 134 | resource "aws_cloudwatch_event_rule" "securityhub_findings_events" { 135 | name = "rule-${var.findings_manager_events_lambda.name}" 136 | description = "EventBridge rule for detecting Security Hub findings events, triggering the findings manager events lambda." 137 | tags = var.tags 138 | 139 | event_pattern = <
"enabled": false
}