├── .github
│   ├── CLONE.md
│   └── workflows
│       └── main.yml
├── .gitignore
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── img
│   ├── AccountTag.jpg
│   ├── Aggregator.jpg
│   ├── ConfigAggregator.jpg
│   ├── Heidibaseline.jpg
│   ├── Q.jpg
│   ├── dashboard.gif
│   ├── exportAccountList.jpg
│   ├── s3Location.jpg
│   └── sampleDashboardHeidi.jpg
├── notification.md
└── src
    ├── DataCollectionModule
    │   ├── DataCollectionModule.yaml
    │   └── HeidiRoot.yaml
    ├── HealthModule
    │   ├── HealthModuleCollectionSetup.yaml
    │   ├── HealthModuleDataSetSetup.yaml
    │   ├── HealthModuleEventUrlSetup.yaml
    │   ├── HealthModuleQSAnalysis.yaml
    │   ├── HealthModuleTaginfoSetup.yaml
    │   ├── MockHealthEvent.json
    │   └── OrgHealthEventBackFill.Yaml
    ├── NotificationModule
    │   └── NotificationModule.yaml
    ├── ReferenceOds
    │   └── AccountsInfo
    │       └── Organization_accounts_information_sample.csv
    └── Setup
        ├── OneClickSetup.py
        └── utils
            ├── DataCollectionSetup.py
            ├── HealthEventBackFill.py
            ├── MemberSetup.py
            └── TagBackFill.py
/.github/CLONE.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | **Markdown**
4 |
5 | ```markdown
6 | [](https://github.com/MShawon/github-clone-count-badge)
7 |
8 | ```
9 |
10 | **HTML**
11 | ```html
12 |
13 | ```
14 |
--------------------------------------------------------------------------------
/.github/workflows/main.yml:
--------------------------------------------------------------------------------
1 | name: GitHub Clone Count Update Every Day
2 |
3 | on:
4 | schedule:
5 | - cron: "0 */24 * * *"
6 | workflow_dispatch:
7 |
8 | jobs:
9 | build:
10 | runs-on: ubuntu-latest
11 |
12 | steps:
13 | - uses: actions/checkout@v2
14 |
15 | - name: gh login
16 | run: echo "${{ secrets.SECRET_TOKEN }}" | gh auth login --with-token
17 |
18 | - name: parse latest clone count
19 | run: |
20 | curl --user "${{ github.actor }}:${{ secrets.SECRET_TOKEN }}" \
21 | -H "Accept: application/vnd.github.v3+json" \
22 | https://api.github.com/repos/${{ github.repository }}/traffic/clones \
23 | > clone.json
24 |
25 | - name: create gist and download previous count
26 | id: set_id
27 | run: |
28 | if gh secret list | grep -q "GIST_ID"
29 | then
30 | echo "GIST_ID found"
31 | echo ::set-output name=GIST::${{ secrets.GIST_ID }}
32 | curl https://gist.githubusercontent.com/${{ github.actor }}/${{ secrets.GIST_ID }}/raw/clone.json > clone_before.json
33 | if cat clone_before.json | grep '404: Not Found'; then
34 | echo "GIST_ID not valid anymore. Creating another gist..."
35 | gist_id=$(gh gist create clone.json | awk -F / '{print $NF}')
36 | echo $gist_id | gh secret set GIST_ID
37 | echo ::set-output name=GIST::$gist_id
38 | cp clone.json clone_before.json
39 | git rm --ignore-unmatch CLONE.md
40 | fi
41 | else
42 | echo "GIST_ID not found. Creating a gist..."
43 | gist_id=$(gh gist create clone.json | awk -F / '{print $NF}')
44 | echo $gist_id | gh secret set GIST_ID
45 | echo ::set-output name=GIST::$gist_id
46 | cp clone.json clone_before.json
47 | fi
48 |
49 | - name: update clone.json
50 | run: |
51 | curl https://raw.githubusercontent.com/MShawon/github-clone-count-badge/master/main.py > main.py
52 | python3 main.py
53 |
54 | - name: Update gist with latest count
55 | run: |
56 | content=$(sed -e 's/\\/\\\\/g' -e 's/\t/\\t/g' -e 's/\"/\\"/g' -e 's/\r//g' "clone.json" | sed -E ':a;N;$!ba;s/\r{0,1}\n/\\n/g')
57 | echo '{"description": "${{ github.repository }} clone statistics", "files": {"clone.json": {"content": "'"$content"'"}}}' > post_clone.json
58 | curl -s -X PATCH \
59 | --user "${{ github.actor }}:${{ secrets.SECRET_TOKEN }}" \
60 | -H "Content-Type: application/json" \
61 | -d @post_clone.json https://api.github.com/gists/${{ steps.set_id.outputs.GIST }} > /dev/null 2>&1
62 |
63 | if [ ! -f .github/CLONE.md ]; then
64 | shields="https://img.shields.io/badge/dynamic/json?color=success&label=Clone&query=count&url="
65 | url="https://gist.githubusercontent.com/${{ github.actor }}/${{ steps.set_id.outputs.GIST }}/raw/clone.json"
66 | repo="https://github.com/MShawon/github-clone-count-badge"
67 | echo ''> .github/CLONE.md
68 | echo '
69 | **Markdown**
70 |
71 | ```markdown' >> .github/CLONE.md
72 | echo "[]($repo)" >> .github/CLONE.md
73 | echo '
74 | ```
75 |
76 | **HTML**
77 | ```html' >> .github/CLONE.md
78 | echo "" >> .github/CLONE.md
79 | echo '```' >> .github/CLONE.md
80 |
81 | git add .github/CLONE.md
82 | git config --global user.name "GitHub Action"
83 | git config --global user.email "action@github.com"
84 | git commit -m "create clone count badge"
85 | fi
86 |
87 | - name: Push
88 | uses: ad-m/github-push-action@master
89 | with:
90 | github_token: ${{ secrets.GITHUB_TOKEN }}
91 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .*
2 | !/.gitignore
3 | !/.github
4 | src/Setup/utils/*.txt
5 | src/Setup/utils/__pycache__/
6 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 |
2 | # Code of Conduct
3 |
4 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
5 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
6 | opensource-codeofconduct@amazon.com with any additional questions or comments.
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing Guidelines
2 |
3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
4 | documentation, we greatly value feedback and contributions from our community.
5 |
6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
7 | information to effectively respond to your bug report or contribution.
8 |
9 |
10 | ## Reporting Bugs/Feature Requests
11 |
12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features.
13 |
14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already
15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
16 |
17 | * A reproducible test case or series of steps
18 | * The version of our code being used
19 | * Any modifications you've made relevant to the bug
20 | * Anything unusual about your environment or deployment
21 |
22 |
23 | ## Contributing via Pull Requests
24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
25 |
26 | 1. You are working against the latest source on the *main* branch.
27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
29 |
30 | To send us a pull request, please:
31 |
32 | 1. Fork the repository.
33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
34 | 3. Ensure local tests pass.
35 | 4. Commit to your fork using clear commit messages.
36 | 5. Send us a pull request, answering any default questions in the pull request interface.
37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
38 |
39 | GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
41 |
42 |
43 | ## Finding contributions to work on
44 | Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start.
45 |
46 |
47 | ## Code of Conduct
48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
50 | opensource-codeofconduct@amazon.com with any additional questions or comments.
51 |
52 |
53 | ## Security issue notifications
54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue.
55 |
56 |
57 | ## Licensing
58 |
59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
60 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT No Attribution
2 |
3 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of
6 | this software and associated documentation files (the "Software"), to deal in
7 | the Software without restriction, including without limitation the rights to
8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
9 | the Software, and to permit persons to whom the Software is furnished to do so.
10 |
11 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
12 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
13 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
14 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
15 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
16 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
17 |
18 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## **Health Events Intelligence Dashboards and Insights (HEIDI)**
2 | Single pane of glass for all your Health events across different accounts, regions and organizations.
3 |
4 | 
5 |
6 | ## **Table of Contents**
7 | - [What's New?](#whats-new)
8 | - [Solution Architecture](#solution-architecture)
9 | - [Prerequisites](#prerequisites)
10 | - [Installation](#installation)
11 | - [Data Collection Setup](#data-collection-setup)
12 | - [Member Setup](#member-setup-not-required-for-heidi-in-the-delegated-health-admin-account)
13 | - [Populate Resource Tags](#populate-resource-tags-optional)
14 | - [Update Metadata](#update-metadata-optional)
15 | - [Backfill HealthEvents](#backfill-healthevents-optional)
16 | - [Setup Validation](#setup-validation)
17 | - [Troubleshooting](#troubleshooting)
18 |
19 | ## **What's New?**
20 | *NEW:* Heidi now provides an efficient mechanism for data collection, visualization, and notifications. Operating on an event-driven model, Heidi simplifies the collection of data from various AWS services across numerous accounts within multiple AWS organizations. Heidi generates a QuickSight dashboard designed to streamline the monitoring of AWS events in a user-friendly format, enabling customers to conveniently access a comprehensive overview without needing to log in to each individual AWS account.
21 |
22 | Heidi consists of the following modules:
23 | 
24 | * Health Module (unified view of upcoming events, planned events (EOL), scheduled events, and issues across all AWS accounts and AWS Organizations)
25 | * Notification Module (powered by AWS Chatbot to get notifications on Slack or Teams)
26 |
27 | ## **Solution Architecture**
28 |
29 | The HEIDI Data Collection Framework enables you to collect data from different accounts, regions, and organizations. The following diagram illustrates a multi-account structure. The DataCollection Account refers to the account which will display the unified Amazon QuickSight dashboard and will receive events routed from the current account as well as all other accounts within your AWS Organizations. The link accounts refer to any accounts other than the DataCollection Account; they route enriched events to the DataCollection Account. The DataCollection architecture consists of an Amazon EventBridge custom bus, an Amazon EventBridge rule, an AWS Lambda function, and an Amazon S3 bucket. The presentation layer includes an Amazon QuickSight dashboard with Amazon Athena as the query engine.
30 |
31 | 1. AWS services generate events, which are sent to the default event bus in member/linked accounts.
32 | 2. A corresponding EventBridge rule on the default event bus directs events to the centralized bus upon pattern matching.
33 | 3. The centralized event bus then routes the events to Kinesis.
34 | 4. Kinesis Data Firehose processes and stores the events in S3 buckets.
35 | 5. Athena queries the S3 buckets.
36 | 6. QuickSight provides insights into the events.
37 |
38 | 
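
A quick way to sanity-check steps 5 and 6 later is to query the collected events directly in Athena. Below is a minimal boto3 sketch, assuming the default names from the templates (database `heidi-datacollectiondb`, table `awshealthevent`); the query output location is a placeholder you must replace:

```python
# Hedged sketch: query the collected health events with Athena via boto3.
# Database/table names follow the template defaults in this repo; the S3
# output location is a placeholder.
import boto3

athena = boto3.client("athena")

query = '''
SELECT account, region, "time", "detail-type"
FROM "heidi-datacollectiondb"."awshealthevent"
LIMIT 10
'''

athena.start_query_execution(
    QueryString=query,
    QueryExecutionContext={"Database": "heidi-datacollectiondb"},
    ResultConfiguration={"OutputLocation": "s3://aws-athena-query-results-example/"},
)
```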
39 |
40 | ## **Prerequisites**
41 |
42 | 1. To backfill your existing health events, the solution uses the AWS Health API. You need to have a Business Support, Enterprise On-Ramp, or Enterprise Support plan from AWS Support in order to use this API.
43 | 2. Sign up for QuickSight if you have never used it in the Data Collection account. To use the forecast capability in QuickSight, sign up for the Enterprise Edition.
44 | 3. Verify that the QuickSight service has access to Amazon Athena. To enable it, go to Security & permissions under *Manage QuickSight*.
45 | 4. The AWS Health Event Dashboard will use [SPICE](https://docs.aws.amazon.com/quicksight/latest/user/spice.html) to hold data. Go to SPICE capacity under *Manage QuickSight* and verify you have the required space.
46 |
47 | ## **Installation**
48 |
49 | In this section, we will guide you through configuring HEIDI for AWS Health events in both the Data Collection and Member accounts. Our recommendation is to deploy HEIDI in a delegated health admin account so that it can automatically receive feeds from all other accounts within your organization without any additional setup. You can designate up to 5 linked accounts as delegated health admin accounts within your AWS organization. To register an account as a delegated health admin, please follow these [instructions](https://docs.aws.amazon.com/health/latest/ug/delegated-administrator-organizational-view.html#register-a-delegated-administrator).
50 |
51 | ### **Data Collection Setup**
52 |
53 | The setup script provided in this repo will set up all the necessary components required to receive AWS health events from other accounts and other regions. This can be your payer/organization or any other linked/member AWS account which you designate to receive AWS Health data from other HEIDI Member accounts and regions.
54 |
55 | 1. To start, log in to your AWS console, launch **AWS CloudShell**, and clone the aws-health-events-insight repo. You can use your local environment, provided that you have assumed a role with the necessary permissions to deploy the solution.
56 |
57 | git clone https://github.com/aws-samples/aws-health-events-insight.git
58 |
59 | 2. Go to the `aws-health-events-insight` directory, run `OneClickSetup.py`, and provide account-specific inputs. The setup script will generate a CloudFormation stack to deploy all necessary AWS resources, including the QuickSight dashboard.
60 |
61 | cd aws-health-events-insight/src/Setup
62 | python3 OneClickSetup.py
63 |
64 | 3. Once the CloudFormation status changes to **CREATE_COMPLETE** (about 10-15 minutes), go to the QuickSight Analysis and verify the initial deployment.
65 |
66 | ### **Member Setup (not required for HEIDI in the delegated health admin account)**
67 |
68 | You can now receive a feed of AWS Health events on Amazon EventBridge from all accounts within your organization in AWS Organizations using organizational view and delegated administrator. With this feature, if you deploy HEIDI in the health delegated administrator account, it will ingest AWS Health events from all other accounts, and you don't have to run this setup.
69 |
70 | **Note:** If HEIDI is deployed in the health delegated admin account, you don't have to run member setup in any other linked accounts across the organization; this step can be skipped. However, if HEIDI is not deployed in the health delegated admin account, or if an account is outside your AWS organization, you must complete this setup to enable those accounts to send data feeds to the HEIDI data collection account.
71 |
72 | If you have additional Payer/Organization IDs, you must also run member setup within the delegated health admin account for each additional Payer.
73 |
74 | #### (Option 1) One Click Setup Script to add Member Region
75 | 1. Set up AWS credentials for the DataCollection Account, or log in to your AWS console and launch **AWS CloudShell**.
76 | 2. Go to the `aws-health-events-insight` directory, run `OneClickSetup.py`, select `Member Setup`, and provide the necessary inputs.
77 |
78 | cd aws-health-events-insight/src/Setup
79 | python3 OneClickSetup.py
80 |
81 | #### (Option 2) Bulk deployment via CloudFormation StackSet
82 | 1. In CloudFormation console, create a StackSet with new resources from the template file [HealthModuleCollectionSetup.yaml](https://github.com/aws-samples/aws-health-events-insight/blob/main/src/HealthModule/HealthModuleCollectionSetup.yaml).
83 | 2. Input the variables.
84 | 3. Select deployment targets (Deploy to OU or deploy to organization).
85 | 4. Select regions to deploy.
86 | 5. Submit.
87 |
88 | **Note:** If you are NOT deploying HEIDI in the health delegated admin account, you MUST complete Member Setup for each linked account and Region for which you want to receive events. To receive global events, you must create the member account/region setup in the US East (N. Virginia) Region, and in the US West (Oregon) Region as the backup Region if needed.
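
If you prefer to script Option 2, here is a hedged boto3 sketch. The StackSet name, account ID, OU ID, and Region list are placeholders; the parameter names come from `HealthModuleCollectionSetup.yaml`:

```python
# Hedged sketch of Option 2: create a service-managed StackSet and deploy it
# to an OU. Names, IDs, and Regions below are placeholders.
import boto3

cfn = boto3.client("cloudformation")

with open("src/HealthModule/HealthModuleCollectionSetup.yaml") as f:
    template_body = f.read()

cfn.create_stack_set(
    StackSetName="heidi-member-setup",
    TemplateBody=template_body,
    Capabilities=["CAPABILITY_IAM"],
    PermissionModel="SERVICE_MANAGED",
    AutoDeployment={"Enabled": True, "RetainStacksOnAccountRemoval": False},
    Parameters=[
        {"ParameterKey": "DataCollectionAccountID", "ParameterValue": "111122223333"},
        {"ParameterKey": "DataCollectionRegion", "ParameterValue": "us-east-1"},
        {"ParameterKey": "ResourcePrefix", "ParameterValue": "heidi-"},
    ],
)

cfn.create_stack_instances(
    StackSetName="heidi-member-setup",
    DeploymentTargets={"OrganizationalUnitIds": ["ou-examplerootid-exampleouid"]},
    Regions=["us-east-1", "us-west-2"],
)
```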
89 |
90 | ## **Populate Resource Tags (optional)**
91 | This is an optional step. AWS Resource Explorer customers can list all AWS resources indexed by Resource Explorer across services, AWS Regions, and AWS accounts. To populate resource tags, you must first configure AWS Resource Explorer; see [setting up and configuring Resource Explorer](https://docs.aws.amazon.com/resource-explorer/latest/userguide/getting-started-setting-up.html) before proceeding. Once setup is complete, proceed as follows:
92 |
93 | 1. Go to the AWS account where you have created the AWS Resource Explorer [aggregator index](https://docs.aws.amazon.com/resource-explorer/latest/userguide/getting-started-terms-and-concepts.html#term-index).
94 |
95 | 2. Go to the `aws-health-events-insight` directory, run `TagBackFill.py`, and provide the necessary inputs.
96 |
97 | cd aws-health-events-insight/src/Setup/utils
98 | python3 TagBackFill.py
99 |
100 | 3. Once the script finishes, refresh the QuickSight dataset.
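
For context, a stripped-down sketch of the kind of call `TagBackFill.py` relies on, assuming you run it in the account holding the aggregator index; the view ARN is a placeholder, and `TagBackFill.py` remains the supported path:

```python
# Hedged sketch: list indexed resources (with their properties, including
# tags) through the Resource Explorer API. The view ARN is a placeholder.
import boto3

rex = boto3.client("resource-explorer-2")

view_arn = "arn:aws:resource-explorer-2:us-east-1:111122223333:view/example-view/abcd1234"

response = rex.search(QueryString="*", ViewArn=view_arn, MaxResults=100)
for resource in response["Resources"]:
    print(resource["Arn"], resource.get("Properties", []))
```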
101 |
102 | ## **Update Metadata (optional)**
103 | This is an optional step. You can map AWS account IDs to an account name and account tags (AppID, Env, etc.).
104 |
105 | 1. From your Payer/Organization account, go to AWS Organizations and export the account list. If you don't have access or can't get the export list, you can create one from [this sample file](https://github.com/aws-samples/aws-health-events-insight/blob/main/src/ReferenceOds/AccountsInfo/Organization_accounts_information_sample.csv).
106 |
107 | 
108 |
109 | 2. For account tags, edit the account list CSV file and add the Tag field as shown below.
110 |
111 | 
112 |
113 | 3. **Important:** Upload the file to the specific Amazon S3 location so that the QuickSight dataset can join the data and create the mapping. A sketch of the upload follows the screenshot below.
114 |
115 | 
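
A minimal sketch of the upload in boto3: the key prefix matches the `accountsinfo` Glue table location defined in `DataCollectionModule.yaml`, while the bucket name is a placeholder for your data collection bucket:

```python
# Hedged sketch of step 3: upload the accounts CSV where the accountsinfo
# Glue table expects it. Replace the bucket name with your own.
import boto3

s3 = boto3.client("s3")

s3.upload_file(
    "Organization_accounts_information_sample.csv",
    "your-data-collection-bucket",
    "DataCollection-metadata/ReferenceOds/AccountsInfo/Organization_accounts_information_sample.csv",
)
```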
116 |
117 | ## **Setup Validation**
118 | Send a mock event to test setup.
119 |
120 | 1. Go to the EventBridge console and choose the default event bus (you can choose any member account or region), then click the **Send events** button.
121 | 2. **Important:** Set the **Event source** and **Detail type** to `heidi.health`, otherwise the rule will discard the mock event.
122 | 3. Copy the content below `Send a mock event to test Control Account setup` from [MockHealthEvent.json](https://github.com/aws-samples/aws-health-events-insight/blob/main/src/HealthModule/MockHealthEvent.json) and paste it into the **Event detail** field.
123 | 4. Click **Send**.
124 |
125 | You will see the event in Amazon S3. For the mock event to show up in QuickSight sooner, you can refresh the QuickSight dataset manually.
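
If you prefer the SDK over the console, the same mock event can be sent with boto3. This is a sketch: it reads the detail payload from `MockHealthEvent.json`, and you should use the relevant JSON snippet from that file as the event detail:

```python
# Hedged sketch: send the mock event programmatically. Source and DetailType
# must be heidi.health or the rule discards the event.
import boto3

events = boto3.client("events")

with open("src/HealthModule/MockHealthEvent.json") as f:
    detail = f.read()  # use the relevant JSON snippet from the file

events.put_events(
    Entries=[{
        "Source": "heidi.health",
        "DetailType": "heidi.health",
        "Detail": detail,
        "EventBusName": "default",
    }]
)
```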
126 |
127 | ## **Backfill HealthEvents (optional)**
128 |
129 | **Option 1: Manual backfill for individual Account**
130 |
131 | By default, Heidi does not automatically backfill older health events. However, you can manually perform this task using the [HealthEventBackFill.py](https://github.com/aws-samples/aws-health-events-insight/blob/main/src/Setup/utils/HealthEventBackFill.py) script. You need to run this Python script in each account where backfill is required.
132 |
133 | Go to the `aws-health-events-insight` directory, run `HealthEventBackFill.py`, and provide the necessary inputs.
134 |
135 | cd aws-health-events-insight/src/Setup/utils
136 | python3 HealthEventBackFill.py
137 |
138 | Make sure to execute this script in each AWS account for which you intend to backfill events.
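
For context, the backfill is built around the AWS Health `DescribeEvents` API (hence the support-plan prerequisite). A simplified, hedged sketch of the idea, with illustrative names rather than the script's actual internals:

```python
# Hedged sketch of the backfill idea: read past events from the AWS Health
# API and re-emit them onto the default bus as heidi.health events.
import json
import boto3

health = boto3.client("health", region_name="us-east-1")  # Health API global endpoint region
events = boto3.client("events")

paginator = health.get_paginator("describe_events")
for page in paginator.paginate():
    for event in page["events"]:
        events.put_events(
            Entries=[{
                "Source": "heidi.health",
                "DetailType": "heidi.health",
                "Detail": json.dumps(event, default=str),  # datetimes -> strings
                "EventBusName": "default",
            }]
        )
```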
139 |
140 | **Option 2: Bulk Backfill across AWS Organization/Organizational Unit (OU)**
141 |
142 | 1. In CloudFormation console, create a StackSet with new resources from the template file [OrgHealthEventBackfill.yaml](https://github.com/aws-samples/aws-health-events-insight/blob/main/src/HealthModule/OrgHealthEventBackFill.Yaml).
143 | 2. Input the centralized Heidi DataCollection Account, DataCollection Region, and ResourcePrefix.
144 | 3. Select deployment targets (Deploy to OU or deploy to organization).
145 | 4. Select us-east-1 region to deploy.
146 | 5. Submit.
147 |
148 | Once the StackSets are deployed, they create a Lambda function in each target account that sends events from that account to the Heidi data collection account. You can remove the StackSets once they have run, as this is a one-time activity to backfill the events.
149 |
150 | ## **FAQ**
151 |
152 | **Q: Does HEIDI support multiple payer/AWS organizations?**\
153 | **A:** Yes, HEIDI allows you to include additional organizational IDs during deployment.
154 |
155 | **Q: Is member setup required in all linked accounts?**\
156 | **A:** No, if the data collection deployment is in the delegated health admin account, member setup is NOT required in all linked accounts. See Member Setup for more details.
157 |
158 | **Q: What is the cost of the Heidi solution?**\
159 | **A:** The cost varies depending on the number of notifications. For up to 1 million notifications per month, it's approximately $30, including the QuickSight licensing cost. For more details, see [calculations](https://calculator.aws/#/estimate?id=a5243df5c6b91c413a8b535d292e480f34bdb030).
160 |
161 |
162 | ## **Troubleshooting**
163 |
164 | #### ***1. SYNTAX_ERROR: line 41:15: UNNEST on other than the right side of CROSS JOIN is not supported***
165 |
166 | This implies that you are using Athena engine version 2. Please upgrade the workgroup to Athena engine version 3; version 2 is on a deprecation path.
167 |
168 | #### ***2. Template format error: Unrecognized resource types: [AWS::QuickSight::RefreshSchedule]***
169 |
170 | `AWS::QuickSight::RefreshSchedule` does not exist in certain regions such as us-west-1, ca-central-1, etc. You can comment out the `AWSHealthEventQSDataSetRefresh` section in [HealthModuleDataSetSetup.yaml](https://github.com/aws-samples/aws-health-events-insight/blob/main/src/HealthModule/HealthModuleDataSetSetup.yaml) and set up the refresh schedule from the QuickSight console.
171 |
172 | #### ***3. Resource handler returned message: Insufficient permissions to execute the query. Insufficient Lake Formation permission(s) on heididatacollectiondb***
173 |
174 | If Lake Formation is enabled, both the QuickSight analysis author and the QuickSight service role need access permissions on the heididatacollectiondb database and all associated tables.
175 |
176 | 1. Navigate to Lake Formation and go to the "Permissions" tab.
177 | 2. Under "Data Lake Permissions," select "Grant."
178 | 3. Choose "SAML users and groups."
179 | 4. **Important:** Provide the QuickSight ARN. This ARN represents the role that owns (or authors) the dataset.
180 | 5. From the dropdown menu, select the "heididatacollectiondb" database and grant the necessary permission.
181 | 6. Repeat the previous step (Step 5), but this time select all tables and grant the required permission.
182 | 7. Repeat the same process for the QuickSight service role.
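
The same grants can be scripted. A hedged boto3 sketch of steps 5 and 6, where the principal ARN is a placeholder for your QuickSight author or service role:

```python
# Hedged sketch: grant database and all-tables permissions in Lake Formation.
# The principal ARN below is a placeholder.
import boto3

lf = boto3.client("lakeformation")
principal = {
    "DataLakePrincipalIdentifier": "arn:aws:quicksight:us-east-1:111122223333:user/default/example-author"
}

# Step 5: the database itself
lf.grant_permissions(
    Principal=principal,
    Resource={"Database": {"Name": "heididatacollectiondb"}},
    Permissions=["DESCRIBE"],
)

# Step 6: all tables in the database
lf.grant_permissions(
    Principal=principal,
    Resource={"Table": {"DatabaseName": "heididatacollectiondb", "TableWildcard": {}}},
    Permissions=["SELECT", "DESCRIBE"],
)
```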
183 |
184 | #### ***4. Possible Reasons for No Data in AWS QuickSight Analysis:***
185 |
186 | 1. Your AWS environment is relatively new and does not currently have any AWS Health events. To verify this, check the AWS Health Dashboard in the AWS Console and send a mock event.
187 | 2. The QuickSight dataset was created before the events were backfilled by Kinesis Data Firehose. To resolve this, manually refresh the QuickSight dataset.
188 |
189 | #### ***5. Not getting Notifications on Slack/Team:***
190 |
191 | 1. Go to EventBridge and see if it has failures sending events to SNS. It's possible that SNS is encrypted with a KMS key that is not accessible by the EventBridge service role.
192 | 2. Your Amazon SNS topic must use an AWS KMS key that is customer managed. Visit [SNS-EB-Notification](https://repost.aws/knowledge-center/sns-not-getting-eventbridge-notification) to learn more.
193 |
194 |
195 | [](https://github.com/aws-samples/aws-health-events-insight)
196 |
197 |
--------------------------------------------------------------------------------
/img/AccountTag.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/aws-health-events-insight/e4f6f27ed057c5b51048cf686c98ba79be67bd46/img/AccountTag.jpg
--------------------------------------------------------------------------------
/img/Aggregator.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/aws-health-events-insight/e4f6f27ed057c5b51048cf686c98ba79be67bd46/img/Aggregator.jpg
--------------------------------------------------------------------------------
/img/ConfigAggregator.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/aws-health-events-insight/e4f6f27ed057c5b51048cf686c98ba79be67bd46/img/ConfigAggregator.jpg
--------------------------------------------------------------------------------
/img/Heidibaseline.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/aws-health-events-insight/e4f6f27ed057c5b51048cf686c98ba79be67bd46/img/Heidibaseline.jpg
--------------------------------------------------------------------------------
/img/Q.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/aws-health-events-insight/e4f6f27ed057c5b51048cf686c98ba79be67bd46/img/Q.jpg
--------------------------------------------------------------------------------
/img/dashboard.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/aws-health-events-insight/e4f6f27ed057c5b51048cf686c98ba79be67bd46/img/dashboard.gif
--------------------------------------------------------------------------------
/img/exportAccountList.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/aws-health-events-insight/e4f6f27ed057c5b51048cf686c98ba79be67bd46/img/exportAccountList.jpg
--------------------------------------------------------------------------------
/img/s3Location.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/aws-health-events-insight/e4f6f27ed057c5b51048cf686c98ba79be67bd46/img/s3Location.jpg
--------------------------------------------------------------------------------
/img/sampleDashboardHeidi.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/aws-health-events-insight/e4f6f27ed057c5b51048cf686c98ba79be67bd46/img/sampleDashboardHeidi.jpg
--------------------------------------------------------------------------------
/notification.md:
--------------------------------------------------------------------------------
1 | # Overview
2 |
3 | Heidi can send incoming event notifications to Slack, Teams, or Chime. AWS Chatbot is an AWS service that enables DevOps and software development teams to use messaging-program chat rooms to monitor and respond to operational events in their AWS Cloud. AWS Chatbot processes AWS service notifications from Amazon Simple Notification Service (Amazon SNS) and forwards them to chat rooms so teams can analyze and act on them immediately, regardless of location.
4 |
5 | 
6 |
7 | # Slack Notification Setup
8 |
9 | Heidi will set up all required resources through a CloudFormation template. If this is a first-time setup, the Slack workspace setup/OAuth authorization must be done from the AWS console.
10 | 1. Open the AWS Chatbot console at https://console.aws.amazon.com/chatbot/
11 | 2. Under Configure a chat client, choose Slack, then choose Configure client.
12 | 3. From the dropdown list at the top right, choose the Slack workspace that you want to use with AWS Chatbot and choose Allow.
13 | 4. Proceed to the OneClickSetup script and provide all necessary information.
14 |
15 | # Teams notification Setup
16 |
17 | Similar to the Slack setup, if this is a first-time setup for MS Teams, the Teams workspace setup/OAuth authorization must be done from the AWS console.
18 | 1. Open the AWS Chatbot console at https://console.aws.amazon.com/chatbot/
19 | 2. Under Configure a chat client, choose Microsoft Teams, then choose Configure client.
20 | 3. Copy and paste your Microsoft Teams channel URL. Your channel URL contains your tenant, team, and channel IDs.
21 | 4. Choose Configure, and on the Microsoft Teams authorization page, choose Accept.
22 | 5. From the Microsoft Teams page, choose Configure new channel.
23 | 6. Proceed to the OneClickSetup script and provide all necessary information.
24 |
25 | # Additional Reading
26 |
27 | 1. To set up AWS Chatbot with Slack, please follow this step-by-step tutorial: [Chatbot-Setup-with-slack](https://docs.aws.amazon.com/chatbot/latest/adminguide/slack-setup.html).
28 | 2. To set up AWS Chatbot with Amazon Chime, please follow this step-by-step tutorial: [Chatbot-Setup-with-chime](https://docs.aws.amazon.com/chatbot/latest/adminguide/chime-setup.html).
29 | 3. To set up AWS Chatbot with Teams, please follow this step-by-step tutorial: [Chatbot-Setup-with-teams](https://docs.aws.amazon.com/chatbot/latest/adminguide/teams-setup.html).
30 |
31 | # Setting up Filters
32 |
33 | By default, all events going to the centralized event bus will be routed to your notification channel. If you only want specific events, you can set a filter policy on the SNS subscription.
34 |
35 | [Filter-Events](https://docs.aws.amazon.com/sns/latest/dg/sns-subscription-filter-policies.html)
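
A minimal boto3 sketch of attaching such a filter, assuming you filter on the event's `source` field in the message body; the subscription ARN and the filter values are placeholders:

```python
# Hedged sketch: attach a filter policy so only selected event sources
# reach the channel. The subscription ARN below is a placeholder.
import json
import boto3

sns = boto3.client("sns")
sub_arn = "arn:aws:sns:us-east-1:111122223333:heidi-topic:00000000-0000-0000-0000-000000000000"

sns.set_subscription_attributes(
    SubscriptionArn=sub_arn,
    AttributeName="FilterPolicyScope",
    AttributeValue="MessageBody",  # match on the event body, not message attributes
)
sns.set_subscription_attributes(
    SubscriptionArn=sub_arn,
    AttributeName="FilterPolicy",
    AttributeValue=json.dumps({"source": ["aws.health"]}),
)
```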
--------------------------------------------------------------------------------
/src/DataCollectionModule/DataCollectionModule.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | AWSTemplateFormatVersion: '2010-09-09'
3 | Description: Event Driven Data Collection Stack
4 |
5 | Parameters:
6 | AWSOrganizationID:
7 | Type: String
8 | Description: Comma-delimited list of AWS Organizations IDs (one per payer account) whose accounts are allowed to put events on the event bus.
9 | DataCollectionBucket:
10 | Type: String
11 | Description: Name of the S3 bucket to be created to hold the collected data.
12 | DataCollectionBucketKmsArn:
13 | Type: String
14 | Default: "na"
15 | Description: Enter the KMS key ARN if the supplied destination bucket is encrypted with KMS (leave as "na" for SSE encryption)
16 | AthenaResultBucket:
17 | Type: String
18 | Default: "aws-athena-query-results-*"
19 | Description: S3 Bucket where Amazon Athena stores results
20 | AthenaBucketKmsArn:
21 | Type: String
22 | Default: "na"
23 | Description: Enter the KMS key ARN if the Athena results bucket is encrypted with KMS (leave as "na" for SSE encryption)
24 | ResourcePrefix:
25 | Type: String
26 | Description: This prefix will be placed in front of resources created where required. Note you may wish to add a dash at the end to make it more readable
27 | Default: "heidi-"
28 | HeidiDataCollectionDB:
29 | Type: String
30 | Description: Athena DataCollection DB name
31 | Default: "datacollectiondb"
32 | EnableHealthModule:
33 | Type: String
34 | Description: Collects AWS Health data from different accounts
35 | Default: "yes"
36 | AllowedValues:
37 | - "yes"
38 | - "no"
39 |
40 | Outputs:
41 | HeidiQSDataSourceArn:
42 | Condition: DeployDataCollectionComponents
43 | Value: !GetAtt HeidiQSDataSource.Arn
44 | Export:
45 | Name: !Sub ${ResourcePrefix}QSDataSourceArn
46 | HeidiDataCollectionDB:
47 | Condition: DeployDataCollectionComponents
48 | Value: !Ref HeidiDataCollectionDB
49 | Export:
50 | Name: !Sub ${ResourcePrefix}DataCollectionDB
51 |
52 | Conditions:
53 | DataCollectionBucketKmsArn: !Not [!Equals [!Ref DataCollectionBucketKmsArn, "na"]]
54 | AthenaBucketKmsArn: !Not [!Equals [!Ref AthenaBucketKmsArn, "na"]]
55 | DeployDataCollectionComponents: !Equals [ !Ref EnableHealthModule, "yes"]
56 |
57 | Resources:
58 | # Define an IAM Role for the Kinesis Firehose delivery stream
59 | DataCollectionKinesisFirehoseRole:
60 | Condition: DeployDataCollectionComponents
61 | Type: AWS::IAM::Role
62 | Properties:
63 | AssumeRolePolicyDocument:
64 | Version: '2012-10-17'
65 | Statement:
66 | - Effect: Allow
67 | Principal:
68 | Service: firehose.amazonaws.com
69 | Action: sts:AssumeRole
70 | Policies:
71 | # Policy allowing CloudWatch Logs access
72 | - PolicyName: cloudwatch-logs-access
73 | PolicyDocument:
74 | Version: '2012-10-17'
75 | Statement:
76 | - Effect: Allow
77 | Action:
78 | - logs:CreateLogGroup
79 | Resource: !Sub "arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*"
80 | - Effect: Allow
81 | Action:
82 | - logs:CreateLogStream
83 | - logs:PutLogEvents
84 | Resource: !Sub "arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/kinesisfirehose/*"
85 | # Policy allowing S3 access
86 | - PolicyName: AllowS3Access
87 | PolicyDocument:
88 | Version: '2012-10-17'
89 | Statement:
90 | - Sid: AllowS3Access
91 | Effect: Allow
92 | Action:
93 | - "s3:AbortMultipartUpload"
94 | - "s3:GetBucketLocation"
95 | - "s3:GetObject"
96 | - "s3:ListBucket"
97 | - "s3:ListBucketMultipartUploads"
98 | - "s3:PutObject"
99 | Resource:
100 | - !Sub "arn:${AWS::Partition}:s3:::${DataCollectionBucket}"
101 | - !Sub "arn:${AWS::Partition}:s3:::${DataCollectionBucket}/*"
102 | # Policy allowing KMS access
103 | - !If
104 | - DataCollectionBucketKmsArn
105 | - PolicyName: AllowkmsAccess
106 | PolicyDocument:
107 | Version: '2012-10-17'
108 | Statement:
109 | - Sid: AllowKMSAccess
110 | Effect: Allow
111 | Action:
112 | - "kms:Encrypt"
113 | - "kms:Decrypt"
114 | - "kms:ReEncrypt*"
115 | - "kms:GenerateDataKey*"
116 | Resource:
117 | - !Ref DataCollectionBucketKmsArn
118 | - !Ref AWS::NoValue
119 |
120 | # This is the common EventBridge role that gives EventBridge the necessary permissions to put events to Kinesis Firehose
121 | DataCollectionRuleRole:
122 | Condition: DeployDataCollectionComponents
123 | Type: AWS::IAM::Role
124 | Properties:
125 | AssumeRolePolicyDocument:
126 | Version: "2012-10-17"
127 | Statement:
128 | - Effect: Allow
129 | Principal:
130 | Service: events.amazonaws.com
131 | Action: sts:AssumeRole
132 | Path: "/"
133 | Policies:
134 | # Policy allowing the role to put records to the Kinesis Firehose
135 | - PolicyName: !Sub "DataCollectionRule-${AWS::AccountId}-${AWS::Region}-Policy"
136 | PolicyDocument:
137 | Version: "2012-10-17"
138 | Statement:
139 | - Effect: Allow
140 | Action:
141 | - "firehose:PutRecord"
142 | - "firehose:PutRecordBatch"
143 | Resource:
144 | - !GetAtt DataCollectionKinesisFirehose.Arn
145 | - PolicyName: !Sub "DataCollectionRuleDefault-${AWS::AccountId}-${AWS::Region}-Policy"
146 | PolicyDocument:
147 | Version: "2012-10-17"
148 | Statement:
149 | - Effect: Allow
150 | Action: "events:PutEvents"
151 | Resource:
152 | - !GetAtt DataCollectionBus.Arn
153 |
154 | # Define an AWS Events Event Bus for DataCollection
155 | DataCollectionBus:
156 | Type: "AWS::Events::EventBus"
157 | Properties:
158 | Name: !Sub ${ResourcePrefix}DataCollectionBus-${AWS::AccountId}
159 |
160 | # Define the policy for the event bus to allow all accounts from the specified organization to put events
161 | DataCollectionBusPolicy:
162 | Type: AWS::Events::EventBusPolicy
163 | Properties:
164 | EventBusName: !Ref DataCollectionBus
165 | StatementId: !Sub ${ResourcePrefix}AllowAllAccountsInOrganizationToPutEvents
166 | Statement:
167 | Effect: "Allow"
168 | Principal: "*"
169 | Action: "events:PutEvents"
170 | Resource: !GetAtt DataCollectionBus.Arn
171 | Condition:
172 | StringEquals:
173 | aws:PrincipalOrgID: !Split [",", !Ref AWSOrganizationID]
174 |
175 | # Define an AWS Kinesis Firehose Delivery Stream for data ingestion
176 | DataCollectionKinesisFirehose:
177 | Condition: DeployDataCollectionComponents
178 | Type: "AWS::KinesisFirehose::DeliveryStream"
179 | Properties:
180 | DeliveryStreamName: !Sub "${ResourcePrefix}DataCollection-${AWS::AccountId}-${AWS::Region}"
181 | DeliveryStreamType: "DirectPut"
182 | DeliveryStreamEncryptionConfigurationInput:
183 | KeyType: AWS_OWNED_CMK
184 | ExtendedS3DestinationConfiguration:
185 | BucketARN: !Sub "arn:${AWS::Partition}:s3:::${DataCollectionBucket}"
186 | RoleARN: !GetAtt DataCollectionKinesisFirehoseRole.Arn
187 | Prefix: "DataCollection-data/!{partitionKeyFromQuery:source}/!{timestamp:yyyy}/!{timestamp:MM}/!{timestamp:dd}/"
188 | CompressionFormat: "UNCOMPRESSED"
189 | BufferingHints:
190 | IntervalInSeconds: 60
191 | SizeInMBs: 64
192 | ErrorOutputPrefix: "DataCollection-error/"
193 | CloudWatchLoggingOptions:
194 | Enabled: true
195 | LogGroupName: "DataCollectionFirehoseLogs"
196 | LogStreamName: "DataCollectionFirehoseStream"
197 | DynamicPartitioningConfiguration:
198 | Enabled: true
199 | RetryOptions:
200 | DurationInSeconds: 300
201 | ProcessingConfiguration:
202 | Enabled: true
203 | Processors:
204 | - Type: AppendDelimiterToRecord
205 | - Type: MetadataExtraction
206 | Parameters:
207 | - ParameterName: MetadataExtractionQuery
208 | ParameterValue: "{source:.source}"
209 | - ParameterName: JsonParsingEngine
210 | ParameterValue: JQ-1.6
211 |
212 | DataCollectionRuleOnCustomBus:
213 | Condition: DeployDataCollectionComponents
214 | Type: AWS::Events::Rule
215 | Properties:
216 | Description: DataCollectionRuleForHeidi
217 | EventBusName: !Ref DataCollectionBus
218 | EventPattern:
219 | source:
220 | - prefix: aws.
221 | - prefix: heidi.
222 | - prefix: awshealthtest
223 | Targets:
224 | - Id: DataCollectionRuleOnDataCollectionBus
225 | Arn: !GetAtt DataCollectionKinesisFirehose.Arn
226 | RoleArn: !GetAtt DataCollectionRuleRole.Arn
227 |
228 | # Create an AWS IAM Managed Policy for QuickSight service role with necessary permissions
229 | AthenaQuicksightAssumeRole:
230 | Condition: DeployDataCollectionComponents
231 | Type: AWS::IAM::Role
232 | Properties:
233 | AssumeRolePolicyDocument:
234 | Version: '2012-10-17'
235 | Statement:
236 | - Effect: Allow
237 | Principal:
238 | Service: quicksight.amazonaws.com
239 | Action: sts:AssumeRole
240 | Policies:
241 | # Policy allowing athena access for heidi datacollection
242 | - PolicyName: heidi-s3-access-policy
243 | PolicyDocument:
244 | Version: "2012-10-17"
245 | Statement:
246 | - Effect: "Allow"
247 | Action:
248 | - s3:ListAllMyBuckets
249 | Resource: "*"
250 | - Effect: "Allow"
251 | Action:
252 | - "s3:GetBucketLocation"
253 | - "s3:GetObject"
254 | - "s3:ListBucket"
255 | - "s3:ListBucketMultipartUploads"
256 | - "s3:AbortMultipartUpload"
257 | - "s3:PutObject"
258 | - "s3:ListMultipartUploadParts"
259 | - "s3:CreateBucket"
260 | Resource:
261 | - !Sub arn:${AWS::Partition}:s3:::${DataCollectionBucket}
262 | - !Sub arn:${AWS::Partition}:s3:::${DataCollectionBucket}/*
263 | - !Sub arn:${AWS::Partition}:s3:::${AthenaResultBucket}
264 | - !Sub arn:${AWS::Partition}:s3:::${AthenaResultBucket}/*
265 | - Effect: "Allow"
266 | Action:
267 | - athena:BatchGetQueryExecution
268 | - athena:CancelQueryExecution
269 | - athena:GetCatalogs
270 | - athena:GetExecutionEngine
271 | - athena:GetExecutionEngines
272 | - athena:GetNamespace
273 | - athena:GetNamespaces
274 | - athena:GetQueryExecution
275 | - athena:GetQueryExecutions
276 | - athena:GetQueryResults
277 | - athena:GetQueryResultsStream
278 | - athena:GetTable
279 | - athena:GetTables
280 | - athena:ListQueryExecutions
281 | - athena:RunQuery
282 | - athena:StartQueryExecution
283 | - athena:StopQueryExecution
284 | - athena:ListWorkGroups
285 | - athena:ListEngineVersions
286 | - athena:GetWorkGroup
287 | - athena:GetDataCatalog
288 | - athena:GetDatabase
289 | - athena:GetTableMetadata
290 | - athena:ListDataCatalogs
291 | - athena:ListDatabases
292 | - athena:ListTableMetadata
293 | Resource: "*"
294 | - Effect: "Allow"
295 | Action:
296 | - "glue:CreateDatabase"
297 | - "glue:DeleteDatabase"
298 | - "glue:GetDatabase"
299 | - "glue:GetDatabases"
300 | - "glue:UpdateDatabase"
301 | - "glue:CreateTable"
302 | - "glue:DeleteTable"
303 | - "glue:BatchDeleteTable"
304 | - "glue:UpdateTable"
305 | - "glue:GetTable"
306 | - "glue:GetTables"
307 | - "glue:BatchCreatePartition"
308 | - "glue:CreatePartition"
309 | - "glue:DeletePartition"
310 | - "glue:BatchDeletePartition"
311 | - "glue:UpdatePartition"
312 | - "glue:GetPartition"
313 | - "glue:GetPartitions"
314 | - "glue:BatchGetPartition"
315 | Resource:
316 | - "*"
317 | - Effect: "Allow"
318 | Action:
319 | - "lakeformation:GetDataAccess"
320 | Resource:
321 | - "*"
322 | - !If
323 | - DataCollectionBucketKmsArn
324 | - Effect: Allow
325 | Action:
326 | - "kms:Encrypt"
327 | - "kms:Decrypt"
328 | - "kms:ReEncrypt*"
329 | - "kms:GenerateDataKey*"
330 | Resource:
331 | - !Ref DataCollectionBucketKmsArn
332 | - !Ref AWS::NoValue
333 | - !If
334 | - AthenaBucketKmsArn
335 | - Effect: Allow
336 | Action:
337 | - "kms:Encrypt"
338 | - "kms:Decrypt"
339 | - "kms:ReEncrypt*"
340 | - "kms:GenerateDataKey*"
341 | Resource:
342 | - !Ref AthenaBucketKmsArn
343 | - !Ref AWS::NoValue
344 |
345 | AthenaDataCollectionDB:
346 | Condition: DeployDataCollectionComponents
347 | Type: AWS::Glue::Database
348 | Properties:
349 | CatalogId: !Sub '${AWS::AccountId}'
350 | DatabaseInput:
351 | Name: !Sub ${ResourcePrefix}${HeidiDataCollectionDB}
352 | Description: "Heidi Data Collection Athena DB"
353 |
354 | # Create an AWS QuickSight DataSource for DataCollection
355 | HeidiQSDataSource:
356 | Condition: DeployDataCollectionComponents
357 | Type: AWS::QuickSight::DataSource
358 | Properties:
359 | DataSourceId: !Sub "${ResourcePrefix}DataSource-${AWS::AccountId}"
360 | AwsAccountId: !Sub ${AWS::AccountId}
361 | Name: !Sub "${ResourcePrefix}DataSource-${AWS::AccountId}"
362 | Type: ATHENA
363 | DataSourceParameters:
364 | AthenaParameters:
365 | WorkGroup: primary
366 | RoleArn: !GetAtt AthenaQuicksightAssumeRole.Arn
367 | SslProperties:
368 | DisableSsl: false
369 |
370 | # Common Account Mapping Table
371 | GlueTableAccountMap:
372 | Condition: DeployDataCollectionComponents
373 | Type: AWS::Glue::Table
374 | Properties:
375 | DatabaseName: !Ref AthenaDataCollectionDB
376 | CatalogId: !Sub '${AWS::AccountId}'
377 | TableInput:
378 | Name: accountsinfo
379 | Description: AWS Health Event Account Data
380 | Parameters:
381 | skip.header.line.count: '1'
382 | TableType: EXTERNAL_TABLE
383 | StorageDescriptor:
384 | Columns:
385 | - { Name: accountid, Type: string }
386 | - { Name: arn, Type: string }
387 | - { Name: email, Type: string }
388 | - { Name: name, Type: string }
389 | - { Name: status, Type: string }
390 | - { Name: joinedmethod, Type: string }
391 | - { Name: joinedtimestamp, Type: string }
392 | - { Name: Tag, Type: string }
393 | Location: !Sub 's3://${DataCollectionBucket}/DataCollection-metadata/ReferenceOds/AccountsInfo'
394 | InputFormat: org.apache.hadoop.mapred.TextInputFormat
395 | OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
396 | SerdeInfo:
397 | SerializationLibrary: org.apache.hadoop.hive.serde2.OpenCSVSerde
398 | Parameters:
399 | separatorChar: ","
400 | quoteChar: "\""
401 | escapeChar: "\\"
402 | columns: "accountid,arn,email,name,status,joinedmethod,joinedtimestamp"
403 | PartitionKeys: []
404 |
405 | # Common Tag info table
406 | GlueTableTaginfo:
407 | # AWS Glue Table resource tag info
408 | Condition: DeployDataCollectionComponents
409 | Type: AWS::Glue::Table
410 | Properties:
411 | # Reference to the AWS Glue Database
412 | DatabaseName: !Ref AthenaDataCollectionDB
413 | CatalogId: !Sub '${AWS::AccountId}'
414 | TableInput:
415 | Name: taginfo
416 | Description: 'AWS tag info data'
417 | Owner: GlueTeam
418 | PartitionKeys:
419 | - Name: date_created
420 | Type: string
421 | - Name: source_partition
422 | Type: string
423 | Parameters:
424 | EXTERNAL: 'TRUE' # 'EXTERNAL' should be a string
425 | projection.enabled: 'true'
426 | projection.date_created.type: 'date'
427 | projection.date_created.format: 'yyyy/MM/dd'
428 | projection.date_created.interval: '1'
429 | projection.date_created.interval.unit: 'DAYS'
430 | projection.date_created.range: '2021/01/01,NOW'
431 | projection.source_partition.type: 'enum'
432 | projection.source_partition.values: 'heidi.taginfo'
433 | storage.location.template: !Join ['', ['s3://', !Ref DataCollectionBucket, '/DataCollection-data/${source_partition}/${date_created}/']]
434 | StorageDescriptor:
435 | # Columns and their data types for the table
436 | Columns:
437 | - Name: version
438 | Type: string
439 | Comment: 'from deserializer'
440 | - Name: id
441 | Type: string
442 | Comment: 'from deserializer'
443 | - Name: detail-type
444 | Type: string
445 | Comment: 'from deserializer'
446 | - Name: source
447 | Type: string
448 | Comment: 'from deserializer'
449 | - Name: account
450 | Type: string
451 | Comment: 'from deserializer'
452 | - Name: time
453 | Type: string
454 | Comment: 'from deserializer'
455 | - Name: region
456 | Type: string
457 | Comment: 'from deserializer'
458 | - Name: resources
459 | Type: array<string>
460 | Comment: 'from deserializer'
461 | - Name: detail
462 | Type: struct>>
463 | Comment: 'from deserializer'
464 | # S3 location of the data for the Athena External Table
465 | Location: !Sub 's3://${DataCollectionBucket}/DataCollection-data'
466 | InputFormat: 'org.apache.hadoop.mapred.TextInputFormat'
467 | OutputFormat: 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
468 | SerdeInfo:
469 | SerializationLibrary: 'org.openx.data.jsonserde.JsonSerDe'
470 | Parameters:
471 | paths: 'account,detail,detail-type,id,region,resources,source,time,version'
472 | Compressed: false
473 | TableType: EXTERNAL_TABLE
474 | Retention: 30
--------------------------------------------------------------------------------
/src/DataCollectionModule/HeidiRoot.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | AWSTemplateFormatVersion: "2010-09-09"
3 | Description: Root stack for DataCollection Bus ES-Heidi
4 |
5 | Parameters:
6 | AWSOrganizationID:
7 | Type: String
8 | Description: Comma-delimited list of AWS Organizations IDs (one per payer account) whose accounts are allowed to put events on the event bus.
9 | DataCollectionBucket:
10 | Type: String
11 | Description: Name of the S3 bucket to be created to hold the collected data.
12 | DataCollectionBucketKmsArn:
13 | Type: String
14 | Default: "na"
15 | Description: Enter the KMS key ARN if the supplied destination bucket is encrypted with KMS (leave as "na" for SSE encryption)
16 | AthenaResultBucket:
17 | Type: String
18 | Default: "aws-athena-query-results-*"
19 | Description: S3 Bucket where Amazon Athena stores results
20 | AthenaBucketKmsArn:
21 | Type: String
22 | Default: "na"
23 | Description: Enter the KMS key ARN if the Athena results bucket is encrypted with KMS (leave as "na" for SSE encryption)
24 | QuickSightAnalysisAuthor:
25 | Type: String
26 | Description: The QuickSight analysis author ARN that is allowed to configure and manage the QS analyses and dashboards. e.g. arn:aws:quicksight:::user/default/
27 | ResourcePrefix:
28 | Type: String
29 | Description: This prefix will be placed in front of resources created where required. Note you may wish to add a dash at the end to make it more readable
30 | Default: "heidi-"
31 | EnableHealthModule:
32 | Type: String
33 | Description: Collects AWS Health data from different accounts and creates the QuickSight analysis
34 | Default: "yes"
35 | AllowedValues:
36 | - "yes"
37 | - "no"
38 | EnableHealthEventUrl:
39 | Type: String
40 | Description: "Optional: Event URL are for easy read. Enabling this would create DynamoDB, APIGW and Lambda"
41 | Default: "no"
42 | AllowedValues:
43 | - "yes"
44 | - "no"
45 | Enabletaginfo:
46 | Type: String
47 | Description: "Optional: Enable Tag enrichment to pull tagging info from resource explorer API"
48 | Default: "no"
49 | AllowedValues:
50 | - "yes"
51 | - "no"
52 | ResourceExplorerViewArn:
53 | Type: String
54 | Default: "na"
55 | Description: If Enabletaginfo is "yes", the Resource Explorer view ARN is required.
56 | EnableNotificationModule:
57 | Type: String
58 | Description: "Optional: This required preauth with chatbot and slack/teams as prereq."
59 | Default: "no"
60 | AllowedValues:
61 | - "yes"
62 | - "no"
63 | SlackChannelId:
64 | Type: String
65 | Default: "na"
66 | Description: If EnableNotificationModule, ensure that the SlackChannelId is provided when the channel is Slack.
67 | SlackWorkspaceId:
68 | Type: String
69 | Default: "na"
70 | Description: If EnableNotificationModule, ensure that the SlackWorkspaceId is provided when the channel is Slack.
71 | TeamId:
72 | Type: String
73 | Description: If EnableNotificationModule, ensure that the TeamId is provided when the channel is Microsoft Teams.
74 | Default: "na"
75 | TeamsTenantId:
76 | Type: String
77 | Description: If EnableNotificationModule, ensure that the TeamsTenantId is provided when the channel is Microsoft Teams.
78 | Default: "na"
79 | TeamsChannelId:
80 | Type: String
81 | Description: If EnableNotificationModule, ensure that the TeamsChannelId is provided when the channel is Microsoft Teams.
82 | Default: "na"
83 |
84 | Conditions:
85 | EnableHealthModule: !Equals [ !Ref EnableHealthModule, "yes"]
86 | EnableHealthEventUrl: !Equals [ !Ref EnableHealthEventUrl, "yes"]
87 | EnableNotificationModule: !Equals [ !Ref EnableNotificationModule, "yes"]
88 | Enabletaginfo: !Equals [ !Ref Enabletaginfo, "yes"]
89 | DeployHealthEventUrl: !And
90 | - !Condition EnableHealthModule
91 | - !Condition EnableHealthEventUrl
92 | DeploytaginfoSetup: !And
93 | - !Condition EnableHealthModule
94 | - !Condition Enabletaginfo
95 |
96 | Resources:
97 | DataCollectionModule:
98 | Type: AWS::CloudFormation::Stack
99 | Properties:
100 | TemplateURL: !Sub https://${DataCollectionBucket}.s3.amazonaws.com/DataCollection-metadata/DataCollectionModule/DataCollectionModule.yaml
101 | Parameters:
102 | AWSOrganizationID: !Ref AWSOrganizationID
103 | DataCollectionBucket: !Ref DataCollectionBucket
104 | DataCollectionBucketKmsArn: !Ref DataCollectionBucketKmsArn
105 | AthenaResultBucket: !Ref AthenaResultBucket
106 | AthenaBucketKmsArn: !Ref AthenaBucketKmsArn
107 | ResourcePrefix: !Ref ResourcePrefix
108 | EnableHealthModule: !Ref EnableHealthModule
109 |
110 | ####Notification Module Stack Start########
111 | NotificationModuleSetup:
112 | Type: AWS::CloudFormation::Stack
113 | DependsOn: DataCollectionModule
114 | Condition: EnableNotificationModule
115 | Properties:
116 | TemplateURL: !Sub https://${DataCollectionBucket}.s3.amazonaws.com/DataCollection-metadata/NotificationModule/NotificationModule.yaml
117 | Parameters:
118 | DataCollectionAccountID: !Sub ${AWS::AccountId}
119 | ResourcePrefix: !Ref ResourcePrefix
120 | SlackChannelId: !Ref SlackChannelId
121 | SlackWorkspaceId: !Ref SlackWorkspaceId
122 | TeamId: !Ref TeamId
123 | TeamsTenantId: !Ref TeamsTenantId
124 | TeamsChannelId: !Ref TeamsChannelId
125 |
126 | ####Health Module Stack Start#####
127 | HealthModuleCollectionSetup:
128 | Type: AWS::CloudFormation::Stack
129 | DependsOn: DataCollectionModule
130 | Condition: EnableHealthModule
131 | Properties:
132 | TemplateURL: !Sub https://${DataCollectionBucket}.s3.amazonaws.com/DataCollection-metadata/HealthModule/HealthModuleCollectionSetup.yaml
133 | Parameters:
134 | DataCollectionAccountID: !Sub ${AWS::AccountId}
135 | DataCollectionRegion: !Sub ${AWS::Region}
136 | ResourcePrefix: !Ref ResourcePrefix
137 |
138 | HealthModuleDataSetSetup:
139 | Type: AWS::CloudFormation::Stack
140 | DependsOn: HealthModuleCollectionSetup
141 | Condition: EnableHealthModule
142 | Properties:
143 | TemplateURL: !Sub https://${DataCollectionBucket}.s3.amazonaws.com/DataCollection-metadata/HealthModule/HealthModuleDataSetSetup.yaml
144 | Parameters:
145 | DataCollectionBucket: !Ref DataCollectionBucket
146 | QuickSightAnalysisAuthor: !Ref QuickSightAnalysisAuthor
147 | HeidiDataCollectionDB: !GetAtt DataCollectionModule.Outputs.HeidiDataCollectionDB
148 | HeidiQSDataSourceArn: !GetAtt DataCollectionModule.Outputs.HeidiQSDataSourceArn
149 | ResourcePrefix: !Ref ResourcePrefix
150 |
151 | HealthModuleEventUrlSetup:
152 | Type: AWS::CloudFormation::Stack
153 | Condition: DeployHealthEventUrl
154 | Properties:
155 | TemplateURL: !Sub https://${DataCollectionBucket}.s3.amazonaws.com/DataCollection-metadata/HealthModule/HealthModuleEventUrlSetup.yaml
156 | Parameters:
157 | DataCollectionAccountID: !Sub ${AWS::AccountId}
158 | DataCollectionRegion: !Sub ${AWS::Region}
159 | ResourcePrefix: !Ref ResourcePrefix
160 |
161 | HealthModuleTaginfoSetup:
162 | Type: AWS::CloudFormation::Stack
163 | Condition: DeploytaginfoSetup
164 | Properties:
165 | TemplateURL: !Sub https://${DataCollectionBucket}.s3.amazonaws.com/DataCollection-metadata/HealthModule/HealthModuleTaginfoSetup.yaml
166 | Parameters:
167 | DataCollectionAccountID: !Sub ${AWS::AccountId}
168 | DataCollectionRegion: !Sub ${AWS::Region}
169 | ResourcePrefix: !Ref ResourcePrefix
170 | ResourceExplorerViewArn: !Ref ResourceExplorerViewArn
171 |
172 | HealthModuleQSAnalysis:
173 | Type: AWS::CloudFormation::Stack
174 | DependsOn: HealthModuleCollectionSetup
175 | Condition: EnableHealthModule
176 | Properties:
177 | TemplateURL: !Sub https://${DataCollectionBucket}.s3.amazonaws.com/DataCollection-metadata/HealthModule/HealthModuleQSAnalysis.yaml
178 | Parameters:
179 | QuickSightAnalysisAuthor: !Ref QuickSightAnalysisAuthor
180 | QSDataSetHealthEvent: !GetAtt HealthModuleDataSetSetup.Outputs.QSDataSetHealthEvent
181 | EventDetailUrl: !If [ EnableHealthEventUrl, !GetAtt HealthModuleEventUrlSetup.Outputs.EventDetailApiEndpoint, "https://example.com" ]
182 | ResourcePrefix: !Ref ResourcePrefix
--------------------------------------------------------------------------------
/src/HealthModule/HealthModuleCollectionSetup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | AWSTemplateFormatVersion: '2010-09-09'
3 | Description: Health Module Data Collection Setup
4 |
5 | Parameters:
6 | DataCollectionAccountID:
7 | Type: String
8 | Description: Account ID where the collector is deployed
9 | DataCollectionRegion:
10 | Type: String
11 | Description: Region where the collector is deployed
12 | ResourcePrefix:
13 | Type: String
14 | Description: This prefix will be placed in front of resources created where required. Note you may wish to add a dash at the end to make it more readable
15 | Default: "heidi-"
16 |
17 | Resources:
18 | DataCollectionRuleRole:
19 | Type: AWS::IAM::Role
20 | Properties:
21 | AssumeRolePolicyDocument:
22 | Version: "2012-10-17"
23 | Statement:
24 | - Effect: Allow
25 | Principal:
26 | Service: events.amazonaws.com
27 | Action: sts:AssumeRole
28 | Path: "/"
29 | Policies:
30 | - PolicyName: !Sub "HealthEventPutEvent-${AWS::AccountId}-${AWS::Region}-Policy"
31 | PolicyDocument:
32 | Version: "2012-10-17"
33 | Statement:
34 | - Effect: Allow
35 | Action: "events:PutEvents"
36 | Resource:
37 | - !Sub "arn:${AWS::Partition}:events:${DataCollectionRegion}:${DataCollectionAccountID}:event-bus/${ResourcePrefix}DataCollectionBus-${DataCollectionAccountID}"
38 |
39 | DefaultBusRuleHealth:
40 | Type: "AWS::Events::Rule"
41 | Properties:
42 | Description: "EventBridge default rule for aws.health events"
43 | EventBusName: "default"
44 | EventPattern:
45 | source:
46 | - "aws.health"
47 | - "heidi.health"
48 | Targets:
49 | - Arn: !Sub "arn:${AWS::Partition}:events:${DataCollectionRegion}:${DataCollectionAccountID}:event-bus/${ResourcePrefix}DataCollectionBus-${DataCollectionAccountID}"
50 | Id: DataCollectionRuleOnDefaultBus
51 | RoleArn: !GetAtt DataCollectionRuleRole.Arn
--------------------------------------------------------------------------------
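Note: the `DefaultBusRuleHealth` rule above forwards any `aws.health` or `heidi.health` event from a member account's default bus to the central DataCollection bus. A minimal sketch of exercising that path with boto3 (the detail payload is hypothetical; the `DetailType` mirrors the one used by the backfill Lambda later in this repo):

```python
import json
import boto3

# Put a test event on the member account's default bus; DefaultBusRuleHealth
# forwards anything with a matching source to the central DataCollection bus.
events = boto3.client("events")
response = events.put_events(
    Entries=[{
        "Source": "heidi.health",       # matches the rule's EventPattern
        "DetailType": "awshealthtest",
        "Detail": json.dumps({"eventArn": "arn:aws:health:us-west-2::event/EXAMPLE"}),  # hypothetical payload
        "EventBusName": "default",
    }]
)
print("Failed entries:", response["FailedEntryCount"])
```
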
/src/HealthModule/HealthModuleDataSetSetup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | AWSTemplateFormatVersion: '2010-09-09'
3 | Description: Health Module QS DataSet Setup
4 |
5 | Parameters:
6 | DataCollectionBucket:
7 | Type: String
8 | Description: Name of the S3 bucket that holds the collected data
9 | QuickSightAnalysisAuthor:
10 | Type: String
11 | Description: ARN of the QuickSight analysis author allowed to configure and manage the QS analyses and dashboards. e.g. arn:aws:quicksight:::user/default/
12 | HeidiDataCollectionDB:
13 | Type: String
14 | Description: Athena DataCollection DB
15 | Default: "datacollectiondb"
16 | HeidiQSDataSourceArn:
17 | Type: String
18 | Description: QuickSight data source ARN.
19 | ResourcePrefix:
20 | Type: String
21 | Description: This prefix is placed in front of created resources where required. You may wish to add a trailing dash for readability
22 | Default: "heidi-"
23 |
24 | Outputs:
25 | QSDataSetHealthEvent:
26 | Value: !GetAtt QSDataSetHealthEvent.Arn
27 | Export:
28 | Name: !Sub ${ResourcePrefix}QSDataSetHealthEvent
29 |
30 | Resources:
31 | GlueHealthTable:
32 | # AWS Glue Table resource representing AWS Health External Table
33 | Type: AWS::Glue::Table
34 | Properties:
35 | # Reference to the AWS Glue Database
36 | DatabaseName: !Sub ${ResourcePrefix}${HeidiDataCollectionDB}
37 | CatalogId: !Sub '${AWS::AccountId}'
38 | TableInput:
39 | Name: awshealthevent
40 | Description: 'AWS Health Events Data'
41 | Owner: GlueTeam
42 | PartitionKeys:
43 | - Name: date_created
44 | Type: string
45 | - Name: source_partition
46 | Type: string
47 | Parameters:
48 | EXTERNAL: 'TRUE' # 'EXTERNAL' should be a string
49 | projection.enabled: 'true'
50 | projection.date_created.type: 'date'
51 | projection.date_created.format: 'yyyy/MM/dd'
52 | projection.date_created.interval: '1'
53 | projection.date_created.interval.unit: 'DAYS'
54 | projection.date_created.range: '2021/01/01,NOW'
55 | projection.source_partition.type: 'enum'
56 | projection.source_partition.values: 'heidi.health,aws.health,awshealthtest'
57 | storage.location.template: !Join ['', ['s3://', !Ref DataCollectionBucket, '/DataCollection-data/${source_partition}/${date_created}/']]
58 | StorageDescriptor:
59 | # Columns and their data types for the table
60 | Columns:
61 | - Name: version
62 | Type: string
63 | Comment: 'from deserializer'
64 | - Name: id
65 | Type: string
66 | Comment: 'from deserializer'
67 | - Name: detail-type
68 | Type: string
69 | Comment: 'from deserializer'
70 | - Name: source
71 | Type: string
72 | Comment: 'from deserializer'
73 | - Name: account
74 | Type: string
75 | Comment: 'from deserializer'
76 | - Name: time
77 | Type: string
78 | Comment: 'from deserializer'
79 | - Name: region
80 | Type: string
81 | Comment: 'from deserializer'
82 | - Name: resources
83 | Type: array
84 | Comment: 'from deserializer'
85 | - Name: detail
86 | Type: struct<eventArn:string,service:string,eventScopeCode:string,communicationid:string,lastUpdatedTime:string,statusCode:string,eventRegion:string,eventTypeCode:string,eventTypeCategory:string,affectedAccount:string,startTime:string,endTime:string,eventdescription:array<struct<language:string,latestdescription:string>>,eventMetadata:string,affectedentities:array<struct<entityValue:string,lastupdatedTime:string,status:string,tags:map<string,string>>>>
87 | Comment: 'from deserializer'
88 | # S3 location of the data for the Athena External Table
89 | Location: !Sub 's3://${DataCollectionBucket}/DataCollection-data'
90 | InputFormat: 'org.apache.hadoop.mapred.TextInputFormat'
91 | OutputFormat: 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
92 | SerdeInfo:
93 | SerializationLibrary: 'org.openx.data.jsonserde.JsonSerDe'
94 | Parameters:
95 | paths: 'account,detail,detail-type,id,region,resources,source,time,version'
96 | Compressed: false
97 | TableType: EXTERNAL_TABLE
98 | Retention: 30
99 |
100 | QSDataSetHealthEvent:
101 | # Create an AWS QuickSight DataSet for AWS Health events
102 | Type: AWS::QuickSight::DataSet
103 | Properties:
104 | AwsAccountId: !Sub ${AWS::AccountId}
105 | ImportMode: SPICE
106 | DataSetId: !Sub "${ResourcePrefix}${AWS::AccountId}-${AWS::Region}"
107 | Name: !Sub "${ResourcePrefix}${AWS::AccountId}-${AWS::Region}"
108 | PhysicalTableMap:
109 | "AWSHealthQSPT":
110 | CustomSql:
111 | DataSourceArn: !Ref HeidiQSDataSourceArn
112 | Name: !Sub "${ResourcePrefix}${AWS::AccountId}-${AWS::Region}"
113 | SqlQuery: !Sub |-
114 | WITH latestRow AS (
115 | select * from (SELECT
116 | detail.eventTypeCode,
117 | source AS eventSource,
118 | COALESCE(detail.affectedAccount, account) as account,
119 | detail.service,
120 | detail.eventScopeCode,
121 | detail.eventMetadata,
122 | CASE
123 | WHEN ((detail.eventTypeCategory = 'scheduledChange') AND (detail.eventArn like '%PLANNED_LIFECYCLE_EVENT%')) THEN 'PlannedLifeCycle'
124 | ELSE detail.eventTypeCategory
125 | END AS "eventTypeCategory",
126 | detail.eventArn,
127 | detail.communicationid,
128 | detail.eventRegion,
129 | entities.entityValue AS affectedEntities,
130 | entities.status As affectedEntityStatus,
131 | SUBSTRING(detail.eventdescription[1].latestdescription, 1, 2000) AS eventDescription1,
132 | SUBSTRING(detail.eventdescription[1].latestdescription, 2001) AS eventDescription2,
133 | json_extract_scalar(detail.eventMetadata, '$.deprecated_versions') AS deprecated_versions,
134 | rank() OVER (PARTITION BY detail.eventArn, COALESCE(detail.affectedAccount, account) ORDER BY time DESC) AS rowrank,
135 | array_join(resources, ', ') AS resources,
136 | CAST(from_iso8601_timestamp("time") AS timestamp) AS ingestionTime,
137 | CAST(date_parse(detail.endTime, '%a, %e %b %Y %H:%i:%s GMT') AS timestamp) AS endTime,
138 | CAST(date_parse(detail.startTime, '%a, %e %b %Y %H:%i:%s GMT') AS timestamp) AS startTime,
139 | CAST(date_parse(detail.lastUpdatedTime, '%a, %e %b %Y %H:%i:%s GMT') AS timestamp) AS lastUpdatedTime,
140 | CAST(DATE_DIFF('HOUR', CAST(date_parse(detail.startTime, '%a, %e %b %Y %H:%i:%s GMT') AS timestamp), CAST(date_parse(detail.endTime, '%a, %e %b %Y %H:%i:%s GMT') AS timestamp)) AS BIGINT) AS eventDuration,
141 | CASE
142 | WHEN ((CAST(date_parse(detail.endTime, '%a, %e %b %Y %H:%i:%s GMT') AS timestamp) IS NULL) AND (detail.eventTypeCategory = 'scheduledChange')) THEN detail.statusCode
143 | WHEN (((CAST(date_parse(detail.startTime, '%a, %e %b %Y %H:%i:%s GMT') AS timestamp) + (15 * INTERVAL '1' DAY)) < current_timestamp) AND (detail.eventTypeCategory = 'accountNotification') AND (CAST(date_parse(detail.endTime, '%a, %e %b %Y %H:%i:%s GMT') AS timestamp) IS NULL)) THEN 'closed'
144 | WHEN (CAST(date_parse(detail.endTime, '%a, %e %b %Y %H:%i:%s GMT') AS timestamp) IS NULL) THEN 'open'
145 | ELSE detail.statusCode
146 | END AS "statusCode",
147 | CASE
148 | WHEN (detail.eventArn like '%PLANNED_LIFECYCLE_EVENT%') THEN 'Y'
149 | ELSE 'N'
150 | END AS "plannedLifeCycleEvent"
151 | FROM "AwsDataCatalog"."${ResourcePrefix}${HeidiDataCollectionDB}"."awshealthevent"
152 | LEFT JOIN UNNEST(detail.affectedEntities) AS t(entities) ON TRUE)
153 | WHERE rowrank = 1),
154 | tagInfo AS (select * from (
155 | SELECT
156 | detail.entityarn as entityArn,
157 | rank() OVER (PARTITION BY detail.entityarn, tags.entitykey,tags.entityvalue ORDER BY time DESC) AS rowranktag,
158 | '' as entityAZ,
159 | tags.entitykey as entityTagKey,
160 | tags.entityvalue as entityTagValue
161 | FROM "AwsDataCatalog"."${ResourcePrefix}${HeidiDataCollectionDB}"."taginfo", unnest(detail.tags) as t(tags)) where rowranktag =1)
162 | SELECT
163 | detail.*,
164 | taginfo.*,
165 | COALESCE(accountinfo."name", detail."account") AS accountName,
166 | accountinfo.Tag as accountTag
167 | FROM latestRow detail
168 | LEFT JOIN "AwsDataCatalog"."${ResourcePrefix}${HeidiDataCollectionDB}"."accountsinfo" accountinfo ON detail.account = accountinfo."accountid"
169 | LEFT JOIN tagInfo ON detail.affectedEntities in (taginfo.entityarn)
170 | Columns:
171 | - Name: eventTypeCode
172 | Type: STRING
173 | - Name: affectedEntityStatus
174 | Type: STRING
175 | - Name: rowrank
176 | Type: INTEGER
177 | - Name: eventSource
178 | Type: STRING
179 | - Name: account
180 | Type: STRING
181 | - Name: eventScopeCode
182 | Type: STRING
183 | - Name: eventTypeCategory
184 | Type: STRING
185 | - Name: eventArn
186 | Type: STRING
187 | - Name: communicationid
188 | Type: STRING
189 | - Name: eventDescription1
190 | Type: STRING
191 | - Name: eventDescription2
192 | Type: STRING
193 | - Name: deprecated_versions
194 | Type: STRING
195 | - Name: eventMetadata
196 | Type: STRING
197 | - Name: resources
198 | Type: STRING
199 | - Name: ingestionTime
200 | Type: DATETIME
201 | - Name: endTime
202 | Type: DATETIME
203 | - Name: startTime
204 | Type: DATETIME
205 | - Name: lastUpdatedTime
206 | Type: DATETIME
207 | - Name: eventDuration
208 | Type: INTEGER
209 | - Name: statusCode
210 | Type: STRING
211 | - Name: eventRegion
212 | Type: STRING
213 | - Name: service
214 | Type: STRING
215 | - Name: accountName
216 | Type: STRING
217 | - Name: accountTag
218 | Type: STRING
219 | - Name: affectedEntities
220 | Type: STRING
221 | - Name: entityArn
222 | Type: STRING
223 | - Name: entityAZ
224 | Type: STRING
225 | - Name: entityTagKey
226 | Type: STRING
227 | - Name: entityTagValue
228 | Type: STRING
229 | - Name: plannedLifeCycleEvent
230 | Type: STRING
231 | LogicalTableMap:
232 | AWSHealthQSLT:
233 | Alias: !Sub "awshealthevents-${AWS::AccountId}-${AWS::Region}"
234 | DataTransforms:
235 | - ProjectOperation:
236 | ProjectedColumns:
237 | - eventTypeCode
238 | - affectedEntityStatus
239 | - rowrank
240 | - eventSource
241 | - account
242 | - eventScopeCode
243 | - eventTypeCategory
244 | - eventArn
245 | - communicationid
246 | - eventDescription1
247 | - eventDescription2
248 | - deprecated_versions
249 | - eventMetadata
250 | - resources
251 | - ingestionTime
252 | - endTime
253 | - startTime
254 | - lastUpdatedTime
255 | - eventDuration
256 | - statusCode
257 | - eventRegion
258 | - service
259 | - accountName
260 | - accountTag
261 | - affectedEntities
262 | - entityArn
263 | - entityAZ
264 | - entityTagKey
265 | - entityTagValue
266 | - plannedLifeCycleEvent
267 | Source:
268 | PhysicalTableId: AWSHealthQSPT
269 | Permissions:
270 | - Principal: !Sub "${QuickSightAnalysisAuthor}"
271 | Actions:
272 | - quicksight:DescribeDataSet
273 | - quicksight:DescribeDataSetPermissions
274 | - quicksight:PassDataSet
275 | - quicksight:DescribeIngestion
276 | - quicksight:ListIngestions
277 | - quicksight:UpdateDataSet
278 | - quicksight:DeleteDataSet
279 | - quicksight:CreateIngestion
280 | - quicksight:CancelIngestion
281 | - quicksight:UpdateDataSetPermissions
282 |
283 | QSDataSetHealthEventRefresh:
284 | # Create an AWS QuickSight Refresh Schedule for AWS Health events
285 | DependsOn: QSDataSetHealthEvent
286 | Type: AWS::QuickSight::RefreshSchedule
287 | Properties:
288 | AwsAccountId: !Sub ${AWS::AccountId}
289 | DataSetId: !Sub "${ResourcePrefix}${AWS::AccountId}-${AWS::Region}"
290 | Schedule:
291 | RefreshType: FULL_REFRESH
292 | ScheduleFrequency:
293 | Interval: HOURLY
294 | ScheduleId: QSDataSetHealthEventRefresh
--------------------------------------------------------------------------------
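Note: once this dataset stack is deployed, the underlying Glue table can also be queried directly with Athena. A minimal sketch, assuming the template defaults (`heidi-` prefix plus the `datacollectiondb` database name) and an Athena result bucket of your own (the bucket name below is a placeholder); the `date_created` filter is pruned via the partition projection configured above:

```python
import boto3

athena = boto3.client("athena")
query = """
SELECT source, count(*) AS events
FROM awshealthevent
WHERE date_created >= '2024/01/01'  -- partition projection uses yyyy/MM/dd strings
GROUP BY source
"""
response = athena.start_query_execution(
    QueryString=query,
    QueryExecutionContext={"Database": "heidi-datacollectiondb"},
    ResultConfiguration={"OutputLocation": "s3://YOUR-ATHENA-RESULTS-BUCKET/"},  # placeholder bucket
)
print("Query started:", response["QueryExecutionId"])
```
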
/src/HealthModule/HealthModuleEventUrlSetup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | AWSTemplateFormatVersion: '2010-09-09'
3 | Description: Health Module EventUrl Setup
4 |
5 | Parameters:
6 | DataCollectionAccountID:
7 | Type: String
8 | Description: Account ID of the account where the collector is deployed
9 | DataCollectionRegion:
10 | Type: String
11 | Description: Region where the collector is deployed
12 | ResourcePrefix:
13 | Type: String
14 | Description: This prefix is placed in front of created resources where required. You may wish to add a trailing dash for readability
15 | Default: "heidi-"
16 | AllowedIpRange:
17 | Default: "0.0.0.0/32"
18 | Type: String
19 | Description: IP CIDR range that is allowed to access the EventDetail URLs.
20 | AuthorizationType:
21 | Default: "NONE"
22 | Type: String
23 | Description: Authorization type for the API method. Valid values are ["NONE", "AWS_IAM", "CUSTOM", "COGNITO_USER_POOLS"]
24 |
25 | Resources:
26 | HealthEventDynamoDB:
27 | DeletionPolicy: Retain
28 | UpdateReplacePolicy: Retain
29 | Type: AWS::DynamoDB::GlobalTable
30 | Properties:
31 | AttributeDefinitions:
32 | - AttributeName: eventArn
33 | AttributeType: S
34 | - AttributeName: account
35 | AttributeType: S
36 | KeySchema:
37 | - AttributeName: eventArn
38 | KeyType: HASH
39 | - AttributeName: account
40 | KeyType: RANGE
41 | BillingMode: PAY_PER_REQUEST
42 | StreamSpecification:
43 | StreamViewType: NEW_IMAGE
44 | Replicas:
45 | - Region: !Sub "${AWS::Region}"
46 |
47 | HealthEventLambdaDdbRole:
48 | Type: AWS::IAM::Role
49 | Properties:
50 | AssumeRolePolicyDocument:
51 | Version: '2012-10-17'
52 | Statement:
53 | - Effect: Allow
54 | Principal:
55 | Service: lambda.amazonaws.com
56 | Action: sts:AssumeRole
57 | Policies:
58 | - PolicyName: cloudwatch-logsAccess-Policy
59 | PolicyDocument:
60 | Version: '2012-10-17'
61 | Statement:
62 | - Effect: Allow
63 | Action:
64 | - logs:CreateLogGroup
65 | Resource: !Sub "arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*"
66 | - Effect: Allow
67 | Action:
68 | - logs:CreateLogStream
69 | - logs:PutLogEvents
70 | Resource: !Sub "arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/*"
71 | - PolicyName: AwshealtheventDDBAccess-Policy
72 | PolicyDocument:
73 | Version: '2012-10-17'
74 | Statement:
75 | - Effect: Allow
76 | Action:
77 | - dynamodb:PutItem
78 | Resource: !GetAtt HealthEventDynamoDB.Arn
79 | - PolicyName: AwshealtheventSendEventAccess-Policy
80 | PolicyDocument:
81 | Version: '2012-10-17'
82 | Statement:
83 | - Effect: Allow
84 | Action:
85 | - "events:PutEvents"
86 | Resource: !Sub "arn:${AWS::Partition}:events:${DataCollectionRegion}:${DataCollectionAccountID}:event-bus/${ResourcePrefix}DataCollectionBus-${DataCollectionAccountID}"
87 |
88 | HealthEventLambdaDdb:
89 | Type: AWS::Lambda::Function
90 | Metadata:
91 | cfn_nag:
92 | rules_to_suppress:
93 | - id: W58
94 | reason: "Given AWSLambda ExecutionRole and allows Cloudwatch"
95 | Properties:
96 | Code:
97 | ZipFile: |
98 | import json
99 | import boto3
100 | import os
101 | # Initialize the DynamoDB client
102 | dynamodb = boto3.resource('dynamodb')
103 | table = dynamodb.Table(os.environ['DynamoDBName'])
104 |
105 | def lambda_handler(event, context):
106 | try:
107 | # Extract the data from the event
108 | payload = event['detail']
109 | event_data = {
110 | 'eventDescription': payload.get('eventDescription', [{'latestDescription': None}])[0]['latestDescription'],
111 | 'affectedEntities': ', '.join(entity['entityValue'] for entity in payload.get('affectedEntities', [])),
112 | 'account': event.get('account')
113 | }
114 | event_data.update((key, value) for key, value in payload.items() if key not in event_data)
115 | print(event_data)
116 |
117 | # Put the data into DynamoDB
118 | response = table.put_item(Item=event_data)
119 |
120 | # If successful, return the response
121 | return {
122 | 'statusCode': 200,
123 | 'body': json.dumps('Data inserted successfully.')
124 | }
125 | except Exception as e:
126 | # If there's an error, return the error message
127 | return {
128 | 'statusCode': 500,
129 | 'body': json.dumps(str(e))
130 | }
131 | Handler: index.lambda_handler
132 | Runtime: python3.11
133 | Timeout: 900
134 | ReservedConcurrentExecutions: 5
135 | Role: !GetAtt HealthEventLambdaDdbRole.Arn
136 | Environment:
137 | Variables:
138 | DynamoDBName: !Ref HealthEventDynamoDB
139 |
140 | HealthEventDataCollectionBusRule:
141 | Type: "AWS::Events::Rule"
142 | Properties:
143 | Description: "Event Health bus rule for aws.health events"
144 | EventBusName: !Sub ${ResourcePrefix}DataCollectionBus-${DataCollectionAccountID}
145 | EventPattern:
146 | source:
147 | - "heidi.health"
148 | - "aws.health"
149 | Targets:
150 | - Arn: !GetAtt HealthEventLambdaDdb.Arn
151 | Id: "LambdaAsTarget"
152 |
153 | EventHealthLambdaForDDBPermissions:
154 | Type: "AWS::Lambda::Permission"
155 | Properties:
156 | Action: lambda:InvokeFunction
157 | FunctionName: !GetAtt HealthEventLambdaDdb.Arn
158 | Principal: events.amazonaws.com
159 | SourceArn: !GetAtt HealthEventDataCollectionBusRule.Arn
160 |
161 | apiGatewayRole:
162 | Type: AWS::IAM::Role
163 | Properties:
164 | AssumeRolePolicyDocument:
165 | Version: 2012-10-17
166 | Statement:
167 | - Sid: AllowApiGatewayServiceToAssumeRole
168 | Effect: Allow
169 | Action:
170 | - 'sts:AssumeRole'
171 | Principal:
172 | Service:
173 | - apigateway.amazonaws.com
174 | Policies:
175 | - PolicyName: dynamoDBAccess
176 | PolicyDocument:
177 | Version: '2012-10-17'
178 | Statement:
179 | - Effect: Allow
180 | Action:
181 | - dynamodb:GetItem
182 | Resource: !GetAtt HealthEventDynamoDB.Arn
183 | - PolicyName: ApiGatewayLogsPolicy
184 | PolicyDocument:
185 | Statement:
186 | - Effect: Allow
187 | Action:
188 | - logs:CreateLogGroup
189 | - logs:CreateLogStream
190 | - logs:DescribeLogGroups
191 | - logs:DescribeLogStreams
192 | - logs:PutLogEvents
193 | - logs:GetLogEvents
194 | - logs:FilterLogEvents
195 | Resource: !Sub "arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*"
196 |
197 | ApiGatewayLogs:
198 | Type: AWS::Logs::LogGroup
199 | DeletionPolicy: Retain
200 | UpdateReplacePolicy: Retain
201 | Properties:
202 | LogGroupName: !Sub /aws/api-gateway/${apiGateway}
203 | RetentionInDays: 30
204 |
205 | APIGatewayAccountSettings:
206 | Type: AWS::ApiGateway::Account
207 | Properties:
208 | CloudWatchRoleArn: !GetAtt apiGatewayRole.Arn
209 |
210 | apiGateway:
211 | Type: AWS::ApiGateway::RestApi
212 | Properties:
213 | EndpointConfiguration:
214 | Types:
215 | - REGIONAL
216 | Name: !Sub HealthEventDetailUrl-${AWS::AccountId}-${AWS::Region}-api
217 | Description: (AWSHEIDI) for eventdetail Urls
218 | Policy: {
219 | "Version": "2012-10-17",
220 | "Statement": [
221 | {
222 | "Effect": "Deny",
223 | "Principal": "*",
224 | "Action": "execute-api:Invoke",
225 | "Resource": "execute-api:/*",
226 | "Condition": {
227 | "NotIpAddress": {
228 | "aws:SourceIp": !Ref AllowedIpRange
229 | }
230 | }
231 | },
232 | {
233 | "Effect": "Allow",
234 | "Principal": "*",
235 | "Action": "execute-api:Invoke",
236 | "Resource": "execute-api:/*"
237 | }
238 | ]
239 | }
240 |
241 | apiGatewayMethodResource:
242 | Type: AWS::ApiGateway::Resource
243 | Properties:
244 | RestApiId: !Ref apiGateway
245 | ParentId: !GetAtt apiGateway.RootResourceId
246 | PathPart: healthevent
247 |
248 | apiGatewayMethod:
249 | Type: AWS::ApiGateway::Method
250 | Properties:
251 | AuthorizationType: !Ref AuthorizationType
252 | HttpMethod: GET
253 | MethodResponses:
254 | - StatusCode: 200
255 | ResponseModels:
256 | text/html: Empty
257 | RequestParameters:
258 | method.request.querystring.eventArn: False
259 | method.request.querystring.account: False
260 | Integration:
261 | IntegrationHttpMethod: POST
262 | Type: AWS
263 | Credentials: !GetAtt apiGatewayRole.Arn
264 | Uri: !Sub arn:${AWS::Partition}:apigateway:${AWS::Region}:dynamodb:action/GetItem
265 | PassthroughBehavior: WHEN_NO_TEMPLATES
266 | RequestTemplates:
267 | application/json: !Sub
268 | |-
269 | {
270 | "TableName": "${HealthEventDynamoDB}",
271 | "Key":{
272 | "eventArn": {"S": "$util.escapeJavaScript($input.params().querystring.get("eventArn"))"},
273 | "account": {"S": "$util.escapeJavaScript($input.params().querystring.get("account"))"}
274 | }
275 | }
276 | IntegrationResponses:
277 | - StatusCode: 200
278 | ResponseTemplates:
279 | application/json:
280 | |-
281 | <html><body>
282 | <h3>Event Detail</h3>
283 | <p><b>Service:</b> $input.path('$.Item.service.S')</p>
284 | <p><b>Account:</b> $input.path('$.Item.account.S')</p>
285 | <p><b>Region:</b> $input.path('$.Item.eventRegion.S')</p>
286 | <p><b>Affected Entities:</b> $input.path('$.Item.affectedEntities.S')</p>
287 | <p><b>Description:</b> $input.json('$.Item.eventDescription.S').replaceAll("\\n","<br>")</p>
288 | </body></html>
289 | ResourceId: !Ref apiGatewayMethodResource
290 | RestApiId: !Ref apiGateway
291 |
292 | apiGatewayDeployment:
293 | Type: AWS::ApiGateway::Deployment
294 | DependsOn:
295 | - apiGatewayMethod
296 | Properties:
297 | RestApiId: !Ref apiGateway
298 | StageName: 'v1'
299 | StageDescription:
300 | AccessLogSetting:
301 | DestinationArn: !GetAtt ApiGatewayLogs.Arn
302 | Format: $context.requestId
303 |
304 | Outputs:
305 | EventDetailApiEndpoint:
306 | Description: "API Gateway endpoint URL for Prod stage for Product api"
307 | Value: !Sub "https://${apiGateway}.execute-api.${AWS::Region}.amazonaws.com/v1/healthevent?"
308 |
--------------------------------------------------------------------------------
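Note: the `EventDetailApiEndpoint` output above takes `eventArn` and `account` as query-string parameters and renders the matching DynamoDB item as HTML. A minimal sketch of calling it (the API ID and region in the URL are hypothetical; assumes `AuthorizationType` is NONE and your source IP falls inside `AllowedIpRange`):

```python
import urllib.parse
import urllib.request

# Hypothetical API id/region; use the EventDetailApiEndpoint stack output instead.
endpoint = "https://abc123.execute-api.us-east-1.amazonaws.com/v1/healthevent"
params = urllib.parse.urlencode({
    "eventArn": "arn:aws:health:us-west-2::event/EC2/AWS_EC2_MAINTENANCE_SCHEDULED/AWS_EC2_MAINTENANCE_SCHEDULED_1693278720289",
    "account": "123456789012",
})
with urllib.request.urlopen(f"{endpoint}?{params}") as response:
    print(response.read().decode())  # HTML event-detail page
```
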
/src/HealthModule/HealthModuleTaginfoSetup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | AWSTemplateFormatVersion: '2010-09-09'
3 | Description: Template to pull tagging data
4 |
5 | Parameters:
6 | DataCollectionAccountID:
7 | Type: String
8 | Description: Account ID of the account where the collector is deployed
9 | DataCollectionRegion:
10 | Type: String
11 | Description: Region where the collector is deployed
12 | ResourcePrefix:
13 | Type: String
14 | Description: This prefix is placed in front of created resources where required. You may wish to add a trailing dash for readability
15 | Default: "heidi-"
16 | ResourceExplorerViewArn:
17 | Type: String
18 | Description: ARN of the Resource Explorer view used for tag lookups
19 |
20 | Resources:
21 | HealthModuleResourceExplorerRole:
22 | Type: AWS::IAM::Role
23 | Properties:
24 | AssumeRolePolicyDocument:
25 | Version: '2012-10-17'
26 | Statement:
27 | - Effect: Allow
28 | Principal:
29 | Service: lambda.amazonaws.com
30 | Action: sts:AssumeRole
31 | Policies:
32 | - PolicyName: cloudwatch-logsAccess-Policy
33 | PolicyDocument:
34 | Version: '2012-10-17'
35 | Statement:
36 | - Effect: Allow
37 | Action:
38 | - logs:CreateLogGroup
39 | Resource: !Sub "arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*"
40 | - Effect: Allow
41 | Action:
42 | - logs:CreateLogStream
43 | - logs:PutLogEvents
44 | Resource: !Sub "arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/*"
45 | - PolicyName: ResourceExplorer-Policy
46 | PolicyDocument:
47 | Version: '2012-10-17'
48 | Statement:
49 | - Effect: Allow
50 | Action:
51 | - "resource-explorer-2:search"
52 | Resource: !Ref ResourceExplorerViewArn
53 | - PolicyName: Putevents-Policy
54 | PolicyDocument:
55 | Version: '2012-10-17'
56 | Statement:
57 | - Effect: Allow
58 | Action:
59 | - "events:PutEvents"
60 | Resource: !Sub "arn:${AWS::Partition}:events:${DataCollectionRegion}:${DataCollectionAccountID}:event-bus/${ResourcePrefix}DataCollectionBus-${DataCollectionAccountID}"
61 |
62 | HealthModuleResourceExploreLambda:
63 | Type: AWS::Lambda::Function
64 | Metadata:
65 | cfn_nag:
66 | rules_to_suppress:
67 | - id: W58
68 | reason: "Given AWSLambda ExecutionRole and allows Cloudwatch"
69 | Properties:
70 | Code:
71 | ZipFile: |
72 | import json
73 | import boto3
74 | import os
75 | import re
76 |
77 | def lambda_handler(event, context):
78 | try:
79 | # Extract the data from the event
80 | payload = event['detail']
81 | for entity in payload.get('affectedEntities', []):
82 | entity_value = entity.get('entityValue', '')
83 | if re.match(r'^arn:.*', entity_value):
84 | resource_explorer(entity_value)
85 | except Exception as e:
86 | print(e)
87 |
88 | def resource_explorer(entityValue):
89 | try:
90 | resource_arn = os.environ['ResourceExplorerViewArn']
91 | region = resource_arn.split(":")[3]
92 | resource_explorer = boto3.client('resource-explorer-2',region)
93 | query_string = f"id:{entityValue}"
94 | view_arn = os.environ['ResourceExplorerViewArn']
95 | response = resource_explorer.search(QueryString=query_string, ViewArn=view_arn)
96 | tag_data = {}
97 | for resource in response.get('Resources', []):
98 | arn = resource.get('Arn')
99 | tags = [{'entityKey': item['Key'], 'entityValue': item['Value']} for prop in resource.get('Properties', []) for item in prop.get('Data', [])]
100 | tag_data = {'entityArn': arn, 'tags': tags}
101 | send_event(tag_data) if tags else print("No resources found")
102 | except Exception as e:
103 | print(e)
104 |
105 | def send_event(tag_data):
106 | try:
107 | eventbridge_client = boto3.client('events')
108 | response = eventbridge_client.put_events(
109 | Entries=[{
110 | 'Source': 'heidi.taginfo',
111 | 'DetailType': 'Heidi tags from resource explorer',
112 | 'Detail': json.dumps(tag_data),
113 | 'EventBusName': os.environ['EventBusName']
114 | }]
115 | )
116 | print(response)
117 | except Exception as e:
118 | print(e)
119 | Handler: index.lambda_handler
120 | Runtime: python3.11
121 | Timeout: 900
122 | ReservedConcurrentExecutions: 5
123 | Role: !GetAtt HealthModuleResourceExplorerRole.Arn
124 | Environment:
125 | Variables:
126 | ResourceExplorerViewArn: !Ref ResourceExplorerViewArn
127 | EventBusName: !Sub "${ResourcePrefix}DataCollectionBus-${DataCollectionAccountID}"
128 |
129 | HealthModuleResourceExploreRule:
130 | Type: "AWS::Events::Rule"
131 | Properties:
132 | Description: "Event Health bus rule for aws.health events"
133 | EventBusName: !Sub ${ResourcePrefix}DataCollectionBus-${DataCollectionAccountID}
134 | EventPattern:
135 | source:
136 | - "heidi.health"
137 | - "aws.health"
138 | Targets:
139 | - Arn: !GetAtt HealthModuleResourceExploreLambda.Arn
140 | Id: "LambaasTarget"
141 |
142 | HealthModuleResourceExploreLambdaPermissions:
143 | Type: "AWS::Lambda::Permission"
144 | Properties:
145 | Action: lambda:InvokeFunction
146 | FunctionName: !GetAtt HealthModuleResourceExploreLambda.Arn
147 | Principal: events.amazonaws.com
148 | SourceArn: !GetAtt HealthModuleResourceExploreRule.Arn
--------------------------------------------------------------------------------
/src/HealthModule/MockHealthEvent.json:
--------------------------------------------------------------------------------
1 | Send a mock event to test the Slack/Teams integration. Go to SNS and publish the following message.
2 |
3 | {
4 | "version":"0",
5 | "id":"5a527972-98c1-9ddd-2107-49e6b72268d9",
6 | "detail-type":"AWS Health Event",
7 | "source":"aws.health",
8 | "account":"123456789012",
9 | "time":"2023-08-29T03:30:00Z",
10 | "region":"us-west-2",
11 | "resources":[
12 |
13 | ],
14 | "detail":{
15 | "eventArn":"arn:aws:health:us-west-2::event/EC2/AWS_EC2_MAINTENANCE_SCHEDULED/AWS_EC2_MAINTENANCE_SCHEDULED_1693278720289",
16 | "service":"EC2",
17 | "eventScopeCode":"ACCOUNT_SPECIFIC",
18 | "communicationId":"32157c62a5a64a33ec5445c5c77f941128b345fa1fe98bbd8ffd7a4a708323bf",
19 | "lastUpdatedTime":"Tue, 29 Aug 2023 03:13:27 GMT",
20 | "statusCode":"upcoming",
21 | "eventRegion":"us-west-2",
22 | "eventTypeCode":"AWS_EC2_MAINTENANCE_SCHEDULED",
23 | "eventTypeCategory":"accountnotification",
24 | "startTime":"Tue, 29 Aug 2023 03:30:00 GMT",
25 | "endTime":"Tue, 29 Aug 2023 04:30:00 GMT",
26 | "eventDescription":[
27 | {
28 | "language":"en_US",
29 | "latestDescription":"This is a test AWS Health Event AWS_EC2_MAINTENANCE_SCHEDULED"
30 | }
31 | ]
32 | }
33 | }
34 |
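Equivalently, the message can be published with boto3. A minimal sketch, assuming the message body above is saved as mock-event.json and that TopicArn points at the HealthEventSNSTopic created by the Notification Module (the ARN below is hypothetical):

```python
import boto3

sns = boto3.client("sns")
with open("mock-event.json") as f:  # the message body shown above
    sns.publish(
        TopicArn="arn:aws:sns:us-east-1:123456789012:heidi-HealthEventSNSTopic",  # hypothetical ARN
        Message=f.read(),
    )
```
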
35 | Send a mock event to test the Control Account setup.
36 |
37 | {
38 | "eventArn":"arn:aws:health:us-west-2::event/EC2/PLANNED_LIFECYCLE_EVENT/AWS_EC2_MAINTENANCE_SCHEDULED_1693278720289",
39 | "service":"EC2",
40 | "eventScopeCode":"ACCOUNT_SPECIFIC",
41 | "communicationId":"32157c62a5a64a33ec5445c5c77f941128b345fa1fe98bbd8ffd7a4a708323bf",
42 | "lastUpdatedTime":"Tue, 29 Aug 2023 03:13:27 GMT",
43 | "statusCode":"upcoming",
44 | "eventRegion":"us-west-2",
45 | "eventTypeCode":"AWS_EC2_MAINTENANCE_SCHEDULED",
46 | "eventTypeCategory":"scheduledChange",
47 | "startTime":"Tue, 29 Aug 2023 03:30:00 GMT",
48 | "eventDescription":[
49 | {
50 | "language":"en_US",
51 | "latestDescription":"This is a test AWS Health Event AWS_EC2_MAINTENANCE_SCHEDULED"
52 | }
53 | ],
54 | "affectedEntities":[
55 | {"entityValue":"arn:ec2-1-101002929","lastupdatedTime": "Thu, 26 Jan 2023 19:01:55 GMT", "status": "PENDING","tags":{}},
56 | {"entityValue":"arn:ec2-1-101002930","lastupdatedTime": "Thu, 26 Jan 2023 19:05:12 GMT", "status": "RESOLVED","tags":{}},
57 | {"entityValue":"arn:ec2-1-101002931","lastupdatedTime": "Thu, 26 Jan 2023 19:07:13 GMT", "status": "UPCOMING","tags":{}},
58 | {"entityValue":"arn:ec2-1-101002932","lastupdatedTime": "Thu, 26 Jan 2023 19:10:59 GMT", "status": "RESOLVED","tags":{}}
59 | ]
60 | }
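
A minimal sketch of sending this payload to the central DataCollection bus with boto3, mirroring the backfill Lambda's put_events call (`Source` and `DetailType` match it; the payload file name and bus ARN are hypothetical):

```python
import boto3

events = boto3.client("events")
with open("mock-detail.json") as f:  # the payload shown above
    events.put_events(
        Entries=[{
            "Source": "heidi.health",
            "DetailType": "awshealthtest",
            "Detail": f.read(),
            # Hypothetical bus ARN; substitute your data collection account id and region.
            "EventBusName": "arn:aws:events:us-east-1:123456789012:event-bus/heidi-DataCollectionBus-123456789012",
        }]
    )
```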
--------------------------------------------------------------------------------
/src/HealthModule/OrgHealthEventBackFill.Yaml:
--------------------------------------------------------------------------------
1 | ---
2 | AWSTemplateFormatVersion: '2010-09-09'
3 | Description: Health Module for Data Collection Setup
4 |
5 | Parameters:
6 | DataCollectionAccountID:
7 | Type: String
8 | Description: Account ID of the account where the collector is deployed
9 | DataCollectionRegion:
10 | Type: String
11 | Description: Region where the collector is deployed
12 | ResourcePrefix:
13 | Type: String
14 | Description: This prefix is placed in front of created resources where required. You may wish to add a trailing dash for readability
15 | Default: "heidi-"
16 |
17 | #This is a one-time Lambda execution to backfill events
18 | Resources:
19 | LambdaBackfillEventsRole:
20 | Type: AWS::IAM::Role
21 | Metadata:
22 | cfn_nag:
23 | rules_to_suppress:
24 | - id: W11
25 | reason: "Health API required Resource *"
26 | Properties:
27 | AssumeRolePolicyDocument:
28 | Statement:
29 | - Action:
30 | - sts:AssumeRole
31 | Effect: Allow
32 | Principal:
33 | Service:
34 | - lambda.amazonaws.com
35 | ManagedPolicyArns:
36 | - arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole
37 | Path: /
38 | Policies:
39 | - PolicyName: Healthapiaccess
40 | PolicyDocument:
41 | Version: '2012-10-17'
42 | Statement:
43 | - Effect: Allow
44 | Action:
45 | - health:DescribeEvents
46 | - health:DescribeEventDetails
47 | - health:DescribeAffectedEntities
48 | Resource: "*"
49 | - PolicyName: PutEventtoDataCollectionBus-access
50 | PolicyDocument:
51 | Version: '2012-10-17'
52 | Statement:
53 | - Effect: Allow
54 | Action:
55 | - "events:PutEvents"
56 | Resource: !Sub "arn:${AWS::Partition}:events:${DataCollectionRegion}:${DataCollectionAccountID}:event-bus/${ResourcePrefix}DataCollectionBus-${DataCollectionAccountID}"
57 |
58 | LambdaBackfillEvents:
59 | Type: AWS::Lambda::Function
60 | Metadata:
61 | cfn_nag:
62 | rules_to_suppress:
63 | - id: W58
64 | reason: "Given AWSLambdaBasicExecutionRole and allows Cloudwatch"
65 | Properties:
66 | Code:
67 | ZipFile: |
68 | import boto3
69 | import json
70 | import os
71 | from datetime import datetime
72 |
73 | # Initialize clients outside the handler to take advantage of connection reuse
74 | health_client = boto3.client('health', 'us-east-1')
75 | eventbridge_client = boto3.client('events')
76 | EventBusArnVal = os.environ['EventBusArnVal']
77 |
78 | def get_events():
79 | events = []
80 | next_token = None
81 | try:
82 | while True:
83 | kwargs = {}
84 | if next_token:
85 | kwargs['nextToken'] = next_token
86 | events_response = health_client.describe_events(filter={}, **kwargs)
87 | events += events_response['events']
88 | next_token = events_response.get('nextToken')
89 | if not next_token:
90 | break
91 | return events
92 | except Exception as e:
93 | print(f"Error fetching events: {e}")
94 | return []
95 |
96 | def get_event_data(event_details, event_description):
97 | event_data = {
98 | 'eventArn': event_details['arn'],
99 | 'eventRegion': event_details.get('region', ''),
100 | 'eventTypeCode': event_details.get('eventTypeCode', ''),
101 | 'startTime': event_details['startTime'].strftime('%a, %d %b %Y %H:%M:%S GMT'),
102 | 'eventDescription': [{'latestDescription': event_description['latestDescription']}]
103 | }
104 | if 'endTime' in event_details:
105 | event_data['endTime'] = event_details['endTime'].strftime('%a, %d %b %Y %H:%M:%S GMT')
106 | if 'lastUpdatedTime' in event_details:
107 | event_data['lastUpdatedTime'] = event_details['lastUpdatedTime'].strftime('%a, %d %b %Y %H:%M:%S GMT')
108 |
109 | event_data.update((key, value) for key, value in event_details.items() if key not in event_data)
110 |
111 | return event_data
112 |
113 | def send_event_default_bus(event_data, event_bus_arn):
114 | try:
115 | eventbridge_client.put_events(
116 | Entries=[
117 | {
118 | 'Source': 'heidi.health',
119 | 'DetailType': 'awshealthtest',
120 | 'Detail': json.dumps(event_data),
121 | 'EventBusName': event_bus_arn
122 | }
123 | ]
124 | )
125 | except Exception as e:
126 | print(f"Error sending event to EventBridge: {e}")
127 |
128 | def backfill():
129 | events = get_events()
130 | for awsevent in events:
131 | try:
132 | event_details_response = health_client.describe_event_details(eventArns=[awsevent['arn']])
133 | event_affected_response = health_client.describe_affected_entities(filter={'eventArns': [awsevent['arn']]})
134 | entities = event_affected_response['entities']
135 | affected_entities = [{'entityValue': entity['entityValue'], 'status': entity.get('statusCode', 'UNKNOWN')} for entity in entities]
136 |
137 | event_details = event_details_response['successfulSet'][0]['event'] if event_details_response.get('successfulSet') else None
138 | if not event_details:
139 | continue
140 |
141 | event_details['affectedEntities'] = affected_entities
142 | event_description = event_details_response['successfulSet'][0]['eventDescription']
143 | event_data = get_event_data(event_details, event_description)
144 | send_event_default_bus(event_data, EventBusArnVal)
145 | except Exception as e:
146 | print(f"Error processing event {awsevent['arn']}: {e}")
147 |
148 | def lambda_handler(event, context):
149 | backfill()
150 | return {
151 | 'statusCode': 200,
152 | 'body': json.dumps('Backfill process completed successfully')
153 | }
154 |
155 | Handler: index.lambda_handler
156 | Runtime: python3.10
157 | ReservedConcurrentExecutions: 5
158 | Timeout: 900
159 | Role: !GetAtt LambdaBackfillEventsRole.Arn
160 | Environment:
161 | Variables:
162 | EventBusArnVal: !Sub "arn:${AWS::Partition}:events:${DataCollectionRegion}:${DataCollectionAccountID}:event-bus/${ResourcePrefix}DataCollectionBus-${DataCollectionAccountID}"
163 |
164 | # Permission for EventBridge to trigger the Heidi backfill Lambda.
165 | HeidiBackfillLambdaPermissions:
166 | Type: "AWS::Lambda::Permission"
167 | Properties:
168 | Action: lambda:InvokeFunction
169 | FunctionName: !GetAtt LambdaBackfillEvents.Arn
170 | Principal: events.amazonaws.com
171 | SourceArn: !GetAtt EventToTriggerBackfillLambda.Arn
172 |
173 | # Trigger the Lambda once it has been created successfully.
174 | EventToTriggerBackfillLambda:
175 | Type: "AWS::Events::Rule"
176 | Properties:
177 | Description: "EventBridge default rule to trigger Heidi backfill Lambda"
178 | EventBusName: default
179 | EventPattern:
180 | source:
181 | - "aws.cloudformation"
182 | detail:
183 | logical-resource-id:
184 | - HeidiBackfillLambdaPermissions
185 | status-details:
186 | status:
187 | - CREATE_COMPLETE
188 | Targets:
189 | - Arn: !GetAtt LambdaBackfillEvents.Arn
190 | Id: EventToTriggerBackfillLambda
191 |
--------------------------------------------------------------------------------
/src/NotificationModule/NotificationModule.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | AWSTemplateFormatVersion: '2010-09-09'
3 | Description: (AwsHealthEvents) Stack for notification.
4 |
5 | Parameters:
6 | DataCollectionAccountID:
7 | Type: String
8 | Description: Account ID of the account where the collector is deployed
9 | ResourcePrefix:
10 | Type: String
11 | Description: This prefix is placed in front of created resources where required. You may wish to add a trailing dash for readability
12 | Default: "heidi-"
13 | SlackChannelId:
14 | Type: String
15 | Default: "na"
16 | Description: The ID of the Slack channel to configure.
17 | SlackWorkspaceId:
18 | Type: String
19 | Default: "na"
20 | Description: The ID of the Slack workspace to configure.
21 | TeamId:
22 | Type: String
23 | Description: The ID of the Microsoft Teams team to configure.
24 | Default: "na"
25 | TeamsTenantId:
26 | Type: String
27 | Description: The ID of the Microsoft Teams tenant.
28 | Default: "na"
29 | TeamsChannelId:
30 | Type: String
31 | Description: The ID of the Microsoft Teams channel to configure.
32 | Default: "na"
33 |
34 | Conditions:
35 | ChatbotTeamsChannelConfiguration:
36 | !And
37 | - !Not [!Equals [!Ref TeamId, "na"]]
38 | - !Not [!Equals [!Ref TeamsTenantId, "na"]]
39 | - !Not [!Equals [!Ref TeamsChannelId, "na"]]
40 | ChatbotSlackChannelConfiguration:
41 | !And
42 | - !Not [!Equals [!Ref SlackChannelId, "na"]]
43 | - !Not [!Equals [!Ref SlackWorkspaceId, "na"]]
44 |
45 | Resources:
46 | HealthEventSNSTopic:
47 | Type: AWS::SNS::Topic
48 |
49 | HealthEventSNSTopicPolicy:
50 | Type: AWS::SNS::TopicPolicy
51 | Properties:
52 | PolicyDocument:
53 | Statement:
54 | - Sid: "DefaultSNSTopicPolicy"
55 | Effect: Allow
56 | Principal:
57 | Service: "chatbot.amazonaws.com"
58 | Action:
59 | - "SNS:GetTopicAttributes"
60 | - "SNS:SetTopicAttributes"
61 | - "SNS:AddPermission"
62 | - "SNS:RemovePermission"
63 | - "SNS:DeleteTopic"
64 | - "SNS:Subscribe"
65 | - "SNS:ListSubscriptionsByTopic"
66 | - "SNS:Publish"
67 | Resource: !Ref HealthEventSNSTopic
68 | Condition:
69 | StringEquals:
70 | "AWS:SourceOwner": !Sub "${AWS::AccountId}"
71 | - Sid: "EBPolicy"
72 | Effect: Allow
73 | Principal:
74 | Service: "events.amazonaws.com"
75 | Action: "sns:Publish"
76 | Resource: !Ref HealthEventSNSTopic
77 | Topics:
78 | - !Ref HealthEventSNSTopic
79 |
80 | HealthEventNotificationRule:
81 | Type: "AWS::Events::Rule"
82 | Properties:
83 | Description: "Event Health bus rule for aws.health events"
84 | EventBusName: !Sub ${ResourcePrefix}DataCollectionBus-${DataCollectionAccountID}
85 | EventPattern:
86 | source:
87 | - "aws.health"
88 | - "awshealthtest"
89 | Targets:
90 | - Arn: !Ref HealthEventSNSTopic
91 | Id: "snsAsTarget"
92 |
93 | ChatbotRole:
94 | Type: AWS::IAM::Role
95 | Properties:
96 | AssumeRolePolicyDocument:
97 | Version: '2012-10-17'
98 | Statement:
99 | - Effect: Allow
100 | Principal:
101 | Service: chatbot.amazonaws.com
102 | Action: sts:AssumeRole
103 | Policies:
104 | - PolicyName: cloudwatch-logsAccess-Policy
105 | PolicyDocument:
106 | Version: '2012-10-17'
107 | Statement:
108 | - Effect: Allow
109 | Action:
110 | - logs:CreateLogGroup
111 | Resource: !Sub "arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:*"
112 | - Effect: Allow
113 | Action:
114 | - logs:CreateLogStream
115 | - logs:PutLogEvents
116 | Resource: !Sub "arn:${AWS::Partition}:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/chatbot/*"
117 |
118 | ChatbotSlackChannelConfiguration:
119 | Condition: ChatbotSlackChannelConfiguration
120 | Type: AWS::Chatbot::SlackChannelConfiguration
121 | Properties:
122 | ConfigurationName: !Sub ${ResourcePrefix}ChatbotSlack
123 | GuardrailPolicies:
124 | - arn:aws:iam::aws:policy/ReadOnlyAccess
125 | IamRoleArn: !GetAtt ChatbotRole.Arn
126 | LoggingLevel: INFO
127 | SlackWorkspaceId: !Ref SlackWorkspaceId
128 | SlackChannelId: !Ref SlackChannelId
129 | SnsTopicArns:
130 | - !Ref HealthEventSNSTopic
131 |
132 | ChatbotTeamsChannelConfiguration:
133 | Condition: ChatbotTeamsChannelConfiguration
134 | Type: AWS::Chatbot::MicrosoftTeamsChannelConfiguration
135 | Properties:
136 | ConfigurationName: ChatbotTeams
137 | GuardrailPolicies:
138 | - arn:aws:iam::aws:policy/ReadOnlyAccess
139 | IamRoleArn: !GetAtt ChatbotRole.Arn
140 | TeamsChannelId: !Ref TeamsChannelId
141 | TeamsTenantId: !Ref TeamsTenantId
142 | TeamId: !Ref TeamId
143 | SnsTopicArns:
144 | - !Ref HealthEventSNSTopic
145 |
146 |
--------------------------------------------------------------------------------
/src/ReferenceOds/AccountsInfo/Organization_accounts_information_sample.csv:
--------------------------------------------------------------------------------
1 | Account ID,ARN,Email,Name,Status,Joined method,Joined timestamp,Tag
2 | "646279148361","arn:aws:organizations::123456789123:account/o-xxxxxxxxxx/123456789012","Production@example.com","ProductionAccount","ACTIVE","CREATED",Wed Jan 27 2021 10:36:17 GMT-0500 (Eastern Standard Time),"Production"
3 | "234567890123","arn:aws:organizations::123456789123:account/o-xxxxxxxxxx/234567890123","Operations@example.com","OperationsAccount","ACTIVE","CREATED",Wed Jan 27 2021 10:36:17 GMT-0500 (Eastern Standard Time),"Production"
4 | "345678901234","arn:aws:organizations::345678901234:account/o-xxxxxxxxxx/345678901234","Development@example.com","DevelopmentAccount","ACTIVE","CREATED",Wed Jan 27 2021 10:36:17 GMT-0500 (Eastern Standard Time),"Development"
5 | "456789012345","arn:aws:organizations::456789012345:account/o-xxxxxxxxxx/456789012345","Security@example.com","SecurityAccount","ACTIVE","CREATED",Wed Jan 27 2021 10:36:17 GMT-0500 (Eastern Standard Time),"Security"
--------------------------------------------------------------------------------
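Note: before uploading an organization export in this format, it can help to sanity-check the header row against the layout shown above. A minimal sketch (the input file name is hypothetical):

```python
import csv

# Column layout taken from the sample CSV above.
EXPECTED_HEADER = ["Account ID", "ARN", "Email", "Name", "Status",
                   "Joined method", "Joined timestamp", "Tag"]

with open("Organization_accounts_information.csv", newline="") as f:
    reader = csv.reader(f)
    header = next(reader)
    assert header == EXPECTED_HEADER, f"Unexpected header: {header}"
    rows = sum(1 for _ in reader)
    print(f"{rows} account rows with the expected column layout")
```
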
/src/Setup/OneClickSetup.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.dont_write_bytecode = True
3 | from utils import DataCollectionSetup
4 | from utils import MemberSetup
5 |
6 | #Print pretty Box
7 | def print_boxed_text(text):
8 | lines = text.strip().split('\n')
9 | max_length = max(len(line) for line in lines)
10 |
11 | print('═' * (max_length + 2))
12 | for line in lines:
13 | print(f' {line.ljust(max_length)} ')
14 | print('═' * (max_length + 2))
15 |
16 | def get_user_choice():
17 | #Get user choice to get deployment type
18 | options = {'1': 'DataCollection Setup', '2': 'Member Setup'}
19 | while True:
20 | print("Select Deployment option:")
21 | for key, value in options.items():
22 | print(f"{key}. {value}")
23 | choice = input("Enter the number of your choice: ")
24 | if choice in options:
25 | return options[choice]
26 | else:
27 | print("Invalid option. Please choose 1, 2")
28 |
29 | def main():
30 | selected_option = get_user_choice()
31 | if selected_option == 'DataCollection Setup':
32 | print_boxed_text("You selected: DataCollection Setup")
33 | DataCollectionSetup.setup()
34 |
35 | elif selected_option == 'Member Setup':
36 | print_boxed_text("You selected: Member Setup")
37 | MemberSetup.setup()
38 |
39 | if __name__ == "__main__":
40 | main()
41 |
--------------------------------------------------------------------------------
/src/Setup/utils/DataCollectionSetup.py:
--------------------------------------------------------------------------------
1 | import boto3
2 | import subprocess
3 | import datetime
4 | from botocore.exceptions import ClientError
5 | import os
6 |
7 | # Get Tag
8 | def tag():
9 | # Prompt for the tag key and value applied to created resources
10 | tag_key = input("Enter the key to tag the stack (Hit enter to use default: 'App'): ") or "App"
11 | tag_value = input(f"Enter the value for '{tag_key}' (Hit enter to use default: 'Heidi'): ") or "Heidi"
12 | return tag_key, tag_value
13 |
14 |
15 | # Call the tag function to get the tag key and value
16 | tag_key, tag_value = tag()
17 |
18 |
19 | #Get Current Region
20 | def get_default_region():
21 | # Get the default AWS region from the current session
22 | session = boto3.Session()
23 | region = input(f"Enter datacollection region (Hit enter to use default: {session.region_name}): ") or session.region_name
24 | MemberRegionHealth = input("Enter member regions (comma-separated) from which you wish to receive events: ")
25 | return region, MemberRegionHealth
26 |
27 | #Get Current Account ID
28 | def get_account_id():
29 | # Get the AWS account ID for Unique names
30 | sts_client = boto3.client("sts")
31 | account_id = sts_client.get_caller_identity().get("Account")
32 | return account_id
33 |
34 | #Get current AWS Organization ID
35 | def get_organization_details():
36 | # Get the ID of the AWS organization for event bus
37 | AdditionalOrgs = ''
38 | org_client = boto3.client('organizations')
39 | OrgID = org_client.describe_organization()['Organization']['Id']
40 | AdditionalOrgsRequired = ask_yes_no(f"You will get events from OrganizationId {OrgID}. Do you want to add additional Payers/Organization Ids")
41 | if AdditionalOrgsRequired:
42 | AdditionalOrgs = input("Enter organization IDs (comma-separated): ")
43 | POrgID = f"{OrgID},{AdditionalOrgs}"
44 | else:
45 | POrgID = OrgID
46 | print(f"OrgID included {POrgID}")
47 | return POrgID
48 |
49 | #Create the S3 bucket, or reuse an existing one
50 | def create_or_get_s3_bucket(account_id, region):
51 | #create the bucket, or detect its encryption settings if the bucket is supplied by the user
52 | bucket_name = input(f"Enter S3 bucket name for Primary Region (Hit enter to use default: awseventhealth-{account_id}-{region}): ") or f"awseventhealth-{account_id}-{region}"
53 | try:
54 | s3_client = boto3.client('s3', region_name=region)
55 | s3_client.head_bucket(Bucket=bucket_name)
56 | response = s3_client.get_bucket_encryption(Bucket=bucket_name)
57 | encryption = response['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['SSEAlgorithm']
58 | if encryption != "AES256":
59 | try:
60 | bucketkmsarn = response['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['KMSMasterKeyID']
61 | print(f"Skip Creating: S3 bucket {bucket_name} exists and encrypted with kms {bucketkmsarn}")
62 | except KeyError:
63 | bucketkmsarn = input(f"Enter kms Key Arn for {bucket_name}: ")
64 | else:
65 | bucketkmsarn = "na"
66 | print(f"Skip Creating: S3 bucket {bucket_name} already exists")
67 | except ClientError as e:
68 | if region == 'us-east-1':
69 | s3_client.create_bucket(Bucket=bucket_name)
70 | bucketkmsarn = "na"
71 | else:
72 | location = {'LocationConstraint': region}
73 | s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration=location)
74 | bucketkmsarn = "na"
75 | s3_client.get_waiter("bucket_exists").wait(Bucket=bucket_name)
76 | print(f"S3 bucket {bucket_name} has been created")
77 |
78 | # Add tags to the newly created bucket
79 | tagging = {
80 | 'TagSet': [{'Key': tag_key, 'Value': tag_value},]}
81 |
82 | s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging=tagging)
83 | print(f"Tags added to bucket {bucket_name}")
84 |
85 | return bucket_name, bucketkmsarn
86 |
87 | #Upload CFN and Metadatafiles
88 | def sync_cfnfiles(bucket_name):
89 | #Sync cloudformation and metadata files
90 | try:
91 | aws_sync_command = f"aws s3 sync ../../src/ s3://{bucket_name}/DataCollection-metadata"
92 | subprocess.call(aws_sync_command.split())
93 | except ClientError as e:
94 | print("Error while syncing S3. Check if deployer role has required S3 and KMS permissions.")
95 | exit()
96 |
97 | #Get QuickSight Author User
98 | def get_quicksight_user(account_id, qsregion):
99 | #get QuickSight users; Enterprise edition accounts can have multiple namespaces
100 | try:
101 | quicksight_client = boto3.client('quicksight', region_name=qsregion)
102 | response = quicksight_client.list_namespaces(AwsAccountId=account_id)
103 | namespaces = [namespace['Name'] for namespace in response['Namespaces']]
104 | qsusernames = []
105 | except:
106 | print("Error while listing namespaces. Check if QuickSight is an enterprise plan.")
107 | exit()
108 | try:
109 | for namespace in namespaces:
110 | response = quicksight_client.list_users(AwsAccountId=account_id, Namespace=namespace, MaxResults=100)
111 | qsusernames.extend([user['Arn'] for user in response['UserList']])
112 | except ClientError as q:
113 | print("Wrong QuickSight Identity region.")
114 | print(q)
115 | exit()
116 | print("\nAvailable QuickSight Users")
117 | try:
118 | for i, qsusername in enumerate(qsusernames, 1):
119 | print("{}. {}".format(i, qsusername))
120 | print()
121 | while True:
122 | quicksight_number = input("Enter the number corresponding to the QuickSight username from the list: ")
123 | quicksight_user = qsusernames[int(quicksight_number) - 1] if quicksight_number.isdigit() and 1 <= int(quicksight_number) <= len(qsusernames) else None
124 | if quicksight_user:
125 | return quicksight_user
126 | else:
127 | print("Invalid Option")
128 | except ClientError as q:
129 | print("Something went wrong, Check Quicksight settings")
130 |
131 | #Get yes or no for modules
132 | def ask_yes_no(prompt):
133 | while True:
134 | user_input = input(f"{prompt} (yes/no): ").lower()
135 | if user_input == 'yes':
136 | return True
137 | elif user_input == 'no':
138 | return False
139 | else:
140 | print("Invalid input. Please enter 'yes' or 'no'.")
141 | #Print pretty Box
142 | def print_boxed_text(text):
143 | lines = text.strip().split('\n')
144 | max_length = max(len(line) for line in lines)
145 |
146 | print('═' * (max_length + 2))
147 | for line in lines:
148 | print(f' {line.ljust(max_length)} ')
149 | print('═' * (max_length + 2))
150 |
151 | def SendMockEvent():
152 | try:
153 | aws_account_id = get_account_id()
154 | current_time = datetime.datetime.now()
155 | support_client = boto3.client('support','us-east-1')
156 | event_start_time = (current_time + datetime.timedelta(hours=12)).strftime('%Y-%m-%d %H:%M UTC')
157 | event_end_time = (current_time + datetime.timedelta(hours=24)).strftime('%Y-%m-%d %H:%M UTC')
158 | communication_body = f"Hello \nCan you please send a mock PHD event to this account? If eventStart time is passed, please pick any random start time, its just a test.\nAccountId: {aws_account_id}\nEvent Region: us-east-1\nEvent Start Time: {event_start_time}\nEvent End Time: {event_end_time}\nEvent Category: EC2 Service Event"
159 |
160 | support_client.create_case(
161 | subject=f"Heidi mock event request for {aws_account_id}",
162 | serviceCode="aws-health",
163 | severityCode="low",
164 | categoryCode="general-guidance",
165 | communicationBody=communication_body)
166 | except Exception as e:
167 | print(e)
168 |
169 | #Save Input to file for future use
170 | def save_output_to_file(output):
171 | with open('utils/ParametersDataCollection.txt', 'w') as file:
172 | file.write(output + '\n')
173 |
174 | #deploy stack
175 | def deploy_stack(command):
176 | try:
177 | subprocess.call(command, shell=True)
178 | except Exception as e:
179 | print("An error occurred:", e)
180 |
181 | #User Input Data
182 | def get_user_input():
183 | SlackChannelId = "na"
184 | SlackWorkspaceId = "na"
185 | TeamId = "na"
186 | TeamsTenantId = "na"
187 | TeamsChannelId = "na"
188 | qsregion ="na"
189 | QuickSightAnalysisAuthor = "na"
190 | AthenaResultBucket = "na"
191 | AthenaBucketKmsArn ="na"
192 |
193 | region, MemberRegionHealth = get_default_region()
194 | account_id = get_account_id()
195 | AWSOrganizationID = get_organization_details()
196 | DataCollectionBucket, DataCollectionBucketKmsArn = create_or_get_s3_bucket(account_id, region)
197 |
198 | ResourcePrefix = input("Enter ResourcePrefix (Must be in lowercase), Hit enter to use default (heidi-): ") or "heidi-"
199 | ResourcePrefix = ResourcePrefix.lower()
200 | print_boxed_text("Module Selection")
201 | EnableHealthModule = ask_yes_no("Do you want to enable the AWS Health Module(HEIDI)?")
202 | if EnableHealthModule:
203 | qsregion = input(f"Enter QuickSight Identity Region, Hit enter to use default {region}: ") or region
204 | QuickSightAnalysisAuthor = get_quicksight_user(account_id, qsregion)
205 | AthenaResultBucket = input("Enter AthenaResultBucket, Hit enter to use default (aws-athena-query-results-*): ") or "aws-athena-query-results-*"
206 | AthenaBucketKmsArn = input("Enter AthenaBucketKmsArn, Hit enter to use default (na): ") or "na"
207 | print()
208 | EnableNotificationModule = ask_yes_no("Do you want to enable the Notification Module?")
209 | if EnableNotificationModule:
210 | print_boxed_text("Notification Module can setup notification for MS Teams and/Or slack")
211 | EnableSlack = ask_yes_no("Do you want to enable the Notification for Slack Channel?")
212 | if EnableSlack:
213 | SlackChannelId = input(" Provide Slack Channel Id (Note: Slack Channel must be private and you must invite aws@ to the channel): ") or "na"
214 | SlackWorkspaceId = input(" Provide Workspace ID: ") or "na"
215 | EnableTeams = ask_yes_no("Do you want to enable the Notification for Teams Channel?")
216 | if EnableTeams:
217 | TeamId = input(" Provide TeamId: ") or "na"
218 | TeamsTenantId = input(" Provide TeamsTenantId: ") or "na"
219 | TeamsChannelId = input(" Provide TeamsChannelId: ") or "na"
220 | print()
221 | TestSetupViaSupportCase = ask_yes_no("Do you want to test the end-to-end setup with a mock Health event (via support case)?")
222 | if TestSetupViaSupportCase:
223 | SendMockEvent()
224 |
225 | return (
226 | "yes" if EnableHealthModule else "no",
227 | "yes" if EnableNotificationModule else "no",
228 | region, account_id, AWSOrganizationID,
229 | DataCollectionBucket, DataCollectionBucketKmsArn, QuickSightAnalysisAuthor,
230 | AthenaResultBucket, AthenaBucketKmsArn,ResourcePrefix,
231 | SlackChannelId, SlackWorkspaceId, TeamId, TeamsTenantId, TeamsChannelId, qsregion, #qs region not required in parameter
232 | MemberRegionHealth
233 | )
234 |
235 | def save_variables_to_file(variables): #last variable is variables[17], increment from here and also update this comment.
236 | output = "\n".join([
237 | f"#Deploy AWS Health Events Intelligence Dashboards and Insights (HEIDI)\nEnableHealthModule: {variables[0]}\n",
238 | f"#Deploy Notification module\nEnableNotificationModule: {variables[1]}\n",
239 | f"#Data Collection Region\nDataCollectionRegion: {variables[2]}\n",
240 | f"#Data Collection Account\nDataCollectionAccountID: {variables[3]}\n",
241 | f"#Member Regions \nMemberRegionHealth: {variables[17]}\n",
242 | f"#AWS Organization ID which can send events to Data Collection Account\nAWSOrganizationID: {variables[4]}\n",
243 | f"#Bucket which would collection data from various members\nDataCollectionBucket: {variables[5]}\n",
244 | f"#Update here if Collection bucket is encrypted with KMS otherwise na\nDataCollectionBucketKmsArn: {variables[6]}\n",
245 | f"#QuickSight Analysis Author\nQuickSightAnalysisAuthor: {variables[7]}\n",
246 | f"#Update here if Athena result bucket is not default\nAthenaResultBucket: {variables[8]}\n",
247 | f"#Update here Athena bucket is encrypted with KMS otherwise na\nAthenaBucketKmsArn: {variables[9]}\n",
248 | f"#Resource prefix, DO NOT CHANGE\nResourcePrefix: {variables[10]}\n",
249 | f"#If EnableNotificationModule, Provide SlackChannelId for slack\nSlackChannelId: {variables[11]}\n",
250 | f"#If EnableNotificationModule, Provide SlackWorkspaceId for slack\nSlackWorkspaceId: {variables[12]}\n",
251 | f"#If EnableNotificationModule, Provide TeamId for MS Teams\nTeamId: {variables[13]}\n",
252 | f"#If EnableNotificationModule, Provide TeamsTenantId for MS Teams\nTeamsTenantId: {variables[14]}\n",
253 | f"#If EnableNotificationModule, Provide TeamsChannelId for MS Teams\nTeamsChannelId: {variables[15]}\n"
254 | ])
255 | save_output_to_file(output)
256 |
257 |
258 | def read_parameters(file_path):
259 | # Define a dictionary to store the parameters
260 | parameters = {}
261 |
262 | # Read the file and extract parameters
263 | with open(file_path, 'r') as file:
264 | for line in file:
265 | # Skip comments and empty lines
266 | if line.startswith('#') or not line.strip():
267 | continue
268 |
269 | # Split each line into key and value
270 | key, value = map(str.strip, line.split(':', 1))
271 |
272 | # Store in the dictionary
273 | parameters[key] = value
274 |
275 | # Access the variables
276 | enable_health_module = parameters.get('EnableHealthModule', '')
277 | enable_notification_module = parameters.get('EnableNotificationModule', '')
278 | data_collection_region = parameters.get('DataCollectionRegion', '')
279 | data_collection_account_id = parameters.get('DataCollectionAccountID', '')
280 | MemberRegionHealth = parameters.get('MemberRegionHealth','')
281 | aws_organization_id = parameters.get('AWSOrganizationID', '')
282 | quicksight_analysis_author = parameters.get('QuickSightAnalysisAuthor', '')
283 | data_collection_bucket = parameters.get('DataCollectionBucket', '')
284 | data_collection_bucket_kms_arn = parameters.get('DataCollectionBucketKmsArn', 'na')
285 | athena_result_bucket = parameters.get('AthenaResultBucket', 'aws-athena-query-results-*')
286 | athena_bucket_kms_arn = parameters.get('AthenaBucketKmsArn', 'na')
287 | resource_prefix = parameters.get('ResourcePrefix', '')
288 | slack_channel_id = parameters.get('SlackChannelId', '')
289 | slack_workspace_id = parameters.get('SlackWorkspaceId', '')
290 | team_id = parameters.get('TeamId', '')
291 | teams_tenant_id = parameters.get('TeamsTenantId', '')
292 | teams_channel_id = parameters.get('TeamsChannelId', '')
293 |
294 | # Return the variables as a dictionary
295 | return {
296 | 'EnableHealthModule': enable_health_module,
297 | 'EnableNotificationModule': enable_notification_module,
298 | 'DataCollectionRegion': data_collection_region,
299 | 'DataCollectionAccountID': data_collection_account_id,
300 | 'MemberRegionHealth': member_region_health,
301 | 'AWSOrganizationID': aws_organization_id,
302 | 'QuickSightAnalysisAuthor': quicksight_analysis_author,
303 | 'DataCollectionBucket': data_collection_bucket,
304 | 'DataCollectionBucketKmsArn': data_collection_bucket_kms_arn,
305 | 'AthenaResultBucket': athena_result_bucket,
306 | 'AthenaBucketKmsArn': athena_bucket_kms_arn,
307 | 'ResourcePrefix': resource_prefix,
308 | 'SlackChannelId': slack_channel_id,
309 | 'SlackWorkspaceId': slack_workspace_id,
310 | 'TeamId': team_id,
311 | 'TeamsTenantId': teams_tenant_id,
312 | 'TeamsChannelId': teams_channel_id
313 | }
314 |
315 | def setup():
316 | file_path = 'utils/ParametersDataCollection.txt'
317 |
318 | if os.path.exists(file_path):
319 | with open(file_path, 'r') as file:
320 | print(f"./ParametersDataCollection.txt found with previously saved parameters")
321 | print_boxed_text(f"{file.read()}")
322 | reinput = ask_yes_no("Do you want to re-input parameters?")
323 | if reinput:
324 | variables = get_user_input()
325 | save_variables_to_file(variables)
326 | else:
327 | print("Skipping re-input. Using existing variables.")
328 | else:
329 | variables = get_user_input()
330 | save_variables_to_file(variables)
331 | print_boxed_text(f"\nDeployment will use these parameters. Update ./utils/ParametersDataCollection.txt file for additional changes")
332 | with open(file_path, 'r') as file:
333 | print(f"{file.read()}")
334 |
335 | # Read parameters into a dictionary
336 | parameters_dict = read_parameters('utils/ParametersDataCollection.txt')
337 | # Sync CloudFormation template files to the data collection bucket
338 | sync_cfnfiles(parameters_dict['DataCollectionBucket'])
339 |
340 | # Create or update the CloudFormation stack
341 | stack_name = f"{parameters_dict['ResourcePrefix']}{parameters_dict['DataCollectionAccountID']}-{parameters_dict['DataCollectionRegion']}"
342 |
343 | parameters = f"AWSOrganizationID={parameters_dict['AWSOrganizationID']} " \
344 | f"DataCollectionBucket={parameters_dict['DataCollectionBucket']} " \
345 | f"DataCollectionBucketKmsArn={parameters_dict['DataCollectionBucketKmsArn']} " \
346 | f"AthenaBucketKmsArn={parameters_dict['AthenaBucketKmsArn']} " \
347 | f"QuickSightAnalysisAuthor={parameters_dict['QuickSightAnalysisAuthor']} " \
348 | f"ResourcePrefix={parameters_dict['ResourcePrefix']} " \
349 | f"SlackChannelId={parameters_dict['SlackChannelId']} " \
350 | f"SlackWorkspaceId={parameters_dict['SlackWorkspaceId']} " \
351 | f"TeamId={parameters_dict['TeamId']} " \
352 | f"TeamsTenantId={parameters_dict['TeamsTenantId']} " \
353 | f"TeamsChannelId={parameters_dict['TeamsChannelId']} " \
354 | f"EnableHealthModule={parameters_dict['EnableHealthModule']} " \
355 | f"EnableNotificationModule={parameters_dict['EnableNotificationModule']} "
356 |
357 | # Build the tag string passed to sam deploy --tags
358 | # (tag_key and tag_value are defined earlier in this script)
359 | tags = f"{tag_key}={tag_value} "
360 |
361 | command= f"sam deploy --stack-name {stack_name} --region {parameters_dict['DataCollectionRegion']} --parameter-overrides {parameters}\
362 | --template-file ../DataCollectionModule/HeidiRoot.yaml --tags {tags} --capabilities CAPABILITY_NAMED_IAM --disable-rollback"
363 |
364 | #Deploy Stack
365 | deploy_stack(command)
366 |
367 | memberparameters = f"DataCollectionAccountID={parameters_dict['DataCollectionAccountID']} " \
368 | f"DataCollectionRegion={parameters_dict['DataCollectionRegion']} " \
369 | f"ResourcePrefix={parameters_dict['ResourcePrefix']} "
370 |
371 | for memberregion in parameters_dict['MemberRegionHealth'].split(','):
372 | memberregion = memberregion.strip() # normalize entries such as "us-east-1, eu-west-1"; unstripped values would break the stack name
373 | if memberregion: # skip empty or whitespace-only entries
374 | Member_stack_name = f"{parameters_dict['ResourcePrefix']}HealthModule-{get_account_id()}-{memberregion}"
375 | Membercommand = f"sam deploy --stack-name {Member_stack_name} --region {memberregion} --parameter-overrides {memberparameters} \
376 | --template-file ../HealthModule/HealthModuleCollectionSetup.yaml --tags {tags} --capabilities CAPABILITY_NAMED_IAM --disable-rollback"
377 |
378 | # Deploy the stack
379 | deploy_stack(Membercommand)
380 | else:
381 | print(f"Skipping member Region deployment, no member Region supplied.")
382 |
383 | if __name__ == "__main__":
384 | setup()
385 |
386 |
387 |
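
For reference, the parameters round-trip through a plain text file: save_variables_to_file writes a `#comment` line followed by a `Key: value` line per parameter, and read_parameters skips the comments and splits each remaining line on the first `:`. A minimal sketch of a resulting utils/ParametersDataCollection.txt, with placeholder values (the account ID and Organization ID below are illustrative, not real):

```text
#Deploy AWS Health Events Intelligence Dashboards and Insights (HEIDI)
EnableHealthModule: yes

#Deploy Notification module
EnableNotificationModule: no

#Data Collection Region
DataCollectionRegion: us-east-1

#Data Collection Account
DataCollectionAccountID: 111122223333

#Member Regions
MemberRegionHealth: us-east-1,eu-west-1

#AWS Organization ID which can send events to Data Collection Account
AWSOrganizationID: o-exampleorg1
```

The remaining keys (DataCollectionBucket through TeamsChannelId) follow the same comment-plus-value pattern, and read_parameters falls back to simple defaults such as 'na' when a key is absent.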
--------------------------------------------------------------------------------
/src/Setup/utils/HealthEventBackFill.py:
--------------------------------------------------------------------------------
1 | import boto3
2 | import json
3 | from datetime import datetime
4 |
5 | DataCollectionAccountID = input("Enter DataCollection Account ID: ")
6 | DataCollectionRegion = input("Enter DataCollection region: ")
7 | ResourcePrefix = input("Enter ResourcePrefix, Hit enter to use default (heidi-): ") or "heidi-"
8 |
9 | health_client = boto3.client('health', 'us-east-1') # the AWS Health API is served from the us-east-1 endpoint
10 | eventbridge_client = boto3.client('events', DataCollectionRegion)
11 | EventBusArnVal = f"arn:aws:events:{DataCollectionRegion}:{DataCollectionAccountID}:event-bus/{ResourcePrefix}DataCollectionBus-{DataCollectionAccountID}"
12 |
13 | def get_events():
14 | events = []
15 | next_token = None
16 | try:
17 | while True:
18 | kwargs = {}
19 | if next_token and len(next_token) >= 4: # the Health API rejects pagination tokens shorter than 4 characters
20 | kwargs['nextToken'] = next_token
21 | events_response = health_client.describe_events(filter={}, **kwargs)
22 | events += events_response['events']
23 | if 'nextToken' in events_response:
24 | next_token = events_response['nextToken']
25 | else:
26 | break
27 | return events
28 | except Exception as e:
29 | print(e)
30 | return []
31 |
32 | def get_event_data(event_details, event_description, event_metadata):
33 | event_data = {
34 | 'eventArn': event_details['arn'],
35 | 'eventRegion': event_details.get('region', ''),
36 | 'eventTypeCode': event_details.get('eventTypeCode', ''),
37 | 'startTime': event_details.get('startTime').strftime('%a, %d %b %Y %H:%M:%S GMT'),
38 | 'eventDescription': [{'latestDescription': event_description['latestDescription']}],
39 | 'eventMetadata': event_metadata
40 | }
41 | # Include the optional timestamps only when they exist in event_details
42 | if 'endTime' in event_details:
43 | event_data['endTime'] = event_details['endTime'].strftime('%a, %d %b %Y %H:%M:%S GMT')
44 |
45 | if 'lastUpdatedTime' in event_details:
46 | event_data['lastUpdatedTime'] = event_details['lastUpdatedTime'].strftime('%a, %d %b %Y %H:%M:%S GMT')
47 |
48 | event_data.update((key, value) for key, value in event_details.items() if key not in event_data) # copy any remaining detail fields verbatim
49 | print(event_data) # log each payload for traceability
50 |
51 | return event_data
52 |
53 | def send_event_defaultBus(event_data, EventBusArn):
54 | # Send the event to EventBridge
55 | eventbridge_client.put_events(
56 | Entries=[
57 | {
58 | 'Source': 'heidi.health',
59 | 'DetailType': 'awshealthtest',
60 | 'Detail': json.dumps(event_data),
61 | 'EventBusName': EventBusArn
62 | }
63 | ]
64 | )
65 |
92 | def backfill():
93 | events = get_events()
94 | EventBusArn = EventBusArnVal
95 |
96 | for awsevent in events:
97 | try:
98 | # Fetch event details
99 | event_details_response = health_client.describe_event_details(eventArns=[awsevent['arn']])
100 |
101 | # Pagination setup for affected entities
102 | affected_entities = []
103 | next_token = None
104 |
105 | while True:
106 | # Fetch affected entities with optional pagination token
107 | params = {
108 | 'filter': {'eventArns': [awsevent['arn']]}
109 | }
110 | if next_token:
111 | params['nextToken'] = next_token
112 |
113 | event_affected_response = health_client.describe_affected_entities(**params)
114 |
115 | # Process entities
116 | entities = event_affected_response.get('entities', [])
117 | for entity in entities:
118 | entity_value = entity.get('entityValue', 'UNKNOWN')
119 | status_code = entity.get('statusCode', 'UNKNOWN')
120 | affected_entities.append({'entityValue': entity_value, 'status': status_code})
121 |
122 | # Check for pagination token
123 | next_token = event_affected_response.get('nextToken')
124 | if not next_token:
125 | break # Exit loop if no more pages
126 |
127 | # Extract event details
128 | successful_set = event_details_response.get('successfulSet', [])
129 | if not successful_set:
130 | continue
131 |
132 | event_details = successful_set[0].get('event', {})
133 | if not event_details:
134 | continue
135 |
136 | # Append accumulated affected entities
137 | event_details['affectedEntities'] = affected_entities
138 |
139 | # Extract event description (default keeps the lookup in get_event_data safe)
140 | event_description = successful_set[0].get('eventDescription', {'latestDescription': ''})
141 |
142 | # Extract event metadata
143 | event_metadata = successful_set[0].get('eventMetadata', {})
144 |
145 | # Prepare and send event data
146 | event_data = get_event_data(event_details, event_description, event_metadata)
147 | send_event_defaultBus(event_data, EventBusArn)
148 |
149 | except Exception as e:
150 | print(f"Error occurred: {e}")
151 |
152 | if __name__ == "__main__":
153 | backfill()
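
The loop above calls put_events once per historical event; note also that describe_events requires a Business, Enterprise On-Ramp, or Enterprise support plan. EventBridge accepts up to 10 entries per put_events call (each entry capped at 256 KB), so a batched sender can cut API calls during a large backfill. A minimal sketch under those assumptions, reusing this script's eventbridge_client; the helper name is hypothetical and not part of the original script:

```python
def send_events_batched(event_datas, event_bus_arn, batch_size=10):
    """Hypothetical helper: publish prepared event payloads in batches of up to 10."""
    for i in range(0, len(event_datas), batch_size):
        # Build one PutEvents entry per prepared payload in this batch
        entries = [
            {
                'Source': 'heidi.health',
                'DetailType': 'awshealthtest',
                'Detail': json.dumps(event_data),
                'EventBusName': event_bus_arn,
            }
            for event_data in event_datas[i:i + batch_size]
        ]
        response = eventbridge_client.put_events(Entries=entries)
        # put_events is not all-or-nothing, so surface partial failures
        if response.get('FailedEntryCount'):
            print(f"Batch starting at index {i}: {response['FailedEntryCount']} entries failed")
```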
--------------------------------------------------------------------------------
/src/Setup/utils/MemberSetup.py:
--------------------------------------------------------------------------------
1 | import boto3
2 | import subprocess
3 | from botocore.exceptions import ClientError
4 | import os
5 |
6 | # Get Current Account ID
7 | def get_account_id():
8 | # Get the AWS account ID for Unique names
9 | sts_client = boto3.client("sts")
10 | account_id = sts_client.get_caller_identity().get("Account")
11 | return account_id
12 |
13 | # Get yes or no for modules
14 | def ask_yes_no(prompt):
15 | while True:
16 | user_input = input(f"{prompt} (yes/no): ").lower()
17 | if user_input == 'yes':
18 | return True
19 | elif user_input == 'no':
20 | return False
21 | else:
22 | print("Invalid input. Please enter 'yes' or 'no'.")
23 |
24 | # Print pretty Box
25 | def print_boxed_text(text):
26 | lines = text.strip().split('\n')
27 | max_length = max(len(line) for line in lines)
28 |
29 | print('═' * (max_length + 2))
30 | for line in lines:
31 | print(f' {line.ljust(max_length)} ')
32 | print('═' * (max_length + 2))
33 |
34 |
35 | # deploy stack
36 | def deploy_stack(command):
37 | try:
38 | subprocess.call(command, shell=True)
39 | except Exception as e:
40 | print("An error occurred:", e)
41 |
42 | # User Input Data
43 | def get_user_input():
44 | DeploymentRegionHealth = input("Enter comma-separated Region names for AWS health data collection: ")
45 | print_boxed_text("Data Collection Account Parameters")
46 | DataCollectionAccountID = input(f"Enter Data Collection Account ID, Default {get_account_id()}: ") or get_account_id()
47 | DataCollectionRegion = input("Enter Data Collection Region: ")
48 | ResourcePrefix = input("Enter ResourcePrefix, Hit enter to use default (heidi-): ") or "heidi-"
49 | return (
50 | DataCollectionAccountID, DataCollectionRegion, DeploymentRegionHealth, ResourcePrefix
51 | )
52 |
53 | # setup
54 | def setup():
55 | parameters_dict = {}
56 | DataCollectionAccountID, DataCollectionRegion, DeploymentRegionHealth, ResourcePrefix = get_user_input()
57 |
58 | parameters_dict['DataCollectionAccountID'] = DataCollectionAccountID
59 | parameters_dict['DataCollectionRegion'] = DataCollectionRegion
60 | parameters_dict['ResourcePrefix'] = ResourcePrefix
61 |
62 | parameters = f"DataCollectionAccountID={parameters_dict['DataCollectionAccountID']} \
63 | DataCollectionRegion={parameters_dict['DataCollectionRegion']} \
64 | ResourcePrefix={parameters_dict['ResourcePrefix']}"
65 |
66 | for region in (r.strip() for r in DeploymentRegionHealth.split(',') if r.strip()): # skip empty entries; surrounding whitespace would break the stack name
67 | stack_name = f"{parameters_dict['ResourcePrefix']}HealthModule-member-{get_account_id()}-{region}"
68 | command = f"sam deploy --stack-name {stack_name} --region {region} --parameter-overrides {parameters} \
69 | --template-file ../HealthModule/HealthModuleCollectionSetup.yaml --capabilities CAPABILITY_NAMED_IAM --disable-rollback"
70 | # Deploy Stack
71 | deploy_stack(command)
72 |
73 | if __name__ == "__main__":
74 | setup()
75 |
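
setup() shells out to one sam deploy per Region in the comma-separated list. With illustrative placeholder IDs (member account 444455556666, data collection account 111122223333) and the default heidi- prefix, the generated command for us-east-1 looks roughly like:

```text
sam deploy --stack-name heidi-HealthModule-member-444455556666-us-east-1 --region us-east-1 \
  --parameter-overrides DataCollectionAccountID=111122223333 DataCollectionRegion=us-east-1 ResourcePrefix=heidi- \
  --template-file ../HealthModule/HealthModuleCollectionSetup.yaml --capabilities CAPABILITY_NAMED_IAM --disable-rollback
```

The relative template path appears to assume the script is invoked from src/Setup, matching the utils/ paths used elsewhere in the setup scripts.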
--------------------------------------------------------------------------------
/src/Setup/utils/TagBackFill.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import boto3
4 |
5 | DataCollectionAccountID = input("Enter DataCollection Account ID: ")
6 | DataCollectionRegion = input("Enter DataCollection region: ")
7 | ResourcePrefix = input("Enter ResourcePrefix, Hit enter to use default (heidi-): ") or "heidi-"
8 | ResourceExplorerViewArn = input("Enter Resource Explorer view ARN: ")
9 |
10 | eventbridge_client = boto3.client('events', DataCollectionRegion)
11 | EventBusArnVal = f"arn:aws:events:{DataCollectionRegion}:{DataCollectionAccountID}:event-bus/{ResourcePrefix}DataCollectionBus-{DataCollectionAccountID}"
12 |
13 | def resource_explorer():
14 | try:
15 | # Get the Resource Explorer ARN and region
16 | view_arn = ResourceExplorerViewArn
17 | region = view_arn.split(":")[3] # the Region is the fourth field of an ARN
18 |
19 | # Create a Resource Explorer client
20 | resource_explorer = boto3.client('resource-explorer-2', region)
21 |
22 | # Create a paginator for listing resources
23 | paginator = resource_explorer.get_paginator('list_resources')
24 |
25 | # Define pagination configuration
26 | pagination_config = {
27 | 'MaxItems': 20000000, # Total maximum items to return across all pages
28 | 'PageSize': 1000 # Number of items per page
29 | }
30 |
31 | # Paginate through all resources visible to the view
32 | response_iterator = paginator.paginate(
33 | ViewArn=view_arn,
34 | PaginationConfig=pagination_config
35 | )
36 |
37 | for page in response_iterator:
38 | for resource in page.get('Resources', []):
39 | arn = resource.get('Arn')
40 | tags = [{'entityKey': item['Key'], 'entityValue': item['Value']}
41 | for prop in resource.get('Properties', [])
42 | for item in prop.get('Data', [])]
43 |
44 | tag_data = {'entityArn': arn, 'tags': tags}
45 | # Forward only resources that carry at least one tag
46 | if tags:
47 | send_event(tag_data)
49 |
50 | except Exception as e:
51 | print(f"Error in resource_explorer: {e}")
52 |
53 | def send_event(tag_data):
54 | try:
55 |
56 | # Put events to the specified Event Bus
57 | response = eventbridge_client.put_events(
58 | Entries=[{
59 | 'Source': 'heidi.taginfo',
60 | 'DetailType': 'Heidi tags from resource explorer',
61 | 'Detail': json.dumps(tag_data),
62 | 'EventBusName': EventBusArnVal
63 | }]
64 | )
65 | print(f"Event sent: {response}")
66 |
67 | except Exception as e:
68 | print(f"Error in send_event: {e}")
69 |
70 | # Run the tag backfill when executed directly
71 | if __name__ == "__main__":
72 | resource_explorer()
73 |
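
Each tagged resource is forwarded as a heidi.taginfo event whose detail is the tag_data dict built above: the resource ARN plus a flattened key/value list. A sketch of one such detail payload (the ARN and tags are illustrative):

```text
{
  "entityArn": "arn:aws:ec2:us-east-1:111122223333:instance/i-0123456789abcdef0",
  "tags": [
    {"entityKey": "Environment", "entityValue": "prod"},
    {"entityKey": "Owner", "entityValue": "platform-team"}
  ]
}
```

Resources whose Properties carry no tag data produce an empty tags list and are skipped before put_events is called.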
--------------------------------------------------------------------------------