├── .gitignore ├── .markdownlint.json ├── README.md ├── assets ├── css │ └── PITCHME.css └── snippets │ ├── password_policy.tf │ ├── rds_encrypted.tf │ └── rds_plain.tf ├── talk ├── AWS Security Essentials.key ├── AWS Security Essentials.pdf ├── PITCHME.md └── PITCHME.yaml └── workshop ├── PITCHME.md ├── PITCHME.yaml ├── auditing ├── .gitignore ├── inactive │ ├── Dockerfile │ ├── inactive_user_audit │ └── requirements.txt ├── mfa │ ├── Dockerfile │ ├── mfa_audit │ └── requirements.txt ├── scout2 │ ├── Dockerfile │ ├── boot.sh │ ├── index.html │ └── requirements.txt └── setup.tf ├── iam ├── aws_vault_setup ├── config.json ├── scratch.txt └── setup.tf ├── kms ├── complete │ ├── Dockerfile │ ├── dynamo_backed_aes │ └── requirements.txt ├── dynamo │ ├── Dockerfile │ ├── insert_dek │ └── requirements.txt ├── kms_master │ ├── Dockerfile │ ├── kms_master │ └── requirements.txt ├── local │ ├── Dockerfile │ ├── local_aes │ └── requirements.txt ├── scratch.txt └── setup.tf ├── terraform └── setup.tf └── vpc ├── Dockerfile ├── scratch.txt └── setup.tf /.gitignore: -------------------------------------------------------------------------------- 1 | /workshop/iam/out.csv 2 | terraform.tfstate* 3 | .terraform -------------------------------------------------------------------------------- /.markdownlint.json: -------------------------------------------------------------------------------- 1 | { 2 | "MD001": false, 3 | "MD002": false, 4 | "MD026": false, 5 | "MD024": false, 6 | "MD041": false 7 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # AWS Security Essentials 2 | -------------------------------------------------------------------------------- /assets/css/PITCHME.css: -------------------------------------------------------------------------------- 1 | .reveal pre { 2 | width: 100%; 3 | box-shadow : none; 4 | } 5 | 6 | .reveal pre code { 7 | 
font-size: 1.2em; 8 | line-height: 1.2; 9 | max-height: 50vh !important; 10 | opacity: 0.95; 11 | border-radius: 10px; 12 | } -------------------------------------------------------------------------------- /assets/snippets/password_policy.tf: -------------------------------------------------------------------------------- 1 | resource "aws_iam_account_password_policy" "approved" { 2 | minimum_password_length = 8 3 | require_lowercase_characters = true 4 | require_numbers = true 5 | require_uppercase_characters = true 6 | require_symbols = true 7 | allow_users_to_change_password = true 8 | } 9 | -------------------------------------------------------------------------------- /assets/snippets/rds_encrypted.tf: -------------------------------------------------------------------------------- 1 | resource "aws_db_instance" "default" { 2 | allocated_storage = 10 3 | storage_type = "gp2" 4 | engine = "mysql" 5 | engine_version = "5.6.17" 6 | instance_class = "db.t1.micro" 7 | name = "mydb" 8 | username = "foo" 9 | password = "bar" 10 | db_subnet_group_name = "my_database_subnet_group" 11 | parameter_group_name = "default.mysql5.6" 12 | kms_key_id = "${aws_kms_key.foo.key_id}" 13 | } 14 | -------------------------------------------------------------------------------- /assets/snippets/rds_plain.tf: -------------------------------------------------------------------------------- 1 | resource "aws_db_instance" "default" { 2 | allocated_storage = 10 3 | storage_type = "gp2" 4 | engine = "mysql" 5 | engine_version = "5.6.17" 6 | instance_class = "db.t1.micro" 7 | name = "mydb" 8 | username = "sheep" 9 | password = "cheese" 10 | db_subnet_group_name = "my_database_subnet_group" 11 | parameter_group_name = "default.mysql5.6" 12 | } 13 | -------------------------------------------------------------------------------- /talk/AWS Security Essentials.key: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Jemurai/AWS-Security-Essentials/890bff12ec00fe480610d7975c67777366af5ded/talk/AWS Security Essentials.key -------------------------------------------------------------------------------- /talk/AWS Security Essentials.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Jemurai/AWS-Security-Essentials/890bff12ec00fe480610d7975c67777366af5ded/talk/AWS Security Essentials.pdf -------------------------------------------------------------------------------- /talk/PITCHME.md: -------------------------------------------------------------------------------- 1 | ## AWS Security Essentials 2 | 3 | Aaron Bedra [@abedra](https://twitter.com/abedra) 4 | Chief Scientist, Jemurai 5 | 6 | --- 7 | 8 | ## Before we get started 9 | 10 | --- 11 | 12 | ## Cloud infrastructure offers some incredible benefits 13 | 14 | --- 15 | 16 | ## There are many reasons to take the leap 17 | 18 | --- 19 | 20 | ## But there are a lot of misconceptions about how to approach it 21 | 22 | --- 23 | 24 | ### This talk will focus on AWS, but the concepts are transferrable 25 | 26 | --- 27 | 28 | ## It's easy to get caught up in mirroring your datacenter 29 | 30 | --- 31 | 32 | ## Just say no to lift and shift 33 | 34 | --- 35 | 36 | ## This is not a traditional datacenter 37 | 38 | --- 39 | 40 | ## And you shouldn't treat it that way 41 | 42 | --- 43 | 44 | ## There are aspects you want to keep 45 | 46 | --- 47 | 48 | ## And some you want to throw away 49 | 50 | --- 51 | 52 | ## Getting security right in the cloud requires change 53 | 54 | --- 55 | 56 | ## Key Areas 57 | 58 | @ul 59 | 60 | - Automation 61 | - IAM 62 | - Network Design 63 | - Encryption 64 | - Auditing 65 | - Continuous Integration 66 | 67 | @ulend 68 | 69 | --- 70 | 71 | ## Automation 72 | 73 | @fa[arrow-down] 74 | 75 | +++ 76 | 77 | # TODO: devops rainbow picture 78 | 79 | +++ 80 | 81 | ## This is a critical step 82 | 83 | +++ 84 | 85 | ## 
There's no excuse for ignoring automation 86 | 87 | +++ 88 | 89 | ## The platforms are built for it 90 | 91 | +++ 92 | 93 | ## Humans clicking buttons is what causes security issues 94 | 95 | +++ 96 | 97 | ## It also causes blockers, frustration, and lower velocity 98 | 99 | +++ 100 | 101 | ## If you want to keep clicking buttons, stay where you are 102 | 103 | +++ 104 | 105 | ## You shouldn't be logging into the AWS console 106 | 107 | +++ 108 | 109 | ## In fact, page the security team when it happens 110 | 111 | +++ 112 | 113 | ## Or just disable access entirely* 114 | 115 | +++ 116 | 117 | ## Infrastructure as code is step 1 118 | 119 | +++ 120 | 121 | ## It allows for review, analysis, and audit 122 | 123 | +++ 124 | 125 | ## It creates a culture of describing systems as data 126 | 127 | +++ 128 | 129 | ## The cloud is an abstraction, talk about it as one 130 | 131 | +++ 132 | 133 | ## Make peer review the only blocker between you and the end result 134 | 135 | +++ 136 | 137 | ## Automation Checklist 138 | 139 | @ul 140 | 141 | - All infrastructure is recorded as code 142 | - All infrastructure changes are made by automated tools 143 | - Console logins are restricted to administrators 144 | - Teams are educated and empowered to make necessary changes through automation 145 | 146 | @ulend 147 | 148 | --- 149 | 150 | ## IAM 151 | 152 | @fa[arrow-down] 153 | 154 | +++ 155 | 156 | ## Get a grip on users and permissions 157 | 158 | +++ 159 | 160 | ## I've seen some things... 161 | 162 | +++ 163 | 164 | ## A mistake here could provide control over everything 165 | 166 | +++ 167 | 168 | ## How do we get to a good place? 169 | 170 | +++ 171 | 172 | ## Use a directory! 
173 | 174 | +++ 175 | 176 | ## If you already have a directory, replicate or extend trust into AWS 177 | 178 | +++ 179 | 180 | ## Avoid keeping multiple systems of record for user accounts 181 | 182 | +++ 183 | 184 | ## This solves onboarding and offboarding issues 185 | 186 | +++ 187 | 188 | ## And automatically propagates modifications 189 | 190 | +++ 191 | 192 | ## If you don't have a directory, make sure to establish strong account requirements 193 | 194 | +++?code=assets/snippets/password_policy.tf 195 | 196 | @[1-1] 197 | @[2-2] 198 | @[3-3] 199 | @[4-4] 200 | @[5-5] 201 | @[6-6] 202 | @[7-7] 203 | 204 | +++ 205 | 206 | ## The root account 207 | 208 | +++ 209 | 210 | ## Don't use it! 211 | 212 | +++ 213 | 214 | ## Page security when it is used 215 | 216 | +++ 217 | 218 | ## There are only a handful of things you should use the root account for 219 | 220 | +++ 221 | 222 | [https://docs.aws.amazon.com/general/latest/gr/aws_tasks-that-require-root.html](https://docs.aws.amazon.com/general/latest/gr/aws_tasks-that-require-root.html) 223 | 224 | +++ 225 | 226 | ## If it's not on that list, it's not acceptable 227 | 228 | +++ 229 | 230 | ## Now for the IAM users 231 | 232 | +++ 233 | 234 | ## The only permission IAM accounts should have is assume role 235 | 236 | +++ 237 | 238 | ## Security Token Service should be the gateway to everything 239 | 240 | +++ 241 | 242 | ## This reduces direct exposure of credentials 243 | 244 | +++ 245 | 246 | ## And forces everyone to think about the permissions required to perform a task 247 | 248 | +++ 249 | 250 | ## IAM Checklist 251 | 252 | @ul 253 | 254 | - Root account has MFA enabled 255 | - Root account has no access keys* 256 | - Users have no permissions outside of STS assume role and establish MFA device 257 | - Users have no inline policies 258 | - Your directory is used as the system of record 259 | - MFA is required for all human users 260 | - MFA is required to access privileged roles 261 | - Users are trained and 
provided tools to make role assumption seamless 262 | 263 | @ulend 264 | 265 | --- 266 | 267 | ## Network Design 268 | 269 | @fa[arrow-down] 270 | 271 | +++ 272 | 273 | ## Network design is situation dependent 274 | 275 | +++ 276 | 277 | ## But there are a few things that matter 278 | 279 | +++ 280 | 281 | ## Create a boundary 282 | 283 | +++ 284 | 285 | ## VPC should be that boundary 286 | 287 | +++ 288 | 289 | ## Isolate environments and scope with a VPC 290 | 291 | +++ 292 | 293 | ## Monitor what comes in and out of a subnet 294 | 295 | +++ 296 | 297 | ## Be conscious about entry points 298 | 299 | +++ 300 | 301 | ## There should only be one way in 302 | 303 | +++ 304 | 305 | ## VPN or Bastion Host 306 | 307 | +++ 308 | 309 | ## Do not expose management of all machines directly! 310 | 311 | +++ 312 | 313 | ## Use tools to report on external footprint 314 | 315 | +++ 316 | 317 | ### Example 318 | 319 | ```sh 320 | aws ec2 describe-instances 321 | --query "Reservations[].Instances[] 322 | .[InstanceId,PublicIpAddress,SecurityGroups[].GroupName]" 323 | 324 | "i-02d6f02ed4cd65571", 325 | "18.220.232.57", 326 | "bastion_external_security_group" 327 | 328 | "i-088a3499844e2f3b0", 329 | "18.222.41.186", 330 | "bastion_internal_security_group", 331 | "api_security_group" 332 | ``` 333 | 334 | +++ 335 | 336 | ## Network Design Checklist 337 | 338 | @ul 339 | 340 | - Everything is deployed inside a custom VPC 341 | - Flow logs are enabled for all subnets 342 | - Flow logs are monitored 343 | - Everything that can has a security group attached 344 | - Any security group with public facing 0.0.0.0/0 access justification and approval 345 | - Remote administration is restricted to bastion hosts or internal network via VPN 346 | 347 | @ulend 348 | 349 | --- 350 | 351 | ## Encryption 352 | 353 | @fa[arrow-down] 354 | 355 | +++ 356 | 357 | ## TODO: IMAGE 358 | 359 | +++ 360 | 361 | ## We can all acknowledge that this is difficult 362 | 363 | +++ 364 | 365 | ## But we can reduce 
the chance for mistakes 366 | 367 | +++ 368 | 369 | ## KMS 370 | 371 | +++ 372 | 373 | ## This is your new default 374 | 375 | +++ 376 | 377 | ## All your encryption keys should originate from KMS 378 | 379 | +++ 380 | 381 | ## Any AWS service that stores data should have a KMS key attached 382 | 383 | +++?code=assets/snippets/rds_plain.tf 384 | 385 | +++?code=assets/snippets/rds_encrypted.tf 386 | 387 | @[12-12] 388 | 389 | +++ 390 | 391 | ## There are limitations 392 | 393 | +++ 394 | 395 | ## KMS has a message limit of 4k 396 | 397 | +++ 398 | 399 | ## But it allows you to generate data encryption keys 400 | 401 | +++ 402 | 403 | ## Which provides strong randomness for key generation 404 | 405 | +++ 406 | 407 | ### This pushes you towards attaching keys to an AWS service or local crypto 408 | 409 | +++ 410 | 411 | ## Favor AWS managed encryption via KMS 412 | 413 | +++ 414 | 415 | ### If you have to move to local encryption, stick with KMS for key generation 416 | 417 | +++ 418 | 419 | ## Encryption Checklist 420 | 421 | @ul 422 | 423 | - All encryption keys are generated using KMS 424 | - All KMS keys have rotation enabled 425 | - All AWS services that have a KMS option should use KMS 426 | 427 | @ulend 428 | 429 | --- 430 | 431 | ## Auditing 432 | 433 | @fa[arrow-down] 434 | 435 | +++ 436 | 437 | ## How do you know things are configured correctly? 438 | 439 | +++ 440 | 441 | ## Scout2 442 | 443 | +++ 444 | 445 | ## Scout2 audits configurations across all regions 446 | 447 | +++ 448 | 449 | ## It produces a report of dangerous issues 450 | 451 | +++ 452 | 453 | ## TODO Scout2 Report Image 454 | 455 | +++ 456 | 457 | ## Run this tool to see what you find 458 | 459 | +++ 460 | 461 | ## You will likely be surprised 462 | 463 | +++ 464 | 465 | ## Take some time to discuss and correct these issues 466 | 467 | +++ 468 | 469 | ## This helps with audit of configuration 470 | 471 | +++ 472 | 473 | ## But what about user activity? 
474 | 475 | +++ 476 | 477 | ## CloudTrail/CloudWatch 478 | 479 | +++ 480 | 481 | ## These tools are invaluable 482 | 483 | +++ 484 | 485 | ## They are an absolute must for anyone taking security seriously 486 | 487 | +++ 488 | 489 | ## Enable CloudTrail for all regions 490 | 491 | +++ 492 | 493 | ## Use CloudWatch to establish alerts on behavior 494 | 495 | +++ 496 | 497 | ## Alert Examples 498 | 499 | @ul 500 | 501 | - Root account login 502 | - Root account key usage 503 | - New user created 504 | - User added to administrative roles 505 | 506 | @ulend 507 | 508 | +++ 509 | 510 | ## Or better yet, use a third party 511 | 512 | +++ 513 | 514 | ## You don't have to manage everything on your own 515 | 516 | +++ 517 | 518 | ## AWS GuardDuty 519 | 520 | +++ 521 | 522 | ## TODO: Explanation 523 | 524 | +++ 525 | -------------------------------------------------------------------------------- /talk/PITCHME.yaml: -------------------------------------------------------------------------------- 1 | theme-override : assets/css/PITCHME.css 2 | mathjax : TeX-AMS_HTML-full 3 | charts : true 4 | theme: night 5 | highlight: vs2015 -------------------------------------------------------------------------------- /workshop/PITCHME.md: -------------------------------------------------------------------------------- 1 | ## AWS Security Essentials 2 | 3 | Aaron Bedra [@abedra](https://twitter.com/abedra) 4 | Chief Scientist, Jemurai 5 | 6 | --- 7 | @title[Agenda] 8 | 9 | ## Agenda 10 | 11 | - Setup 12 | - Auditing 13 | - IAM 14 | - Network Design 15 | - Encryption and Key Management 16 | - Wrap Up 17 | 18 | --- 19 | 20 | ## Setup 21 | 22 | @fa[arrow-down] 23 | 24 | +++ 25 | @title[list] 26 | 27 | ## Checklist 28 | 29 | - A clone of this repository 30 | - Docker 31 | - Terraform 32 | - AWS Command Line (awscli) 33 | - An AWS account with admin permissions 34 | 35 | --- 36 | 37 | ## Terraform 38 | 39 | @fa[arrow-down] 40 | 41 | +++ 42 | 43 | ## Infrastructure Automation 44 | 45 | +++ 46 | 
47 | ### As noted in the intro talk, this is an important first step 48 | 49 | +++ 50 | 51 | ### We will be using terraform throughout the workshop to setup our AWS resources 52 | 53 | +++ 54 | 55 | ## There are other automation options 56 | 57 | +++ 58 | 59 | ## To save time please use the terraform code provided 60 | 61 | +++ 62 | 63 | ## Feel free to translate this code to any other tool of your choice 64 | 65 | +++ 66 | 67 | ## And don't hesitate to ask any questions 68 | 69 | +++ 70 | 71 | ### Create a bucket for the workshop 72 | 73 | ```sh 74 | aws s3 mb s3://abedra-tfstate --region us-east-2 75 | ``` 76 | 77 | +++ 78 | 79 | ## Before we start terraform, let's look at our first script 80 | 81 | +++?code=workshop/terraform/setup.tf 82 | @title[Setup] 83 | 84 | @[1-3] 85 | @[5-12] 86 | @[14-20] 87 | 88 | +++ 89 | 90 | ### Initialize Terraform 91 | 92 | ```sh 93 | workshop/terraform> terraform init 94 | ``` 95 | 96 | +++ 97 | 98 | ### Import the bucket we just created 99 | 100 | ```sh 101 | workshop/terraform> terraform import \ 102 | aws_s3_bucket.abedra-tfstate \ 103 | abedra-tfstate 104 | ``` 105 | 106 | +++ 107 | 108 | ### Finally, we apply our changes 109 | 110 | ```sh 111 | workshop/terraform> terraform plan 112 | workshop/terraform> terraform apply 113 | ``` 114 | 115 | +++ 116 | 117 | ### Throughout the workshop we will use this bucket to store our terraform state 118 | 119 | +++ 120 | 121 | ### Check the bucket 122 | 123 | ```sh 124 | aws s3 ls s3://abedra-tfstate/setup --recursive 125 | 2018-05-18 10:19:28 1845 setup/terraform.tfstate 126 | ``` 127 | 128 | +++ 129 | 130 | ### You don't have to store your `tfstate` in `S3` during this workshop 131 | 132 | +++ 133 | 134 | ### But if you are working with others in the same account it is recommended 135 | 136 | +++ 137 | 138 | ## Questions? 139 | 140 | --- 141 | 142 | ## Auditing 143 | 144 | @fa[arrow-down] 145 | 146 | +++ 147 | 148 | ## How do we track what our users are doing? 
149 | 150 | +++ 151 | 152 | ## Cloudtrail 153 | 154 | +++ 155 | 156 | ## Cloudtrail logs all activity to the AWS API 157 | 158 | +++ 159 | 160 | ### These logs provide actionable data that you can use to automate security response 161 | 162 | +++ 163 | 164 | ### This is vital to the auditing process 165 | 166 | +++ 167 | 168 | ### And it gives you a way to track every action taken on the platform 169 | 170 | +++ 171 | 172 | ## The output of Cloudtrail will go to S3 173 | 174 | +++ 175 | 176 | ### You can use more advanced tooling to process CloudTrail events and issue alerts 177 | 178 | +++ 179 | 180 | ## One of these tools is GuardDuty 181 | 182 | +++ 183 | 184 | ### GuardDuty analyzes CloudTrail and alerts on known security issues 185 | 186 | +++ 187 | 188 | ### It monitors outbound network connections and flags contact with known bad actors 189 | 190 | +++ 191 | 192 | ### It monitors dns requests and flags queries that resolve to known bad actors 193 | 194 | +++ 195 | 196 | ## It alerts on overexposure of resources 197 | 198 | +++ 199 | 200 | ### And it alerts when overexposed resources are probed or attacked 201 | 202 | +++ 203 | 204 | ### It's new and under active development, so look for more features very soon 205 | 206 | +++ 207 | 208 | ### Initialize terraform in the auditing directory 209 | 210 | ```sh 211 | workshop/auditing> terraform init 212 | ``` 213 | 214 | +++ 215 | 216 | ## Now let's review what we are going to add 217 | 218 | +++?code=workshop/auditing/setup.tf 219 | 220 | @[1-3] 221 | @[5-11] 222 | @[13-13] 223 | @[15-16] 224 | @[18-21] 225 | @[23-24] 226 | @[28-29] 227 | @[31-34] 228 | @[36-37] 229 | @[39-44] 230 | @[48-51] 231 | @[53-56] 232 | @[58-64] 233 | @[66-68] 234 | 235 | +++ 236 | 237 | ### Deploy CloudTrail and GuardDuty 238 | 239 | ```sh 240 | workshop/auditing> terraform plan 241 | workshop/auditing> terraform apply 242 | ``` 243 | 244 | +++ 245 | 246 | ### Check the audit bucket 247 | 248 | ```sh 249 | aws s3 ls 
s3://abedra-audit --recursive 250 | 2018-06-11 12:10:51 0 audit/AWSLogs/489175270805/ 251 | ``` 252 | 253 | +++ 254 | 255 | ### Examine the events 256 | 257 | ```json 258 | { 259 | "eventVersion":"1.02","userIdentity": { 260 | "...SNIP..." 261 | }, 262 | "eventTime":"2017-11-24T22:12:51Z", 263 | "eventName":"GetAccountPasswordPolicy", 264 | "sourceIPAddress":"199.27.255.43", 265 | "requestID":"9c81b384-d164-11e7-b896-bb38c46dd0a0", 266 | "eventID":"35ba2cdd-7b76-40bc-aeca-3b7283518bae", 267 | "eventType":"AwsApiCall","recipientAccountId":"489175270805" 268 | } 269 | ``` 270 | 271 | +++ 272 | 273 | ### How do we know our AWS configuration doesn't have security issues? 274 | 275 | +++ 276 | 277 | ## Questions 278 | 279 | @ul 280 | 281 | - Do all our users have MFA enabled? 282 | - Do we have inactive users? 283 | - Do we have security groups that expose too much? 284 | - Do we have S3 buckets with improper access control? 285 | - How do we know that our users aren't abusing their permissions? 286 | 287 | @ulend 288 | 289 | +++ 290 | 291 | ### Let's ask! 292 | 293 | ```sh 294 | workshop/auditing/mfa> docker build -t mfa_audit . 295 | workshop/auditing/mfa> docker run \ 296 | -e AWS_ACCESS_KEY_ID=XXX \ 297 | -e AWS_SECRET_ACCESS_KEY=xxx \ 298 | mfa_audit 299 | [] 300 | ``` 301 | 302 | ```sh 303 | workshop/auditing/inactive> docker build -t inactive_user_audit . 304 | workshop/auditing/inactive> docker run \ 305 | -e AWS_ACCESS_KEY_ID=XXX \ 306 | -e AWS_SECRET_ACCESS_KEY=xxx \ 307 | inactive_user_audit 308 | [] 309 | ``` 310 | 311 | +++ 312 | 313 | ## We could go on all day with one off scripts 314 | 315 | +++ 316 | 317 | ### Let's try something a little more effective 318 | 319 | ```sh 320 | workshop/auditing/scout2> docker build -t scout . 
321 | workshop/auditing/scout2> docker run \ 322 | -p 22222:80 \ 323 | -e AWS_ACCESS_KEY_ID=XXX \ 324 | -e AWS_SECRET_ACCESS_KEY=xxx \ 325 | scout 326 | ``` 327 | 328 | +++ 329 | 330 | ### Navigate to [localhost:22222](http://localhost:22222) 331 | 332 | ```fundamental 333 | Fetching IAM config.. 334 | ... Lots of output ... 335 | *** Visit *** 336 | 337 | http://localhost:22222 338 | 339 | *** to view the report *** 340 | ``` 341 | 342 | +++ 343 | 344 | ## What we just ran is called Scout2 345 | 346 | +++ 347 | 348 | ## Walkthrough 349 | 350 | +++ 351 | 352 | ### There is also commercial tooling, but this is a great way to get started 353 | 354 | +++ 355 | 356 | ## Exercise: Fix the issues 357 | 358 | +++ 359 | 360 | ### Pick and choose your battles, not everything you find is an emergency 361 | 362 | +++ 363 | 364 | ## Let's walk through a real world scenario 365 | 366 | +++ 367 | 368 | [https://aws.amazon.com/blogs/security/how-to-receive-notifications-when-your-aws-accounts-root-access-keys-are-used](https://aws.amazon.com/blogs/security/how-to-receive-notifications-when-your-aws-accounts-root-access-keys-are-used) 369 | 370 | +++ 371 | 372 | ### Alarms like this are key in detecting and understanding a breach 373 | 374 | +++ 375 | 376 | ### Other Alarm Possibilities 377 | 378 | @ul 379 | 380 | - Unusually high amount of KMS actions 381 | - New user created 382 | - Access keys created for the root account 383 | - Access keys used for the root account 384 | - Anyone logs into the AWS console 385 | - Anything that would signal unexpected behavior 386 | 387 | @ulend 388 | 389 | +++ 390 | 391 | ### GuardDuty triggers alerts, but you can define alerts on additional conditions 392 | 393 | +++ 394 | 395 | ## Questions? 
396 | 397 | --- 398 | 399 | ## IAM 400 | 401 | @fa[arrow-down] 402 | 403 | +++ 404 | 405 | ## This is the foundation 406 | 407 | +++ 408 | 409 | ## Locking down access is incredibly important 410 | 411 | +++ 412 | 413 | ## In fact, users shouldn't have any permissions 414 | 415 | +++ 416 | 417 | ### They should only be able to assume roles that provide required access 418 | 419 | +++ 420 | 421 | ## This makes the console obsolete 422 | 423 | +++ 424 | 425 | ### But it requires you to have a high degree of automation 426 | 427 | +++ 428 | 429 | ### If you do it right, people won't care what cloud provider they are using 430 | 431 | +++ 432 | 433 | ## Let's take a look at the setup 434 | 435 | +++?code=workshop/iam/setup.tf 436 | 437 | @[1-3] 438 | @[5-11] 439 | @[22-24] 440 | @[26-28] 441 | @[30-37] 442 | @[39-46] 443 | @[48-48] 444 | @[50-61] 445 | @[63-67] 446 | @[69-69] 447 | @[71-71] 448 | @[73-76] 449 | @[78-78] 450 | @[80-84] 451 | @[88-92] 452 | @[94-100] 453 | @[102-107] 454 | @[109-115] 455 | @[117-122] 456 | @[124-128] 457 | @[130-134] 458 | @[136-140] 459 | @[142-146] 460 | 461 | +++ 462 | 463 | ### Deploy the changes 464 | 465 | ```sh 466 | workshop/iam> terraform init 467 | workshop/iam> terraform plan 468 | workshop/iam> terraform apply 469 | ``` 470 | 471 | +++ 472 | 473 | ### Verify the changes 474 | 475 | ```sh 476 | aws iam list-roles --query 'Roles[].RoleName' 477 | [ 478 | "admin", 479 | "AWSServiceRoleForAmazonGuardDuty", 480 | "read_only" 481 | ] 482 | ``` 483 | 484 | ```sh 485 | aws iam list-attached-role-policies --role-name admin 486 | { 487 | "AttachedPolicies": [ 488 | { 489 | "PolicyName": "AdministratorAccess", 490 | "PolicyArn": "arn:aws:iam::aws:policy/AdministratorAccess" 491 | } 492 | ] 493 | } 494 | ``` 495 | 496 | +++ 497 | 498 | ### Grab keys for each of the newly created users 499 | 500 | ```sh 501 | aws iam create-access-key --user-name audit \ 502 | --query 'AccessKey.[AccessKeyId,SecretAccessKey]' 503 | [ 504 | "XXX", 505 | 
"XXX" 506 | ] 507 | ``` 508 | 509 | ```sh 510 | aws iam create-access-key --user-name operator \ 511 | --query 'AccessKey.[AccessKeyId,SecretAccessKey]' 512 | [ 513 | "XXX", 514 | "XXX" 515 | ] 516 | ``` 517 | 518 | +++ 519 | 520 | ## How do we use these roles? 521 | 522 | +++ 523 | 524 | ## We do it through STS 525 | 526 | +++ 527 | 528 | ### Security Token Service provides temporary, limited credentials 529 | 530 | +++ 531 | 532 | ### Before we try STS we need to know the arn of the role we want to assume 533 | 534 | +++ 535 | 536 | ### List role arns 537 | 538 | ```sh 539 | aws iam list-roles --query 'Roles[].Arn' 540 | [ 541 | "arn:aws:iam::489175270805:role/admin", 542 | "arn:aws:iam::489175270805:role/aws-service-role/guardduty.amazonaws.com/AWSServiceRoleForAmazonGuardDuty", 543 | "arn:aws:iam::489175270805:role/read_only" 544 | ] 545 | ``` 546 | 547 | +++ 548 | 549 | ### Users use their AWS credentials to get STS tokens that are used to make API calls 550 | 551 | +++ 552 | 553 | ### Switch to audit user's credentials 554 | 555 | ```sh 556 | export AWS_ACCESS_KEY_ID=XXX 557 | export AWS_SECRET_ACCESS_KEY=xxx 558 | ``` 559 | 560 | +++ 561 | 562 | ### Acquire STS credentials 563 | 564 | ```sh 565 | aws sts assume-role \ 566 | --role-arn arn:aws:iam::489175270805:role/read_only \ 567 | --role-session-name audit-test 568 | { 569 | "Credentials": { 570 | "AccessKeyId": "XXX", 571 | "SecretAccessKey": "XXX", 572 | "SessionToken": "XXX", 573 | }, 574 | "AssumedRoleUser": { 575 | "AssumedRoleId": "XXX:audit-test", 576 | "Arn": "XXX:assumed-role/read_only/audit-test" 577 | } 578 | } 579 | ``` 580 | 581 | +++ 582 | 583 | ### Set the environment 584 | 585 | ```sh 586 | export AWS_ACCESS_KEY_ID=XXX 587 | export AWS_SECRET_ACCESS_KEY=XXX 588 | export AWS_SESSION_TOKEN=XXX 589 | ``` 590 | 591 | +++ 592 | 593 | ### Verify 594 | 595 | ```sh 596 | aws sts get-caller-identity 597 | { 598 | "UserId": "AROAJ6TY53DCE4TYG6JWC:audit-test", 599 | "Account": "489175270805", 600 | 
"Arn": "arn:aws:sts::489175270805:assumed-role/read_only/audit-test" 601 | } 602 | ``` 603 | 604 | ```sh 605 | aws s3 ls 606 | 2018-04-19 14:40:37 abedra-audit 607 | 2018-04-19 11:35:45 abedra-tfstate 608 | ``` 609 | 610 | +++ 611 | 612 | ## But there's an easier way 613 | 614 | +++ 615 | 616 | # aws-vault 617 | 618 | +++ 619 | 620 | ## This tool is effective on multiple levels 621 | 622 | +++ 623 | 624 | ## It allows you to assume roles via STS 625 | 626 | +++ 627 | 628 | ## It even handles MFA 629 | 630 | +++ 631 | 632 | ## It also moves credentials into the OS keyring 633 | 634 | +++ 635 | 636 | ## Which encrypts your credentials and keeps them protected 637 | 638 | +++ 639 | 640 | ## Let's go through the setup 641 | 642 | +++ 643 | 644 | [https://github.com/99designs/aws-vault](https://github.com/99designs/aws-vault) 645 | 646 | +++ 647 | 648 | #### ~/.aws/config 649 | 650 | ```toml 651 | [default] 652 | output = json 653 | region = us-east-2 654 | 655 | [profile personal-admin] 656 | region=us-east-2 657 | output=json 658 | role_arn=arn:aws:iam::489175270805:role/admin 659 | mfa_serial=arn:aws:iam::489175270805:mfa/operator 660 | 661 | [profile personal-read_only] 662 | region=us-east-2 663 | output=json 664 | role_arn=arn:aws:iam::489175270805:role/read_only 665 | ``` 666 | 667 | +++ 668 | 669 | ### Now we import our credentials into vault 670 | 671 | ```sh 672 | workshop/iam> aws-vault add personal-read_only 673 | Enter Access Key ID: XXX 674 | Enter Secret Access Key: XXX 675 | Added credentials to profile "personal-read_only" in vault 676 | workshop/iam> aws-vault add personal-admin 677 | Enter Access Key ID: XXX 678 | Enter Secret Access Key: XXX 679 | Added credentials to profile "personal-admin" in vault 680 | workshop/iam> aws-vault list 681 | personal-read_only 682 | personal-admin 683 | ``` 684 | 685 | +++ 686 | 687 | ### Let's give it a spin 688 | 689 | ```sh 690 | workshop/iam> aws-vault exec personal-read_only -- aws s3 ls 691 | 2018-04-19 
14:40:37 abedra-audit 692 | 2018-04-19 11:35:45 abedra-tfstate 693 | ``` 694 | 695 | +++ 696 | 697 | ### Now that you have things setup you can remove all other permissions 698 | 699 | +++ 700 | 701 | ### This creates a clean separation and a good audit trail 702 | 703 | +++ 704 | 705 | ## Questions? 706 | 707 | --- 708 | 709 | ## Network Design 710 | 711 | @fa[arrow-down] 712 | 713 | +++ 714 | 715 | ## Before we get started, the number one rule... 716 | 717 | +++ 718 | 719 | ## Keep it simple 720 | 721 | +++ 722 | 723 | ## Complicated networks are complicated 724 | 725 | +++ 726 | 727 | ## And cause all kinds of problems 728 | 729 | +++ 730 | 731 | ## Now that we have that out of the way 732 | 733 | +++ 734 | 735 | ## Let's move on to the VPC 736 | 737 | +++ 738 | 739 | ## This is the foundation of your network 740 | 741 | +++ 742 | 743 | ## If you haven't been doing this, start now 744 | 745 | +++ 746 | 747 | ## Let's explore the setup 748 | 749 | +++?code=workshop/vpc/setup.tf 750 | 751 | @[1-3] 752 | @[5-11] 753 | @[13-13] 754 | @[14-14] 755 | @[16-19] 756 | @[21-27] 757 | @[30-32] 758 | @[34-38] 759 | @[40-40] 760 | @[41-43] 761 | @[45-50] 762 | @[52-54] 763 | @[57-57] 764 | @[58-60] 765 | @[62-67] 766 | @[69-71] 767 | @[74-80] 768 | @[82-82] 769 | @[83-85] 770 | @[87-92] 771 | @[94-96] 772 | @[99-99] 773 | @[100-102] 774 | @[104-109] 775 | @[111-113] 776 | @[116-126] 777 | 778 | +++ 779 | 780 | ### Check security groups assigned to public instances 781 | 782 | ```sh 783 | aws ec2 describe-instances \ 784 | --query "Reservations[].Instances[].[InstanceId,PublicIpAddress,SecurityGroups[].GroupName]" 785 | 786 | "i-02d6f02ed4cd65571", 787 | "18.220.232.57", 788 | "bastion_external_security_group" 789 | 790 | "i-088a3499844e2f3b0", 791 | "18.222.41.186", 792 | "bastion_internal_security_group", 793 | "api_security_group" 794 | ``` 795 | 796 | +++ 797 | 798 | ### Or use the nmap docker image 799 | 800 | ```sh 801 | workshop/vpc> docker build -t nmap . 
802 | workshop/vpc> docker run nmap -Pn -T5 -sS -A [bastion_ip] 803 | ``` 804 | 805 | +++ 806 | 807 | ## We could keep going, but this is a good start 808 | 809 | +++ 810 | 811 | ### Remember to keep things simple, but maintain proper boundaries 812 | 813 | +++ 814 | 815 | ## Questions? 816 | 817 | --- 818 | 819 | ## Encryption and Key Management 820 | 821 | @fa[arrow-down] 822 | 823 | +++ 824 | 825 | ## Let's set a baseline 826 | 827 | +++ 828 | 829 | ## The Problem 830 | 831 | +++ 832 | 833 | ## Encrypt an unknown amount of data at rest 834 | 835 | +++ 836 | 837 | ## We should rely on symmetric encryption for this 838 | 839 | +++ 840 | 841 | ## In particular, AES 842 | 843 | +++ 844 | 845 | ## For this workshop we will use `AES-256-GCM` 846 | 847 | +++ 848 | 849 | ## A breakdown 850 | 851 | @ul 852 | 853 | - AES is the cipher core 854 | - 256 is length of the key in bits 855 | - GCM, short for Galois Counter Mode, is the cipher block mode 856 | 857 | @ulend 858 | 859 | +++ 860 | 861 | ### There are other AES options, but this is the strongest choice* 862 | 863 | +++ 864 | 865 | ## Local Python Example 866 | 867 | +++?code=workshop/kms/local/local_aes&lang=python 868 | 869 | @[8-13] 870 | @[16-19] 871 | @[22-29] 872 | 873 | +++ 874 | 875 | ### Test it out 876 | 877 | ```sh 878 | workshop/kms/local> docker build -t local_aes . 879 | workshop/kms/local> docker run local_aes 880 | 'Attack at dawn' 881 | ``` 882 | 883 | +++ 884 | 885 | ## Questions 886 | 887 | @ul 888 | 889 | - Does this solve our encryption problem? 890 | - How do we get a better key? 891 | - How do we protect the key material? 892 | - Is one encryption key enough? 
893 | 894 | @ulend 895 | 896 | +++ 897 | 898 | ## Amazon KMS 899 | 900 | +++ 901 | 902 | ### A hardware backed symmetric encryption service available via a simple API 903 | 904 | +++ 905 | 906 | ### If you are encrypting anything inside of AWS, you should be using this service 907 | 908 | +++ 909 | 910 | ### Any AWS service that stores data has a KMS option 911 | 912 | +++ 913 | 914 | ### If you can, design your system to use RDS with KMS for data at rest 915 | 916 | +++ 917 | 918 | ## Let's explore the other path 919 | 920 | +++?code=workshop/kms/setup.tf 921 | 922 | @[1-3] 923 | @[5-11] 924 | @[13-16] 925 | @[17-20] 926 | @[22-32] 927 | 928 | +++ 929 | 930 | ### Deploy 931 | 932 | ```sh 933 | workshop/kms> terraform init 934 | workshop/kms> terraform plan 935 | workshop/kms> terraform apply 936 | ``` 937 | 938 | +++ 939 | 940 | ### This creates a KMS key with an alias and a dynamodb table 941 | 942 | +++ 943 | 944 | ### We will use two types of encryption keys 945 | 946 | @ul 947 | 948 | - Key Encryption Keys (KEK) 949 | - Data Encryption Keys (DEK) 950 | 951 | @ulend 952 | 953 | +++ 954 | 955 | ### The dynamodb table is for data encryption keys 956 | 957 | +++ 958 | 959 | ## Let's encrypt with KMS 960 | 961 | +++?code=workshop/kms/kms_master/kms_master&lang=python 962 | 963 | @[9-12] 964 | @[14-17] 965 | @[19-19] 966 | @[21-23] 967 | @[25-25] 968 | 969 | +++ 970 | 971 | ### Build and run 972 | 973 | ```sh 974 | workshop/kms/kms_master> docker build -t kms_master . 
975 | workshop/kms/kms_master> docker run \ 976 | -e AWS_ACCESS_KEY_ID=XXX \ 977 | -e AWS_SECRET_ACCESS_KEY=XXX \ 978 | -e AWS_DEFAULT_REGION=us-east-2 \ 979 | kms_master 980 | 'base64 encoded string' 981 | 'Attack at dawn' 982 | ``` 983 | 984 | +++ 985 | 986 | ### This creates black box encryption with no knowledge of key material 987 | 988 | +++ 989 | 990 | ### The downside is that it only works for data up to 4 kilobytes in size 991 | 992 | +++ 993 | 994 | ### To solve our problem we will need to combine our use of KMS and local encryption 995 | 996 | +++ 997 | 998 | ### We do this by using KMS to generate and encrypt data encryption keys 999 | 1000 | +++ 1001 | 1002 | ### We will store our encrypted data encryption keys in dynamodb 1003 | 1004 | +++ 1005 | 1006 | ### Generate a DEK and insert it into dynamodb 1007 | 1008 | +++?code=workshop/kms/dynamo/insert_dek&lang=python 1009 | 1010 | @[11-17] 1011 | @[19-30] 1012 | @[32-39] 1013 | @[41-41] 1014 | 1015 | +++ 1016 | 1017 | ### Execute 1018 | 1019 | ```sh 1020 | workshop/kms/dynamo> docker build -t insert_dek . 1021 | workshop/kms/dynamo> docker run \ 1022 | -e AWS_ACCESS_KEY_ID=XXX \ 1023 | -e AWS_SECRET_ACCESS_KEY=XXX \ 1024 | -e AWS_DEFAULT_REGION=us-east-2 \ 1025 | insert_dek 1026 | 1027 | {u'Item': {u'KeyId': {u'S': u'application'}, 1028 | u'Value': {u'S': u'XXX'}}, 1029 | 'ResponseMetadata': {'HTTPHeaders': { ...SNIP... } 1030 | 'HTTPStatusCode': 200, 1031 | 'RequestId': XXX, 1032 | 'RetryAttempts': 0}} 1033 | ``` 1034 | 1035 | +++ 1036 | 1037 | ### Now we can perform end to end encryption using dynamodb and kms backed encryption 1038 | 1039 | +++?code=workshop/kms/complete/dynamo_backed_aes&lang=python 1040 | 1041 | @[10-15] 1042 | @[18-21] 1043 | @[24-27] 1044 | @[29-36] 1045 | @[38-40] 1046 | @[42-42] 1047 | @[45-53] 1048 | 1049 | +++ 1050 | 1051 | ### Run the complete example 1052 | 1053 | ```sh 1054 | workshop/kms/complete> docker build -t complete . 
1055 | workshop/kms/complete> docker run \ 1056 | -e AWS_ACCESS_KEY_ID=XXX \ 1057 | -e AWS_SECRET_ACCESS_KEY=XXX \ 1058 | -e AWS_DEFAULT_REGION=us-east-2 \ 1059 | complete 1060 | 1061 | 'Attack at dawn' 1062 | ``` 1063 | 1064 | +++ 1065 | 1066 | ### This assumes a single data encryption key for all data in an application 1067 | 1068 | +++ 1069 | 1070 | ### Other Options 1071 | 1072 | @ul 1073 | 1074 | - A DEK per customer 1075 | - A DEK per record 1076 | - A DEK per shard 1077 | 1078 | @ulend 1079 | 1080 | +++ 1081 | 1082 | ### With multiple DEKs, store the key id with the record in the database 1083 | 1084 | +++ 1085 | 1086 | ## Wrap Up 1087 | 1088 | @ul 1089 | 1090 | - Does this solve our encryption problem? 1091 | - How do we get a better key? 1092 | - How do we protect the key material? 1093 | - Is one encryption key enough? 1094 | 1095 | @ulend 1096 | 1097 | +++ 1098 | 1099 | ## Questions? 1100 | 1101 | --- 1102 | 1103 | ## Parting thoughts and questions 1104 | -------------------------------------------------------------------------------- /workshop/PITCHME.yaml: -------------------------------------------------------------------------------- 1 | theme-override : assets/css/PITCHME.css 2 | mathjax : TeX-AMS_HTML-full 3 | charts : true 4 | theme: night 5 | highlight: vs2015 -------------------------------------------------------------------------------- /workshop/auditing/.gitignore: -------------------------------------------------------------------------------- 1 | scout2-report -------------------------------------------------------------------------------- /workshop/auditing/inactive/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3 2 | WORKDIR /src 3 | COPY requirements.txt . 4 | RUN pip install --no-cache-dir -r requirements.txt 5 | COPY inactive_user_audit . 
#!/usr/bin/env python
"""Audit AWS IAM users and report those with no recently used access keys.

A user is considered inactive when none of their active access keys has
been used within the last INACTIVITY_THRESHOLD_DAYS days (users with no
usable keys at all are also reported). The result is printed as a list
of user names.
"""

import sys
import pprint
from datetime import datetime

import pytz
import boto3
from botocore.exceptions import NoCredentialsError

# Keys unused for this many days (or never used) mark a user as inactive.
INACTIVITY_THRESHOLD_DAYS = 90


def create_iam_client():
    """Create a low-level IAM client for use by the rest of the script."""
    resource = boto3.resource('iam')
    return resource.meta.client


def access_key_used_recently(iam, user):
    """Return True if any of the user's active access keys was used
    within the inactivity threshold.

    :param iam: boto3 IAM client
    :param user: a user dict as returned by list_users
    """
    now = datetime.now(pytz.utc)
    # Paginate: a user may have more access keys than fit in one response.
    for page in iam.get_paginator('list_access_keys').paginate(
            UserName=user["UserName"]):
        for key in page["AccessKeyMetadata"]:
            if key["Status"] != "Active":
                continue
            last_used = iam.get_access_key_last_used(
                AccessKeyId=key["AccessKeyId"])
            # LastUsedDate is absent when the key has never been used.
            used = last_used["AccessKeyLastUsed"].get("LastUsedDate")
            if used is not None and (now - used).days < INACTIVITY_THRESHOLD_DAYS:
                return True
    return False


def main():
    """Collect all users (following pagination) and print inactive ones."""
    iam = create_iam_client()

    try:
        # The paginator replaces the previous manual Marker loop and
        # covers the first page and all subsequent pages uniformly.
        pages = iam.get_paginator('list_users').paginate()
        user_list = [user for page in pages for user in page['Users']]
    except NoCredentialsError:
        print("No credentials found")
        sys.exit(1)

    inactive_users = [user['UserName'] for user in user_list
                      if not access_key_used_recently(iam, user)]

    pprint.pprint(inactive_users)


if __name__ == '__main__':
    main()
-------------------------------------------------------------------------------- /workshop/auditing/inactive/requirements.txt: -------------------------------------------------------------------------------- 1 | boto3 2 | pytz 3 | -------------------------------------------------------------------------------- /workshop/auditing/mfa/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3 2 | WORKDIR /src 3 | COPY requirements.txt . 4 | RUN pip install --no-cache-dir -r requirements.txt 5 | COPY mfa_audit . 6 | CMD [ "./mfa_audit" ] -------------------------------------------------------------------------------- /workshop/auditing/mfa/mfa_audit: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """This script audits AWS IAM configuration for a series of potential security issues""" 3 | 4 | from __future__ import print_function 5 | 6 | import pprint 7 | import sys 8 | import boto3 9 | from botocore.exceptions import ClientError, NoCredentialsError 10 | 11 | 12 | def create_iam_client(): 13 | """This method creates an IAM object for use in the rest of the script""" 14 | resource = boto3.resource('iam') 15 | return resource.meta.client 16 | 17 | 18 | def is_mfa_enabled(iam, user): 19 | """This method first checks to see if a user is able to login, 20 | and then to see if they have an MFA device attached""" 21 | try: 22 | iam.get_login_profile(UserName=user['UserName']) 23 | mfa_devices = iam.list_mfa_devices(UserName=user['UserName']) 24 | if mfa_devices['MFADevices']: 25 | return True 26 | else: 27 | return False 28 | except ClientError: 29 | return True 30 | 31 | 32 | def main(): 33 | """Main""" 34 | iam = create_iam_client() 35 | try: 36 | users = iam.list_users() 37 | except NoCredentialsError: 38 | print("No credentials found") 39 | sys.exit(1) 40 | 41 | user_list = [] 42 | inactive_mfa_users = [] 43 | 44 | for user in users['Users']: 45 | 
user_list.append(user) 46 | 47 | while 'Marker' in users: 48 | users = iam.list_users(Marker=users['Marker']) 49 | for user in users['Users']: 50 | user_list.append(user) 51 | 52 | for user in user_list: 53 | if not is_mfa_enabled(iam, user): 54 | inactive_mfa_users.append(user['UserName']) 55 | 56 | pprint.pprint(inactive_mfa_users) 57 | 58 | 59 | if __name__ == '__main__': 60 | main() 61 | -------------------------------------------------------------------------------- /workshop/auditing/mfa/requirements.txt: -------------------------------------------------------------------------------- 1 | boto3 -------------------------------------------------------------------------------- /workshop/auditing/scout2/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:latest 2 | 3 | RUN yum update -y 4 | RUN yum install -y epel-release 5 | RUN yum install -y python2-pip nginx 6 | RUN pip install --upgrade pip 7 | RUN pip install python-dateutil==2.6.1 8 | RUN pip install awsscout2 9 | 10 | RUN date 11 | 12 | COPY index.html /usr/share/nginx/html/index.html 13 | 14 | WORKDIR /work 15 | COPY boot.sh . 
#!/bin/sh
# Container entry point: generate a Scout2 audit report and serve it
# with nginx until the container is stopped.

set -e

WEBROOT=/usr/share/nginx/html

# Start nginx in the background (it daemonizes by default).
/usr/sbin/nginx

# Write the AWS audit report straight into the web root ...
Scout2 --no-browser --report-dir "$WEBROOT"

# ... and publish it as the site index.
mv "$WEBROOT/report.html" "$WEBROOT/index.html"

printf "\n\n*** Visit ***\n\nhttp://localhost:22222\n\n*** to view the report ***\n\n"

# Keep the container in the foreground and stream access logs to stdout.
tail -f /var/log/nginx/access.log

AWS Security Analyzer

8 | 9 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /workshop/auditing/scout2/requirements.txt: -------------------------------------------------------------------------------- 1 | boto3 2 | awsscout2 -------------------------------------------------------------------------------- /workshop/auditing/setup.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = "us-east-2" 3 | } 4 | 5 | terraform { 6 | backend "s3" { 7 | bucket = "abedra-tfstate" 8 | key = "auditing/terraform.tfstate" 9 | region = "us-east-2" 10 | } 11 | } 12 | 13 | data "aws_iam_policy_document" "policy" { 14 | statement { 15 | sid = "AWSCloudTrailAclCheck" 16 | effect = "Allow" 17 | 18 | principals { 19 | type = "Service" 20 | identifiers = ["cloudtrail.amazonaws.com"] 21 | } 22 | 23 | actions = ["s3:GetBucketAcl"] 24 | resources = ["${aws_s3_bucket.abedra-audit.arn}"] 25 | } 26 | 27 | statement { 28 | sid = "AWSCloudTrailWrite" 29 | effect = "Allow" 30 | 31 | principals { 32 | type = "Service" 33 | identifiers = ["cloudtrail.amazonaws.com"] 34 | } 35 | 36 | actions = ["s3:PutObject"] 37 | resources = ["${aws_s3_bucket.abedra-audit.arn}/audit/*"] 38 | 39 | condition { 40 | test = "StringEquals" 41 | variable = "s3:x-amz-acl" 42 | 43 | values = ["bucket-owner-full-control"] 44 | } 45 | } 46 | } 47 | 48 | resource "aws_s3_bucket_policy" "audit" { 49 | bucket = "${aws_s3_bucket.abedra-audit.bucket}" 50 | policy = "${data.aws_iam_policy_document.policy.json}" 51 | } 52 | 53 | resource "aws_s3_bucket" "abedra-audit" { 54 | bucket = "abedra-audit" 55 | force_destroy = true 56 | } 57 | 58 | resource "aws_cloudtrail" "audit" { 59 | name = "audit" 60 | s3_bucket_name = "${aws_s3_bucket.abedra-audit.id}" 61 | s3_key_prefix = "audit" 62 | include_global_service_events = true 63 | depends_on = ["aws_s3_bucket_policy.audit"] 64 | } 65 | 66 | resource "aws_guardduty_detector" "workshop" { 67 | 
#!/usr/bin/env python
"""This script imports profiles into aws-vault and writes ~/.aws/config."""

import os
import subprocess
import json
import getpass
import boto3

# Temporary expect script used to feed credentials to `aws-vault add`.
EXPECT_SCRIPT_PATH = "/tmp/temp.exp"

try:
    read_input = raw_input  # Python 2
except NameError:           # Python 3
    read_input = input


def read_config():
    """Read and parse the config file."""
    with open("config.json") as config_file:
        return json.load(config_file)


def create_iam_client():
    """Create an IAM client object."""
    resource = boto3.resource('iam')
    return resource.meta.client


def create_config_directory():
    """Create the aws configuration directory if it doesn't already exist."""
    if not os.path.exists(os.path.expanduser("~/.aws")):
        os.makedirs(os.path.expanduser("~/.aws"))


def get_mfa_serial(client):
    """Return the serial number of the current user's first MFA device."""
    devices = client.list_mfa_devices(UserName=getpass.getuser())
    return devices["MFADevices"][0]["SerialNumber"]


def get_account_id():
    """Get the AWS account id via STS."""
    return boto3.client('sts').get_caller_identity().get('Account')


def add_vault(name, access_key_id, secret_key):
    """Add an entry into vault by driving `aws-vault add` with expect."""
    write_expect_script(name, access_key_id, secret_key)
    subprocess.call(["expect", EXPECT_SCRIPT_PATH])


def write_expect_script(name, access_key_id, secret_access_key):
    """Write the expect script to a temp file.

    The script contains the secret access key, so the file is created
    with owner-only permissions instead of the default world-readable
    mode that a plain open() would use in /tmp.
    """
    script = """spawn aws-vault add %s

expect "Enter Access Key ID: " { send "%s\\n" }
expect "Enter Secret Access Key: " { send "%s\\n" }

interact""" % (name, access_key_id, secret_access_key)

    fd = os.open(EXPECT_SCRIPT_PATH,
                 os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
    with os.fdopen(fd, "w") as script_file:
        script_file.write(script)


def remove_expect_script():
    """Delete the temporary expect script if it exists."""
    if os.path.exists(EXPECT_SCRIPT_PATH):
        os.remove(EXPECT_SCRIPT_PATH)


def write_config(data):
    """Write the aws config file, creating the directory if needed."""
    create_config_directory()
    with open(os.path.expanduser("~/.aws/config"), "w+") as config_file:
        config_file.write(data)


def backup_config():
    """Create a backup of the current config file."""
    if os.path.exists(os.path.expanduser("~/.aws")) and os.path.exists(os.path.expanduser("~/.aws/config")):
        os.rename(os.path.expanduser("~/.aws/config"),
                  os.path.expanduser("~/.aws/config.backup"))


def main():
    """Main"""
    config = read_config()
    iam = create_iam_client()
    mfa_serial = get_mfa_serial(iam)
    # Hoisted out of the loop: one STS call instead of one per profile.
    account_id = get_account_id()

    access_key_id = read_input("Access Key ID: ")
    secret_key = read_input("Secret Access Key: ")

    data = []

    try:
        for environment in config["environments"]:
            for role in config["roles"]:
                add_vault("%s-%s" %
                          (environment["name"], role), access_key_id, secret_key)
                data.append("[profile %s-%s]" % (environment["name"], role))
                data.append("region=%s" % environment["region"])
                data.append("output=%s" % environment["output"])
                data.append("role_arn=arn:aws:iam::%s:role/%s" %
                            (account_id, role))
                data.append("mfa_serial=%s\n" % mfa_serial)

        backup_config()
        write_config('\n'.join(data))
    finally:
        # Always scrub the credential-bearing temp file, even on failure.
        remove_expect_script()


if __name__ == '__main__':
    main()
provider "aws" {
  region = "us-east-2"
}

terraform {
  backend "s3" {
    bucket = "abedra-tfstate"
    key    = "iam/terraform.tfstate"
    region = "us-east-2"
  }
}

# resource "aws_iam_account_password_policy" "approved" {
#   minimum_password_length        = 8
#   require_lowercase_characters   = true
#   require_numbers                = true
#   require_uppercase_characters   = true
#   require_symbols                = true
#   allow_users_to_change_password = true
# }

resource "aws_iam_user" "audit" {
  name = "audit"
}

resource "aws_iam_user" "operator" {
  name = "operator"
}

variable "read_only_users" {
  type        = "list"
  description = "Users that are allowed to assume ReadOnly rights"

  default = [
    "audit",
  ]
}

variable "admin_users" {
  type        = "list"
  description = "Users that are allowed to assume full Admin rights"

  default = [
    "operator",
  ]
}

data "aws_caller_identity" "current" {}

# Trust policy: any principal in this account may attempt to assume the
# read_only role (actual access is still gated by the assume policies below).
data "aws_iam_policy_document" "read_only" {
  statement {
    actions = ["sts:AssumeRole"]

    principals {
      type        = "AWS"
      identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"]
    }

    effect = "Allow"
  }
}

resource "aws_iam_role" "read_only" {
  name               = "read_only"
  description        = "ReadOnly Access"
  assume_role_policy = "${data.aws_iam_policy_document.read_only.json}"
}

# Trust policy for the admin role: additionally requires that the caller
# authenticated with MFA.
data "aws_iam_policy_document" "admin" {
  statement {
    actions = ["sts:AssumeRole"]

    principals {
      type        = "AWS"
      identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"]
    }

    effect = "Allow"

    condition {
      test = "Bool"

      # BUG FIX: the condition key is "aws:MultiFactorAuthPresent"
      # (capital F in Factor). The previous misspelling
      # "aws:MultifactorAuthPresent" is an unknown key, so the Bool
      # condition never evaluated true and this Allow never applied.
      variable = "aws:MultiFactorAuthPresent"
      values   = ["true"]
    }
  }
}

resource "aws_iam_role" "admin" {
  name               = "admin"
  description        = "Administrator Access"
  assume_role_policy = "${data.aws_iam_policy_document.admin.json}"
}

# Policy granting the ability to call sts:AssumeRole on the read_only role.
data "aws_iam_policy_document" "assume_read_only" {
  statement {
    effect    = "Allow"
    actions   = ["sts:AssumeRole"]
    resources = ["${aws_iam_role.read_only.arn}"]
  }
}

resource "aws_iam_policy" "assume_read_only" {
  name        = "assume_read_only"
  description = "Ability to assume the read_only role"

  policy = "${data.aws_iam_policy_document.assume_read_only.json}"
}

# Policy granting the ability to call sts:AssumeRole on the admin role.
data "aws_iam_policy_document" "assume_admin" {
  statement {
    effect    = "Allow"
    actions   = ["sts:AssumeRole"]
    resources = ["${aws_iam_role.admin.arn}"]
  }
}

resource "aws_iam_policy" "assume_admin" {
  name        = "assume_admin"
  description = "Ability to assume the admin role"

  policy = "${data.aws_iam_policy_document.assume_admin.json}"
}

# NOTE: aws_iam_policy_attachment is exclusive -- it detaches the policy
# from any entity not listed here. That is acceptable in this workshop
# account; in shared accounts prefer aws_iam_role_policy_attachment /
# aws_iam_user_policy_attachment.
resource "aws_iam_policy_attachment" "read_only_policy_attachment" {
  name       = "read_only_policy_attachment"
  roles      = ["${aws_iam_role.read_only.name}"]
  policy_arn = "arn:aws:iam::aws:policy/ReadOnlyAccess"
}

resource "aws_iam_policy_attachment" "admin_policy_attachment" {
  name       = "admin_policy_attachment"
  roles      = ["${aws_iam_role.admin.name}"]
  policy_arn = "arn:aws:iam::aws:policy/AdministratorAccess"
}

resource "aws_iam_policy_attachment" "assume_read_only_policy_attachment" {
  name       = "assume_read_only_policy_attachment"
  users      = "${var.read_only_users}"
  policy_arn = "${aws_iam_policy.assume_read_only.arn}"
}

resource "aws_iam_policy_attachment" "assume_admin_policy_attachment" {
  name       = "assume_admin_policy_attachment"
  users      = "${var.admin_users}"
  policy_arn = "${aws_iam_policy.assume_admin.arn}"
}
TableName='workshop_encryption_keys', 31 | Key={ 32 | 'KeyId': { 33 | 'S': "application" 34 | } 35 | } 36 | ) 37 | 38 | decrypted_key = kms.decrypt( 39 | CiphertextBlob=base64.decodestring(encrypted_key['Item']['Value']['S']) 40 | ) 41 | 42 | return decrypted_key['Plaintext'] 43 | 44 | 45 | def main(): 46 | """Main""" 47 | key = fetch_encryption_key() 48 | message = "Attack at dawn" 49 | 50 | nonce, tag, ciphertext = encrypt(key, message) 51 | plaintext = decrypt(key, nonce, tag, ciphertext) 52 | 53 | pprint.pprint(plaintext) 54 | 55 | 56 | if __name__ == '__main__': 57 | main() 58 | -------------------------------------------------------------------------------- /workshop/kms/complete/requirements.txt: -------------------------------------------------------------------------------- 1 | pycryptodome 2 | boto3 -------------------------------------------------------------------------------- /workshop/kms/dynamo/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:2 2 | WORKDIR /src 3 | COPY requirements.txt . 4 | RUN pip install --no-cache-dir -r requirements.txt 5 | COPY insert_dek . 
#!/usr/bin/env python
"""This script generates a data encryption key and stores it in dynamodb"""

import base64
import pprint
import boto3

# Dynamo table and key id under which the encrypted DEK is stored.
TABLE_NAME = 'workshop_encryption_keys'
APPLICATION_KEY_ID = "application"


def generate_encoded_dek(kms):
    """Generate a KMS data encryption key under the workshop master key
    and return its encrypted blob, base64 encoded for storage."""
    master_key_id = kms.describe_key(KeyId='alias/workshop')["KeyMetadata"]["KeyId"]
    dek = kms.generate_data_key(
        KeyId=master_key_id,
        KeySpec='AES_256'
    )
    return base64.b64encode(dek['CiphertextBlob'])


def main():
    """Main"""
    encoded_dek = generate_encoded_dek(boto3.client('kms'))

    dynamo = boto3.client('dynamodb')

    # Store only the *encrypted* DEK; the plaintext never leaves KMS here.
    dynamo.put_item(
        TableName=TABLE_NAME,
        Item={
            'KeyId': {
                'S': APPLICATION_KEY_ID,
            },
            'Value': {
                'S': encoded_dek
            }
        }
    )

    # Read the item back to confirm the write, then show it.
    encrypted_key = dynamo.get_item(
        TableName=TABLE_NAME,
        Key={
            'KeyId': {
                'S': APPLICATION_KEY_ID
            }
        }
    )

    pprint.pprint(encrypted_key)


if __name__ == '__main__':
    main()
#!/usr/bin/env python
"""This script performs a symmetric encrypt and decrypt operation using a KMS master key"""

from pprint import pprint
import boto3
import base64


def main():
    """Main"""
    kms = boto3.client('kms')

    # Resolve the real key id behind the workshop alias.
    key_id = kms.describe_key(KeyId='alias/workshop')["KeyMetadata"]["KeyId"]

    # Encrypt directly with the KMS master key.
    encrypted = kms.encrypt(
        KeyId=key_id,
        Plaintext="Attack at dawn"
    )
    pprint(base64.b64encode(encrypted['CiphertextBlob']))

    # The ciphertext blob embeds the key id, so decrypt needs no KeyId.
    decrypted = kms.decrypt(
        CiphertextBlob=encrypted['CiphertextBlob']
    )
    pprint(decrypted['Plaintext'])


if __name__ == '__main__':
    main()
#!/usr/bin/env python
"""This script performs a local AES-256-GCM encrypt and decrypt operation"""

import pprint
from Crypto.Cipher import AES


def encrypt(key, message):
    """encrypt: return (nonce, tag, ciphertext) for the given key and message"""
    aes = AES.new(key, AES.MODE_GCM)
    ciphertext, tag = aes.encrypt_and_digest(message)
    return (aes.nonce, tag, ciphertext)


def decrypt(key, nonce, tag, ciphertext):
    """decrypt: verify the authentication tag and return the plaintext"""
    aes = AES.new(key, AES.MODE_GCM, nonce)
    return aes.decrypt_and_verify(ciphertext, tag)


def main():
    """Main"""
    # A 32-byte key selects AES-256; GCM adds authentication on top.
    key = "ThisIsASecretKeyAReallySecretKey"
    message = "Attack at dawn"

    nonce, tag, ciphertext = encrypt(key, message)
    plaintext = decrypt(key, nonce, tag, ciphertext)
    pprint.pprint(plaintext)


if __name__ == '__main__':
    main()
provider "aws" {
  region = "us-east-2"
}

# Bucket that stores all terraform remote state for the workshop.
# Versioning lets us recover from a corrupted or mistakenly pushed state;
# server side encryption protects secrets that commonly end up in state.
resource "aws_s3_bucket" "abedra-tfstate" {
  bucket        = "abedra-tfstate"
  acl           = "private"
  force_destroy = true

  versioning {
    enabled = true
  }

  server_side_encryption_configuration {
    rule {
      apply_server_side_encryption_by_default {
        sse_algorithm = "AES256"
      }
    }
  }
}

# This bootstrap configuration's own state also lives in the bucket above
# (created on a first run with local state, then migrated).
terraform {
  backend "s3" {
    bucket = "abedra-tfstate"
    key    = "setup/terraform.tfstate"
    region = "us-east-2"
  }
}
terraform { 6 | backend "s3" { 7 | bucket = "abedra-tfstate" 8 | key = "vpc/terraform.tfstate" 9 | region = "us-east-2" 10 | } 11 | } 12 | 13 | data "aws_ami" "image" { 14 | most_recent = true 15 | 16 | filter { 17 | name = "owner-alias" 18 | values = ["amazon"] 19 | } 20 | 21 | filter { 22 | name = "name" 23 | 24 | values = [ 25 | "amzn-ami-hvm-*-x86_64-gp2", 26 | ] 27 | } 28 | } 29 | 30 | resource "aws_vpc" "workshop" { 31 | cidr_block = "10.1.0.0/16" 32 | } 33 | 34 | resource "aws_subnet" "workshop_subnet" { 35 | vpc_id = "${aws_vpc.workshop.id}" 36 | cidr_block = "10.1.0.0/24" 37 | availability_zone = "us-east-2a" 38 | } 39 | 40 | resource "aws_security_group" "bastion_external" { 41 | name = "bastion_external_security_group" 42 | description = "Allowed external ports for bastion hosts" 43 | vpc_id = "${aws_vpc.workshop.id}" 44 | 45 | ingress { 46 | from_port = 22 47 | to_port = 22 48 | protocol = "tcp" 49 | cidr_blocks = ["0.0.0.0/0"] 50 | } 51 | 52 | tags { 53 | Name = "JIRA-1234" 54 | } 55 | } 56 | 57 | resource "aws_security_group" "internal_ssh" { 58 | name = "bastion_internal_security_group" 59 | description = "Allow SSH via internal instances" 60 | vpc_id = "${aws_vpc.workshop.id}" 61 | 62 | ingress { 63 | from_port = 22 64 | to_port = 22 65 | protocol = "tcp" 66 | cidr_blocks = ["10.1.0.0/16"] 67 | } 68 | 69 | tags { 70 | Name = "JIRA-456" 71 | } 72 | } 73 | 74 | resource "aws_instance" "bastion" { 75 | ami = "${data.aws_ami.image.id}" 76 | instance_type = "t2.micro" 77 | associate_public_ip_address = true 78 | subnet_id = "${aws_subnet.workshop_subnet.id}" 79 | vpc_security_group_ids = ["${aws_security_group.bastion_external.id}"] 80 | } 81 | 82 | resource "aws_security_group" "api_security_group" { 83 | name = "api_security_group" 84 | description = "Allowed inbound ports for api" 85 | vpc_id = "${aws_vpc.workshop.id}" 86 | 87 | ingress { 88 | from_port = 80 89 | to_port = 80 90 | protocol = "tcp" 91 | cidr_blocks = ["0.0.0.0/0"] 92 | } 93 | 94 | tags 
{ 95 | Name = "JIRA-789" 96 | } 97 | } 98 | 99 | resource "aws_security_group" "tls_security_group" { 100 | name = "tls_security_group" 101 | description = "Allowed inbound ports for api" 102 | vpc_id = "${aws_vpc.workshop.id}" 103 | 104 | ingress { 105 | from_port = 443 106 | to_port = 443 107 | protocol = "tcp" 108 | cidr_blocks = ["0.0.0.0/0"] 109 | } 110 | 111 | tags { 112 | Name = "JIRA-91011" 113 | } 114 | } 115 | 116 | resource "aws_instance" "api" { 117 | ami = "${data.aws_ami.image.id}" 118 | instance_type = "t2.micro" 119 | associate_public_ip_address = true 120 | subnet_id = "${aws_subnet.workshop_subnet.id}" 121 | 122 | vpc_security_group_ids = [ 123 | "${aws_security_group.api_security_group.id}", 124 | "${aws_security_group.internal_ssh.id}", 125 | ] 126 | } 127 | --------------------------------------------------------------------------------