├── .gitignore ├── LICENSE ├── README.md ├── aws-scripts ├── __init__.py ├── ec2-ebs.py ├── ec2-elb.py ├── ec2-instance-state.py ├── ec2-instances.py ├── ec2-reserved.py ├── ec2-sg.py ├── ec2-snap-mgmt.py ├── ec2-tg.py ├── lifecycle-hook-worker.py ├── mongodb-backup.py ├── rds-instances.py ├── role.py ├── route53-del-hostname.py ├── route53-set-hostname.py └── s3-download-file.py ├── img └── demo.png ├── requirements.txt ├── setup.py └── tests └── __init__.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | .DS_Store 3 | client_secret.json 4 | credentials.json 5 | build 6 | aws_scripts.egg-info 7 | dist 8 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Marcos Martínez 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![PyPI](https://img.shields.io/pypi/v/aws-scripts.svg)](https://pypi.org/project/aws-scripts/) 2 | [![license](https://img.shields.io/github/license/mashape/apistatus.svg)](https://opensource.org/licenses/MIT) 3 | 4 | aws-scripts 5 | =========== 6 | 7 | Here you will find some useful AWS scripts I use often. 8 | 9 | 10 | All the scripts relies on [Boto](http://aws.amazon.com/sdkforpython/), a Python package that provides interfaces to Amazon Web Services. 11 | 12 | So, to use these scripts, you need to install Boto and provide your AWS credentinals: 13 | 14 | To install aws-scripts and all the required Python packages just type: 15 | 16 | ``` 17 | pip install aws-scripts 18 | ``` 19 | 20 | If dependencies are already satisfied, nothing will be installed. 21 | 22 | If you already have aws-scripts installed in your computer you can update to the latest version as follows: 23 | 24 | ``` 25 | pip install --upgrade aws-scripts 26 | ``` 27 | 28 | To provide your AWS credentials use the boto/boto3 config file `~/.aws/credentials`: 29 | 30 | ``` ini 31 | [default] 32 | aws_access_key_id = 33 | aws_secret_access_key = 34 | region=xx-xxxx-x 35 | ``` 36 | 37 | > Note that you can use the environment variable: ```AWS_DEFAULT_REGION=xx-xxxx-x``` to override the default region on the config file. 
38 | > In the ec2-instances.py script you can also use the ```--region``` option for the same purpose 39 | 40 | ec2-instances.py 41 | ---------------- 42 | 43 | Lists the EC2 instances including the Name Tag, IP, type, zone, vpc, subnet and the status. 44 | 45 | You can filter the result by name, type, status and/or public or private IP address. Or you can provide a list of instance IDs instead. 46 | 47 | Finally you can execute remote commands on all the instances returned by the filter or the list. 48 | 49 | The execution method support both: direct connections against the public instance IP or using a bastion host. 50 | 51 | When this method is used, the .ssh/config file is used to establish the connection. 52 | 53 | ![Demo](https://github.com/frommelmak/aws-scripts/raw/master/img/demo.png) 54 | 55 | The '-h' option shows you how to use the available options. 56 | 57 | ``` 58 | usage: ec2-instances.py [-h] [-n NAME] [-t TYPE] [-s STATUS] [-z ZONE] [-v VPC] [-S SUBNET] [--public_ip PUBLIC_IP] [--private_ip PRIVATE_IP] [-l ID_LIST [ID_LIST ...]] [-i IGNORE] [-e EXECUTE] [-r REGION] 59 | [-u USER] [-c {direct,bastion-host}] 60 | 61 | Shows a list with your EC2 instances, then you can execute remote commands on those instances. 62 | 63 | options: 64 | -h, --help show this help message and exit 65 | -n NAME, --name NAME Filter result by name. 66 | -t TYPE, --type TYPE Filer result by type. 67 | -s STATUS, --status STATUS 68 | Filter result by status. 69 | -z ZONE, --zone ZONE Filter result by Availability Zone. 70 | -v VPC, --vpc VPC Filter result by VPC Id. 71 | -S SUBNET, --subnet SUBNET 72 | Filter result by Subnet Id. 73 | --public_ip PUBLIC_IP 74 | Filter result by public ip address. You can provide the whole IP address string or just a portion of it. 75 | --private_ip PRIVATE_IP 76 | Filter result by private ip adreess. You can provide the whole IP address string or just a portion of it. 77 | -l ID_LIST [ID_LIST ...], --id_list ID_LIST [ID_LIST ...] 
78 | Do not filter the result. Provide a InstanceIds list instead. 79 | -i IGNORE, --ignore IGNORE 80 | Do not show hosts lines containing the "IGNORE" pattern in the tag Name 81 | -e EXECUTE, --execute EXECUTE 82 | Execute a command on instances 83 | -r REGION, --region REGION 84 | Specify an alternate region to override the one defined in the .aws/credentials file 85 | -u USER, --user USER User to run commands (if -e option is used). A user is always required, even if you have one defined in .ssh/config file 86 | -c {direct,bastion-host}, --connection_method {direct,bastion-host} 87 | The Method to connect to the instance (if -e option is used). If the instance exposes the SSH port on a public IP, use direct. Otherwhise choose bastion-host. This method look for 88 | the hostname and username inside the .ssh/config file to reach the target instance. 89 | ``` 90 | 91 | ec2-reserved.py 92 | ---------------- 93 | 94 | Lists details of all your Instance Reservations, including a summary of the active reservations by type and size. 95 | 96 | The summary also shows your reserved active capacity after apply the normalization factor. This is useful to compare the reserved capacity with the deployed in production. 97 | 98 | You can also use the option `--create-google-calendar-events` to add the expiration date of the active reservations in your Google Calendar Account. 99 | 100 | ``` 101 | usage: ec2-reserved.py [-h] 102 | [-s {payment-pending,active,payment-failed,retired}] 103 | [--create-google-calendar-events] [-t TYPE] 104 | 105 | Show reserved EC2 instances 106 | 107 | optional arguments: 108 | -h, --help show this help message and exit 109 | -s {payment-pending,active,payment-failed,retired}, --state {payment-pending,active,payment-failed,retired} 110 | Filer result by reservation state. 
111 | --create-google-calendar-events 112 | Create events in your Google Calendar, using the 113 | expiration dates of your active reservations 114 | -t TYPE, --type TYPE Filer result by instance type. 115 | ``` 116 | 117 | To use the Google calendar feature you just have to [enable the calendar API in your Google Account](https://console.developers.google.com) and create a calendar called aws in the [Google Calendar](http://calendar.google.com/). Then create the *OAuth client ID* credentials. Download the credentials file and save it as `client_secret.json` in the aws-scripts folder repo. When you run the script using the `--create-google-calendar-events` option for the first time, a web browser will be opened asking your to login with the Google account you want to use. 118 | 119 | Then, whenever you buy new reservations on Amazon Web Services, you can add the new reservations in your calendar by just running the script. 120 | 121 | 122 | ec2-instance-state.py 123 | --------------------- 124 | Set the desired state for an EC2 instance or a list of instances. 125 | 126 | You can use it form a cron task in order to manage the instance state of one or several instances. 127 | You can even use it without providing the IAM user credentials, thanks to the assume role support. 128 | 129 | The '-h' optipn shows you how to use the available options. 130 | 131 | ``` 132 | usage: ec2-instance-state.py [-h] [-s {stop,start,reboot,terminate}] -l ID_LIST [ID_LIST ...] [--role_arn ROLE_ARN] [-r REGION] 133 | 134 | Set desired EC2 instance state 135 | 136 | optional arguments: 137 | -h, --help show this help message and exit 138 | -s {stop,start,reboot,terminate}, --state {stop,start,reboot,terminate} 139 | Set the desired state for the instances provided 140 | -l ID_LIST [ID_LIST ...], --id_list ID_LIST [ID_LIST ...] 
141 | InstanceIds list 142 | --role_arn ROLE_ARN If the script run on an EC2 instance with an IAM role attached, then the Security Token Service will provide a set of temporary credentials allowing the actions of the assumed role. 143 | With this method, no user credentials are required, just the Role ARN to be assumed. 144 | -r REGION, --region REGION 145 | Specify an alternate region to override the one defined in the .aws/credentials file 146 | ``` 147 | 148 | This is an example of the minimum permissions required in the Role Policy in order to auto-stop an instance from a cron job. 149 | 150 | ``` 151 | { 152 | "Version": "2012-10-17", 153 | "Statement": [ 154 | { 155 | "Sid": "VisualEditor0", 156 | "Effect": "Allow", 157 | "Action": "ec2:StopInstances", 158 | "Resource": "arn:aws:ec2:::instance/" 159 | }, 160 | { 161 | "Sid": "VisualEditor1", 162 | "Effect": "Allow", 163 | "Action": "sts:AssumeRole", 164 | "Resource": "arn:aws:iam:::role/" 165 | } 166 | ] 167 | } 168 | ``` 169 | 170 | ec2-sg.py 171 | --------- 172 | 173 | Lists the EC2 Security Groups within an AWS region. The result can be filtered by name. 174 | You can also show the Inbound and Outbound rules of the chosen security group. 175 | 176 | As a sysadmin and/or developer, you or your team mates, will probably find yourself updating your public IP address frequently in order to gain SSH access to your EC2 instances. 177 | 178 | This command help you to do so. Just use the argument `--allow_my_public_ip` providing the Security Group ID and the Security Group Rule ID you want to update. The command will find out your public IP and will update the rule allowing you the SSH access. 
179 | 180 | ``` 181 | usage: ec2-sg.py [-h] [-n NAME] [-l GID_LIST [GID_LIST ...]] [-r REGION] [-s SHOW] [--allow_my_public_ip ALLOW_MY_PUBLIC_IP] [--security_group_rule_id SECURITY_GROUP_RULE_ID] [--description DESCRIPTION] 182 | 183 | Security Groups Management 184 | 185 | options: 186 | -h, --help show this help message and exit 187 | -n NAME, --name NAME Filter result by group name. 188 | -l GID_LIST [GID_LIST ...], --gid_list GID_LIST [GID_LIST ...] 189 | Do not filter the result. Provide a GroupIds list instead. 190 | -r REGION, --region REGION 191 | Specify an alternate region to override the one defined in the .aws/credentials file 192 | -s SHOW, --show SHOW Show inbound and outbound rules for the provided SG ID 193 | --allow_my_public_ip ALLOW_MY_PUBLIC_IP 194 | Modify the SSH inbound rule with your current public IP address inside the provided Security Group ID. 195 | --security_group_rule_id SECURITY_GROUP_RULE_ID 196 | Modify the SSH inbound rule with your current public IP address inside the provided Security Group Rule ID 197 | --description DESCRIPTION 198 | Allows you to append a string to the rule description field 199 | ``` 200 | 201 | ec2-ebs.py 202 | ---------- 203 | Lists the EC2 EBS volumes including the Name Tag, size, device, ID, attached instance ID, Attached instance Tag Name, type, IOPS, zone and status. 204 | 205 | You can filter the result by type, status and Tag name. 206 | 207 | The '-h' option shows you how to use the available options. 208 | 209 | ``` 210 | usage: ec2-ebs.py [-h] [-n NAME] [-t {gp2,io1,st1,sc1,standard}] [-s {creating,available,in-use,deleting,deleted,error}] 211 | 212 | List all the Elastic Block Storage volumes 213 | 214 | optional arguments: 215 | -h, --help show this help message and exit 216 | -n NAME, --name NAME Filter result by name. 217 | -t {gp2,io1,st1,sc1,standard}, --type {gp2,io1,st1,sc1,standard} 218 | Filer result by type. 
219 | -s {creating,available,in-use,deleting,deleted,error}, --status {creating,available,in-use,deleting,deleted,error} 220 | Filter result by status. 221 | ``` 222 | 223 | ec2-elb.py 224 | ---------- 225 | 226 | Lists all your Elastic Load Balancers and his related instances. 227 | 228 | ``` 229 | usage: ec2-elb.py [-h] [-t {classic,current,all}] 230 | 231 | For every Elastic Load Balancer list the attached instances 232 | 233 | options: 234 | -h, --help show this help message and exit 235 | -t {classic,current,all}, --type {classic,current,all} 236 | It shows the current generation of ELBs (Application, Network and/or Gateway) and/or the previous one (Classic). 237 | ``` 238 | 239 | ec2-tg.py 240 | --------- 241 | 242 | Without parameters just lists the target groups within a region. You can also list the targets in a given target group. Finally, you can also register or deregister targets to/from a group. 243 | 244 | ``` 245 | usage: ec2-tg.py [-h] [-s SHOW] [-a {register,deregister}] [--target_type {instances,ip_address,lambda_function,alb}] [--targets_id_list TARGETS_ID_LIST [TARGETS_ID_LIST ...]] 246 | [--target_group_arn TARGET_GROUP_ARN] [--role_arn ROLE_ARN] [-r REGION] 247 | 248 | Shows a list of Target Grops. Also allows you to register/deregister targets in/from a provided Targer Group 249 | 250 | options: 251 | -h, --help show this help message and exit 252 | -s SHOW, --show SHOW Shows the target for the provided Target Group ARN 253 | -a {register,deregister,details}, --action {register,deregister,details} 254 | Set the desired action. 255 | --target_type {instances,ip_address,lambda_function,alb} 256 | Set the desired state for the instances provided 257 | --targets_id_list TARGETS_ID_LIST [TARGETS_ID_LIST ...] 
258 | Targets Id list 259 | --target_group_arn TARGET_GROUP_ARN 260 | Target Group ARN 261 | --role_arn ROLE_ARN If the script run on an EC2 instance with an IAM role attached, then the Security Token Service will provide a set of temporary credentials allowing the actions of the assumed role. 262 | With this method, no user credentials are required, just the Role ARN to be assumed. 263 | -r REGION, --region REGION 264 | Specify the region to override the one setted in the credentials file or if you are using --role_arn. 265 | ``` 266 | 267 | This is the minimum permissions required in the assumed role policy in order to allow register or deregister a target from the same target. 268 | 269 | ``` 270 | { 271 | "Version": "2012-10-17", 272 | "Statement": [ 273 | { 274 | "Sid": "VisualEditor0", 275 | "Effect": "Allow", 276 | "Action": [ 277 | "elasticloadbalancing:RegisterTargets", 278 | "elasticloadbalancing:DeregisterTargets" 279 | ], 280 | "Resource": "arn:aws:elasticloadbalancing:::targetgroup//" 281 | }, 282 | { 283 | "Sid": "VisualEditor1", 284 | "Effect": "Allow", 285 | "Action": "sts:AssumeRole", 286 | "Resource": "arn:aws:iam:::role/TPS-TargetGroup-management" 287 | } 288 | ] 289 | } 290 | ``` 291 | 292 | In addition, you need to perform a little modification in the Trusted Relationships to allow the Elastic Load Balancing as a AWS service or as role session. 
293 | 294 | ``` 295 | { 296 | "Version": "2012-10-17", 297 | "Statement": [ 298 | { 299 | "Effect": "Allow", 300 | "Principal": { 301 | "Service": [ 302 | "elasticloadbalancing.amazonaws.com", 303 | "ec2.amazonaws.com" 304 | ] 305 | }, 306 | "Action": "sts:AssumeRole" 307 | } 308 | ] 309 | } 310 | ``` 311 | 312 | ``` 313 | { 314 | "Version": "2012-10-17", 315 | "Statement": [ 316 | { 317 | "Effect": "Allow", 318 | "Principal": { 319 | "AWS": "arn:aws:sts:::assumed-role//", 320 | "Service": "ec2.amazonaws.com" 321 | }, 322 | "Action": "sts:AssumeRole" 323 | } 324 | ] 325 | } 326 | ``` 327 | 328 | ec2-snap-mgmt.py 329 | ---------------- 330 | 331 | With this script you can see the relationships between your snapshots and your EBS volumes and AMIs. This allows you to choose the snapshots you don't need to keep in the AWS S3 service. 332 | 333 | By default the script shows all the volumes and AMIs related to each snapshost. 334 | 335 | You you can also show all the snapshots related with each volume. This option is specially usefull when you only want to keep a certain number of snapshots per volume. 336 | 337 | Finally, you can show all the snapshots related with each AMI. 338 | 339 | The '-h' option shows you how to use the available options. 340 | 341 | ``` 342 | usage: ec2-snap-mgmt.py [-h] [-v {orphan,volumes}] owner_id 343 | 344 | positional arguments: 345 | owner_id 12-digit AWS Account Number 346 | 347 | optional arguments: 348 | -h, --help show this help message and exit 349 | -v {orphan,volumes,images}, --view {orphan,volumes,images} 350 | Available views: orphan and volumes. Orphan is the 351 | default one. 352 | ``` 353 | 354 | The script doesn't delete anything actually, just shows you the relationship in a tree view. 355 | 356 | mongodb-backup.py 357 | ----------------- 358 | 359 | This is a tool to make MongoDB backups on Amazon. 360 | 361 | Two methods are supported: dump and snapshot. 
362 | 363 | - For the first one It uses `mongodump` to perform a binary backup of your local or remote MongoDB instance. The dumped files are compressed in a tarball file and uploaded to a Amazon S3 bucket. 364 | - For the snapshot method, you can provide the data and / or the journal volumes and the script automatically will lock the database and will suspend all the writes during the backup process to ensure the consistency of the backup if required. 365 | 366 | For the dump method, you can specify the number of copies to retain in the bucket or in the EC2 snapshot area. The oldest ones will be automatically removed. 367 | 368 | ``` 369 | usage: mongodb-backup.py [-h] [-m {dump,snapshot}] [-u USER] [-p PASSWORD] [-H HOST] [-d DATABASE] [-c COLLECTION] [-e EXCLUDE_COLLECTION] [-o OUT] [-n NUMBER] [-b BUCKET] [-P PREFIX] 370 | [-v VOLUME_ID [VOLUME_ID ...]] [--no_journal] [-r REGION] 371 | 372 | A tool to make mongodb backups on Amazon 373 | 374 | optional arguments: 375 | -h, --help show this help message and exit 376 | -m {dump,snapshot}, --method {dump,snapshot} 377 | Backup method. Dump if none is provided 378 | -u USER, --user USER Mongodb user (optional) 379 | -p PASSWORD, --password PASSWORD 380 | Mongodb password (optional) 381 | -H HOST, --host HOST Mongodb host: :. By default: localhost:27017 382 | -d DATABASE, --database DATABASE 383 | For the dump method: The database to backup (all if not provided) 384 | -c COLLECTION, --collection COLLECTION 385 | For the dump method: The collection to backup. Requires '-d' option 386 | -e EXCLUDE_COLLECTION, --exclude_collection EXCLUDE_COLLECTION 387 | For the dump method: The collection to exclude from backup. Requires '-d' option 388 | -o OUT, --out OUT For the dump method: The output directory for dumped files 389 | -n NUMBER, --number NUMBER 390 | Number of copies to retain 391 | -b BUCKET, --bucket BUCKET 392 | For the dump method: Amazon s3 bucket. 
393 | -P PREFIX, --prefix PREFIX 394 | For the dump method: For grouped objects aka s3 folders, provide the prefix key 395 | -v VOLUME_ID [VOLUME_ID ...], --volume_id VOLUME_ID [VOLUME_ID ...] 396 | For the snapshot method: Provide the data and journal volume_id list to snapshot: If data and journal resides in a separate volumes, both volumes are required. 397 | --no_journal For the snapshot method: If pressent, the instance is either running without journaling or has the journal files on a separate volume, you must flush all writes to disk and lock the 398 | database to prevent writes during the backup process. 399 | -r REGION, --region REGION 400 | Specify an alternate region to override the one defined in the .aws/credentials file 401 | 402 | ``` 403 | 404 | route53-set-hostname.py 405 | ----------------------- 406 | 407 | This script allows you to automatically set predictable DNS records for instances launched using AWS Auto Scaling. 408 | 409 | It is intended to be executed from the ec2 instance at launch time. 410 | The script looks for an available name matching the provided pattern in the DNS zone. Then, it adds this name as a CNAME record in the DNS zone pointing to the EC2 instance public name. 411 | 412 | ``` 413 | usage: route53-set-hostname.py [-h] --HostedZoneId HOSTEDZONEID --HostStr 414 | HOSTSTR [--rangeSize RANGESIZE] [--dryrun] 415 | 416 | AWS Route53 hostname managment for Autoscaled EC2 Instances 417 | 418 | optional arguments: 419 | -h, --help show this help message and exit 420 | --HostedZoneId HOSTEDZONEID 421 | The ID of the hosted zone where the new resource 422 | record will be added. 423 | --HostStr HOSTSTR The host string used to build the new name 424 | --rangeSize RANGESIZE 425 | The maximun number to be assigned. 
The first available 426 | will be used 427 | --dryrun Shows what is going to be done but doesn't change 428 | anything actually 429 | ``` 430 | 431 | Example: 432 | 433 | ``` bash 434 | user@host:~$ ./route53-set-hostname.py --HostedZoneId XXXXXXXXXXXXXX --HostStr websrv --rangeSize 10 435 | 15:41:58 06/09/16: creating CNAME websrv03.example.com. -> ec2-XX-XX-XXX-XX.compute-1.amazonaws.com......INSYNC 436 | ``` 437 | 438 | route53-del-hostname.py 439 | ----------------------- 440 | 441 | This script is executed from the ec2 instance at shutdown. 442 | The script delete his host record zone from the passed DNS zone identifier. 443 | 444 | ``` 445 | usage: route53-del-hostname.py [-h] --HostedZoneId HOSTEDZONEID [--dryrun] 446 | 447 | AWS Route53 hostname managment for Autoscaled EC2 Instances 448 | 449 | optional arguments: 450 | -h, --help show this help message and exit 451 | --HostedZoneId HOSTEDZONEID 452 | The ID of the hosted zone where the new resource 453 | record will be added. 454 | --dryrun Shows what is going to be done but doesn't change 455 | anything actually 456 | ``` 457 | 458 | s3-download-file.py 459 | ------------------- 460 | 461 | This script just download the requested S3 object. 462 | 463 | ``` 464 | usage: s3-download-file.py [-h] -b BUCKET -o OBJECTKEY -f FILEPATH 465 | 466 | Donwload file from AWS S3 467 | 468 | optional arguments: 469 | -h, --help show this help message and exit 470 | -b BUCKET, --bucket BUCKET 471 | The bucket name. 472 | -o OBJECTKEY, --objectkey OBJECTKEY 473 | The host string used to build the new name 474 | -f FILEPATH, --filepath FILEPATH 475 | The filepath of the file to be saved 476 | ``` 477 | 478 | lifecycle-hook-worker.py 479 | ------------------------ 480 | 481 | As its own name says, this worker is designed to use auto scaling [lifecycle hooks](http://docs.aws.amazon.com/autoscaling/latest/userguide/lifecycle-hooks.html). 
482 | 483 | The process looks for incoming messages into the SQS queue associated with the autoscaling group. Then, when a message comes for the instance, it is consumed and the associated custom action is triggered. Finally, using the lifecycle action token, the worker completes the autoscaling action going on with the launch or ending the instance action. 484 | 485 | ``` 486 | usage: lifecycle-hook-worker.py [-h] -q QUEUE -s {LAUNCHING,TERMINATING} -g 487 | GROUP -H HOOKNAME -e EXECUTE [-w WAIT] 488 | 489 | SQS Lifecycle hook consumer and trigger 490 | 491 | optional arguments: 492 | -h, --help show this help message and exit 493 | -q QUEUE, --queue QUEUE 494 | Queue resource. 495 | -s {LAUNCHING,TERMINATING}, --state {LAUNCHING,TERMINATING} 496 | Indicates if the consumer is waiting for LAUNCHING or 497 | TERMINATING state 498 | -g GROUP, --group GROUP 499 | Auto Scaling Group Name 500 | -H HOOKNAME, --hookName HOOKNAME 501 | Life Cycle Hook Name 502 | -e EXECUTE, --execute EXECUTE 503 | The filepath of the triggered script 504 | -w WAIT, --wait WAIT Time between query loops in seconds (default: 60) 505 | ``` 506 | 507 | rds-instances.py 508 | ---------------- 509 | 510 | Shows the main info regarding all the RDS instances such as: endpoint, engine, version, status etc. 
511 | 512 | ``` 513 | usage: rds-instances.py [-h] 514 | 515 | List all the RDS instances 516 | 517 | optional arguments: 518 | -h, --help show this help message and exit 519 | ``` 520 | -------------------------------------------------------------------------------- /aws-scripts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/frommelmak/aws-scripts/b84ec8d9f6a353c8306a3edb909ede3ef2c7656e/aws-scripts/__init__.py -------------------------------------------------------------------------------- /aws-scripts/ec2-ebs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | import boto3 4 | import sys 5 | import argparse 6 | from rich.console import Console 7 | from rich.table import Table 8 | 9 | def list_volumes(Filter): 10 | ec2 = boto3.resource('ec2') 11 | # Getting all the instances ids and his tag Name 12 | instances_lst=[] 13 | instances = ec2.instances.all() 14 | for i in instances: 15 | try: 16 | if i.tags is not None: 17 | name = next((item for item in i.tags if item["Key"] == "Name")) 18 | else: 19 | name['Value'] = '' 20 | except StopIteration: 21 | name['Value'] = '' 22 | item={'id': i.id, 'name': name['Value'],} 23 | instances_lst.append(item) 24 | 25 | 26 | volumes = ec2.volumes.filter(Filters=Filter) 27 | table = Table() 28 | table.add_column("num", justify="right", no_wrap=True) 29 | table.add_column("Name", style="green") 30 | table.add_column("Size", style="red") 31 | table.add_column("device", style="green") 32 | table.add_column("Volume ID", justify="right", style="cyan") 33 | table.add_column("Instance ID", justify="right", style="cyan") 34 | table.add_column("Instance Tag Name", justify="right", style="green") 35 | table.add_column("Type", style="green") 36 | table.add_column("IOPS", style="red") 37 | table.add_column("Zone", style="green") 38 | table.add_column("Status", 
style="green") 39 | num = 1 40 | vols = [] 41 | name = {} 42 | for i in volumes: 43 | try: 44 | if i.tags is not None: 45 | name = next((item for item in i.tags if item["Key"] == "Name")) 46 | else: 47 | name['Value'] = '' 48 | except StopIteration: 49 | name['Value'] = '' 50 | 51 | if len(i.attachments) == 0: 52 | device = '' 53 | instance_id = '' 54 | elif len(i.attachments) == 1: 55 | device = i.attachments[0]['Device'] 56 | instance_id = i.attachments[0]['InstanceId'] 57 | else: 58 | # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volumes-multi.html 59 | device = 'Multi-Attach' 60 | instance_id = 'Multi-Attach' 61 | 62 | ec2_attached = next((instance for instance in instances_lst if instance["id"] == instance_id), {'id': instance_id, 'name':''}) 63 | #print(tag_name['name']) 64 | table.add_row( 65 | str(num), 66 | name['Value'], 67 | str(i.size) + " GB", 68 | device, 69 | i.volume_id, 70 | instance_id, 71 | ec2_attached['name'], 72 | i.volume_type, 73 | str(i.iops), 74 | i.availability_zone, 75 | i.state 76 | ) 77 | num = num + 1 78 | console = Console() 79 | console.print(table) 80 | 81 | 82 | def main(): 83 | parser = argparse.ArgumentParser(description='List all the Elastic Block Storage volumes') 84 | parser.add_argument('-n', '--name', 85 | help="Filter result by name.") 86 | parser.add_argument('-t', '--type', 87 | choices=['gp3', 'gp2', 'io1', 'st1', 'sc1', 'standard'], 88 | help="Filer result by type.") 89 | parser.add_argument('-s', '--status', 90 | choices=['creating','available','in-use','deleting','deleted','error'], 91 | help="Filter result by status." 
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""For every Elastic Load Balancer (classic and current generation) list the attached targets."""
import boto3
import sys
import argparse
from rich.tree import Tree
from rich import print
from rich.progress import track

def get_info(ec2, id):
    """Return (Name tag value, availability zone) for the given EC2 instance id.

    The name falls back to an empty string when the instance has no 'Name' tag.
    """
    instance = ec2.Instance(id)
    name = ""
    # instance.tags is None when the instance carries no tags at all
    for tag in (instance.tags or []):
        if tag['Key'] == 'Name':
            name = tag['Value']
    return name, instance.placement['AvailabilityZone']

def list_elb(ec2, region):
    """Print a tree of classic ELBs: load balancer -> availability zone -> instance health."""
    client = boto3.client('elb')
    descriptions = client.describe_load_balancers().get('LoadBalancerDescriptions', [])
    tree = Tree("[bold white]Classic Elastic Load Balancers in the "+region+" AWS region")
    for description in track(descriptions, description="Procesing..."):
        load_balancer_name = description.get('LoadBalancerName')
        elb_tree = tree.add(":file_folder: [bold white]"+load_balancer_name)
        instances_info = []
        for member in description.get('Instances', []):
            instance_id = member.get('InstanceId')
            instance_name, instance_zone = get_info(ec2, instance_id)
            # Health is queried per instance; the original whole-ELB
            # describe_instance_health call was unused and has been dropped.
            instance_state = client.describe_instance_health(
                LoadBalancerName=load_balancer_name,
                Instances=[{'InstanceId': instance_id}]).get('InstanceStates')[0].get('State')
            instances_info.append({"id": instance_id, "name": instance_name,
                                   "zone": instance_zone, "state": str(instance_state)})
        for zone_name in description.get('AvailabilityZones', []):
            zone_tree = elb_tree.add(":file_folder: [bold white]"+zone_name)
            for info in instances_info:
                if info.get('zone') != zone_name:
                    continue
                state_color = "[red]" if info.get('state') == 'OutOfService' else "[green]"
                zone_tree.add("[cyan]"+info.get('id')+" [white]("+info.get('name')+")"
                              +" Status: "+state_color+info.get('state'))
    print(tree)

def list_elbv2(ec2, region):
    """Print a tree of ALB/NLB/GWLBs: ELB -> target group -> availability zone -> target health."""
    client = boto3.client('elbv2')
    load_balancers = client.describe_load_balancers().get('LoadBalancers', [])
    tree = Tree("[bold white]Current Generation of Elastic Load Balancers in %s AWS region" % region)
    for lb in track(load_balancers, description="Procesing..."):
        elb_branch = tree.add(":file_folder:[bold white] "+lb.get('LoadBalancerName')
                              +" Type: "+lb.get('Type'))
        tg_folder = elb_branch.add(":file_folder: Target Groups")
        target_groups = client.describe_target_groups(LoadBalancerArn=lb.get('LoadBalancerArn'))
        for tg_dict in target_groups.get('TargetGroups', []):
            tg_branch = tg_folder.add(":file_folder:[bold white] "+tg_dict.get('TargetGroupName')
                                      +" [not bold]Type: "+tg_dict.get('TargetType'))
            target_health = client.describe_target_health(TargetGroupArn=tg_dict.get('TargetGroupArn'))
            for az in lb.get('AvailabilityZones', []):
                zone_name = az.get('ZoneName')
                zone_tree = tg_branch.add(":file_folder: [bold white]"+zone_name)
                for desc in target_health.get('TargetHealthDescriptions', []):
                    target_id = desc.get('Target').get('Id')
                    target_state = desc.get('TargetHealth').get('State')
                    target_state_desc = desc.get('TargetHealth').get('Description')
                    if tg_dict.get('TargetType') == 'instance':
                        target_name, target_zone = get_info(ec2, target_id)
                    else:
                        # BUG FIX: the original called get_info() for every target,
                        # which fails for ip/lambda/alb targets (not EC2 instances).
                        # Use the zone reported by describe_target_health instead.
                        target_name = tg_dict.get('TargetType')
                        target_zone = desc.get('Target').get('AvailabilityZone')
                    if target_state == 'healthy':
                        target_state = "[green]"+target_state
                    elif target_state == 'unhealthy':
                        target_state = "[red]"+target_state
                    else:
                        target_state = "[orange1]"+target_state
                    if target_zone == zone_name:
                        zone_tree.add("[cyan]"+target_id+" [white]("+target_name+") [white]Status: "
                                      +str(target_state)+" [white]Description: "+str(target_state_desc))
    print(tree)

def main():
    parser = argparse.ArgumentParser(description='For every Elastic Load Balancer list the attached instances')
    parser.add_argument('-t', '--type', choices=['classic', 'current', 'all'],
                        default="all",
                        help="It shows the current generation of ELBs (Application, Network and/or Gateway) and/or the previous one (Classic).")
    arg = parser.parse_args()

    session = boto3.session.Session()
    region = session.region_name

    ec2 = boto3.resource('ec2')
    if arg.type in ('classic', 'all'):
        list_elb(ec2, region)
    if arg.type in ('current', 'all'):
        list_elbv2(ec2, region)

if __name__ == '__main__':
    sys.exit(main())
#!/usr/bin/env python
"""List EC2 instances matching filters and optionally run a remote command on them."""
import boto3
import sys
import argparse
from fabric import Connection
import re
from rich.console import Console
from rich.table import Table

def list_instances(Filter, RegionName, InstanceIds, IgnorePattern):
    """Print a table of matching instances and return them as a list of dicts.

    Each returned item has: id, public_ip, private_ip, hostname (Name tag) and
    status. Instances whose Name tag matches IgnorePattern are neither shown
    nor returned, but still advance the row counter so numbering matches the
    unfiltered listing.
    """
    ec2 = boto3.resource('ec2', region_name=RegionName)
    # Materialize the lazy collection once; the original iterated it twice and
    # re-created the Table object on every iteration of the counting loop.
    instances = list(ec2.instances.filter(Filters=Filter, InstanceIds=InstanceIds))
    hosts = []
    if not instances:
        return hosts
    table = Table()
    table.add_column("num", justify="right", no_wrap=True)
    table.add_column("Name", style="green")
    table.add_column("Public IP", style="red")
    table.add_column("Private IP", style="red")
    table.add_column("ID", justify="right", style="cyan")
    table.add_column("Type", justify="right", style="green")
    table.add_column("Zone", justify="right", style="green")
    table.add_column("VPC", style="cyan")
    table.add_column("Subnet", style="cyan")
    table.add_column("Status")
    pattern = re.compile(IgnorePattern)
    num = 1
    for i in instances:
        # BUG FIX: the original reused one mutable `name` dict across
        # iterations; compute a fresh name string per instance instead.
        name = ''
        if i.tags is not None:
            name = next((tag['Value'] for tag in i.tags if tag['Key'] == 'Name'), '')
        if len(IgnorePattern) > 0 and pattern.search(name):
            # IgnorePattern found: hide the row but keep numbering aligned.
            num = num + 1
            continue
        # Stopped instances are rendered dimmed; same columns either way.
        row_style = 'italic grey42' if i.state['Name'] == 'stopped' else None
        table.add_row(
            str(num),
            name,
            i.public_ip_address,
            i.private_ip_address,
            i.id,
            i.instance_type,
            i.placement['AvailabilityZone'],
            i.vpc_id,
            i.subnet_id,
            i.state['Name'],
            style=row_style,
        )
        num = num + 1
        hosts.append({'id': i.id, 'public_ip': i.public_ip_address,
                      'private_ip': i.private_ip_address, 'hostname': name,
                      'status': i.state['Name']})
    Console().print(table)
    return hosts

def execute_cmd(host, user, cmd, connection_method):
    """Run *cmd* on *host* over SSH and return the fabric result.

    Both connection methods open the same kind of Connection; with
    'bastion-host' the hostname/user are expected to be resolved through
    ~/.ssh/config, with 'direct' the user is passed explicitly. The original
    had two identical branches; they are merged here.
    """
    if connection_method in ('bastion-host', 'direct'):
        return Connection(host=host, user=user).run(cmd, hide=True, warn=True)

def main():
    parser = argparse.ArgumentParser(description='Shows a list with your EC2 instances, then you can execute remote commands on those instances.')
    parser.add_argument('-n', '--name',
                        help="Filter result by name.")
    parser.add_argument('-t', '--type',
                        help="Filer result by type.")
    parser.add_argument('-s', '--status',
                        help="Filter result by status.")
    parser.add_argument('-z', '--zone',
                        help="Filter result by Availability Zone.")
    parser.add_argument('-v', '--vpc',
                        help="Filter result by VPC Id.")
    parser.add_argument('-S', '--subnet',
                        help="Filter result by Subnet Id.")
    parser.add_argument('--public_ip',
                        help="Filter result by public ip address. You can provide the whole IP address string or just a portion of it.")
    parser.add_argument('--private_ip',
                        help="Filter result by private ip adreess. You can provide the whole IP address string or just a portion of it.")
    parser.add_argument('-l', '--id_list',
                        nargs='+', type=str,
                        help="Do not filter the result. Provide a InstanceIds list instead.")
    parser.add_argument('-i', '--ignore', default="",
                        help="Do not show hosts lines containing the \"IGNORE\" pattern in the tag Name")
    parser.add_argument('-e', '--execute',
                        help="Execute a command on instances")
    parser.add_argument('-r', '--region',
                        help="Specify an alternate region to override \
                        the one defined in the .aws/credentials file")
    parser.add_argument('-u', '--user',
                        help="User to run commands (if -e option is used).\
                        A user is always required, even if you have one defined in .ssh/config file")
    parser.add_argument('-c', '--connection_method',
                        help="The Method to connect to the instance (if -e option is used). \
                        If the instance exposes the SSH port on a public IP, use direct. \
                        Otherwhise choose bastion-host. This method look for the hostname and username \
                        inside the .ssh/config file to reach the target instance.",
                        choices=['direct', 'bastion-host'],
                        default="direct")

    arg = parser.parse_args()

    if arg.execute and (arg.user is None):
        parser.error("--execute requires --user.")

    # Build the boto3 filter list from the provided CLI options.
    filter = []
    option_to_filter = [
        (arg.name, 'tag-value'),
        (arg.type, 'instance-type'),
        (arg.status, 'instance-state-name'),
        (arg.vpc, 'vpc-id'),
        (arg.zone, 'availability-zone'),
        (arg.subnet, 'subnet-id'),
        (arg.public_ip, 'ip-address'),
        (arg.private_ip, 'private-ip-address'),
    ]
    for value, filter_name in option_to_filter:
        if value:
            filter.append({'Name': filter_name, 'Values': ["*" + value + "*"]})

    InstanceIds = arg.id_list if arg.id_list else []
    IgnorePattern = arg.ignore if arg.ignore else ""

    if arg.region:
        client = boto3.client('ec2')
        regions = [region['RegionName'] for region in client.describe_regions()['Regions']]
        if arg.region not in regions:
            sys.exit("ERROR: Please, choose a valid region.")

    hosts = list_instances(filter, arg.region, InstanceIds, IgnorePattern)

    if arg.execute:
        # 'direct' connects to the public IP; 'bastion-host' resolves the
        # hostname through ~/.ssh/config.
        target = 'public_ip' if arg.connection_method == 'direct' else 'hostname'

        names = ""
        for item in hosts:
            names = names + " " + "[green]" + item["hostname"] + "[/green]:[cyan]" + item["id"] + "[/cyan] "

        console = Console()
        console.print("Command to execute: %s" % arg.execute)
        console.print("Executed by: %s" % arg.user)
        console.print("Hosts list: %s" % names)
        with console.status("[bold green]Working on remote execution...[/bold green]") as status:
            for item in hosts:
                if item["status"] != 'running':
                    console.rule("[green]%s[/green][cyan] : %s[/cyan] is not running (command execution skiped)" % (item["hostname"], item["id"]))
                    continue
                if item["public_ip"] is None and arg.connection_method == 'direct':
                    console.rule("[green]%s[/green][cyan] : %s[/cyan] is not reachable using direct method. Use the bastion-host instead (command execution skiped)" % (item["hostname"], item["id"]))
                    continue
                console.rule("[green]%s[/green][cyan] : %s[/cyan]" % (item["hostname"], item["id"]), align='center')
                print(execute_cmd(item[target], arg.user, arg.execute, arg.connection_method))

if __name__ == '__main__':
    sys.exit(main())
"Product Description", "Scope", "Zone", "Duration", "Time Left", "End", "Offering")) 21 | for n in range(size): 22 | id = response.get('ReservedInstances')[n].get('ReservedInstancesId') 23 | count = response.get('ReservedInstances')[n].get('InstanceCount') 24 | type = response.get('ReservedInstances')[n].get('InstanceType') 25 | product = response.get('ReservedInstances')[n].get('ProductDescription') 26 | scope = response.get('ReservedInstances')[n].get('Scope') 27 | zone = response.get('ReservedInstances')[n].get('AvailabilityZone') 28 | duration = response.get('ReservedInstances')[n].get('Duration') 29 | offering = response.get('ReservedInstances')[n].get('OfferingType') 30 | td = timedelta(seconds=int(duration)) 31 | end = response.get('ReservedInstances')[n].get('End') 32 | end_dt = datetime.strptime(str(end), "%Y-%m-%d %H:%M:%S+00:00") 33 | now_dt = datetime.now() 34 | delta = end_dt - now_dt 35 | time_left = max(0, delta.days) 36 | print(columns_format % (id, count, type, product, scope, zone, td.days, time_left, end, offering)) 37 | description="A purchased reservervation affecting to %s x %s instances is about to expire. 
Reservation id: %s" % (count, type, id) 38 | 39 | if time_left > 0: 40 | state = 'active' 41 | else: 42 | state = 'retired' 43 | 44 | instance = { 45 | 'scope': scope, 46 | 'zone': zone, 47 | 'type': type, 48 | 'state': state, 49 | 'count': count 50 | } 51 | instances.append(instance) 52 | 53 | event_start = end_dt.strftime("%Y-%m-%dT%H:%M:%S+00:00") 54 | event_end = (end_dt + timedelta(hours=1)).strftime("%Y-%m-%dT%H:%M:%S+00:00") 55 | m = hashlib.sha224() 56 | m.update(id.encode()) 57 | sha_id = m.hexdigest() 58 | event = { 59 | 'id': sha_id, 60 | 'summary': 'Reserve Instance Expiration', 61 | 'location': 'aws', 62 | 'description': description, 63 | 'start': { 64 | 'dateTime': event_start, 65 | 'timeZone': 'America/Los_Angeles', 66 | }, 67 | 'end': { 68 | 'dateTime': event_end, 69 | 'timeZone': 'America/Los_Angeles', 70 | }, 71 | 'reminders': { 72 | 'useDefault': False, 73 | 'overrides': [ 74 | {'method': 'email', 'minutes': 24 * 60}, 75 | {'method': 'popup', 'minutes': 10}, 76 | ], 77 | }, 78 | } 79 | events.append(event) 80 | event_ids.append(sha_id) 81 | 82 | return events, event_ids, instances 83 | 84 | def create_events(service, events, event_ids): 85 | import datetime 86 | 87 | page_token = None 88 | while True: 89 | calendar_list = service.calendarList().list(pageToken=page_token).execute() 90 | for calendar_list_entry in calendar_list['items']: 91 | if calendar_list_entry['summary'] == "aws": 92 | calendar_id = calendar_list_entry['id'] 93 | 94 | page_token = calendar_list.get('nextPageToken') 95 | if not page_token: 96 | break 97 | 98 | ''' Get the current events from Google Calendar''' 99 | page_token = None 100 | g_event_ids = [] 101 | while True: 102 | g_events = service.events().list(calendarId=calendar_id, pageToken=page_token).execute() 103 | for event in g_events['items']: 104 | g_event_ids.append(event['id']) 105 | page_token = g_events.get('nextPageToken') 106 | if not page_token: 107 | break 108 | 109 | if len(events) >= 1: 110 | 
print("Creating %s events in the aws Calendar of your Google Account" % len(events)) 111 | 112 | n=0 113 | for id in event_ids : 114 | if id in g_event_ids: 115 | print("The event: %s is already scheduled. Nothing to do..." % events[n]['id']) 116 | else: 117 | event = service.events().insert(calendarId=calendar_id, body=events[n]).execute() 118 | print("Event created: %s" % event.get('htmlLink')) 119 | n += 1 120 | 121 | 122 | 123 | def main(): 124 | parser = argparse.ArgumentParser(description='Show reserved EC2 instances') 125 | parser.add_argument('-s', '--state', action='store', 126 | choices=['payment-pending', 'active', 'payment-failed', 'retired'], 127 | help="Filer result by reservation state.") 128 | parser.add_argument('--create-google-calendar-events', 129 | action='store_true', 130 | default=False, 131 | help="Create events in your Google Calendar, using the \ 132 | expiration dates of your active reservations") 133 | parser.add_argument('-t', '--type', 134 | help="Filer result by instance type.") 135 | 136 | arg = parser.parse_args() 137 | 138 | filters=[] 139 | 140 | if arg.create_google_calendar_events: 141 | filters=[] 142 | filters.append({'Name': 'state', 'Values': ['active']}) 143 | 144 | if arg.state and arg.create_google_calendar_events is False: 145 | filters.append({'Name': 'state', 'Values': ["" + arg.state + ""]}) 146 | 147 | if arg.type and arg.create_google_calendar_events is False: 148 | filters.append({'Name': 'instance-type', 'Values': ["*" + arg.type + "*"]}) 149 | 150 | events, event_ids, instances = list_reserved_instances(filters) 151 | 152 | normalization_factor = { 153 | 'nano': 0.25, 154 | 'micro': 0.5, 155 | 'small': 1, 156 | 'medium': 2, 157 | 'large': 4, 158 | 'xlarge': 8, 159 | '2xlarge': 16, 160 | '4xlarge': 32, 161 | '8xlarge': 64, 162 | '9xlarge': 72, 163 | '10xlarge': 80, 164 | '12xlarge': 96, 165 | '16xlarge': 128, 166 | '18xlarge': 144, 167 | '24xlarge': 192, 168 | '32xlarge': 256 169 | } 170 | 171 | # Normalized value 
for regional and zonal active reservations 172 | region = {} 173 | zone = {} 174 | for instance in instances: 175 | instance_type, instance_size = instance['type'].split('.') 176 | if instance['state'] == 'active': 177 | if instance['scope'] == 'Region': 178 | if instance_type not in region: 179 | region[instance_type] = { instance_size: instance['count']} 180 | elif instance_size in region[instance_type]: 181 | region[instance_type][instance_size] += instance['count'] 182 | else: 183 | region[instance_type][instance_size] = instance['count'] 184 | elif instance['scope'] == 'Availability Zone': 185 | if instance_type not in zone: 186 | zone[instance_type] = {} 187 | zone[instance_type][instance['zone']] = {} 188 | zone[instance_type][instance['zone']] = { instance_size: instance['count'] } 189 | elif instance_size in zone[instance_type][instance['zone']]: 190 | zone[instance_type][instance['zone']][instance_size] += instance['count'] 191 | else: 192 | zone[instance_type][instance['zone']][instance_size] = instance['count'] 193 | 194 | nrrs = 0 195 | nrrs_sum = 0 196 | print("") 197 | print("Summary") 198 | print("") 199 | print(" Active Standard Regional Reserverd Instances (by type and size)") 200 | for type in region: 201 | print(" Instance Type: %s" % type) 202 | nrrs += nrrs 203 | for size in region[type]: 204 | # Normalized reserved region size (nrrs) 205 | nrrs = normalization_factor[size] * region[type][size] 206 | nrrs_sum = nrrs_sum + nrrs 207 | print(" %s x %s (%s) = %s" % (region[type][size], size, normalization_factor[size], nrrs)) 208 | 209 | print("") 210 | print(" Total Regional (normalized): %s" % nrrs_sum) 211 | print("") 212 | print("") 213 | 214 | nrrs = 0 215 | nrrs_sum = 0 216 | print(" Active Standard Zonal Reserverd Instances (by type, availability zone and size)") 217 | for type in zone: 218 | print(" Instance Type: %s" % type) 219 | nrrs += nrrs 220 | for availability_zone in zone[type]: 221 | print(" Availabilidy zone: %s" % 
availability_zone) 222 | for size in zone[type][availability_zone]: 223 | nrrs = normalization_factor[size] * zone[type][availability_zone][size] 224 | nrrs_sum = nrrs_sum + nrrs 225 | print(" %s x %s (%s) = %s" % (zone[type][availability_zone][size], size, normalization_factor[size], nrrs)) 226 | 227 | print("") 228 | print(" Total Zonal (normalized): %s" % nrrs_sum) 229 | print("") 230 | 231 | 232 | if arg.create_google_calendar_events: 233 | # Setup the Calendar API 234 | SCOPES = 'https://www.googleapis.com/auth/calendar' 235 | store = file.Storage('credentials.json') 236 | creds = store.get() 237 | if not creds or creds.invalid: 238 | flow = client.flow_from_clientsecrets('client_secret.json', SCOPES) 239 | flags = tools.argparser.parse_args(args=[]) 240 | creds = tools.run_flow(flow, store, flags) 241 | service = build('calendar', 'v3', http=creds.authorize(Http())) 242 | create_events(service, events, event_ids) 243 | 244 | if __name__ == '__main__': 245 | sys.exit(main()) 246 | -------------------------------------------------------------------------------- /aws-scripts/ec2-sg.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | import boto3 4 | import argparse 5 | import sys 6 | from botocore.exceptions import ClientError 7 | from rich.console import Console 8 | from rich.table import Table 9 | from requests import get 10 | import random 11 | import datetime 12 | 13 | def list_security_groups(Filter, GroupIds, RegionName): 14 | 15 | ec2 = boto3.client('ec2', region_name=RegionName) 16 | table = Table() 17 | table.add_column("num", justify="right", no_wrap=True) 18 | table.add_column("SG ID", style="cyan") 19 | table.add_column("SG Name", style="green") 20 | table.add_column("Description") 21 | table.add_column("Inbound Rules", justify="right", style="red") 22 | table.add_column("Outbound Rules", justify="right", style="red") 23 | table.add_column("VPC", 
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Security Groups management: list groups, show their rules, and update the
SSH inbound rule with the caller's current public IP."""
import boto3
import argparse
import sys
from botocore.exceptions import ClientError
from rich.console import Console
from rich.table import Table
from requests import get
import random
import datetime

def _count_rules(permissions):
    """Total rules across every permission entry: IPv4 + IPv6 ranges,
    prefix lists and referenced security groups."""
    return sum(len(p.get('IpRanges', [])) + len(p.get('Ipv6Ranges', []))
               + len(p.get('PrefixListIds', [])) + len(p.get('UserIdGroupPairs', []))
               for p in permissions)

def list_security_groups(Filter, GroupIds, RegionName):
    """Print a table with one row per security group and its rule counts."""
    ec2 = boto3.client('ec2', region_name=RegionName)
    table = Table()
    table.add_column("num", justify="right", no_wrap=True)
    table.add_column("SG ID", style="cyan")
    table.add_column("SG Name", style="green")
    table.add_column("Description")
    table.add_column("Inbound Rules", justify="right", style="red")
    table.add_column("Outbound Rules", justify="right", style="red")
    table.add_column("VPC", justify="right", style="green")

    try:
        sgs = ec2.describe_security_groups(Filters=Filter, GroupIds=GroupIds)
        for num, sg in enumerate(sgs.get('SecurityGroups', []), start=1):
            # Truncate long names so the table stays readable.
            group_name = sg.get('GroupName')
            if len(group_name) > 23:
                group_name = group_name[0:23] + '...'
            # BUG FIX: the original only counted the rules of the *last*
            # IpPermissions entry, and raised NameError for groups with an
            # empty permission list.
            table.add_row(
                str(num),
                str(sg.get('GroupId')),
                group_name,
                sg.get('Description'),
                str(_count_rules(sg.get('IpPermissions', []))),
                str(_count_rules(sg.get('IpPermissionsEgress', []))),
                sg.get('VpcId')
            )
        Console().print(table)
    except ClientError as e:
        print(e)

def list_security_group(Filter, RegionName):
    """Print inbound and outbound rule tables for one security group
    (Filter is expected to carry a single 'group-id' entry)."""
    ec2 = boto3.client('ec2', region_name=RegionName)

    def _rule_table(direction):
        # Both tables share the same column layout.
        t = Table(title=direction + " Rules for " + Filter[0].get('Values')[0] + " Security Group")
        t.add_column("num", justify="right")
        t.add_column("SG Rule ID", style="cyan")
        t.add_column("IP Version", style="green")
        t.add_column("Protocol", justify="right", style="green")
        t.add_column("Port Range", justify="right", style="red")
        t.add_column("Source", justify="right", style="green")
        t.add_column("Description")
        return t

    try:
        sgs = ec2.describe_security_group_rules(Filters=Filter)
        in_table = _rule_table("Inbound")
        out_table = _rule_table("Outbound")

        for num, rule in enumerate(sgs.get('SecurityGroupRules', []), start=1):
            # Defaults guard against rules with none of the three source kinds
            # (the original left ip_version/source possibly unbound).
            ip_version = "-"
            source = None
            if rule.get('ReferencedGroupInfo'):
                source = rule.get('ReferencedGroupInfo').get('GroupId')
            if rule.get('CidrIpv4'):
                ip_version = "IPv4"
                source = rule.get('CidrIpv4')
            if rule.get('CidrIpv6'):
                ip_version = "IPv6"
                # BUG FIX: the original displayed CidrIpv4 for IPv6 rules.
                source = rule.get('CidrIpv6')

            if rule.get('FromPort') == rule.get('ToPort'):
                # -1 means every port for the protocol.
                port_range = 'all' if rule.get('FromPort') == -1 else rule.get('FromPort')
            else:
                port_range = str(rule.get('FromPort')) + "-" + str(rule.get('ToPort'))

            ip_protocol = 'all' if rule.get('IpProtocol') == '-1' else rule.get('IpProtocol')

            target_table = out_table if rule.get('IsEgress') else in_table
            target_table.add_row(
                str(num),
                rule.get('SecurityGroupRuleId'),
                ip_version,
                ip_protocol,
                str(port_range),
                source,
                rule.get('Description')
            )

        console = Console()
        console.print(in_table)
        console.print(out_table)
    except ClientError as e:
        print(e)

def _my_public_ip():
    """Return the caller's public IP as a string, or None if every lookup
    service failed. Services are tried in random order to spread load."""
    ip_services = [
        "https://api.ipify.org",
        "https://ifconfig.me",
        "https://api.my-ip.io/ip",
        "http://myexternalip.com/raw",
        "http://ipwho.is/&fields=ip&output=csv"
    ]
    random.shuffle(ip_services)
    for url in ip_services:
        try:
            return get(url).content.decode('utf8')
        except Exception:  # narrowed from a bare except:; network errors expected here
            print("%s fail. Trying next..." % url)
    return None

def _update_ssh_rule(arg):
    """Rewrite the SSH (tcp/22) rule identified by arg.security_group_rule_id
    so its source is the caller's current public /32, print the updated rule,
    then exit the process."""
    ec2 = boto3.client('ec2')
    ip = _my_public_ip()
    if ip is None:
        print("Public IP address not found using any services")
        sys.exit(1)
    now = datetime.datetime.now()
    description = '(' + arg.description + ') ' + now.strftime("%Y-%m-%d %H:%M:%S") + ' by ec2-sg.py from aws-scripts'
    try:
        data = ec2.modify_security_group_rules(
            GroupId=arg.allow_my_public_ip,
            SecurityGroupRules=[
                {
                    'SecurityGroupRuleId': arg.security_group_rule_id,
                    'SecurityGroupRule': {
                        'IpProtocol': 'tcp',
                        'FromPort': 22,
                        'ToPort': 22,
                        'CidrIpv4': ip + '/32',
                        'Description': description
                    }
                },
            ])
        if data:
            rule_table = Table(title="Inbound Rule updated on the " + arg.allow_my_public_ip + " Security Group")
            rule_table.add_column("SG Rule ID", style="cyan")
            rule_table.add_column("IP Version", style="green")
            rule_table.add_column("Type", style="green")
            rule_table.add_column("Protocol", justify="right", style="green")
            rule_table.add_column("Port Range", justify="right", style="green")
            rule_table.add_column("Source", justify="right", style="green")
            rule_table.add_column("Description", justify="right", style="green")
            rule_table.add_row(
                arg.security_group_rule_id,
                "IPv4",
                "SSH",
                "TCP",
                "[red]22",
                "[red]" + ip + "/32",
                "[white]" + description
            )
            Console().print(rule_table)
            sys.exit(0)
        else:
            print("an error occurred!")
    except ClientError as e:
        print(e)
    sys.exit(0)

def main():
    parser = argparse.ArgumentParser(description='Security Groups Management')
    parser.add_argument('-n', '--name',
                        help="Filter result by group name.")
    parser.add_argument('-l', '--gid_list',
                        nargs='+', type=str,
                        help="Do not filter the result. Provide a GroupIds list instead.")
    parser.add_argument('-r', '--region',
                        help="Specify an alternate region to override \
                        the one defined in the .aws/credentials file")
    parser.add_argument('-s', '--show',
                        help="Show inbound and outbound rules for the provided SG ID")
    parser.add_argument('--allow_my_public_ip',
                        help="Modify the SSH inbound rule with your current public IP \
                        address inside the provided Security Group ID.")
    parser.add_argument('--security_group_rule_id',
                        help="Modify the SSH inbound rule with your current public IP \
                        address inside the provided Security Group Rule ID")
    parser.add_argument('--description',
                        default="",
                        help="Allows you to append a string to the rule description field")

    arg = parser.parse_args()

    filter = []
    GroupIds = []

    if arg.allow_my_public_ip and not arg.security_group_rule_id:
        print("The argument allow_my_public_ip requires the argument security_group_rule_id.")
        sys.exit(1)

    if arg.allow_my_public_ip:
        _update_ssh_rule(arg)  # exits the process when done

    if arg.name:
        filter.append({'Name': 'group-name', 'Values': ["*" + arg.name + "*"]})

    if arg.gid_list:
        GroupIds = arg.gid_list

    if arg.region:
        client = boto3.client('ec2')
        regions = [region['RegionName'] for region in client.describe_regions()['Regions']]
        if arg.region not in regions:
            sys.exit("ERROR: Please, choose a valid region.")

    if not arg.show:
        list_security_groups(filter, GroupIds, arg.region)
    else:
        filter = [{'Name': 'group-id', 'Values': [arg.show]}]
        list_security_group(filter, arg.region)

if __name__ == '__main__':
    sys.exit(main())
def main():
    """CLI entry point: dispatch to the selected snapshot report."""
    parser = argparse.ArgumentParser()
    # BUG FIX: the flag carried both required=True and default='orphan' —
    # argparse rejected invocations without -v even though the help text
    # promised a default.  Making it optional lets the default apply and
    # stays backward compatible with every previous invocation.
    parser.add_argument('-v', '--view', default='orphan',
                        choices=['orphan', 'volumes', 'images'],
                        help="Available views: orphan, volumes and images. "
                             "Orphan is the default one.")
    parser.add_argument('owner_id', help="12-digit AWS Account Number")
    arg = parser.parse_args()

    # The choices are mutually exclusive, so a plain elif chain suffices.
    if arg.view == 'orphan':
        orphan_snapshots(arg.owner_id)
    elif arg.view == 'volumes':
        snap_x_vol(arg.owner_id)
    elif arg.view == 'images':
        snap_x_ami(arg.owner_id)
def list_targets(ec2, arn_group):
    """Print a table of target ids and health states for a target group.

    ec2 is accepted for signature parity with the other listing helpers but
    is not used; a fresh elbv2 client is created instead.
    """
    elbv2 = boto3.client('elbv2')
    descriptions = elbv2.describe_target_health(
        TargetGroupArn=arn_group).get('TargetHealthDescriptions')

    table = Table()
    table.add_column("num", justify="right", no_wrap=True)
    table.add_column("Target Id", style="green")
    table.add_column("Status", style="green")

    # Colour-code the health state: green/red for the definite states,
    # orange for anything transitional (initial, draining, ...).
    colours = {'healthy': "[green]", 'unhealthy': "[red]"}
    for idx, desc in enumerate(descriptions, start=1):
        state = desc.get('TargetHealth').get('State')
        table.add_row(str(idx),
                      desc.get('Target').get('Id'),
                      colours.get(state, "[orange1]") + state)

    Console().print(table)

def register_target(client, arn_group, target_list):
    """Register *target_list* in the target group *arn_group*, printing the
    AWS error message instead of raising on failure."""
    try:
        client.register_targets(TargetGroupArn=arn_group, Targets=target_list)
    except botocore.exceptions.ClientError as error:
        print(error.response['Error']['Message'])

def unregister_target(client, arn_group, target_list):
    """Deregister *target_list* from the target group *arn_group*, printing
    the AWS error message instead of raising on failure."""
    try:
        client.deregister_targets(TargetGroupArn=arn_group, Targets=target_list)
    except botocore.exceptions.ClientError as error:
        print(error.response['Error']['Message'])
argparse.ArgumentParser(description='Shows a list of Target Grops.\ 79 | Also allows you to register/deregister targets in/from \ 80 | a provided Targer Group') 81 | parser.add_argument('-s', '--show', 82 | help="Shows the target for the provided Target Group ARN") 83 | parser.add_argument('-a', '--action', action='store', 84 | choices=['register', 'deregister'], 85 | help="Set the desired action.") 86 | parser.add_argument('--target_type', action='store', 87 | choices=['instances', 'ip_address', 'lambda_function', 'alb'], 88 | help="Set the desired state for the instances provided") 89 | parser.add_argument('--targets_id_list', 90 | nargs='+', type=str, 91 | help="Targets Id list" ) 92 | parser.add_argument('--target_group_arn', 93 | type=str, 94 | help="Target Group ARN" ) 95 | parser.add_argument('--role_arn', required=False, type=str, 96 | help="If the script run on an EC2 instance with an IAM \ 97 | role attached, then the Security Token Service \ 98 | will provide a set of temporary credentials \ 99 | allowing the actions of the assumed role.\ 100 | With this method, no user credentials are \ 101 | required, just the Role ARN to be assumed." 
) 102 | parser.add_argument('-r', '--region', 103 | help="Specify the region to override the one setted in \ 104 | the credentials file or if you are using \ 105 | --role_arn.") 106 | 107 | arg = parser.parse_args() 108 | 109 | if arg.role_arn: 110 | session = role.assumed_role_session(arg.role_arn) 111 | ec2 = session.client('elbv2', region_name=arg.region) 112 | else: 113 | ec2 = boto3.client('elbv2', region_name=arg.region) 114 | 115 | missing = 0 116 | if not arg.action: 117 | if arg.target_type: 118 | print('--target_type requires: -a or --action') 119 | missing += 1 120 | if arg.targets_id_list: 121 | print('--targets_id_list requires: -a or --action') 122 | missing += 1 123 | if arg.target_group_arn: 124 | print('--target_id_group requires: -a or --action') 125 | missing += 1 126 | 127 | if missing >= 1: 128 | sys.exit(1) 129 | 130 | if arg.action: 131 | if not arg.target_type: 132 | print('missing argument: --target_type') 133 | if not arg.targets_id_list: 134 | print('missing argument: --targets_id_list') 135 | if not arg.target_group_arn: 136 | print('missing argument: --target_group_arn') 137 | if arg.targets_id_list and arg.target_group_arn and arg.target_type: 138 | if arg.target_type != 'instances': 139 | print('Non-instance types are not yet supported!') 140 | sys.exit(1) 141 | targets=[] 142 | target={} 143 | for id in arg.targets_id_list: 144 | target={'Id': id} 145 | targets.append(target) 146 | if arg.action == 'register': 147 | register_target(ec2, arg.target_group_arn, targets) 148 | elif arg.action == 'deregister': 149 | unregister_target(ec2, arg.target_group_arn, targets) 150 | 151 | elif arg.show: 152 | session = boto3.session.Session() 153 | region = session.region_name 154 | ec2 = boto3.resource('ec2') 155 | list_targets(ec2, arg.show) 156 | else: 157 | session = boto3.session.Session() 158 | region = session.region_name 159 | ec2 = boto3.resource('ec2') 160 | list_target_groups(ec2) 161 | 162 | 163 | 164 | if __name__ == '__main__': 165 
def sqs_get_msg(qname):
    """Long-poll one message from the SQS queue *qname*.

    Returns (body, receipt_handle).  When the queue is empty, body is a
    stub dict with LifecycleTransition=False so the caller's dispatch loop
    can test it uniformly, and receipt_handle is "".
    """
    sqs = boto3.resource('sqs')
    queue = sqs.get_queue_by_name(QueueName=qname)
    client = boto3.client('sqs')
    message = client.receive_message(QueueUrl=queue.url,
                                     MaxNumberOfMessages=1,
                                     WaitTimeSeconds=20)
    if message.get('Messages'):
        m = message.get('Messages')[0]
        # NOTE(review): the body is a JSON document parsed via literal_eval;
        # this works for flat payloads but json.loads would be the canonical
        # parser — confirm payload shape before switching.
        body = ast.literal_eval(m['Body'])
        receipt_handle = m['ReceiptHandle']
    else:
        body = {'LifecycleTransition': False}
        receipt_handle = ""
    return body, receipt_handle


def sqs_delete_msg(qname, receipt_handle):
    """Delete the message identified by *receipt_handle* from queue *qname*."""
    sqs = boto3.resource('sqs')
    queue = sqs.get_queue_by_name(QueueName=qname)
    client = boto3.client('sqs')
    client.delete_message(QueueUrl=queue.url, ReceiptHandle=receipt_handle)


def get_ec2instanceid():
    """Return this instance's id from the EC2 metadata service as *str*.

    Exits when the metadata endpoint is unreachable (not running on EC2).
    """
    try:
        response = urllib.request.urlopen('http://169.254.169.254/latest/meta-data/instance-id')
    except Exception:
        sys.exit("%s I am not running in EC2. Aborting!!" % datetime.now().strftime('%H:%M:%S %D'))

    # BUG FIX: urllib returns bytes under Python 3, but the id is later
    # compared against the (str) EC2InstanceId field of the SQS message, so
    # without this decode the comparison could never match and lifecycle
    # hooks were never completed.
    return response.read().decode('utf-8')
Sleeping and trying again") % datetime.now().strftime('%H:%M:%S %D')) 80 | elif (sqs_msg['LifecycleTransition'] == state) and (sqs_msg['EC2InstanceId'] == ec2instanceid): 81 | sqs_delete_msg(arg.queue, sqs_receipt_handle) 82 | print("%s %s hook message received" % (datetime.now().strftime('%H:%M:%S %D'), arg.state)) 83 | print("%s Executing filepath" % datetime.now().strftime('%H:%M:%S %D')) 84 | call(cmd_args) 85 | print("%s Completing lifecyle action" % datetime.now().strftime('%H:%M:%S %D')) 86 | as_client = boto3.client('autoscaling') 87 | response = as_client.complete_lifecycle_action( 88 | LifecycleHookName=arg.hookName, 89 | AutoScalingGroupName=arg.group, 90 | LifecycleActionToken=sqs_msg['LifecycleActionToken'], 91 | LifecycleActionResult='CONTINUE', 92 | InstanceId=ec2instanceid 93 | ) 94 | time.sleep(arg.wait) 95 | 96 | if __name__ == '__main__': 97 | sys.exit(main()) 98 | -------------------------------------------------------------------------------- /aws-scripts/mongodb-backup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | import boto3 4 | import botocore 5 | import sys 6 | import argparse 7 | import subprocess 8 | import shutil 9 | import os 10 | from datetime import datetime 11 | import operator 12 | import distutils.spawn 13 | from pymongo import MongoClient 14 | from pymongo import errors 15 | 16 | def fsync(action, host, username, password): 17 | client = MongoClient(host) 18 | db = client['admin'] 19 | 20 | if action == 'lock': 21 | # lock 22 | if username and password: 23 | try: 24 | db.authenticate(username, password) 25 | print("[+] Database connected!") 26 | try: 27 | lock = db.command("fsync", lock=True)["info"] 28 | except Exception as e: 29 | raise e 30 | except Exception as e: 31 | print("[+] Database connection error!") 32 | raise e 33 | else: 34 | try: 35 | lock = db.command("fsync", lock=True)["info"] 36 | except Exception as e: 37 | raise 
def create_snapshot(RegionName, volumes_dict):
    """Snapshot every volume in *volumes_dict* and return the snapshot ids.

    volumes_dict maps a role name ('data', and optionally 'journal') to the
    EBS volume id to snapshot.  Returns the list of created snapshot ids
    (empty when every request failed).
    """
    dtime = datetime.now()
    client = boto3.client('ec2', region_name=RegionName)
    successful_snapshots = dict()
    # Iterate through each item and use the key as snapshot description.
    for snapshot in volumes_dict:
        try:
            response = client.create_snapshot(
                Description="Created by aws-scripts/mongodb_backup.py",
                VolumeId=volumes_dict[snapshot],
                TagSpecifications=[
                    {
                        'ResourceType': 'snapshot',
                        'Tags': [
                            {
                                'Key': 'aws-scripts:mongodb_backup.py:managed',
                                'Value': 'true'
                            },
                            {
                                'Key': 'Name',
                                # BUG FIX: tag values must be strings; the
                                # raw datetime raised a boto3 parameter
                                # validation error.
                                'Value': dtime.strftime('%Y-%m-%d %H:%M:%S')
                            },
                        ]
                    },
                ],
                DryRun=False
            )
            # response carries ResponseMetadata and SnapshotId; record the
            # id only when the request was accepted.
            status_code = response['ResponseMetadata']['HTTPStatusCode']
            if status_code == 200:
                successful_snapshots[snapshot] = response['SnapshotId']
            else:
                print("status code: %s" % status_code)
        except Exception as e:
            # BUG FIX: this message was built but never printed.
            print("There was error in creating snapshot " + snapshot
                  + " with volume id " + volumes_dict[snapshot]
                  + " and error is: \n" + str(e))

    # BUG FIX: snap_ids was referenced before assignment when no snapshot
    # succeeded, and 'data' was indexed unconditionally (KeyError if only
    # 'journal' succeeded).  Build the result unconditionally instead.
    snap_ids = [successful_snapshots[k]
                for k in ('data', 'journal') if k in successful_snapshots]
    if snap_ids:
        print(" Snapshots: %s " % ", ".join(snap_ids))
    return snap_ids

def dump(host, database, collection, exclude_collection, username, password, out):
    """Run mongodump against *host*, writing the dump tree under *out*.

    Optional auth and db/collection filters are appended when provided;
    collection/exclude_collection require database (validated by the CLI).
    """
    if username and password:
        auth_str = "--username %s --password %s" % (username, password)
    else:
        auth_str = ""

    if database:
        db_str = "--db %s" % (database)
        if exclude_collection:
            db_str = "--db %s --excludeCollection %s" % (database, exclude_collection)
        if collection:
            db_str = "--db %s --collection %s" % (database, collection)
    else:
        db_str = ""

    # NOTE(review): command is built from CLI arguments and run with
    # shell=True; acceptable for a trusted operator tool, but a list with
    # shell=False would be safer against odd characters in names.
    mongodump_cmd = "mongodump --host %s -o %s %s %s" % (host, out, auth_str, db_str)
    print(mongodump_cmd)
    mongodump_output = subprocess.check_output(mongodump_cmd, shell=True)
    print(mongodump_output)
Requires '-d' option") 148 | parser.add_argument('-o', '--out', default='dump', 149 | help="For the dump method: The output directory for dumped files") 150 | parser.add_argument('-n', '--number', type=int, default=7, 151 | help="Number of copies to retain") 152 | parser.add_argument('-b', '--bucket', 153 | help="For the dump method: Amazon s3 bucket." ) 154 | parser.add_argument('-P', '--prefix', 155 | help="For the dump method: For grouped objects aka s3 folders, provide the prefix key") 156 | parser.add_argument('-v', '--volume_id', 157 | nargs='+', type=str, 158 | help="For the snapshot method: Provide the data and journal volume_id list to snapshot: If data and journal resides in a separate volumes, both volumes are required.") 159 | parser.add_argument('--no_journal', 160 | action='store_true', 161 | help="For the snapshot method: If pressent, the instance is either running without journaling or has the journal files on a separate volume, you must flush all writes to disk and lock the database to prevent writes during the backup process.") 162 | parser.add_argument('-r', '--region', 163 | help="Specify an alternate region to override \ 164 | the one defined in the .aws/credentials file") 165 | 166 | 167 | arg = parser.parse_args() 168 | 169 | if arg.user and not arg.password: 170 | parser.error("You provided a user but not a password") 171 | 172 | if arg.password and not arg.user: 173 | parser.error("You provided a password but not a user") 174 | 175 | if arg.prefix is not None and arg.prefix[-1:] == "/": 176 | arg.prefix="%s" % arg.prefix[:-1] 177 | 178 | if arg.exclude_collection and not arg.database: 179 | parser.error("--exclude_collection requires --database") 180 | 181 | if arg.collection and not arg.database: 182 | parser.error("--collection requires --database") 183 | 184 | if arg.region: 185 | client = boto3.client('ec2') 186 | regions = [region['RegionName'] for region in client.describe_regions()['Regions']] 187 | if arg.region not in regions: 188 
| sys.exit("ERROR: Please, choose a valid region.") 189 | 190 | if arg.method == "dump": 191 | print("Method: dump") 192 | mongodump_path=distutils.spawn.find_executable("mongodump") 193 | if mongodump_path is not None: 194 | print("mongodump path: %s" % mongodump_path) 195 | else: 196 | print("mongodump path: not found!") 197 | sys.exit(1) 198 | # mongodump 199 | dump(arg.host, arg.database, arg.collection, arg.exclude_collection ,arg.user, arg.password, arg.out) 200 | 201 | # List and get the number of files in the bucket 202 | s3 = boto3.resource('s3') 203 | if arg.prefix: 204 | objects=s3.Bucket(name=arg.bucket).objects.filter(Prefix=arg.prefix) 205 | else: 206 | objects=s3.Bucket(name=arg.bucket).objects.filter() 207 | 208 | print("Filelist on the S3 bucket:") 209 | filedict={} 210 | for object in objects: 211 | if object.key.startswith(arg.prefix + '/dump-' + arg.database): 212 | print((object.key)) 213 | filedict.update({object.key: object.last_modified}) 214 | 215 | # create new tarball 216 | print("Creating the tarball:") 217 | tarball_name="%s-%s.tar.gz" % (arg.out, datetime.strftime(datetime.now(),'%Y-%m-%d-%H%M%S')) 218 | tarball_cmd="tar -czvf %s %s" % (tarball_name, arg.out) 219 | tarball_output = subprocess.check_output(tarball_cmd, shell=True) 220 | print(tarball_output) 221 | 222 | # remove dumped files 223 | print("Removing temporary dump files...") 224 | shutil.rmtree(arg.out) 225 | 226 | # upload the new tarball to s3 227 | remote_file="%s/%s" % (arg.prefix,os.path.basename(tarball_name)) 228 | print("Uploading %s to Amazon S3..." 
% tarball_name) 229 | s3_client = boto3.client('s3') 230 | s3.meta.client.upload_file(tarball_name, arg.bucket, remote_file) 231 | 232 | # remove temporary tarball 233 | print("Removing temporary local tarball...") 234 | os.remove(tarball_name) 235 | 236 | # keep de the last N dumps on s3: removes the oldest ones 237 | # remove the first element of array if prefix (dirname) was used 238 | prefix= arg.prefix + "/" 239 | #if arg.prefix: 240 | # del filedict[arg.prefix + "/"] 241 | sorted_filedict=sorted(list(filedict.items()), key=operator.itemgetter(1)) 242 | for item in sorted_filedict[0:len(sorted_filedict)-arg.number]: 243 | print("Deleting file from S3: %s" % item[0]) 244 | object = s3.Object(arg.bucket, item[0]).delete() 245 | 246 | if arg.method == "snapshot": 247 | print("Method: EBS snapshot") 248 | 249 | if arg.method == "snapshot" and not arg.volume_id: 250 | parser.error("The snapshot method requires --volume_id") 251 | 252 | if len(arg.volume_id) == 1: 253 | # data and journal are in the same volume: no fsyncLock required 254 | fsyncLock = False 255 | if not arg.volume_id[0].startswith("vol-"): 256 | parser.error("Incorrent volume_id") 257 | volumes_dict = { 258 | 'data' : arg.volume_id[0], 259 | } 260 | # Unless 261 | if arg.no_journal is not None: 262 | fsyncLock = arg.no_journal 263 | if fsyncLock == True: 264 | print(" fsyncLock: %s" % fsyncLock) 265 | if arg.user and not arg.password: 266 | parser.error("You provided a user but not a password") 267 | if arg.password and not arg.user: 268 | parser.error("You provided a password but not a user") 269 | else: 270 | print(" fsyncLock: %s" % fsyncLock) 271 | print(" Volume: %s" % arg.volume_id[0]) 272 | 273 | if len(arg.volume_id) == 2: 274 | if arg.user and not arg.password: 275 | parser.error("You provided a user but not a password") 276 | if arg.password and not arg.user: 277 | parser.error("You provided a password but not a user") 278 | if not arg.volume_id[0].startswith("vol-") or not 
def list_instances():
    """Print a table with one row per RDS instance in the default region.

    Stopped instances are rendered dimmed/italic.  The original duplicated
    the whole add_row call in both branches; the branches are merged by
    passing the row style conditionally.
    """
    client = boto3.client('rds')
    instances = client.describe_db_instances()
    table = Table()
    table.add_column("num", justify="right", no_wrap=True)
    table.add_column("Identifier", style="green")
    table.add_column("Endpoint Address", style="cyan")
    table.add_column("Class", justify="right", style="green")
    table.add_column("Engine", justify="right", style="green")
    table.add_column("Version", justify="right", style="red")
    table.add_column("MultiAZ", style="cyan")
    table.add_column("VPC", style="cyan")
    table.add_column("Zone", style="cyan")
    table.add_column("2ry Zone", style="cyan")
    table.add_column("Status")

    for num, db in enumerate(instances.get('DBInstances'), start=1):
        subnet_group = db.get('DBSubnetGroup')
        vpc_id = 'none' if subnet_group is None else subnet_group.get('VpcId')
        status = db.get('DBInstanceStatus')
        table.add_row(
            str(num),
            db.get('DBInstanceIdentifier'),
            db.get('Endpoint').get('Address'),
            db.get('DBInstanceClass'),
            db.get('Engine'),
            db.get('EngineVersion'),
            str(db.get('MultiAZ')),
            vpc_id,
            db.get('AvailabilityZone'),
            db.get('SecondaryAvailabilityZone'),
            status,
            # Dim stopped instances; default style otherwise.
            style='italic grey42' if status == 'stopped' else None,
        )
    console = Console()
    console.print(table)

def main():
    """CLI entry point: list all the RDS instances."""
    parser = argparse.ArgumentParser(description='List all the RDS instances')
    parser.parse_args()
    list_instances()

def assumed_role_session(role_arn: str, base_session: botocore.session.Session = None):
    """Return a boto3 Session whose credentials come from assuming *role_arn*.

    Credentials are fetched lazily on first use and refreshed automatically
    when they expire.

    usage:
        session = assumed_role_session('arn:aws:iam::ACCOUNTID:role/ROLE_NAME')
    """
    base_session = base_session or boto3.session.Session()._session
    fetcher = botocore.credentials.AssumeRoleCredentialFetcher(
        client_creator=base_session.create_client,
        source_credentials=base_session.get_credentials(),
        role_arn=role_arn,
        extra_args={
            # 'RoleSessionName': None  # set this for a non-default session name
        }
    )
    creds = botocore.credentials.DeferredRefreshableCredentials(
        method='assume-role',
        refresh_using=fetcher.fetch_credentials,
        time_fetcher=lambda: datetime.datetime.now(tzlocal())
    )
    # NOTE(review): reaches into botocore private state (_credentials); works
    # with current botocore but is not a public API.
    botocore_session = botocore.session.Session()
    botocore_session._credentials = creds
    return boto3.Session(botocore_session=botocore_session)
# BUG FIX (role.py): the module previously ended with a __main__ guard that
# called sys.exit(main()), but role.py defines no main(), so running it
# directly raised NameError.  The module is a library; the guard is removed.
def get_public_dns_hostname():
    """Return this instance's public DNS name from EC2 instance metadata.

    Always returns *str*: under Python 3 urllib responses are bytes, which
    previously leaked into the log line and the Route53 record Value.
    """
    # urllib2 only exists on Python 2; on Python 3 the name is undefined
    # (NameError) and urllib.request is used instead.  The original bare
    # 'except:' also swallowed real network errors — narrowed here.
    try:
        response = urllib2.urlopen('http://169.254.169.254/latest/meta-data/public-hostname')
    except NameError:
        response = urllib.request.urlopen('http://169.254.169.254/latest/meta-data/public-hostname')
    public_dns = response.read()
    if isinstance(public_dns, bytes):
        public_dns = public_dns.decode('utf-8')  # BUG FIX: bytes -> str on Python 3
    return public_dns

def get_private_ip():
    """Return this instance's private IPv4 address from instance metadata as *str*."""
    try:
        response = urllib2.urlopen('http://169.254.169.254/latest/meta-data/local-ipv4')
    except NameError:
        response = urllib.request.urlopen('http://169.254.169.254/latest/meta-data/local-ipv4')
    private_ip = response.read()
    if isinstance(private_ip, bytes):
        private_ip = private_ip.decode('utf-8')  # BUG FIX: bytes -> str on Python 3
    return private_ip

def del_hostname_record(HostedZoneId, public_dns, fqdn, private_ip):
    """Delete the CNAME record *fqdn* -> *public_dns* from the hosted zone,
    then poll every 5 seconds until the change leaves PENDING state."""
    client = boto3.client('route53')
    response = client.change_resource_record_sets(
        HostedZoneId=HostedZoneId,
        ChangeBatch={
            "Comment": "Record deleted using route53-del-hostname.py script. From: " + private_ip,
            "Changes": [
                {
                    "Action": "DELETE",
                    "ResourceRecordSet": {
                        "Name": fqdn,
                        "Type": "CNAME",
                        "TTL": 300,
                        "ResourceRecords": [
                            {
                                "Value": public_dns
                            },
                        ]
                    }
                },
            ]
        }
    )
    idstring = response.get('ChangeInfo').get('Id')
    response = client.get_change(Id=idstring)
    while response.get('ChangeInfo').get('Status') == 'PENDING':
        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(5)
        response = client.get_change(Id=idstring)
    else:
        print(response.get('ChangeInfo').get('Status'))
    return

def main():
    """Build this host's FQDN from the zone name and delete its CNAME record."""
    parser = argparse.ArgumentParser(description='Delete host record from AWS Route53 zone')
    parser.add_argument('--HostedZoneId', required=True,
                        help="The ID of the hosted zone where the new resource record will be added.")
    parser.add_argument('--dryrun', action='store_true',
                        help="Shows what is going to be done but doesn't change anything actually")

    arg = parser.parse_args()

    hostname = socket.gethostname()

    private_ip = get_private_ip()
    public_dns = get_public_dns_hostname()

    client = boto3.client('route53')
    zone = client.get_hosted_zone(Id=arg.HostedZoneId)
    # The zone Name already ends with a trailing dot (e.g. 'example.com.').
    fqdn = hostname + '.' + zone['HostedZone']['Name']
    date = datetime.now().strftime('%H:%M:%S %D')
    sys.stdout.write("%s: deleting CNAME %s -> %s" % (date, fqdn, public_dns))
    sys.stdout.flush()

    if arg.dryrun is False:
        del_hostname_record(arg.HostedZoneId, public_dns, fqdn, private_ip)
ImportError: 9 | import urllib.request, urllib.error, urllib.parse 10 | import time 11 | from datetime import datetime 12 | 13 | def get_available_hostname(HostedZoneId, HostStr, rangeSize): 14 | 15 | client = boto3.client('route53') 16 | 17 | # Getting domain por the provided HostedZoneId 18 | zones = client.list_hosted_zones() 19 | for zone in range(len(zones.get('HostedZones'))): 20 | zoneid = zones.get('HostedZones')[zone].get('Id') 21 | regex = r"/hostedzone/" + re.escape(HostedZoneId) 22 | Idzone = re.match(regex,zoneid) 23 | if Idzone: 24 | domain = zones.get('HostedZones')[zone].get('Name') 25 | 26 | # Getting first available index in the provided range 27 | response = client.list_resource_record_sets(HostedZoneId=HostedZoneId) 28 | num_found = 0 29 | l = [None] * rangeSize 30 | # Fill the list with founded value using the number as index 31 | # Unasigned indexes will appear as None 32 | for record in range(len(response.get('ResourceRecordSets'))): 33 | name = response.get('ResourceRecordSets')[record].get('Name') 34 | regex = r"(" + re.escape(HostStr) + ")(\d+).*" 35 | host=re.match(regex, name) 36 | if host: 37 | num_found=int(host.group(2)) 38 | hostname=host.group(0) 39 | l[num_found]=hostname; 40 | 41 | # Getting first free index 42 | n = 0 43 | while l[n] is not None: 44 | n = n + 1 45 | 46 | hostname = "%s%02d.%s" % (HostStr,n,domain) 47 | return hostname 48 | 49 | def get_public_dns_hostname(): 50 | # curl http://169.254.169.254/latest/meta-data/public-hostname 51 | try: 52 | # if python 2.x 53 | response = urllib2.urlopen('http://169.254.169.254/latest/meta-data/public-hostname') 54 | except: 55 | # if python3.x 56 | response = urllib.request.urlopen('http://169.254.169.254/latest/meta-data/public-hostname') 57 | public_dns = response.read() 58 | return public_dns 59 | 60 | def get_local_dns_hostname(): 61 | # curl http://169.254.169.254/latest/meta-data/hostname 62 | try: 63 | # if python 2.x 64 | response = 
def set_hostname_record(HostedZoneId, public_dns, available_hostname, private_ip):
    """Create CNAME *available_hostname* -> *public_dns* in Route53.

    Submits a CREATE change batch, then polls get_change every 5 seconds,
    writing a progress dot while the change is PENDING, and finally prints
    the terminal status (normally INSYNC).
    """
    # On Python 3 the EC2 metadata helpers return bytes; the string
    # concatenation for the Comment below would raise TypeError and
    # botocore requires str values, so decode first. (No-op on Python 2.)
    if isinstance(private_ip, bytes):
        private_ip = private_ip.decode('utf-8')
    if isinstance(public_dns, bytes):
        public_dns = public_dns.decode('utf-8')

    client = boto3.client('route53')
    response = client.change_resource_record_sets(
        HostedZoneId=HostedZoneId,
        ChangeBatch={
            "Comment": "Record added using set-hostname.py script. From: " + private_ip,
            "Changes": [
                {
                    "Action": "CREATE",
                    "ResourceRecordSet": {
                        "Name": available_hostname,
                        "Type": "CNAME",
                        "TTL": 300,
                        "ResourceRecords": [
                            {
                                "Value": public_dns
                            },
                        ]
                    }
                },
            ]
        }
    )
    # Poll until Route53 reports the change is no longer PENDING.
    idstring = response.get('ChangeInfo').get('Id')
    response = client.get_change(Id=idstring)
    while response.get('ChangeInfo').get('Status') == 'PENDING':
        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(5)
        response = client.get_change(Id=idstring)
    # The original used a no-op while/else for this; a plain print after
    # the loop is equivalent and clearer.
    print(response.get('ChangeInfo').get('Status'))
    return
The first available will be used" ) 135 | parser.add_argument('--dryrun', action='store_true', 136 | help="Shows what is going to be done but doesn't change anything actually") 137 | 138 | arg = parser.parse_args() 139 | 140 | available_hostname = get_available_hostname(arg.HostedZoneId, arg.HostStr, arg.rangeSize) 141 | 142 | private_ip = get_private_ip() 143 | public_dns = get_public_dns_hostname() 144 | #get_local_dns_hostname() 145 | #get_public_ip() 146 | date = datetime.now().strftime('%H:%M:%S %D') 147 | sys.stdout.write ("%s: creating CNAME %s -> %s" % (date, available_hostname, public_dns)) 148 | sys.stdout.flush() 149 | 150 | if arg.dryrun is False: 151 | set_hostname_record(arg.HostedZoneId, public_dns, available_hostname, private_ip) 152 | 153 | if __name__ == '__main__': 154 | sys.exit(main()) 155 | -------------------------------------------------------------------------------- /aws-scripts/s3-download-file.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import boto3 4 | import botocore 5 | import sys 6 | import argparse 7 | 8 | def download_file(bucket, objectkey, filepath ): 9 | s3_client = boto3.client('s3') 10 | try: 11 | s3_client.download_file(bucket, objectkey, filepath) 12 | print("Requested file saved at: %s" % filepath) 13 | except botocore.exceptions.ClientError as e: 14 | if e.response['ResponseMetadata']['HTTPStatusCode'] == 404: 15 | print("Requested file: %s/%s (not found)" % (bucket, objectkey)) 16 | else: 17 | print("Error Msg: %s" % e.response['Error']['Message']) 18 | 19 | def main(): 20 | parser = argparse.ArgumentParser(description='Donwload file from AWS S3') 21 | parser.add_argument('-b', '--bucket', required=True, 22 | help="The bucket name.") 23 | parser.add_argument('-o', '--objectkey', required=True, 24 | help="The host string used to build the new name") 25 | parser.add_argument('-f', '--filepath', required=True, 26 | help="The filepath of the file to be 
saved" ) 27 | 28 | arg = parser.parse_args() 29 | 30 | download_file(arg.bucket, arg.objectkey, arg.filepath) 31 | 32 | if __name__ == '__main__': 33 | sys.exit(main()) 34 | -------------------------------------------------------------------------------- /img/demo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/frommelmak/aws-scripts/b84ec8d9f6a353c8306a3edb909ede3ef2c7656e/img/demo.png -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # Install with: `pip install -r requirements.txt` 2 | 3 | # General 4 | boto3>=1.6.3 5 | argparse 6 | 7 | # ec2-instances.py 8 | paramiko 9 | 10 | # ec2-reserved.py 11 | google-api-python-client 12 | oauth2client 13 | 14 | # ec2-snap-mgmt.py 15 | boto 16 | 17 | # s3-mongodump 18 | sshutil 19 | #operator 20 | 21 | # route53-[set|del]-hostname.py 22 | #urllib2 23 | #socket 24 | #time 25 | 26 | #lifecycle-hook-worker.py 27 | #subprocess 28 | #shlex 29 | ast 30 | 31 | #s3-download-file.py 32 | botocore 33 | 34 | #ec2-sg.py 35 | rich 36 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | from setuptools import setup, find_packages 3 | 4 | def read(fname): 5 | return open(os.path.join(os.path.dirname(__file__), fname)).read() 6 | 7 | setup( 8 | name = "aws-scripts", 9 | version = "0.1.19", 10 | author = "Marcos Martinez", 11 | author_email = "frommelmak@gmail.com", 12 | description = "Some useful AWS scripts I use from time to time", 13 | license = "MIT", 14 | keywords = "aws amazon-web-services ec2-instance google-calendar-synchronization amazon mongodb backup", 15 | url = "http://github.com/frommelmak/aws-scripts", 16 | install_requires=['boto3>=1.18.60', 17 | 'argparse', 18 | 'fabric>=2.7.1', 19 
| 'paramiko==2.8.1', 20 | 'google-api-python-client>=1.7.3', 21 | 'oauth2client>=4.1.2', 22 | 'boto>=2.38.0', 23 | 'sshutil>=0.9.7', 24 | 'botocore>=1.21.60', 25 | 'rich>=12.5.1', 26 | ], 27 | extras_require={ 28 | "mongodb": ['pymongo>=2.9,< 3.0'], 29 | }, 30 | python_requires='>=2.7', 31 | packages=find_packages(exclude=['docs', 'tests*']), 32 | scripts = ['aws-scripts/ec2-instances.py', 33 | 'aws-scripts/ec2-instance-state.py', 34 | 'aws-scripts/ec2-reserved.py', 35 | 'aws-scripts/ec2-elb.py', 36 | 'aws-scripts/ec2-ebs.py', 37 | 'aws-scripts/ec2-snap-mgmt.py', 38 | 'aws-scripts/mongodb-backup.py', 39 | 'aws-scripts/rds-instances.py', 40 | 'aws-scripts/route53-set-hostname.py', 41 | 'aws-scripts/route53-del-hostname.py', 42 | 'aws-scripts/s3-download-file.py', 43 | 'aws-scripts/lifecycle-hook-worker.py', 44 | 'aws-scripts/ec2-sg.py', 45 | 'aws-scripts/ec2-tg.py', 46 | 'aws-scripts/role.py' 47 | ], 48 | long_description=read('README.md'), 49 | long_description_content_type='text/markdown', 50 | classifiers=[ 51 | "Topic :: Utilities", 52 | "License :: OSI Approved :: MIT License", 53 | 'Programming Language :: Python :: 2.7', 54 | 'Programming Language :: Python :: 3', 55 | ], 56 | ) 57 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/frommelmak/aws-scripts/b84ec8d9f6a353c8306a3edb909ede3ef2c7656e/tests/__init__.py --------------------------------------------------------------------------------