├── .gitattributes ├── .gitignore ├── .travis.yml ├── .zappr.yaml ├── Dockerfile ├── LICENSE ├── MAINTAINERS ├── MANIFEST.in ├── README.rst ├── SECURITY.md ├── TODO.rst ├── delivery.yaml ├── examples ├── example-full-configuration.yaml └── example-minimal-configuration.yaml ├── next-version ├── requirements.txt ├── setup.py ├── sevenseconds ├── __init__.py ├── __main__.py ├── cli.py ├── config │ ├── __init__.py │ ├── acm.py │ ├── ami.py │ ├── bastion.py │ ├── cloudtrail.py │ ├── cloudwatch.py │ ├── configure.py │ ├── ec2.py │ ├── elasticache.py │ ├── iam.py │ ├── kms.py │ ├── policysimulator.py │ ├── rds.py │ ├── route53.py │ ├── s3.py │ ├── securitygroup.py │ └── vpc.py └── helper │ ├── __init__.py │ ├── auth.py │ ├── aws.py │ ├── network.py │ └── regioninfo.py ├── tests ├── test_aws.py ├── test_cli.py └── test_iam.py └── tox.ini /.gitattributes: -------------------------------------------------------------------------------- 1 | sevenseconds/_version.py export-subst 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.egg* 2 | coverage.xml 3 | junit.xml 4 | .coverage 5 | .idea 6 | *.sw* 7 | __pycache__ 8 | dist/ 9 | *.crt 10 | *.key 11 | *.csr 12 | *.pem 13 | .cache 14 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | dist: xenial 2 | language: python 3 | python: 3.7 4 | install: 5 | - pip install -r requirements.txt 6 | - pip install coveralls codecov 7 | script: 8 | - python setup.py test 9 | - python setup.py flake8 10 | after_success: 11 | - coveralls 12 | - codecov 13 | -------------------------------------------------------------------------------- /.zappr.yaml: -------------------------------------------------------------------------------- 1 | # for github.com 2 | approvals: 3 | groups: 4 | 
zalando: 5 | minimum: 2 6 | from: 7 | orgs: 8 | - "zalando" 9 | 10 | X-Zalando-Team: "teapot" 11 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Hack to upload version to Pypi 2 | 3 | FROM registry.opensource.zalan.do/stups/python AS builder 4 | ARG VERSION 5 | RUN apt-get update && \ 6 | apt-get install -q -y python3-pip && \ 7 | pip3 install -U tox setuptools 8 | COPY . /build 9 | WORKDIR /build 10 | RUN sed -i "s/__version__ = .*/__version__ = '${VERSION}'/" */__init__.py 11 | RUN python3 setup.py sdist bdist_wheel 12 | 13 | FROM pierone.stups.zalan.do/teapot/python-cdp-release:latest 14 | COPY --from=builder /build/dist /pydist 15 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2015 Zalando SE 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | -------------------------------------------------------------------------------- /MAINTAINERS: -------------------------------------------------------------------------------- 1 | Matthias Kerk 2 | Team Teapot 3 | Henning Jacobs 4 | Andreas Pfeiffer 5 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include *.txt 2 | include *.rst 3 | recursive-include sevenseconds *.py 4 | recursive-include docs *.rst *.py 5 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | ======================================== 2 | Seven Seconds - AWS Account Configurator 3 | ======================================== 4 | 5 | .. image:: https://travis-ci.org/zalando-stups/sevenseconds.svg?branch=master 6 | :target: https://travis-ci.org/zalando-stups/sevenseconds 7 | :alt: Travis CI build status 8 | 9 | .. image:: https://coveralls.io/repos/zalando-stups/sevenseconds/badge.svg?branch=master 10 | :target: https://coveralls.io/r/zalando-stups/sevenseconds?branch=master 11 | :alt: Coveralls status 12 | 13 | Command line utility to configure AWS accounts: 14 | 15 | * Enable CloudTrail 16 | * Configure VPC subnets (DMZ and internal) 17 | * Configure NAT instances and routing 18 | * Configure DNS 19 | * Upload SSL cert 20 | * Configure RDS/ElastiCache subnet groups 21 | * Configure IAM roles 22 | * Configure SAML integration 23 | * Configure `SSH bastion host`_ ("odd") 24 | 25 | See the `STUPS Landscape Overview`_ and the `STUPS Installation Guide`_. 26 | 27 | Usage 28 | ===== 29 | 30 | First install with PIP: 31 | 32 | .. code-block:: bash 33 | 34 | $ sudo pip3 install --upgrade stups-sevenseconds 35 | 36 | Run with your YAML configuration (you need valid AWS credentials for this): 37 | 38 | .. 
code-block:: bash 39 | 40 | $ sevenseconds configure myconfig.yaml myaccountname 41 | 42 | You can also run Seven Seconds on all configured accounts with automatic SAML logins: 43 | 44 | .. code-block:: bash 45 | 46 | $ sevenseconds configure --saml-user=mysamluser myconfig.yaml '*' 47 | 48 | Running from Source 49 | =================== 50 | 51 | .. code-block:: bash 52 | 53 | $ python3 -m sevenseconds configure myconfig.yaml myaccountname 54 | 55 | Releasing 56 | ========= 57 | 58 | .. code-block:: bash 59 | 60 | $ ./release.sh 61 | 62 | 63 | .. _SSH bastion host: https://github.com/zalando-stups/odd 64 | .. _STUPS Landscape Overview: https://zalando-stups.github.io/ 65 | .. _STUPS Installation Guide: http://stups.readthedocs.org/en/latest/installation/index.html 66 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | We acknowledge that every line of code that we write may potentially contain security issues. 2 | We are trying to deal with it responsibly and provide patches as quickly as possible. 3 | 4 | We host our bug bounty program on HackerOne, it is currently private, therefore if you would like to report a vulnerability and get rewarded for it, please ask to join our program by filling this form: 5 | 6 | https://corporate.zalando.com/en/services-and-contact#security-form 7 | 8 | You can also send your report via this form if you do not want to join our bug bounty program and just want to report a vulnerability or security issue. 9 | -------------------------------------------------------------------------------- /TODO.rst: -------------------------------------------------------------------------------- 1 | ============= 2 | Open Features 3 | ============= 4 | 5 | MFA-Confguration For Root User 6 | ============================== 7 | 8 | Amazon doesn't support it yet via API. Feature Request is ongoing... 
9 | 10 | example code (possibly not runnable) 11 | 12 | .. code-block:: python 13 | 14 | #!/usr/bin/python3 15 | 16 | import boto.iam 17 | import time 18 | import onetimepass as otp 19 | import base64 20 | from clickclick import Action 21 | 22 | 23 | def get_account_alias(): 24 | conn = boto.iam.connect_to_region('eu-west-1') 25 | resp = conn.get_account_alias() 26 | return resp['list_account_aliases_response']['list_account_aliases_result']['account_aliases'][0] 27 | 28 | 29 | con = boto.iam.connect_to_region('eu-west-1') 30 | user = None 31 | account_alias = get_account_alias() 32 | try: 33 | user = con.get_user()['get_user_response']['get_user_result']['user'] 34 | except: 35 | pass 36 | with Action('Configure MFA Token..') as act: 37 | if user.username == account_alias and user.arn.endswith(':root'): 38 | mfa_devices = con.get_all_mfa_devices()['list_mfa_devices_response']['list_mfa_devices_result']['mfa_devices'] 39 | if not mfa_devices: 40 | act.progress() 41 | mfa = con.create_virtual_mfa_device(device_name='root-account-mfa-device', path=con.get_path()) 42 | secret = base64.b64decode(mfa['create_virtual_mfa_device_response']['create_virtual_mfa_device_result']['virtual_mfa_device']['base_32_string_seed']) 43 | id1 = str(otp.get_hotp(secret, int(time.time()) // 30 - 1)).zfill(6) 44 | id2 = str(otp.get_totp(secret)).zfill(6) 45 | con.enable_mfa_device(user_name='root', serial_number=mfa['create_virtual_mfa_device_response']['create_virtual_mfa_device_result']['virtual_mfa_device']['serial_number'], auth_code_1=id1, auth_code_2=id2) 46 | else: 47 | act.warning('Skipping (root-account)') 48 | 49 | 50 | Other nice Feature currently without API-Support 51 | ================================================ 52 | 53 | * Consulidate Billing 54 | * Enable IAM-Access to Billings 55 | * TAX Informations 56 | 57 | -------------------------------------------------------------------------------- /delivery.yaml: 
-------------------------------------------------------------------------------- 1 | version: "2017-09-20" 2 | pipeline: 3 | - id: build 4 | type: script 5 | overlay: guild-python/latest 6 | env: 7 | PYENV_VERSION: "3.9" 8 | commands: 9 | - desc: "Install dependencies" 10 | cmd: | 11 | pip install -r requirements.txt 12 | pip install coveralls codecov 13 | - desc: "Run Tests" 14 | cmd: python3 setup.py test 15 | - desc: "Check code style" 16 | cmd: flake8 17 | - desc: "Build docker image that will upload package" 18 | cmd: | 19 | VERSION=$(./next-version) 20 | 21 | if [[ -z "${CDP_PULL_REQUEST_NUMBER}" ]]; then 22 | DOCKER_IMAGE="pierone.stups.zalan.do/teapot/sevenseconds-release:${CDP_TARGET_REPOSITORY_COUNTER}" 23 | else 24 | DOCKER_IMAGE="pierone.stups.zalan.do/teapot/sevenseconds-release-pr:${CDP_TARGET_REPOSITORY_COUNTER}" 25 | fi 26 | 27 | docker build --build-arg VERSION="$VERSION" -t "$DOCKER_IMAGE" . 28 | 29 | docker push "$DOCKER_IMAGE" 30 | 31 | if [[ -z "${CDP_PULL_REQUEST_NUMBER}" ]]; then 32 | git log -1 --pretty=%B > CHANGELOG 33 | # TODO upload the wheel package 34 | git gh-release --message-from-file CHANGELOG $VERSION 35 | fi 36 | # The actual release is done by a pipeline in Zalando's Internal Github Enterprise 37 | -------------------------------------------------------------------------------- /examples/example-full-configuration.yaml: -------------------------------------------------------------------------------- 1 | # Configuration YAML file for AWS Account Configurator 2 | # see https://github.com/zalando-stups/sevenseconds 3 | # the global section applies to all accounts 4 | # every key from the global section can be overridden for each account ("accounts" section) 5 | global: 6 | # optional: admin account name which contains the root hosted zone (*.example.org) 7 | admin_account: "exampleorg" 8 | # account alias: prefix the account alias with the company name ("exampleorg") 9 | alias: "exampleorg-{account_name}" 10 | # regions to 
configure 11 | regions: 12 | - eu-central-1 13 | - eu-west-1 14 | # configure CloudTrail to log into company's S3 bucket 15 | cloudtrail: 16 | s3_bucket_name: exampleorg-cloudtrail-logs 17 | s3_key_prefix: Exampleorg 18 | # hosted zone for each AWS account 19 | domain: "{account_name}.example.org" 20 | saml_providers: 21 | Shibboleth: "https://idp.example.org/shibboleth" 22 | saml_identity_provider_url: "https://aws-login.example.org" 23 | saml_admin_login_role: "Shibboleth-Administrator" 24 | # base Taupage AMI to search for 25 | base_ami: 26 | default_channel: "Taupage-AMI-*" 27 | channels: 28 | - "TaupageStaging-AMI-*" 29 | - "TaupageDev-AMI-*" 30 | is_public: false 31 | # account_id of the AMI creator 32 | onwer_id: 123456789123 33 | # optional: additional VPC tags 34 | vpc: 35 | tags: 36 | Config: "{registry: pierone.stups.zalan.do, ami_id: {{ami_id}}}" 37 | # optional: list of trusted IP ranges to add to security groups 38 | trusted_networks: 39 | exampleorg_office1: 198.51.100.0/24 40 | exampleorg_dc1: 203.0.113.0/24 41 | # optional: security groups to configure with trusted networks (workaround for IP based security) 42 | security_groups: {} 43 | # S3 buckets to configure in each AWS account 44 | s3_buckets: 45 | # configure Mint bucket for OAuth credentials 46 | mint_bucket: 47 | name: "exampleorg-stups-mint-{account_id}-{region}" 48 | regions: ["eu-west-1"] 49 | policy: 50 | Version: "2012-10-17" 51 | Statement: [ 52 | { 53 | "Sid": "AllowMintWrite", 54 | "Effect": "Allow", 55 | "Principal": { 56 | "AWS": "arn:aws:iam::123456789123:root" 57 | }, 58 | "Action": [ 59 | "s3:PutObject" 60 | ], 61 | "Resource": [ 62 | "arn:aws:s3:::{bucket_name}/*" 63 | ] 64 | }] 65 | 66 | # SSH bastion/jump host 67 | bastion: 68 | # uncomment the following line to terminate and redeploy "odd" 69 | # re_deploy: true 70 | ami_config: 71 | application_id: odd 72 | application_version: "0.1" 73 | runtime: Docker 74 | source: stups/odd:latest 75 | logentries_account_key: 
123-123-123-123 76 | ports: 77 | # use default SSH port for Docker container 78 | 22: 22 79 | environment: 80 | ALLOWED_REMOTE_NETWORKS: "172.31.0.0/16" 81 | # configure your even URL here 82 | GRANTING_SERVICE_URL: "https://even.stups.example.org" 83 | # configure your public even SSH key here 84 | GRANTING_SERVICE_SSH_KEY: "ssh-rsa AAAAB3Nza123123mysshpublickey123456789" 85 | root: true 86 | # use non-default SSH port for OpenSSH on host 87 | ssh_ports: [2222] 88 | hostname: "odd-{account_name}" 89 | 90 | # IAM roles to configure 91 | roles: 92 | 93 | # This role is only used by the Cloud team (STUPS), especially when re-running this setup-script is required. 94 | Shibboleth-Administrator: 95 | policy: 96 | Version: "2012-10-17" 97 | Statement: [{"Action": "*", "Resource": "*", "Effect": "Allow"}] 98 | assume_role_policy: 99 | Version: "2012-10-17" 100 | Statement: [{"Sid": "", 101 | "Effect": "Allow", 102 | "Principal": {"Federated": "arn:aws:iam::{account_id}:saml-provider/Shibboleth"}, 103 | "Action": "sts:AssumeRoleWithSAML", 104 | "Condition": {"StringEquals": {"SAML:aud": "https://signin.aws.amazon.com/saml"}}}] 105 | 106 | 107 | # This role grants access to all supported (*) AWS services and action in the regions Ireland and Frankfurt. 108 | # The role is meant for all team members, that actually need to configure or run something in the AWS cloud. 109 | # 110 | # (*) supported by STUPS. Services are in general supported, when we are able to assure audit requirements. 111 | Shibboleth-PowerUser: 112 | policy: 113 | Version: "2012-10-17" 114 | Statement: [ 115 | { 116 | # ??? 
Security Token Service 117 | "Effect": "Allow", 118 | "Action": "sts:**", 119 | "Resource": "*" 120 | }, 121 | { 122 | # Deny configuring other SAML providers 123 | "NotAction": [ 124 | "iam:CreateSAMLProvider", 125 | "iam:DeleteSAMLProvider" 126 | ], 127 | "Effect": "Allow", 128 | "Resource": "*" 129 | }, 130 | { 131 | # Deny to run EC2 instances out of Europe Regions 132 | "Resource": "*", 133 | "Action": "ec2:*", 134 | "Condition": { 135 | "StringNotLike": { 136 | "ec2:Region": [ 137 | "eu-west-1", 138 | "eu-central-1" 139 | ] 140 | } 141 | }, 142 | "Effect": "Deny" 143 | }, 144 | # Wouldn't it be better to white-list allowed services, instead of black-listing the forbidden ones ??? 145 | # There may be some new (untracked) services in the future, which we do not recognize immediately. 146 | { 147 | "Effect": "Allow", 148 | "NotAction": [ 149 | # all services listed here are "forbidden" 150 | # e.g. because they have no audit capabilities 151 | "appstream:*", 152 | "cognito:*", 153 | "directoryservice:*", 154 | "lambda:*", 155 | "kinesis:*", 156 | "mobileanalytics:*", 157 | "ses:*", 158 | "trustedadvisor:*", 159 | "workmail:*", 160 | "workspaces:*", 161 | "zocalo:*", 162 | 163 | # EC2 is configured above 164 | "ec2:*", 165 | 166 | # IAM is configured above 167 | "iam:*" 168 | ], 169 | "Resource": "*" 170 | }, 171 | { 172 | "Action": [ 173 | "ec2:DeleteNetworkAcl", 174 | "ec2:DeleteRoute", 175 | "ec2:DeleteRouteTable", 176 | "ec2:DeleteSubnet", 177 | "ec2:DeleteVpc", 178 | "ec2:DeleteVpcPeeringConnection", 179 | "ec2:DeleteVpnConnection", 180 | "ec2:DeleteVpnConnectionRoute", 181 | "ec2:DeleteVpnGateway" 182 | ], 183 | "Effect": "Deny", 184 | "Resource": "*" 185 | } 186 | ] 187 | assume_role_policy: 188 | Version: "2012-10-17" 189 | Statement: [{"Sid": "", 190 | "Effect": "Allow", 191 | "Principal": {"Federated": "arn:aws:iam::{account_id}:saml-provider/Shibboleth"}, 192 | "Action": "sts:AssumeRoleWithSAML", 193 | "Condition": {"StringEquals": {"SAML:aud": 
"https://signin.aws.amazon.com/saml"}}}] 194 | 195 | 196 | # This role grants read-only access to all supported AWS services. It is meant for "unexperienced" team members, 197 | # that should not be able to change something. 198 | Shibboleth-ReadOnly: 199 | 200 | # Can't we just use some "Managed Policies" here??? 201 | policy: 202 | Version: "2012-10-17" 203 | Statement: [{"Effect":"Allow","Resource":"*", 204 | "Action":[ 205 | "appstream:Get*", 206 | "autoscaling:Describe*", 207 | "cloudformation:Describe*", 208 | "cloudformation:GetTemplate", 209 | "cloudformation:List*", 210 | "cloudfront:Get*", 211 | "cloudfront:List*", 212 | "cloudtrail:DescribeTrails", 213 | "cloudtrail:GetTrailStatus", 214 | "cloudwatch:Describe*", 215 | "cloudwatch:Get*", 216 | "cloudwatch:List*", 217 | "directconnect:Describe*", 218 | "dynamodb:GetItem", 219 | "dynamodb:BatchGetItem", 220 | "dynamodb:Query", 221 | "dynamodb:Scan", 222 | "dynamodb:DescribeTable", 223 | "dynamodb:ListTables", 224 | "ec2:Describe*", 225 | "elasticache:Describe*", 226 | "elasticbeanstalk:Check*", 227 | "elasticbeanstalk:Describe*", 228 | "elasticbeanstalk:List*", 229 | "elasticbeanstalk:RequestEnvironmentInfo", 230 | "elasticbeanstalk:RetrieveEnvironmentInfo", 231 | "elasticloadbalancing:Describe*", 232 | "elastictranscoder:Read*", 233 | "elastictranscoder:List*", 234 | "iam:List*", 235 | "iam:Get*", 236 | "kinesis:Describe*", 237 | "kinesis:Get*", 238 | "kinesis:List*", 239 | "opsworks:Describe*", 240 | "opsworks:Get*", 241 | "route53:Get*", 242 | "route53:List*", 243 | "redshift:Describe*", 244 | "redshift:ViewQueriesInConsole", 245 | "rds:Describe*", 246 | "rds:ListTagsForResource", 247 | "s3:Get*", 248 | "s3:List*", 249 | "sdb:GetAttributes", 250 | "sdb:List*", 251 | "sdb:Select*", 252 | "ses:Get*", 253 | "ses:List*", 254 | "sns:Get*", 255 | "sns:List*", 256 | "sqs:GetQueueAttributes", 257 | "sqs:ListQueues", 258 | "sqs:ReceiveMessage", 259 | "storagegateway:List*", 260 | "storagegateway:Describe*", 261 | 
"trustedadvisor:Describe*"]}] 262 | assume_role_policy: 263 | Version: "2012-10-17" 264 | Statement: [{"Sid": "", 265 | "Effect": "Allow", 266 | "Principal": {"Federated": "arn:aws:iam::{account_id}:saml-provider/Shibboleth"}, 267 | "Action": "sts:AssumeRoleWithSAML", 268 | "Condition": {"StringEquals": {"SAML:aud": "https://signin.aws.amazon.com/saml"}}}] 269 | 270 | # special role for "fullstop." audit reporting component 271 | fullstop: 272 | policy: 273 | Version: "2012-10-17" 274 | Statement: [{"Effect":"Allow","Resource":"*", 275 | "Action":[ 276 | "appstream:Get*", 277 | "autoscaling:Describe*", 278 | "cloudformation:DescribeStacks", 279 | "cloudformation:DescribeStackEvents", 280 | "cloudformation:DescribeStackResource", 281 | "cloudformation:DescribeStackResources", 282 | "cloudformation:GetTemplate", 283 | "cloudformation:List*", 284 | "cloudfront:Get*", 285 | "cloudfront:List*", 286 | "cloudtrail:DescribeTrails", 287 | "cloudtrail:GetTrailStatus", 288 | "cloudwatch:Describe*", 289 | "cloudwatch:Get*", 290 | "cloudwatch:List*", 291 | "directconnect:Describe*", 292 | "dynamodb:GetItem", 293 | "dynamodb:BatchGetItem", 294 | "dynamodb:Query", 295 | "dynamodb:Scan", 296 | "dynamodb:DescribeTable", 297 | "dynamodb:ListTables", 298 | "ec2:Describe*", 299 | "elasticache:Describe*", 300 | "elasticbeanstalk:Check*", 301 | "elasticbeanstalk:Describe*", 302 | "elasticbeanstalk:List*", 303 | "elasticbeanstalk:RequestEnvironmentInfo", 304 | "elasticbeanstalk:RetrieveEnvironmentInfo", 305 | "elasticloadbalancing:Describe*", 306 | "elasticmapreduce:Describe*", 307 | "elasticmapreduce:List*", 308 | "elastictranscoder:Read*", 309 | "elastictranscoder:List*", 310 | "iam:List*", 311 | "iam:Get*", 312 | "kinesis:Describe*", 313 | "kinesis:Get*", 314 | "kinesis:List*", 315 | "opsworks:Describe*", 316 | "opsworks:Get*", 317 | "route53:Get*", 318 | "route53:List*", 319 | "redshift:Describe*", 320 | "redshift:ViewQueriesInConsole", 321 | "rds:Describe*", 322 | 
"rds:ListTagsForResource", 323 | "sdb:GetAttributes", 324 | "sdb:List*", 325 | "sdb:Select*", 326 | "ses:Get*", 327 | "ses:List*", 328 | "sns:Get*", 329 | "sns:List*", 330 | "sqs:GetQueueAttributes", 331 | "sqs:ListQueues", 332 | "sqs:ReceiveMessage", 333 | "storagegateway:List*", 334 | "storagegateway:Describe*", 335 | "tag:get*", 336 | "trustedadvisor:Describe*"]}] 337 | assume_role_policy: 338 | Version: "2012-10-17" 339 | Statement: [{"Sid": "", 340 | "Effect": "Allow", 341 | # Stups account should be trusted to run read only api calls for fullstop 342 | "Principal": {"AWS": "arn:aws:iam::123456789123:root"}, 343 | "Action": "sts:AssumeRole"}] 344 | 345 | accounts: 346 | legacy-foobar: 347 | # do not overwrite legacy Frankfurt setup! 348 | regions: 349 | - eu-west-1 350 | stups: 351 | -------------------------------------------------------------------------------- /examples/example-minimal-configuration.yaml: -------------------------------------------------------------------------------- 1 | # Minimal Configuration YAML file for AWS Account Configurator 2 | # see https://github.com/zalando-stups/sevenseconds 3 | # the global section applies to all accounts 4 | # every key from the global section can be overridden for each account ("accounts" section) 5 | global: 6 | # account alias: prefix the account alias with the company name ("exampleorg") 7 | alias: "exampleorg-{account_name}" 8 | # regions to configure 9 | regions: 10 | - eu-central-1 11 | - eu-west-1 12 | # hosted zone for each AWS account 13 | domain: "{account_name}.example.org" 14 | # base Taupage AMI to search for 15 | base_ami: 16 | default_channel: "Taupage-AMI-*" 17 | is_public: false 18 | # account_id of the AMI creator 19 | onwer_id: 123456789123 20 | # SSH bastion/jump host 21 | bastion: 22 | # uncomment the following line to terminate and redeploy "odd" 23 | # re_deploy: true 24 | 25 | # this "ami_config" section contains the Taupage user data YAML 26 | # see 
http://docs.stups.io/en/latest/components/taupage.html 27 | ami_config: 28 | application_id: odd 29 | application_version: "0.1" 30 | runtime: Docker 31 | source: stups/odd:latest 32 | logentries_account_key: 123-123-123-123 33 | ports: 34 | # use default SSH port for Docker container 35 | 22: 22 36 | environment: 37 | ALLOWED_REMOTE_NETWORKS: "172.31.0.0/16" 38 | # configure your even URL here 39 | GRANTING_SERVICE_URL: "https://even.stups.example.org" 40 | # configure your public even SSH key here 41 | GRANTING_SERVICE_SSH_KEY: "ssh-rsa AAAAB3Nza123123mysshpublickey123456789" 42 | root: true 43 | # use non-default SSH port for OpenSSH on host 44 | ssh_ports: [2222] 45 | hostname: "odd-{account_name}" 46 | 47 | accounts: 48 | # configure account with alias "exampleorg-foobar" 49 | foobar: 50 | # configure account with alias "exampleorg-stups" 51 | stups: 52 | -------------------------------------------------------------------------------- /next-version: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import subprocess 4 | 5 | MAJOR_VERSION = 1 6 | MINOR_VERSION = 2 7 | 8 | 9 | def get_latest_version() -> (int, int, int): 10 | """ 11 | Gets latest version based on Git Tags. 
12 | """ 13 | proc = subprocess.run(['git', 'tag'], stdout=subprocess.PIPE) 14 | 15 | versions = sorted(map(lambda version: tuple(int(sub) 16 | for sub 17 | in version.split('.')), 18 | proc.stdout.decode().splitlines())) 19 | return versions[-1] 20 | 21 | 22 | if __name__ == '__main__': 23 | major, minor, build = get_latest_version() 24 | 25 | if major != MAJOR_VERSION or minor != MINOR_VERSION: 26 | new_build = 0 27 | else: 28 | new_build = build + 1 29 | 30 | print(f"{MAJOR_VERSION}.{MINOR_VERSION}.{new_build}") 31 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | clickclick>=1.2.1 2 | boto3>=1.9.212 3 | botocore>=1.5.26 4 | netaddr>=0.7.18 5 | requests>=2.9.1 6 | keyring 7 | python-gnupg>=0.3.7 8 | python-dateutil 9 | datetime 10 | PyYAML 11 | PyJWT>=2.0, <3.0 12 | stups-zign>=1.1.26 13 | setuptools>=30 14 | flake8 15 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | AWS Account Configurator 6 | """ 7 | 8 | import sys 9 | import os 10 | import inspect 11 | from distutils.cmd import Command 12 | 13 | import setuptools 14 | from setuptools.command.test import test as TestCommand 15 | from setuptools import setup 16 | 17 | if sys.version_info < (3, 4, 0): 18 | sys.stderr.write('FATAL: AWS Account Configurator needs to be run with Python 3.4+\n') 19 | sys.exit(1) 20 | 21 | __location__ = os.path.join(os.getcwd(), os.path.dirname(inspect.getfile(inspect.currentframe()))) 22 | 23 | 24 | def read_version(package): 25 | data = {} 26 | with open(os.path.join(package, '__init__.py'), 'r') as fd: 27 | exec(fd.read(), data) 28 | return data['__version__'] 29 | 30 | 31 | NAME = 'stups-sevenseconds' 32 | MAIN_PACKAGE = 'sevenseconds' 33 | VERSION = 
read_version(MAIN_PACKAGE) 34 | DESCRIPTION = 'Configure AWS accounts' 35 | LICENSE = 'Apache License 2.0' 36 | URL = 'https://github.com/zalando-stups/sevenseconds' 37 | AUTHOR = 'Henning Jacobs' 38 | EMAIL = 'henning.jacobs@zalando.de' 39 | 40 | COVERAGE_XML = True 41 | COVERAGE_HTML = False 42 | JUNIT_XML = False 43 | 44 | 45 | # Add here all kinds of additional classifiers as defined under 46 | # https://pypi.python.org/pypi?%3Aaction=list_classifiers 47 | CLASSIFIERS = [ 48 | 'Development Status :: 4 - Beta', 49 | 'Environment :: Console', 50 | 'Intended Audience :: Developers', 51 | 'Intended Audience :: System Administrators', 52 | 'License :: OSI Approved :: Apache Software License', 53 | 'Operating System :: POSIX :: Linux', 54 | 'Programming Language :: Python', 55 | 'Programming Language :: Python :: 3.4', 56 | 'Programming Language :: Python :: Implementation :: CPython', 57 | ] 58 | 59 | CONSOLE_SCRIPTS = ['sevenseconds = sevenseconds.cli:main'] 60 | 61 | 62 | class PyTest(TestCommand): 63 | 64 | user_options = [('cov=', None, 'Run coverage'), ('cov-xml=', None, 'Generate junit xml report'), ('cov-html=', 65 | None, 'Generate junit html report'), ('junitxml=', None, 'Generate xml of test results')] 66 | 67 | def initialize_options(self): 68 | TestCommand.initialize_options(self) 69 | self.cov = None 70 | self.cov_xml = False 71 | self.cov_html = False 72 | self.junitxml = None 73 | 74 | def finalize_options(self): 75 | TestCommand.finalize_options(self) 76 | if self.cov is not None: 77 | self.cov = ['--cov', self.cov, '--cov-report', 'term-missing'] 78 | if self.cov_xml: 79 | self.cov.extend(['--cov-report', 'xml']) 80 | if self.cov_html: 81 | self.cov.extend(['--cov-report', 'html']) 82 | if self.junitxml is not None: 83 | self.junitxml = ['--junitxml', self.junitxml] 84 | 85 | def run_tests(self): 86 | try: 87 | import pytest 88 | except Exception: 89 | raise RuntimeError('py.test is not installed, run: pip install pytest') 90 | params = {'args': 
self.test_args} 91 | if self.cov: 92 | params['args'] += self.cov 93 | if self.junitxml: 94 | params['args'] += self.junitxml 95 | params['args'] += ['--doctest-modules', MAIN_PACKAGE, '-s'] 96 | errno = pytest.main(**params) 97 | sys.exit(errno) 98 | 99 | 100 | def sphinx_builder(): 101 | try: 102 | from sphinx.setup_command import BuildDoc 103 | except ImportError: 104 | 105 | class NoSphinx(Command): 106 | 107 | user_options = [] 108 | 109 | def initialize_options(self): 110 | raise RuntimeError('Sphinx documentation is not installed, run: pip install sphinx') 111 | 112 | return NoSphinx 113 | 114 | class BuildSphinxDocs(BuildDoc): 115 | 116 | def run(self): 117 | if self.builder == 'doctest': 118 | import sphinx.ext.doctest as doctest 119 | # Capture the DocTestBuilder class in order to return the total 120 | # number of failures when exiting 121 | ref = capture_objs(doctest.DocTestBuilder) 122 | BuildDoc.run(self) 123 | errno = ref[-1].total_failures 124 | sys.exit(errno) 125 | else: 126 | BuildDoc.run(self) 127 | 128 | return BuildSphinxDocs 129 | 130 | 131 | class ObjKeeper(type): 132 | 133 | instances = {} 134 | 135 | def __init__(cls, name, bases, dct): 136 | cls.instances[cls] = [] 137 | 138 | def __call__(cls, *args, **kwargs): 139 | cls.instances[cls].append(super(ObjKeeper, cls).__call__(*args, **kwargs)) 140 | return cls.instances[cls][-1] 141 | 142 | 143 | def capture_objs(cls): 144 | from six import add_metaclass 145 | module = inspect.getmodule(cls) 146 | name = cls.__name__ 147 | keeper_class = add_metaclass(ObjKeeper)(cls) 148 | setattr(module, name, keeper_class) 149 | cls = getattr(module, name) 150 | return keeper_class.instances[cls] 151 | 152 | 153 | def get_install_requirements(path): 154 | content = open(os.path.join(__location__, path)).read() 155 | return [req for req in content.split('\\n') if req != ''] 156 | 157 | 158 | def read(fname): 159 | return open(os.path.join(__location__, fname)).read() 160 | 161 | 162 | def setup_package(): 
163 | cmdclass = {} 164 | cmdclass['test'] = PyTest 165 | 166 | docs_path = os.path.join(__location__, 'docs') 167 | docs_build_path = os.path.join(docs_path, '_build') 168 | install_reqs = get_install_requirements('requirements.txt') 169 | 170 | command_options = {'docs': { 171 | 'project': ('setup.py', MAIN_PACKAGE), 172 | 'version': ('setup.py', VERSION.split('-', 1)[0]), 173 | 'release': ('setup.py', VERSION), 174 | 'build_dir': ('setup.py', docs_build_path), 175 | 'config_dir': ('setup.py', docs_path), 176 | 'source_dir': ('setup.py', docs_path), 177 | }, 'doctest': { 178 | 'project': ('setup.py', MAIN_PACKAGE), 179 | 'version': ('setup.py', VERSION.split('-', 1)[0]), 180 | 'release': ('setup.py', VERSION), 181 | 'build_dir': ('setup.py', docs_build_path), 182 | 'config_dir': ('setup.py', docs_path), 183 | 'source_dir': ('setup.py', docs_path), 184 | 'builder': ('setup.py', 'doctest'), 185 | }, 'test': {'test_suite': ('setup.py', 'tests'), 'cov': ('setup.py', MAIN_PACKAGE)}} 186 | if JUNIT_XML: 187 | command_options['test']['junitxml'] = 'setup.py', 'junit.xml' 188 | if COVERAGE_XML: 189 | command_options['test']['cov_xml'] = 'setup.py', True 190 | if COVERAGE_HTML: 191 | command_options['test']['cov_html'] = 'setup.py', True 192 | 193 | setup( 194 | name=NAME, 195 | version=VERSION, 196 | url=URL, 197 | description=DESCRIPTION, 198 | author=AUTHOR, 199 | author_email=EMAIL, 200 | license=LICENSE, 201 | keywords='aws account vpc', 202 | long_description=read('README.rst'), 203 | classifiers=CLASSIFIERS, 204 | test_suite='tests', 205 | packages=setuptools.find_packages(exclude=['tests', 'tests.*']), 206 | install_requires=install_reqs, 207 | setup_requires=['six', 'flake8'], 208 | cmdclass=cmdclass, 209 | tests_require=['pytest-cov', 'pytest'], 210 | command_options=command_options, 211 | entry_points={'console_scripts': CONSOLE_SCRIPTS}, 212 | ) 213 | 214 | 215 | if __name__ == '__main__': 216 | setup_package() 217 | 
import re
import click
import yaml
import os
import sys

import sevenseconds
from netaddr import IPNetwork
from clickclick import AliasedGroup
from .helper import error, info, fatal_error
from .helper.auth import get_sessions
from .helper.network import get_trusted_addresses
from .helper.regioninfo import get_regions
from .config.configure import start_configuration, start_cleanup

CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
# Configuration files must declare exactly this version (see load_config).
SUPPORTED_CONFIG_VERSION = 10


def print_version(ctx, param, value):
    """Click eager callback for ``-V/--version``: print and exit."""
    if not value or ctx.resilient_parsing:
        return
    click.echo('AWS Account Configurator {}'.format(sevenseconds.__version__))
    ctx.exit()


@click.group(cls=AliasedGroup, context_settings=CONTEXT_SETTINGS)
@click.option('-V', '--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True)
def cli():
    pass


@cli.command()
@click.argument('account_name')
@click.argument('region')
def destroy(account_name, region):
    '''not yet implemented'''


@cli.command()
@click.argument('file', type=click.File('rb'))
@click.argument('account_name_pattern')
@click.option('--dry-run', is_flag=True)
@click.option('-P', '--max-procs',
              help='Run up to max-procs processes at a time. Default CPU Count',
              default=os.cpu_count(),
              type=click.INT)
@click.option('--update-odd-host', help='Update old Odd Hosts', is_flag=True)
@click.option('--redeploy-odd-host', help='Redeploy Odd Hosts (independent of age and status)', is_flag=True)
@click.option('--migrate2natgateway',
              help='Drop NAT Instance and create NAT Gateway (NETWORK OUTAGE!)',
              metavar='')
@click.option('--migrate2natgateway-if-empty',
              help='Drop NAT Instance and create NAT Gateway, if no other Instance running', is_flag=True)
@click.option('--re-add-defaultroute',
              help='Drop and re-add the default route of the internal subnet (NETWORK OUTAGE!)', is_flag=True)
@click.option('--login-only',
              help='exit after login', is_flag=True)
@click.option('--quite',
              # NOTE(review): historical misspelling of "quiet"; the option
              # name is part of the CLI interface, so it is kept as-is.
              help='log only errors', is_flag=True)
@click.option('--login-account',
              help='Log in with Account X and use AssumeRole for the other Accounts', type=click.STRING)
@click.option('--token',
              help='Oauth2 Token for AWS Credential Service', type=click.STRING)
def configure(file, account_name_pattern, **options):
    '''Configure one or more AWS account(s) matching the provided pattern

    ACCOUNT_NAME_PATTERN is a regex that is matched against the full account name

    Possible environment variables:
    AWS_PROFILE Connect to this Profile without SAML
    SSLDIR Directory with all SSL-Files
    '''
    try:
        config, sessions = _get_session(
            'configuration of: ',
            file,
            account_name_pattern,
            options)
        session_list = list(sessions.values())
    except Exception as e:
        # fatal_error terminates the process, so the variables below are
        # only used when session setup succeeded.
        fatal_error("Can't get sessions. Error: {}".format(e))

    # Get NAT/ODD Addresses. Need the first Session to get all AZ for the Regions
    trusted_addresses = get_trusted_addresses(session_list[0].admin_session, config)
    run_successfully = start_configuration(session_list, trusted_addresses, options)
    if not run_successfully:
        sys.exit(1)
@cli.command('clear-region')
@click.argument('file', type=click.File('rb'))
@click.argument('region')
@click.argument('account_name_pattern')
@click.option('--dry-run', is_flag=True)
@click.option('-P', '--max-procs',
              help='Run up to max-procs processes at a time. Default CPU Count',
              default=os.cpu_count(),
              type=click.INT)
@click.option('--quite',
              help='log only errors', is_flag=True)
@click.option('--login-account',
              help='Log in with Account X and use AssumeRole for the other Accounts', type=click.STRING)
@click.option('--token',
              help='Oauth2 Token for AWS Credential Service', type=click.STRING)
def clear_region(file, region, account_name_pattern, **options):
    '''drop all stups service from region X

    ACCOUNT_NAME_PATTERN is a regex that is matched against the full
    account name (same matching as the configure command).

    Possible environment variables:
    AWS_PROFILE Connect to this Profile without SAML
    '''
    try:
        config, sessions = _get_session(
            'cleanup of region {} in '.format(region),
            file,
            account_name_pattern,
            options)
    except Exception as e:
        fatal_error("Can't get sessions. Error: {}".format(e))

    start_cleanup(region, sessions, options)


@cli.command('update-security-group')
@click.argument('file', type=click.File('rb'))
@click.argument('region')
@click.argument('account_name_pattern')
@click.argument('security_group', nargs=-1)
def cli_update_security_group(file, region, account_name_pattern, security_group):
    '''Update a Security Group and allow access from all trusted networks, NAT instances and bastion hosts'''
    try:
        # BUG FIX: the original passed the pattern wrapped in a list and
        # omitted the required options argument, so _get_session() always
        # raised a TypeError before doing any work.
        config, sessions = _get_session(
            'update Security Group in region {} for '.format(region),
            file,
            account_name_pattern,
            {})
    except Exception as e:
        fatal_error("Can't get sessions. Error: {}".format(e))
    addresses = get_trusted_addresses(list(sessions.values())[0].admin_session, config)
    info(', '.join(sorted(addresses)))
    fatal_error('not implemented yet')


def load_config(file):
    """Load a YAML configuration file and validate its declared version.

    Raises:
        Exception: if the file's ``version`` differs from
            SUPPORTED_CONFIG_VERSION.
    """
    result = yaml.safe_load(file)
    config_version = result.get('version')
    if config_version != SUPPORTED_CONFIG_VERSION:
        # local variable renamed from "error", which shadowed the imported
        # error() helper
        msg = "Only configuration version {} is supported (found {}), please update sevenseconds".format(
            SUPPORTED_CONFIG_VERSION, config_version)
        raise Exception(msg)
    return result


@cli.command('verify-trusted-networks')
@click.argument('file', type=click.File('rb'))
@click.argument('cidr-list', nargs=-1)
def verify_trusted_networks(file, cidr_list):
    '''Check if the given CIDR included in the trusted networks list

    CIDR One or more CIDR Network Blocks'''
    config = load_config(file)
    addresses = set()
    for name, net in config.get('global', {}).get('trusted_networks', {}).items():
        addresses.add(IPNetwork(net))
    found = []
    not_found = []
    for net in cidr_list:
        cidr = IPNetwork(net)
        overlaps = False
        for trusted in addresses:
            if cidr in trusted:
                overlaps = True
                break
        if overlaps:
            found.append(cidr)
        else:
            not_found.append(cidr)
    if len(not_found):
        # typo fix: "Not mached" -> "Not matched"
        print('Not matched:\n{}'.format('\n'.join([str(x) for x in sorted(set(not_found))])))
    elif len(found) > 0 and len(not_found) == 0:
        print('All Networks are matched!')


def _get_session(msg, file, account_name_pattern, options):
    """Resolve matching accounts from *file* and open AWS sessions for them.

    Raises:
        Exception: if no account matches, no session could be opened, or
            the caller only requested a login (``--login-only``).
    """
    config = load_config(file)
    accounts = config.get('accounts', {})

    account_names = [account for account in accounts.keys() if re.fullmatch(account_name_pattern, account)]

    if not account_names:
        # BUG FIX: a bare `raise` with no active exception raises
        # "RuntimeError: No active exception to re-raise"; raise a real,
        # descriptive exception instead (callers report its message).
        raise Exception('No configuration found for account {}'.format(account_name_pattern))

    account_name_length = max([len(x) for x in account_names])
    region_name_length = max([len(x) for x in get_regions('cloudtrail')])
    sevenseconds.helper.PATTERNLENGTH = account_name_length + region_name_length + 2
    sevenseconds.helper.QUITE = options.get('quite', False)

    info('Start {}{}'.format(msg, ', '.join(account_names)))

    sessions = get_sessions(account_names, config, accounts, options)
    if len(sessions) == 0:
        raise Exception('No AWS accounts with login!')
    if options.get('login_only'):
        raise Exception('Stopping after successful login (--login-only)')
    return config, sessions


def main():
    cli()
class AccountData(NamedTuple):
    """Immutable bundle of everything needed to configure one AWS account.

    boto3/OAuthServices annotations are forward references (strings) so the
    class can be created without importing those modules at class-build time;
    NamedTuple treats them identically.
    """
    name: str                        # Short Name of this Account
    alias: str                       # Full AWS Account Alias Name (prefix + name)
    id: str                          # AWS Account ID
    session: 'boto3.Session'         # Boto3 Session for the current Account
    admin_session: 'boto3.Session'   # Boto3 Session for the Admin Account (for DNS delegation)
    ami_session: 'boto3.Session'     # Boto3 Session of the Taupage Owner Accounts (for EC2 AMI)
    config: dict                     # Configuration of the current Account
    dry_run: bool                    # dry-run boolean Flag
    options: dict                    # Command Options dict
    auth: 'OAuthServices'            # OAuthServices Object (exp. for Account List and AWS Credentials Service)

    @property
    def domain(self) -> Optional[str]:
        """Account DNS domain, or None when no domain is configured."""
        if self.config['domain'] is None:
            return None
        return self.config['domain'].format(account_name=self.name)


SharedData = namedtuple(
    'SharedData',
    (
        'base_images',       # {region -> {channel -> ami_id}}
        'trusted_addresses'
    ))


def configure_acm(account: object, region):
    """Ensure the ACM certificates described in the account config exist.

    Requests missing certificates, resends pending validation mails and
    triggers renewal for certificates expiring within eight weeks.
    """
    config = account.config.get('acm')
    if not config:
        info('No ACM configuration found. Skip ACM Cert requests')
        return
    if isinstance(config, list):
        certs = config
    else:
        certs = [config]
    for cert_config in certs:
        if not cert_config.get('domain_name') or not cert_config.get('validation_domain'):
            continue
        domains = {}
        cert_domain_name = cert_config['domain_name'].format(account_name=account.name)
        domains[cert_domain_name] = cert_config['validation_domain'].format(account_name=account.name)
        for domain_name, validation_domain in cert_config.get('subject_alternative_names', {}).items():
            domains[domain_name.format(account_name=account.name)] = validation_domain.format(account_name=account.name)

        acm = account.session.client('acm', region)
        certificate_list = acm.list_certificates()['CertificateSummaryList']
        if not certificate_list:
            request_acm_cert(acm, cert_domain_name, domains)
        else:
            found_cert = False
            with ActionOnExit('Check existing Certificates..') as act:
                for cert_summary in certificate_list:
                    cert = acm.describe_certificate(CertificateArn=cert_summary['CertificateArn'])['Certificate']
                    if cert['Status'] == 'PENDING_VALIDATION':
                        resend_validation_email(acm, cert)
                    elif cert['Status'] != 'ISSUED':
                        continue
                    elif (datetime.timedelta(weeks=8)
                            > cert['NotAfter'] - datetime.datetime.now(cert['NotAfter'].tzinfo)):
                        # less than eight weeks of validity left -> renew
                        renew_certificate(acm, cert)
                    domain_options = {}
                    for options in cert['DomainValidationOptions']:
                        domain_options[options['DomainName']] = options.get('ValidationDomain', options['DomainName'])
                    if domain_options == domains:
                        act.ok('found')
                        found_cert = True
                if not found_cert:
                    act.warning('nothing found')
            if not found_cert:
                request_acm_cert(acm, cert_domain_name, domains)


def renew_certificate(acm, cert):
    """Trigger renewal of *cert* by resending its validation emails."""
    with ActionOnExit('Renew Certificate {}. Resend Validation...'
                      .format(cert['CertificateArn'])) as act_renew:
        for d in cert["DomainValidationOptions"]:
            try:
                acm.resend_validation_email(
                    CertificateArn=cert['CertificateArn'],
                    Domain=d["DomainName"],
                    ValidationDomain=d["ValidationDomain"]
                )
            except Exception as e:
                # BUG FIX: the original logged the unrelated message
                # 'found existing config' (copy-pasted from the cloudtrail
                # module); report the actual failure instead.
                act_renew.error('Resend validation email failed: {}'.format(e))


def resend_validation_email(acm, cert):
    """Resend validation mails for a certificate pending validation."""
    renewal_status = cert.get('RenewalSummary', {}).get('RenewalStatus')
    if renewal_status != 'PENDING_VALIDATION':
        info('Certificate {} in {}, not resending validation email'.format(cert['CertificateArn'], renewal_status))
        return

    with ActionOnExit('Certificate {} still Pending. Resend Validation...'
                      .format(cert['CertificateArn'])):
        for d in cert["DomainValidationOptions"]:
            acm.resend_validation_email(
                CertificateArn=cert['CertificateArn'],
                Domain=d["DomainName"],
                ValidationDomain=d["ValidationDomain"]
            )


def request_acm_cert(acm: object, cert_domain_name, domains):
    """Request a new ACM certificate covering all names in *domains*."""
    with ActionOnExit('Create Certificate Request for {}..'.format(', '.join(domains.keys()))):
        acm.request_certificate(
            DomainName=cert_domain_name,
            SubjectAlternativeNames=list(domains.keys()),
            IdempotencyToken='sevenseconds',
            DomainValidationOptions=[
                {
                    'DomainName': d,
                    'ValidationDomain': v
                } for d, v in domains.items()
            ]
        )


def latest_ami(session: object, region: str, config: dict, channel: str):
    """Return the id of the newest available AMI for *channel*, or None."""
    ec2 = session.resource('ec2', region)

    with ActionOnExit('Searching for latest "{}" AMI..'.format(channel)):
        filters = {"name": channel,
                   "is-public": "true" if config['is_public'] else "false",
                   "state": "available",
                   "root-device-type": "ebs",
                   "owner-id": config.get('owner_id')}

        images = sorted(ec2.images.filter(Filters=ami_filter(filters)), key=lambda x: x.creation_date, reverse=True)
        if images:
            return images[0].id
        else:
            return None
def ami_filter(predicates):
    """Translate a ``{name: value}`` mapping into EC2 ``Filters`` entries.

    Entries with falsy values are skipped; kept values are stringified.
    """
    filters = []
    for filter_name, filter_value in predicates.items():
        if filter_value:
            filters.append({"Name": filter_name, "Values": [str(filter_value)]})
    return filters


def latest_base_images(ami_session: object, region: str, config: dict):
    """Resolve the newest AMI id for each configured channel.

    The default channel is always included. Returns {channel -> ami_id}.
    """
    wanted_channels = set(config.get('channels', []))
    wanted_channels.add(config['default_channel'])

    images = {}
    for channel in wanted_channels:
        images[channel] = latest_ami(ami_session, region, config, channel)
    return images


def configure_base_images(account: object, region: str, latest_images: dict):
    """Ensure the account can launch every AMI in *latest_images*.

    Grants launch permission from the AMI owner account for images not yet
    visible, then waits until they show up in the target account.
    """
    ec2 = account.session.resource('ec2', region)
    ami_ec2 = account.ami_session.resource('ec2', region)

    image_ids = [image_id for image_id in latest_images.values() if image_id]

    with ActionOnExit('Checking that all AMIs ({}) are available...'.format(', '.join(image_ids))):
        available_images = {image.id for image in ec2.images.filter(ImageIds=image_ids)}
        pending_image_ids = [image_id for image_id in image_ids if image_id not in available_images]

    if pending_image_ids:
        # Allow access from the AMI account
        for image in ami_ec2.images.filter(ImageIds=pending_image_ids):
            with ActionOnExit('Permit {} for "{}/{}"..'.format(image.id, account.id, account.alias)) as act:
                image.modify_attribute(Attribute='launchPermission', OperationType='add', UserIds=[account.id])

        # Wait until all images are available
        with ActionOnExit('Waiting of AWS-Sync') as act:
            for image in ec2.images.filter(ImageIds=pending_image_ids):
                image.wait_until_exists()
                act.progress()
import socket
import yaml
import datetime
import base64
import difflib
import botocore.exceptions
import requests
import json
from copy import deepcopy
from ..helper import info, warning, error, ActionOnExit, substitute_template_vars
from ..helper.aws import filter_subnets, associate_address, get_tag
from .route53 import configure_dns_record, delete_dns_record
from ..config import AccountData


def configure_bastion_host(account: AccountData, vpc: object, region: str, base_ami_id: str):
    """Create, update or remove the Odd SSH bastion host of the given VPC.

    Handles both legacy (plain EC2 instance) and CloudFormation managed
    bastions: stale/stopped hosts are terminated, outdated ones are
    re-deployed, and a fresh CloudFormation stack is created when no
    bastion exists and one is enabled in the account config.
    """
    ec2 = account.session.resource('ec2', region)
    cf = account.session.resource('cloudformation', region)
    cfc = account.session.client('cloudformation', region)

    enable_bastion = account.config.get("enable_odd", False)
    re_deploy = account.config['bastion'].get('re_deploy', account.options.get('redeploy_odd_host'))

    if not base_ami_id:
        # no usable AMI -> never deploy a bastion
        enable_bastion = False

    bastion_version = None
    if account.config['bastion'].get('version_url'):
        with ActionOnExit('Get last Tag for Bastion Image...') as act:
            r = requests.get(account.config['bastion'].get('version_url'))
            if r.status_code != 200:
                act.error('Error code: {}'.format(r.status_code))
                act.error('Error msg: {}'.format(r.text))
                return
            tags = sorted(r.json(), key=lambda x: x['created'], reverse=True)
            bastion_version = tags[0]['name']
            act.ok(bastion_version)

    config = substitute_template_vars(account.config['bastion'].get('ami_config'),
                                      {'account_name': account.name,
                                       'vpc_net': str(vpc.cidr_block),
                                       'version': bastion_version})
    user_data = '#taupage-ami-config\n{}'.format(yaml.safe_dump(config)).encode('utf-8')

    # Search all existing hosts (Instances and Cloudformation)
    instance_filter = [
        {'Name': 'tag:Name',
         'Values': ['Odd (SSH Bastion Host)']},
        {'Name': 'instance-state-name',
         'Values': ['running', 'pending', 'stopping', 'stopped']},
    ]
    legacy_instances = list(vpc.instances.filter(Filters=instance_filter))
    for instance in legacy_instances:
        # Terminate old (stopped) Odd Systems
        if instance.state.get('Name') == 'stopped':
            drop_bastionhost(instance)
        else:
            # Verify Running Version (Userdata, FS Parameter)
            inst_user_data = base64.b64decode(instance.describe_attribute(Attribute='userData')['UserData']['Value'])
            if instance.image_id != base_ami_id:
                error('{} uses {} instead of {}.'.format(instance.id, instance.image_id, base_ami_id))
                if re_deploy or account.options.get('update_odd_host'):
                    error(' ==> Make re-deploy')
                    re_deploy = True
            if inst_user_data != user_data:
                original = inst_user_data.decode('utf-8')
                new = user_data.decode('utf-8')
                diff = difflib.ndiff(original.splitlines(1), new.splitlines(1))
                error('{} use a different UserData\n{}'.format(instance.id, ''.join(diff)))
                if re_deploy or account.options.get('update_odd_host'):
                    error(' ==> Make re-deploy')
                    re_deploy = True
            launch_time = instance.launch_time
            if (not wait_for_ssh_port(instance.public_ip_address, 60) and
                    datetime.timedelta(minutes=15) < datetime.datetime.now(launch_time.tzinfo) - launch_time):
                error('Bastion Host does not response. Drop Bastionhost and create new one')
                drop_bastionhost(instance)
                legacy_instances = None

    # Start migration
    if legacy_instances and re_deploy:
        for instance in legacy_instances:
            drop_bastionhost(instance)
        legacy_instances = None

    update_needed = False

    # Check Odd Hosts in other vpcs
    cloudformation_filter = [
        {'Name': 'tag:aws:cloudformation:logical-id',
         'Values': ['OddServerInstance']},
        {'Name': 'instance-state-name',
         'Values': ['running', 'pending', 'stopping', 'stopped']},
    ]
    cloudformation_instances = list(vpc.instances.filter(Filters=cloudformation_filter))
    if cloudformation_instances:
        if not enable_bastion:
            info('bastion not enabled and instances found. Start clean up')
            delete_bastion_host(account, region)
            return
        for instance in cloudformation_instances:
            # Terminate old (stopped) Odd Systems
            if instance.state.get('Name') == 'stopped':
                drop_bastionhost(instance)
            else:
                # Verify Running Version (Userdata, FS Parameter)
                oddstack = cf.Stack(get_tag(instance.tags, 'aws:cloudformation:stack-name'))

                used_ami_id = get_tag(oddstack.parameters, 'TaupageId', prefix='Parameter')
                if used_ami_id != base_ami_id:
                    error('{} uses {} instead of {}.'.format(oddstack.name, used_ami_id, base_ami_id))
                    if re_deploy or account.options.get('update_odd_host'):
                        error(' ==> prepare change set')
                        update_needed = True
                used_bastion_version = get_tag(oddstack.parameters, 'OddRelease', prefix='Parameter')
                if used_bastion_version != bastion_version:
                    error('{} uses {} instead of {}.'.format(oddstack.name, used_bastion_version, bastion_version))
                    if re_deploy or account.options.get('update_odd_host'):
                        error(' ==> prepare change set')
                        update_needed = True
                if update_needed or re_deploy:
                    update_cf_bastion_host(account, vpc, region, oddstack, base_ami_id, bastion_version)
                if not legacy_instances:
                    info('check old odd security groups')
                    cleanup_old_security_group(account, region, oddstack, vpc)

    if not legacy_instances and not cloudformation_instances and enable_bastion:
        try:
            stack = cf.Stack('Odd')
            info('Stack Status: {}'.format(stack.stack_status))
        except Exception:
            create_cf_bastion_host(account, vpc, region, base_ami_id, bastion_version)
            # BUG FIX: 'stack' was left unbound when the lookup above failed,
            # so the stack_status access below raised a NameError right after
            # a successful stack creation. Re-fetch the freshly created stack.
            stack = cf.Stack('Odd')
        if stack.stack_status in ('UPDATE_IN_PROGRESS', 'CREATE_IN_PROGRESS'):
            if stack.stack_status.startswith('UPDATE_'):
                waiter = cfc.get_waiter('stack_update_complete')
            else:
                waiter = cfc.get_waiter('stack_create_complete')
            with ActionOnExit('Waiting of Stack') as act:
                try:
                    waiter.wait(StackName='Odd')
                except botocore.exceptions.WaiterError as e:
                    act.error('Stack creation failed: {}'.format(e))
                    return
        info('check old odd security groups')
        cleanup_old_security_group(account, region, stack, vpc)

        instance = ec2.Instance(stack.Resource(logical_id='OddServerInstance').physical_resource_id)
        launch_time = instance.launch_time
        if (not wait_for_ssh_port(instance.public_ip_address, 60) and
                datetime.timedelta(minutes=15) < datetime.datetime.now(launch_time.tzinfo) - launch_time):
            error('Bastion Host does not response. Force Update for Bastionhost Stack')
            update_cf_bastion_host(account, vpc, region, stack, base_ami_id, bastion_version)
def cleanup_old_security_group(account: AccountData, region: str, oddstack: object, vpc: object):
    """Migrate references from legacy Odd security groups to the stack SG
    and delete the legacy groups afterwards.
    """
    ec2 = account.session.resource('ec2', region)
    stack_security_group_id = oddstack.Resource(logical_id='OddSecurityGroup').physical_resource_id
    sgs = [x for x in vpc.security_groups.all() if x.group_name == 'Odd (SSH Bastion Host)']
    for sg in sgs:
        with ActionOnExit('Found old Odd Security Group {}/{}'.format(sg.id, sg.group_name)) as act:
            for sg_depency in vpc.meta.client.describe_security_groups(Filters=[
                {
                    'Name': 'ip-permission.group-id',
                    'Values': [
                        sg.group_id,
                    ]
                },
            ])['SecurityGroups']:
                sg_depency = ec2.SecurityGroup(sg_depency.get('GroupId'))
                # NOTE(review): the inner `as act` shadows the outer one, so the
                # act.ok/act.error below refer to the last inner context;
                # behavior kept as-is — confirm whether that is intended.
                with ActionOnExit(
                        'Found old Odd SG depency in Security Group {}/{}'
                        .format(sg_depency.id, sg_depency.group_name)) as act:
                    for permission in sg_depency.ip_permissions:
                        _change_permission(sg_depency, permission, sg.group_id, stack_security_group_id, 'ingress', act)
                    for permission in sg_depency.ip_permissions_egress:
                        _change_permission(sg_depency, permission, sg.group_id, stack_security_group_id, 'egress', act)
            try:
                sg.delete()
                act.ok('removed')
            except Exception as e:
                act.error('Can\'t cleanup old Odd Stack: {}'.format(e))


def _change_permission(sg, permission, old_group_id, new_group_id, direction, act):
    """Replace *old_group_id* with *new_group_id* inside one SG permission,
    revoking the old rule and authorizing the rewritten one.
    """
    old_permission = deepcopy(permission)
    replace = False
    for user_id_group_pair in permission.get('UserIdGroupPairs', []):
        if user_id_group_pair.get('GroupId') == old_group_id:
            user_id_group_pair['GroupId'] = new_group_id
            replace = True
    if permission.get('UserIdGroupPairs'):
        # de-duplicate pairs that may collide after the rewrite
        permission['UserIdGroupPairs'] = list(
            dict(
                (v['GroupId'], v) for v in permission['UserIdGroupPairs']
            ).values()
        )

    if replace:
        try:
            if direction == 'egress':
                sg.revoke_egress(IpPermissions=[old_permission])
            elif direction == 'ingress':
                sg.revoke_ingress(IpPermissions=[old_permission])
        except Exception as e:
            act.error('Can\'t revoke the Permissions: {}'.format(e))
        try:
            if direction == 'egress':
                sg.authorize_egress(IpPermissions=[permission])
            elif direction == 'ingress':
                sg.authorize_ingress(IpPermissions=[permission])
        except Exception as e:
            act.error('Can\'t authorize the Permissions: {}'.format(e))


def create_cf_bastion_host(account: AccountData, vpc: object, region: str, ami_id: str, bastion_version: str):
    """Create the Odd CloudFormation stack in a DMZ subnet and register its
    public IP in DNS (when a domain is configured).
    """
    cf = account.session.resource('cloudformation', region)
    cfc = account.session.client('cloudformation', region)
    ec2c = account.session.client('ec2', region)

    subnet_ids = [a.id for a in filter_subnets(vpc, 'dmz')]
    if not subnet_ids:
        warning('No DMZ subnet found')
        return

    allocation_id, ip = associate_address(ec2c)
    stackname = 'Odd'
    stack = cf.create_stack(
        StackName=stackname,
        TemplateBody=json.dumps(account.config['bastion'].get('cf_template')),
        Parameters=[
            {'ParameterKey': 'AccountName', 'ParameterValue': account.name},
            {'ParameterKey': 'DisableApiTermination', 'ParameterValue': 'false'},
            {'ParameterKey': 'EIPAllocation', 'ParameterValue': allocation_id},
            {'ParameterKey': 'OddRelease', 'ParameterValue': bastion_version},
            {'ParameterKey': 'SubnetId', 'ParameterValue': subnet_ids[0]},
            {'ParameterKey': 'TaupageId', 'ParameterValue': ami_id},
            {'ParameterKey': 'VPCNetwork', 'ParameterValue': str(vpc.cidr_block)},
            {'ParameterKey': 'VpcId', 'ParameterValue': vpc.id}
        ],
        OnFailure='DELETE',
        Tags=[
            {'Key': 'LastUpdate', 'Value': time.strftime('%Y-%m-%dT%H:%M:%S%z')},
            {'Key': 'InfrastructureComponent', 'Value': 'true'}
        ]
    )
    with ActionOnExit('Wait of stack create complete') as act:
        waiter = cfc.get_waiter('stack_create_complete')
        try:
            waiter.wait(StackName=stack.name)
        except botocore.exceptions.WaiterError as e:
            act.error('Stack creation failed: {}'.format(e))
            return

    info('SSH Bastion instance is running with public IP {}'.format(ip))
    if account.domain is not None:
        configure_dns_record(account, 'odd-{}'.format(region), ip)
    else:
        warning('No DNS domain configured, skipping record creation')


def update_cf_bastion_host(account: AccountData, vpc: object, region: str, stack: object, ami_id: str,
                           bastion_version: str):
    """Update the Odd stack, moving it to another DMZ subnet to force a
    reinitialisation of the instance.
    """
    cloudformation = account.session.client('cloudformation', region)

    # switch subnet, every update => force reinitialisation
    current_subnet = get_tag(stack.parameters, 'SubnetId', prefix='Parameter')
    subnet_ids = [a.id for a in filter_subnets(vpc, 'dmz')]
    if current_subnet in subnet_ids:
        subnet_ids.remove(current_subnet)

    if not subnet_ids:
        warning('No DMZ subnet found')
        return

    response = stack.update(
        TemplateBody=json.dumps(account.config['bastion'].get('cf_template')),
        Parameters=[
            {'ParameterKey': 'AccountName', 'ParameterValue': account.name},
            {'ParameterKey': 'DisableApiTermination', 'ParameterValue': 'false'},
            {'ParameterKey': 'EIPAllocation',
             'ParameterValue': get_tag(stack.parameters, 'EIPAllocation', prefix='Parameter')},
            {'ParameterKey': 'OddRelease', 'ParameterValue': bastion_version},
            {'ParameterKey': 'SubnetId', 'ParameterValue': subnet_ids[0]},
            {'ParameterKey': 'TaupageId', 'ParameterValue': ami_id},
            {'ParameterKey': 'VPCNetwork', 'ParameterValue': str(vpc.cidr_block)},
            {'ParameterKey': 'VpcId', 'ParameterValue': vpc.id}
        ],
        Tags=[
            {'Key': 'LastUpdate', 'Value': time.strftime('%Y-%m-%dT%H:%M:%S%z')},
            {'Key': 'InfrastructureComponent', 'Value': 'true'}
        ]
    )
    info(response)
    with ActionOnExit('Wait of stack update complete') as act:
        waiter = cloudformation.get_waiter('stack_update_complete')
        try:
            waiter.wait(StackName=stack.name)
        except botocore.exceptions.WaiterError as e:
            # BUG FIX: message said 'Stack creation failed' on the update path
            act.error('Stack update failed: {}'.format(e))
            return


def drop_bastionhost(instance):
    """Terminate a bastion instance, lifting API-termination protection."""
    with ActionOnExit('Terminating SSH Bastion host..'):
        instance.reload()
        if instance.state.get('Name') in ('running', 'pending', 'stopping', 'stopped'):
            instance.modify_attribute(Attribute='disableApiTermination', Value='false')
            instance.terminate()
            instance.wait_until_terminated()


def wait_for_ssh_port(host: str, timeout: int):
    """Poll TCP port 22 of *host* until it accepts or *timeout* seconds pass.

    Returns:
        bool: True when the port became reachable, False on timeout.
    """
    start = time.time()
    with ActionOnExit('Waiting for SSH port of {}..'.format(host)) as act:
        while True:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                result = sock.connect_ex((host, 22))
            except Exception:
                result = -1
            if result == 0:
                return True
            if time.time() - start > timeout:
                act.error('TIMEOUT')
                return False
            time.sleep(5)
            act.progress()
get_tag(instance.tags, 'Name') == 'Odd (SSH Bastion Host)': 385 | if instance.state.get('Name') in ('running', 'pending', 'stopping', 'stopped'): 386 | if account.domain is not None and instance.public_ip_address: 387 | try: 388 | delete_dns_record(account, 'odd-{}'.format(region), instance.public_ip_address) 389 | except Exception: 390 | pass 391 | drop_bastionhost(instance) 392 | 393 | cloudformation_filter = [ 394 | {'Name': 'tag:aws:cloudformation:logical-id', 395 | 'Values': ['OddServerInstance']}, 396 | {'Name': 'instance-state-name', 397 | 'Values': ['running', 'pending', 'stopping', 'stopped']}, 398 | ] 399 | for instance in ec2.instances.filter(Filters=cloudformation_filter): 400 | if account.domain is not None and instance.public_ip_address: 401 | try: 402 | delete_dns_record(account, 'odd-{}'.format(region), instance.public_ip_address) 403 | except Exception as e: 404 | warning('Can\'t cleanup old Odd host name: {}'.format(e)) 405 | oddstack = cf.Stack(get_tag(instance.tags, 'aws:cloudformation:stack-name')) 406 | oddstack.delete() 407 | waiter = cfc.get_waiter('stack_delete_complete') 408 | with ActionOnExit('Waiting of Stack delete') as act: 409 | try: 410 | waiter.wait(StackName=get_tag(instance.tags, 'aws:cloudformation:stack-name')) 411 | except botocore.exceptions.WaiterError as e: 412 | act.error('Stack delete failed: {}'.format(e)) 413 | -------------------------------------------------------------------------------- /sevenseconds/config/cloudtrail.py: -------------------------------------------------------------------------------- 1 | from ..helper import ActionOnExit, info 2 | from ..helper.regioninfo import get_regions 3 | from concurrent.futures import ThreadPoolExecutor 4 | import sevenseconds.helper 5 | 6 | DEFAULT_CLOUDTRAIL_REGION = 'eu-west-1' 7 | 8 | # TODO write a helper for update Cloudtrail S3 Policy 9 | # get IDs from http://docs.aws.amazon.com/general/latest/gr/rande.html#ct_region and update the Policy 10 | 11 | 12 | def 
def drop_old_cloudtrails(account):
    """Delete leftover CloudTrail trails in every enabled region except home.

    boto3 doesn't support regioninfo.get_regions from boto, so the region
    list comes from our own ``get_regions`` helper. Work is fanned out to
    one thread per enabled region.
    """
    home_region = account.config['cloudtrail'].get('home_region', DEFAULT_CLOUDTRAIL_REGION)
    regions = get_regions('cloudtrail')
    if home_region in regions:
        regions.remove(home_region)

    cloudtrail_regions = account.config.get('cloudtrail', {}).get('regions', [])
    enabled_regions = list(set(regions).intersection(set(cloudtrail_regions)))
    if not enabled_regions:
        # Bug fix: ThreadPoolExecutor raises ValueError for max_workers=0,
        # so bail out early when there is nothing to clean up.
        return

    futures = []
    with ThreadPoolExecutor(max_workers=len(enabled_regions)) as executor:
        for region in enabled_regions:
            futures.append(executor.submit(drop_old_cloudtrails_worker, account, region, account.dry_run))
    for future in futures:
        # will raise an exception if the jobs failed
        future.result()
59 | cloudtrail.delete_trail(Name=delname) 60 | 61 | 62 | def configure_cloudtrail(account: object): 63 | if 'cloudtrail' not in account.config: 64 | return 65 | region = account.config['cloudtrail'].get('home_region', DEFAULT_CLOUDTRAIL_REGION) 66 | cloudtrail = account.session.client('cloudtrail', region) 67 | trails = cloudtrail.describe_trails()['trailList'] 68 | name = 'Default' 69 | trail = find_trail(trails, name) 70 | kwargs = dict(Name=name, 71 | S3BucketName=account.config['cloudtrail']['s3_bucket_name'], 72 | S3KeyPrefix=account.config['cloudtrail']['s3_key_prefix'], 73 | IsMultiRegionTrail=True, 74 | IncludeGlobalServiceEvents=True) 75 | if trail: 76 | with ActionOnExit('Checking CloudTrail in region {}..'.format(region)) as act: 77 | if not account.dry_run: 78 | if (trail['IncludeGlobalServiceEvents'] != kwargs['IncludeGlobalServiceEvents'] or 79 | trail.get('S3KeyPrefix', '') != kwargs['S3KeyPrefix'] or 80 | trail['S3BucketName'] != kwargs['S3BucketName'] or 81 | trail['IsMultiRegionTrail'] != kwargs['IsMultiRegionTrail']): 82 | act.error('wrong configuration') 83 | cloudtrail.update_trail(**kwargs) 84 | status = cloudtrail.get_trail_status(Name=name) 85 | if not status['IsLogging']: 86 | act.error('was not active') 87 | cloudtrail.start_logging(Name=name) 88 | else: 89 | if trails: 90 | for trail in trails: 91 | delname = trail.get('Name') 92 | with ActionOnExit('[{}] Deleting invalid trail {} in region {}..' 
def find_trail(trails: list, name):
    """Return the first trail dict whose 'Name' equals *name*, else None."""
    return next((trail for trail in trails if trail.get('Name') == name), None)
def start_configuration(sessions: list, trusted_addresses: set, options: dict):
    """Configure all accounts in *sessions* using a process pool.

    :return: True only when every account configured successfully.
    """
    info('Start Pool processing...')

    # TODO move trusted_addresses to prepare_shared_data
    shared_data = prepare_shared_data(sessions, trusted_addresses)

    worker_count = options.get('max_procs', os.cpu_count())
    with Pool(processes=worker_count) as pool:
        results = pool.starmap(configure_account_except, zip(sessions, repeat(shared_data)))
        info('Pool processing done...')
        return all(results)
def configure_account_except(session_data: AccountData, shared_data: SharedData):
    """Run configure_account, converting any exception into a False result.

    The traceback and exception are logged so pool workers never die silently.
    """
    try:
        configure_account(session_data, shared_data)
    except Exception as e:
        error(traceback.format_exc())
        error(e)
        return False
    return True
def configure_account_region(account: object, region: str, shared_data: SharedData):
    """Apply all per-region configuration steps for one account/region pair.

    The VPC is configured first and its handle is passed to the bastion,
    ElastiCache, RDS and security-group steps that operate inside it.
    """
    # Tag the worker thread so log lines can be attributed to account|region.
    sevenseconds.helper.THREADDATA.name = '{}|{}'.format(account.name, region)

    base_images = shared_data.base_images.get(region, {})
    # prepare_shared_data already verified that the default channel AMI
    # exists for every configured region, so this lookup should not fail.
    default_base_ami = base_images[account.config['base_ami']['default_channel']]

    configure_log_group(account.session, region, account.config)
    configure_acm(account, region)
    configure_kms_keys(account, region)
    configure_ebs_encryption(account, region)
    configure_base_images(account, region, base_images)
    vpc = configure_vpc(account, region, default_base_ami)
    configure_bastion_host(account, vpc, region, default_base_ami)
    configure_elasticache(account.session, region, vpc)
    configure_rds(account.session, region, vpc)
    configure_security_groups(account, region, shared_data.trusted_addresses, vpc)
def cleanup_account(session_data: AccountData, region: str):
    """Verify the account alias, then clean up *region* for this account."""
    start_time = time.time()
    # Tag the worker so log output can be attributed to this account.
    sevenseconds.helper.THREADDATA.name = session_data.name
    session = {}
    # Materialise real boto3 sessions from the stored session kwargs.
    for session_name in ('session', 'admin_session', 'ami_session'):
        session[session_name] = boto3.session.Session(**getattr(session_data, session_name))
    account = session_data._replace(id=get_account_id(session['session']), **session)
    del (session)
    # Remove Default-Session
    boto3.DEFAULT_SESSION = None
    # account_id = get_account_id(session['account'])
    # info('Account ID is {}'.format(account_id))

    # Safety check: refuse to clean up when the connected account's alias
    # does not match the one named in the configuration.
    account_alias_from_aws = get_account_alias(account.session)
    if len(account_alias_from_aws) > 0 and account.alias != account_alias_from_aws[0]:
        error('Connected to "{}", but account "{}" should be configured'.format(account_alias_from_aws, account.alias))
        return

    cleanup_account_region(account, region)

    ok('Done with {} / {} after {}'.format(account.id, account.name, timedelta(seconds=time.time() - start_time)))
def configure_ebs_encryption(account, region):
    """Align the region's EBS encryption-by-default flag with the config."""
    desired = account.config.get('ebs_encrypt_by_default', True)
    ec2 = account.session.client('ec2', region)
    with ActionOnExit("Checking EBS encryption by default") as act:
        current = ec2.get_ebs_encryption_by_default()['EbsEncryptionByDefault']
        if current == desired:
            act.ok("already configured")
        elif desired:
            ec2.enable_ebs_encryption_by_default()
            act.ok("enabled")
        else:
            ec2.disable_ebs_encryption_by_default()
            act.ok("disabled")
def effective_roles(config):
    """Return the configured roles with additional policy statements merged in.

    Works on a deep copy, so *config* is never mutated.

    :raises ValueError: when an additional policy targets a dropped/missing
        role, or a role whose policy has no Statement list.
    """
    merged = copy.deepcopy(config.get('roles', {}))

    for extra in config.get('additional_policies', []):
        role_name = extra['role']
        role = merged.get(role_name)
        if role is None or role.get('drop', False):
            raise ValueError("Found a custom policy for disabled or missing role {}".format(role_name))
        statement = role.get('policy', {}).get('Statement')
        if statement is None:
            raise ValueError("No policy statement found in role {}".format(role_name))
        statement.append(extra['statement'])

    return merged
def effective_attached_policies(config, role_name, role_cfg):
    """Merge the attached_policies for a role and
    additional_attached_policies found in the account config for the
    given role. Note it might return duplicates."""
    result = list(role_cfg.get("attached_policies", []))
    for extra in config.get("additional_attached_policies", []):
        if extra["role"] == role_name:
            result.extend(extra.get("policies", []))
    return result
ActionOnExit('Creating role {role_name}..', **vars()): 92 | iam.create_role(Path=role_cfg.get('path', '/'), 93 | RoleName=role_name, 94 | AssumeRolePolicyDocument=json.dumps(expected_assume_role_policy_document)) 95 | else: 96 | raise 97 | 98 | expected_policy_document = json.loads( 99 | json.dumps(role_cfg.get('policy')).replace('{account_id}', account.id)) 100 | expected_policies = {role_name: expected_policy_document} if expected_policy_document else {} 101 | policies = {p.policy_name: p.policy_document for p in role.policies.all()} 102 | if policies != expected_policies: 103 | with ActionOnExit('Updating policy for role {role_name}..', **vars()) as act: 104 | for name, document in expected_policies.items(): 105 | iam.RolePolicy(role_name, name).put(PolicyDocument=json.dumps(document)) 106 | for policy_name in policies: 107 | if policy_name not in expected_policies: 108 | act.warning('Deleting {} from {}'.format(policy_name, role_name)) 109 | iam.RolePolicy(role_name, policy_name).delete() 110 | 111 | if role.assume_role_policy_document != expected_assume_role_policy_document: 112 | with ActionOnExit('Updating assume role policy for role {role_name}..', **vars()): 113 | updated_assume_role_policy_document = json.dumps(expected_assume_role_policy_document) 114 | iam.AssumeRolePolicy(role_name).update(PolicyDocument=updated_assume_role_policy_document) 115 | 116 | configured_attached_policies = effective_attached_policies(account.config, role_name, role_cfg) 117 | attached_policies = set(p.arn for p in role.attached_policies.all()) 118 | expected_attached_policies = set( 119 | policy.replace("{account_id}", account.id) for policy in configured_attached_policies 120 | ) 121 | if attached_policies != expected_attached_policies: 122 | with ActionOnExit('Updating attached policies for {role_name}..', **vars()) as act: 123 | for arn in attached_policies - expected_attached_policies: 124 | act.warning('Detaching {} from {}'.format(arn, role_name)) 125 | 
def configure_iam_saml(account: AccountData):
    """Ensure every SAML provider from the account config exists in IAM.

    Each entry maps a provider name to either an HTTP(S) URL the metadata
    document is fetched from, or the literal metadata document itself.
    Existing providers (matched by ARN) are left untouched.
    """
    iam = account.session.resource('iam')
    for name, url in account.config.get('saml_providers', {}).items():
        arn = 'arn:aws:iam::{account_id}:saml-provider/{name}'.format(account_id=account.id, name=name)
        found = False
        for provider in iam.saml_providers.all():
            if provider.arn == arn:
                found = True
        if found:
            info('Found existing SAML provider {name}'.format(name=name))
            continue

        with ActionOnExit('Creating SAML provider {name}..', **vars()):
            if url.startswith('http'):
                r = requests.get(url)
                if r.status_code == 200:
                    saml_metadata_document = r.text
                else:
                    error('Error code: {}'.format(r.status_code))
                    error('Error msg: {}'.format(r.text))
                    # Bug fix: previously execution fell through with
                    # saml_metadata_document unbound, crashing with an
                    # UnboundLocalError; skip this provider instead.
                    continue
            else:
                saml_metadata_document = url

            iam.create_saml_provider(SAMLMetadataDocument=saml_metadata_document, Name=name)
# TODO: support reverting Drop:true operation by either cancelling deletion or recreating the keys
def configure_kms_keys(account: object, region):
    """Create or update the KMS keys declared in the account config.

    Keys flagged with ``drop: true`` are scheduled for deletion instead of
    being created. ``{account_id}`` placeholders in the key configuration
    are substituted before use.
    """
    keys_config = account.config.get('kms', {})
    kms_client = account.session.client('kms', region)
    for key_alias in keys_config:
        key_config = keys_config[key_alias]
        if key_config.get('drop', False):
            schedule_key_deletion(kms_client, key_alias)
            continue
        key = json.loads(json.dumps(key_config).replace('{account_id}', account.id))
        with ActionOnExit('Searching for key "{}"..'.format(key_alias)) as act:
            try:
                alias = kms_client.describe_key(KeyId=key_alias)
                act.ok("key already exists, updating policy")
                put_key_response = kms_client.put_key_policy(
                    KeyId=alias["KeyMetadata"]["KeyId"],
                    PolicyName="default",
                    Policy=json.dumps(key["key_policy"]),
                    BypassPolicyLockoutSafetyCheck=False
                )
                if put_key_response['ResponseMetadata']['HTTPStatusCode'] != 200:
                    act.error(
                        'failed to update key policy for {} response: {}'
                        .format(key_alias, put_key_response)
                    )
                    # Bug fix: this was `break`, which aborted the whole
                    # loop and silently skipped every remaining key.
                    continue
                act.ok("updated key policy for {}".format(key_alias))
                continue  # bug fix: was `break`, see above
            except kms_client.exceptions.NotFoundException:
                create_response = kms_client.create_key(
                    Description=key['description'],
                    KeyUsage=key['key_usage'],
                    Origin='AWS_KMS',
                    BypassPolicyLockoutSafetyCheck=False,
                    Policy=json.dumps(key['key_policy']),
                    Tags=key['tags']
                )
                if create_response['ResponseMetadata']['HTTPStatusCode'] != 200:
                    act.error('failed to create a key {} response: {}'.format(key_alias, create_response))
                    continue
                key_id = create_response['KeyMetadata']['KeyId']
                alias_response = kms_client.create_alias(
                    AliasName=key_alias,
                    TargetKeyId=key_id
                )
                if alias_response['ResponseMetadata']['HTTPStatusCode'] != 200:
                    act.error(
                        'failed to create alias {} with key {} res:{}'
                        .format(key_alias, key_id, alias_response)
                    )
                    continue
def check_policy_simulator(account: object):
    """Run the configured IAM policy simulations and report mismatches.

    Each entry in ``roles_simulator`` names a role and a set of simulation
    checks; errors are printed and counted, then reported once at the end.
    """
    roles = account.config.get('roles', {})
    checks = account.config.get('roles_simulator', {})
    errorcount = 0
    for rolename, rolechecks in sorted(checks.items()):
        # Robustness fix: a simulator entry referencing a role that is not
        # configured at all previously raised KeyError; treat it like a
        # role without a policy instead.
        if rolename not in roles or 'policy' not in roles[rolename]:
            warning('{} has no policy'.format(rolename))
            continue
        errormsg = run_simulation(account.session, roles, rolename, rolechecks)
        if len(errormsg):
            errorcount += len(errormsg)
            print('\n'.join(errormsg))
    if errorcount:
        # fatal_error('found {} error(s) in the policys. Abort!'.format(errorcount))
        error('found {} error(s) in the policys.'.format(errorcount))
def configure_dns(account: AccountData):
    """Ensure the account's public hosted zone exists, is delegated, and
    has the configured SOA TTL.

    Skips silently when the account has no DNS domain configured.
    """
    conn = account.session.client('route53')
    dns_domain = account.domain

    if not dns_domain:
        info('No domain configured for account, skipping DNS setup')
        return

    zone = list(filter(lambda x: x['Name'] == dns_domain + '.',
                       conn.list_hosted_zones_by_name(DNSName=dns_domain + '.')['HostedZones']))
    if not zone:
        with ActionOnExit('Creating hosted zone..'):
            conn.create_hosted_zone(Name=dns_domain + '.',
                                    CallerReference='sevenseconds-' + dns_domain,
                                    HostedZoneConfig={'Comment': 'Public Hosted Zone'})
    # Re-read so we always work with the zone dict itself, whether the zone
    # already existed or was just created.
    zone = conn.list_hosted_zones_by_name(DNSName=dns_domain + '.')['HostedZones'][0]
    nameservers = conn.get_hosted_zone(Id=zone['Id'])['DelegationSet']['NameServers']
    info('Hosted zone for {} has nameservers {}'.format(dns_domain, nameservers))
    with ActionOnExit('Set up DNS Delegation..') as act:
        try:
            configure_dns_delegation(account.admin_session, dns_domain, nameservers)
        except Exception:
            # Bug fix: the error message used to sit after an unconditional
            # `raise` and was unreachable; log first, then re-raise.
            act.error('DNS Delegation not possible')
            raise
    soa_ttl = account.config.get('domain_soa_ttl', '60')
    with ActionOnExit('Set SOA-TTL to {}..'.format(soa_ttl)):
        rr_list = conn.list_resource_record_sets(HostedZoneId=zone['Id'],
                                                 StartRecordType='SOA',
                                                 StartRecordName=zone['Name'])
        rr = rr_list['ResourceRecordSets'][0]['ResourceRecords']
        changebatch = {'Comment': 'updated SOA TTL',
                       'Changes': [{'Action': 'UPSERT',
                                    'ResourceRecordSet': {'Name': zone['Name'],
                                                          'Type': 'SOA',
                                                          'TTL': int(soa_ttl),
                                                          'ResourceRecords': rr}}]}
        conn.change_resource_record_sets(HostedZoneId=zone['Id'], ChangeBatch=changebatch)
def find_zoneid(domain: str, route53: object):
    """Return the hosted-zone id for *domain* or its closest parent zone.

    Fetches all hosted zones (following pagination markers), then walks up
    the domain hierarchy label by label until a zone name matches.
    Returns ``None`` when no enclosing zone exists.
    """
    listing = route53.list_hosted_zones()
    zones = listing['HostedZones']
    while listing['IsTruncated']:
        listing = route53.list_hosted_zones(Marker=listing['NextMarker'])
        zones.extend(listing['HostedZones'])

    candidate = domain
    while candidate:
        matches = [zone['Id'] for zone in zones if zone['Name'] == candidate + '.']
        if matches:
            return matches[0]
        # Drop the left-most label and retry with the parent domain.
        candidate = '.'.join(candidate.split('.')[1:])
    return None
def configure_s3_buckets(account: object):
    """Create/update all S3 buckets declared in the account config.

    For every entry of ``s3_buckets`` and every region it lists, this
    ensures the bucket exists and then applies (each only when configured):
    bucket policy, lifecycle configuration, server-side encryption, tags
    and access logging (including the logging target bucket and its own
    lifecycle configuration).
    """
    for _, config in account.config.get('s3_buckets', {}).items():
        for region in config.get('regions', []):
            # Bucket names may contain {account_id}/{region} placeholders.
            bucket_name = config['name'].format(account_id=account.id, region=region)
            s3 = account.session.resource('s3', region)
            with ActionOnExit('Checking S3 bucket {}..'.format(bucket_name)) as act:
                bucket = s3.Bucket(bucket_name)
                # creation_date is None for buckets that do not exist yet.
                if not bucket.creation_date:
                    act.warning('not exist.. create bucket ..')
                    bucket.create(CreateBucketConfiguration={'LocationConstraint': region})
                    bucket.wait_until_exists()

            policy = config.get('policy', None)
            if policy is not None:
                with ActionOnExit('Updating policy for S3 bucket {}..'.format(bucket_name)):
                    # The policy document may reference the bucket via a
                    # literal '{bucket_name}' placeholder.
                    policy_json = json.dumps(policy).replace('{bucket_name}', bucket_name)
                    bucket.Policy().put(Policy=policy_json)

            main_lifecycle_config = config.get('lifecycle_configuration')
            if main_lifecycle_config is not None:
                configure_bucket_lifecycle(s3, main_lifecycle_config, bucket_name)

            encryption_config = config.get('encryption_config')
            if encryption_config is not None:
                s3.meta.client.put_bucket_encryption(
                    Bucket=bucket_name,
                    ServerSideEncryptionConfiguration=encryption_config)

            tags = config.get('tags')
            if tags is not None:
                tag_set = []
                for k, v in tags.items():
                    tag_set.append({'Key': k, 'Value': v})
                bucket.Tagging().put(Tagging={'TagSet': tag_set})

            logging_target = config.get('logging_target', None)
            logging_lifecycle_config = config.get('logging_lifecycle_configuration')
            if logging_target is not None:
                logging_enabled = bucket.Logging().logging_enabled
                logging_target = logging_target.format(account_id=account.id, region=region)
                # Only (re)configure logging when it is off or points at a
                # different target bucket.
                if logging_enabled and logging_target == logging_enabled['TargetBucket']:
                    info('Logging for {} to {}:{} enabled'.format(bucket.name,
                                                                  logging_enabled['TargetBucket'],
                                                                  logging_enabled['TargetPrefix']))
                else:
                    logging_bucket = create_logging_target(s3, logging_target, region)
                    enable_logging(bucket, logging_bucket)
                    configure_bucket_lifecycle(s3, logging_lifecycle_config, logging_target)
def consolidate_networks(networks: set, min_prefixlen: int):
    """Collapse *networks* into fewer, larger CIDR blocks.

    Adjacent (sorted) networks are replaced pairwise by their spanning CIDR
    whenever the span is no wider than *min_prefixlen* bits, and the result
    is finally normalised with ``netaddr.cidr_merge``.  Returns a list of
    ``IPNetwork`` objects.
    """
    ordered = sorted(IPNetwork(net) for net in networks)
    candidates = []
    for pair in chunks(ordered, 2):
        if len(pair) == 1:
            # Odd element at the end — nothing to pair it with.
            candidates.append(pair[0])
            continue
        span = netaddr.spanning_cidr(pair)
        if span.prefixlen >= min_prefixlen:
            candidates.append(span)
        else:
            # Spanning block would be too coarse; keep both as-is.
            candidates.extend(pair)
    return netaddr.cidr_merge(candidates)
def parse_sg_config(sg_config: dict, networks: set):
    """Normalise a security-group config into a dict keyed by rule signature.

    Every entry of ``sg_config['ip_permissions']`` yields one item whose key
    is ``'proto:<proto>|from:<port>|to:<port>'`` and whose value carries the
    AWS-style fields plus an ``'ip_ranges'`` set of ``IPNetwork`` grants.
    When ``allow_from_trusted`` is set, the trusted *networks* are merged
    into every rule's grants.
    """
    parsed = {}
    for permission in sg_config.get('ip_permissions'):
        signature = 'proto:{}|from:{}|to:{}'.format(permission['ip_protocol'],
                                                    permission.get('from_port'),
                                                    permission.get('to_port'))
        grants = {IPNetwork('{}'.format(cidr)) for cidr in permission.get('ip_ranges', [])}
        if sg_config.get('allow_from_trusted'):
            grants.update(IPNetwork(cidr) for cidr in networks)
        parsed[signature] = {
            'IpProtocol': permission.get('ip_protocol'),
            'FromPort': permission.get('from_port'),
            'ToPort': permission.get('to_port'),
            'ip_ranges': grants}
    return parsed
ip_permission.get('ToPort')) 254 | if key_name not in parsed_sg_config: 255 | warning('Remove Entry from {}: {}'.format(sg.group_name, ip_permission)) 256 | sg.revoke_ingress(IpPermissions=[ip_permission]) 257 | continue 258 | 259 | for grant in ipgrants: 260 | if grant not in parsed_sg_config[key_name]['ip_ranges']: 261 | warning('Remove {} from security group {}'.format(grant, sg.group_name)) 262 | sg.revoke_ingress(IpPermissions=[ 263 | { 264 | 'IpProtocol': ip_permission['IpProtocol'], 265 | 'FromPort': ip_permission.get('FromPort'), 266 | 'ToPort': ip_permission.get('ToPort'), 267 | 'IpRanges': [ 268 | { 269 | 'CidrIp': str(grant) 270 | } 271 | ] 272 | }]) 273 | else: 274 | parsed_sg_config[key_name]['ip_ranges'].remove(grant) 275 | 276 | with ActionOnExit('Updating security group {}..'.format(sg.group_name)) as act: 277 | for key, conf in parsed_sg_config.items(): 278 | for cidr in sorted(conf['ip_ranges']): 279 | try: 280 | sg.authorize_ingress(IpPermissions=[ 281 | { 282 | 'IpProtocol': conf['IpProtocol'], 283 | 'FromPort': conf['FromPort'], 284 | 'ToPort': conf['ToPort'], 285 | 'IpRanges': [ 286 | { 287 | 'CidrIp': str(cidr) 288 | } 289 | ] 290 | }]) 291 | except botocore.exceptions.ClientError as e: 292 | if e.response['Error']['Code'] != 'InvalidPermission.Duplicate': 293 | raise 294 | act.progress() 295 | -------------------------------------------------------------------------------- /sevenseconds/config/vpc.py: -------------------------------------------------------------------------------- 1 | import time 2 | import json 3 | import re 4 | import hashlib 5 | from collections import namedtuple 6 | from netaddr import IPNetwork 7 | from ..helper import ActionOnExit, info, warning, error 8 | from ..helper.network import calculate_subnet 9 | from ..helper.aws import filter_subnets, get_tag, get_az_names, associate_address 10 | from .route53 import configure_dns_record, delete_dns_record 11 | from clickclick import OutputFormat 12 | from clickclick.console 
def configure_vpc(account: AccountData, region, base_ami_id):
    """Create or update the account VPC in *region* and everything in it.

    Ensures the VPC exists (default 172.31.0.0/16 or the per-region CIDR
    from the ``vpc_net`` config), then configures tags, DNS attributes,
    flow logs, subnets, NAT (gateways or instances), routing tables, VPC
    endpoints and VPN route propagation.  Returns the VPC resource.
    """
    ec2 = account.session.resource('ec2', region)
    ec2c = account.session.client('ec2', region)

    vpc_net = VPC_NET

    vpc_config = account.config.get('vpc_net', {}).get(region)
    if vpc_config:
        vpc_net = IPNetwork(account.config['vpc_net'][region]['cidr'])
        info('Region with non default VPC-Network: {}'.format(vpc_net))
    with ActionOnExit('Finding existing default VPC..'):
        vpc = find_vpc(ec2, VPC_NET)
    # we only need to delete it if we use different settings for the VPC:
    if vpc and vpc_net != VPC_NET:
        with ActionOnExit('Deleting old default VPC..') as act:
            delete_vpc(vpc, region)
            delete_vpc_addresses(account.session, region)
            delete_rds_subnet_group(account.session, region)
            try:
                vpc.delete()
            except Exception as e:
                act.error(e)
                raise Exception('{}! Please delete VPC ({}) manually.'.format(e, vpc.id))

    with ActionOnExit('Finding VPC..') as act:
        vpc = find_vpc(ec2, vpc_net)
        if not vpc:
            act.error('VPC not found')
    if not vpc:
        with ActionOnExit('Creating VPC for {cidr_block}..', cidr_block=str(vpc_net)):
            if not account.dry_run:
                vpc = ec2.create_vpc(CidrBlock=str(vpc_net))
                igw = ec2.create_internet_gateway()
                vpc.attach_internet_gateway(InternetGatewayId=igw.id)
    with ActionOnExit('Updating VPC..'):
        if not account.dry_run:
            tags = [{'Key': 'Name', 'Value': '{}-{}'.format(account.name, region)},
                    {'Key': 'LastUpdate', 'Value': time.strftime('%Y-%m-%dT%H:%M:%S%z')}
                    ]
            # Configured VPC tags may embed the base AMI id/config via the
            # literal placeholders '{{ami_id}}' and '{{base_ami_config}}'.
            for key, val in account.config.get('vpc', {}).get('tags', {}).items():
                if base_ami_id:
                    tags.append({
                        'Key': key,
                        'Value': val.replace('{{ami_id}}', base_ami_id).replace(
                            '{{base_ami_config}}',
                            json.dumps(account.config.get('base_ami'), sort_keys=True))
                    })
            vpc.create_tags(Tags=tags)
            vpc.modify_attribute(EnableDnsSupport={'Value': True})
            vpc.modify_attribute(EnableDnsHostnames={'Value': True})
    info(vpc)
    # FIXME check and add Expire for Flow Logs
    with ActionOnExit('Check Flow Logs') as act:
        if not exist_flowlog(account.session, region, vpc.id):
            ec2c.create_flow_logs(ResourceIds=[vpc.id],
                                  ResourceType='VPC',
                                  TrafficType='ALL',
                                  LogGroupName='vpc-flowgroup',
                                  DeliverLogsPermissionArn='arn:aws:iam::{}:role/vpc-flowlogs'.format(account.id))

    with ActionOnExit('Checking region {region}..', **vars()):
        availability_zones = get_az_names(account.session, region)
        info('Availability zones: {}'.format(availability_zones))
    # Subnets without a Name tag were not created by us — remove them.
    for subnet in vpc.subnets.all():
        if not get_tag(subnet.tags, 'Name'):
            with ActionOnExit('Deleting subnet {subnet_id}..', subnet_id=subnet.id):
                if not account.dry_run:
                    subnet.delete()

    # Configure subnets
    if vpc_config and 'subnets' in vpc_config:
        subnets = custom_subnets(vpc_net, vpc_config['subnets'], availability_zones)
    else:
        subnets = default_subnets(vpc_net, availability_zones)

    for subnet in subnets:
        configure_subnet(vpc, subnet, account.dry_run, ec2c.get_waiter('subnet_available'))

    enable_nat = account.config.get("enable_nat", True)
    if enable_nat:
        nat_instances = create_nat_instances(account, vpc, region)
    else:
        nat_instances = {}

    create_routing_tables(
        vpc, nat_instances,
        account.options.get('re_add_defaultroute', False),
        account.config.get('enable_dedicated_dmz_route', False)
    )
    create_vpc_endpoints(account, vpc, region)
    check_vpn_propagation(account, vpc, region)
    return vpc
def delete_vpc(vpc: object, region: str):
    '''
    Tear down a VPC, but only if it hosts nothing except NAT and Odd
    (SSH bastion) instances; any other instance or network interface
    aborts with an exception so nothing unexpected is destroyed.
    '''
    # Classify instances: only NAT and the Odd bastion may be terminated.
    instances2delete = []
    instances2clarify = []
    for instance in vpc.instances.all():
        if (get_tag(instance.tags, 'Name', str).startswith('NAT {}'.format(region)) or
                get_tag(instance.tags, 'Name') == 'Odd (SSH Bastion Host)'):
            instances2delete.append(instance)
        else:
            instances2clarify.append(instance)

    if instances2clarify:
        raise Exception('Unknown Instances ({}) found. Please clear VPC ({}) manually.'
                        .format(', '.join(map(lambda x: '{}/{}: {}'.format(x.id, get_tag(x.tags, 'Name'), x.state),
                                              instances2clarify)), vpc.id))

    if instances2delete:
        # First fire all terminations, then wait — faster than serially.
        for instance in instances2delete:
            info('terminate {}/{}'.format(instance.id, get_tag(instance.tags, 'Name')))
            # Termination protection must be lifted before terminate().
            instance.modify_attribute(Attribute='disableApiTermination', Value='false')
            instance.terminate()

        for instance in instances2delete:
            instance.wait_until_terminated()
            instance.reload()
            info('instance status from {}/{}: {}'.format(instance.id, get_tag(instance.tags, 'Name'), instance.state))

    network_interfaces = list(vpc.network_interfaces.all())
    if network_interfaces:
        raise Exception('Unknown Interfaces ({}) found. Please clear VPC ({}) manually.'
                        .format(', '.join(map(lambda x: '{}/{}: {}'.format(x.id, x.description, x.status),
                                              network_interfaces)), vpc.id))

    for igw in vpc.internet_gateways.all():
        igw.detach_from_vpc(VpcId=vpc.id)
        igw.delete()

    # Best-effort deletion below: failures are logged, not fatal.
    for subnet in vpc.subnets.all():
        try:
            subnet.delete()
        except Exception as e:
            info(e)

    for sg in vpc.security_groups.all():
        try:
            sg.delete()
        except Exception as e:
            info(e)

    for network_acl in vpc.network_acls.all():
        try:
            network_acl.delete()
        except Exception as e:
            info(e)

    endpoints = vpc.meta.client.describe_vpc_endpoints(
        Filters=[
            {
                'Name': 'vpc-id',
                'Values': [
                    vpc.id
                ]
            },
            {
                'Name': 'vpc-endpoint-state',
                'Values': [
                    'pending',
                    'available'
                ]
            }
        ]
    )['VpcEndpoints']
    if endpoints:
        for endpoint in endpoints:
            vpc.meta.client.delete_vpc_endpoints(
                VpcEndpointIds=[endpoint['VpcEndpointId']]
            )

    for route_table in vpc.route_tables.all():
        try:
            route_table.delete()
        except Exception as e:
            info(e)

    # TODO missing?
def find_subnet(vpc: object, cidr):
    """Return the subnet of *vpc* whose CIDR block equals *cidr* (or None)."""
    wanted = str(cidr)
    return next((candidate for candidate in vpc.subnets.all()
                 if candidate.cidr_block == wanted), None)
region)
    logs = account.session.client('logs', region)
    nat_instance_by_az = {}
    nat_type = None
    for subnet in filter_subnets(vpc, 'dmz'):
        az_name = subnet.availability_zone
        private_ip = None
        sg_name = 'NAT {}'.format(az_name)
        # GroupNames-Filter: EC2-Classic and default VPC only
        sg = [x for x in vpc.security_groups.all() if x.group_name == sg_name]
        if not sg:
            sg = vpc.create_security_group(GroupName=sg_name,
                                           Description='Allow internet access through NAT instances')
            # We are to fast for AWS (InvalidGroup.NotFound) -- sleep works
            # around eventual consistency of the freshly created group
            time.sleep(2)
            sg.create_tags(Tags=[{'Key': 'Name', 'Value': sg_name}])

            # only internal subnets in the same AZ may route through this NAT
            for internal_subnet in filter_subnets(vpc, 'internal'):
                if internal_subnet.availability_zone == az_name:
                    sg.authorize_ingress(IpProtocol='-1',
                                         FromPort=-1,
                                         ToPort=-1,
                                         CidrIp=internal_subnet.cidr_block)
        else:
            sg = sg[0]

        # find existing (legacy) NAT instances in this DMZ subnet
        filters = [
            {'Name': 'tag:Name',
             'Values': [sg_name]},
            {'Name': 'instance-state-name',
             'Values': ['running', 'pending', 'stopping', 'stopped', 'shutting-down']},
        ]
        instances = list(subnet.instances.filter(Filters=filters))
        nat_gateway = None
        try:
            filters = [
                {'Name': 'subnet-id', 'Values': [subnet.id]},
                {'Name': 'state', 'Values': ['pending', 'available', 'deleting']}
            ]
            nat_gateway = ec2c.describe_nat_gateways(Filter=filters)['NatGateways']
            support_nat_gateway = True
        except Exception:
            # region/account without NAT-gateway support -> fall back to instances
            support_nat_gateway = False
        # NOTE(review): if describe_nat_gateways raised above, nat_gateway is
        # still None here and len(None) raises TypeError — verify this path
        while len(nat_gateway) and nat_gateway[0]['State'] == 'deleting':
            warning('Nat Gateway in {} is deleting.. waiting..'.format(az_name))
            time.sleep(10)
            nat_gateway = ec2c.describe_nat_gateways(Filter=filters)['NatGateways']
        if nat_gateway:
            # a NAT gateway already exists for this AZ -> adopt it
            nat_instance_by_az[az_name] = {'NatGatewayId': nat_gateway[0]['NatGatewayId']}
            nat_type = 'gateway'
            while nat_gateway[0]['State'] == 'pending':
                warning('Nat Gateway in {} is pending.. waiting..'.format(az_name))
                time.sleep(10)
                nat_gateway = ec2c.describe_nat_gateways(Filter=filters)['NatGateways']
            ip = nat_gateway[0]['NatGatewayAddresses'][0].get('PublicIp')
            private_ip = nat_gateway[0]['NatGatewayAddresses'][0].get('PrivateIp')
            network_interface_id = nat_gateway[0]['NatGatewayAddresses'][0].get('NetworkInterfaceId')
        elif instances:
            # legacy NAT instance exists -> adopt it (and maybe migrate below)
            instance = instances[0]
            nat_instance_by_az[az_name] = {'InstanceId': instance.id}
            nat_type = 'instance'
            ip = instance.public_ip_address
            private_ip = instance.private_ip_address
            network_interface_id = instance.network_interfaces[0].id
            if ip is None:
                with ActionOnExit('Associating Elastic IP..'):
                    ip = associate_address(ec2c, instance.id)

            with ActionOnExit('Disabling source/destination checks..'):
                instance.modify_attribute(SourceDestCheck={'Value': False})

            if support_nat_gateway:
                # count "real" workload instances in this AZ (everything except
                # the NAT instance itself and the Odd SSH bastion)
                instance_count = 0
                all_instance_filters = [
                    {'Name': 'availability-zone',
                     'Values': [az_name]},
                    {'Name': 'instance-state-name',
                     'Values': ['running', 'pending', 'stopping', 'stopped', 'shutting-down']},
                ]
                for inst in ec2.instances.filter(Filters=all_instance_filters):
                    if get_tag(inst.tags, 'Name') != sg_name and get_tag(inst.tags, 'Name') != 'Odd (SSH Bastion Host)':
                        instance_count += 1
                pattern = account.options.get('migrate2natgateway')
                if isinstance(pattern, str):
                    # forced migration when the AZ or region matches the pattern
                    if re.fullmatch(pattern, az_name) or re.fullmatch(pattern, region):
                        terminitate_nat_instance(instance, az_name)
                        instances = None
                        instance = None
                elif instance.state.get('Name') in ('stopping', 'stopped', 'shutting-down'):
                    # NAT instance already down anyway -> safe to migrate
                    warning('NAT Instance ({} in {}) are down. Terminate for Migration...'.format(instance.id, az_name))
                    terminitate_nat_instance(instance, az_name)
                    instances = None
                    instance = None
                elif account.options.get('migrate2natgateway_if_empty'):
                    if instance_count == 0:
                        terminitate_nat_instance(instance, az_name)
                        instances = None
                        instance = None
                    else:
                        warning('Skip migration from NAT Instance to NAT Gateway in {} (Instance Count: {})'.format(
                            az_name,
                            instance_count))
                elif instance_count == 0:
                    # informational: migration would be possible, but no flag set
                    warning('Skip migration from NAT Instance to NAT Gateway in {} (Instance Count: {})'.format(
                        az_name,
                        instance_count))

        if not nat_gateway and not instances:
            if support_nat_gateway:
                with ActionOnExit('Launching NAT Gateway in {az_name}..', **vars()):
                    # create new Nat Gateway if no legacy Nat running
                    allocation_id, ip = associate_address(ec2c)
                    response = ec2c.create_nat_gateway(
                        SubnetId=subnet.id,
                        AllocationId=allocation_id,
                        ClientToken='{}-{}'.format(sg_name, subnet.id)
                    )
                    info(response)
                    nat_instance_by_az[az_name] = {'NatGatewayId': response['NatGateway']['NatGatewayId']}
                    nat_type = 'gateway'
            else:
                with ActionOnExit('Launching NAT instance in {az_name}..', **vars()):
                    # newest Amazon-provided NAT AMI (EBS-backed, HVM)
                    filters = [
                        {'Name': 'name',
                         'Values': ['amzn-ami-vpc-nat-hvm*']},
                        {'Name': 'owner-alias',
                         'Values': ['amazon']},
                        {'Name': 'state',
                         'Values': ['available']},
                        {'Name': 'root-device-type',
                         'Values': ['ebs']}
                    ]
                    images = sorted(ec2.images.filter(Filters=filters), key=lambda x: x.creation_date, reverse=True)
                    most_recent_image = images[0]
                    instance = subnet.create_instances(ImageId=most_recent_image.id,
InstanceType=account.config.get('instance_type', 'm3.medium'),
                                                       SecurityGroupIds=[sg.id],
                                                       MinCount=1,
                                                       MaxCount=1,
                                                       DisableApiTermination=True,
                                                       Monitoring={'Enabled': True})[0]

                    waiter = ec2c.get_waiter('instance_running')
                    waiter.wait(InstanceIds=[instance.id])
                    instance.create_tags(Tags=[{'Key': 'Name', 'Value': sg_name}])
                    ip = None
                    # FIXME activate Autorecovery !!

                if ip is None:
                    with ActionOnExit('Associating Elastic IP..'):
                        ip = associate_address(ec2c, instance.id)

                with ActionOnExit('Disabling source/destination checks..'):
                    instance.modify_attribute(SourceDestCheck={'Value': False})
                nat_instance_by_az[az_name] = {'InstanceId': instance.id}
                nat_type = 'instance'

        # create CloudWatch Logs metric filters (IN/OUT x packets/bytes) on the
        # 'vpc-flowgroup' flow-log group, scoped to this NAT's ENI
        if ip is not None and private_ip is not None and network_interface_id is not None:
            for direction in ('IN', 'OUT'):
                for filter_type in ('packets', 'bytes'):
                    filter_name = 'NAT-{}-{}-{}'.format(az_name, direction, filter_type)
                    with ActionOnExit('put metric filter for {}..'.format(filter_name)) as act:
                        filter_pattern = '[version, accountid, interfaceid={}, '.format(network_interface_id)
                        # NOTE(review): [:1] keeps only the FIRST octet of the
                        # private IP as the "local net" prefix — verify this is
                        # intended (looks like [:2] or [:3] was meant)
                        local_net_pattern = '.'.join(private_ip.split('.')[:1])
                        if direction == 'IN':
                            filter_pattern += 'srcaddr!={}.*, dstaddr={}, '.format(local_net_pattern, private_ip)
                        else:
                            filter_pattern += 'dstaddr={}, srcaddr!={}.*, '.format(private_ip, local_net_pattern)
                        filter_pattern += 'srcport, dstport, protocol, packets, bytes, start, end, action, log_status]'
                        response = logs.put_metric_filter(
                            logGroupName='vpc-flowgroup',
                            filterName=filter_name,
                            filterPattern=filter_pattern,
                            metricTransformations=[
                                {
                                    'metricName': filter_name,
                                    'metricNamespace': 'NAT',
                                    'metricValue': '${}'.format(filter_type)
                                },
                            ]
                        )
                        if (not isinstance(response, dict) or
response.get('ResponseMetadata', {}).get('HTTPStatusCode') != 200):
                            act.error(response)

        info('NAT {} {} is running with Elastic IP {} ({})'.format(nat_type,
                                                                   az_name,
                                                                   ip,
                                                                   nat_instance_by_az[az_name]))

        if account.domain is not None:
            configure_dns_record(account, 'nat-{}'.format(az_name), ip)
        else:
            warning('No DNS domain configured, skipping record creation')

    # block until every NAT gateway created above leaves the 'pending' state
    filters = [
        {'Name': 'state', 'Values': ['pending']}
    ]
    pending_nat_gateway = ec2c.describe_nat_gateways(Filter=filters)['NatGateways']
    if len(pending_nat_gateway):
        with ActionOnExit('Waiting of pending NAT Gateways..'):
            while len(ec2c.describe_nat_gateways(Filter=filters)['NatGateways']):
                time.sleep(15)

    return nat_instance_by_az


def terminitate_nat_instance(instance, az_name):
    # Terminate a legacy NAT instance (lifting termination protection first)
    # and wait for it to be gone.
    # NOTE: the misspelled name ("terminitate") is the established public name
    # used by callers in this module — do not rename.
    with ActionOnExit('Terminating NAT Instance for migration in {}..'.format(az_name)):
        instance.modify_attribute(Attribute='disableApiTermination', Value='false')
        instance.terminate()
        instance.wait_until_terminated()


def create_routing_tables(vpc: object, nat_instance_by_az: dict,
                          replace_default_route: bool, enable_dedicated_dmz_route: bool):
    # Give the VPC's main routing table a default route via the internet
    # gateway (this is the DMZ table), then configure per-subnet tables.
    for route_table in vpc.route_tables.all():
        for association in route_table.associations:
            if association.main:
                for igw in vpc.internet_gateways.all():
                    route_table.create_route(DestinationCidrBlock='0.0.0.0/0',
                                             GatewayId=igw.id)
                # FIXME: Can we change the name of the default routing table?
                route_table.create_tags(
                    Tags=[{'Key': 'Name', 'Value': 'DMZ Routing Table'}])

    configure_routing_table(vpc, nat_instance_by_az,
                            replace_default_route, 'internal', False)
    if enable_dedicated_dmz_route:
        configure_routing_table(vpc, nat_instance_by_az,
                                replace_default_route, 'dmz', True)


def configure_routing_table(vpc: object, nat_instance_by_az: dict, replace_default_route: bool,
                            filter_name: str, route_via_igw: bool):
    # Ensure each subnet of the given type has its own route table whose
    # default route points at the internet gateway (route_via_igw) or at the
    # AZ-local NAT gateway/instance (from nat_instance_by_az).
    for subnet in filter_subnets(vpc, filter_name):
        # match an existing route table to the subnet via the "Name" tag
        route_table = None
        for rt in vpc.route_tables.all():
            if get_tag(rt.tags, 'Name', 'undef-rt-name') == get_tag(subnet.tags, 'Name', 'undef-subnet-name'):
                route_table = rt
                break
        destination = None
        if route_via_igw:
            for igw in vpc.internet_gateways.all():
                destination = {'GatewayId': igw.id}
        else:
            destination = nat_instance_by_az.get(subnet.availability_zone)
        if destination is None:
            # NOTE(review): the '{}' placeholder is never filled — a
            # .format(...) call is missing here
            warning('Skip routing table for {} (no destination)')
            continue
        if not route_table:
            with ActionOnExit('Creating route table {}..'.format(get_tag(subnet.tags, 'Name'))):
                route_table = vpc.create_route_table()
                route_table.create_tags(Tags=[{'Key': 'Name', 'Value': get_tag(subnet.tags, 'Name')}])
                route_table.create_route(DestinationCidrBlock='0.0.0.0/0',
                                         **destination)

        with ActionOnExit('Checking route table..') as act:
            # drop blackholed (or, when requested, any) default route and
            # re-create it pointing at the computed destination
            found_default_route = False
            for route in route_table.routes:
                if route.destination_cidr_block == '0.0.0.0/0':
                    if route.state == 'blackhole' or replace_default_route:
                        act.warning('delete old default destination')
                        vpc.meta.client.delete_route(RouteTableId=route_table.id,
                                                     DestinationCidrBlock='0.0.0.0/0')
                    else:
                        found_default_route = True
            if not found_default_route:
                act.warning('add new default destination')
                route_table.create_route(DestinationCidrBlock='0.0.0.0/0',
                                         **destination)
        with ActionOnExit('Associating route table..'):
            route_table.associate_with_subnet(SubnetId=subnet.id)
        route_table.create_tags(Tags=[
            {
                'Key': 'AvailabilityZone',
                'Value': subnet.availability_zone
            },
            {
                'Key': 'Type',
                'Value': filter_name
            }
        ])


def create_vpc_endpoints(account: AccountData, vpc: object, region: str):
    # Create/update VPC endpoints for all supported services: gateway
    # endpoints for S3 and DynamoDB, an interface endpoint for KMS.
    # Any other advertised service is only reported.
    ec2c = account.session.client('ec2', region)
    service_names = ec2c.describe_vpc_endpoint_services()['ServiceNames']

    for service_name in service_names:
        if service_name.endswith('.s3') or service_name.endswith('.dynamodb'):
            create_gtw_vpc_endpoint(service_name, vpc, ec2c, region)
        elif service_name.endswith('.kms'):
            create_interface_vpc_endpoint(service_name, vpc, ec2c, region)
        else:
            info('found new possible service endpoint: {}'.format(service_name))


def create_gtw_vpc_endpoint(service_name: str, vpc: object, ec2c: object, region: str):
    # Ensure a gateway VPC endpoint for `service_name` exists and is attached
    # to ALL route tables of the VPC (adds/removes route table ids to match).
    router_tables = set([rt.id for rt in vpc.route_tables.all()])
    with ActionOnExit('Checking VPC Endpoint {}..'.format(service_name)) as act:
        endpoints = ec2c.describe_vpc_endpoints(
            Filters=[
                {
                    'Name': 'service-name',
                    'Values': [
                        service_name
                    ]
                },
                {
                    'Name': 'vpc-id',
                    'Values': [
                        vpc.id
                    ]
                },
                {
                    'Name': 'vpc-endpoint-state',
                    'Values': [
                        'pending',
                        'available'
                    ]
                }
            ]
        )['VpcEndpoints']
        if endpoints:
            for endpoint in endpoints:
                # reconcile the endpoint's route tables with the VPC's
                rt_in_endpoint = set(endpoint['RouteTableIds'])
                if rt_in_endpoint != router_tables:
                    options = {'VpcEndpointId': endpoint['VpcEndpointId']}
                    if rt_in_endpoint.difference(router_tables):
                        options['RemoveRouteTableIds'] = list(rt_in_endpoint.difference(router_tables))
                    if router_tables.difference(rt_in_endpoint):
                        options['AddRouteTableIds'] = list(router_tables.difference(rt_in_endpoint))
                    response = ec2c.modify_vpc_endpoint(**options)
                    act.warning('mismatch ({} vs. {}), make update: {}'.format(
                        rt_in_endpoint,
                        router_tables,
                        response,
                    ))
        else:
            # deterministic ClientToken makes the create call idempotent
            options = {
                'VpcId': vpc.id,
                'ServiceName': service_name,
                'RouteTableIds': list(router_tables),
                'ClientToken': hashlib.md5(
                    '{}-{}-{}:{}'.format(
                        service_name,
                        region,
                        vpc.id,
                        sorted(list(router_tables))
                    ).encode('utf-8')).hexdigest()
            }
            response = ec2c.create_vpc_endpoint(**options)
            act.warning('missing, make create: {}'.format(response))


def create_interface_vpc_endpoint(service_name: str, vpc: object, ec2c: object, region: str):
    # Ensure an interface VPC endpoint for `service_name` (KMS) exists in all
    # internal subnets, guarded by a dedicated security group that allows
    # HTTPS from the VPC CIDR, with private DNS enabled.
    subnets = set([subnet.id for subnet in filter_subnets(vpc, "internal")])
    with ActionOnExit('Checking VPC Endpoint {}..'.format(service_name)) as act:
        sg_name = 'KMS VPC Endpoint'
        sg_desc = 'Allow access to the KMS VPC endpoint'
        sg = get_sg(sg_name, sg_desc, vpc.security_groups.all())
        if not sg:
            sg = vpc.create_security_group(
                GroupName=sg_name,
                Description=sg_desc,
            )
            # wait out eventual consistency before tagging the new group
            time.sleep(2)
            sg.create_tags(
                Tags=[
                    {'Key': 'Name', 'Value': sg_name},
                    {'Key': 'InfrastructureComponent', 'Value': 'true'}
                ])
            act.warning('missing, make create: {}'.format(sg))
        if not allow_https_vpc_cidr(sg.ip_permissions, vpc.cidr_block):
            act.warning(
                'missing HTTP permission, make authorize: {} port=443 CIDR={}'.format(
                    sg,
                    vpc.cidr_block,
                )
            )
            sg.authorize_ingress(
                IpProtocol='tcp',
                FromPort=443,
                ToPort=443,
                CidrIp=vpc.cidr_block,
            )
        endpoints = ec2c.describe_vpc_endpoints(
            Filters=[
                {'Name': 'service-name', 'Values': [service_name]},
                {'Name': 'vpc-id',
                 'Values': [vpc.id]},
                {'Name': 'vpc-endpoint-state', 'Values': ['pending', 'available']},
            ]
        )['VpcEndpoints']
        if endpoints:
            for endpoint in endpoints:
                # ensure our SG is attached to the endpoint
                sgs_in_endpoint = [group['GroupId'] for group in endpoint['Groups']]
                if sg.id not in sgs_in_endpoint:
                    options = {
                        'VpcEndpointId': endpoint['VpcEndpointId'],
                        'AddSecurityGroupIds': [sg.id],
                    }
                    response = ec2c.modify_vpc_endpoint(**options)
                    act.warning(
                        'mismatch ({} not in {}), make update: {}'.format(
                            sg.id,
                            sgs_in_endpoint,
                            response,
                        )
                    )
                # ensure private DNS is switched on
                if not endpoint['PrivateDnsEnabled']:
                    options = {
                        'VpcEndpointId': endpoint['VpcEndpointId'],
                        'PrivateDnsEnabled': True,
                    }
                    response = ec2c.modify_vpc_endpoint(**options)
                    act.warning(
                        'mismatch (PrivateDns not enabled), make update: {}'.format(response)
                    )
                # reconcile the endpoint's subnets with the internal subnets
                subnet_in_endpoint = set(endpoint['SubnetIds'])
                if subnet_in_endpoint != subnets:
                    options = {'VpcEndpointId': endpoint['VpcEndpointId']}
                    if subnet_in_endpoint.difference(subnets):
                        options['RemoveSubnetIds'] = list(
                            subnet_in_endpoint.difference(subnets)
                        )
                    if subnets.difference(subnet_in_endpoint):
                        options['AddSubnetIds'] = list(
                            subnets.difference(subnet_in_endpoint)
                        )
                    response = ec2c.modify_vpc_endpoint(**options)
                    act.warning(
                        'mismatch ({} vs. {}), make update: {}'.format(
                            subnet_in_endpoint,
                            subnets,
                            response,
                        )
                    )
        else:
            # deterministic ClientToken makes the create call idempotent
            options = {
                'VpcEndpointType': 'Interface',
                'VpcId': vpc.id,
                'ServiceName': service_name,
                'SubnetIds': list(subnets),
                'SecurityGroupIds': [sg.id],
                'PrivateDnsEnabled': True,
                'ClientToken': hashlib.md5(
                    '{}-{}-{}:{}'.format(
                        service_name, region, vpc.id, sorted(list(subnets))
                    ).encode('utf-8')
                ).hexdigest(),
            }
            response = ec2c.create_vpc_endpoint(**options)
            act.warning(
                'missing, make create: {}'.format(response)
            )


def check_vpn_propagation(account: AccountData, vpc: object, region: str):
    # Report (info/error) for each VPN gateway attached to the VPC whether
    # route propagation is active on every route table.
    ec2c = account.session.client('ec2', region)
    for vpn_gateway in ec2c.describe_vpn_gateways(Filters=[
        {
            'Name': 'attachment.vpc-id',
            'Values': [
                vpc.id,
            ]
        },
    ]).get('VpnGateways', []):
        for route_table in vpc.route_tables.all():
            msg = '{} | {} Route Propagation {} | {}: '.format(
                route_table.id,
                get_tag(route_table.tags, 'Name'),
                vpn_gateway['VpnGatewayId'],
                get_tag(vpn_gateway.get('Tags', {}), 'Name'))
            if is_vgw_propagation_active(route_table.propagating_vgws, vpn_gateway['VpnGatewayId']):
                info('{} {}'.format(msg, 'Yes'))
            else:
                error('{} {}'.format(msg, 'No'))


def is_vgw_propagation_active(propagating_vgws: list, vgw_id: str):
    # True when the given VGW id appears in the route table's propagating list.
    for propagated_vgw in propagating_vgws:
        if propagated_vgw.get('GatewayId', 'none') == vgw_id:
            return True
    return False


def if_vpc_empty(account: AccountData, region: str):
    # Print a table of all ENIs in the region and return True when every ENI
    # belongs to a STUPS infrastructure component (NAT, Odd bastion, KMS
    # endpoint) — i.e. the account is "free" and can be cleaned up.
    ec2 = account.session.resource('ec2', region)
    ec2c = account.session.client('ec2', region)

    def instance_state(instance_id):
        # Name of the instance state, or None when there is no instance.
        if instance_id:
            return ec2.Instance(id=instance_id).state.get('Name')

    def if_stups_tool(ni: dict):
        # True when the ENI belongs to STUPS infrastructure and may be removed.
        instance_id = ni.get('Attachment', {}).get('InstanceId')
        if instance_id:
            instance = ec2.Instance(id=instance_id)
            availability_zones = get_az_names(account.session, region)
            stups_names = ('Odd (SSH Bastion Host)',) + tuple(['NAT {}'.format(x) for x in availability_zones])
            if get_tag(instance.tags, 'Name') in stups_names:
                return True
            if get_tag(instance.tags, 'aws:cloudformation:logical-id') == 'OddServerInstance':
                return True
        allocation_id = ni.get('Association', {}).get('AllocationId')
        if allocation_id:
            # EIP allocated to a NAT gateway counts as infrastructure too
            for gateway in ec2c.describe_nat_gateways()['NatGateways']:
                if gateway.get('NatGatewayAddresses', {})[0].get('AllocationId') == allocation_id:
                    return True

        # use the SecurityGroup name on the ENI to determine if it's one belonging to the KMS VPC endpoint.
        sg_name = ','.join([group["GroupName"] for group in ni.get("Groups")])
        if sg_name == "KMS VPC Endpoint":
            return True
        return False

    account_is_free = True
    rows = []
    for ni in ec2c.describe_network_interfaces()['NetworkInterfaces']:
        can_remove = if_stups_tool(ni)
        if not can_remove:
            account_is_free = False
        # print(' '.join([str(ni), str(ni.groups), str(ni.attachment), ni.description]))
        rows.append({'network_id': ni.get('NetworkInterfaceId'),
                     'group_name': ', '.join([group['GroupName'] for group in ni.get('Groups')]),
                     'description': ni.get('Description'),
                     'status': ni.get('Attachment', {}).get('Status'),
                     'instance_owner_id': ni.get('Attachment', {}).get('InstanceOwnerId'),
                     'instance_id': ni.get('Attachment', {}).get('InstanceId', ''),
                     'state': instance_state(ni.get('Attachment', {}).get('InstanceId')),
                     'allocation_id': ni.get('Association', {}).get('AllocationId'),
                     'account_name': account.name,
                     'can_remove': '✔' if can_remove else '✘'

                     })
    rows.sort(key=lambda x: (x['account_name'], x['group_name'],
                             x['instance_id']))
    with OutputFormat('text'):
        print_table('''
            can_remove
            account_name
            network_id
            allocation_id
            description
            group_name
            status
            instance_owner_id
            instance_id state
            '''.split(),
                    rows,
                    styles={
                        'running': {'fg': 'green'},
                        'stopped': {'fg': 'red', 'bold': True},
                        '✔': {'bg': 'green'},
                        '✘': {'bg': 'red', 'bold': True},
                    })

    return account_is_free


def delete_nat_host(account: AccountData, region: str):
    # Terminate every legacy NAT instance in the region and drop its DNS
    # record (when a domain is configured).
    ec2 = account.session.resource('ec2', region)
    availability_zones = get_az_names(account.session, region)
    for instance in ec2.instances.all():
        if instance.state.get('Name') in ('running', 'pending', 'stopping', 'stopped'):
            if account.domain is not None and instance.public_ip_address:
                delete_dns_record(account,
                                  'nat-{}'.format(instance.subnet.availability_zone),
                                  instance.public_ip_address)
            # Drop Bastion and NAT Instances
            stups_names = tuple(['NAT {}'.format(x) for x in availability_zones])
            if get_tag(instance.tags, 'Name') in stups_names:
                terminitate_nat_instance(instance, instance.subnet.availability_zone)


def cleanup_vpc(account: AccountData, region: str):
    # Tear down the network stack of the account/region: NAT gateways,
    # VPC endpoints, subnets, non-main route tables, non-default VPCs and
    # Elastic IPs (in dependency order, waiting where AWS deletes async).
    ec2 = account.session.resource('ec2', region)
    ec2c = account.session.client('ec2', region)

    with ActionOnExit('Delete Nat Gateways..'):
        for gateway in ec2c.describe_nat_gateways()['NatGateways']:
            if gateway['State'] == 'available':
                if account.domain is not None and gateway.get('NatGatewayAddresses', {})[0].get('PublicIp'):
                    delete_dns_record(account,
                                      'nat-{}'.format(ec2.Subnet(gateway['SubnetId']).availability_zone),
                                      gateway.get('NatGatewayAddresses', {})[0].get('PublicIp'))
            if gateway['State'] in ('pending', 'available'):
                ec2c.delete_nat_gateway(NatGatewayId=gateway['NatGatewayId'])
        filters = [
            {'Name': 'state', 'Values': ['pending', 'available', 'deleting']}
        ]
        # wait until async NAT gateway deletion has finished
        nat_gateway = ec2c.describe_nat_gateways(Filter=filters)['NatGateways']
        while len(nat_gateway) and nat_gateway[0]['State'] == 'deleting':
            warning('Nat Gateway is deleting.. waiting..')
            time.sleep(10)
            nat_gateway = ec2c.describe_nat_gateways(Filter=filters)['NatGateways']

    with ActionOnExit('Delete Endpoints..'):
        for endpoint in ec2c.describe_vpc_endpoints()['VpcEndpoints']:
            ec2c.delete_vpc_endpoints(VpcEndpointIds=[endpoint['VpcEndpointId']])

        while len(ec2c.describe_vpc_endpoints()['VpcEndpoints']) > 0:
            warning('VPC Endpoint is deleting.. waiting..')
            time.sleep(10)

    with ActionOnExit('Delete Subnets..'):
        for subnet in ec2c.describe_subnets()['Subnets']:
            ec2c.delete_subnet(SubnetId=subnet['SubnetId'])

    with ActionOnExit('Delete Routing Table..'):
        # the main route table cannot be deleted; skip it
        for route_table in ec2c.describe_route_tables()['RouteTables']:
            if not route_table['Associations'] or not route_table['Associations'][0]['Main']:
                ec2c.delete_route_table(RouteTableId=route_table['RouteTableId'])

    with ActionOnExit('Delete non default VPCs..'):
        for vpc in ec2c.describe_vpcs()['Vpcs']:
            if not vpc['IsDefault']:
                ec2c.delete_vpc(VpcId=vpc['VpcId'])

    with ActionOnExit('Delete Elastic IPs..'):
        for eip in ec2c.describe_addresses()['Addresses']:
            ec2c.release_address(AllocationId=eip['AllocationId'])


def get_sg(name: str, desc: str, sgs: list) -> object:
    # Return the security group matching both name and description,
    # or None when no such group exists.
    for sg in sgs:
        if sg.group_name == name and sg.description == desc:
            return sg


def allow_https_vpc_cidr(permissions: list, vpc_cidr: str) -> bool:
    # True when the permission list already contains an ingress rule for
    # TCP/443 from the given VPC CIDR.
    for permission in permissions:
        if (
            permission['IpProtocol'] == 'tcp'
            and permission['FromPort'] == 443
            and permission['ToPort'] == 443
            and {'CidrIp': vpc_cidr} in
permission['IpRanges']
        ):
            return True
            # NOTE(review): no explicit "return False" — falls through to an
            # implicit None, which callers treat as falsy; annotation says bool
--------------------------------------------------------------------------------
/sevenseconds/helper/__init__.py:
--------------------------------------------------------------------------------
import sys
import click
from clickclick import secho
import yaml
from datetime import timedelta
import time
import threading

START_TIME = time.time()
THREADDATA = threading.local()
PATTERNLENGTH = 25
# NOTE(review): "QUITE" is presumably a misspelling of "QUIET"; the name is
# referenced throughout this module (and possibly elsewhere), so it is kept
QUITE = False


class ActionOnExit:
    # Context manager that prints a progress line for an action, collects
    # errors/warnings during the action, and on exit appends "OK" (green),
    # the collected messages, or the exception, plus the elapsed time.
    def __init__(self, msg, **kwargs):
        self.msg_args = kwargs
        self.msg = click.style(msg.format(**kwargs), bold=True)
        self.errors = []
        self._suppress_exception = False
        self.ok_msg = ' OK'
        self.call_time = time.time()
        if not QUITE:
            self._print(' ...')

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is None:
            # only claim success when no error/warning was recorded
            if not self.errors:
                self.msg += click.style(' {}'.format(self.ok_msg), fg='green', bold=True)
        elif not self._suppress_exception:
            self.msg += click.style(' EXCEPTION OCCURRED: {}'.format(exc_val), fg='red', bold=True)
        if not QUITE or self.errors:
            self._print(' +{:.6f}s'.format(time.time() - self.call_time))

    def fatal_error(self, msg, **kwargs):
        # Record the error, print the line and terminate the process.
        self._suppress_exception = True  # Avoid printing "EXCEPTION OCCURRED: -1" on exit
        self.error(msg, **kwargs)
        self._print(' +{:.6f}s'.format(time.time() - self.call_time))
        sys.exit(1)

    def error(self, msg, **kwargs):
        # Append a red message; its presence suppresses the final "OK".
        self.msg += click.style(' {}'.format(msg), fg='red', bold=True, **kwargs)
        self.errors.append(msg)

    def progress(self):
        # Append a progress dot to the message.
        self.msg += click.style(' .'.format())

    def warning(self, msg, **kwargs):
        # Append a yellow message; also recorded in errors (suppresses "OK").
        self.msg += click.style(' {}'.format(msg), fg='yellow', bold=True, **kwargs)
        self.errors.append(msg)

    def ok(self, msg):
        # Override the default " OK" success suffix.
        self.ok_msg = ' {}'.format(msg)

    def _print(self, suffix=''):
        elapsed_seconds = time.time() - START_TIME
        # using timedelta here for convenient default formatting
        elapsed = timedelta(seconds=elapsed_seconds)
        print('[{} | {}] {}{}'.format(
            getattr(THREADDATA, 'name', 'GLOBAL').rjust(PATTERNLENGTH),
            elapsed,
            self.msg,
            suffix))


def _secho(msg, **kwargs):
    # Echo a message prefixed with [thread-name | elapsed-time].
    elapsed_seconds = time.time() - START_TIME
    # using timedelta here for convenient default formatting
    elapsed = timedelta(seconds=elapsed_seconds)
    secho('[{} | {}] {}'.format(getattr(THREADDATA, 'name', 'GLOBAL').rjust(PATTERNLENGTH), elapsed, msg), **kwargs)


def error(msg, **kwargs):
    # Red, bold message (always shown).
    _secho(msg, fg='red', bold=True, **kwargs)


def fatal_error(msg, **kwargs):
    # Red message, then terminate the process with exit code 1.
    error(msg, **kwargs)
    sys.exit(1)


def ok(msg=' OK', **kwargs):
    # Green message, suppressed in quiet mode.
    if not QUITE:
        _secho(msg, fg='green', bold=True, **kwargs)


def warning(msg, **kwargs):
    # Yellow, bold message (always shown).
    _secho(msg, fg='yellow', bold=True, **kwargs)


def info(msg):
    # Blue message, suppressed in quiet mode.
    if not QUITE:
        _secho(msg, fg='blue', bold=True)


def substitute_template_vars(data, context: dict):
    '''
    >>> substitute_template_vars({'test': {'foo': {'foobar': 'dummy-{bob}'}}},
    ...
{'bob': 'BOB-REPLACE', 'ann': 'ANN-REPLACE'})
    {'test': {'foo': {'foobar': 'dummy-BOB-REPLACE'}}}
    '''
    # deep-copy via YAML round trip, then recursively str.format all string
    # leaves with the given context
    serialized = yaml.safe_dump(data)
    data = yaml.safe_load(serialized)
    for k, v in data.items():
        if isinstance(v, str):
            data[k] = v.format(**context)
        elif isinstance(v, dict):
            data[k] = substitute_template_vars(v, context)
    return data
--------------------------------------------------------------------------------
/sevenseconds/helper/auth.py:
--------------------------------------------------------------------------------
import jwt
import zign.api
import requests
import os
import boto3
import botocore.exceptions
import multiprocessing
from itertools import repeat
from ..helper import ActionOnExit, error, fatal_error


class AssumeRoleFailed(Exception):
    # Raised when no way of obtaining credentials for an account worked.
    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return 'Assuming role failed: {}'.format(self.msg)


class OAuthServices:
    # Client for the OAuth-protected AWS Credential Service and account list:
    # resolves the user from the ztoken, caches profiles/accounts, and hands
    # out temporary AWS credentials (optionally via a master account).
    def __init__(self,
                 aws_credentials_service_url: str,
                 aws_credentials_service_resources: dict,
                 account_list_url: str,
                 token_managed_id_key: str,
                 login_account: str,
                 role_name: str,
                 token: str):
        if token:
            self.token = token
        else:
            self.token = zign.api.get_token('sevenseconds', ['uid'])
        self.service_url = aws_credentials_service_url
        self.service_resources = aws_credentials_service_resources
        self.account_list_url = account_list_url
        self.token_managed_id_key = token_managed_id_key
        # signature is NOT verified here; the token is only parsed to find the
        # managed user id claim
        self.decoded_token = jwt.decode(self.token, options={"verify_signature": False})

        if self.token_managed_id_key not in self.decoded_token:
            raise ValueError('Invalid token. Please check your ztoken configuration')
        self.user_id = self.decoded_token[self.token_managed_id_key]
        self._profiles = []
        self._accounts = []
        # warm the caches up front
        self.get_aws_accounts()
        self.get_profiles()

        self.use_master_account = False
        if login_account is not None:

            self.use_master_account = True
            self.master_account = login_account
            with ActionOnExit('Log in to Master Accounter..'):
                self.master_credentials = self.get_aws_credentials_from_aws_credentials_service(
                    self.master_account,
                    role_name
                )

    def get_profiles(self):
        '''Returns the AWS profiles for a user.

        User is implicit from ztoken'''
        if self._profiles:
            return self._profiles
        with ActionOnExit('Contact to AWS Credential Service and get list of all profiles'):
            roles_url = self.service_url + self.service_resources['roles'].format(user_id=self.user_id)

            r = requests.get(roles_url, headers={'Authorization': 'Bearer {}'.format(self.token)}, timeout=20)
            r.raise_for_status()

            self._profiles = r.json()['account_roles']
        return self._profiles

    def get_profile(self, account_name, role_name):
        '''Returns the profile information for the given role and account name.'''
        self.get_profiles()
        for item in self._profiles:
            if item['account_name'] == account_name and item['role_name'] == role_name:
                return item
        else:
            # for/else: only reached when the loop did not return
            raise RuntimeError('Unable to find the role: {} for account: {}'.format(role_name, account_name))

    def get_aws_credentials(self, account_name, role_name):
        '''Requests the specified AWS Temporary Credentials'''
        # Resolution order: master account (when configured) -> credential
        # service profile -> local ~/.aws/credentials profile as last resort.
        self.get_profiles()
        if self.use_master_account:
            try:
                return self.get_aws_credentials_from_master_account(account_name, role_name)
            except Exception:
                error('[{}] No matching role found for account {}/{}. Try profile from ~/.aws/credentials'
                      .format(account_name, account_name, role_name))
                return self.get_aws_credentials_from_profile(account_name)
        else:
            for profile in self._profiles:
                if account_name == profile['account_name'] and profile['role_name'] == role_name:
                    return self.get_aws_credentials_from_aws_credentials_service(
                        profile['account_name'],
                        profile['role_name'])
            error('[{}] No matching role found for account {}/{}. Try profile from ~/.aws/credentials'
                  .format(account_name, account_name, role_name))
            return self.get_aws_credentials_from_profile(account_name)

    def get_aws_credentials_from_profile(self, account_name):
        # Fall back to a locally configured AWS profile of the same name;
        # returns {'profile_name': ...} or None when the profile is missing.
        try:
            if boto3.session.Session(profile_name=account_name):
                return {'profile_name': account_name}
        except botocore.exceptions.ProfileNotFound as e:
            error('[{}] {}'
                  .format(account_name, e))
            return None

    def get_aws_credentials_from_master_account(self, account_name, role_name):
        # STS assume-role into the target account using master credentials.
        account = self.get_aws_account(account_name)
        with ActionOnExit('[{}] Assuming role {} via {}..'.format(account_name, role_name, self.master_account)):
            sts = boto3.client('sts', **self.master_credentials)
            role_arn = 'arn:aws:iam::{}:role/{}'.format(
                account['id'],
                role_name)
            response = sts.assume_role(
                RoleArn=role_arn,
                RoleSessionName='sevenseconds'
            )
            return {
                'aws_access_key_id': response['Credentials'].get('AccessKeyId'),
                'aws_secret_access_key': response['Credentials'].get('SecretAccessKey'),
                'aws_session_token': response['Credentials'].get('SessionToken')
            }

    def get_aws_credentials_from_aws_credentials_service(self, account_name, role_name):
        '''Requests the specified AWS Temporary Credentials from the provided Credential Service URL'''
        # the service expects the role name without any account prefix
        role_name = role_name.split('-', 1)[-1]
        profile = self.get_profile(account_name, role_name)
        with ActionOnExit('[{}] Assuming role {}..'.format(account_name, profile['role_name'])):
            credentials_url = self.service_url + self.service_resources['credentials'].format(
                account_id=profile['account_id'],
                role_name=role_name)
            r = requests.get(credentials_url, headers={'Authorization': 'Bearer {}'.format(self.token)},
                             timeout=30)

            r.raise_for_status()

            credentials = r.json()
            return {
                'aws_access_key_id': credentials.get('access_key_id'),
                'aws_secret_access_key': credentials.get('secret_access_key'),
                'aws_session_token': credentials.get('session_token')
            }

    def get_aws_accounts(self):
        '''Returns a list of all AWS Accounts
        Get Accounts with Account ID from Account API
        http https://cmdb.example.org/aws-accounts.json
        [
            {
                "name": "account_foo",
                "disabled": false,
                "id": "123456789012",
            },
            {
                "name": "account_bar",
                "disabled": true,
                "id": "123123123123",
            }
        ]
        '''
        if len(self._accounts) == 0:
            with ActionOnExit('get AWS Accounts from {}'.format(self.account_list_url)) as act:
                r = requests.get(
                    self.account_list_url,
                    headers={'Authorization': 'Bearer {}'.format(self.token)},
                    timeout=20)
                r.raise_for_status()
                self._accounts = r.json()
                act.ok('Count: {}'.format(len(self._accounts)))
        return self._accounts

    def get_aws_account(self, account_name):
        # Lookup by name in the cached account list; None when not found.
        for account in self.get_aws_accounts():
            if account['name'] == account_name:
                return account


def get_credentials_map(batch, auth):
    # Resolve credentials for every (account, role) entry in `batch`, fanning
    # out the assume-role calls over a process pool per credential service URL.
    credentials = {}
    worker_result = []
    for aws_credentials_service_url in batch:
        with ActionOnExit('Authenticating against {}..'.format(aws_credentials_service_url)):
            profiles = auth[aws_credentials_service_url].get_profiles()

        with multiprocessing.Pool(processes=os.cpu_count() * 4) as pool:
            worker_result = pool.starmap(assume_role_worker,
                                         zip(batch[aws_credentials_service_url].values(),
                                             repeat(profiles),
                                             repeat(auth[aws_credentials_service_url])))
        for worker_value in worker_result:
            if isinstance(worker_value, dict):
                credentials.update(worker_value)
    return credentials


def assume_role_worker(batch, profiles, auth):
    # Pool worker: fetch credentials for one account/role pair; returns
    # {'account/role': credentials} or None on failure.
    account_name = batch['name']
    role = batch['role']
    cred_name = '{}/{}'.format(account_name, role)
    credentials = auth.get_aws_credentials(account_name, role)
    if credentials:
        return {cred_name: credentials}
    return None


def get_sessions(account_names: list,
                 config: dict, accounts: list, options: dict):
    # Build per-account session descriptors (admin, base-AMI and target
    # account credentials) and resolve all credentials in one batch.
    global_cfg = config.get('global', {})
    sessions_tmp = {}
    batch = {}
    auth = {}

    for account_name in account_names:
        # account config inherits every missing key from the global section
        cfg = accounts.get(account_name) or {}
        for key, val in global_cfg.items():
            if key not in cfg:
                cfg[key] = val

        aws_credentials_service_url = cfg.get('aws_credentials_service_url')
        saml_role = cfg.get('saml_admin_login_role')
        account_alias = cfg.get('alias', account_name).format(account_name=account_name)
        base_ami = cfg.get('base_ami', {}).get('account_name')
        admin_account = cfg.get('admin_account')
        if not admin_account:
            fatal_error('Missing Option "admin_account" please set Account Name for Main-Account!')
        if not base_ami:
            fatal_error('Missing Option "account_name" for base AMI. Please set Account Name for AMI-Account!')

        # one OAuthServices client per credential service URL
        if auth.get(aws_credentials_service_url) is None:
            auth[aws_credentials_service_url] = OAuthServices(
                aws_credentials_service_url=aws_credentials_service_url,
                aws_credentials_service_resources=cfg.get('aws_credentials_service_resources', {}),
                account_list_url=cfg.get('account_list_url'),
                token_managed_id_key=cfg.get('token_managed_id_key'),
                login_account=options.get('login_account', None),
                role_name=saml_role,
                token=options.get('token')
            )
        if batch.get(aws_credentials_service_url) is None:
            batch[aws_credentials_service_url] = {}
        # credentials are needed for the admin, base-AMI and target accounts
        for account in (admin_account, base_ami, account_name):
            batch[aws_credentials_service_url]['{}/{}'.format(account, saml_role)] = {
                'name': account,
                'role': saml_role}
        sessions_tmp[account_name] = {
            'admin_account_keyname': '{}/{}'.format(admin_account, saml_role),
            'base_ami_account_keyname': '{}/{}'.format(base_ami, saml_role),
            'account_keyname': '{}/{}'.format(account_name, saml_role),
            'account_name': account_name,
            'account_alias': account_alias,
            'config': cfg,
            'auth': auth[aws_credentials_service_url]}

    credentials = get_credentials_map(batch, auth)

    return rewrite_sessions_map(sessions_tmp, credentials, options)


def rewrite_sessions_map(sessions_tmp: dict, credentials: dict, options: dict):
    # Turn the temporary session map plus resolved credentials into
    # AccountData objects (continues beyond this chunk).
    from ..config import AccountData

    sessions = {}
    for account_name in sessions_tmp:
        account_keyname = sessions_tmp[account_name]['account_keyname']
        admin_account_keyname = sessions_tmp[account_name]['admin_account_keyname']
        base_ami_account_keyname = sessions_tmp[account_name]['base_ami_account_keyname']
        if credentials.get(account_keyname):
            sessions[account_name] = AccountData(name=sessions_tmp[account_name]['account_name'],
# Per-process cache: region name -> list of available AZ names.
AZ_NAMES_BY_REGION = {}
# Elastic IPs reserved but not yet associated: AllocationId -> PublicIp.
PENDING_ASSOCIATIONS = {}


def filter_subnets(vpc: object, _type: str):
    '''Yield the subnets of *vpc* whose Name tag starts with "<_type>-".'''
    for subnet in vpc.subnets.all():
        if get_tag(subnet.tags, 'Name', '').startswith(_type + '-'):
            yield subnet


def get_account_alias(session):
    '''Return the list of IAM account aliases (typically zero or one entry).'''
    conn = session.client('iam')
    return conn.list_account_aliases()['AccountAliases']


def set_account_alias(session, alias):
    '''Create the IAM account alias *alias* for this account.'''
    conn = session.client('iam')
    conn.create_account_alias(AccountAlias=alias)


def get_account_id(session):
    '''Return the AWS account id of the session's caller (via STS).'''
    sts = session.client('sts')
    return sts.get_caller_identity()['Account']


def get_az_names(session, region: str):
    '''Return the names of all available AZs in *region*, cached per process.'''
    names = AZ_NAMES_BY_REGION.get(region)
    if not names:
        conn = session.client('ec2', region)
        ec2_zones = conn.describe_availability_zones(Filters=[{'Name': 'state', 'Values': ['available']}])
        names = [z['ZoneName'] for z in ec2_zones['AvailabilityZones']]
        AZ_NAMES_BY_REGION[region] = names
    return names


def get_tag(tags: list, key: str, default=None, prefix=''):
    '''Return the value of the first entry in *tags* whose key equals *key*.

    Works for EC2 tag lists ({'Key': .., 'Value': ..}) and, via *prefix*,
    for CloudFormation parameter lists ({'ParameterKey': ..,
    'ParameterValue': ..}). Returns *default* when *tags* is not a list or
    no entry matches.

    >>> tags = [{'Key': 'aws:cloudformation:stack-id',
    ...          'Value': 'arn:aws:cloudformation:eu-west-1:123:stack/test-123'},
    ...         {'Key': 'Name',
    ...          'Value': 'test-123'},
    ...         {'Key': 'StackVersion',
    ...          'Value': '123'}]
    >>> get_tag(tags, 'StackVersion')
    '123'
    >>> get_tag(tags, 'aws:cloudformation:stack-id')
    'arn:aws:cloudformation:eu-west-1:123:stack/test-123'
    >>> get_tag(tags, 'notfound') is None
    True
    >>> parameters = [{'ParameterKey': 'VpcId', 'ParameterValue': 'vpc-123321'},
    ...               {'ParameterKey': 'TaupageId', 'ParameterValue': 'ami-123321'},
    ...               {'ParameterKey': 'EIPAllocation', 'ParameterValue': 'eipalloc-123321'},
    ...               {'ParameterKey': 'SubnetId', 'ParameterValue': 'subnet-123321'},
    ...               {'ParameterKey': 'InstanceType', 'ParameterValue': 't2.micro'},
    ...               {'ParameterKey': 'OddRelease', 'ParameterValue': 'v123'}]
    >>> get_tag(parameters, 'TaupageId', prefix='Parameter')
    'ami-123321'
    >>> get_tag(parameters, 'OddRelease', prefix='Parameter')
    'v123'
    '''
    if isinstance(tags, list):
        # Short-circuit on the first match instead of materializing a list of
        # all matches (the original used `if len(found):` on a full scan).
        return next((tag['{}Value'.format(prefix)] for tag in tags
                     if tag['{}Key'.format(prefix)] == key),
                    default)
    return default
to re-use IP from previous bastion host) 74 | addr = vpc_addresse 75 | if addr is None: 76 | addr = ec2c.allocate_address(Domain='vpc') 77 | if instance_id is None: 78 | PENDING_ASSOCIATIONS[addr.get('AllocationId')] = addr.get('PublicIp') 79 | return addr.get('AllocationId'), addr.get('PublicIp') 80 | else: 81 | ec2c.associate_address(InstanceId=instance_id, 82 | AllocationId=addr.get('AllocationId')) 83 | return addr.get('PublicIp') 84 | -------------------------------------------------------------------------------- /sevenseconds/helper/network.py: -------------------------------------------------------------------------------- 1 | import multiprocessing 2 | import os 3 | import socket 4 | import boto3 5 | from netaddr import IPNetwork 6 | from ..helper import ActionOnExit, info 7 | from .aws import get_az_names 8 | 9 | 10 | def calculate_subnet(vpc_net: IPNetwork, _type: str, az_index: int): 11 | ''' 12 | >>> calculate_subnet(IPNetwork('10.0.0.0/16'), 'dmz', 0) 13 | IPNetwork('10.0.0.0/21') 14 | 15 | >>> calculate_subnet(IPNetwork('10.0.0.0/16'), 'internal', 0) 16 | IPNetwork('10.0.128.0/20') 17 | 18 | >>> calculate_subnet(IPNetwork('10.0.0.0/19'), 'dmz', 0) 19 | IPNetwork('10.0.0.0/24') 20 | 21 | >>> calculate_subnet(IPNetwork('10.0.0.0/19'), 'dmz', 1) 22 | IPNetwork('10.0.1.0/24') 23 | 24 | >>> calculate_subnet(IPNetwork('10.0.0.0/18'), 'dmz', 1) 25 | IPNetwork('10.0.2.0/23') 26 | 27 | >>> calculate_subnet(IPNetwork('10.0.0.0/19'), 'internal', 0) 28 | IPNetwork('10.0.16.0/23') 29 | 30 | >>> calculate_subnet(IPNetwork('10.0.0.0/19'), 'internal', 1) 31 | IPNetwork('10.0.18.0/23') 32 | 33 | >>> calculate_subnet(IPNetwork('10.0.0.0/18'), 'internal', 1) 34 | IPNetwork('10.0.36.0/22') 35 | 36 | >>> calculate_subnet(IPNetwork('10.0.0.0/28'), 'internal', 1) 37 | IPNetwork('10.0.0.9/32') 38 | 39 | >>> calculate_subnet(IPNetwork('10.31.0.0/16'), 'nat', 1) 40 | IPNetwork('10.31.64.16/28') 41 | 42 | >>> calculate_subnet(IPNetwork('10.31.0.0/16'), 'nat', 0) 43 | 
IPNetwork('10.31.64.0/28') 44 | 45 | >>> calculate_subnet(IPNetwork('10.0.0.0/30'), 'internal', 1) 46 | Traceback (most recent call last): 47 | ... 48 | netaddr.core.AddrFormatError: invalid IPNetwork 10.0.0.2/34 49 | 50 | >>> calculate_subnet(IPNetwork('10.0.0.0/64'), 'internal', 1) 51 | Traceback (most recent call last): 52 | ... 53 | netaddr.core.AddrFormatError: invalid IPNetwork 10.0.0.0/64 54 | ''' 55 | if _type == 'dmz': 56 | networks = list(vpc_net.subnet(vpc_net.prefixlen + 5)) 57 | elif _type == 'nat': 58 | networks = list(list(vpc_net.subnet(vpc_net.prefixlen + 2))[1].subnet(28)) 59 | else: 60 | # use the "lower half" of the /16 network for the internal/private subnets 61 | networks = list(list(vpc_net.subnet(vpc_net.prefixlen + 1))[1].subnet(vpc_net.prefixlen + 4)) 62 | return networks[az_index] 63 | 64 | 65 | def get_address(domain): 66 | with ActionOnExit('Checking {}'.format(domain)) as act: 67 | try: 68 | ai = socket.getaddrinfo(domain, 443, family=socket.AF_INET, type=socket.SOCK_STREAM) 69 | except Exception: 70 | ai = [] 71 | act.error('n/a') 72 | pass 73 | for _, _, _, _, ip_port in ai: 74 | ip, _ = ip_port 75 | return '{}/32'.format(ip) 76 | 77 | 78 | def get_trusted_addresses(session_data, config: dict): 79 | session = boto3.session.Session(**session_data) 80 | 81 | accounts = config.get('accounts', {}) 82 | 83 | addresses = set() 84 | domains = set() 85 | for name, cidr in config.get('global', {}).get('trusted_networks', {}).items(): 86 | info('Adding trusted network {} ({})'.format(name, cidr)) 87 | addresses.add(cidr) 88 | 89 | if config.get('global', {}).get('trusted_networks_config', {}).get('disable_account_autogeneration', False): 90 | return addresses 91 | 92 | for account_name, _cfg in accounts.items(): 93 | cfg = {} 94 | cfg.update(config.get('global', {})) 95 | if _cfg: 96 | cfg.update(_cfg) 97 | for region in cfg['regions']: 98 | dns_domain = cfg.get('domain') 99 | if dns_domain: 100 | domains.update(['odd-{}.{}'.format(region, 
dns_domain.format(account_name=account_name))]) 101 | for az in get_az_names(session, region): 102 | domains.add('nat-{}.{}'.format(az, dns_domain.format(account_name=account_name))) 103 | 104 | with multiprocessing.Pool(processes=os.cpu_count() * 20) as pool: 105 | addresses.update(pool.map(get_address, sorted(domains))) 106 | return addresses 107 | -------------------------------------------------------------------------------- /sevenseconds/helper/regioninfo.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | 3 | 4 | def get_regions(servicename: str): 5 | return boto3.session.Session().get_available_regions(servicename) 6 | -------------------------------------------------------------------------------- /tests/test_aws.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from unittest.mock import MagicMock 3 | from sevenseconds.helper.aws import get_account_id, get_az_names 4 | from sevenseconds.config.cloudtrail import configure_cloudtrail 5 | from sevenseconds.config.s3 import configure_s3_buckets 6 | 7 | 8 | def test_get_account_id(monkeypatch): 9 | sts = MagicMock( 10 | get_caller_identity=lambda: { 11 | "Account": "01234567", 12 | "Arn": "arn:aws:iam::01234567:assumed-role/Administrator/sevenseconds", 13 | "UserId": "ABCDEFGHIJKLMNOPQ:sevenseconds", 14 | } 15 | ) 16 | session = MagicMock(client=MagicMock(return_value=sts)) 17 | id = get_account_id(session) 18 | assert id == "01234567", "ID from current User" 19 | 20 | 21 | def test_get_az_names(monkeypatch): 22 | conn = MagicMock( 23 | describe_availability_zones=lambda **kargs: { 24 | "AvailabilityZones": [ 25 | {"ZoneName": "eu-west-1a", "RegionName": "eu-west-1", "State": "available", "Messages": []}, 26 | {"ZoneName": "eu-west-1b", "RegionName": "eu-west-1", "State": "available", "Messages": []}, 27 | {"ZoneName": "eu-west-1c", "RegionName": "eu-west-1", "State": "available", "Messages": []}, 28 | 
def test_configure_cloudtrail(monkeypatch):
    # Without a "cloudtrail" config section the function must only log info.
    def myinfo(text):
        assert "Found no Cloudtrail Section in Configfile." in text

    monkeypatch.setattr("clickclick.info", myinfo)
    account = MagicMock(name="name", config={})
    configure_cloudtrail(account)

    # Stand-in for the boto3 cloudtrail client: a plain class used as a
    # namespace — its functions take no `self` and are replaced per scenario.
    class _test:
        def _only_kwargs(f):
            # Decorator: reject positional args, mimicking the boto3 API.
            def _filter(*args, **kwargs):
                if args or len(kwargs) == 0:
                    raise TypeError("{} only accepts keyword arguments.".format(f.__name__))
                return f(**kwargs)

            return _filter

        def describe_trails():
            return {
                "trailList": [
                    {
                        "IncludeGlobalServiceEvents": True,
                        "Name": "Default",
                        "S3BucketName": "bucketname",
                        "S3KeyPrefix": "",
                    }
                ]
            }

        @_only_kwargs
        def update_trail(Name, S3KeyPrefix, S3BucketName, IncludeGlobalServiceEvents, **kwargs):
            assert Name == "Default", "update Default"
            assert S3BucketName == "bucketname", "set bucketname"
            assert S3KeyPrefix == "", "set directory prefix"
            assert IncludeGlobalServiceEvents is True, "Include global"

        @_only_kwargs
        def create_trail(Name, S3KeyPrefix, S3BucketName, IncludeGlobalServiceEvents, **kwargs):
            assert Name == "Default", "update Default"
            assert S3BucketName == "bucketname", "set bucketname"
            assert S3KeyPrefix == "", "set directory prefix"
            assert IncludeGlobalServiceEvents is True, "Include global"

        @_only_kwargs
        def start_logging(Name):
            assert Name == "Default", "start logging for Default"

        @_only_kwargs
        def stop_logging(Name):
            assert Name == "wrongconfig", "stop wrong configuration"

        @_only_kwargs
        def delete_trail(Name):
            assert Name == "wrongconfig", "remove wrong configuration"

        @_only_kwargs
        def get_trail_status(Name):
            return {"IsLogging": True}

    account = MagicMock(
        name="name",
        config={"cloudtrail": {"s3_bucket_name": "bucketname", "s3_key_prefix": ""}},
        client=MagicMock(return_value=_test),
    )
    # Scenario 1: trail already correct and logging -> nothing to change.
    configure_cloudtrail(account)
    # Scenario 2: logging stopped -> must be restarted.
    _test.get_trail_status = lambda Name: {"IsLogging": False}
    configure_cloudtrail(account)
    # Scenario 3: "Default" trail with outdated settings -> update_trail path.
    _test.get_trail_status = lambda Name: {"IsLogging": True}
    _test.describe_trails = lambda: {
        "trailList": [
            {
                "IncludeGlobalServiceEvents": False,
                "Name": "Default",
                "S3BucketName": "oldbucketname",
                "S3KeyPrefix": "dummy",
            }
        ]
    }
    configure_cloudtrail(account)
    # Scenario 4: foreign trail -> stop_logging/delete_trail/create_trail path.
    _test.describe_trails = lambda: {
        "trailList": [
            {
                "IncludeGlobalServiceEvents": False,
                "Name": "wrongconfig",
                "S3BucketName": "oldbucketname",
                "S3KeyPrefix": "dummy",
            }
        ]
    }
    configure_cloudtrail(account)


def test_configure_s3_buckets():
    # A bucket with creation_date=None counts as missing and must be created
    # with lifecycle, encryption and tagging applied.
    config = {
        "s3_buckets": {
            "bucket-1": {
                "name": "bucket-1",
                "regions": ["eu-central-1"],
                "lifecycle_configuration": {"Rules": [{"x": "y"}]},
                "encryption_config": {"Rules": [{"a": "b"}]},
                "tags": {"foo": "bar", "bee": "baz"},
            }
        }
    }
    account = MagicMock(config=config)
    # MagicMock memoizes calls, so the test sees the same mocked resources
    # that configure_s3_buckets() obtains internally.
    s3 = account.session.resource("s3", "eu-central-1")
    bucket = s3.Bucket("bucket-1")
    bucket.creation_date = None

    configure_s3_buckets(account)

    bucket.create.assert_called_once()
    s3.BucketLifecycle("bucket-1").put.assert_called_once_with(LifecycleConfiguration={"Rules": [{"x": "y"}]})
    s3.meta.client.put_bucket_encryption.assert_called_once_with(
        Bucket="bucket-1", ServerSideEncryptionConfiguration={"Rules": [{"a": "b"}]}
    )
    bucket.Tagging().put.assert_called_once_with(
        Tagging={"TagSet": [{"Key": "foo", "Value": "bar"}, {"Key": "bee", "Value": "baz"}]}
    )
def test_print_version():
    # `--version` must print the tool name with a real (non-"unknown") version.
    runner = CliRunner()

    with runner.isolated_filesystem():
        result = runner.invoke(cli, ['--version'], catch_exceptions=False)

    assert 'AWS Account Configurator' in result.output
    assert 'unknown' not in result.output
    assert result.exit_code == 0


def test_configure_nonexisting_account(monkeypatch):
    # Configuring an account absent from the config must report it by name.
    runner = CliRunner()
    config = {
        'version': SUPPORTED_CONFIG_VERSION,
        'accounts': {}
    }

    with runner.isolated_filesystem():
        with open('config.yaml', 'w') as fd:
            yaml.safe_dump(config, fd)
        result = runner.invoke(cli, ['configure', 'config.yaml', 'myaccount'], catch_exceptions=False)

    assert 'No configuration found for account myaccount' in result.output


def test_configure_missing_config_option(monkeypatch):
    # boto3 stub: enough API surface for the pre-flight checks to pass.
    myboto3 = MagicMock(list_account_aliases=lambda *args, **vargs: {'AccountAliases': ['myaccount']},
                        describe_availability_zones=lambda *args, **vargs: {
                            'AvailabilityZones': [
                                {
                                    'ZoneName': 'eu-west-1a',
                                    'RegionName': 'eu-west-1',
                                    'State': 'available',
                                    'Messages': []
                                }, {
                                    'ZoneName': 'eu-west-1b',
                                    'RegionName': 'eu-west-1',
                                    'State': 'available',
                                    'Messages': []
                                }, {
                                    'ZoneName': 'eu-west-1c',
                                    'RegionName': 'eu-west-1',
                                    'State': 'available',
                                    'Messages': []
                                }]})
    monkeypatch.setattr('boto3.client', lambda *args: myboto3)

    runner = CliRunner()

    # Config deliberately lacks "admin_account" to trigger the fatal error.
    config = {
        'version': SUPPORTED_CONFIG_VERSION,
        'global': {
            'base_ami': {
                'name': 'MyBaseAmi*',
                'is_public': False
            },
            'regions': ['region-1'],
            'cloudtrail': {
                's3_bucket_name': 'mybucket',
                's3_key_prefix': 'myprefix'
            },
            'domain': '{account_name}.example.org',
        },
        'accounts': {
            'myaccount': {},
            'mystaging': {}
        }}

    with runner.isolated_filesystem():
        with open('config.yaml', 'w') as fd:
            yaml.safe_dump(config, fd)
        # The 'my.*' pattern must match both configured accounts.
        result = runner.invoke(cli, ['configure', 'config.yaml', 'my.*'], catch_exceptions=False)

    assert 'Start configuration of: myaccount, mystaging' in result.output
    assert 'Missing Option "admin_account" please set Account Name for Main-Account!' in result.output
    # Supports only SAML Login at the moment
    # assert 'Creating VPC for 172.31.0.0/16.. OK' in result.output
    # assert 'Enabling CloudTrail.. OK' in result.output
    assert result.exit_code == 1


if __name__ == '__main__':
    pytest.main()
# Shared fixtures: two roles with a default attached policy and an inline
# policy each; the tests merge extra statements/policies on top of these.
SAMPLE_ROLES = {
    "Shibboleth-Administrator": {
        "attached_policies": ["arn:aws:iam::aws:policy/AdminDefaultPolicy"],
        "policy": {
            "Statement": [
                {"Effect": "Allow", "Resource": "Test", "Action": "foo:*"},
                {"Effect": "Deny", "Resource": "Test", "Action": "bar:*"},
            ]
        },
    },
    "Shibboleth-PowerUser": {
        "attached_policies": ["arn:aws:iam::aws:policy/PowerUserDefaultPolicy"],
        "policy": {
            "Statement": [
                {"Effect": "Allow", "Resource": "Test", "Action": "baz:*"},
            ]
        },
    },
}

# Additional inline statements for the Administrator role only.
SAMPLE_POLICIES = [
    {
        "role": "Shibboleth-Administrator",
        "statement": {"Effect": "Allow", "Resource": "Additional", "Action": "test:*"},
    },
    {
        "role": "Shibboleth-Administrator",
        "statement": {"Effect": "Deny", "Resource": "Additional", "Action": "abc:*"},
    },
]

# Additional managed policies for the PowerUser role only.
SAMPLE_ATTACHED_POLICIES = [
    {
        "role": "Shibboleth-PowerUser",
        "policies": ["arn:aws:iam::aws:policy/PolicyA", "arn:aws:iam::aws:policy/PolicyB"],
    }
]


def test_effective_policies_merge():
    # additional_policies statements must be appended to the targeted role's
    # inline policy; untouched roles stay identical.
    config = {
        "roles": SAMPLE_ROLES,
        "additional_policies": SAMPLE_POLICIES,
    }
    expected = {
        "Shibboleth-Administrator": {
            "attached_policies": ["arn:aws:iam::aws:policy/AdminDefaultPolicy"],
            "policy": {
                "Statement": [
                    {"Effect": "Allow", "Resource": "Test", "Action": "foo:*"},
                    {"Effect": "Deny", "Resource": "Test", "Action": "bar:*"},
                    {"Effect": "Allow", "Resource": "Additional", "Action": "test:*"},
                    {"Effect": "Deny", "Resource": "Additional", "Action": "abc:*"},
                ]
            },
        },
        "Shibboleth-PowerUser": {
            "attached_policies": [
                "arn:aws:iam::aws:policy/PowerUserDefaultPolicy",
            ],
            "policy": {
                "Statement": [
                    {"Effect": "Allow", "Resource": "Test", "Action": "baz:*"},
                ]
            },
        },
    }

    assert expected == iam.effective_roles(config)

    # check that the original config was not affected
    assert 2 == len(config["roles"]["Shibboleth-Administrator"]["policy"]["Statement"])


def test_effective_attached_policies_merge():
    # additional_attached_policies must be appended to the targeted role's
    # attached-policy list; other roles keep only their defaults.
    config = {
        "roles": SAMPLE_ROLES,
        "additional_attached_policies": SAMPLE_ATTACHED_POLICIES,
    }
    expected = {
        "Shibboleth-Administrator": [
            "arn:aws:iam::aws:policy/AdminDefaultPolicy",
        ],
        "Shibboleth-PowerUser": [
            "arn:aws:iam::aws:policy/PowerUserDefaultPolicy",
            "arn:aws:iam::aws:policy/PolicyA",
            "arn:aws:iam::aws:policy/PolicyB",
        ],
    }

    for role_name, role_cfg in SAMPLE_ROLES.items():
        assert expected[role_name] == iam.effective_attached_policies(config, role_name, role_cfg)


@pytest.mark.parametrize(
    "roles",
    [
        # Dropped role
        {"Shibboleth-Administrator": {"drop": True}},
        # Missing role
        {
            "Shibboleth-PowerUser": {
                "policy": {
                    "Statement": [
                        {"Effect": "Allow", "Resource": "Test", "Action": "baz:*"},
                    ]
                }
            }
        },
        # No policy
        {"Shibboleth-Administrator": {}},
        # Policy but no statement
        {"Shibboleth-Administrator": {"policy": {}}},
    ],
)
def test_effective_policies_fail_invalid(roles):
    # Invalid or incomplete role definitions must raise instead of producing
    # a partial merge.
    config = {
        "roles": roles,
        "additional_policies": SAMPLE_POLICIES,
    }

    with pytest.raises(ValueError):
        iam.effective_roles(config)
-------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length=120 3 | extend-ignore = E741 4 | # travis-ci.org allows only 1 job 5 | jobs=1 6 | --------------------------------------------------------------------------------